@jaseci_action(act_group=["openai"], allow_remote=True)
def chat_stream(
    messages: list,
    model: str = "gpt-3.5-turbo",
    temperature: float = 1,
    top_p: float = 1,
    n: int = 1,
    stop: Union[str, list] = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    **kwargs
):
    """
    Stream chat-completion output from an OpenAI chat model, one delta at a time.

    Unlike ``chat``, this is a generator: it opens a streaming completion
    (``stream=True``) and yields each incremental message fragment as it
    arrives, instead of waiting for — and returning — the full response.

    Parameters:
    ----------
    messages : list of dict
        The conversation to prompt the model with (role/content message dicts).
    model : str, optional (default='gpt-3.5-turbo')
        The name of the chat model to use for generating responses.
    temperature : float, optional (default=1.0)
        Controls the randomness of the generated responses. Higher values will result in more varied responses.
    top_p : float, optional (default=1.0)
        Controls the diversity of the generated responses. Lower values will result in more conservative responses.
    n : int, optional (default=1)
        The number of completion choices requested from the API.
        NOTE(review): only ``choices[0]`` is yielded below, so fragments from
        choices 1..n-1 are dropped — confirm whether n > 1 is a supported case.
    stop : str, list of str, or None, optional (default=None)
        The sequence at which the model should stop generating text.
        If a list is provided, the model will stop at any of the specified sequences.
    presence_penalty : float, optional (default=0.0)
        Controls the model's tendency to generate new words or phrases. Higher values will result in more novel responses.
    frequency_penalty : float, optional (default=0.0)
        Controls the model's tendency to repeat words or phrases. Higher values will result in less repetitive responses.

    Yields:
    ------
    delta : dict
        The ``delta`` object of the first choice of each streamed chunk,
        converted to a plain dict — an incremental fragment of the reply
        (e.g. a ``content`` piece), not the complete message.
    """
    # stream=True makes the client return an iterator of chunk objects;
    # each chunk's first choice carries an incremental "delta" fragment.
    for resp in client().chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stop=stop,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        stream=True,
        **kwargs,
    ):
        yield resp.choices[0].delta.dict()