Skip to content

Commit

Permalink
Add openai.AsyncOpenAI and stream and function support (#109)
Browse files Browse the repository at this point in the history
* add logging for AsyncOpenAI
- currently supports only chat completion calls
- used httpx event hooks and transport to trigger logging

* add examples

* minor

* Hot fix for OpenBB function calling

* Hot fix for OpenBB function calling

* Tools and functions support

* Remove print statements

* fix response with message

* Fix non-message prompts

* flatten only for openai chat completions

* add an example of regular magentic function call

* enable stream function

* minor

* skip two xdoctest for openai v0

---------

Co-authored-by: Niklas Nielsen <nik@qni.dk>
  • Loading branch information
wenzhe-log10 and nqn committed Feb 23, 2024
1 parent c3537da commit ae273be
Show file tree
Hide file tree
Showing 8 changed files with 546 additions and 20 deletions.
34 changes: 34 additions & 0 deletions examples/logging/magentic_async_function_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import openai
from magentic import AsyncStreamedStr, FunctionCall, prompt

from log10.load import log10


# Patch the openai module in place so subsequent completion calls are logged by log10.
log10(openai)


def add(x: int, y: int) -> int:
    """Return the sum of the two integers."""
    total = x + y
    return total


# The body is intentionally empty: magentic's @prompt generates the
# implementation from the template string and the declared return type.
# Passing functions=[add] allows the model to answer with a FunctionCall.
@prompt("What is 1+1? Use tools", functions=[add])
async def agent() -> AsyncStreamedStr:
    ...


# Define an async main function
async def main():
    """Run the agent and print either the tool call or the streamed text."""
    result = await agent()
    if not isinstance(result, FunctionCall):
        # Streamed answer: print chunks as they arrive.
        async for piece in result:
            print(piece, end="", flush=True)
    else:
        print(result)


# Running the main function using asyncio
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    # asyncio is needed only here, so it is imported locally.
    import asyncio

    asyncio.run(main())
24 changes: 24 additions & 0 deletions examples/logging/magentic_async_stream_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import asyncio

import openai
from magentic import AsyncStreamedStr, prompt

from log10.load import log10, log10_session


# Patch the openai module in place so subsequent completion calls are logged by log10.
log10(openai)


# The body is intentionally empty: magentic's @prompt generates the
# implementation from the template; the AsyncStreamedStr return type
# requests an asynchronously streamed completion.
@prompt("Tell me a 200-word story about {topic}")
async def tell_story(topic: str) -> AsyncStreamedStr:
    ...


async def main():
    """Stream a generated story to stdout inside a tagged log10 session."""
    with log10_session(tags=["async_tag"]):
        story = await tell_story("Europe.")
        async for piece in story:
            print(piece, end="", flush=True)


# Guard the entry point so importing this module does not trigger an API call.
if __name__ == "__main__":
    asyncio.run(main())
31 changes: 31 additions & 0 deletions examples/logging/magentic_function_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# taken from Magentic README
# https://github.com/jackmpcollins/magentic/blob/2493419f2db3a3be58fb308d7df51a51bf1989c1/README.md#usage

from typing import Literal

import openai
from magentic import FunctionCall, prompt

from log10.load import log10


# Patch the openai module in place so subsequent completion calls are logged by log10.
log10(openai)


def activate_oven(temperature: int, mode: Literal["broil", "bake", "roast"]) -> str:
    """Turn the oven on with the provided settings."""
    status = f"Preheating to {temperature} F with mode {mode}"
    return status


@prompt(
    "Prepare the oven so I can make {food}",
    functions=[activate_oven],
)
def configure_oven(food: str) -> FunctionCall[str]:
    # Empty body: magentic's @prompt supplies the implementation; the model
    # is expected to respond with a call to activate_oven (FunctionCall[str]).
    ...


# Guard the demo so importing this module does not trigger an API call.
if __name__ == "__main__":
    output = configure_oven("cookies!")
    # FunctionCall(<function activate_oven at 0x1105a6200>, temperature=350, mode='bake')
    print(output())
    # 'Preheating to 350 F with mode bake'
22 changes: 22 additions & 0 deletions examples/logging/openai_async_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import asyncio

import openai
from openai import AsyncOpenAI

from log10.load import log10


# Patch the openai module before constructing the client so the async
# chat completion calls below are logged by log10.
log10(openai)

client = AsyncOpenAI()


async def main():
    """Send one chat completion request and print the model's reply."""
    conversation = [{"role": "user", "content": "Say this is a test"}]
    completion = await client.chat.completions.create(
        model="gpt-4",
        messages=conversation,
    )
    reply = completion.choices[0].message.content
    print(reply)


# Guard the entry point so importing this module does not trigger an API call.
if __name__ == "__main__":
    asyncio.run(main())
24 changes: 24 additions & 0 deletions examples/logging/openai_async_stream_logging.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import asyncio

import openai
from openai import AsyncOpenAI

from log10.load import log10


# Patch the openai module before constructing the client so the async
# streamed chat completion calls below are logged by log10.
log10(openai)

client = AsyncOpenAI()


async def main():
    """Request a streamed chat completion and print chunks as they arrive."""
    conversation = [{"role": "user", "content": "Count to 50."}]
    stream = await client.chat.completions.create(
        model="gpt-4",
        messages=conversation,
        stream=True,
    )
    async for part in stream:
        text = part.choices[0].delta.content
        print(text or "", end="", flush=True)


# Guard the entry point so importing this module does not trigger an API call.
if __name__ == "__main__":
    asyncio.run(main())
91 changes: 91 additions & 0 deletions examples/logging/openai_tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
import json

from log10.load import OpenAI


# OpenAI here comes from log10.load rather than the openai package, so
# calls made through this client go through log10's wrapper.
client = OpenAI()


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    # Hard-coded demo data, matched by substring against the lowercased input.
    known_cities = [
        ("tokyo", {"location": "Tokyo", "temperature": "10"}),
        ("san francisco", {"location": "San Francisco", "temperature": "72"}),
        ("paris", {"location": "Paris", "temperature": "22"}),
    ]
    needle = location.lower()
    for key, payload in known_cities:
        if key in needle:
            return json.dumps({**payload, "unit": unit})
    return json.dumps({"location": location, "temperature": "unknown"})


def run_conversation():
    """Run one tool-calling round trip with the OpenAI chat API.

    Sends a weather question, lets the model request calls to
    ``get_current_weather``, executes them locally, and feeds the results
    back so the model can produce a final answer.

    Returns:
        The final chat completion. If the model answers directly without
        requesting any tool call, the first completion is returned (the
        original version fell through and implicitly returned None here).
    """
    # Step 1: send the conversation and available functions to the model
    messages = [
        {
            "role": "user",
            "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
        }
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            },
        }
    ]
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,
        tools=tools,
        tool_choice="auto",  # auto is default, but we'll be explicit
    )
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    # Step 2: check if the model wanted to call a function
    if not tool_calls:
        # The model answered directly; return that answer instead of
        # falling off the end of the function.
        return response
    # Step 3: call the function
    # Note: the JSON response may not always be valid; be sure to handle errors
    available_functions = {
        "get_current_weather": get_current_weather,
    }  # only one function in this example, but you can have multiple
    messages.append(response_message)  # extend conversation with assistant's reply
    # Step 4: send the info for each function call and function response to the model
    for tool_call in tool_calls:
        function_name = tool_call.function.name
        function_to_call = available_functions[function_name]
        function_args = json.loads(tool_call.function.arguments)
        function_response = function_to_call(
            location=function_args.get("location"),
            unit=function_args.get("unit"),
        )
        messages.append(
            {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": function_response,
            }
        )  # extend conversation with function response
    second_response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,
    )  # get a new response from the model where it can see the function response
    return second_response


# Guard the demo so importing this module does not trigger API calls.
if __name__ == "__main__":
    print(run_conversation())
Loading

0 comments on commit ae273be

Please sign in to comment.