From 56df6f9f15f78f23b40eaed8f80d9a5df155362d Mon Sep 17 00:00:00 2001
From: Wenzhe Xue
Date: Wed, 21 Feb 2024 22:42:47 -0800
Subject: [PATCH] add examples

---
 .../logging/magentic_async_stream_logging.py  | 25 +++++++++++++++++++
 examples/logging/openai_async_logging.py      | 22 ++++++++++++++++
 .../logging/openai_async_stream_logging.py    | 24 ++++++++++++++++++
 3 files changed, 71 insertions(+)
 create mode 100644 examples/logging/magentic_async_stream_logging.py
 create mode 100644 examples/logging/openai_async_logging.py
 create mode 100644 examples/logging/openai_async_stream_logging.py

diff --git a/examples/logging/magentic_async_stream_logging.py b/examples/logging/magentic_async_stream_logging.py
new file mode 100644
index 00000000..c1cf5269
--- /dev/null
+++ b/examples/logging/magentic_async_stream_logging.py
@@ -0,0 +1,25 @@
+import asyncio
+
+import openai
+from magentic import AsyncStreamedStr, prompt
+
+from log10.load import log10, log10_session
+
+
+log10(openai)
+
+
+@prompt("Tell me a 200-word story about {topic}")
+async def tell_story(topic: str) -> AsyncStreamedStr:
+    ...
+
+
+async def main():
+    with log10_session(tags=["async_tag"]):
+        output = await tell_story("Europe.")
+        async for chunk in output:
+            print(chunk, end="", flush=True)
+
+
+# Python 3.7+
+asyncio.run(main())
diff --git a/examples/logging/openai_async_logging.py b/examples/logging/openai_async_logging.py
new file mode 100644
index 00000000..e89d0e2b
--- /dev/null
+++ b/examples/logging/openai_async_logging.py
@@ -0,0 +1,22 @@
+import asyncio
+
+import openai
+from openai import AsyncOpenAI
+
+from log10.load import log10
+
+
+log10(openai)
+
+client = AsyncOpenAI()
+
+
+async def main():
+    completion = await client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Say this is a test"}],
+    )
+    print(completion.choices[0].message.content)
+
+
+asyncio.run(main())
diff --git a/examples/logging/openai_async_stream_logging.py b/examples/logging/openai_async_stream_logging.py
new file mode 100644
index 00000000..960dc784
--- /dev/null
+++ b/examples/logging/openai_async_stream_logging.py
@@ -0,0 +1,24 @@
+import asyncio
+
+import openai
+from openai import AsyncOpenAI
+
+from log10.load import log10
+
+
+log10(openai)
+
+client = AsyncOpenAI()
+
+
+async def main():
+    stream = await client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Count to 50."}],
+        stream=True,
+    )
+    async for chunk in stream:
+        print(chunk.choices[0].delta.content or "", end="", flush=True)
+
+
+asyncio.run(main())