diff --git a/examples/logging/anthropic_async_messages.py b/examples/logging/anthropic_async_messages.py
index 88268728..56941c32 100644
--- a/examples/logging/anthropic_async_messages.py
+++ b/examples/logging/anthropic_async_messages.py
@@ -2,6 +2,7 @@
 
 import anthropic
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -18,6 +19,7 @@ async def main() -> None:
     )
 
     print(message)
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/anthropic_async_messages_stream.py b/examples/logging/anthropic_async_messages_stream.py
index 77fdc6c1..4db393f5 100644
--- a/examples/logging/anthropic_async_messages_stream.py
+++ b/examples/logging/anthropic_async_messages_stream.py
@@ -2,6 +2,7 @@
 
 import anthropic
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -30,6 +31,7 @@ async def main() -> None:
     # inside of the context manager
     accumulated = await stream.get_final_message()
     print("accumulated message: ", accumulated.to_json())
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/anthropic_async_messages_stream_handler.py b/examples/logging/anthropic_async_messages_stream_handler.py
index 382bbb6b..9cc67730 100644
--- a/examples/logging/anthropic_async_messages_stream_handler.py
+++ b/examples/logging/anthropic_async_messages_stream_handler.py
@@ -5,6 +5,7 @@
 from anthropic.types import MessageStreamEvent
 from typing_extensions import override
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -34,5 +35,7 @@ async def main() -> None:
     accumulated = await stream.get_final_message()
     print("accumulated message: ", accumulated.to_json())
 
+    await finalize()
+
 
 asyncio.run(main())
diff --git a/examples/logging/anthropic_async_tools_stream.py b/examples/logging/anthropic_async_tools_stream.py
index f0eaff2d..445e7c4c 100644
--- a/examples/logging/anthropic_async_tools_stream.py
+++ b/examples/logging/anthropic_async_tools_stream.py
@@ -3,6 +3,7 @@
 import anthropic
 from anthropic import AsyncAnthropic
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -42,6 +43,7 @@ async def run_conversation():
         max_tokens=1024,
     ) as stream:
         await stream.until_done()
+    await finalize()
 
 
 asyncio.run(run_conversation())
diff --git a/examples/logging/anthropic_async_tools_stream_handler.py b/examples/logging/anthropic_async_tools_stream_handler.py
index cf8826eb..c0508ca0 100644
--- a/examples/logging/anthropic_async_tools_stream_handler.py
+++ b/examples/logging/anthropic_async_tools_stream_handler.py
@@ -5,6 +5,7 @@
 from anthropic.lib.streaming.beta import AsyncToolsBetaMessageStream
 from typing_extensions import override
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -49,6 +50,7 @@ async def main() -> None:
     ) as stream:
         await stream.until_done()
 
+    await finalize()
     print()
 
 
diff --git a/examples/logging/magentic_async_chat_prompt.py b/examples/logging/magentic_async_chat_prompt.py
index 06b3c107..6c66d7b8 100644
--- a/examples/logging/magentic_async_chat_prompt.py
+++ b/examples/logging/magentic_async_chat_prompt.py
@@ -4,6 +4,7 @@
 from magentic import UserMessage, chatprompt
 from magentic.chat_model.anthropic_chat_model import AnthropicChatModel
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -18,6 +19,7 @@ async def main(topic: str) -> str:
     async def tell_joke(topic: str) -> str: ...
 
     print(await tell_joke(topic))
+    await finalize()
 
 
 asyncio.run(main("cats"))
diff --git a/examples/logging/magentic_async_multi_session_tags.py b/examples/logging/magentic_async_multi_session_tags.py
index c54ccaa0..1c51a952 100644
--- a/examples/logging/magentic_async_multi_session_tags.py
+++ b/examples/logging/magentic_async_multi_session_tags.py
@@ -3,6 +3,7 @@
 import openai
 from magentic import AsyncStreamedStr, OpenaiChatModel, prompt
 
+from log10._httpx_utils import finalize
 from log10.load import log10, log10_session
 
 
@@ -29,5 +30,7 @@ async def main():
     async for chunk in result:
         print(chunk, end="", flush=True)
 
+    await finalize()
+
 
 asyncio.run(main())
diff --git a/examples/logging/magentic_async_parallel_function_call.py b/examples/logging/magentic_async_parallel_function_call.py
index 9f419817..6dafe63e 100644
--- a/examples/logging/magentic_async_parallel_function_call.py
+++ b/examples/logging/magentic_async_parallel_function_call.py
@@ -3,6 +3,7 @@
 import openai
 from magentic import AsyncParallelFunctionCall, prompt
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -25,6 +26,7 @@ async def main():
     output = await plus_and_minus(2, 3)
     async for chunk in output:
         print(chunk)
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/magentic_async_stream_logging.py b/examples/logging/magentic_async_stream_logging.py
index db8b3ec9..ff3ac2c1 100644
--- a/examples/logging/magentic_async_stream_logging.py
+++ b/examples/logging/magentic_async_stream_logging.py
@@ -3,6 +3,7 @@
 import openai
 from magentic import AsyncStreamedStr, prompt
 
+from log10._httpx_utils import finalize
 from log10.load import log10, log10_session
 
 
@@ -19,6 +20,7 @@ async def main():
     output = await tell_story("Europe.")
     async for chunk in output:
         print(chunk, end="", flush=True)
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/magentic_async_widget.py b/examples/logging/magentic_async_widget.py
index f83b3cf8..f7ec3537 100644
--- a/examples/logging/magentic_async_widget.py
+++ b/examples/logging/magentic_async_widget.py
@@ -5,6 +5,7 @@
 from magentic import OpenaiChatModel, prompt
 from pydantic import BaseModel
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -31,6 +32,7 @@ async def _generate_title_and_description(query: str, widget_data: str) -> Widge
 async def main():
     r = await _generate_title_and_description(query="Give me a summary of AAPL", widget_data="")
     rich.print(r)
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/openai_async_logging.py b/examples/logging/openai_async_logging.py
index e89d0e2b..244190c9 100644
--- a/examples/logging/openai_async_logging.py
+++ b/examples/logging/openai_async_logging.py
@@ -3,6 +3,7 @@
 import openai
 from openai import AsyncOpenAI
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -17,6 +18,7 @@ async def main():
         messages=[{"role": "user", "content": "Say this is a test"}],
     )
     print(completion.choices[0].message.content)
+    await finalize()
 
 
 asyncio.run(main())
diff --git a/examples/logging/openai_async_tools_stream.py b/examples/logging/openai_async_tools_stream.py
index ea0e56d7..3b444d8e 100644
--- a/examples/logging/openai_async_tools_stream.py
+++ b/examples/logging/openai_async_tools_stream.py
@@ -5,6 +5,7 @@
 from openai import AsyncOpenAI
 from rich import print
 
+from log10._httpx_utils import finalize
 from log10.load import log10
 
 
@@ -72,6 +73,7 @@ async def run_conversation():
         else:
             tool_calls[-1].function.arguments += tc[0].function.arguments
 
     print(tool_calls)
+    await finalize()
     return
diff --git a/tests/test_litellm.py b/tests/test_litellm.py
index 457f730c..91ad29cd 100644
--- a/tests/test_litellm.py
+++ b/tests/test_litellm.py
@@ -51,7 +51,6 @@ async def test_completion_async_stream(anthropic_model):
 
     ## This test doesn't get completion_id from the session
     ## and logged a couple times during debug mode, punt this for now
-    await finalize()
     assert output, "No output from the model."
 
 
@@ -153,5 +152,4 @@ async def test_image_async_stream(session, anthropic_model):
         output += chunk.choices[0].delta.content
 
     time.sleep(3)
-    await finalize()
     _LogAssertion(completion_id=session.last_completion_id(), message_content=output).assert_chat_response()
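
Every example change above applies the same pattern: import finalize from log10._httpx_utils and await it once at the end of the async entrypoint, after the last model call. A minimal sketch of a patched file, modeled on openai_async_logging.py (the model name and the comment about flushing pending log requests are assumptions, not taken from this diff):

import asyncio

import openai
from openai import AsyncOpenAI

from log10._httpx_utils import finalize
from log10.load import log10

# Patch the openai module so completions are logged to log10.
log10(openai)

client = AsyncOpenAI()


async def main():
    completion = await client.chat.completions.create(
        model="gpt-4o-mini",  # assumed model; the diff does not show one
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    print(completion.choices[0].message.content)
    # Assumption: finalize() flushes async log requests queued by log10
    # before asyncio.run() closes the event loop.
    await finalize()


asyncio.run(main())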