Skip to content

Commit

Permalink
Merge branch 'main' into kt/trigger-tests-in-PR
Browse files Browse the repository at this point in the history
  • Loading branch information
kxtran committed May 29, 2024
2 parents 6be99d7 + bc72c3e commit 0e4c483
Show file tree
Hide file tree
Showing 18 changed files with 1,064 additions and 129 deletions.
23 changes: 23 additions & 0 deletions examples/logging/anthropic_async_messages.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import asyncio

import anthropic

from log10.load import log10


log10(anthropic)

client = anthropic.AsyncAnthropic()


async def main() -> None:
    """Send one non-streaming tools-beta message request and print the reply."""
    # NOTE(review): claude-instant-1.2 is a legacy model — confirm the beta
    # tools messages endpoint accepts it.
    response = await client.beta.tools.messages.create(
        model="claude-instant-1.2",
        max_tokens=1000,
        messages=[{"role": "user", "content": "Say hello!"}],
    )
    print(response)


asyncio.run(main())
35 changes: 35 additions & 0 deletions examples/logging/anthropic_async_messages_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import asyncio

import anthropic

from log10.load import log10


log10(anthropic)

client = anthropic.AsyncAnthropic()


async def main() -> None:
    """Stream a reply, echoing text deltas live, then print the accumulated message."""
    prompt = [
        {
            "role": "user",
            "content": "Say hello there!",
        }
    ]
    async with client.messages.stream(
        max_tokens=1024,
        messages=prompt,
        model="claude-3-haiku-20240307",
    ) as stream:
        async for piece in stream.text_stream:
            print(piece, end="", flush=True)
        print()

    # you can still get the accumulated final message outside of
    # the context manager, as long as the entire stream was consumed
    # inside of the context manager
    final = await stream.get_final_message()
    print("accumulated message: ", final.to_json())


asyncio.run(main())
38 changes: 38 additions & 0 deletions examples/logging/anthropic_async_messages_stream_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import asyncio

import anthropic
from anthropic import AsyncAnthropic, AsyncMessageStream
from anthropic.types import MessageStreamEvent
from typing_extensions import override

from log10.load import log10


log10(anthropic)

client = AsyncAnthropic()


class MyStream(AsyncMessageStream):
    """Message-stream subclass that prints every raw stream event as it fires."""

    @override
    async def on_stream_event(self, event: MessageStreamEvent) -> None:
        print("on_event fired with:", event)


async def main() -> None:
    """Stream a message through the custom handler and print the final message."""
    prompt = [
        {
            "role": "user",
            "content": "Say hello there!",
        }
    ]
    async with client.messages.stream(
        max_tokens=1024,
        messages=prompt,
        model="claude-3-haiku-20240307",
        event_handler=MyStream,
    ) as stream:
        final = await stream.get_final_message()
        print("accumulated message: ", final.to_json())


asyncio.run(main())
47 changes: 47 additions & 0 deletions examples/logging/anthropic_async_tools_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import asyncio

import anthropic
from anthropic import AsyncAnthropic

from log10.load import log10


log10(anthropic)

client = AsyncAnthropic()

# JSON schema for the single weather-lookup tool exposed to the model.
WEATHER_TOOL = {
    "name": "get_weather",
    "description": "Get the weather in a given location",
    "input_schema": {
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "description": 'The unit of temperature, either "celsius" or "fahrenheit"',
            },
        },
        "required": ["location"],
    },
}


async def run_conversation():
    """Stream a tools-beta request and drain the stream to completion."""
    async with client.beta.tools.messages.stream(
        model="claude-3-haiku-20240307",
        tools=[WEATHER_TOOL],
        messages=[
            {
                "role": "user",
                "content": "What's the weather like in San Francisco?",
            }
        ],
        max_tokens=1024,
    ) as stream:
        await stream.until_done()


asyncio.run(run_conversation())
55 changes: 55 additions & 0 deletions examples/logging/anthropic_async_tools_stream_handler.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
import asyncio

import anthropic
from anthropic import AsyncAnthropic
from anthropic.lib.streaming.beta import AsyncToolsBetaMessageStream
from typing_extensions import override

from log10.load import log10


log10(anthropic)


client = AsyncAnthropic()

# JSON schema for the single weather-lookup tool offered to the model.
WEATHER_TOOL = {
    "name": "get_weather",
    "description": "Get the weather at a specific location.",
    "input_schema": {
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "description": "Unit for the output",
            },
        },
        "required": ["location"],
    },
}


class MyHandler(AsyncToolsBetaMessageStream):
    """Tools-beta stream handler that prints each partial-input JSON delta."""

    @override
    async def on_input_json(self, delta: str, snapshot: object) -> None:
        print(f"delta: {repr(delta)}")
        print(f"snapshot: {snapshot}")
        print()


async def main() -> None:
    """Stream a tools-beta request through MyHandler and wait until it finishes."""
    async with client.beta.tools.messages.stream(
        max_tokens=1024,
        model="claude-3-haiku-20240307",
        tools=[WEATHER_TOOL],
        messages=[{"role": "user", "content": "What is the weather in SF?"}],
        event_handler=MyHandler,
    ) as stream:
        await stream.until_done()

    print()


asyncio.run(main())
21 changes: 21 additions & 0 deletions examples/logging/anthropic_messages_not_given.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from anthropic import NOT_GIVEN

from log10.load import Anthropic


client = Anthropic()

# Passing NOT_GIVEN for tools/tool_choice is the SDK's explicit way of
# omitting those parameters from the request entirely.
request_kwargs = dict(
    model="claude-3-haiku-20240307",
    messages=[
        {
            "role": "user",
            "content": "tell a short joke.",
        },
    ],
    max_tokens=1000,
    tools=NOT_GIVEN,
    tool_choice=NOT_GIVEN,
)
completion = client.beta.tools.messages.create(**request_kwargs)

print(completion.content[0].text)
35 changes: 35 additions & 0 deletions examples/logging/anthropic_tools_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import anthropic

from log10.load import log10


log10(anthropic)


client = anthropic.Anthropic()

# JSON schema for the single weather-lookup tool offered to the model.
weather_tool = {
    "name": "get_weather",
    "description": "Get the weather at a specific location",
    "input_schema": {
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
            "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "description": "Unit for the output",
            },
        },
        "required": ["location"],
    },
}

with client.beta.tools.messages.stream(
    model="claude-3-haiku-20240307",
    tools=[weather_tool],
    messages=[{"role": "user", "content": "What is the weather in SF?"}],
    max_tokens=1024,
) as stream:
    # Print each streamed event as it arrives.
    for event in stream:
        print(event)
23 changes: 23 additions & 0 deletions examples/logging/magentic_async_chat_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import asyncio

import anthropic
from magentic import UserMessage, chatprompt
from magentic.chat_model.anthropic_chat_model import AnthropicChatModel

from log10.load import log10


log10(anthropic)


async def main(topic: str) -> str:
    """Build an async chat prompt bound to Claude 3 Opus and print its joke."""

    @chatprompt(
        UserMessage(f"Tell me a joke about {topic}"),
        model=AnthropicChatModel("claude-3-opus-20240229"),
    )
    async def tell_joke(topic: str) -> str: ...

    joke = await tell_joke(topic)
    print(joke)


asyncio.run(main("cats"))
60 changes: 60 additions & 0 deletions examples/logging/tags_nested.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from log10.load import OpenAI, log10_session


client = OpenAI()


def _complete(prompt: str):
    """Issue one completion with the shared demo parameters and return the raw response.

    Extracted because the five demo calls below differed only in the prompt text;
    everything else (model, sampling parameters) is identical.
    """
    return client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        temperature=0,
        max_tokens=1024,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )


# Outermost call: no session active, so no tags should be attached.
print(_complete("I am demonstrating nested tags. Write a test case for this. This is the outer most call without any tags."))

with log10_session(tags=["outer_tag"]):
    # Inside the outer session: tagged with ["outer_tag"].
    print(_complete("I am demonstrating nested tags. Write a test case for this. This is a inner call with tags."))

    with log10_session(tags=["inner_tag"]):
        # Innermost session: tagged with ["inner_tag"].
        print(_complete("I am demonstrating nested tags. Write a test case for this. This is the inner most call with tags."))

    # Back in the outer session after the inner one closed: should carry the
    # outer tag again.
    print(_complete("I am demonstrating nested tags. Write a test case for this. This is a inner call which should have outer tag."))

# Outside all sessions again: no tags, mirroring the first call.
print(_complete("I am demonstrating nested tags. Write a test case for this. This is the outer most call without any tags (final call)"))
Loading

0 comments on commit 0e4c483

Please sign in to comment.