Skip to content

Commit

Permalink
Merge branch 'main' into stabilization
Browse files Browse the repository at this point in the history
  • Loading branch information
siyangqiu authored Jul 30, 2024
2 parents c2d9170 + 4dc0382 commit 13630be
Show file tree
Hide file tree
Showing 50 changed files with 3,850 additions and 69 deletions.
36 changes: 36 additions & 0 deletions .github/workflows/notebook-gen.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
name: Check Generated Files

on: push

jobs:
  check-diff:
    runs-on: ubuntu-latest
    steps:

      - name: Checkout repository
        uses: actions/checkout@v2

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.11'

      - name: Install Dependencies
        if: always()
        run: |
          python -m pip install --upgrade pip
          # Install your project dependencies here if necessary

      - name: Run generate script
        if: always()
        run: python docs/v1/examples/notebooks/generate.py

      - name: Check for differences
        if: always()
        run: |
          # $(...) is the modern, nestable form of command substitution.
          if [[ $(git status --porcelain) ]]; then
            # Single-quote the message: inside double quotes, the backticks
            # around the command would be EXECUTED as command substitution
            # instead of being printed literally.
            echo 'Notebooks have been changed without being regenerated. Please run "python docs/v1/examples/notebooks/generate.py".'
            exit 1
          else
            echo "No changes were detected. Notebooks up to date 💅🏻"
          fi
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
<p align="center">
<a href="https://twitter.com/agentopsai/">🐦 Twitter</a>
<span>&nbsp;&nbsp;&nbsp;&nbsp;</span>
<a href="https://discord.gg/JHPt4C7r">📢 Discord</a>
<a href="https://discord.gg/FagdcwwXRR">📢 Discord</a>
<span>&nbsp;&nbsp;&nbsp;&nbsp;</span>
<a href="https://app.agentops.ai/?ref=gh">🖇️ AgentOps</a>
<span>&nbsp;&nbsp;&nbsp;&nbsp;</span>
Expand Down Expand Up @@ -307,4 +307,4 @@ Check out our growth in the community:
|<img class="avatar mr-2" src="https://avatars.githubusercontent.com/u/109994880?s=40&v=4" width="20" height="20" alt=""> &nbsp; [bhancockio](https://github.com/bhancockio) / [chatgpt4o-analysis](https://github.com/bhancockio/chatgpt4o-analysis) | 16 |


_Generated using [github-dependents-info](https://github.com/nvuillam/github-dependents-info), by [Nicolas Vuillamy](https://github.com/nvuillam)_
_Generated using [github-dependents-info](https://github.com/nvuillam/github-dependents-info), by [Nicolas Vuillamy](https://github.com/nvuillam)_
169 changes: 169 additions & 0 deletions agentops/llm_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ class LlmTracker:
"5.4.0": ("chat", "chat_stream"),
},
"ollama": {"0.0.1": ("chat", "Client.chat", "AsyncClient.chat")},
"groq": {
"0.9.0": ("Client.chat", "AsyncClient.chat"),
},
}

def __init__(self, client):
Expand Down Expand Up @@ -487,6 +490,128 @@ def generator():
self._safe_record(session, self.llm_event)
return response

def _handle_response_groq(
    self, response, kwargs, init_timestamp, session: Optional[Session] = None
):
    """Handle a response from the Groq client and record it as an LLMEvent.

    Supports plain (non-streaming) chat completions as well as synchronous
    and asynchronous streams. For streams, the returned iterator is wrapped
    so that each chunk is accumulated into one full completion, which is
    recorded when the stream reports a finish_reason.

    Args:
        response: Object returned by groq's chat-completion ``create``.
        kwargs: Keyword arguments the caller passed to ``create``.
        init_timestamp: ISO timestamp captured before the API call was made.
        session: Optional AgentOps session to attribute the event to.

    Returns:
        The original response, or a wrapping (async) generator for streams.
    """
    from groq import AsyncStream, Stream
    from groq.resources.chat import AsyncCompletions
    from groq.types.chat import ChatCompletionChunk

    self.llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
    if session is not None:
        self.llm_event.session_id = session.session_id

    def handle_stream_chunk(chunk: ChatCompletionChunk):
        # NOTE: prompt/completion usage is not returned when streaming.
        # Keep the first ChatCompletionChunk as the base object and fold the
        # deltas of all subsequent chunks into it to build one completion.
        if self.llm_event.returns is None:
            self.llm_event.returns = chunk

        try:
            accumulated_delta = self.llm_event.returns.choices[0].delta
            self.llm_event.agent_id = check_call_stack_for_agent_id()
            self.llm_event.model = chunk.model
            self.llm_event.prompt = kwargs["messages"]

            # NOTE: We assume only choices[0] is relevant for the completion.
            choice = chunk.choices[0]

            if choice.delta.content:
                accumulated_delta.content += choice.delta.content

            if choice.delta.role:
                accumulated_delta.role = choice.delta.role

            if choice.delta.tool_calls:
                accumulated_delta.tool_calls = choice.delta.tool_calls

            if choice.delta.function_call:
                accumulated_delta.function_call = choice.delta.function_call

            if choice.finish_reason:
                # Streaming is done. Record the accumulated LLMEvent.
                self.llm_event.returns.choices[0].finish_reason = (
                    choice.finish_reason
                )
                self.llm_event.completion = {
                    "role": accumulated_delta.role,
                    "content": accumulated_delta.content,
                    "function_call": accumulated_delta.function_call,
                    "tool_calls": accumulated_delta.tool_calls,
                }
                self.llm_event.end_timestamp = get_ISO_time()

                self._safe_record(session, self.llm_event)
        except Exception as e:
            # Best-effort telemetry: never let a parsing failure break the
            # caller's stream — record an ErrorEvent and warn instead.
            self._safe_record(
                session, ErrorEvent(trigger_event=self.llm_event, exception=e)
            )

            kwargs_str = pprint.pformat(kwargs)
            chunk = pprint.pformat(chunk)
            logger.warning(
                f"Unable to parse a chunk for LLM call. Skipping upload to AgentOps\n"
                f"chunk:\n {chunk}\n"
                f"kwargs:\n {kwargs_str}\n"
            )

    # If the response is a synchronous stream, decorate its generator.
    if isinstance(response, Stream):

        def generator():
            for chunk in response:
                handle_stream_chunk(chunk)
                yield chunk

        return generator()

    # For an asynchronous AsyncStream.
    elif isinstance(response, AsyncStream):

        async def async_generator():
            async for chunk in response:
                handle_stream_chunk(chunk)
                yield chunk

        return async_generator()

    # NOTE(review): AsyncCompletions is groq's resource class, not a stream
    # object; it is unclear this branch is ever reached — confirm whether it
    # can be removed. Kept for parity with the OpenAI handler.
    elif isinstance(response, AsyncCompletions):

        async def async_generator():
            async for chunk in response:
                handle_stream_chunk(chunk)
                yield chunk

        return async_generator()

    # Non-streaming responses are plain pydantic-style objects.
    try:
        self.llm_event.returns = response.model_dump()
        self.llm_event.agent_id = check_call_stack_for_agent_id()
        self.llm_event.prompt = kwargs["messages"]
        self.llm_event.prompt_tokens = response.usage.prompt_tokens
        self.llm_event.completion = response.choices[0].message.model_dump()
        self.llm_event.completion_tokens = response.usage.completion_tokens
        self.llm_event.model = response.model

        self._safe_record(session, self.llm_event)
    except Exception as e:
        self._safe_record(
            session, ErrorEvent(trigger_event=self.llm_event, exception=e)
        )

        kwargs_str = pprint.pformat(kwargs)
        response = pprint.pformat(response)
        logger.warning(
            f"Unable to parse response for LLM call. Skipping upload to AgentOps\n"
            f"response:\n {response}\n"
            f"kwargs:\n {kwargs_str}\n"
        )

    return response

def override_openai_v1_completion(self):
from openai.resources.chat import completions

Expand Down Expand Up @@ -645,6 +770,39 @@ async def patched_function(*args, **kwargs):
# Override the original method with the patched one
AsyncClient.chat = patched_function

def override_groq_chat(self):
    """Monkey-patch groq's synchronous ``Completions.create`` so every call
    is timed and routed through ``_handle_response_groq`` for recording."""
    from groq.resources.chat import completions

    # Keep a reference to the unpatched method so we can still invoke it.
    original_create = completions.Completions.create

    def patched_function(*args, **kwargs):
        init_timestamp = get_ISO_time()
        # "session" is an AgentOps-only kwarg; remove it before delegating
        # to the real groq API, which would not accept it.
        session = kwargs.pop("session", None)
        result = original_create(*args, **kwargs)
        return self._handle_response_groq(
            result, kwargs, init_timestamp, session=session
        )

    # Install the instrumented wrapper in place of the original method.
    completions.Completions.create = patched_function

def override_groq_chat_stream(self):
    """Monkey-patch groq's ``AsyncCompletions.create`` so async calls are
    timed and routed through ``_handle_response_groq`` for recording."""
    from groq.resources.chat import completions

    # Keep a reference to the unpatched method so we can still invoke it.
    original_create = completions.AsyncCompletions.create

    def patched_function(*args, **kwargs):
        init_timestamp = get_ISO_time()
        # FIX: strip the AgentOps-only "session" kwarg before calling groq
        # and forward it to the handler, mirroring override_groq_chat.
        # Previously it was left in kwargs, so passing session=... would
        # reach the real groq API and fail.
        session = kwargs.pop("session", None)
        result = original_create(*args, **kwargs)
        return self._handle_response_groq(
            result, kwargs, init_timestamp, session=session
        )

    # Install the instrumented wrapper in place of the original method.
    completions.AsyncCompletions.create = patched_function

def _override_method(self, api, method_path, module):
def handle_response(result, kwargs, init_timestamp):
if api == "openai":
Expand Down Expand Up @@ -746,6 +904,17 @@ def override_api(self):
f"Only Ollama>=0.0.1 supported. v{module_version} found."
)

if api == "groq":
module_version = version(api)

if Version(module_version) >= parse("0.9.0"):
self.override_groq_chat()
self.override_groq_chat_stream()
else:
logger.warning(
f"Only Groq>=0.9.0 supported. v{module_version} found."
)

def stop_instrumenting(self):
self.undo_override_openai_v1_async_completion()
self.undo_override_openai_v1_completion()
Expand Down
2 changes: 1 addition & 1 deletion agentops/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ def record(self, event: Union[Event, ErrorEvent]):

event.trigger_event_id = event.trigger_event.id
event.trigger_event_type = event.trigger_event.event_type
self.record(event)
self._add_event(event.trigger_event.__dict__)
event.trigger_event = None # removes trigger_event from serialization

self._add_event(event.__dict__)
Expand Down
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file not shown.
Binary file added docs/images/external/autogen/dashboard.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/images/external/autogen/flow.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/images/external/autogen/session-replay.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
5 changes: 3 additions & 2 deletions docs/mint.json
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@
"group": "Getting Started",
"pages": [
"v1/quickstart",
"v1/examples"
"v1/examples/examples"
]
},
{
Expand Down Expand Up @@ -93,7 +93,8 @@
"v1/integrations/autogen",
"v1/integrations/langchain",
"v1/integrations/cohere",
"v1/integrations/litellm"
"v1/integrations/litellm",
"v1/integrations/multion"
]
},
{
Expand Down
14 changes: 10 additions & 4 deletions docs/v1/examples.mdx → docs/v1/examples/examples.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ mode: "wide"
## Explore our example code to see AgentOps in action!

<CardGroup cols={2}>
<Card title="Simple Agent" icon="person-carry-box" iconType="solid" href="https://github.com/AgentOps-AI/agentops-py/blob/main/examples/openai-gpt.ipynb">
<Card title="Simple Agent" icon="microchip-ai" iconType="solid" href="https://github.com/AgentOps-AI/agentops-py/blob/main/examples/openai-gpt.ipynb">
Jupyter Notebook with sample code that you can run! Start here!
</Card>
<Card title="Crew.ai Example" icon="ship" href="/v1/integrations/crewai">
Expand All @@ -16,15 +16,21 @@ mode: "wide"
<Card title="AutoGen" icon="microsoft" href="/v1/integrations/autogen">
AutoGen multi-agent conversable workflow with tool usage
</Card>
<Card title="Multi-Agent Code Example" icon="people-arrows" iconType="duotone" href="https://github.com/AgentOps-AI/agentops-py/blob/main/examples/multi_agent_example.ipynb">
<Card title="Multi-Agent Code Example" icon="people-arrows" iconType="duotone" href="/v1/examples/multi_agent">
Jupyter Notebook with a simple multi-agent design
</Card>
<Card title="LangChain Example" icon="link" href="https://github.com/AgentOps-AI/agentops-py/blob/main/examples/langchain_examples.ipynb">
<Card title="LangChain Example" icon="crow" href="/v1/examples/langchain">
Jupyter Notebook with a sample LangChain integration
</Card>
<Card title="FastAPI Example" icon="computer" href="/v1/examples/fastapi">
<Card title="FastAPI Example" icon="bolt-lightning" href="/v1/examples/fastapi">
Create a REST server that performs and observes agent tasks
</Card>
<Card title="Multi Session Example" icon="computer" href="/v1/examples/multi_session">
Manage multiple sessions at the same time
</Card>
<Card title="MultiOn Example" icon="atom" href="/v1/examples/multion">
Create an autonomous browser agent capable of navigating the web and extracting information
</Card>
</CardGroup>

## Video Guides
Expand Down
3 changes: 0 additions & 3 deletions docs/v1/examples/fastapi.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,6 @@ mode: "wide"
poetry add agentops
```
</CodeGroup>
<Check>
[Give us a star](https://github.com/AgentOps-AI/agentops) on GitHub while you're at it (you may be our 2,000th 😊)
</Check>
</Step>
<Step title="Install Crew from the AgentOps fork">
<Warning>
Expand Down
10 changes: 10 additions & 0 deletions docs/v1/examples/langchain.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
title: 'Langchain Example'
description: 'Using the Langchain Callback Handler'
mode: "wide"
---
_View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/48ae12d4e4e085eed57346f1c40a054097431937/examples/langchain_examples.ipynb'} target={'_blank'}>GitHub</a>_

<iframe id="iframe" src={'notebooks/langchain_examples.html'}></iframe>

<script type="module" src="/scripts/full_iframe.js"></script>
10 changes: 10 additions & 0 deletions docs/v1/examples/multi_agent.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
title: 'Multi-Agent Example'
description: 'How to track events from multiple different agents'
mode: "wide"
---
_View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/main/examples/multi_agent_example.ipynb'} target={'_blank'}>GitHub</a>_

<iframe id="iframe" src={'notebooks/multi_agent_example.html'}></iframe>

<script type="module" src="/scripts/full_iframe.js"></script>
10 changes: 10 additions & 0 deletions docs/v1/examples/multi_session.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
title: 'Multi-Session Example'
description: 'Handling multiple sessions at the same time'
mode: "wide"
---
_View Notebook on <a href={'https://github.com/AgentOps-AI/agentops/blob/48ae12d4e4e085eed57346f1c40a054097431937/examples/multi_session_llm.ipynb'} target={'_blank'}>GitHub</a>_

<iframe id="iframe" src={'notebooks/multi_session_llm.html'}></iframe>

<script type="module" src="/scripts/full_iframe.js"></script>
12 changes: 12 additions & 0 deletions docs/v1/examples/multion.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
---
title: 'MultiOn Examples'
description: 'Tracking MultiOn usage with AgentOps'
mode: "wide"
---

<iframe id="iframe" src={'notebooks/multion/Autonomous_web_browsing.html'}></iframe>
<iframe id="iframe" src={'notebooks/multion/Sample_browsing_agent.html'}></iframe>
<iframe id="iframe" src={'notebooks/multion/Step_by_step_web_browsing.html'}></iframe>
<iframe id="iframe" src={'notebooks/multion/Webpage_data_retrieval.html'}></iframe>

<script type="module" src="/scripts/full_iframe.js"></script>
8 changes: 8 additions & 0 deletions docs/v1/examples/notebooks/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Generate Notebooks in Docs

1. `brew install pandoc`
2. `python generate.py`

This generates HTML versions of the notebooks.

Make sure there is a corresponding generated HTML file for each notebook before committing.
Loading

0 comments on commit 13630be

Please sign in to comment.