-
I was trying to route the following but if the agent use it tools, I got an error. here is the graph code:
def supervisor_agent(state):
    # NOTE(review): with_structured_output(routeResponse) makes invoke() return a
    # routeResponse pydantic object, which is returned directly instead of a state
    # update dict containing a "next" key — presumably what the conditional edge
    # `lambda x: x["next"]` below cannot consume.  (Also note the "supervior" typo.)
    supervior_chain = prompt | llm.with_structured_output(routeResponse)
    return supervior_chain.invoke(state)
class routeResponse(BaseModel):
    """Structured routing decision produced by the supervisor LLM."""
    # Name of the node to run next; "FINISH" maps to END in the graph wiring.
    next: Literal["Researcher", "Coder", "FINISH"]
def agent_node(state, agent, name):
    """Run a worker agent and report only its final message, tagged with `name`."""
    result = agent.invoke(state)
    # NOTE(review): collapsing the agent's output to its last message drops any
    # tool-call messages it produced, which interacts badly with the ToolNode
    # routing below — the accepted answer keeps the full message list instead.
    return {
        "messages": [HumanMessage(content=result["messages"][-1].content, name=name)]
    }
# NOTE(review): "FINISH" is listed as a team member although it is not a node;
# this only works because conditional_map["FINISH"] is overwritten to END below.
team_members = ["Researcher", "Coder", "FINISH"]
research_agent = create_react_agent(llm, tools=[tavily_tool])
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
code_agent = create_react_agent(llm, tools=[python_repl_tool])
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

class AgentState(TypedDict):
    """Shared graph state."""
    # Conversation history; operator.add appends each node's returned messages.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Next node to run, as chosen by the supervisor.
    next: str

workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", supervisor_agent)
workflow.add_node("Researcher_Tool", ToolNode([tavily_tool]))
workflow.add_node("Coder_Tool", ToolNode([python_repl_tool]))
conditional_map = {k: k for k in team_members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
# NOTE(review): if tools_condition here is langgraph.prebuilt.tools_condition, it
# returns "tools" (or END) — neither of which appears in the allowed path list
# ["Researcher_Tool", "supervisor"] — presumably the error seen whenever a worker
# actually issues a tool call.  The accepted answer defines a custom condition
# returning "tool"/"supervisor" and maps those labels explicitly.
workflow.add_conditional_edges("Researcher", tools_condition, ["Researcher_Tool", "supervisor"])
workflow.add_conditional_edges("Coder", tools_condition, ["Coder_Tool", "supervisor"])
workflow.add_edge(START, "supervisor")
workflow.add_edge("Researcher_Tool", "Researcher")
workflow.add_edge("Coder_Tool", "Coder")
graph = workflow.compile()
Error:
|
Beta Was this translation helpful? Give feedback.
Answered by
cris-m
Nov 18, 2024
Replies: 1 comment 2 replies
-
I changed everything and fixed the issue. Here is the working code:
import functools
import operator
from typing import Annotated, Literal, Sequence
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_experimental.tools import PythonREPLTool
from langgraph.graph import END, START, StateGraph
from langgraph.prebuilt import create_react_agent
from pydantic import BaseModel
from typing_extensions import TypedDict
from langchain_core.messages import AIMessage
from langgraph.prebuilt import ToolNode
# Tool instances shared by the worker agents below.
tavily_tool = TavilySearchResults(max_results=5)  # web search, top 5 results
python_repl_tool = PythonREPLTool()  # executes arbitrary Python — trusted input only
def agent_node(state, agent, name):
    """Run a worker agent and return only the messages it produced.

    ``AgentState.messages`` is reduced with ``operator.add``, so whatever a
    node returns is *appended* to the existing history.  Returning
    ``state["messages"] + result["messages"]`` would therefore re-append
    every prior message on each worker turn; we return only the new suffix.

    Args:
        state: Current graph state; ``state["messages"]`` is the history.
        agent: A runnable (e.g. from create_react_agent) whose ``invoke``
            returns a dict with a ``"messages"`` list.
        name: Worker label (kept for interface compatibility with the
            functools.partial bindings below; not used here).
    """
    result = agent.invoke(state)
    # create_react_agent echoes the input messages back at the front of its
    # result, so everything past the original prefix is new output.
    new_messages = result["messages"][len(state["messages"]):]
    return {"messages": new_messages}
# Worker node names the supervisor may delegate to.
members = ["Researcher", "Coder"]

# Supervisor instructions; {members} is filled via .partial() on the prompt below.
system_prompt = (
    """You are a supervisor tasked with managing a conversation between the following workers: {members}.
Given the following user request, respond with the worker to act next. Each worker will perform a task and respond with their results and status.
If you see 'FINAL ANSWER' in any response or the task is complete, respond with FINISH.
Otherwise, choose the most appropriate worker to continue the task."""
)

# Choices offered to the structured-output model: the workers plus FINISH.
options = ["FINISH"] + members

class routeResponse(BaseModel):
    """Structured routing decision: which node should run next."""
    next: Literal["Researcher", "Coder", "FINISH"]
# Supervisor prompt: system instructions, the full conversation so far, then an
# explicit routing question.  {options} and {members} are pre-filled below, so
# only {messages} remains to be supplied at invoke time.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "user",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))

# Deterministic (temperature=0) model shared by the supervisor and both workers.
llm = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0, max_tokens=1000)
def supervisor_agent(state):
    """Supervisor node: ask the LLM which worker acts next (or FINISH).

    Returns a partial state update carrying only the ``next`` routing key,
    which the conditional edge out of "supervisor" consumes.

    Note: ``messages`` uses an ``operator.add`` reducer, so echoing
    ``state["messages"]`` back (as the previous version did) would append a
    full copy of the history to the state on every supervisor turn.
    """
    supervisor_chain = prompt | llm.with_structured_output(routeResponse)
    result = supervisor_chain.invoke(state)
    return {"next": result.next}
def tools_condition(state):
    """Decide a worker's next hop.

    Returns "tool" when the last AI message requested tool calls, otherwise
    "supervisor" to hand control back for re-routing.
    """
    last = state["messages"][-1]
    wants_tool = isinstance(last, AIMessage) and last.additional_kwargs.get("tool_calls")
    return "tool" if wants_tool else "supervisor"
class AgentState(TypedDict):
    """Shared graph state passed between all nodes."""
    # Conversation history; the operator.add reducer appends each node's
    # returned messages rather than replacing the list.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Name of the next node to run, written by the supervisor.
    next: str
# ReAct worker agents, one tool each; agent_node adapts them to graph nodes.
research_agent = create_react_agent(llm, tools=[tavily_tool])
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
code_agent = create_react_agent(llm, tools=[python_repl_tool])
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")
# ---- Graph wiring ---------------------------------------------------------
workflow = StateGraph(AgentState)

# Supervisor, one node per worker, and one ToolNode per worker's tool.
workflow.add_node("supervisor", supervisor_agent)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("Researcher_Tool", ToolNode([tavily_tool]))
workflow.add_node("Coder_Tool", ToolNode([python_repl_tool]))

# Supervisor routes to a worker by name, or to END when it answers FINISH.
conditional_map = {worker: worker for worker in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda state: state["next"], conditional_map)

# Each worker either invokes its tool node (which loops back to the worker)
# or reports back to the supervisor.
for worker in members:
    tool_node = f"{worker}_Tool"
    workflow.add_conditional_edges(
        worker,
        tools_condition,
        {"tool": tool_node, "supervisor": "supervisor"},
    )
    workflow.add_edge(tool_node, worker)

workflow.add_edge(START, "supervisor")
graph = workflow.compile()
Beta Was this translation helpful? Give feedback.
2 replies
Answer selected by
cris-m
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
I changed everything and fixed the issue