Merge pull request wise-agents#406 from fjuma/messages
Add debug logging to be able to see the retrieved content that will be used for RAG or Graph RAG
maeste authored Oct 10, 2024
2 parents 00cda74 + 508cfdf commit c6ddb34
Showing 1 changed file with 15 additions and 4 deletions.
19 changes: 15 additions & 4 deletions src/wiseagents/agents/rag_wise_agents.py
@@ -94,7 +94,7 @@ def process_request(self, request: WiseAgentMessage, conversation_history: List[
         retrieved_documents = retrieve_documents_for_rag(request.message, self.vector_db, self.collection_name, self.k)
         llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm,
                                                                   self.include_sources, conversation_history,
-                                                                  self.metadata.system_message)
+                                                                  self.metadata.system_message, self.name)
         return llm_response_with_sources
 
     def process_response(self, response: WiseAgentMessage):
@@ -201,7 +201,7 @@ def process_request(self, request: WiseAgentMessage, conversation_history: List[
         retrieved_documents = retrieve_documents_for_graph_rag(request.message, self.graph_db, self.k,
                                                                self.retrieval_query, self.params, self.metadata_filter)
         llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm, self.include_sources,
-                                                                  conversation_history, self.metadata.system_message)
+                                                                  conversation_history, self.metadata.system_message, self.name)
         return llm_response_with_sources
 
     def process_response(self, response: WiseAgentMessage):
@@ -372,7 +372,7 @@ def create_and_process_chain_of_verification_prompts(self, message: str,
         for question in verification_questions:
             retrieved_documents = self.retrieve_documents(question)
             llm_response = create_and_process_rag_prompt(retrieved_documents, question, self.llm, False,
-                                                         [], self.metadata.system_message)
+                                                         [], self.metadata.system_message, self.name)
             verification_responses = (verification_responses + "Verification Question: " + question + "\n"
                                       + "Verification Result: " + llm_response + "\n")
@@ -600,7 +600,7 @@ def retrieve_documents(self, question: str) -> List[Document]:
 
 def create_and_process_rag_prompt(retrieved_documents: List[Document], question: str, llm: WiseAgentLLM,
                                   include_sources: bool, conversation_history: List[ChatCompletionMessageParam],
-                                  system_message: str) -> str:
+                                  system_message: str, agent_name: str) -> str:
     """
     Create a RAG prompt and process it with the LLM agent.
@@ -612,7 +612,9 @@ def create_and_process_rag_prompt(retrieved_documents: List[Document], question:
             can be used while processing the request. If this agent isn't involved in a type of
             collaboration that makes use of the conversation history, this will be an empty list.
         system_message (str): the optional system message to use
+        agent_name (str): the agent name
     """
+    log_retrieved_content(retrieved_documents, agent_name)
     context = "\n".join([document.content for document in retrieved_documents])
     prompt = (f"Answer the question based only on the following context:\n{context}\n"
               f"Question: {question}\n")
@@ -630,6 +632,15 @@ def create_and_process_rag_prompt(retrieved_documents: List[Document], question:
     return llm_response.choices[0].message.content
 
 
+def log_retrieved_content(retrieved_documents: List[Document], agent_name: str):
+    for document in retrieved_documents:
+        logging.getLogger(agent_name).debug(f"Retrieved Document:\n"
+                                            f"{document.content}\n"
+                                            f"Metadata:\n"
+                                            f" Source: {document.metadata.get('source', '')}\n"
+                                            f" Matched: {document.metadata.get('matched', '')}")
+
+
 def retrieve_documents_for_rag(question: str, vector_db: WiseAgentVectorDB, collection_name: str, k: int) \
         -> List[Document]:
     """
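The new messages are emitted at DEBUG level on a per-agent logger (the agent's name is passed through as agent_name), so they stay hidden under a default logging configuration. Below is a minimal sketch, not part of this commit, of how an application could surface them; the agent name "RAGWiseAgent1" is hypothetical and stands in for whatever name the agent was created with:

import logging

# Install a root handler; INFO keeps the rest of the application quiet.
logging.basicConfig(level=logging.INFO)

# Raise only this one agent's logger to DEBUG so the "Retrieved Document"
# records from log_retrieved_content become visible without enabling
# DEBUG output globally. The logger name must match the agent's name.
logging.getLogger("RAGWiseAgent1").setLevel(logging.DEBUG)

With this in place, each retrieved document is logged with its content followed by its source and matched metadata, making it easy to inspect exactly what context a RAG or Graph RAG agent fed to the LLM.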
