Skip to content

Commit

Permalink
Improving the router to use the context - 2
Browse files Browse the repository at this point in the history
  • Loading branch information
Hugo Saporetti Junior committed Apr 6, 2024
1 parent ba2972f commit c280783
Show file tree
Hide file tree
Showing 5 changed files with 19 additions and 21 deletions.
2 changes: 1 addition & 1 deletion src/main/askai/core/askai.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ def _ask_and_reply(self, question: str) -> bool:
log.debug('Response not found for "%s" in cache. Querying from %s.', question, self.engine.nickname())
AskAiEvents.ASKAI_BUS.events.reply.emit(message=msg.wait())
if output := router.process(question):
self.reply(output.response)
self.reply(output)
else:
log.debug("Reply found for '%s' in cache.", question)
self.reply(reply)
Expand Down
4 changes: 2 additions & 2 deletions src/main/askai/core/proxy/router.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def template(self) -> str:
return prompt.read_prompt("router-prompt.txt")

@staticmethod
def _assert_accuracy(question: str, ai_response: str) -> None:
def _assert_accuracy(question: str, ai_response: str) -> Optional[str]:
"""Function responsible for asserting that the question was properly answered."""
if ai_response:
template = PromptTemplate(input_variables=[
Expand All @@ -54,7 +54,7 @@ def _assert_accuracy(question: str, ai_response: str) -> None:
AskAiEvents.ASKAI_BUS.events.reply.emit(message=msg.assert_acc(output), verbosity='debug')
if RagResponse.of_value(status.strip()).is_bad:
raise InaccurateResponse(f"The RAG response was not 'Green' => '{output}' ")
return
return ai_response

raise InaccurateResponse(f"The RAG response was not 'Green'")

Expand Down
8 changes: 4 additions & 4 deletions src/main/askai/core/proxy/tools/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ def check_output(question: str, context: str) -> Optional[str]:
final_prompt = template.format(context=context, question=question)

if output := llm.predict(final_prompt):
shared.context.set("ANALYSIS", f"\nUser: {question}")
shared.context.push("ANALYSIS", f"\nAI: {output}", "assistant")
shared.context.set("ANALYSIS", f'\nUser: "{question}"')
shared.context.push("ANALYSIS", f'\nAI: "{output}', 'assistant')

return text_formatter.ensure_ln(output)

Expand All @@ -42,7 +42,7 @@ def stt(question: str, existing_answer: str) -> str:
llm = lc_llm.create_chat_model(temperature=Temperature.CREATIVE_WRITING.temp)

if output := llm.predict(final_prompt):
shared.context.set("ANALYSIS", f"\nUser: {question}")
shared.context.push("ANALYSIS", f"\nAI: {output}", "assistant")
shared.context.set("ANALYSIS", f'\nUser: "{question}"')
shared.context.push("ANALYSIS", f'\nAI: "{output}', 'assistant')

return text_formatter.ensure_ln(output)
14 changes: 7 additions & 7 deletions src/main/askai/core/proxy/tools/general.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,26 +19,26 @@
from askai.core.support.utilities import display_text


def fetch(query: str) -> Optional[str]:
def fetch(question: str) -> Optional[str]:
"""Fetch the information from the AI Database."""
template = PromptTemplate(input_variables=[
'user', 'idiom', 'question'
], template=prompt.read_prompt('generic-prompt'))
final_prompt = template.format(
user=shared.username, question=query,
user=shared.username, question=question,
idiom=shared.idiom, datetime=geo_location.datetime
)
ctx: str = shared.context.flat("GENERAL", "INTERNET")
log.info("FETCH::[QUESTION] '%s' context: '%s'", query, ctx)
log.info("FETCH::[QUESTION] '%s' context: '%s'", question, ctx)
chat_prompt = ChatPromptTemplate.from_messages([("system", "{query}\n\n{context}")])
chain = create_stuff_documents_chain(lc_llm.create_chat_model(), chat_prompt)
context = [Document(ctx)]
output = chain.invoke({"query": final_prompt, "context": context})

if output and shared.UNCERTAIN_ID not in output:
shared.context.push("GENERAL", f"\n\nUser:\n{query}")
shared.context.push("GENERAL", f"\nAI:\n{output}", "assistant")
cache.save_reply(query, output)
shared.context.set("GENERAL", f'\nUser: "{question}"')
shared.context.push("GENERAL", f'\nAI: "{output}', 'assistant')
cache.save_reply(question, output)
else:
output = msg.translate("Sorry, I don't know.")

Expand All @@ -50,7 +50,7 @@ def display(*texts: str) -> Optional[str]:
messages: str = os.linesep.join(texts)
if configs.is_interactive:
if not re.match(r'^%[a-zA-Z0-9_-]+%$', messages):
shared.context.push("GENERAL", f"\nAI:{messages}\n", "assistant")
shared.context.push("GENERAL", f'\nAI: "{messages}', 'assistant')
AskAiEvents.ASKAI_BUS.events.reply.emit(message=messages)
else:
display_text(messages, f"{shared.nickname}: ")
Expand Down
12 changes: 5 additions & 7 deletions src/main/askai/resources/assets/prompts/router-prompt.txt
Original file line number Diff line number Diff line change
@@ -1,16 +1,14 @@
As the interface with your computer, you have the following features:

{features}

Use the following context to answer the question at the end:

'''{context}'''

Your task is to review it and provide a structured list of actions employing one or more of the specified features. If the prompt calls for multiple features, delineate the necessary steps in the order required to meet the request. For each feature, you must include the corresponding command associated with that feature.

If you encounter any challenges understanding the query due to ambiguity, context dependency, or lack of clarity, please refer to the command output for clarification. Pay attention to file or folder names, mentions, and contents like 'file contents', 'folder listing', 'dates', and 'spoken names' to disambiguate effectively.
As the interface with your computer, you have the following features:

{features}

Placeholders can be used to indicate cross-references to the outputs of previous commands. Placeholders must match the regex: '^%[a-zA-Z0-9_-]+%$'

Your task is to review it and provide a structured list of actions employing one or more of the specified features. If the prompt calls for multiple features, delineate the necessary steps in the order required to meet the request. For each feature, you must include the corresponding command associated with that feature.

The final output should consist of a straightforward list of commands adhering to the provided syntax with no additional description or context.

Expand Down

0 comments on commit c280783

Please sign in to comment.