Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Code reviewer #213

Merged
merged 12 commits into from
Jul 26, 2024
2 changes: 1 addition & 1 deletion agents/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
sirji-messages==0.0.30
sirji-messages==0.0.32
sirji-tools==0.0.16
openai==1.35.7
anthropic==0.29.0
Expand Down
2 changes: 1 addition & 1 deletion agents/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

setup(
name='sirji-agents',
version='0.0.45',
version='0.0.47',
author='Sirji',
description='Orchestrator, Generic Agent, and Research Agent components of the Sirji AI agentic framework.',
license='MIT',
Expand Down
2 changes: 1 addition & 1 deletion agents/sirji_agents/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from .researcher import ResearchAgent, CleanupFactory
from .llm.orchestrator import Orchestrator
from .llm.generic import GenericAgent
from .llm.generic.infer import GenericAgentInfer as GenericAgent

__all__ = [
'ResearchAgent',
Expand Down
Empty file.
86 changes: 86 additions & 0 deletions agents/sirji_agents/llm/generic/infer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
from sirji_tools.logger import create_logger
from sirji_messages import message_parse, MessageParsingError, MessageValidationError, ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum
from ..model_providers.factory import LLMProviderFactory
from .system_prompts.factory import SystemPromptsFactory
from ...decorators import retry_on_exception

class GenericAgentInfer():
    """Inference loop for a generic Sirji agent.

    Builds the conversation (injecting/refreshing the system prompt), calls
    the configured LLM provider, and retries up to 2 extra times when the
    response does not parse as a valid Sirji message.
    """

    def __init__(self, config, agent_output_folder_index, file_summaries=None):
        # Per-agent log file, named after the agent id.
        self.logger = create_logger(f"{config['id']}.log", 'debug')

        self.logger.info(config)
        self.logger.info(agent_output_folder_index)

        self.config = config
        self.agent_output_folder_index = agent_output_folder_index
        self.file_summaries = file_summaries

    def message(self, input_message, history=None):
        """Process one incoming message through the LLM.

        `history` is an optional prior conversation list; None (or empty)
        starts a fresh conversation with a system prompt. Returns a tuple of
        (response_message, conversation, prompt_tokens, completion_tokens).
        """
        # NOTE: default is None (not a mutable []) to avoid the shared
        # mutable-default-argument pitfall; __prepare_conversation treats
        # any falsy history as "no history".
        conversation = self.__prepare_conversation(input_message, history)

        self.logger.info(f"Incoming: \n{input_message}")
        # The concrete provider (OpenAI, Anthropic, ...) is chosen by
        # LLMProviderFactory, so do not hard-code a vendor name here.
        self.logger.info("Calling LLM Chat Completions API\n")

        response_message, prompt_tokens, completion_tokens = self.__get_response(conversation)

        return response_message, conversation, prompt_tokens, completion_tokens

    def __prepare_conversation(self, input_message, history):
        # Build (or refresh) the conversation. The system prompt is
        # regenerated on every call so config/index changes are picked up.
        conversation = []

        if not history:
            conversation.append(
                {"role": "system", "content": SystemPromptsFactory.get_system_prompt(self.config, self.agent_output_folder_index)})
        else:
            if history[0]['role'] == "system":
                history[0]['content'] = SystemPromptsFactory.get_system_prompt(self.config, self.agent_output_folder_index)
            conversation = history

        parsed_input_message = message_parse(input_message)
        conversation.append({"role": "user", "content": input_message, "parsed_content": parsed_input_message})

        return conversation

    def __get_response(self, conversation):
        # Call the LLM; on a malformed response, append a corrective user
        # message and retry (at most 2 retries before re-raising).
        retry_llm_count = 0
        response_message = ''
        prompt_tokens = 0
        completion_tokens = 0

        while True:
            response_message, current_prompt_tokens, current_completion_tokens = self.__call_llm(conversation)

            # Token usage accumulates across retries.
            prompt_tokens += current_prompt_tokens
            completion_tokens += current_completion_tokens
            try:
                # Attempt parsing
                parsed_response_message = message_parse(response_message)
                conversation.append({"role": "assistant", "content": response_message, "parsed_content": parsed_response_message})
                break
            except (MessageParsingError, MessageValidationError) as e:
                # Handling both MessageParsingError and MessageValidationError similarly
                self.logger.info("Error while parsing the message.\n")
                retry_llm_count += 1
                if retry_llm_count > 2:
                    raise e
                self.logger.info("Requesting LLM to resend the message in correct format.\n")
                conversation.append({"role": "assistant", "content": response_message, "parsed_content": {}})
                # Todo: @vaibhav - Change the error message language later.
                conversation.append({"role": "user", "content": "Error! Your last response has two action in it and both has been discarded because of the below error:\nError in processing your last response. Your response must conform strictly to one of the allowed Response Templates, as it will be processed programmatically and only these templates are recognized. Your response must be enclosed within '***' at the beginning and end, without any additional text above or below these markers. Not conforming above rules will lead to response processing errors."})
            except Exception as e:
                self.logger.info(f"Generic error while parsing message. Error: {e}\n")
                raise e

        return response_message, prompt_tokens, completion_tokens

    @retry_on_exception()
    def __call_llm(self, conversation):
        # Strip parsed_content before sending: providers only accept
        # role/content pairs.
        history = [{"role": message['role'], "content": message['content']}
                   for message in conversation]

        model_provider = LLMProviderFactory.get_instance()

        return model_provider.get_response(history, self.logger)
Empty file.
110 changes: 110 additions & 0 deletions agents/sirji_agents/llm/generic/system_prompts/anthropic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
import json
import os
import textwrap

from sirji_messages import ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum

class AnthropicSystemPrompt:
    """Builds the Anthropic-flavoured system prompt for a generic agent.

    The prompt is assembled from the agent config (identity, skills,
    pseudo code, sub-agents, accessible actions), the allowed response
    templates, and the current agent-output-index / project structure.
    """

    def __init__(self, config, agent_output_folder_index):
        # config: the agent's configuration dict (id, name, skills, ...).
        self.config = config
        # agent_output_folder_index: index of files already written to the
        # agent output folder; embedded verbatim into the prompt.
        self.agent_output_folder_index = agent_output_folder_index

    def system_prompt(self):
        """Return the fully assembled system prompt string."""

        initial_intro = textwrap.dedent(f"""
            You are an agent named "{self.config['name']}", a component of the Sirji AI agentic framework. Sirji is a framework that enables developers to create and run custom AI agents for their everyday development tasks. A Custom Agent is a modular AI component that performs specific tasks based on predefined pseudocode.
            Your Agent ID: {self.config['id']}
            Your OS (referred as SIRJI_OS later): {os.name}

            You are an expert having skill: {self.config['skills'][0]['skill']}""")

        instructions = textwrap.dedent(f"""
            You must follow these instructions:
            1. Convert all points in your pseudo code into plain English steps with a maximum of 10 words each. Log these steps using the LOG_STEPS action.
            2. After logging the steps, follow your pseudo code step by step to the best of your ability. Following each pseudo code step in the specified order is mandatory. Dont miss to follow any of these steps.
            3. If any step is not applicable or cannot be followed, use the DO_NOTHING action to skip it.""")

        pseudo_code = "\nYour pseudo code which you must follow:\n" + self.config['skills'][0]['pseudo_code']

        response_specifications = textwrap.dedent(f"""
            Your response must adhere rigorously to the following rules, without exception, to avoid critical system failures:
            - Conform precisely to one of the Allowed Response Templates, as the system processes only these templates correctly.
            - Enclose the entire response within '***' markers at both the beginning and the end, without any additional text outside these markers.
            - Respond with only one action at a time.""")

        understanding_the_folders = textwrap.dedent("""
            Terminologies:
            1. Project Folder:
            - The Project Folder is your primary directory for accessing all user-specific project files, including code files, documentation, and other relevant resources.
            - When initializing Sirji, the SIRJI_USER selects this folder as the primary workspace for the project. You should refer to this folder exclusively for accessing and modifying project-specific files.

            2. Agent Output Folder:
            - The Agent Output Folder is designated for storing the results and data outputs generated by the agents (like you) of Sirji.
            - Ensure you do not confuse this folder with the Project Folder; remember, no project source files are stored here.
            - This folder is different from the project folder and this ensures that operational data is kept separate from project files.

            3. Agent Output Index:
            - The Agent Output Index is an index file for the Agent Output Folder that keeps track of all files written by agents in that folder along with the a brief description of the file contents.
            - The Agent Output Index will look as follows:
            {{
                'agent_id/file_name': {{
                    'description': 'description of the file contents'
                    'created_by': 'agent_id'
                }}
            }}""")

        allowed_response_templates_str = textwrap.dedent("""
            Allowed Response Templates:
            Below are all the possible allowed "Response Template" formats for each of the allowed recipients. You must always respond using one of them.""")

        # One fresh-session and one existing-session INVOKE template per
        # configured sub-agent.
        if "sub_agents" in self.config and self.config["sub_agents"]:
            for sub_agent in self.config["sub_agents"]:

                allowed_response_templates_str += textwrap.dedent(f"""
                    Allowed Response Templates to {sub_agent['id']}:
                    For invoking the {sub_agent['id']}, in a fresh session, use the following response template. Please respond with the following, including the starting and ending '***', with no commentary above or below.

                    Response template:
                    ***
                    FROM: {{Your Agent ID}}
                    TO: {sub_agent['id']}
                    ACTION: INVOKE_AGENT
                    STEP: "provide the step number here for the ongoing step if any."
                    SUMMARY: {{Display a concise summary to the user, describing the action using the present continuous tense.}}
                    BODY:
                    {{Purpose of invocation.}}
                    ***""") + '\n'

                allowed_response_templates_str += textwrap.dedent(f"""
                    For invoking the {sub_agent['id']}, continuing over the existing session session, use the following response template. Please respond with the following, including the starting and ending '***', with no commentary above or below.

                    Response template:
                    ***
                    FROM: {{Your Agent ID}}
                    TO: {sub_agent['id']}
                    ACTION: INVOKE_AGENT_EXISTING_SESSION
                    STEP: "provide the step number here for the ongoing step if any."
                    SUMMARY: {{Display a concise summary to the user, describing the action using the present continuous tense.}}
                    BODY:
                    {{Purpose of invocation.}}
                    ***""") + '\n'

        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.SIRJI_USER, permissions_dict[(AgentEnum.ANY, AgentEnum.SIRJI_USER)]) + '\n'

        # Copy the permission set before extending it: mutating the value
        # stored in permissions_dict would leak this agent's accessible
        # actions into every other agent sharing the module-level dict.
        action_list = set(permissions_dict[(AgentEnum.ANY, AgentEnum.EXECUTOR)])
        accessible_actions = self.config.get("accessible_actions", [])
        if accessible_actions:
            for action in accessible_actions:
                action_list.add(ActionEnum[action])
        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.EXECUTOR, action_list) + '\n'

        allowed_response_templates_str += "For updating in project folder use either FIND_AND_REPLACE, INSERT_ABOVE or INSERT_BELOW actions. Ensure you provide the exact matching string in find from file, with the exact number of lines and proper indentation for insert and replace actions.\n"
        allowed_response_templates_str += '\n' + allowed_response_templates(AgentEnum.ANY, AgentEnum.CALLER, permissions_dict[(AgentEnum.ANY, AgentEnum.CALLER)]) + '\n'

        current_agent_output_index = f"Current contents of Agent Output Index:\n{json.dumps(self.agent_output_folder_index, indent=4)}"

        # SIRJI_PROJECT_STRUCTURE is expected to be set by the surrounding
        # runtime; os.environ.get returns None (rendered as "None") otherwise.
        current_project_folder_structure = f"Recursive structure of the project folder:\n{os.environ.get('SIRJI_PROJECT_STRUCTURE')}"

        return f"{initial_intro}\n{instructions}\n{pseudo_code}\n{response_specifications}\n{understanding_the_folders}\n{allowed_response_templates_str}\n\n{current_agent_output_index}\n\n{current_project_folder_structure}".strip()

Original file line number Diff line number Diff line change
@@ -1,96 +1,18 @@
import textwrap
import os
import json
import os
import textwrap

# TODO - log file should be dynamically created based on agent ID
from sirji_tools.logger import create_logger
from sirji_messages import message_parse, MessageParsingError, MessageValidationError, ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum
from .model_providers.factory import LLMProviderFactory
from ..decorators import retry_on_exception
from sirji_messages import ActionEnum, AgentEnum, allowed_response_templates, permissions_dict, ActionEnum

class GenericAgent():
def __init__(self, config, agent_output_folder_index, file_summaries=None):
# Initialize the logger as an instance variable
self.logger = create_logger(f"{config['id']}.log", 'debug')

self.logger.info(config)
self.logger.info(agent_output_folder_index)

class DefaultSystemPrompt:
def __init__(self, config, agent_output_folder_index):
self.config = config
self.agent_output_folder_index = agent_output_folder_index
self.file_summaries = file_summaries

def message(self, input_message, history=[]):
conversation = self.__prepare_conversation(input_message, history)

self.logger.info(f"Incoming: \n{input_message}")
self.logger.info("Calling OpenAI Chat Completions API\n")

response_message, prompt_tokens, completion_tokens = self.__get_response(conversation)

return response_message, conversation, prompt_tokens, completion_tokens

def __prepare_conversation(self, input_message, history):
conversation = []

if not history:
conversation.append(
{"role": "system", "content": self.system_prompt()})
else:
if history[0]['role'] == "system":
history[0]['content'] = self.system_prompt()
conversation = history

parsed_input_message = message_parse(input_message)
conversation.append({"role": "user", "content": input_message, "parsed_content": parsed_input_message})

return conversation

def __get_response(self, conversation):
retry_llm_count = 0
response_message = ''
prompt_tokens = 0
completion_tokens = 0

while(True):
response_message, current_prompt_tokens, current_completion_tokens = self.__call_llm(conversation)

prompt_tokens += current_prompt_tokens
completion_tokens += current_completion_tokens
try:
# Attempt parsing
parsed_response_message = message_parse(response_message)
conversation.append({"role": "assistant", "content": response_message, "parsed_content": parsed_response_message})
break
except (MessageParsingError, MessageValidationError) as e:
# Handling both MessageParsingError and MessageValidationError similarly
self.logger.info("Error while parsing the message.\n")
retry_llm_count += 1
if retry_llm_count > 2:
raise e
self.logger.info(f"Requesting LLM to resend the message in correct format.\n")
conversation.append({"role": "assistant", "content": response_message, "parsed_content": {}})
conversation.append({"role": "user", "content": "Error in processing your last response. Your response must conform strictly to one of the allowed Response Templates, as it will be processed programmatically and only these templates are recognized. Your response must be enclosed within '***' at the beginning and end, without any additional text above or below these markers. Not conforming above rules will lead to response processing errors."})
except Exception as e:
self.logger.info(f"Generic error while parsing message. Error: {e}\n")
raise e

return response_message, prompt_tokens, completion_tokens

@retry_on_exception()
def __call_llm(self, conversation):
history = []

for message in conversation:
history.append({"role": message['role'], "content": message['content']})

model_provider = LLMProviderFactory.get_instance()

return model_provider.get_response(history, self.logger)
pass

def system_prompt(self):
initial_intro = textwrap.dedent(f"""
You are an agent named "{self.config['name']}", a component of the Sirji AI agentic framework.
You are an agent named "{self.config['name']}", a component of the Sirji AI agentic framework. Sirji is a framework that enables developers to create and run custom AI agents for their everyday development tasks. A Custom Agent is a modular AI component that performs specific tasks based on predefined pseudocode.
Your Agent ID: {self.config['id']}
Your OS (referred as SIRJI_OS later): {os.name}""")

Expand Down Expand Up @@ -126,7 +48,8 @@ def system_prompt(self):
- Upon being invoked, identify which of your skills match the requirements of the task.
- Execute the sub-tasks associated with each of these matching skills.
- Do not respond with two actions in the same response. Respond with one action at a time.
- Always use STORE_IN_AGENT_OUTPUT and READ_AGENT_OUTPUT_FILES to write and read files to and from the agent output folder.
- Always use STORE_IN_AGENT_OUTPUT and READ_AGENT_OUTPUT_FILES to write and read files to and from the agent output folder.
- If any step is not applicable or cannot be followed, use the DO_NOTHING action to skip it.
""")

formatted_skills = self.__format_skills()
Expand Down Expand Up @@ -182,13 +105,8 @@ def system_prompt(self):
current_agent_output_index = f"Current contents of Agent Output Index:\n{json.dumps(self.agent_output_folder_index, indent=4)}"

current_project_folder_structure = f"Recursive structure of the project folder:\n{os.environ.get('SIRJI_PROJECT_STRUCTURE')}"
file_summaries = ""
if self.file_summaries:
file_summaries = 'Here are the concise summaries of the responsibilities and functionalities for each file currently present in the project folder:\n'
file_summaries += f"File Summaries:\n{self.file_summaries}"


return f"{initial_intro}\n{response_specifications}{understanding_the_folders}\n{instructions}\n{formatted_skills}\n{allowed_response_templates_str}\n\n{current_agent_output_index}\n\n{current_project_folder_structure}\n\n{file_summaries}".strip()

return f"{initial_intro}\n{response_specifications}{understanding_the_folders}\n{instructions}\n{formatted_skills}\n{allowed_response_templates_str}\n\n{current_agent_output_index}\n\n{current_project_folder_structure}".strip()

def __format_skills(self):
output_text = ""
Expand Down
Loading
Loading