From d383c25451b5e3d8bde35b42382e92f9c9088440 Mon Sep 17 00:00:00 2001 From: Zack Date: Wed, 25 Oct 2023 12:24:50 -0500 Subject: [PATCH] chore: sync --- autogpts/Ares | 2 +- autogpts/Spartan | 2 +- monolithic.yaml | 61 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 monolithic.yaml diff --git a/autogpts/Ares b/autogpts/Ares index 7e131b741dc4..3d63c69ba152 160000 --- a/autogpts/Ares +++ b/autogpts/Ares @@ -1 +1 @@ -Subproject commit 7e131b741dc49405224181bdcd2ed233493aae19 +Subproject commit 3d63c69ba1527ed4fd30222af151f770a2d44712 diff --git a/autogpts/Spartan b/autogpts/Spartan index 997088bb112a..bba7414f69d0 160000 --- a/autogpts/Spartan +++ b/autogpts/Spartan @@ -1 +1 @@ -Subproject commit 997088bb112a70e8cc16d0cd0310ac32588b027f +Subproject commit bba7414f69d0c710f9ec50554fc528980329ecdf diff --git a/monolithic.yaml b/monolithic.yaml new file mode 100644 index 000000000000..07f7679ea857 --- /dev/null +++ b/monolithic.yaml @@ -0,0 +1,61 @@ +# This defines the type of the template. Don't remove it. +template_type: monolithic + +# Message processing variables +# These variables are used to process Auto-GPT's input array of messages into a string +strip_messages_from_end: 0 # Only used when an RP prompt is sent to the LLM +send_as: "System" # The name to be used when speaking to the LLM +ai_name: "AI" # Chat attribution to the AI, is typically different from the ai_settings name

+# This text precedes the LLM prompt. It is non-standard but may be useful to you. +prescript: ""

+# Strings used to construct the generic monolithic prompt sent by Auto-GPT +strings: + lead_in: 'You are ' + general_guidance: + - Your decisions must always be made independently without seeking user assistance. + - Play to your strengths as an LLM and pursue simple strategies with no legal complications.
+ os_prompt: '\n\nThe OS you are running on is: ' + goal_label: ".\n\nGOALS:\n\n" + constraints_label: 'Constraints:\n' + constraints: + - "~4000 word limit for short term memory. Your short term memory is short, so immediately + save important information to files." + - If you are unsure how you previously did something or want to recall past events, + thinking about similar events will help you remember. + - No user assistance + - Exclusively use the commands listed in double quotes e.g. "command name" + commands_label: '\n\nCommands:\n' + resources_label: '\n\nResources:\n' + resources: + - Internet access for searches and information gathering. + - Long Term memory management. + - GPT-3.5 powered Agents for delegation of simple tasks. + performance_eval_label: '\n\nPerformance Evaluation:\n' + performance_eval: + - Continuously review and analyze your actions to ensure you are performing to the + best of your abilities. + - Constructively self-criticize your big-picture behavior constantly. + - Reflect on past decisions and strategies to refine your approach. + - Every command has a cost, so be smart and efficient. Aim to complete tasks in + the least number of steps. + - Write all code to a file. + response_format_label: '\n\nResponse Format:\n' + response_format_pre_prompt: "You should only respond in YAML format as described + below \nResponse Format: \n\n--START TEMPLATE--\n" + response_format_post_prompt: " \n--END TEMPLATE--\n\nEnsure the response can be parsed by Python.\n\nHistory:\n" + +# This string appears at the end of the LLM prompt. It is non-standard but may be useful to you. +postscript: "Please complete the template and reply.\n\n" + +# History tags +history_start: '--Begin History--' +history_end: '--End History--' +history_none: '--No History--' + +# This YAML corresponds to a simplified JSON format that is translated by the plugin into the +# format expected by Auto-GPT. 
+response_format: "plan_summary: \nreasoning: \nnext_steps:\n - \n - \n + \nconsiderations: \ntts_msg: \ncommand_name: \nargs:\n - name: + \n value: \n - name: \n value: "