-
Notifications
You must be signed in to change notification settings - Fork 2
/
pi_ai_os.py
356 lines (293 loc) · 11 KB
/
pi_ai_os.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
#!/usr/bin/env python3
# This is https://github.com/VictorTaelin/AI-scripts/blob/main/chatsh.mjs but in Python.
import argparse
import os
import re
import subprocess
import sys
from omegaconf import OmegaConf
from PIL import Image
from tiny_ai_client import AI
def get_shell_info() -> str:
    """Return a one-line OS description plus the user's shell version.

    Used to fill the ``{shell_info}`` slot of SYSTEM_PROMPT so the model
    knows which platform its shell scripts will run on.

    Returns:
        ``"<uname -a output>\\n<shell --version output>"``, or a fallback
        message when either probe fails.
    """
    try:
        uname = subprocess.check_output(["uname", "-a"]).decode().strip()
        shell = os.environ.get("SHELL", "/bin/sh")
        # Some shells (e.g. dash) reject --version; a non-zero exit is
        # caught below just like a missing binary.
        shell_version = (
            subprocess.check_output([shell, "--version"]).decode().strip()
        )
        return f"{uname}\n{shell_version}"
    except (subprocess.CalledProcessError, OSError):
        # OSError (incl. FileNotFoundError) covers uname/shell not being
        # present at all (e.g. non-POSIX platforms); the original caught
        # only CalledProcessError and crashed in that case.
        return "Unable to determine shell information"
# System prompt to set the assistant's behavior.  The {shell_info}
# placeholder is filled in by pi_ai_os() from get_shell_info().
# Fixed: "COMMAND NODE" -> "COMMAND MODE", and "TEXT MODE" -> "CHAT MODE"
# to match the two modes the guide actually defines.
SYSTEM_PROMPT = """You are AIOS, an AI language model that specializes in assisting users with tasks on their system using shell commands. AIOS operates in two modes: COMMAND MODE and CHAT MODE.
# GUIDE for COMMAND MODE:
1. The USER asks you to perform a SYSTEM TASK.
2. AIOS answers with a SHELL SCRIPT to perform the task.
# GUIDE for CHAT MODE:
1. The USER asks an ARBITRARY QUESTION or OPEN-ENDED MESSAGE.
2. AIOS answers it with a concise, factual response.
## NOTES:
- In COMMAND MODE, AIOS MUST answer with A SINGLE SH BLOCK.
- In COMMAND MODE, AIOS MUST NOT answer with ENGLISH EXPLANATION.
- In CHAT MODE, AIOS MUST ALWAYS answer with TEXT.
- In CHAT MODE, AIOS MUST NEVER answer with SH BLOCK.
- AIOS MUST be CONCISE, OBJECTIVE, CORRECT and USEFUL.
- AIOS MUST NEVER attempt to install new tools. Assume they're available.
- AIOS's interpreter can only process one SH per answer.
- On Python:
  - Use 'poetry run python file.py' to run.
- On sh:
  - If asked to change something in a file, re-write the whole file.
  - You must always do that to ensure that there are no formatting issues and that you can run the file out of the box.
- When a task is completed, STOP using commands. Just answer with "Done.".
- The system shell in use is: {shell_info}.
"""
# Config file looked up in the current directory when -c/--config is not given.
DEFAULT_CONFIG_FILE = "pi_ai_os.yaml"
# File extensions included when building the directory context; extended
# at runtime from the config file (see pi_ai_os).
ALLOWED_EXTENSIONS = [".txt", ".md", ".py", ".sh", ".json", ".go", ".rs", ".js"]
# Directory names skipped while walking the tree; extended at runtime from
# the config file.  (Fixed: "__pycache__" was listed twice.)
IGNORED_DIRS = [
    ".git",
    ".idea",
    "__pycache__",
    "node_modules",
    "venv",
    "assets",
]
ASSISTANT_MODEL_NAME = (
    "claude-3-haiku-20240307"  # hardcoded because it is fast and cheap af
)
# Prompt for the cheap "assistant" model that predicts which files in the
# working directory are relevant to the user's request.
# Fixed: removed the stray </EXAMPLE_INPUT> closing tag that had no
# matching opening tag (the example sections use "# EXAMPLE ..." headers).
ASSISTANT_MODEL_SYSTEM_PROMPT = """
# INPUT
You will be given a user prompt and a list of possibly relevant files on the current directory.
# OUTPUT
You must answer with a list of files that are, or that you predict WILL BE
used, directly or not, to complete the user prompt. Answer in a <FILES/> tag.
Optionally, you can also include a SHORT, 1-paragraph <JUSTIFICATION/>.
# EXAMPLE INPUT
<USER_PROMPT>
Create a script that receives two user inputs and sums them.
</USER_PROMPT>
<CONTEXT>
File: src/sum.py
Content:
```
def sum(a: int, b: int) -> int:
    return a + b
```
File: src/divide.py
Content:
```
def divide(a: int, b: int) -> int:
    return a / b
```
</CONTEXT>
# EXAMPLE OUTPUT
<JUSTIFICATION>
The user will use the sum function to process the user inputs as per requested on the prompt.
</JUSTIFICATION>
<FILES>
src/sum.py
</FILES>
the FILES tag must contain the name of the relevant files, one per line, without anything else.
You can only return files that appeared in the context tags, even if they mention other files or directories that were not written there.
If a file does not exist (and the user asks for something that will create it), do not include it in the list of dependencies.
If there are no relevant files, just return the tags without any content.
"""
def create_context_from_chdir(target_dirs: list[str] | None = None) -> str:
    """Concatenate the contents of allowed files under *target_dirs*.

    Walks each directory (default: the current one), skipping directories
    named in IGNORED_DIRS and files whose extension is not in
    ALLOWED_EXTENSIONS, rendering each remaining file as a
    "File: <path>\\nContent:\\n```...```" section.

    Args:
        target_dirs: Directories to walk; defaults to ``["."]``.

    Returns:
        The concatenated context string (empty when nothing matched).
    """
    if not target_dirs:
        target_dirs = ["."]
    sections: list[str] = []
    for base in target_dirs:
        for root, dirnames, files in os.walk(base):
            # Prune ignored directories in place so os.walk never descends
            # into them.  The original only compared the *tail* of `root`,
            # so e.g. ".git/objects" was still walked.
            dirnames[:] = [d for d in dirnames if d not in IGNORED_DIRS]
            if os.path.basename(root) in IGNORED_DIRS:
                continue
            for file in files:
                if not any(file.endswith(ext) for ext in ALLOWED_EXTENSIONS):
                    continue
                full_fname = os.path.join(root, file)
                try:
                    with open(
                        full_fname, "r", encoding="utf-8", errors="replace"
                    ) as f:
                        file_content = f.read()
                except OSError:
                    # Unreadable file (permissions, broken symlink): skip it
                    # rather than abort the whole walk.
                    continue
                sections.append(
                    f"File: {full_fname}\nContent:\n```{file_content}```\n\n"
                )
    return "".join(sections)
def extract_dependencies(raw_text):
    """Pull the file names listed inside a <FILES>...</FILES> tag.

    Args:
        raw_text: The assistant model's full reply.

    Returns:
        One stripped, non-empty entry per line of the tag's body, or an
        empty list when no <FILES> tag is present.
    """
    files_block = re.search(r"<FILES>(.*?)</FILES>", raw_text, re.DOTALL)
    if files_block is None:
        return []
    entries = files_block.group(1).split("\n")
    return [entry.strip() for entry in entries if entry.strip()]
def select_relevant_files_on_chdir(
    user_prompt: str,
    target_dirs: list[str] | None = None,
    assistant_model: str = ASSISTANT_MODEL_NAME,
) -> str:
    """Ask the assistant model which files are relevant to *user_prompt*.

    Builds a context from the current directory (or *target_dirs*), sends
    it with the prompt to the assistant model, parses the <FILES> tag out
    of the response, and returns the content of those files wrapped in a
    <DEPENDENCIES> tag with cat -n style line numbers.

    Args:
        user_prompt: The user's request.
        target_dirs: Directories to scan; defaults to the current one.
        assistant_model: Model name for the context-gathering call.

    Returns:
        A "<DEPENDENCIES>...</DEPENDENCIES>" string with each existing
        dependency's numbered content, or a "does not exist" note for
        files the model named but that are absent.
    """
    assistant = AI(
        assistant_model,
        system=ASSISTANT_MODEL_SYSTEM_PROMPT,
        max_new_tokens=1024,
        temperature=0,
    )
    context = create_context_from_chdir(target_dirs)
    prompt = f"<USER_PROMPT>{user_prompt}</USER_PROMPT>\n<CONTEXT>{context}</CONTEXT>"
    if os.environ.get("VERBOSE"):
        print(prompt)
    response = assistant(prompt)
    if os.environ.get("VERBOSE"):
        print(response)
    dependencies = extract_dependencies(response)
    print(f"Dependencies: {dependencies}")
    dependencies_content = ""
    for dependency in dependencies:
        if not any(dependency.endswith(ext) for ext in ALLOWED_EXTENSIONS):
            continue
        if os.path.exists(dependency):
            # Read and number the lines in Python instead of shelling out
            # to `cat -n` (no subprocess per file, works off-POSIX).
            # Format mirrors cat -n: 6-wide right-aligned number + tab.
            with open(dependency, "r", encoding="utf-8", errors="replace") as f:
                numbered = "".join(
                    f"{i:6}\t{line}" for i, line in enumerate(f, start=1)
                )
            dependencies_content += (
                f"File: {dependency}\nContent:\n```{numbered}```\n\n"
            )
        else:
            dependencies_content += (
                f"File: {dependency} does not exist.\nContent:null\n"
            )
    return f"<DEPENDENCIES>{dependencies_content}</DEPENDENCIES>"
def extract_code(text: str) -> str:
match = re.search(r"```sh([\s\S]*?)```", text)
return match.group(1).strip() if match else None
def pi_ai_os(
    model: str,
    initial_message: str,
    config_file: str,
    assistant: bool,
    assistant_model: str,
):
    """Run the interactive AIOS chat/command loop.

    Reads user messages from stdin, streams the model's reply, and — when
    the reply contains a ```sh block — offers to execute it, feeding the
    command's output back to the model on the next turn.

    Args:
        model: Main model name passed to tiny_ai_client's AI.
        initial_message: Optional first message sent before prompting.
        config_file: Path to a YAML config; falls back to
            DEFAULT_CONFIG_FILE, then to no config if neither exists.
        assistant: When True, augment each prompt with file context
            gathered by select_relevant_files_on_chdir.
        assistant_model: Model name used for that context gathering.
    """
    print(f"Welcome to AIOS. Model: {model}\n")
    if assistant:
        print(f"Assistant model: {assistant_model}")
    config_file = config_file or DEFAULT_CONFIG_FILE
    if not os.path.exists(config_file):
        config_file = None
    target_dirs = None
    if config_file:
        config = OmegaConf.load(config_file)
        allowed_extensions = (
            config.allowed_extensions
            if "allowed_extensions" in config
            else ALLOWED_EXTENSIONS
        )
        ignored_dirs = config.ignored_dirs if "ignored_dirs" in config else IGNORED_DIRS
        # NOTE(review): when the config omits these keys, the module-level
        # lists are extended with themselves, duplicating every entry;
        # harmless for the membership checks that use them, but looks
        # unintended — confirm.
        ALLOWED_EXTENSIONS.extend(allowed_extensions)
        IGNORED_DIRS.extend(ignored_dirs)
        target_dirs = config.target_dirs if "target_dirs" in config else None
    system_prompt = SYSTEM_PROMPT.format(shell_info=get_shell_info())
    if os.environ.get("VERBOSE"):
        print(f"System prompt: {system_prompt}")
    # Initialize AI
    ai = AI(
        model_name=model,
        system=system_prompt,
        max_new_tokens=4096,
    )
    # Output of the most recently executed command; forwarded to the model
    # in a <SYSTEM> tag on the next turn.
    last_output = ""
    while True:
        if initial_message:
            user_message = initial_message
            initial_message = None  # consume the CLI-supplied message once
        else:
            user_message = input("\033[1mπ \033[0m")
        # /clear (or /c): drop all history except the first chat entry.
        if user_message.strip() == "/clear" or user_message.strip() == "/c":
            ai.chat = [ai.chat[0]]
            print("Cleared context.")
            continue
        # "/image <path>" tokens: load each image for the model and replace
        # the token with a textual placeholder in the message.
        images = []
        image_matches = re.findall(r"/image\s+(\S+)", user_message)
        for image_path in image_matches:
            image_data = Image.open(image_path)
            if image_data:
                images.append(image_data)
            user_message = user_message.replace(
                f"/image {image_path}", f"[Image: {image_path}]"
            )
        if assistant:
            # Augment the message with the file contents the assistant
            # model predicts are relevant to this prompt.
            dependencies_context = select_relevant_files_on_chdir(
                user_message, target_dirs, assistant_model
            )
            user_message = f"{user_message}\n\nYou are also provided following context: {dependencies_context}"
        if os.environ.get("VERBOSE"):
            print(f"User message: {user_message}")
        try:
            # An empty user message just reports the last command output
            # back to the model (lets it continue a multi-step task).
            full_message = (
                f"<SYSTEM>\n{last_output.strip()}\n</SYSTEM>\n<USER>\n{user_message}\n</USER>\n"
                if user_message.strip()
                else f"<SYSTEM>\n{last_output.strip()}\n</SYSTEM>"
            )
            for text in ai.stream(full_message, images=images):
                print(text, end="", flush=True)
            print()
            assistant_message = ai.chat[-1].text
            code = extract_code(assistant_message)
            last_output = ""
            if code:
                print("\033[31mPress enter to execute, or 'N' to cancel.\033[0m")
                answer = input()
                # TODO: delete the warning above from the terminal
                sys.stdout.write("\033[F\033[K")
                sys.stdout.write("\033[F\033[K")
                if answer.lower() == "n":
                    print("Execution skipped.")
                    last_output = "Command skipped.\n"
                else:
                    try:
                        # Run the model's script in a shell, capturing both
                        # stdout and stderr for the next turn.
                        output = subprocess.check_output(
                            code, shell=True, stderr=subprocess.STDOUT
                        ).decode()
                        print("\033[2m" + output.strip() + "\033[0m")
                        last_output = output
                    except subprocess.CalledProcessError as e:
                        # Non-zero exit: still show and forward the output.
                        output = e.output.decode()
                        print("\033[2m" + output.strip() + "\033[0m")
                        last_output = output
        except Exception as error:
            print(f"Error: {str(error)}")
def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the command-line interface for the AIOS entry point."""
    parser = argparse.ArgumentParser(description="AIOS: AI-powered shell assistant")
    parser.add_argument(
        "-m", "--model", default="claude-3-5-sonnet-20240620", help="AI model to use"
    )
    parser.add_argument("-c", "--config", help="Path to the configuration file")
    parser.add_argument(
        "-a",
        "--assistant",
        action="store_true",
        help="Triggers using an assistant model for context gathering",
    )
    parser.add_argument(
        "-am",
        "--assistant-model",
        default=ASSISTANT_MODEL_NAME,
        help="Specify the assistant model name",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Increase output verbosity",
    )
    parser.add_argument("message", nargs="*", help="Initial message to send to the AI")
    return parser


def main():
    """Parse CLI arguments and launch the AIOS loop; exit cleanly on Ctrl-C."""
    args = _build_arg_parser().parse_args()
    if args.verbose:
        # Downstream helpers check the VERBOSE env var, not a flag.
        os.environ["VERBOSE"] = "1"
    initial_message = " ".join(args.message) if args.message else None
    try:
        pi_ai_os(
            args.model,
            initial_message,
            args.config,
            args.assistant,
            args.assistant_model,
        )
    except KeyboardInterrupt:
        print("\nExiting...")
        sys.exit(0)


if __name__ == "__main__":
    main()