Skip to content

Commit

Permalink
Reformatting code
Browse files Browse the repository at this point in the history
  • Loading branch information
AmberSahdev committed Mar 1, 2024
1 parent b02c9b9 commit 687f0f6
Show file tree
Hide file tree
Showing 8 changed files with 19 additions and 21 deletions.
5 changes: 3 additions & 2 deletions app/app.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import sys
import threading
from multiprocessing import freeze_support

from core import Core
from ui import UI
from multiprocessing import freeze_support


class App:
Expand All @@ -15,7 +15,7 @@ class App:
| | GUI | |
| +-------+ |
| ^ |
| | |
| | (via MP Queues) |
| v |
| +-----------+ (Screenshot + Goal) +-----------+ |
| | | --------------------> | | |
Expand All @@ -34,6 +34,7 @@ class App:
| +-------------+ |
+----------------------------------------------------+
"""

def __init__(self):
self.core = Core()
self.ui = UI()
Expand Down
13 changes: 7 additions & 6 deletions app/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def __init__(self):
self.interpreter = Interpreter(self.status_queue)
try:
self.llm = LLM()
except OpenAIError as e:
except OpenAIError as _:
self.status_queue.put("Set your OpenAPI API Key in Settings and Restart the App")

def execute_user_request(self, user_request):
Expand All @@ -41,18 +41,19 @@ def execute(self, user_request, step_num=0):

if instructions == {}:
# Sometimes LLM sends malformed JSON response, in that case retry once more.
instructions = self.llm.get_instructions_for_objective(user_request + " Please reply in valid JSON", step_num)
instructions = self.llm.get_instructions_for_objective(user_request + " Please reply in valid JSON",
step_num)

for step in instructions["steps"]:
if self.interrupt_execution:
self.status_queue.put("Interrupted")
self.interrupt_execution = False
return "Interrupted"
else:
success = self.interpreter.process_command(step)

if not success:
return "Unable to execute the request"
success = self.interpreter.process_command(step)

if not success:
return "Unable to execute the request"

except Exception as e:
self.status_queue.put(f"Exception Unable to execute the request - {e}")
Expand Down
2 changes: 1 addition & 1 deletion app/interpreter.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def execute_function(self, function_name, parameters):
1. time.sleep() - to wait for web pages, applications, and other things to load.
2. pyautogui calls to interact with system's mouse and keyboard.
"""
# Sometimes pyautogui needs warming up - i.e. sometimes first call isn't executed hence padding a random call here.
# Sometimes pyautogui needs warming up i.e. sometimes first call isn't executed hence padding a random call here
pyautogui.press("command", interval=0.1)

if function_name == "sleep" and parameters.get("secs"):
Expand Down
11 changes: 2 additions & 9 deletions app/llm.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
import json
import os
from pathlib import Path

from openai import OpenAI

from utils import local_info
from utils.screen import Screen
from utils.settings import Settings

from pathlib import Path


class LLM:
"""
Expand Down Expand Up @@ -123,21 +122,15 @@ def send_message_to_llm(self, message):
def convert_llm_response_to_json(self, llm_response):
llm_response_data = llm_response.choices[0].message.content.strip()

# Our current LLM model does not guarantee a JSON response, hence we manually parse the JSON part of the response
# Our current LLM model does not guarantee a JSON response hence we manually parse the JSON part of the response
# Check for updates here - https://platform.openai.com/docs/guides/text-generation/json-mode
start_index = llm_response_data.find('{')
end_index = llm_response_data.rfind('}')

try:
json_response = json.loads(llm_response_data[start_index:end_index + 1].strip())
except Exception as e:
print(f'llm_response_data[start_index:end_index + 1] - {llm_response_data[start_index:end_index + 1]}')
print(f'Error while parsing JSON response - {e}')

# TODO: Temporary for debugging
with open("faulty_json_recieved.json", "w") as f:
f.write(llm_response_data[start_index:end_index + 1].strip())

json_response = {}

return json_response
6 changes: 4 additions & 2 deletions app/ui.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,14 @@
import tkinter as tk
import webbrowser
from multiprocessing import Queue
from pathlib import Path
from tkinter import ttk

import speech_recognition as sr
from PIL import Image, ImageTk

from utils.settings import Settings
from pathlib import Path


def open_link(url):
webbrowser.open_new(url)
Expand Down Expand Up @@ -75,7 +76,8 @@ def create_widgets(self):
# Hyperlink Label
link_label = tk.Label(self, text='Instructions', fg='#499CE4')
link_label.pack()
link_label.bind('<Button-1>', lambda e: open_link('https://www.AmberSah.dev'))
link_label.bind('<Button-1>', lambda e: open_link(
'https://github.com/AmberSahdev/Open-Interface?tab=readme-ov-file#installation'))

def save_button(self):
api_key = self.api_key_entry.get().strip()
Expand Down
2 changes: 1 addition & 1 deletion app/version.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
from packaging.version import Version

version = Version('0.2.0')
version = Version('0.2.0')
Empty file added assets/video_to_gif.txt
Empty file.
1 change: 1 addition & 0 deletions build.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
import platform

import PyInstaller.__main__

from app.version import version


Expand Down

0 comments on commit 687f0f6

Please sign in to comment.