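# Promptbook Streamlit app: renders prompt-building "recipe" functions as a GUI,
# launches the generated prompts against the OpenAI API and keeps a follow-up chat.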
import streamlit as st
import os
import importlib.util
import sys
import inspect
from inspect import cleandoc
from io import BytesIO
from src.utils import *  # provides reset_chat_callback, are_required_filled, launch_prompt, get_token_cost
# set page
st.set_page_config(page_title="Promptbook", page_icon="media/logo.png", initial_sidebar_state="collapsed")
st.image("media/head.png", use_column_width=True)
with st.sidebar:
    st.markdown(cleandoc("""
    # [Promptbook UI](https://github.com/nachollorca/promptbook/)
    Do you find yourself typing the same kinds of prompts into ChatGPT again and again?
    Turn your most-used prompts into a comfortable graphical user interface with just a few lines of code.

    Promptbook allows you to:
    - Build a customizable UI from a simple prompt-building Python function
    - Store your GUI prompts and reuse them with a click
    - Use great recipe ideas from other contributors

    _Please note that this is just a practical PoC._

    ## How it works
    **Promptbook** is built upon Python function signatures and type hints; Streamlit provides the graphical interface.
    In essence, a parser reads the prompt-generating function, identifies its arguments and creates the corresponding Streamlit input widgets in the application.
    Lastly, a Prompt class queries the OpenAI API and computes the answer, together with its token counts and resulting cost.
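    For a taste of the format, a recipe can be as small as this (a hypothetical `recipes/summarizer.py`; the names are illustrative):

    ```python
    _title = "Summarizer"
    _description = "Condense any text into a few sentences."

    def summarizer(text: str, sentences: int = 3) -> str:
        return f"Summarize the following text in at most {sentences} sentences: {text}"
    ```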
    ## How to use
    **To use the current recipes, just play around in here.** If you do not have an OpenAI API key to launch the prompts, you can still generate them and copy-paste them into [ChatGPT](https://chat.openai.com/).

    To create your own recipes, head over to [`docs/contribute.md`](https://github.com/nachollorca/promptbook/blob/main/docs/contribute.md). To learn best practices in prompt engineering, I recommend [this compendium](https://www.promptingguide.ai/introduction/tips).
    """))
# initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# gui index
with st.expander("**:bookmark_tabs: Index**", expanded=True):
# load and choose recipe
recipes = sorted([item.strip(".py") for item in os.listdir("recipes") if item.endswith(".py")])
recipe = st.selectbox(label="Choose a recipe", options=recipes, on_change=reset_chat_callback)
st.caption(
f":link:[Check recipe source code](https://github.com/nachollorca/promptbook/blob/main/recipes/{recipe}.py)")
# import chosen recipe
spec = importlib.util.spec_from_file_location("recipe", f"recipes/{recipe}.py")
module = importlib.util.module_from_spec(spec)
sys.modules["recipe"] = module
spec.loader.exec_module(module)
function = getattr(module, recipe)
ui = getattr(module, "_ui", None)
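# _ui, if present, customizes the widgets per argument name; a hypothetical example:
# _ui = {"text": {"text": "Shown above the widget", "help": "Tooltip", "suggestions": "Placeholder"}}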
signature = inspect.signature(function)
# introduce user interface
if getattr(module, "_title", None) is not None:
st.write(f'### {getattr(module, "_title")}')
if getattr(module, "_author", None) is not None:
st.caption(f'By {getattr(module, "_author")}')
if getattr(module, "_description", None) is not None:
st.write(getattr(module, "_description"))
# parse the signature into a dictionary of argument types, defaults and ui info
params = {}
for name, hint in signature.parameters.items():
    # get parameter type and default value
    params[name] = {}
    params[name]["type"] = hint.annotation
    params[name]["default"] = hint.default
    # get information for the ui
    if hint.default is inspect.Parameter.empty:
        params[name]["required"] = True
        params[name]["label"] = f"**{name.capitalize()}** (required)"
    else:
        params[name]["required"] = False
        params[name]["label"] = f"**{name.capitalize()}** (optional, defaults to `{hint.default}`)"
    if ui is not None and name in ui:
        params[name].update(ui[name])
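# e.g. a required `text: str` argument ends up as (hypothetical values):
# params["text"] = {"type": str, "default": inspect.Parameter.empty, "required": True,
#                   "label": "**Text** (required)", "help": "Tooltip"}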
# gui inputs
with st.expander("**:arrow_forward: Inputs**", expanded=True):
    # grab arguments for the function and create user interface
    args = {}
    for arg, info in params.items():
        # optional introductory text above the widget
        if info.get("text") is not None:
            st.write(info["text"])
        # map the argument's type hint to a suitable input widget
        if info["type"] in (int, float):
            args[arg] = st.number_input(
                label=info["label"],
                help=info.get("help"),
                placeholder=info.get("suggestions"),
            )
        elif info["type"] is BytesIO:
            args[arg] = st.file_uploader(
                label=info["label"],
                help=info.get("help"),
            )
        else:
            args[arg] = st.text_area(
                label=info["label"],
                help=info.get("help"),
                placeholder=info.get("suggestions"),
            )
        # TODO: make input fields for other types, e.g. a multiselect for lists
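        # a possible sketch (untested; assumes _ui supplies an "options" list):
        # elif info["type"] is list:
        #     args[arg] = st.multiselect(label=info["label"], options=info.get("options", []), help=info.get("help"))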
# fill empty fields with default values
for k, v in args.items():
    if v in ["", None]:
        args[k] = params[k]["default"]
# generate and clean prompt
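# cleandoc strips the common leading indentation that multi-line strings pick up inside recipe functions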
prompt = cleandoc(function(**args))
# inspect/tune prompt
c1, c2 = st.columns(2)
if c1.button("Visualize prompt", use_container_width=True):
if are_required_filled(args, params):
st.markdown(prompt)
else:
st.warning("Please fill in all required values.")
if c2.button("Fine-tune prompt", use_container_width=True):
if are_required_filled(args, params):
prompt = st.text_area("Edit prompt", value=prompt, label_visibility="hidden")
else:
st.warning("Please fill in all required values.")
# gui ai settings
with st.expander("**:bulb: AI settings**", expanded=True):
    c1, c2 = st.columns(2)
    model = c1.selectbox("Model", options=["gpt-4", "gpt-3.5-turbo"])
    api_key = c2.text_input(
        "OpenAI API key",
        type="password",
        placeholder="This will never be stored",
        help="If you do not have one, simply click on `Visualize prompt` above and copy-paste the generated prompt into [ChatGPT](https://chat.openai.com/).",
    )
    temperature = st.slider(
        "Temperature",
        min_value=0.0,
        max_value=2.0,
        step=0.1,
        value=0.0,
        help="Controls the “creativity” or randomness of the output. Higher temperatures (e.g., 0.7) produce more diverse and creative (but potentially less coherent) output, while lower temperatures (e.g., 0.2) make the output more deterministic and focused.",
    )
if st.button("Launch prompt", use_container_width=True, on_click=reset_chat_callback):
if not are_required_filled(args, params):
st.warning("Please fill in all required values.")
else:
with st.spinner("**:gear:** on it..."):
st.session_state.messages.append({"role": "user", "content": prompt})
output = launch_prompt(st.session_state.messages, api_key, model, temperature)
st.session_state.messages.append({"role": "assistant", "content": output})
#in_cost = get_token_cost(prompt, model, "user")
#out_cost = get_token_cost(output, model, "assistant")
#c1, c2 = st.columns(2)
#c1.metric("**Tokens** (input/output)", f'{in_cost["tokens"]} / {out_cost["tokens"]}')
#c2.metric("**Cost**", f'{round(in_cost["cost"] + out_cost["cost"], 5)} $')
# chat
if st.session_state.messages:
    # the first message is the launched prompt itself, already shown above
    for message in st.session_state.messages[1:]:
        with st.chat_message(message["role"]):
            st.write(message["content"])
    if user_reply := st.chat_input("Continue interacting with the AI."):
        st.session_state.messages.append({"role": "user", "content": user_reply})
        with st.chat_message("user"):
            st.write(user_reply)
        with st.spinner("**:gear:** on it..."):
            ai_reply = launch_prompt(st.session_state.messages, api_key, model, temperature)
            st.session_state.messages.append({"role": "assistant", "content": ai_reply})
        with st.chat_message("assistant"):
            st.write(ai_reply)