fix(www) Feature Flag for Stripe integration
VVoruganti committed Oct 1, 2024
1 parent 3bc1928 commit eed9713
Showing 6 changed files with 87 additions and 88 deletions.
121 changes: 75 additions & 46 deletions agent/agent/chain.py
@@ -1,8 +1,7 @@
-import os
+from os import getenv
 from typing import List
 
-from mirascope.openai import OpenAICall, OpenAICallParams, azure_client_wrapper
-from mirascope.base import BaseConfig
+from openai import AzureOpenAI
 from dotenv import load_dotenv
 
 from honcho import Honcho
@@ -14,44 +13,49 @@
 load_dotenv()
 
 
-class HonchoCall(OpenAICall):
+class HonchoCall:
+    def __init__(
+        self,
+        user_input: str,
+        app_id: str,
+        user_id: str,
+        session_id: str,
+        honcho: Honcho,
+    ):
+        self.user_input = user_input
+        self.app_id = app_id
+        self.user_id = user_id
+        self.session_id = session_id
+        self.honcho = honcho
 
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    call_params = OpenAICallParams(model=os.getenv("AZURE_OPENAI_DEPLOYMENT"))
-    configuration = BaseConfig(
-        client_wrappers=[
-            azure_client_wrapper(
-                api_key=os.getenv("AZURE_OPENAI_API_KEY"),
-                api_version=os.getenv("AZURE_OPENAI_API_VERSION"),
-                azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
-            )
-        ]
+    openai = AzureOpenAI(
+        api_key=getenv("AZURE_OPENAI_API_KEY", "placeholder"),
+        azure_endpoint=getenv("AZURE_OPENAI_ENDPOINT", "placeholder"),
+        api_version=getenv("AZURE_OPENAI_API_VERSION", "2024-02-01"),
     )
 
-    user_input: str
-    app_id: str
-    user_id: str
-    session_id: str
-    honcho: Honcho
+    model = getenv("AZURE_OPENAI_DEPLOYMENT", "placeholder")
 
 
 class ThinkCall(HonchoCall):
-    prompt_template = """
-    SYSTEM:
-    You are Bloom, a subversive-minded learning companion. Your job is to employ your theory of mind skills to predict the user’s mental state.
-    Generate a thought that makes a prediction about the user's needs given current dialogue and also lists other pieces of data that would help improve your prediction
-    previous commentary: {history}
+    def template(self) -> dict[str, str]:
+        system = (
+            {
+                "role": "system",
+                "content": f"""You are Bloom, a subversive-minded learning companion. Your job is to employ your theory of mind skills to predict the user’s mental state.
+Generate a thought that makes a prediction about the user's needs given current dialogue and also lists other pieces of data that would help improve your prediction
+previous commentary: {self.history}""",
+            },
+        )
 
-    USER: {user_input}
-    """
-    user_input: str
+        return system[0]
 
     @property
-    def history(self) -> List[dict]:
+    def history(self) -> str:
         """Get the conversation history from Honcho"""
-        history_list = []
+        history_str = ""
         iter = self.honcho.apps.users.sessions.messages.list(
             session_id=self.session_id,
             app_id=self.app_id,
@@ -68,25 +72,40 @@ def history(self) -> List[dict]:
         past_thoughts = {m.message_id: m.content for m in meta_iter.items}
         for message in iter.items[::-1]:
             if message.is_user:
-                history_list.append({"role": "user", "content": message.content})
-                history_list.append(
-                    {"role": "assistant", "content": past_thoughts[message.id]}
-                )
-        return history_list
+                history_str += f"USER: {message.content}\n"
+                history_str += f"THOUGHT: {past_thoughts[message.id]}\n"
+        return history_str
+
+    def stream(self):
+        completion = self.openai.chat.completions.create(
+            model=getenv("AZURE_OPENAI_DEPLOYMENT", "placeholder"),
+            messages=[self.template(), {"role": "user", "content": self.user_input}],
+            stream=True,
+        )
+        for chunk in completion:
+            if len(chunk.choices) > 0:
+                yield chunk.choices[0].delta.content or ""
 
 
 class RespondCall(HonchoCall):
-    prompt_template = """
-    SYSTEM: You are Bloom, a subversive-minded learning companion with deep expertise in Socratic dialogue. Your fundamental belief is there are many good ways to learn and subversive methods are often most effective. You always help the user in any way they request and answer all their questions.
-    {thought}
-    You must produce an appropriate response to the user input. Format equations in LaTeX and wrap in dollar signs like this: $\LaTeX$. Use markdown code syntax. Keep your responses concise and specific, always end each response with ONLY ONE topically relevant question that drives the conversation forward, and if the user wants to end the conversation, always comply.
-    MESSAGES: {history}
-    USER: {user_input}
-    """
-
-    thought: str
+    def __init__(self, *args, thought, **kwargs):
+        self.thought = thought
+        super().__init__(*args, **kwargs)
+
+    def template(self):
+        return [
+            {
+                "role": "system",
+                "content": f"""You are Bloom, a subversive-minded learning companion with deep expertise in Socratic dialogue. Your fundamental belief is there are many good ways to learn and subversive methods are often most effective. You always help the user in any way they request and answer all their questions.
+{self.thought}
+You must produce an appropriate response to the user input. Format equations in LaTeX and wrap in dollar signs like this: $\LaTeX$. Use markdown code syntax. Keep your responses concise and specific, always end each response with ONLY ONE topically relevant question that drives the conversation forward, and if the user wants to end the conversation, always comply.
+""",
+            },
+            *self.history,
+            {"role": "user", "content": self.user_input},
+        ]
 
     @property
     def history(self) -> List[dict]:
@@ -101,3 +120,13 @@ def history(self) -> List[dict]:
             else:
                 history_list.append({"role": "assistant", "content": message.content})
         return history_list
+
+    def stream(self):
+        completion = self.openai.chat.completions.create(
+            model=getenv("AZURE_OPENAI_DEPLOYMENT", "placeholder"),
+            messages=self.template(),
+            stream=True,
+        )
+        for chunk in completion:
+            if len(chunk.choices) > 0:
+                yield chunk.choices[0].delta.content or ""
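
Taken together, the chain.py changes swap mirascope's class-attribute prompt templates for plain classes that call the AzureOpenAI client directly. A minimal usage sketch of the new shape, assuming hypothetical app/user/session IDs and Honcho client construction (none of this is part of the commit):

    from honcho import Honcho

    honcho = Honcho()  # hypothetical client setup; the API wires this up elsewhere
    kwargs = dict(
        user_input="Why is the sky blue?",  # hypothetical turn
        app_id="app-uuid",
        user_id="user-uuid",
        session_id="session-uuid",
        honcho=honcho,
    )

    # ThinkCall.stream() and RespondCall.stream() now yield plain str chunks
    thought = "".join(ThinkCall(**kwargs).stream())
    response = "".join(RespondCall(thought=thought, **kwargs).stream())
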
1 change: 0 additions & 1 deletion agent/pyproject.toml
@@ -8,7 +8,6 @@ authors = [
 requires-python = ">=3.11"
 dependencies = [
     "honcho-ai>=0.0.14",
-    "mirascope==0.18.0",
     "openai>=1.3.8",
     "validators>=0.20.0",
     "python-dotenv>=1.0.1",
8 changes: 4 additions & 4 deletions api/routers/chat.py
@@ -26,8 +26,8 @@ def convo_turn():
     ).stream()
     thought = ""
     for chunk in thought_stream:
-        thought += chunk.content
-        yield chunk.content
+        thought += chunk
+        yield chunk
 
     yield "❀"
     response_stream = RespondCall(
@@ -40,8 +40,8 @@ def convo_turn():
     ).stream()
     response = ""
     for chunk in response_stream:
-        response += chunk.content
-        yield chunk.content
+        response += chunk
+        yield chunk
     yield "❀"
 
     new_message = honcho.apps.users.sessions.messages.create(
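
Because stream() now yields bare strings, the route concatenates chunks directly and frames each phase with a ❀ sentinel: thought, ❀, response, ❀. A hypothetical client-side parse of one fully buffered turn (the helper name is mine, not from the repo):

    def split_turn(raw: str) -> tuple[str, str]:
        # text before the first ❀ is the thought;
        # text up to the second ❀ is the response
        thought, _, rest = raw.partition("❀")
        response, _, _ = rest.partition("❀")
        return thought.strip(), response.strip()
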
35 changes: 0 additions & 35 deletions uv.lock


1 change: 1 addition & 0 deletions www/.env.template
@@ -11,6 +11,7 @@ NEXT_PUBLIC_SENTRY_ENVIRONMENT=
 NEXT_PUBLIC_POSTHOG_KEY=
 NEXT_PUBLIC_POSTHOG_HOST=
 # Stripe
+NEXT_PUBLIC_STRIPE_ENABLED=false
 STRIPE_SECRET_KEY=
 NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=
 STRIPE_WEBHOOK_SECRET=
9 changes: 7 additions & 2 deletions www/app/page.tsx
@@ -73,8 +73,12 @@ export default function Home() {
       posthog?.identify(userId, { email: user.email });
 
       // Check subscription status
-      const sub = await checkSubscription();
-      setIsSubscribed(sub === SubscriptionStatus.SUBSCRIBED);
+      if (process.env.NEXT_PUBLIC_STRIPE_ENABLED === "false") {
+        setIsSubscribed(true);
+      } else {
+        const sub = await checkSubscription();
+        setIsSubscribed(sub === SubscriptionStatus.SUBSCRIBED);
+      }
 
     })();
   }, [supabase]);
@@ -211,6 +215,7 @@ export default function Home() {
           // setThought((prev) => prev + "\n" + value + "\n");
           continue;
         }
+        console.log(value)
         setThought((prev) => prev + value);
         // mutateMessages(newMessages, { revalidate: false });
       } else {