diff --git a/backend/chat_with_user.py b/backend/chat_with_user.py index 6c82b37..505104c 100644 --- a/backend/chat_with_user.py +++ b/backend/chat_with_user.py @@ -6,12 +6,12 @@ def get_system_message(content_type: ContentType): return f""" - 1. You're a super-intelligent AI. Your task is to understand what audiocast the user wants to listen to. + 1. You're a super-intelligent AI. Your task is to understand what audiocast a user wants to listen to. 2. You will steer the conversation until you have enough context after which you should terminate. - 3. Keep the conversation short, say 2-3 back and forth - questions and answers. - 4. As soon as the user's request is clear terminate the conversation by saying, "Ok, thanks for clarifying! Please click the button below to start generating the audiocast." + 3. Keep the conversation short, say 3-5 back-and-forth exchanges, i.e., questions and answers. + 4. As soon as the user's request is clear terminate the conversation by saying, "Ok, thanks for clarifying! Based on your specifications, you want to listen to [Best case summary of user request so far]. Please click the button below to start generating the audiocast." 5. You can also terminate the conversation using a varied response strictly similar to (4) above. + 6. If the user's request remains unclear after 5 clarifying responses, terminate the conversation by saying, "Your request is not very specific but from what I understand, you want to listen to [Best case summary of user request so far]. Please click the button below to start generating the audiocast." - 6. If the user's request remains unclear after 3 responses for clarity, terminate the conversation by saying, "Your request is not very specific but from what I understand, you want to listen to [Best case summary of user request so far]. Please click the button below to start generating the audiocast." 
GENERAL IDEA AND WORKFLOW: diff --git a/frontend/app.py b/frontend/app.py index 6188711..696511c 100644 --- a/frontend/app.py +++ b/frontend/app.py @@ -2,8 +2,9 @@ import httpx import streamlit as st +from chat_utils import chat_request, content_types from env_var import APP_URL, BACKEND_URL -from example_utils import content_types, display_example_cards +from example_utils import display_example_cards # Initialize session state if "chat_session_id" not in st.session_state: @@ -46,22 +47,9 @@ with st.chat_message("user"): st.write(prompt) - # Send message to backend - response = httpx.post( - f"{BACKEND_URL}/api/chat/{st.session_state.chat_session_id}", - json={ - "message": {"role": "user", "content": prompt}, - "content_type": content_type, - }, - timeout=None, - ) - - response.raise_for_status() - - if response.status_code == 200: - ai_message = "" - for line in response.iter_lines(): - ai_message += line + ai_message = chat_request(prompt, content_type) + + if ai_message: st.session_state.messages.append({"role": "assistant", "content": ai_message}) with st.chat_message("assistant"): diff --git a/frontend/chat_utils.py b/frontend/chat_utils.py new file mode 100644 index 0000000..414fabd --- /dev/null +++ b/frontend/chat_utils.py @@ -0,0 +1,38 @@ +from typing import Dict, List, Literal + +import httpx +import streamlit as st +from env_var import BACKEND_URL + +ContentType = Literal["story", "podcast", "sermon", "science"] + +content_types: List[ContentType] = ["story", "podcast", "sermon", "science"] + +content_examples: Dict[ContentType, str] = { + "story": "Tell me a story about a magical kingdom with dragons and wizards.", + "podcast": "Create a podcast about the history of space exploration.", + "sermon": "Write a sermon about finding peace in times of trouble.", + "science": "Explain the concept of black holes in simple terms.", +} + + +def chat_request(prompt: str, content_type: ContentType): + """ + Send a chat request to the backend server and return the 
AI response. + """ + response = httpx.post( + f"{BACKEND_URL}/api/chat/{st.session_state.chat_session_id}", + json={ + "message": {"role": "user", "content": prompt}, + "content_type": content_type, + }, + timeout=None, + ) + + response.raise_for_status() + + ai_message = "" + for line in response.iter_lines(): + ai_message += line + + return ai_message diff --git a/frontend/example_utils.py b/frontend/example_utils.py index e5f9887..1d17efb 100644 --- a/frontend/example_utils.py +++ b/frontend/example_utils.py @@ -1,19 +1,5 @@ -from typing import Dict, List, Literal - -import httpx import streamlit as st -from env_var import BACKEND_URL - -ContentType = Literal["story", "podcast", "sermon", "science"] - -content_types: List[ContentType] = ["story", "podcast", "sermon", "science"] - -content_examples: Dict[ContentType, str] = { - "story": "Tell me a story about a magical kingdom with dragons and wizards.", - "podcast": "Create a podcast about the history of space exploration.", - "sermon": "Write a sermon about finding peace in times of trouble.", - "science": "Explain the concept of black holes in simple terms.", -} +from chat_utils import chat_request, content_examples def display_example_cards(): @@ -48,23 +34,13 @@ def display_example_cards(): if st.button(example, use_container_width=True): # Add selected example to messages and trigger rerun to enter chat mode st.session_state.messages.append({"role": "user", "content": example}) - response = httpx.post( - f"{BACKEND_URL}/api/chat/{st.session_state.chat_session_id}", - json={ - "message": {"role": "user", "content": example}, - "content_type": content_type, - }, - timeout=None, - ) - - response.raise_for_status() - if response.status_code == 200: - ai_message = "" - for line in response.iter_lines(): - ai_message += line + ai_message = chat_request(example, content_type) + if ai_message: st.session_state.messages.append( {"role": "assistant", "content": ai_message} ) st.rerun() + else: + st.error("Failed to 
generate AI response. Please try again.")