# Building a chatbot with both paid and open-source LLMs
from langchain_openai import ChatOpenAI  # OpenAI chat models
from langchain_core.prompts import ChatPromptTemplate  # Prompt template
from langchain_core.output_parsers import StrOutputParser  # Parses the LLM response into a plain string
from langchain_community.llms import Ollama  # Local open-source models via Ollama
from langchain_groq import ChatGroq  # Groq inference API
import streamlit as st  # UI
import os
from dotenv import load_dotenv
load_dotenv()
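# For reference, a minimal sketch of the .env file this script expects
# (key names taken from the os.getenv calls below; the values are
# placeholders, not real keys):
#
#   GROQ_API_KEY=gsk_your_key_here
#   LANGCHAIN_API_KEY=ls_your_key_here  # only needed if LangSmith tracing is enabled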
# Load API keys from the environment
os.environ['GROQ_API_KEY'] = os.getenv("GROQ_API_KEY")
# os.environ['OPENAI_API_KEY'] = os.getenv("OPENAI_API_KEY")
# LangSmith tracking (observability)
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY")
# Defining the prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        # The system message scopes the assistant: questions on unrelated topics
        # should be declined or redirected rather than answered
        ("system", "You are a cricket assistant"),
        ("user", "Question:{question}"),
    ]
)
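# As a quick sanity check (a sketch for intuition, not part of the app flow),
# the template can be rendered directly to see the messages the model receives;
# the example question is hypothetical:
#
#   prompt.format_messages(question="Who won the 2011 Cricket World Cup?")
#   # -> [SystemMessage(content='You are a cricket assistant'),
#   #     HumanMessage(content='Question:Who won the 2011 Cricket World Cup?')]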
# UI
st.title("Question Answering with LangChain and the Groq API")
inputText = st.text_input("Talk with the assistant")
# No OpenAI API key available, so the OpenAI path is kept here for reference only
# Ollama runs large language models locally and serves quantized (compressed) weights automatically
# llm = ChatOpenAI(model="gpt-3.5-turbo")  # example model name; any OpenAI chat model works
# llm = Ollama(model="llama2")  # Using Ollama with the llama2 model
# outputParser = StrOutputParser()
# chain = prompt | llm | outputParser  # Defining the chain by piping prompt -> model -> parser
# Using the Groq inference engine
# groqllm = ChatGroq(model="llama3-70b-8192", temperature=0)
groqApi = ChatGroq(model="gemma-7b-it", temperature=0)  # Groq model IDs are lowercase
outputparser = StrOutputParser()
chainSec = prompt | groqApi | outputparser
# LangChain lets us compose features into a chain (see the sketch below):
#   1. Prompt
#   2. Integration with the LLM
#   3. Output parser
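# A minimal sketch of what the pipe operator builds, shown for intuition only
# (assumed equivalence; each LangChain Runnable exposes .invoke):
#
#   def chain_by_hand(question: str) -> str:
#       messages = prompt.invoke({"question": question})  # 1. Prompt
#       response = groqApi.invoke(messages)               # 2. LLM
#       return outputparser.invoke(response)              # 3. Output parser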
# if inputText:
#     st.write(chain.invoke({'question': inputText}))
if inputText:
    st.write(chainSec.invoke({'question': inputText}))
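# To start the app: streamlit run app.py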