-
Notifications
You must be signed in to change notification settings - Fork 0
/
making_queries.py
executable file
·89 lines (71 loc) · 3.9 KB
/
making_queries.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
#!/usr/bin/env python
################################################################################
# commbase-genai-slm-ollama-phi3-mini-memory-remote-rag-pinecone #
# #
# A sophisticated AI assistant's Small Language Model (Phi3), enhanced by #
# Retrieval-Augmented Generation (RAG) for improved response accuracy, and #
# supported by a Pinecone semantic vector database. #
# #
# Change History #
# 06/25/2024 Esteban Herrera Original code. #
# Add new history entries as needed. #
# #
# #
################################################################################
################################################################################
################################################################################
# #
# Copyright (c) 2022-present Esteban Herrera C. #
# stv.herrera@gmail.com #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA      #
#                                                                              #
################################################################################

# making_queries.py
# Desc: Embeds a text query with the project's embedding model, searches the
# Pinecone index for the most similar log-chat entries, and prints each match
# with its similarity score, timestamp, speaker, and text.

# Imports
import functions
import json
import os
import sentence_transformers
# import time
from pinecone import Pinecone, ServerlessSpec

# Obtain the embedding model (and a throwaway test vector) from the
# project's helper module; `model` is reused below to encode the query.
model, xq = functions.test_embedding_model()

# Initialize the Pinecone client. The API key is read from the environment
# (PINECONE_API_KEY) rather than hard-coded, so the secret is never
# committed to version control. Falls back to "" if unset, matching the
# original behavior of an empty key.
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY", ""))

index_name = 'commbase-log-chats'

# Connect to the existing index by name.
index = pc.Index(index_name)

# Alternate sample queries kept for manual experimentation:
# query = "Who is Eva?"
query = "What happened at 20:35:08?"
# query = "What is the meaning of zero day?"
# ----
# Create the query vector: encode the text and convert the resulting
# numpy array to a plain list, as required by the Pinecone client.
xq = model.encode(query).tolist()

# Query the index for the 5 nearest vectors, including stored metadata
# (timestamp, speaker, text) in the response.
xc = index.query(vector=xq, top_k=5, include_metadata=True)
print(xc)
print("")
print(query)

# The raw response `xc` contains the most relevant entries for the query.
# Reformat it into one readable line per match.
# for result in xc['matches']:
#     print(f"{round(result['score'], 2)}: {result['metadata']['text']}")

# Print the score, timestamp, speaker, and text for each match.
for result in xc['matches']:
    score = round(result['score'], 2)
    timestamp = result['metadata']['timestamp']
    speaker = result['metadata']['speaker']
    text = result['metadata']['text']
    print(f"{score}: {timestamp} {speaker}: {text}")

# ## Add this to the prompt:
# ## Please keep your responses to a maximum of three to four sentences.