Add ChatBot #218

Closed
wants to merge 1 commit into from
4 changes: 4 additions & 0 deletions AI_CHATBOT/README.md
@@ -0,0 +1,4 @@
# AI_CHATBOT
An AI chatbot built with the Natural Language Toolkit (NLTK) and PyTorch.
I developed this bot for the Petari website.
Feel free to update intents.json to widen the chatbot's domain.
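
For reference, here is a minimal sketch of how the trained bot could be queried programmatically. It assumes `train.py` has already been run in this directory so that `data.pth` exists; the `get_response` helper is illustrative only and not part of this PR.

```python
# Illustrative only: wraps the same inference steps that chat.py performs
# interactively. Assumes data.pth was produced by train.py beforehand.
import json
import random

import torch

from model import NeuralNet
from nltk_utils import bag_of_words, tokenize

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

with open('intents.json', 'r') as f:
    intents = json.load(f)

data = torch.load("data.pth")
model = NeuralNet(data["input_size"], data["hidden_size"], data["output_size"]).to(device)
model.load_state_dict(data["model_state"])
model.eval()


def get_response(sentence):
    """Return a canned response for the predicted intent, or a fallback."""
    tokens = tokenize(sentence)
    bag = bag_of_words(tokens, data["all_words"]).reshape(1, -1)
    X = torch.from_numpy(bag).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)
    prob = torch.softmax(output, dim=1)[0][predicted.item()]

    if prob.item() > 0.75:
        tag = data["tags"][predicted.item()]
        for intent in intents["intents"]:
            if tag == intent["tag"]:
                return random.choice(intent["responses"])
    return "I do not understand..."


print(get_response("How can I volunteer for PETARI?"))
```

The 0.75 confidence threshold mirrors the one used in chat.py.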
53 changes: 53 additions & 0 deletions AI_CHATBOT/chat.py
@@ -0,0 +1,53 @@
import random
import json

import torch

from model import NeuralNet
from nltk_utils import bag_of_words, tokenize

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

with open('intents.json', 'r') as json_data:
    intents = json.load(json_data)

FILE = "data.pth"
data = torch.load(FILE)

input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]

model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = "Snapitizer"
print("Let's chat! (type 'quit' to exit)")
while True:
    # sentence = "do you use credit cards?"
    sentence = input("You: ")
    if sentence == "quit":
        break

    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)

    tag = tags[predicted.item()]

    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                print(f"{bot_name}: {random.choice(intent['responses'])}")
    else:
        print(f"{bot_name}: I do not understand...")
Binary file added AI_CHATBOT/data.pth
Binary file not shown.
138 changes: 138 additions & 0 deletions AI_CHATBOT/intents.json
@@ -0,0 +1,138 @@
{
  "intents": [
    {
      "tag": "greeting",
      "patterns": [
        "Hi",
        "Hello",
        "Hey",
        "How are you",
        "Is anyone there?"
      ],
      "responses": [
        "Hello! Welcome to PETARI, The Food Donate WebApp. How can I assist you today?",
        "Hi there! How can I help you with PETARI today?",
        "Hello! How can we make your experience better with PETARI?"
      ]
    },
    {
      "tag": "goodbye",
      "patterns": ["Bye", "Goodbye", "See you later"],
      "responses": [
        "Goodbye! Thank you for visiting PETARI.",
        "See you later! Have a great day!",
        "Bye! Come back again soon to PETARI."
      ]
    },
    {
      "tag": "thanks",
      "patterns": ["Thanks", "Thank you", "That's helpful", "Thank you so much"],
      "responses": [
        "You're welcome! Happy to help.",
        "Anytime! We're here to assist you.",
        "My pleasure! If you have more questions, feel free to ask."
      ]
    },
    {
      "tag": "mission",
      "patterns": [
        "What is PETARI?",
        "Tell me about PETARI",
        "What does PETARI do?",
        "What is the mission of PETARI?"
      ],
      "responses": [
        "PETARI, The Food Donate WebApp, is an initiative aiming to redistribute excess food from various events to those in need. Our mission is to achieve a hunger-free world and zero food waste.",
        "PETARI focuses on collecting excess food from weddings, parties, and events to redistribute it to individuals who are hungry, thereby reducing food waste and ensuring that surplus food benefits those in need."
      ]
    },
    {
      "tag": "features",
      "patterns": [
        "What features does PETARI offer?",
        "Tell me about the features of PETARI",
        "What can PETARI do?"
      ],
      "responses": [
        "PETARI offers several features, including redistribution of excess food from events, alignment with India's Sustainable Development Goals, environmental consciousness by reducing food waste, and societal impact by addressing food insecurity and hunger issues."
      ]
    },
    {
      "tag": "sustainable goals",
      "patterns": [
        "How does PETARI align with sustainable goals?",
        "What sustainable goals does PETARI support?",
        "Tell me about PETARI's sustainable goals"
      ],
      "responses": [
        "PETARI's mission aligns with India's Sustainable Development Goals, particularly focusing on making the world hunger-free and reducing food waste to contribute to a sustainable and equitable society."
      ]
    },
    {
      "tag": "environmental impact",
      "patterns": [
        "What is PETARI's environmental impact?",
        "How does PETARI help the environment?",
        "Tell me about PETARI's environmental efforts"
      ],
      "responses": [
        "By redistributing excess food and reducing food waste, PETARI helps alleviate the pressure on finite natural resources and minimizes the environmental impact associated with food wastage."
      ]
    },
    {
      "tag": "societal impact",
      "patterns": [
        "How does PETARI benefit society?",
        "What is PETARI's societal impact?",
        "Tell me about PETARI's societal contributions"
      ],
      "responses": [
        "PETARI aims to bring benefits across society by providing access to food for individuals who may not have adequate means to access it, addressing food insecurity and hunger issues."
      ]
    },
    {
      "tag": "economic impact",
      "patterns": [
        "What is PETARI's economic impact?",
        "How does PETARI contribute to economic sustainability?",
        "Tell me about PETARI's economic contributions"
      ],
      "responses": [
        "Through its activities, PETARI not only addresses social issues related to hunger but also contributes to economic sustainability by efficiently utilizing excess resources and reducing wastage."
      ]
    },
    {
      "tag": "collaboration",
      "patterns": [
        "Does PETARI collaborate with events?",
        "How does PETARI collect food?",
        "Tell me about PETARI's collaborations"
      ],
      "responses": [
        "PETARI collaborates with various events, such as weddings and parties, to collect surplus food. We emphasize the importance of partnerships and community involvement in achieving our goals."
      ]
    },
    {
      "tag": "volunteer",
      "patterns": [
        "How can I volunteer for PETARI?",
        "Can I volunteer?",
        "Tell me about volunteering opportunities"
      ],
      "responses": [
        "Thank you for your interest in volunteering! Please visit our website and fill out the volunteer form to get started. We appreciate your support in making a difference."
      ]
    },
    {
      "tag": "donate",
      "patterns": [
        "How can I donate to PETARI?",
        "Can I donate?",
        "Tell me about donation options"
      ],
      "responses": [
        "Thank you for considering a donation! You can donate through our website by clicking on the 'Donate' button. Your contributions help us continue our mission to reduce hunger and food waste."
      ]
    }
  ]
}
19 changes: 19 additions & 0 deletions AI_CHATBOT/model.py
@@ -0,0 +1,19 @@
import torch
import torch.nn as nn


class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, hidden_size)
        self.l3 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        out = self.relu(out)
        out = self.l3(out)
        return out
Empty file added AI_CHATBOT/nitkutils.py
Empty file.
23 changes: 23 additions & 0 deletions AI_CHATBOT/nltk_utils.py
@@ -0,0 +1,23 @@
import numpy as np
import nltk
nltk.download('punkt')
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()

def tokenize(sentence):
    return nltk.word_tokenize(sentence)


def stem(word):
    return stemmer.stem(word.lower())


def bag_of_words(tokenized_sentence, words):
    sentence_words = [stem(word) for word in tokenized_sentence]
    bag = np.zeros(len(words), dtype=np.float32)
    for idx, w in enumerate(words):
        if w in sentence_words:
            bag[idx] = 1

    return bag
108 changes: 108 additions & 0 deletions AI_CHATBOT/train.py
@@ -0,0 +1,108 @@
import numpy as np
import random
import json

import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet

with open('intents.json', 'r') as f:
    intents = json.load(f)

all_words = []
tags = []
xy = []
for intent in intents['intents']:
    tag = intent['tag']
    tags.append(tag)
    for pattern in intent['patterns']:
        w = tokenize(pattern)
        all_words.extend(w)
        xy.append((w, tag))

ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
all_words = sorted(set(all_words))
tags = sorted(set(tags))

print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)

X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    label = tags.index(tag)
    y_train.append(label)

X_train = np.array(X_train)
y_train = np.array(y_train)

num_epochs = 1000
batch_size = 8
learning_rate = 0.01
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)

class ChatDataset(Dataset):

    def __init__(self):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.n_samples

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = NeuralNet(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(dtype=torch.long).to(device)

        outputs = model(words)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch+1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')


print(f'final loss: {loss.item():.4f}')

data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}

FILE = "data.pth"
torch.save(data, FILE)

print(f'training complete. file saved to {FILE}')