├── README.md
├── intents.json
├── chatbot.py
└── new.py

/README.md:
--------------------------------------------------------------------------------
# create_chatbot_using_python

This project is an implementation of a simple chatbot using TensorFlow, a machine learning framework developed by Google. A neural network is trained to classify user input into predefined intents, and the bot replies with an appropriate response for the detected intent.

The intents.json file is the data we provide to the chatbot: each intent defines a tag, example patterns, and possible responses.
--------------------------------------------------------------------------------
/intents.json:
--------------------------------------------------------------------------------
{"intents": [
  {"tag": "greeting",
   "patterns": ["Hi there", "How are you", "Is anyone there?", "Hey", "Hola", "Hello", "Good day"],
   "responses": ["Hello", "Good to see you again", "Hi there, how can I help?"],
   "context": [""]
  },
  {"tag": "goodbye",
   "patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"],
   "responses": ["See you!", "Have a nice day", "Bye! Come back again soon."],
   "context": [""]
  },
  {"tag": "thanks",
   "patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"],
   "responses": ["My pleasure", "You're welcome"],
   "context": [""]
  },
  {"tag": "query",
   "patterns": ["What is Simplilearn?"],
   "responses": ["Simplilearn is a popular online bootcamp and online course learning platform"],
   "context": [""]
  }
]}
--------------------------------------------------------------------------------
/chatbot.py:
--------------------------------------------------------------------------------
import random
import json
import pickle
import numpy as np
import nltk

from nltk.stem import WordNetLemmatizer
from keras.models import load_model

lemmatizer = WordNetLemmatizer()
# Raw string avoids invalid escape sequences in the Windows path.
intents = json.loads(open(r'C:\Simplilearn\Python\Python projects\chatbot using python\chatbot\intents.json').read())

# Load the vocabulary, class labels and trained model produced by new.py.
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbot_model.h5')


def clean_up_sentence(sentence):
    # Tokenize, lowercase and lemmatize so the input matches the training vocabulary.
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words


def bag_of_words(sentence):
    # Encode the sentence as a binary bag-of-words vector over the vocabulary.
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)


def predict_class(sentence):
    bow = bag_of_words(sentence)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]

    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({'intent': classes[r[0]], 'probability': str(r[1])})
    return return_list


def get_response(intents_list, intents_json):
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result

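# Note: nltk.word_tokenize and WordNetLemmatizer rely on NLTK data packages
# that are not bundled with the library. If they are missing, a one-time
# download may be needed before running this script or new.py, e.g.:
#
#   nltk.download('punkt')
#   nltk.download('wordnet')
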
Bot is running!") 54 | 55 | while True: 56 | message = input("") 57 | ints = predict_class (message) 58 | res = get_response (ints, intents) 59 | print (res) 60 | -------------------------------------------------------------------------------- /new.py: -------------------------------------------------------------------------------- 1 | import random 2 | import json 3 | import pickle 4 | import numpy as np 5 | import tensorflow as tf 6 | 7 | import nltk 8 | from nltk.stem import WordNetLemmatizer 9 | 10 | lemmatizer = WordNetLemmatizer() 11 | 12 | intents = json.loads(open('C:\Simplilearn\Python\Python projects\chatbot using python\chatbot\intents.json').read()) 13 | 14 | words = [] 15 | classes = [] 16 | documents = [] 17 | ignoreLetters = ['?', '!', '.', ','] 18 | 19 | for intent in intents['intents']: 20 | for pattern in intent['patterns']: 21 | wordList = nltk.word_tokenize(pattern) 22 | words.extend(wordList) 23 | documents.append((wordList, intent['tag'])) 24 | if intent['tag'] not in classes: 25 | classes.append(intent['tag']) 26 | 27 | words = [lemmatizer.lemmatize(word) for word in words if word not in ignoreLetters] 28 | words = sorted(set(words)) 29 | 30 | classes = sorted(set(classes)) 31 | 32 | pickle.dump(words, open('words.pkl', 'wb')) 33 | pickle.dump(classes, open('classes.pkl', 'wb')) 34 | 35 | training = [] 36 | outputEmpty = [0] * len(classes) 37 | 38 | for document in documents: 39 | bag = [] 40 | wordPatterns = document[0] 41 | wordPatterns = [lemmatizer.lemmatize(word.lower()) for word in wordPatterns] 42 | for word in words: 43 | bag.append(1) if word in wordPatterns else bag.append(0) 44 | 45 | outputRow = list(outputEmpty) 46 | outputRow[classes.index(document[1])] = 1 47 | training.append(bag + outputRow) 48 | 49 | random.shuffle(training) 50 | training = np.array(training) 51 | 52 | trainX = training[:, :len(words)] 53 | trainY = training[:, len(words):] 54 | 55 | 56 | model = tf.keras.Sequential() 57 | model.add(tf.keras.layers.Dense(128, input_shape=(len(trainX[0]),), activation = 'relu')) 58 | model.add(tf.keras.layers.Dropout(0.5)) 59 | model.add(tf.keras.layers.Dense(64, activation = 'relu')) 60 | model.add(tf.keras.layers.Dropout(0.5)) 61 | model.add(tf.keras.layers.Dense(len(trainY[0]), activation='softmax')) 62 | 63 | sgd = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True) 64 | model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) 65 | 66 | hist = model.fit(np.array(trainX), np.array(trainY), epochs=200, batch_size=5, verbose=1) 67 | model.save('chatbot_model.h5', hist) 68 | print('Done') 69 | 70 | 71 | 72 | --------------------------------------------------------------------------------