├── .gitattributes
├── LICENSE
├── README.md
├── data.zip
├── haarcascade_frontalface_default.xml
├── kerasmodel.py
└── model.h5

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
*.zip filter=lfs diff=lfs merge=lfs -text
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 Mohammed Saaduddin Ahmed

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# EmotionDetection_Realtime
This is a Python 3 project that detects faces in a live webcam feed using OpenCV's pre-trained Haar cascade classifier (`haarcascade_frontalface_default.xml`) and classifies each detected face into one of seven facial expressions: angry, disgusted, fearful, happy, neutral, sad, and surprised.

The model is trained on the **FER-2013** dataset, published at the International Conference on Machine Learning (ICML). The dataset consists of 35,887 grayscale 48x48 face images labelled with the seven emotions above.

## Dataset
Because of GitHub's file size limits, the dataset archive 'data.zip' is hosted on Google Drive.
Download the [data.zip](https://drive.google.com/file/d/1yCxHw7aOAPYTVz9VKm2Yax1sxLLtJgWk/view?usp=sharing) file and unzip it in the project directory.

## Dependencies

1. Python 3.x, OpenCV 3 or 4, TensorFlow, TFLearn, Keras
2. Open a terminal, change into the project directory, and install the following libraries
   * ``` pip install numpy```
   * ``` pip install opencv-python```
   * ``` pip install tensorflow```
   * ``` pip install tflearn```
   * ``` pip install keras```

## Execution

1. Unzip the 'data.zip' file in the project directory
2. Open a terminal and change into the project directory
3. Run the command below to start real-time emotion detection from the webcam

``` python kerasmodel.py --mode display```
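The script also has a training mode (see the `train` branch in `kerasmodel.py`). To retrain the network from scratch after unzipping the dataset, run

``` python kerasmodel.py --mode train```

This trains for 50 epochs on `data/train`, validates on `data/test`, saves the accuracy/loss curves to `plot.png`, and writes the learned weights to `model.h5`.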
--------------------------------------------------------------------------------
/data.zip:
--------------------------------------------------------------------------------
version https://git-lfs.github.com/spec/v1
oid sha256:444d392a3040701f0b482b4798f69813afcd52119bb695cff0a75c38b699fb89
size 67073282
--------------------------------------------------------------------------------
/kerasmodel.py:
--------------------------------------------------------------------------------
import os
# silence TensorFlow's C++ logging; must be set before TensorFlow/Keras is imported
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import numpy as np
import argparse
import cv2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt

# command line argument
ap = argparse.ArgumentParser()
ap.add_argument("--mode", help="train/display")
a = ap.parse_args()
mode = a.mode

def plot_model_history(model_history):
    """
    Plot accuracy and loss curves given the model history
    """
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    # summarize history for accuracy
    # (newer Keras versions report 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc')
    axs[0].plot(range(1, len(model_history.history['acc']) + 1), model_history.history['acc'])
    axs[0].plot(range(1, len(model_history.history['val_acc']) + 1), model_history.history['val_acc'])
    axs[0].set_title('Model Accuracy')
    axs[0].set_ylabel('Accuracy')
    axs[0].set_xlabel('Epoch')
    axs[0].set_xticks(np.arange(1, len(model_history.history['acc']) + 1, len(model_history.history['acc']) / 10))
    axs[0].legend(['train', 'val'], loc='best')
    # summarize history for loss
    axs[1].plot(range(1, len(model_history.history['loss']) + 1), model_history.history['loss'])
    axs[1].plot(range(1, len(model_history.history['val_loss']) + 1), model_history.history['val_loss'])
    axs[1].set_title('Model Loss')
    axs[1].set_ylabel('Loss')
    axs[1].set_xlabel('Epoch')
    axs[1].set_xticks(np.arange(1, len(model_history.history['loss']) + 1, len(model_history.history['loss']) / 10))
    axs[1].legend(['train', 'val'], loc='best')
    fig.savefig('plot.png')
    plt.show()

# Define data generators
train_dir = 'data/train'
val_dir = 'data/test'

num_train = 28709
num_val = 7178
batch_size = 64
num_epoch = 50

train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(48, 48),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='categorical')

validation_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(48, 48),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='categorical')

# Create the model
model = Sequential()

model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
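# The stack above: four convolutional layers (32, 64, 128, 128 filters) with
# max pooling and dropout, flattened into a 1024-unit dense layer and a 7-way
# softmax, one unit per emotion class. To verify the layer-by-layer output
# shapes and parameter counts, uncomment:
# model.summary()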
# If you want to train the same model or try other models, go for this
if mode == "train":
    # note: newer Keras spells the argument learning_rate= instead of lr=
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001, decay=1e-6), metrics=['accuracy'])

    # fit_generator is the Keras 2 API; newer versions take the same arguments via model.fit
    model_info = model.fit_generator(
        train_generator,
        steps_per_epoch=num_train // batch_size,
        epochs=num_epoch,
        validation_data=validation_generator,
        validation_steps=num_val // batch_size)

    plot_model_history(model_info)
    model.save_weights('model.h5')

# emotions will be displayed on your face from the webcam feed
elif mode == "display":
    model.load_weights('model.h5')

    # prevents openCL usage and unnecessary logging messages
    cv2.ocl.setUseOpenCL(False)

    # dictionary which assigns each label an emotion (alphabetical order)
    emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

    # load the Haar cascade once, outside the frame loop
    facecasc = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    # start the webcam feed
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facecasc.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            # resize the face crop to 48x48 and add batch and channel dimensions
            # (note: training rescales pixels by 1/255; for strictly consistent
            # inference the crop could also be divided by 255)
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
            prediction = model.predict(cropped_img)
            maxindex = int(np.argmax(prediction))
            cv2.putText(frame, emotion_dict[maxindex], (x + 20, y - 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)

        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/model.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/simplesaad/EmotionDetection_RealTime/a3d3a55351238a88ed9ddd2053c3f683293f4060/model.h5
--------------------------------------------------------------------------------
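For quick offline testing without a webcam, the same weights can be applied to a still image. The following is a minimal sketch, not part of the repository: it assumes `model.h5` and the cascade file sit next to the script, rebuilds the exact architecture from `kerasmodel.py` before calling `load_weights`, and uses a hypothetical input path `sample.jpg`.

```python
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D

# Rebuild the architecture exactly as defined in kerasmodel.py,
# then load the trained weights into it.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(128, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.5),
    Dense(7, activation='softmax'),
])
model.load_weights('model.h5')

emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy",
                4: "Neutral", 5: "Sad", 6: "Surprised"}

# Detect faces in a still image ('sample.jpg' is a placeholder path).
img = cv2.imread('sample.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

# Classify each face crop the same way the display mode does:
# resize to 48x48, add batch and channel dimensions, take the argmax.
for (x, y, w, h) in faces:
    roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
    roi = np.expand_dims(np.expand_dims(roi, -1), 0)  # shape (1, 48, 48, 1)
    print(emotion_dict[int(np.argmax(model.predict(roi)))])
```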