├── Emotion_Detection.h5
├── README.md
├── haarcascade_frontalface_default.xml
├── test.py
└── train.py

/Emotion_Detection.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pypower-codes/Emotion-Detection/576b99f5806536b2c43543be1ee3baf3dd3bf872/Emotion_Detection.h5
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# PyPower Project - Emotion Detection using AI

## Emotion Detection

- Kaggle dataset: https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data

- Use the train.py file to train the model.

- Change the number of classes according to your dataset.

- Experiment with different pre-trained models.

- Execute the test.py file to run the emotion detection.

- Enjoy deep learning!

## A detailed tutorial is available in this video; please refer to it for a better understanding.

- https://youtu.be/PulKlAZRoAY
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
'''
PyPower Projects
Emotion Detection Using AI
'''

# USAGE: python test.py

from keras.models import load_model
from keras.preprocessing.image import img_to_array
import cv2
import numpy as np

# Haar cascade for face detection plus the pre-trained emotion classifier.
face_classifier = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
classifier = load_model('./Emotion_Detection.h5')

class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

cap = cv2.VideoCapture(0)

while True:
    # Grab a single frame of video.
    ret, frame = cap.read()
    if not ret:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)

        if np.sum([roi_gray]) != 0:
            # Rescale to [0, 1] and add the channel and batch dimensions.
            roi = roi_gray.astype('float') / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)

            # Make a prediction on the ROI, then look up the class label.
            preds = classifier.predict(roi)[0]
            print("\nprediction = ", preds)
            label = class_labels[preds.argmax()]
            print("\nprediction max = ", preds.argmax())
            print("\nlabel = ", label)
            label_position = (x, y)
            cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
        else:
            cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 3)
        print("\n\n")

    cv2.imshow('Emotion Detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
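test.py above drives a live webcam feed. For a quick check on a single image, the same pipeline reduces to the short helper sketched below. This script is illustrative and not part of the repository; like test.py, it assumes the shipped Emotion_Detection.h5 takes 48x48 grayscale input.

/predict_image.py (illustrative sketch, not in the repository):
--------------------------------------------------------------------------------
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array

face_classifier = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
classifier = load_model('./Emotion_Detection.h5')
class_labels = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']

def predict_emotions(image_path):
    """Return a (label, confidence) pair for each face found in the image."""
    frame = cv2.imread(image_path)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    results = []
    for (x, y, w, h) in face_classifier.detectMultiScale(gray, 1.3, 5):
        roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
        roi = img_to_array(roi.astype('float') / 255.0)  # shape (48, 48, 1)
        preds = classifier.predict(np.expand_dims(roi, axis=0))[0]
        results.append((class_labels[preds.argmax()], float(preds.max())))
    return results

if __name__ == '__main__':
    # 'my_face.jpg' is a placeholder; substitute any image containing a face.
    print(predict_emotions('my_face.jpg'))
--------------------------------------------------------------------------------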
/train.py:
--------------------------------------------------------------------------------
from keras.applications import MobileNet
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator

# MobileNet is designed to work with images of size 224x224.
img_rows, img_cols = 224, 224

base_model = MobileNet(weights='imagenet',
                       include_top=False,
                       input_shape=(img_rows, img_cols, 3))

# Layers are trainable by default; here we explicitly fine-tune every layer
# of the pre-trained base rather than freezing any of them.
for layer in base_model.layers:
    layer.trainable = True

# Print the layers.
for (i, layer) in enumerate(base_model.layers):
    print(str(i), layer.__class__.__name__, layer.trainable)

def addTopModelMobileNet(bottom_model, num_classes):
    """Creates the top, or head, of the model that will be
    placed on top of the bottom layers."""
    top_model = bottom_model.output
    top_model = GlobalAveragePooling2D()(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(1024, activation='relu')(top_model)
    top_model = Dense(512, activation='relu')(top_model)
    top_model = Dense(num_classes, activation='softmax')(top_model)
    return top_model

num_classes = 5

FC_Head = addTopModelMobileNet(base_model, num_classes)

model = Model(inputs=base_model.input, outputs=FC_Head)

print(model.summary())

# flow_from_directory expects one subdirectory per class under each of these
# paths; adjust them to wherever your copy of the dataset lives.
train_data_dir = '/Users/durgeshthakur/Deep Learning Stuff/Emotion Classification/fer2013/train'
validation_data_dir = '/Users/durgeshthakur/Deep Learning Stuff/Emotion Classification/fer2013/validation'

# Augment the training data; the validation data is only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    horizontal_flip=True,
    fill_mode='nearest')

validation_datagen = ImageDataGenerator(rescale=1. / 255)

batch_size = 32

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')

from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

# Save the best model (by validation loss) seen during training.
checkpoint = ModelCheckpoint(
    'emotion_face_mobilNet.h5',
    monitor='val_loss',
    mode='min',
    save_best_only=True,
    verbose=1)

# Stop training when validation loss has not improved for 10 epochs.
earlystop = EarlyStopping(
    monitor='val_loss',
    min_delta=0,
    patience=10,
    verbose=1,
    restore_best_weights=True)

# Shrink the learning rate when validation accuracy plateaus.
learning_rate_reduction = ReduceLROnPlateau(
    monitor='val_acc',
    patience=5,
    verbose=1,
    factor=0.2,
    min_lr=0.0001)

callbacks = [earlystop, checkpoint, learning_rate_reduction]

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.001),
              metrics=['accuracy'])

nb_train_samples = 24176
nb_validation_samples = 3006

epochs = 25

history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    callbacks=callbacks,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
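
# Optional follow-up, sketched here rather than taken from the original script:
# reload the best checkpoint saved by ModelCheckpoint above and measure its
# validation performance. evaluate_generator matches the fit_generator-era
# Keras API used in the rest of this file.
from keras.models import load_model

best_model = load_model('emotion_face_mobilNet.h5')
val_loss, val_acc = best_model.evaluate_generator(
    validation_generator,
    steps=nb_validation_samples // batch_size)
print('val_loss = %.4f, val_acc = %.4f' % (val_loss, val_acc))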
--------------------------------------------------------------------------------
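A note on combining the two scripts: test.py feeds 48x48 grayscale face crops to the shipped Emotion_Detection.h5, while train.py produces a checkpoint ('emotion_face_mobilNet.h5') whose MobileNet base expects 224x224 RGB input. If you point test.py at your own checkpoint, the ROI preprocessing must change to match. A minimal sketch, mirroring the ImageDataGenerator settings in train.py (the helper name is hypothetical):

import cv2
import numpy as np

def preprocess_face_for_mobilenet(frame, x, y, w, h):
    """Prepare one detected face ROI for the 224x224 RGB MobileNet from train.py."""
    roi = frame[y:y + h, x:x + w]                       # keep all three colour channels
    roi = cv2.resize(roi, (224, 224), interpolation=cv2.INTER_AREA)
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)          # OpenCV is BGR; Keras generators load RGB
    roi = roi.astype('float32') / 255.0                 # same rescale=1./255 as in train.py
    return np.expand_dims(roi, axis=0)                  # add the batch dimension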