├── .gitignore
├── __pycache__
│   └── helper_modules.cpython-35.pyc
├── .idea
│   └── vcs.xml
├── README.md
└── commence_training.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
datasets/
Images/
saved_models/
trained_networks/
experiment_images/
nikhil-pair.pem
.idea/

--------------------------------------------------------------------------------
/__pycache__/helper_modules.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nikhil-salodkar/facial_expression/HEAD/__pycache__/helper_modules.cpython-35.pyc

--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## Project Overview

This project aims to develop a model that detects the facial expressions of human faces in any custom image or video. The final application will be able to detect the faces in a custom image or video and report the facial expression of each detected face.

## About the Dataset

The model is trained on the AffectNet dataset, which contains about 420k images manually annotated for the presence of seven discrete facial expressions (categorical model) and for the intensity of valence and arousal (dimensional model).

AffectNet provides:
* Images of the faces
* Location of the faces in the images
* Location of 68 facial landmarks
* Labels for eleven emotion and non-emotion categories
* Valence and arousal values of the facial expressions in the continuous domain

Emotion Categories:
Eleven annotated emotions are provided for the images, indexed as follows:
0: Neutral, 1: Happiness, 2: Sadness, 3: Surprise, 4: Fear, 5: Disgust, 6: Anger,
7: Contempt, 8: None, 9: Uncertain, 10: No-Face

Detailed information regarding the dataset can be found on the [AffectNet Official Website](http://mohammadmahoor.com/affectnet/).
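For reference, here is a minimal sketch (not part of this repository's code) of how these indices might be mapped back to human-readable labels when decoding a prediction. Note that the training script below outputs only eight classes, presumably the first eight categories:

```python
# Index-to-label mapping as documented by AffectNet.
AFFECTNET_LABELS = {
    0: "Neutral", 1: "Happiness", 2: "Sadness", 3: "Surprise",
    4: "Fear", 5: "Disgust", 6: "Anger", 7: "Contempt",
    8: "None", 9: "Uncertain", 10: "No-Face",
}

def decode_prediction(class_index):
    """Translate a predicted class index into its AffectNet label."""
    return AFFECTNET_LABELS[class_index]
```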
--------------------------------------------------------------------------------
/commence_training.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import os

from keras import applications, optimizers, metrics
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.applications.resnet50 import preprocess_input

import helper_modules

# ResNet50 backbone pre-trained on ImageNet, without its classification head.
base_model = applications.ResNet50(include_top=False, weights='imagenet',
                                   input_tensor=None, input_shape=(200, 200, 3),
                                   pooling=None)
base_model.summary()

# Freeze everything except the last 75 layers so that only the top of the
# backbone is fine-tuned.
for layer in base_model.layers[:-75]:
    layer.trainable = False

# Attach a small classification head for the eight expression classes.
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dense(8, activation='softmax')(x)
model = Model(base_model.input, x)
print("---------------------------------------------------------------------------")
model.summary()


def top_2_accuracy(y_true, y_pred):
    """Fraction of samples whose true class is among the top-2 predictions."""
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=2)


adam = optimizers.Adam(lr=0.000024, beta_1=0.9, beta_2=0.999,
                       epsilon=k.epsilon(), decay=0.0)
# Halve the learning rate whenever validation loss plateaus for two epochs.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2,
                              min_lr=0.00001, verbose=1)

train_directory = "Images/train_images_cropped"
test_directory = "Images/validation_images_cropped_downsampled"
t_size = (200, 200)
b_size = 16

train_gen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    horizontal_flip=True)

test_gen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    horizontal_flip=True)

train_generator = train_gen.flow_from_directory(
    train_directory,
    target_size=t_size,
    batch_size=b_size,
    class_mode="categorical")

validation_generator = test_gen.flow_from_directory(
    test_directory,
    target_size=t_size,
    batch_size=b_size,
    class_mode="categorical")


def weighted_categorical_loss(weights):
    """Categorical cross-entropy with a fixed per-class weight vector."""
    weights = k.variable(weights)

    def loss(y_true, y_pred):
        # Scale predictions so that the class probabilities of each sample sum to 1.
        y_pred /= k.sum(y_pred, axis=-1, keepdims=True)
        # Clip to prevent NaNs and Infs.
        y_pred = k.clip(y_pred, k.epsilon(), 1 - k.epsilon())
        # Weighted cross-entropy, summed over classes.
        loss = y_true * k.log(y_pred) * weights
        loss = -k.sum(loss, -1)
        return loss

    return loss


# Up-weight the rarer expression classes.
weights = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0])

model.compile(optimizer=adam, loss=weighted_categorical_loss(weights),
              metrics=['accuracy', top_2_accuracy])

# Save a checkpoint after every epoch, tagged with epoch number and
# validation accuracy.
checkpoint = ModelCheckpoint(
    filepath='saved_models/model.weights.best._new.{epoch:02d}-{val_acc:.2f}.hdf5',
    verbose=1, save_best_only=False)

# Resume fine-tuning from a previously saved checkpoint.
model.load_weights(filepath="saved_models/model.weights.best.down_sampled_0102.06-0.54.hdf5")

history10 = model.fit_generator(train_generator, epochs=7,
                                validation_data=validation_generator, verbose=2,
                                callbacks=[checkpoint, reduce_lr],
                                steps_per_epoch=200, validation_steps=12)
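# ---------------------------------------------------------------------------
# Optional post-training sanity check: a minimal single-image inference
# sketch. This block is an illustrative addition, not part of the original
# training run, and the image path below is a hypothetical placeholder, so
# the check only runs when such a file actually exists.
sample_image_path = "experiment_images/sample_face.jpg"  # hypothetical path
if os.path.exists(sample_image_path):
    img = cv2.imread(sample_image_path)                    # OpenCV loads BGR
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)             # match the RGB order used in training
    img = cv2.resize(img, t_size).astype(np.float32)
    batch = preprocess_input(np.expand_dims(img, axis=0))  # same ResNet50 preprocessing
    probs = model.predict(batch)[0]
    print("Predicted class index:", int(np.argmax(probs)))

--------------------------------------------------------------------------------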