├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── REQUIREMENTS.txt
├── datasets
│   └── .gitignore
├── images
│   ├── 12_angry_men.jpg
│   ├── color_demo.gif
│   ├── demo_results.png
│   ├── emotion_classification.jpg
│   ├── gradcam_results.png
│   ├── robocup_team.png
│   ├── solvay_conference.jpg
│   └── test_image.jpg
├── report.pdf
├── src
│   ├── __init__.py
│   ├── image_emotion_gender_demo.py
│   ├── image_gradcam_demo.py
│   ├── models
│   │   ├── __init__.py
│   │   └── cnn.py
│   ├── train_emotion_classifier.py
│   ├── train_gender_classifier.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── data_augmentation.py
│   │   ├── datasets.py
│   │   ├── grad_cam.py
│   │   ├── inference.py
│   │   ├── preprocessor.py
│   │   └── visualizer.py
│   ├── video_emotion_color_demo.py
│   ├── video_emotion_gender_demo.py
│   ├── video_gradcam_demo.py
│   └── web
│       ├── __init__.py
│       ├── emotion_gender_processor.py
│       └── faces.py
└── trained_models
    ├── detection_models
    │   └── haarcascade_frontalface_default.xml
    ├── emotion_models
    │   ├── KDEF_emotion_training.log
    │   ├── fer2013_mini_XCEPTION.00-0.47.hdf5
    │   ├── fer2013_mini_XCEPTION.02-0.52.hdf5
    │   ├── fer2013_mini_XCEPTION.03-0.53.hdf5
    │   ├── fer2013_mini_XCEPTION.04-0.55.hdf5
    │   ├── fer2013_mini_XCEPTION.05-0.56.hdf5
    │   ├── fer2013_mini_XCEPTION.08-0.57.hdf5
    │   ├── fer2013_mini_XCEPTION.10-0.58.hdf5
    │   ├── fer2013_mini_XCEPTION.100-0.65.hdf5
    │   ├── fer2013_mini_XCEPTION.102-0.66.hdf5
    │   ├── fer2013_mini_XCEPTION.107-0.66.hdf5
    │   ├── fer2013_mini_XCEPTION.11-0.58.hdf5
    │   ├── fer2013_mini_XCEPTION.110-0.65.hdf5
    │   ├── fer2013_mini_XCEPTION.12-0.58.hdf5
    │   ├── fer2013_mini_XCEPTION.14-0.59.hdf5
    │   ├── fer2013_mini_XCEPTION.15-0.60.hdf5
    │   ├── fer2013_mini_XCEPTION.25-0.60.hdf5
    │   ├── fer2013_mini_XCEPTION.27-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.29-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.32-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.37-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.38-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.41-0.62.hdf5
    │   ├── fer2013_mini_XCEPTION.43-0.64.hdf5
    │   ├── fer2013_mini_XCEPTION.51-0.63.hdf5
    │   ├── fer2013_mini_XCEPTION.70-0.63.hdf5
    │   ├── fer2013_mini_XCEPTION.97-0.65.hdf5
    │   ├── fer2013_mini_XCEPTION.99-0.65.hdf5
    │   ├── mini_XCEPTION_KDEF.hdf5
    │   ├── simple_CNN.530-0.65.hdf5
    │   ├── simple_CNN.985-0.66.hdf5
    │   └── tiny_XCEPTION_KDEF.hdf5
    ├── fer2013_big_XCEPTION.54-0.66.hdf5
    ├── fer2013_mini_XCEPTION.119-0.65.hdf5
    └── gender_models
        ├── gender_mini_XCEPTION.21-0.95.hdf5
        └── simple_CNN.81-0.96.hdf5
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | src/visualizations.py
3 | *.pkl
4 | src/utils/get_gender_faces.py
5 | src/train_emotion_classifier_transfer_learning.py
6 | *.log
7 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:latest
2 |
3 | RUN apt-get -y update && apt-get install -y git python3-pip python3-dev python3-tk vim procps curl
4 |
5 | # Face classification dependencies & web application
6 | RUN pip3 install numpy scipy scikit-learn pillow tensorflow pandas h5py opencv-python==3.2.0.8 keras statistics pyyaml pyparsing cycler matplotlib Flask
7 |
8 | ADD . /ekholabs/face-classifier
9 |
10 | WORKDIR ekholabs/face-classifier
11 |
12 | ENV PYTHONPATH=$PYTHONPATH:src
13 | ENV FACE_CLASSIFIER_PORT=8084
14 | EXPOSE $FACE_CLASSIFIER_PORT
15 |
16 | ENTRYPOINT ["python3"]
17 | CMD ["src/web/faces.py"]
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Octavio
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # This repository is deprecated. For a TF 2.0 rewrite, visit:
2 | # https://github.com/oarriaga/paz
3 | ------------------------------------------------
4 | # Face classification and detection.
5 | Real-time face detection and emotion/gender classification using the fer2013/IMDB datasets with a Keras CNN model and OpenCV.
6 | * IMDB gender classification test accuracy: 96%.
7 | * fer2013 emotion classification test accuracy: 66%.
8 |
9 | For more information, please consult the [publication](https://github.com/oarriaga/face_classification/blob/master/report.pdf).
10 |
11 | # Emotion/gender examples:
12 |
13 | 
14 |
15 | Guided back-prop
16 | 
17 |
18 | Real-time demo:
19 |
20 | 
21 |
22 |
23 | [B-IT-BOTS](https://mas-group.inf.h-brs.de/?page_id=622) robotics team :)
24 | 
25 |
26 | ## Instructions
27 |
28 | ### Run real-time emotion demo:
29 | > python3 video_emotion_color_demo.py
30 |
31 | ### Run real-time guided back-prop demo:
32 | > python3 image_gradcam_demo.py
33 |
34 | ### Make inference on single images:
35 | > python3 image_emotion_gender_demo.py
36 |
37 | e.g.
38 |
39 | > python3 image_emotion_gender_demo.py ../images/test_image.jpg
40 |
41 | ### Running with Docker
42 |
43 | In a few steps you can get your own face classification and detection service running. Follow the commands below:
44 |
45 | * ```docker pull ekholabs/face-classifier```
46 | * ```docker run -d -p 8084:8084 --name=face-classifier ekholabs/face-classifier```
47 | * ```curl -v -F image=@[path_to_image] http://localhost:8084/classifyImage > image.png```
48 |
49 | ### To train previous/new models for emotion classification:
50 |
51 |
52 | * Download the fer2013.tar.gz file from [here](https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge/data)
53 |
54 | * Move the downloaded file to the datasets directory inside this repository.
55 |
56 | * Untar the file:
57 | > tar -xzf fer2013.tar.gz
58 |
59 | * Run the train_emotion_classifier.py file
60 | > python3 train_emotion_classifier.py
61 |
62 | ### To train previous/new models for gender classification:
63 |
64 | * Download the imdb_crop.tar file from [here](https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/) (it's the 7GB button with the title "Download faces only").
65 |
66 | * Move the downloaded file to the datasets directory inside this repository.
67 |
68 | * Untar the file:
69 | > tar -xvf imdb_crop.tar
70 |
71 | * Run the train_gender_classifier.py file
72 | > python3 train_gender_classifier.py
73 |
74 |
--------------------------------------------------------------------------------
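Editor's note: the curl command in the README's Docker section can also be issued from Python. The snippet below is a minimal sketch added for illustration, not a file from the repository; it assumes the `requests` package (not listed in REQUIREMENTS.txt) and a container started as described above, listening on port 8084 with the `/classifyImage` endpoint.

```python
# Minimal sketch: send an image to the running face-classifier container.
# Assumes `pip install requests` and the container from the README's Docker steps.
import requests

URL = "http://localhost:8084/classifyImage"  # endpoint from the README

with open("images/test_image.jpg", "rb") as image_file:
    # Same multipart form field ("image") that the curl example uses.
    response = requests.post(URL, files={"image": image_file})

response.raise_for_status()

# The service answers with the annotated image; save it like the curl example does.
with open("image.png", "wb") as output_file:
    output_file.write(response.content)
```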
/REQUIREMENTS.txt:
--------------------------------------------------------------------------------
1 | keras==2.0.5
2 | tensorflow==1.1.0
3 | pandas==0.19.1
4 | numpy==1.12.1
5 | h5py==2.7.0
6 | statistics
7 | opencv-python==3.2.0
8 |
--------------------------------------------------------------------------------
/datasets/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/images/12_angry_men.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/12_angry_men.jpg
--------------------------------------------------------------------------------
/images/color_demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/color_demo.gif
--------------------------------------------------------------------------------
/images/demo_results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/demo_results.png
--------------------------------------------------------------------------------
/images/emotion_classification.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/emotion_classification.jpg
--------------------------------------------------------------------------------
/images/gradcam_results.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/gradcam_results.png
--------------------------------------------------------------------------------
/images/robocup_team.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/robocup_team.png
--------------------------------------------------------------------------------
/images/solvay_conference.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/solvay_conference.jpg
--------------------------------------------------------------------------------
/images/test_image.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/images/test_image.jpg
--------------------------------------------------------------------------------
/report.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/report.pdf
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/src/__init__.py
--------------------------------------------------------------------------------
/src/image_emotion_gender_demo.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import cv2
4 | from keras.models import load_model
5 | import numpy as np
6 |
7 | from utils.datasets import get_labels
8 | from utils.inference import detect_faces
9 | from utils.inference import draw_text
10 | from utils.inference import draw_bounding_box
11 | from utils.inference import apply_offsets
12 | from utils.inference import load_detection_model
13 | from utils.inference import load_image
14 | from utils.preprocessor import preprocess_input
15 |
16 | # parameters for loading data and images
17 | image_path = sys.argv[1]
18 | detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
19 | emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
20 | gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
21 | emotion_labels = get_labels('fer2013')
22 | gender_labels = get_labels('imdb')
23 | font = cv2.FONT_HERSHEY_SIMPLEX
24 |
25 | # hyper-parameters for bounding boxes shape
26 | gender_offsets = (30, 60)
27 | gender_offsets = (10, 10)  # overrides the value above
28 | emotion_offsets = (20, 40)
29 | emotion_offsets = (0, 0)  # overrides the value above
30 |
31 | # loading models
32 | face_detection = load_detection_model(detection_model_path)
33 | emotion_classifier = load_model(emotion_model_path, compile=False)
34 | gender_classifier = load_model(gender_model_path, compile=False)
35 |
36 | # getting input model shapes for inference
37 | emotion_target_size = emotion_classifier.input_shape[1:3]
38 | gender_target_size = gender_classifier.input_shape[1:3]
39 |
40 | # loading images
41 | rgb_image = load_image(image_path, grayscale=False)
42 | gray_image = load_image(image_path, grayscale=True)
43 | gray_image = np.squeeze(gray_image)
44 | gray_image = gray_image.astype('uint8')
45 |
46 | faces = detect_faces(face_detection, gray_image)
47 | for face_coordinates in faces:
48 | x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
49 | rgb_face = rgb_image[y1:y2, x1:x2]
50 |
51 | x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
52 | gray_face = gray_image[y1:y2, x1:x2]
53 |
54 | try:
55 | rgb_face = cv2.resize(rgb_face, (gender_target_size))
56 | gray_face = cv2.resize(gray_face, (emotion_target_size))
57 | except:
58 | continue
59 |
60 | rgb_face = preprocess_input(rgb_face, False)
61 | rgb_face = np.expand_dims(rgb_face, 0)
62 | gender_prediction = gender_classifier.predict(rgb_face)
63 | gender_label_arg = np.argmax(gender_prediction)
64 | gender_text = gender_labels[gender_label_arg]
65 |
66 | gray_face = preprocess_input(gray_face, True)
67 | gray_face = np.expand_dims(gray_face, 0)
68 | gray_face = np.expand_dims(gray_face, -1)
69 | emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
70 | emotion_text = emotion_labels[emotion_label_arg]
71 |
72 | if gender_text == gender_labels[0]:
73 | color = (0, 0, 255)
74 | else:
75 | color = (255, 0, 0)
76 |
77 | draw_bounding_box(face_coordinates, rgb_image, color)
78 | draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
79 | draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
80 |
81 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
82 | cv2.imwrite('../images/predicted_test_image.png', bgr_image)
83 |
--------------------------------------------------------------------------------
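Editor's note: for readers who want to see what the helper calls in the demo above boil down to, here is a minimal single-face emotion inference sketch using only OpenCV and Keras. It is not part of the repository: the `(x / 255 - 0.5) * 2` normalization is an assumption about what `utils.preprocessor.preprocess_input` does (preprocessor.py is not included in this dump), and the hard-coded model paths mirror the ones used in the demo.

```python
# Sketch: detect faces with the Haar cascade and classify the emotion of each
# one with the pretrained mini_XCEPTION model. Run from the src/ directory.
import cv2
import numpy as np
from keras.models import load_model

# Same label mapping as utils.datasets.get_labels('fer2013').
emotion_labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
                  4: 'sad', 5: 'surprise', 6: 'neutral'}

face_detector = cv2.CascadeClassifier(
    '../trained_models/detection_models/haarcascade_frontalface_default.xml')
emotion_classifier = load_model(
    '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5',
    compile=False)
target_size = emotion_classifier.input_shape[1:3]  # e.g. (64, 64)

gray = cv2.imread('../images/test_image.jpg', cv2.IMREAD_GRAYSCALE)
for (x, y, w, h) in face_detector.detectMultiScale(gray, 1.3, 5):
    face = cv2.resize(gray[y:y + h, x:x + w], target_size).astype('float32')
    face = (face / 255.0 - 0.5) * 2.0            # assumed preprocessing to [-1, 1]
    face = face[np.newaxis, :, :, np.newaxis]    # shape (1, H, W, 1)
    prediction = emotion_classifier.predict(face)
    print(emotion_labels[int(np.argmax(prediction))])
```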
/src/image_gradcam_demo.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import cv2
4 | import numpy as np
5 | from keras.models import load_model
6 |
7 | from utils.grad_cam import compile_gradient_function
8 | from utils.grad_cam import compile_saliency_function
9 | from utils.grad_cam import register_gradient
10 | from utils.grad_cam import modify_backprop
11 | from utils.grad_cam import calculate_guided_gradient_CAM
12 | from utils.datasets import get_labels
13 | from utils.inference import detect_faces
14 | from utils.inference import apply_offsets
15 | from utils.inference import load_detection_model
16 | from utils.preprocessor import preprocess_input
17 | from utils.inference import draw_bounding_box
18 | from utils.inference import load_image
19 |
20 |
21 | # parameters
22 | image_path = sys.argv[1]
23 | # task = sys.argv[2]
24 | task = 'emotion'
25 | if task == 'emotion':
26 | labels = get_labels('fer2013')
27 | offsets = (0, 0)
28 | # model_filename = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
29 | model_filename = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
30 | elif task == 'gender':
31 | labels = get_labels('imdb')
32 | offsets = (30, 60)
33 | model_filename = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
34 |
35 | color = (0, 255, 0)
36 |
37 | # loading models
38 | detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
39 | model = load_model(model_filename, compile=False)
40 | target_size = model.input_shape[1:3]
41 | face_detection = load_detection_model(detection_model_path)
42 |
43 | # loading images
44 | rgb_image = load_image(image_path, grayscale=False)
45 | gray_image = load_image(image_path, grayscale=True)
46 | gray_image = np.squeeze(gray_image)
47 | gray_image = gray_image.astype('uint8')
48 | faces = detect_faces(face_detection, gray_image)
49 |
50 | # start prediction for every image
51 | for face_coordinates in faces:
52 |
53 | x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
54 | rgb_face = rgb_image[y1:y2, x1:x2]
55 |
56 | x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
57 | gray_face = gray_image[y1:y2, x1:x2]
58 |
59 | # processing input
60 | try:
61 | gray_face = cv2.resize(gray_face, (target_size))
62 | except:
63 | continue
64 | gray_face = preprocess_input(gray_face, True)
65 | gray_face = np.expand_dims(gray_face, 0)
66 | gray_face = np.expand_dims(gray_face, -1)
67 |
68 | # prediction
69 | predicted_class = np.argmax(model.predict(gray_face))
70 | label_text = labels[predicted_class]
71 |
72 | gradient_function = compile_gradient_function(model,
73 | predicted_class, 'conv2d_7')
74 | register_gradient()
75 | guided_model = modify_backprop(model, 'GuidedBackProp', task)
76 | saliency_function = compile_saliency_function(guided_model, 'conv2d_7')
77 |
78 | guided_gradCAM = calculate_guided_gradient_CAM(gray_face,
79 | gradient_function, saliency_function)
80 | guided_gradCAM = cv2.resize(guided_gradCAM, (x2-x1, y2-y1))
81 | rgb_guided_gradCAM = np.repeat(guided_gradCAM[:, :, np.newaxis], 3, axis=2)
82 | rgb_image[y1:y2, x1:x2, :] = rgb_guided_gradCAM
83 | draw_bounding_box((x1, y1, x2 - x1, y2 - y1), rgb_image, color)
84 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
85 | cv2.imwrite('../images/guided_gradCAM.png', bgr_image)
86 |
--------------------------------------------------------------------------------
/src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/src/models/__init__.py
--------------------------------------------------------------------------------
/src/models/cnn.py:
--------------------------------------------------------------------------------
1 | from keras.layers import Activation, Convolution2D, Dropout, Conv2D
2 | from keras.layers import AveragePooling2D, BatchNormalization
3 | from keras.layers import GlobalAveragePooling2D
4 | from keras.models import Sequential
5 | from keras.layers import Flatten
6 | from keras.models import Model
7 | from keras.layers import Input
8 | from keras.layers import MaxPooling2D
9 | from keras.layers import SeparableConv2D
10 | from keras import layers
11 | from keras.regularizers import l2
12 |
13 |
14 | def simple_CNN(input_shape, num_classes):
15 |
16 | model = Sequential()
17 | model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same',
18 | name='image_array', input_shape=input_shape))
19 | model.add(BatchNormalization())
20 | model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same'))
21 | model.add(BatchNormalization())
22 | model.add(Activation('relu'))
23 | model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
24 | model.add(Dropout(.5))
25 |
26 | model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
27 | model.add(BatchNormalization())
28 | model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
29 | model.add(BatchNormalization())
30 | model.add(Activation('relu'))
31 | model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
32 | model.add(Dropout(.5))
33 |
34 | model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
35 | model.add(BatchNormalization())
36 | model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
37 | model.add(BatchNormalization())
38 | model.add(Activation('relu'))
39 | model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
40 | model.add(Dropout(.5))
41 |
42 | model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
43 | model.add(BatchNormalization())
44 | model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
45 | model.add(BatchNormalization())
46 | model.add(Activation('relu'))
47 | model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
48 | model.add(Dropout(.5))
49 |
50 | model.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='same'))
51 | model.add(BatchNormalization())
52 | model.add(Convolution2D(
53 | filters=num_classes, kernel_size=(3, 3), padding='same'))
54 | model.add(GlobalAveragePooling2D())
55 | model.add(Activation('softmax', name='predictions'))
56 | return model
57 |
58 |
59 | def simpler_CNN(input_shape, num_classes):
60 |
61 | model = Sequential()
62 | model.add(Convolution2D(filters=16, kernel_size=(5, 5), padding='same',
63 | name='image_array', input_shape=input_shape))
64 | model.add(BatchNormalization())
65 | model.add(Convolution2D(filters=16, kernel_size=(5, 5),
66 | strides=(2, 2), padding='same'))
67 | model.add(BatchNormalization())
68 | model.add(Activation('relu'))
69 | model.add(Dropout(.25))
70 |
71 | model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
72 | model.add(BatchNormalization())
73 | model.add(Convolution2D(filters=32, kernel_size=(5, 5),
74 | strides=(2, 2), padding='same'))
75 | model.add(BatchNormalization())
76 | model.add(Activation('relu'))
77 | model.add(Dropout(.25))
78 |
79 | model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
80 | model.add(BatchNormalization())
81 | model.add(Convolution2D(filters=64, kernel_size=(3, 3),
82 | strides=(2, 2), padding='same'))
83 | model.add(BatchNormalization())
84 | model.add(Activation('relu'))
85 | model.add(Dropout(.25))
86 |
87 | model.add(Convolution2D(filters=64, kernel_size=(1, 1), padding='same'))
88 | model.add(BatchNormalization())
89 | model.add(Convolution2D(filters=128, kernel_size=(3, 3),
90 | strides=(2, 2), padding='same'))
91 | model.add(BatchNormalization())
92 | model.add(Activation('relu'))
93 | model.add(Dropout(.25))
94 |
95 | model.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))
96 | model.add(BatchNormalization())
97 | model.add(Convolution2D(filters=128, kernel_size=(3, 3),
98 | strides=(2, 2), padding='same'))
99 |
100 | model.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))
101 | model.add(BatchNormalization())
102 | model.add(Convolution2D(filters=num_classes, kernel_size=(3, 3),
103 | strides=(2, 2), padding='same'))
104 |
105 | model.add(Flatten())
106 | # model.add(GlobalAveragePooling2D())
107 | model.add(Activation('softmax', name='predictions'))
108 | return model
109 |
110 |
111 | def tiny_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
112 | regularization = l2(l2_regularization)
113 |
114 | # base
115 | img_input = Input(input_shape)
116 | x = Conv2D(5, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
117 | use_bias=False)(img_input)
118 | x = BatchNormalization()(x)
119 | x = Activation('relu')(x)
120 | x = Conv2D(5, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
121 | use_bias=False)(x)
122 | x = BatchNormalization()(x)
123 | x = Activation('relu')(x)
124 |
125 | # module 1
126 | residual = Conv2D(8, (1, 1), strides=(2, 2),
127 | padding='same', use_bias=False)(x)
128 | residual = BatchNormalization()(residual)
129 |
130 | x = SeparableConv2D(8, (3, 3), padding='same',
131 | kernel_regularizer=regularization,
132 | use_bias=False)(x)
133 | x = BatchNormalization()(x)
134 | x = Activation('relu')(x)
135 | x = SeparableConv2D(8, (3, 3), padding='same',
136 | kernel_regularizer=regularization,
137 | use_bias=False)(x)
138 | x = BatchNormalization()(x)
139 |
140 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
141 | x = layers.add([x, residual])
142 |
143 | # module 2
144 | residual = Conv2D(16, (1, 1), strides=(2, 2),
145 | padding='same', use_bias=False)(x)
146 | residual = BatchNormalization()(residual)
147 |
148 | x = SeparableConv2D(16, (3, 3), padding='same',
149 | kernel_regularizer=regularization,
150 | use_bias=False)(x)
151 | x = BatchNormalization()(x)
152 | x = Activation('relu')(x)
153 | x = SeparableConv2D(16, (3, 3), padding='same',
154 | kernel_regularizer=regularization,
155 | use_bias=False)(x)
156 | x = BatchNormalization()(x)
157 |
158 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
159 | x = layers.add([x, residual])
160 |
161 | # module 3
162 | residual = Conv2D(32, (1, 1), strides=(2, 2),
163 | padding='same', use_bias=False)(x)
164 | residual = BatchNormalization()(residual)
165 |
166 | x = SeparableConv2D(32, (3, 3), padding='same',
167 | kernel_regularizer=regularization,
168 | use_bias=False)(x)
169 | x = BatchNormalization()(x)
170 | x = Activation('relu')(x)
171 | x = SeparableConv2D(32, (3, 3), padding='same',
172 | kernel_regularizer=regularization,
173 | use_bias=False)(x)
174 | x = BatchNormalization()(x)
175 |
176 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
177 | x = layers.add([x, residual])
178 |
179 | # module 4
180 | residual = Conv2D(64, (1, 1), strides=(2, 2),
181 | padding='same', use_bias=False)(x)
182 | residual = BatchNormalization()(residual)
183 |
184 | x = SeparableConv2D(64, (3, 3), padding='same',
185 | kernel_regularizer=regularization,
186 | use_bias=False)(x)
187 | x = BatchNormalization()(x)
188 | x = Activation('relu')(x)
189 | x = SeparableConv2D(64, (3, 3), padding='same',
190 | kernel_regularizer=regularization,
191 | use_bias=False)(x)
192 | x = BatchNormalization()(x)
193 |
194 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
195 | x = layers.add([x, residual])
196 |
197 | x = Conv2D(num_classes, (3, 3),
198 | # kernel_regularizer=regularization,
199 | padding='same')(x)
200 | x = GlobalAveragePooling2D()(x)
201 | output = Activation('softmax', name='predictions')(x)
202 |
203 | model = Model(img_input, output)
204 | return model
205 |
206 |
207 | def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
208 | regularization = l2(l2_regularization)
209 |
210 | # base
211 | img_input = Input(input_shape)
212 | x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
213 | use_bias=False)(img_input)
214 | x = BatchNormalization()(x)
215 | x = Activation('relu')(x)
216 | x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
217 | use_bias=False)(x)
218 | x = BatchNormalization()(x)
219 | x = Activation('relu')(x)
220 |
221 | # module 1
222 | residual = Conv2D(16, (1, 1), strides=(2, 2),
223 | padding='same', use_bias=False)(x)
224 | residual = BatchNormalization()(residual)
225 |
226 | x = SeparableConv2D(16, (3, 3), padding='same',
227 | kernel_regularizer=regularization,
228 | use_bias=False)(x)
229 | x = BatchNormalization()(x)
230 | x = Activation('relu')(x)
231 | x = SeparableConv2D(16, (3, 3), padding='same',
232 | kernel_regularizer=regularization,
233 | use_bias=False)(x)
234 | x = BatchNormalization()(x)
235 |
236 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
237 | x = layers.add([x, residual])
238 |
239 | # module 2
240 | residual = Conv2D(32, (1, 1), strides=(2, 2),
241 | padding='same', use_bias=False)(x)
242 | residual = BatchNormalization()(residual)
243 |
244 | x = SeparableConv2D(32, (3, 3), padding='same',
245 | kernel_regularizer=regularization,
246 | use_bias=False)(x)
247 | x = BatchNormalization()(x)
248 | x = Activation('relu')(x)
249 | x = SeparableConv2D(32, (3, 3), padding='same',
250 | kernel_regularizer=regularization,
251 | use_bias=False)(x)
252 | x = BatchNormalization()(x)
253 |
254 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
255 | x = layers.add([x, residual])
256 |
257 | # module 3
258 | residual = Conv2D(64, (1, 1), strides=(2, 2),
259 | padding='same', use_bias=False)(x)
260 | residual = BatchNormalization()(residual)
261 |
262 | x = SeparableConv2D(64, (3, 3), padding='same',
263 | kernel_regularizer=regularization,
264 | use_bias=False)(x)
265 | x = BatchNormalization()(x)
266 | x = Activation('relu')(x)
267 | x = SeparableConv2D(64, (3, 3), padding='same',
268 | kernel_regularizer=regularization,
269 | use_bias=False)(x)
270 | x = BatchNormalization()(x)
271 |
272 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
273 | x = layers.add([x, residual])
274 |
275 | # module 4
276 | residual = Conv2D(128, (1, 1), strides=(2, 2),
277 | padding='same', use_bias=False)(x)
278 | residual = BatchNormalization()(residual)
279 |
280 | x = SeparableConv2D(128, (3, 3), padding='same',
281 | kernel_regularizer=regularization,
282 | use_bias=False)(x)
283 | x = BatchNormalization()(x)
284 | x = Activation('relu')(x)
285 | x = SeparableConv2D(128, (3, 3), padding='same',
286 | kernel_regularizer=regularization,
287 | use_bias=False)(x)
288 | x = BatchNormalization()(x)
289 |
290 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
291 | x = layers.add([x, residual])
292 |
293 | x = Conv2D(num_classes, (3, 3),
294 | # kernel_regularizer=regularization,
295 | padding='same')(x)
296 | x = GlobalAveragePooling2D()(x)
297 | output = Activation('softmax', name='predictions')(x)
298 |
299 | model = Model(img_input, output)
300 | return model
301 |
302 |
303 | def big_XCEPTION(input_shape, num_classes):
304 | img_input = Input(input_shape)
305 | x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
306 | x = BatchNormalization(name='block1_conv1_bn')(x)
307 | x = Activation('relu', name='block1_conv1_act')(x)
308 | x = Conv2D(64, (3, 3), use_bias=False)(x)
309 | x = BatchNormalization(name='block1_conv2_bn')(x)
310 | x = Activation('relu', name='block1_conv2_act')(x)
311 |
312 | residual = Conv2D(128, (1, 1), strides=(2, 2),
313 | padding='same', use_bias=False)(x)
314 | residual = BatchNormalization()(residual)
315 |
316 | x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
317 | x = BatchNormalization(name='block2_sepconv1_bn')(x)
318 | x = Activation('relu', name='block2_sepconv2_act')(x)
319 | x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
320 | x = BatchNormalization(name='block2_sepconv2_bn')(x)
321 |
322 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
323 | x = layers.add([x, residual])
324 |
325 | residual = Conv2D(256, (1, 1), strides=(2, 2),
326 | padding='same', use_bias=False)(x)
327 | residual = BatchNormalization()(residual)
328 |
329 | x = Activation('relu', name='block3_sepconv1_act')(x)
330 | x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
331 | x = BatchNormalization(name='block3_sepconv1_bn')(x)
332 | x = Activation('relu', name='block3_sepconv2_act')(x)
333 | x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
334 | x = BatchNormalization(name='block3_sepconv2_bn')(x)
335 |
336 | x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
337 | x = layers.add([x, residual])
338 | x = Conv2D(num_classes, (3, 3),
339 | # kernel_regularizer=regularization,
340 | padding='same')(x)
341 | x = GlobalAveragePooling2D()(x)
342 | output = Activation('softmax', name='predictions')(x)
343 |
344 | model = Model(img_input, output)
345 | return model
346 |
347 |
348 | if __name__ == "__main__":
349 | input_shape = (64, 64, 1)
350 | num_classes = 7
351 | # model = tiny_XCEPTION(input_shape, num_classes)
352 | # model.summary()
353 | # model = mini_XCEPTION(input_shape, num_classes)
354 | # model.summary()
355 | # model = big_XCEPTION(input_shape, num_classes)
356 | # model.summary()
357 | model = simple_CNN((48, 48, 1), num_classes)
358 | model.summary()
359 |
--------------------------------------------------------------------------------
/src/train_emotion_classifier.py:
--------------------------------------------------------------------------------
1 | """
2 | File: train_emotion_classifier.py
3 | Author: Octavio Arriaga
4 | Email: arriaga.camargo@gmail.com
5 | Github: https://github.com/oarriaga
6 | Description: Train emotion classification model
7 | """
8 |
9 | from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
10 | from keras.callbacks import ReduceLROnPlateau
11 | from keras.preprocessing.image import ImageDataGenerator
12 |
13 | from models.cnn import mini_XCEPTION
14 | from utils.datasets import DataManager
15 | from utils.datasets import split_data
16 | from utils.preprocessor import preprocess_input
17 |
18 | # parameters
19 | batch_size = 32
20 | num_epochs = 10000
21 | input_shape = (64, 64, 1)
22 | validation_split = .2
23 | verbose = 1
24 | num_classes = 7
25 | patience = 50
26 | base_path = '../trained_models/emotion_models/'
27 |
28 | # data generator
29 | data_generator = ImageDataGenerator(
30 | featurewise_center=False,
31 | featurewise_std_normalization=False,
32 | rotation_range=10,
33 | width_shift_range=0.1,
34 | height_shift_range=0.1,
35 | zoom_range=.1,
36 | horizontal_flip=True)
37 |
38 | # model parameters/compilation
39 | model = mini_XCEPTION(input_shape, num_classes)
40 | model.compile(optimizer='adam', loss='categorical_crossentropy',
41 | metrics=['accuracy'])
42 | model.summary()
43 |
44 |
45 | datasets = ['fer2013']
46 | for dataset_name in datasets:
47 | print('Training dataset:', dataset_name)
48 |
49 | # callbacks
50 | log_file_path = base_path + dataset_name + '_emotion_training.log'
51 | csv_logger = CSVLogger(log_file_path, append=False)
52 | early_stop = EarlyStopping('val_loss', patience=patience)
53 | reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
54 | patience=int(patience/4), verbose=1)
55 | trained_models_path = base_path + dataset_name + '_mini_XCEPTION'
56 | model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
57 | model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
58 | save_best_only=True)
59 | callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
60 |
61 | # loading dataset
62 | data_loader = DataManager(dataset_name, image_size=input_shape[:2])
63 | faces, emotions = data_loader.get_data()
64 | faces = preprocess_input(faces)
65 | num_samples, num_classes = emotions.shape
66 | train_data, val_data = split_data(faces, emotions, validation_split)
67 | train_faces, train_emotions = train_data
68 | model.fit_generator(data_generator.flow(train_faces, train_emotions,
69 | batch_size),
70 |                         steps_per_epoch=int(len(train_faces) / batch_size),
71 | epochs=num_epochs, verbose=1, callbacks=callbacks,
72 | validation_data=val_data)
73 |
--------------------------------------------------------------------------------
/src/train_gender_classifier.py:
--------------------------------------------------------------------------------
1 | """
2 | File: train_gender_classifier.py
3 | Author: Octavio Arriaga
4 | Email: arriaga.camargo@gmail.com
5 | Github: https://github.com/oarriaga
6 | Description: Train gender classification model
7 | """
8 |
9 | from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
10 | from keras.callbacks import ReduceLROnPlateau
11 | from utils.datasets import DataManager
12 | from models.cnn import mini_XCEPTION
13 | from utils.data_augmentation import ImageGenerator
14 | from utils.datasets import split_imdb_data
15 |
16 | # parameters
17 | batch_size = 32
18 | num_epochs = 1000
19 | validation_split = .2
20 | do_random_crop = False
21 | patience = 100
22 | num_classes = 2
23 | dataset_name = 'imdb'
24 | input_shape = (64, 64, 1)
25 | if input_shape[2] == 1:
26 | grayscale = True
27 | images_path = '../datasets/imdb_crop/'
28 | log_file_path = '../trained_models/gender_models/gender_training.log'
29 | trained_models_path = '../trained_models/gender_models/gender_mini_XCEPTION'
30 |
31 | # data loader
32 | data_loader = DataManager(dataset_name)
33 | ground_truth_data = data_loader.get_data()
34 | train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)
35 | print('Number of training samples:', len(train_keys))
36 | print('Number of validation samples:', len(val_keys))
37 | image_generator = ImageGenerator(ground_truth_data, batch_size,
38 | input_shape[:2],
39 | train_keys, val_keys, None,
40 | path_prefix=images_path,
41 | vertical_flip_probability=0,
42 | grayscale=grayscale,
43 | do_random_crop=do_random_crop)
44 |
45 | # model parameters/compilation
46 | model = mini_XCEPTION(input_shape, num_classes)
47 | model.compile(optimizer='adam',
48 | loss='categorical_crossentropy',
49 | metrics=['accuracy'])
50 | model.summary()
51 |
52 | # model callbacks
53 | early_stop = EarlyStopping('val_loss', patience=patience)
54 | reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
55 | patience=int(patience/2), verbose=1)
56 | csv_logger = CSVLogger(log_file_path, append=False)
57 | model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
58 | model_checkpoint = ModelCheckpoint(model_names,
59 | monitor='val_loss',
60 | verbose=1,
61 | save_best_only=True,
62 | save_weights_only=False)
63 | callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
64 |
65 | # training model
66 | model.fit_generator(image_generator.flow(mode='train'),
67 | steps_per_epoch=int(len(train_keys) / batch_size),
68 | epochs=num_epochs, verbose=1,
69 | callbacks=callbacks,
70 | validation_data=image_generator.flow('val'),
71 | validation_steps=int(len(val_keys) / batch_size))
72 |
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/src/utils/__init__.py
--------------------------------------------------------------------------------
/src/utils/data_augmentation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from random import shuffle
3 | from .preprocessor import preprocess_input
4 | from .preprocessor import _imread as imread
5 | from .preprocessor import _imresize as imresize
6 | from .preprocessor import to_categorical
7 | import scipy.ndimage as ndi
8 | import cv2
9 |
10 |
11 | class ImageGenerator(object):
12 | """ Image generator with saturation, brightness, lighting, contrast,
13 | horizontal flip and vertical flip transformations. It supports
14 | bounding boxes coordinates.
15 |
16 | TODO:
17 | - Finish support for not using bounding_boxes
18 | - Random crop
19 | - Test other transformations
20 | """
21 | def __init__(self, ground_truth_data, batch_size, image_size,
22 | train_keys, validation_keys,
23 | ground_truth_transformer=None,
24 | path_prefix=None,
25 | saturation_var=0.5,
26 | brightness_var=0.5,
27 | contrast_var=0.5,
28 | lighting_std=0.5,
29 | horizontal_flip_probability=0.5,
30 | vertical_flip_probability=0.5,
31 | do_random_crop=False,
32 | grayscale=False,
33 | zoom_range=[0.75, 1.25],
34 | translation_factor=.3):
35 |
36 | self.ground_truth_data = ground_truth_data
37 | self.ground_truth_transformer = ground_truth_transformer
38 | self.batch_size = batch_size
39 | self.path_prefix = path_prefix
40 | self.train_keys = train_keys
41 | self.validation_keys = validation_keys
42 | self.image_size = image_size
43 | self.grayscale = grayscale
44 | self.color_jitter = []
45 | if saturation_var:
46 | self.saturation_var = saturation_var
47 | self.color_jitter.append(self.saturation)
48 | if brightness_var:
49 | self.brightness_var = brightness_var
50 | self.color_jitter.append(self.brightness)
51 | if contrast_var:
52 | self.contrast_var = contrast_var
53 | self.color_jitter.append(self.contrast)
54 | self.lighting_std = lighting_std
55 | self.horizontal_flip_probability = horizontal_flip_probability
56 | self.vertical_flip_probability = vertical_flip_probability
57 | self.do_random_crop = do_random_crop
58 | self.zoom_range = zoom_range
59 | self.translation_factor = translation_factor
60 |
61 | def _do_random_crop(self, image_array):
62 | """IMPORTANT: random crop only works for classification since the
63 |         current implementation does not transform bounding boxes"""
64 | height = image_array.shape[0]
65 | width = image_array.shape[1]
66 | x_offset = np.random.uniform(0, self.translation_factor * width)
67 | y_offset = np.random.uniform(0, self.translation_factor * height)
68 | offset = np.array([x_offset, y_offset])
69 | scale_factor = np.random.uniform(self.zoom_range[0],
70 | self.zoom_range[1])
71 | crop_matrix = np.array([[scale_factor, 0],
72 | [0, scale_factor]])
73 |
74 | image_array = np.rollaxis(image_array, axis=-1, start=0)
75 | image_channel = [ndi.interpolation.affine_transform(image_channel,
76 | crop_matrix, offset=offset, order=0, mode='nearest',
77 | cval=0.0) for image_channel in image_array]
78 |
79 | image_array = np.stack(image_channel, axis=0)
80 | image_array = np.rollaxis(image_array, 0, 3)
81 | return image_array
82 |
83 | def do_random_rotation(self, image_array):
84 | """IMPORTANT: random rotation only works for classification since the
85 |         current implementation does not transform bounding boxes (note: this method is currently identical to _do_random_crop; no rotation is applied)"""
86 | height = image_array.shape[0]
87 | width = image_array.shape[1]
88 | x_offset = np.random.uniform(0, self.translation_factor * width)
89 | y_offset = np.random.uniform(0, self.translation_factor * height)
90 | offset = np.array([x_offset, y_offset])
91 | scale_factor = np.random.uniform(self.zoom_range[0],
92 | self.zoom_range[1])
93 | crop_matrix = np.array([[scale_factor, 0],
94 | [0, scale_factor]])
95 |
96 | image_array = np.rollaxis(image_array, axis=-1, start=0)
97 | image_channel = [ndi.interpolation.affine_transform(image_channel,
98 | crop_matrix, offset=offset, order=0, mode='nearest',
99 | cval=0.0) for image_channel in image_array]
100 |
101 | image_array = np.stack(image_channel, axis=0)
102 | image_array = np.rollaxis(image_array, 0, 3)
103 | return image_array
104 |
105 | def _gray_scale(self, image_array):
106 | return image_array.dot([0.299, 0.587, 0.114])
107 |
108 | def saturation(self, image_array):
109 | gray_scale = self._gray_scale(image_array)
110 |         alpha = 2.0 * np.random.random() * self.saturation_var
111 | alpha = alpha + 1 - self.saturation_var
112 | image_array = (alpha * image_array + (1 - alpha) *
113 | gray_scale[:, :, None])
114 | return np.clip(image_array, 0, 255)
115 |
116 | def brightness(self, image_array):
117 | alpha = 2 * np.random.random() * self.brightness_var
118 |         alpha = alpha + 1 - self.brightness_var
119 | image_array = alpha * image_array
120 | return np.clip(image_array, 0, 255)
121 |
122 | def contrast(self, image_array):
123 | gray_scale = (self._gray_scale(image_array).mean() *
124 | np.ones_like(image_array))
125 | alpha = 2 * np.random.random() * self.contrast_var
126 | alpha = alpha + 1 - self.contrast_var
127 | image_array = image_array * alpha + (1 - alpha) * gray_scale
128 | return np.clip(image_array, 0, 255)
129 |
130 | def lighting(self, image_array):
131 | covariance_matrix = np.cov(image_array.reshape(-1, 3) /
132 | 255.0, rowvar=False)
133 | eigen_values, eigen_vectors = np.linalg.eigh(covariance_matrix)
134 | noise = np.random.randn(3) * self.lighting_std
135 | noise = eigen_vectors.dot(eigen_values * noise) * 255
136 | image_array = image_array + noise
137 | return np.clip(image_array, 0, 255)
138 |
139 | def horizontal_flip(self, image_array, box_corners=None):
140 | if np.random.random() < self.horizontal_flip_probability:
141 | image_array = image_array[:, ::-1]
142 | if box_corners is not None:
143 | box_corners[:, [0, 2]] = 1 - box_corners[:, [2, 0]]
144 | return image_array, box_corners
145 |
146 | def vertical_flip(self, image_array, box_corners=None):
147 | if (np.random.random() < self.vertical_flip_probability):
148 | image_array = image_array[::-1]
149 | if box_corners is not None:
150 | box_corners[:, [1, 3]] = 1 - box_corners[:, [3, 1]]
151 | return image_array, box_corners
152 |
153 | def transform(self, image_array, box_corners=None):
154 | shuffle(self.color_jitter)
155 | for jitter in self.color_jitter:
156 | image_array = jitter(image_array)
157 |
158 | if self.lighting_std:
159 | image_array = self.lighting(image_array)
160 |
161 | if self.horizontal_flip_probability > 0:
162 | image_array, box_corners = self.horizontal_flip(image_array,
163 | box_corners)
164 |
165 | if self.vertical_flip_probability > 0:
166 | image_array, box_corners = self.vertical_flip(image_array,
167 | box_corners)
168 | return image_array, box_corners
169 |
170 | def preprocess_images(self, image_array):
171 | return preprocess_input(image_array)
172 |
173 | def flow(self, mode='train'):
174 | while True:
175 | if mode == 'train':
176 | shuffle(self.train_keys)
177 | keys = self.train_keys
178 | elif mode == 'val' or mode == 'demo':
179 | shuffle(self.validation_keys)
180 | keys = self.validation_keys
181 | else:
182 | raise Exception('invalid mode: %s' % mode)
183 |
184 | inputs = []
185 | targets = []
186 | for key in keys:
187 | image_path = self.path_prefix + key
188 | image_array = imread(image_path)
189 | image_array = imresize(image_array, self.image_size)
190 |
191 | num_image_channels = len(image_array.shape)
192 | if num_image_channels != 3:
193 | continue
194 |
195 | ground_truth = self.ground_truth_data[key]
196 |
197 | if self.do_random_crop:
198 | image_array = self._do_random_crop(image_array)
199 |
200 | image_array = image_array.astype('float32')
201 | if mode == 'train' or mode == 'demo':
202 | if self.ground_truth_transformer is not None:
203 | image_array, ground_truth = self.transform(
204 | image_array,
205 | ground_truth)
206 | ground_truth = (
207 | self.ground_truth_transformer.assign_boxes(
208 | ground_truth))
209 | else:
210 | image_array = self.transform(image_array)[0]
211 |
212 | if self.grayscale:
213 | image_array = cv2.cvtColor(
214 | image_array.astype('uint8'),
215 | cv2.COLOR_RGB2GRAY).astype('float32')
216 | image_array = np.expand_dims(image_array, -1)
217 |
218 | inputs.append(image_array)
219 | targets.append(ground_truth)
220 | if len(targets) == self.batch_size:
221 | inputs = np.asarray(inputs)
222 | targets = np.asarray(targets)
223 | # this will not work for boxes
224 | targets = to_categorical(targets)
225 | if mode == 'train' or mode == 'val':
226 | inputs = self.preprocess_images(inputs)
227 | yield self._wrap_in_dictionary(inputs, targets)
228 | if mode == 'demo':
229 | yield self._wrap_in_dictionary(inputs, targets)
230 | inputs = []
231 | targets = []
232 |
233 | def _wrap_in_dictionary(self, image_array, targets):
234 | return [{'input_1': image_array},
235 | {'predictions': targets}]
236 |
--------------------------------------------------------------------------------
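Editor's note: to make the batch format concrete, here is a small sketch (not from the repository) that drives `ImageGenerator.flow()` on a handful of synthetic images. The toy file names and labels are made up; `_imread`, `to_categorical` and `preprocess_input` come from `utils.preprocessor`, which is not shown in this dump, so their exact behaviour is assumed.

```python
# Sketch: inspect the [{'input_1': ...}, {'predictions': ...}] batches that
# ImageGenerator.flow() yields (toy data; run from src/ so `utils` is importable).
import os
import tempfile

import cv2
import numpy as np

from utils.data_augmentation import ImageGenerator

tmp_dir = tempfile.mkdtemp()
ground_truth = {}
for index in range(4):
    name = 'face_%d.jpg' % index                 # hypothetical file names
    cv2.imwrite(os.path.join(tmp_dir, name),
                np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
    ground_truth[name] = index % 2               # 0 = woman, 1 = man (imdb labels)

keys = list(ground_truth.keys())
generator = ImageGenerator(ground_truth, batch_size=2, image_size=(64, 64),
                           train_keys=keys, validation_keys=keys,
                           path_prefix=tmp_dir + os.sep,
                           grayscale=True, do_random_crop=False)

# flow() is an infinite generator; take the first batch and unpack the two dicts.
inputs, targets = next(generator.flow(mode='train'))
print(inputs['input_1'].shape)       # e.g. (2, 64, 64, 1)
print(targets['predictions'].shape)  # e.g. (2, 2) one-hot gender labels
```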
/src/utils/datasets.py:
--------------------------------------------------------------------------------
1 | from scipy.io import loadmat
2 | import pandas as pd
3 | import numpy as np
4 | from random import shuffle
5 | import os
6 | import cv2
7 |
8 |
9 | class DataManager(object):
10 | """Class for loading fer2013 emotion classification dataset or
11 | imdb gender classification dataset."""
12 | def __init__(self, dataset_name='imdb',
13 | dataset_path=None, image_size=(48, 48)):
14 |
15 | self.dataset_name = dataset_name
16 | self.dataset_path = dataset_path
17 | self.image_size = image_size
18 | if self.dataset_path is not None:
19 | self.dataset_path = dataset_path
20 | elif self.dataset_name == 'imdb':
21 | self.dataset_path = '../datasets/imdb_crop/imdb.mat'
22 | elif self.dataset_name == 'fer2013':
23 | self.dataset_path = '../datasets/fer2013/fer2013.csv'
24 | elif self.dataset_name == 'KDEF':
25 | self.dataset_path = '../datasets/KDEF/'
26 | else:
27 | raise Exception(
28 |                 'Incorrect dataset name, please input imdb, fer2013 or KDEF')
29 |
30 | def get_data(self):
31 | if self.dataset_name == 'imdb':
32 | ground_truth_data = self._load_imdb()
33 | elif self.dataset_name == 'fer2013':
34 | ground_truth_data = self._load_fer2013()
35 | elif self.dataset_name == 'KDEF':
36 | ground_truth_data = self._load_KDEF()
37 | return ground_truth_data
38 |
39 | def _load_imdb(self):
40 |         face_score_threshold = 3
41 |         dataset = loadmat(self.dataset_path)
42 |         image_names_array = dataset['imdb']['full_path'][0, 0][0]
43 |         gender_classes = dataset['imdb']['gender'][0, 0][0]
44 |         face_score = dataset['imdb']['face_score'][0, 0][0]
45 |         second_face_score = dataset['imdb']['second_face_score'][0, 0][0]
46 |         face_score_mask = face_score > face_score_threshold
47 | second_face_score_mask = np.isnan(second_face_score)
48 | unknown_gender_mask = np.logical_not(np.isnan(gender_classes))
49 | mask = np.logical_and(face_score_mask, second_face_score_mask)
50 | mask = np.logical_and(mask, unknown_gender_mask)
51 | image_names_array = image_names_array[mask]
52 | gender_classes = gender_classes[mask].tolist()
53 | image_names = []
54 | for image_name_arg in range(image_names_array.shape[0]):
55 | image_name = image_names_array[image_name_arg][0]
56 | image_names.append(image_name)
57 | return dict(zip(image_names, gender_classes))
58 |
59 | def _load_fer2013(self):
60 | data = pd.read_csv(self.dataset_path)
61 | pixels = data['pixels'].tolist()
62 | width, height = 48, 48
63 | faces = []
64 | for pixel_sequence in pixels:
65 | face = [int(pixel) for pixel in pixel_sequence.split(' ')]
66 | face = np.asarray(face).reshape(width, height)
67 | face = cv2.resize(face.astype('uint8'), self.image_size)
68 | faces.append(face.astype('float32'))
69 | faces = np.asarray(faces)
70 | faces = np.expand_dims(faces, -1)
71 | emotions = pd.get_dummies(data['emotion']).as_matrix()
72 | return faces, emotions
73 |
74 | def _load_KDEF(self):
75 | class_to_arg = get_class_to_arg(self.dataset_name)
76 | num_classes = len(class_to_arg)
77 |
78 | file_paths = []
79 | for folder, subfolders, filenames in os.walk(self.dataset_path):
80 | for filename in filenames:
81 | if filename.lower().endswith(('.jpg')):
82 | file_paths.append(os.path.join(folder, filename))
83 |
84 | num_faces = len(file_paths)
85 | y_size, x_size = self.image_size
86 | faces = np.zeros(shape=(num_faces, y_size, x_size))
87 | emotions = np.zeros(shape=(num_faces, num_classes))
88 | for file_arg, file_path in enumerate(file_paths):
89 | image_array = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
90 | image_array = cv2.resize(image_array, (y_size, x_size))
91 | faces[file_arg] = image_array
92 | file_basename = os.path.basename(file_path)
93 | file_emotion = file_basename[4:6]
94 | # there are two file names in the dataset
95 | # that don't match the given classes
96 | try:
97 | emotion_arg = class_to_arg[file_emotion]
98 | except:
99 | continue
100 | emotions[file_arg, emotion_arg] = 1
101 | faces = np.expand_dims(faces, -1)
102 | return faces, emotions
103 |
104 |
105 | def get_labels(dataset_name):
106 | if dataset_name == 'fer2013':
107 | return {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
108 | 4: 'sad', 5: 'surprise', 6: 'neutral'}
109 | elif dataset_name == 'imdb':
110 | return {0: 'woman', 1: 'man'}
111 | elif dataset_name == 'KDEF':
112 | return {0: 'AN', 1: 'DI', 2: 'AF', 3: 'HA', 4: 'SA', 5: 'SU', 6: 'NE'}
113 | else:
114 | raise Exception('Invalid dataset name')
115 |
116 |
117 | def get_class_to_arg(dataset_name='fer2013'):
118 | if dataset_name == 'fer2013':
119 | return {'angry': 0, 'disgust': 1, 'fear': 2, 'happy': 3, 'sad': 4,
120 | 'surprise': 5, 'neutral': 6}
121 | elif dataset_name == 'imdb':
122 | return {'woman': 0, 'man': 1}
123 | elif dataset_name == 'KDEF':
124 | return {'AN': 0, 'DI': 1, 'AF': 2, 'HA': 3, 'SA': 4, 'SU': 5, 'NE': 6}
125 | else:
126 | raise Exception('Invalid dataset name')
127 |
128 |
129 | def split_imdb_data(ground_truth_data, validation_split=.2, do_shuffle=False):
130 | ground_truth_keys = sorted(ground_truth_data.keys())
131 | if do_shuffle is not False:
132 | shuffle(ground_truth_keys)
133 | training_split = 1 - validation_split
134 | num_train = int(training_split * len(ground_truth_keys))
135 | train_keys = ground_truth_keys[:num_train]
136 | validation_keys = ground_truth_keys[num_train:]
137 | return train_keys, validation_keys
138 |
139 |
140 | def split_data(x, y, validation_split=.2):
141 | num_samples = len(x)
142 | num_train_samples = int((1 - validation_split)*num_samples)
143 | train_x = x[:num_train_samples]
144 | train_y = y[:num_train_samples]
145 | val_x = x[num_train_samples:]
146 | val_y = y[num_train_samples:]
147 | train_data = (train_x, train_y)
148 | val_data = (val_x, val_y)
149 | return train_data, val_data
150 |
--------------------------------------------------------------------------------
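Editor's note: `DataManager._load_fer2013` above expects the Kaggle CSV with an `emotion` column and a `pixels` column of 48x48 space-separated grey values. The toy row below (synthetic, not real data) is a sketch of the reshaping and one-hot encoding the loader performs.

```python
# Sketch of the fer2013.csv layout that DataManager._load_fer2013 consumes
# (a single synthetic row; the real file comes from the Kaggle download).
import numpy as np
import pandas as pd

row = {'emotion': 3, 'pixels': ' '.join(['128'] * 48 * 48)}  # 3 = 'happy'
data = pd.DataFrame([row])

face = np.asarray([int(p) for p in data['pixels'][0].split(' ')],
                  dtype='float32').reshape(48, 48)
emotions = pd.get_dummies(data['emotion']).values   # one-hot, like the loader

print(face.shape)       # (48, 48)
print(emotions.shape)   # (1, 1) for this single class; (N, 7) on the full dataset
```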
/src/utils/grad_cam.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import h5py
3 | import keras
4 | import keras.backend as K
5 | from keras.layers.core import Lambda
6 | from keras.models import Sequential
7 | from keras.models import load_model
8 | import numpy as np
9 | import tensorflow as tf
10 | from tensorflow.python.framework import ops
11 |
12 | from .preprocessor import preprocess_input
13 |
14 |
15 | def reset_optimizer_weights(model_filename):
16 | model = h5py.File(model_filename, 'r+')
17 | del model['optimizer_weights']
18 | model.close()
19 |
20 |
21 | def target_category_loss(x, category_index, num_classes):
22 | return tf.multiply(x, K.one_hot([category_index], num_classes))
23 |
24 |
25 | def target_category_loss_output_shape(input_shape):
26 | return input_shape
27 |
28 |
29 | def normalize(x):
30 | # utility function to normalize a tensor by its L2 norm
31 | return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
32 |
33 |
34 | def load_image(image_array):
35 | image_array = np.expand_dims(image_array, axis=0)
36 | image_array = preprocess_input(image_array)
37 | return image_array
38 |
39 |
40 | def register_gradient():
41 | if "GuidedBackProp" not in ops._gradient_registry._registry:
42 | @ops.RegisterGradient("GuidedBackProp")
43 | def _GuidedBackProp(op, gradient):
44 | dtype = op.inputs[0].dtype
45 | guided_gradient = (gradient * tf.cast(gradient > 0., dtype) *
46 | tf.cast(op.inputs[0] > 0., dtype))
47 | return guided_gradient
48 |
49 |
50 | def compile_saliency_function(model, activation_layer='conv2d_7'):
51 | input_image = model.input
52 | layer_output = model.get_layer(activation_layer).output
53 | max_output = K.max(layer_output, axis=3)
54 | saliency = K.gradients(K.sum(max_output), input_image)[0]
55 | return K.function([input_image, K.learning_phase()], [saliency])
56 |
57 |
58 | def modify_backprop(model, name, task):
59 | graph = tf.get_default_graph()
60 | with graph.gradient_override_map({'Relu': name}):
61 |
62 | # get layers that have an activation
63 | activation_layers = [layer for layer in model.layers
64 | if hasattr(layer, 'activation')]
65 |
66 | # replace relu activation
67 | for layer in activation_layers:
68 | if layer.activation == keras.activations.relu:
69 | layer.activation = tf.nn.relu
70 |
71 |         # re-instantiate a new model
72 | if task == 'gender':
73 | model_path = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
74 | elif task == 'emotion':
75 | model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
76 | # model_path = '../trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5'
77 | # model_path = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
78 | new_model = load_model(model_path, compile=False)
79 | return new_model
80 |
81 |
82 | def deprocess_image(x):
83 | """ Same normalization as in:
84 | https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
85 | """
86 | if np.ndim(x) > 3:
87 | x = np.squeeze(x)
88 | # normalize tensor: center on 0., ensure std is 0.1
89 | x = x - x.mean()
90 | x = x / (x.std() + 1e-5)
91 | x = x * 0.1
92 |
93 | # clip to [0, 1]
94 | x = x + 0.5
95 | x = np.clip(x, 0, 1)
96 |
97 | # convert to RGB array
98 | x = x * 255
99 | if K.image_dim_ordering() == 'th':
100 | x = x.transpose((1, 2, 0))
101 | x = np.clip(x, 0, 255).astype('uint8')
102 | return x
103 |
104 |
105 | def compile_gradient_function(input_model, category_index, layer_name):
106 | model = Sequential()
107 | model.add(input_model)
108 |
109 | num_classes = model.output_shape[1]
110 | target_layer = lambda x: target_category_loss(x, category_index, num_classes)
111 | model.add(Lambda(target_layer,
112 | output_shape=target_category_loss_output_shape))
113 |
114 | loss = K.sum(model.layers[-1].output)
115 | conv_output = model.layers[0].get_layer(layer_name).output
116 | gradients = normalize(K.gradients(loss, conv_output)[0])
117 | gradient_function = K.function([model.layers[0].input, K.learning_phase()],
118 | [conv_output, gradients])
119 | return gradient_function
120 |
121 |
122 | def calculate_gradient_weighted_CAM(gradient_function, image):
123 | output, evaluated_gradients = gradient_function([image, False])
124 | output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
125 | weights = np.mean(evaluated_gradients, axis=(0, 1))
126 | CAM = np.ones(output.shape[0: 2], dtype=np.float32)
127 | for weight_arg, weight in enumerate(weights):
128 | CAM = CAM + (weight * output[:, :, weight_arg])
129 | CAM = cv2.resize(CAM, (64, 64))
130 | CAM = np.maximum(CAM, 0)
131 | heatmap = CAM / np.max(CAM)
132 |
133 | # Return to BGR [0..255] from the preprocessed image
134 | image = image[0, :]
135 | image = image - np.min(image)
136 | image = np.minimum(image, 255)
137 |
138 | CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
139 | CAM = np.float32(CAM) + np.float32(image)
140 | CAM = 255 * CAM / np.max(CAM)
141 | return np.uint8(CAM), heatmap
142 |
143 |
144 | def calculate_guided_gradient_CAM(
145 | preprocessed_input, gradient_function, saliency_function):
146 | CAM, heatmap = calculate_gradient_weighted_CAM(
147 | gradient_function, preprocessed_input)
148 | saliency = saliency_function([preprocessed_input, 0])
149 | # gradCAM = saliency[0] * heatmap[..., np.newaxis]
150 | # return deprocess_image(gradCAM)
151 | return deprocess_image(saliency[0])
152 | # return saliency[0]
153 |
154 |
155 | def calculate_guided_gradient_CAM_v2(
156 | preprocessed_input, gradient_function,
157 | saliency_function, target_size=(128, 128)):
158 | CAM, heatmap = calculate_gradient_weighted_CAM(
159 | gradient_function, preprocessed_input)
160 | heatmap = np.squeeze(heatmap)
161 | heatmap = cv2.resize(heatmap.astype('uint8'), target_size)
162 | saliency = saliency_function([preprocessed_input, 0])
163 | saliency = np.squeeze(saliency[0])
164 | saliency = cv2.resize(saliency.astype('uint8'), target_size)
165 | gradCAM = saliency * heatmap
166 | gradCAM = deprocess_image(gradCAM)
167 | return np.expand_dims(gradCAM, -1)
168 |
169 |
170 | if __name__ == '__main__':
171 | import pickle
172 | faces = pickle.load(open('faces.pkl', 'rb'))
173 | face = faces[0]
174 | model_filename = '../../trained_models/emotion_models/mini_XCEPTION.523-0.65.hdf5'
175 | # reset_optimizer_weights(model_filename)
176 | model = load_model(model_filename)
177 |
178 | preprocessed_input = load_image(face)
179 | predictions = model.predict(preprocessed_input)
180 | predicted_class = np.argmax(predictions)
181 | gradient_function = compile_gradient_function(
182 | model, predicted_class, 'conv2d_6')
183 | register_gradient()
184 |     guided_model = modify_backprop(model, 'GuidedBackProp', 'emotion')
185 | saliency_function = compile_saliency_function(guided_model)
186 | guided_gradCAM = calculate_guided_gradient_CAM(
187 | preprocessed_input, gradient_function, saliency_function)
188 |
189 | cv2.imwrite('guided_gradCAM.jpg', guided_gradCAM)
190 |
191 |
192 |
--------------------------------------------------------------------------------
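
The __main__ block above depends on pickled faces; the sketch below condenses the same guided Grad-CAM call order using one of the bundled FER2013 emotion models and a random array as a stand-in face. The layer name and relative paths follow video_gradcam_demo.py further below, and assume the snippet is run from the src/ directory.

    import numpy as np
    from keras.models import load_model
    from utils.grad_cam import (register_gradient, modify_backprop,
                                compile_gradient_function,
                                compile_saliency_function,
                                calculate_guided_gradient_CAM, load_image)

    model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    model = load_model(model_path, compile=False)

    face = np.random.rand(64, 64, 1).astype('float32') * 255  # placeholder face crop
    preprocessed_input = load_image(face)                      # adds batch axis and rescales
    predicted_class = np.argmax(model.predict(preprocessed_input))

    gradient_function = compile_gradient_function(model, predicted_class, 'conv2d_7')
    register_gradient()                                        # registers GuidedBackProp once
    guided_model = modify_backprop(model, 'GuidedBackProp', 'emotion')
    saliency_function = compile_saliency_function(guided_model, 'conv2d_7')
    guided_gradCAM = calculate_guided_gradient_CAM(
        preprocessed_input, gradient_function, saliency_function)
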
/src/utils/inference.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | from keras.preprocessing import image
5 |
6 | def load_image(image_path, grayscale=False, target_size=None):
7 |     pil_image = image.load_img(image_path, grayscale=grayscale, target_size=target_size)
8 | return image.img_to_array(pil_image)
9 |
10 | def load_detection_model(model_path):
11 | detection_model = cv2.CascadeClassifier(model_path)
12 | return detection_model
13 |
14 | def detect_faces(detection_model, gray_image_array):
15 | return detection_model.detectMultiScale(gray_image_array, 1.3, 5)
16 |
17 | def draw_bounding_box(face_coordinates, image_array, color):
18 | x, y, w, h = face_coordinates
19 | cv2.rectangle(image_array, (x, y), (x + w, y + h), color, 2)
20 |
21 | def apply_offsets(face_coordinates, offsets):
22 | x, y, width, height = face_coordinates
23 | x_off, y_off = offsets
24 | return (x - x_off, x + width + x_off, y - y_off, y + height + y_off)
25 |
26 | def draw_text(coordinates, image_array, text, color, x_offset=0, y_offset=0,
27 | font_scale=2, thickness=2):
28 | x, y = coordinates[:2]
29 | cv2.putText(image_array, text, (x + x_offset, y + y_offset),
30 | cv2.FONT_HERSHEY_SIMPLEX,
31 | font_scale, color, thickness, cv2.LINE_AA)
32 |
33 | def get_colors(num_classes):
34 | colors = plt.cm.hsv(np.linspace(0, 1, num_classes)).tolist()
35 | colors = np.asarray(colors) * 255
36 | return colors
37 |
38 |
--------------------------------------------------------------------------------
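
A short, self-contained sketch of how these helpers fit together on a still image. The input and output filenames are placeholders; the cascade path matches the bundled detector and assumes the snippet runs from src/.

    import cv2
    from utils.inference import (load_detection_model, detect_faces,
                                 apply_offsets, draw_bounding_box)

    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    face_detection = load_detection_model(detection_model_path)

    bgr_image = cv2.imread('input.jpg')                   # placeholder input image
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)

    for face_coordinates in detect_faces(face_detection, gray_image):
        x1, x2, y1, y2 = apply_offsets(face_coordinates, (20, 40))
        face_crop = gray_image[y1:y2, x1:x2]              # the crop fed to the classifiers
        draw_bounding_box(face_coordinates, bgr_image, (0, 255, 0))

    cv2.imwrite('output.jpg', bgr_image)                  # placeholder output name
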
/src/utils/preprocessor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.misc import imread, imresize
3 |
4 |
5 | def preprocess_input(x, v2=True):
6 | x = x.astype('float32')
7 | x = x / 255.0
8 | if v2:
9 | x = x - 0.5
10 | x = x * 2.0
11 | return x
12 |
13 |
14 | def _imread(image_name):
15 | return imread(image_name)
16 |
17 |
18 | def _imresize(image_array, size):
19 | return imresize(image_array, size)
20 |
21 |
22 | def to_categorical(integer_classes, num_classes=2):
23 | integer_classes = np.asarray(integer_classes, dtype='int')
24 | num_samples = integer_classes.shape[0]
25 | categorical = np.zeros((num_samples, num_classes))
26 | categorical[np.arange(num_samples), integer_classes] = 1
27 | return categorical
28 |
--------------------------------------------------------------------------------
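
With v2=True (the default), preprocess_input maps pixel values from [0, 255] to [-1, 1] (divide by 255, subtract 0.5, multiply by 2); with v2=False it only rescales to [0, 1]. Note that _imread and _imresize wrap scipy.misc, which recent SciPy releases no longer provide. A quick check of the two transforms and of to_categorical:

    import numpy as np
    from utils.preprocessor import preprocess_input, to_categorical

    pixels = np.array([0, 127.5, 255])
    print(preprocess_input(pixels))            # [-1.  0.  1.]
    print(preprocess_input(pixels, v2=False))  # [0.   0.5  1.]

    print(to_categorical([0, 1, 1, 0], num_classes=2))
    # [[1. 0.]
    #  [0. 1.]
    #  [0. 1.]
    #  [1. 0.]]
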
/src/utils/visualizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.cm as cm
3 | from mpl_toolkits.axes_grid1 import make_axes_locatable
4 | import matplotlib.pyplot as plt
5 | import numpy.ma as ma
6 |
7 |
8 | def make_mosaic(images, num_rows, num_cols, border=1, class_names=None):
9 | num_images = len(images)
10 | image_shape = images.shape[1:]
11 | mosaic = ma.masked_all(
12 | (num_rows * image_shape[0] + (num_rows - 1) * border,
13 | num_cols * image_shape[1] + (num_cols - 1) * border),
14 | dtype=np.float32)
15 | paddedh = image_shape[0] + border
16 | paddedw = image_shape[1] + border
17 | for image_arg in range(num_images):
18 | row = int(np.floor(image_arg / num_cols))
19 | col = image_arg % num_cols
20 | image = np.squeeze(images[image_arg])
21 | image_shape = image.shape
22 | mosaic[row * paddedh:row * paddedh + image_shape[0],
23 | col * paddedw:col * paddedw + image_shape[1]] = image
24 | return mosaic
25 |
26 |
27 | def make_mosaic_v2(images, num_mosaic_rows=None,
28 | num_mosaic_cols=None, border=1):
29 | images = np.squeeze(images)
30 | num_images, image_pixels_rows, image_pixels_cols = images.shape
31 | if num_mosaic_rows is None and num_mosaic_cols is None:
32 | box_size = int(np.ceil(np.sqrt(num_images)))
33 | num_mosaic_rows = num_mosaic_cols = box_size
34 | num_mosaic_pixel_rows = num_mosaic_rows * (image_pixels_rows + border)
35 | num_mosaic_pixel_cols = num_mosaic_cols * (image_pixels_cols + border)
36 | mosaic = np.empty(shape=(num_mosaic_pixel_rows, num_mosaic_pixel_cols))
37 | mosaic_col_arg = 0
38 | mosaic_row_arg = 0
39 | for image_arg in range(num_images):
40 | if image_arg % num_mosaic_cols == 0 and image_arg != 0:
41 | mosaic_col_arg = mosaic_col_arg + 1
42 | mosaic_row_arg = 0
43 | x0 = image_pixels_cols * (mosaic_row_arg)
44 | x1 = image_pixels_cols * (mosaic_row_arg + 1)
45 | y0 = image_pixels_rows * (mosaic_col_arg)
46 | y1 = image_pixels_rows * (mosaic_col_arg + 1)
47 | image = images[image_arg]
48 | mosaic[y0:y1, x0:x1] = image
49 | mosaic_row_arg = mosaic_row_arg + 1
50 | return mosaic
51 |
52 |
53 | def pretty_imshow(axis, data, vmin=None, vmax=None, cmap=None):
54 | if cmap is None:
55 | cmap = cm.jet
56 | if vmin is None:
57 | vmin = data.min()
58 | if vmax is None:
59 | vmax = data.max()
60 | cax = None
61 | divider = make_axes_locatable(axis)
62 | cax = divider.append_axes('right', size='5%', pad=0.05)
63 | image = axis.imshow(data, vmin=vmin, vmax=vmax,
64 | interpolation='nearest', cmap=cmap)
65 | plt.colorbar(image, cax=cax)
66 |
67 |
68 | def normal_imshow(axis, data, vmin=None, vmax=None,
69 | cmap=None, axis_off=True):
70 | if cmap is None:
71 | cmap = cm.jet
72 | if vmin is None:
73 | vmin = data.min()
74 | if vmax is None:
75 | vmax = data.max()
76 | image = axis.imshow(data, vmin=vmin, vmax=vmax,
77 | interpolation='nearest', cmap=cmap)
78 | if axis_off:
79 | plt.axis('off')
80 | return image
81 |
82 |
83 | def display_image(face, class_vector=None,
84 | class_decoder=None, pretty=False):
85 | if class_vector is not None and class_decoder is None:
86 | raise Exception('Provide class decoder')
87 | face = np.squeeze(face)
88 | color_map = None
89 | if len(face.shape) < 3:
90 | color_map = 'gray'
91 | plt.figure()
92 | if class_vector is not None:
93 | class_arg = np.argmax(class_vector)
94 | class_name = class_decoder[class_arg]
95 | plt.title(class_name)
96 | if pretty:
97 | pretty_imshow(plt.gca(), face, cmap=color_map)
98 | else:
99 | plt.imshow(face, color_map)
100 |
101 |
102 | def draw_mosaic(data, num_rows, num_cols, class_vectors=None,
103 | class_decoder=None, cmap='gray'):
104 |
105 | if class_vectors is not None and class_decoder is None:
106 | raise Exception('Provide class decoder')
107 |
108 | figure, axis_array = plt.subplots(num_rows, num_cols)
109 | figure.set_size_inches(8, 8, forward=True)
110 | titles = []
111 | if class_vectors is not None:
112 | for vector_arg in range(len(class_vectors)):
113 | class_arg = np.argmax(class_vectors[vector_arg])
114 | class_name = class_decoder[class_arg]
115 | titles.append(class_name)
116 |
117 | image_arg = 0
118 | for row_arg in range(num_rows):
119 | for col_arg in range(num_cols):
120 | image = data[image_arg]
121 | image = np.squeeze(image)
122 | axis_array[row_arg, col_arg].axis('off')
123 | axis_array[row_arg, col_arg].imshow(image, cmap=cmap)
124 |             if titles: axis_array[row_arg, col_arg].set_title(titles[image_arg])
125 | image_arg = image_arg + 1
126 | plt.tight_layout()
127 |
128 |
129 | if __name__ == '__main__':
130 | # from utils.data_manager import DataManager
131 |     from utils.datasets import get_labels
132 | from keras.models import load_model
133 | import pickle
134 |
135 | # dataset_name = 'fer2013'
136 | # model_path = '../trained_models/emotion_models/simple_CNN.985-0.66.hdf5'
137 | dataset_name = 'fer2013'
138 | class_decoder = get_labels(dataset_name)
139 | # data_manager = DataManager(dataset_name)
140 | # faces, emotions = data_manager.get_data()
141 | faces = pickle.load(open('faces.pkl', 'rb'))
142 | emotions = pickle.load(open('emotions.pkl', 'rb'))
143 | pretty_imshow(plt.gca(), make_mosaic(faces[:4], 2, 2), cmap='gray')
144 | plt.show()
145 |
146 | """
147 | image_arg = 0
148 | face = faces[image_arg:image_arg + 1]
149 | emotion = emotions[image_arg:image_arg + 1]
150 | display_image(face, emotion, class_decoder)
151 | plt.show()
152 |
153 | normal_imshow(plt.gca(), make_mosaic(faces[:4], 3, 3), cmap='gray')
154 | plt.show()
155 |
156 | draw_mosaic(faces, 2, 2, emotions, class_decoder)
157 | plt.show()
158 |
159 | """
160 | model = load_model('../trained_models/emotion_models/simple_CNN.985-0.66.hdf5')
161 | conv1_weights = model.layers[2].get_weights()
162 | kernel_conv1_weights = conv1_weights[0]
163 | kernel_conv1_weights = np.squeeze(kernel_conv1_weights)
164 | kernel_conv1_weights = np.rollaxis(kernel_conv1_weights, 2, 0)
165 | kernel_conv1_weights = np.expand_dims(kernel_conv1_weights, -1)
166 | num_kernels = kernel_conv1_weights.shape[0]
167 | box_size = int(np.ceil(np.sqrt(num_kernels)))
168 | print('Box size:', box_size)
169 |
170 | print('Kernel shape', kernel_conv1_weights.shape)
171 | plt.figure(figsize=(15, 15))
172 | plt.title('conv1 weights')
173 | pretty_imshow(
174 | plt.gca(),
175 | make_mosaic(kernel_conv1_weights, box_size, box_size),
176 | cmap=cm.binary)
177 | plt.show()
178 |
--------------------------------------------------------------------------------
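
A minimal, self-contained sketch of the mosaic helpers using random arrays in place of faces or convolution kernels:

    import numpy as np
    import matplotlib.pyplot as plt
    from utils.visualizer import make_mosaic, pretty_imshow

    images = np.random.rand(16, 48, 48)          # 16 synthetic 48x48 grayscale images
    mosaic = make_mosaic(images, num_rows=4, num_cols=4)
    pretty_imshow(plt.gca(), mosaic, cmap='gray')
    plt.show()
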
/src/video_emotion_color_demo.py:
--------------------------------------------------------------------------------
1 | from statistics import mode
2 |
3 | import cv2
4 | from keras.models import load_model
5 | import numpy as np
6 |
7 | from utils.datasets import get_labels
8 | from utils.inference import detect_faces
9 | from utils.inference import draw_text
10 | from utils.inference import draw_bounding_box
11 | from utils.inference import apply_offsets
12 | from utils.inference import load_detection_model
13 | from utils.preprocessor import preprocess_input
14 |
15 | # parameters for loading data and images
16 | detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
17 | emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
18 | emotion_labels = get_labels('fer2013')
19 |
20 | # hyper-parameters for bounding boxes shape
21 | frame_window = 10
22 | emotion_offsets = (20, 40)
23 |
24 | # loading models
25 | face_detection = load_detection_model(detection_model_path)
26 | emotion_classifier = load_model(emotion_model_path, compile=False)
27 |
28 | # getting input model shapes for inference
29 | emotion_target_size = emotion_classifier.input_shape[1:3]
30 |
31 | # starting lists for calculating modes
32 | emotion_window = []
33 |
34 | # starting video streaming
35 | cv2.namedWindow('window_frame')
36 | video_capture = cv2.VideoCapture(0)
37 | while True:
38 | bgr_image = video_capture.read()[1]
39 | gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
40 | rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
41 | faces = detect_faces(face_detection, gray_image)
42 |
43 | for face_coordinates in faces:
44 |
45 | x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
46 | gray_face = gray_image[y1:y2, x1:x2]
47 | try:
48 | gray_face = cv2.resize(gray_face, (emotion_target_size))
49 | except:
50 | continue
51 |
52 | gray_face = preprocess_input(gray_face, True)
53 | gray_face = np.expand_dims(gray_face, 0)
54 | gray_face = np.expand_dims(gray_face, -1)
55 | emotion_prediction = emotion_classifier.predict(gray_face)
56 | emotion_probability = np.max(emotion_prediction)
57 | emotion_label_arg = np.argmax(emotion_prediction)
58 | emotion_text = emotion_labels[emotion_label_arg]
59 | emotion_window.append(emotion_text)
60 |
61 | if len(emotion_window) > frame_window:
62 | emotion_window.pop(0)
63 | try:
64 | emotion_mode = mode(emotion_window)
65 | except:
66 | continue
67 |
68 | if emotion_text == 'angry':
69 | color = emotion_probability * np.asarray((255, 0, 0))
70 | elif emotion_text == 'sad':
71 | color = emotion_probability * np.asarray((0, 0, 255))
72 | elif emotion_text == 'happy':
73 | color = emotion_probability * np.asarray((255, 255, 0))
74 | elif emotion_text == 'surprise':
75 | color = emotion_probability * np.asarray((0, 255, 255))
76 | else:
77 | color = emotion_probability * np.asarray((0, 255, 0))
78 |
79 | color = color.astype(int)
80 | color = color.tolist()
81 |
82 | draw_bounding_box(face_coordinates, rgb_image, color)
83 | draw_text(face_coordinates, rgb_image, emotion_mode,
84 | color, 0, -45, 1, 1)
85 |
86 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
87 | cv2.imshow('window_frame', bgr_image)
88 | if cv2.waitKey(1) & 0xFF == ord('q'):
89 | break
90 | video_capture.release()
91 | cv2.destroyAllWindows()
92 |
--------------------------------------------------------------------------------
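
The capture loop above indexes video_capture.read()[1] directly, so a failed grab (camera unavailable, or the end of a file) reaches cvtColor as None and raises. Below is a sketch of a more defensive read, which also works when pointing cv2.VideoCapture at a recorded clip instead of the webcam; the filename is a placeholder.

    import cv2

    video_capture = cv2.VideoCapture('my_clip.mp4')  # placeholder path; use 0 for the webcam
    while True:
        grabbed, bgr_image = video_capture.read()
        if not grabbed:                              # end of file or camera error
            break
        cv2.imshow('window_frame', bgr_image)        # per-frame processing would go here
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
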
/src/video_emotion_gender_demo.py:
--------------------------------------------------------------------------------
1 | from statistics import mode
2 |
3 | import cv2
4 | from keras.models import load_model
5 | import numpy as np
6 |
7 | from utils.datasets import get_labels
8 | from utils.inference import detect_faces
9 | from utils.inference import draw_text
10 | from utils.inference import draw_bounding_box
11 | from utils.inference import apply_offsets
12 | from utils.inference import load_detection_model
13 | from utils.preprocessor import preprocess_input
14 |
15 | # parameters for loading data and images
16 | detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
17 | emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
18 | gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
19 | emotion_labels = get_labels('fer2013')
20 | gender_labels = get_labels('imdb')
21 | font = cv2.FONT_HERSHEY_SIMPLEX
22 |
23 | # hyper-parameters for bounding boxes shape
24 | frame_window = 10
25 | gender_offsets = (30, 60)
26 | emotion_offsets = (20, 40)
27 |
28 | # loading models
29 | face_detection = load_detection_model(detection_model_path)
30 | emotion_classifier = load_model(emotion_model_path, compile=False)
31 | gender_classifier = load_model(gender_model_path, compile=False)
32 |
33 | # getting input model shapes for inference
34 | emotion_target_size = emotion_classifier.input_shape[1:3]
35 | gender_target_size = gender_classifier.input_shape[1:3]
36 |
37 | # starting lists for calculating modes
38 | gender_window = []
39 | emotion_window = []
40 |
41 | # starting video streaming
42 | cv2.namedWindow('window_frame')
43 | video_capture = cv2.VideoCapture(0)
44 | while True:
45 |
46 | bgr_image = video_capture.read()[1]
47 | gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
48 | rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
49 | faces = detect_faces(face_detection, gray_image)
50 |
51 | for face_coordinates in faces:
52 |
53 | x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
54 | rgb_face = rgb_image[y1:y2, x1:x2]
55 |
56 | x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
57 | gray_face = gray_image[y1:y2, x1:x2]
58 | try:
59 | rgb_face = cv2.resize(rgb_face, (gender_target_size))
60 | gray_face = cv2.resize(gray_face, (emotion_target_size))
61 | except:
62 | continue
63 | gray_face = preprocess_input(gray_face, False)
64 | gray_face = np.expand_dims(gray_face, 0)
65 | gray_face = np.expand_dims(gray_face, -1)
66 | emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
67 | emotion_text = emotion_labels[emotion_label_arg]
68 | emotion_window.append(emotion_text)
69 |
70 | rgb_face = np.expand_dims(rgb_face, 0)
71 | rgb_face = preprocess_input(rgb_face, False)
72 | gender_prediction = gender_classifier.predict(rgb_face)
73 | gender_label_arg = np.argmax(gender_prediction)
74 | gender_text = gender_labels[gender_label_arg]
75 | gender_window.append(gender_text)
76 |
77 | if len(gender_window) > frame_window:
78 | emotion_window.pop(0)
79 | gender_window.pop(0)
80 | try:
81 | emotion_mode = mode(emotion_window)
82 | gender_mode = mode(gender_window)
83 | except:
84 | continue
85 |
86 | if gender_text == gender_labels[0]:
87 | color = (0, 0, 255)
88 | else:
89 | color = (255, 0, 0)
90 |
91 | draw_bounding_box(face_coordinates, rgb_image, color)
92 | draw_text(face_coordinates, rgb_image, gender_mode,
93 | color, 0, -20, 1, 1)
94 | draw_text(face_coordinates, rgb_image, emotion_mode,
95 | color, 0, -45, 1, 1)
96 |
97 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
98 | cv2.imshow('window_frame', bgr_image)
99 | if cv2.waitKey(1) & 0xFF == ord('q'):
100 | break
101 | video_capture.release()
102 | cv2.destroyAllWindows()
103 |
--------------------------------------------------------------------------------
/src/video_gradcam_demo.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import cv2
4 | import numpy as np
5 | from keras.models import load_model
6 | from utils.grad_cam import compile_gradient_function
7 | from utils.grad_cam import compile_saliency_function
8 | from utils.grad_cam import register_gradient
9 | from utils.grad_cam import modify_backprop
10 | from utils.grad_cam import calculate_guided_gradient_CAM
11 | from utils.inference import detect_faces
12 | from utils.inference import apply_offsets
13 | from utils.inference import load_detection_model
14 | from utils.preprocessor import preprocess_input
15 | from utils.inference import draw_bounding_box
16 | from utils.datasets import get_class_to_arg
17 |
18 | # getting the correct model given the input
19 | # task = sys.argv[1]
20 | # class_name = sys.argv[2]
21 | task = 'emotion'
22 | if task == 'gender':
23 | model_filename = '../trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5'
24 | class_to_arg = get_class_to_arg('imdb')
25 | # predicted_class = class_to_arg[class_name]
26 | predicted_class = 0
27 | offsets = (0, 0)
28 | elif task == 'emotion':
29 | model_filename = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
30 | # model_filename = '../trained_models/fer2013_big_XCEPTION.54-0.66.hdf5'
31 | class_to_arg = get_class_to_arg('fer2013')
32 | # predicted_class = class_to_arg[class_name]
33 | predicted_class = 0
34 | offsets = (0, 0)
35 |
36 | model = load_model(model_filename, compile=False)
37 | gradient_function = compile_gradient_function(model, predicted_class, 'conv2d_7')
38 | register_gradient()
39 | guided_model = modify_backprop(model, 'GuidedBackProp', task)
40 | saliency_function = compile_saliency_function(guided_model, 'conv2d_7')
41 |
42 | # parameters for loading data and images
43 | detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
44 | face_detection = load_detection_model(detection_model_path)
45 | color = (0, 255, 0)
46 |
47 | # getting input model shapes for inference
48 | target_size = model.input_shape[1:3]
49 |
50 | # starting lists for calculating modes
51 | emotion_window = []
52 |
53 | # starting video streaming
54 | cv2.namedWindow('window_frame')
55 | video_capture = cv2.VideoCapture(0)
56 | while True:
57 | bgr_image = video_capture.read()[1]
58 | gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
59 | rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
60 | faces = detect_faces(face_detection, gray_image)
61 |
62 | for face_coordinates in faces:
63 |
64 | x1, x2, y1, y2 = apply_offsets(face_coordinates, offsets)
65 | gray_face = gray_image[y1:y2, x1:x2]
66 | try:
67 | gray_face = cv2.resize(gray_face, (target_size))
68 | except:
69 | continue
70 |
71 | gray_face = preprocess_input(gray_face, True)
72 | gray_face = np.expand_dims(gray_face, 0)
73 | gray_face = np.expand_dims(gray_face, -1)
74 | guided_gradCAM = calculate_guided_gradient_CAM(gray_face,
75 | gradient_function, saliency_function)
76 | guided_gradCAM = cv2.resize(guided_gradCAM, (x2-x1, y2-y1))
77 | try:
78 | rgb_guided_gradCAM = np.repeat(guided_gradCAM[:, :, np.newaxis],
79 | 3, axis=2)
80 | rgb_image[y1:y2, x1:x2, :] = rgb_guided_gradCAM
81 | except:
82 | continue
83 | draw_bounding_box((x1, y1, x2 - x1, y2 - y1), rgb_image, color)
84 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
85 | try:
86 | cv2.imshow('window_frame', bgr_image)
87 | except:
88 | continue
89 | if cv2.waitKey(1) & 0xFF == ord('q'):
90 | break
91 |
92 | video_capture.release()
93 | cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src/web/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/web/emotion_gender_processor.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import logging
4 |
5 | import cv2
6 | from keras.models import load_model
7 | import numpy as np
8 |
9 | from utils.datasets import get_labels
10 | from utils.inference import detect_faces
11 | from utils.inference import draw_text
12 | from utils.inference import draw_bounding_box
13 | from utils.inference import apply_offsets
14 | from utils.inference import load_detection_model
15 | from utils.inference import load_image
16 | from utils.preprocessor import preprocess_input
17 |
18 | def process_image(image):
19 |
20 | try:
21 | # parameters for loading data and images
22 | detection_model_path = './trained_models/detection_models/haarcascade_frontalface_default.xml'
23 | emotion_model_path = './trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
24 | gender_model_path = './trained_models/gender_models/simple_CNN.81-0.96.hdf5'
25 | emotion_labels = get_labels('fer2013')
26 | gender_labels = get_labels('imdb')
27 | font = cv2.FONT_HERSHEY_SIMPLEX
28 |
29 | # hyper-parameters for bounding boxes shape
30 |         # gender_offsets = (30, 60)  # offsets used by the video demos
31 |         gender_offsets = (10, 10)
32 |         # emotion_offsets = (20, 40)
33 |         emotion_offsets = (0, 0)
34 |
35 | # loading models
36 | face_detection = load_detection_model(detection_model_path)
37 | emotion_classifier = load_model(emotion_model_path, compile=False)
38 | gender_classifier = load_model(gender_model_path, compile=False)
39 |
40 | # getting input model shapes for inference
41 | emotion_target_size = emotion_classifier.input_shape[1:3]
42 | gender_target_size = gender_classifier.input_shape[1:3]
43 |
44 | # loading images
45 |         image_array = np.frombuffer(image, np.uint8)
46 | unchanged_image = cv2.imdecode(image_array, cv2.IMREAD_UNCHANGED)
47 |
48 | rgb_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2RGB)
49 | gray_image = cv2.cvtColor(unchanged_image, cv2.COLOR_BGR2GRAY)
50 |
51 | faces = detect_faces(face_detection, gray_image)
52 | for face_coordinates in faces:
53 | x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
54 | rgb_face = rgb_image[y1:y2, x1:x2]
55 |
56 | x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
57 | gray_face = gray_image[y1:y2, x1:x2]
58 |
59 | try:
60 | rgb_face = cv2.resize(rgb_face, (gender_target_size))
61 | gray_face = cv2.resize(gray_face, (emotion_target_size))
62 | except:
63 | continue
64 |
65 | rgb_face = preprocess_input(rgb_face, False)
66 | rgb_face = np.expand_dims(rgb_face, 0)
67 | gender_prediction = gender_classifier.predict(rgb_face)
68 | gender_label_arg = np.argmax(gender_prediction)
69 | gender_text = gender_labels[gender_label_arg]
70 |
71 | gray_face = preprocess_input(gray_face, True)
72 | gray_face = np.expand_dims(gray_face, 0)
73 | gray_face = np.expand_dims(gray_face, -1)
74 | emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
75 | emotion_text = emotion_labels[emotion_label_arg]
76 |
77 | if gender_text == gender_labels[0]:
78 | color = (0, 0, 255)
79 | else:
80 | color = (255, 0, 0)
81 |
82 | draw_bounding_box(face_coordinates, rgb_image, color)
83 | draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
84 | draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)
85 | except Exception as err:
86 | logging.error('Error in emotion gender processor: "{0}"'.format(err))
87 |
88 | bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
89 |
90 | dirname = 'result'
91 | if not os.path.exists(dirname):
92 | os.mkdir(dirname)
93 |
94 | cv2.imwrite(os.path.join(dirname, 'predicted_image.png'), bgr_image)
95 |
--------------------------------------------------------------------------------
/src/web/faces.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, jsonify, make_response, request, abort, redirect, send_file
2 | import logging
3 |
4 | import emotion_gender_processor as eg_processor
5 |
6 | app = Flask(__name__)
7 |
8 | @app.route('/')
9 | def index():
10 | return redirect("https://ekholabs.ai", code=302)
11 |
12 | @app.route('/classifyImage', methods=['POST'])
13 | def upload():
14 | try:
15 | image = request.files['image'].read()
16 | eg_processor.process_image(image)
17 | return send_file('/ekholabs/face-classifier/result/predicted_image.png', mimetype='image/png')
18 | except Exception as err:
19 | logging.error('An error has occurred whilst processing the file: "{0}"'.format(err))
20 | abort(400)
21 |
22 | @app.errorhandler(400)
23 | def bad_request(error):
24 | return make_response(jsonify({'error': 'We cannot process the file sent in the request.'}), 400)
25 |
26 | @app.errorhandler(404)
27 | def not_found(error):
28 |     return make_response(jsonify({'error': 'Resource not found.'}), 404)
29 |
30 | if __name__ == '__main__':
31 | app.run(debug=True, host='0.0.0.0', port=8084)
32 |
--------------------------------------------------------------------------------
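
A minimal client sketch for the /classifyImage endpoint, assuming the service is listening on port 8084 as configured above and that the requests package is available (it is not among the project's listed dependencies); the image filename is a placeholder.

    import requests                                    # assumption: installed separately

    with open('test_image.jpg', 'rb') as image_file:   # placeholder image path
        response = requests.post('http://localhost:8084/classifyImage',
                                 files={'image': image_file})  # field name read by upload()

    response.raise_for_status()
    with open('predicted_image.png', 'wb') as output_file:
        output_file.write(response.content)            # annotated PNG returned via send_file
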
/trained_models/emotion_models/KDEF_emotion_training.log:
--------------------------------------------------------------------------------
1 | epoch,acc,loss,val_acc,val_loss
2 | 0,0.174234693878,2.07751232459,0.142857142857,2.06775901561
3 | 1,0.188265306122,2.0016279532,0.151020408163,2.0355568516
4 |
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.00-0.47.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.00-0.47.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.02-0.52.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.02-0.52.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.03-0.53.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.03-0.53.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.04-0.55.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.04-0.55.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.05-0.56.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.05-0.56.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.08-0.57.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.08-0.57.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.10-0.58.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.10-0.58.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.100-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.100-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.107-0.66.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.107-0.66.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.11-0.58.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.11-0.58.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.110-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.110-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.12-0.58.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.12-0.58.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.14-0.59.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.14-0.59.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.15-0.60.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.15-0.60.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.25-0.60.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.25-0.60.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.27-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.27-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.29-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.29-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.32-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.32-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.37-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.37-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.38-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.38-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.41-0.62.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.41-0.62.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.43-0.64.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.43-0.64.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.51-0.63.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.51-0.63.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.70-0.63.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.70-0.63.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.97-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.97-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/fer2013_mini_XCEPTION.99-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/fer2013_mini_XCEPTION.99-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/mini_XCEPTION_KDEF.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/mini_XCEPTION_KDEF.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/simple_CNN.530-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/simple_CNN.530-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/simple_CNN.985-0.66.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/simple_CNN.985-0.66.hdf5
--------------------------------------------------------------------------------
/trained_models/emotion_models/tiny_XCEPTION_KDEF.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/emotion_models/tiny_XCEPTION_KDEF.hdf5
--------------------------------------------------------------------------------
/trained_models/fer2013_big_XCEPTION.54-0.66.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/fer2013_big_XCEPTION.54-0.66.hdf5
--------------------------------------------------------------------------------
/trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/fer2013_mini_XCEPTION.119-0.65.hdf5
--------------------------------------------------------------------------------
/trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/gender_models/gender_mini_XCEPTION.21-0.95.hdf5
--------------------------------------------------------------------------------
/trained_models/gender_models/simple_CNN.81-0.96.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oarriaga/face_classification/b861d21b0e76ca5514cdeb5b56a689b7318584f4/trained_models/gender_models/simple_CNN.81-0.96.hdf5
--------------------------------------------------------------------------------