├── FaceSeamlessnetry.ipynb
├── README.md
├── face_Recognition.py
├── facefrontend.py
└── haarcascade_frontalface_default.xml

--------------------------------------------------------------------------------
/FaceSeamlessnetry.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Collect Images For Face Recognition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Face not found\n",
      "Collecting Samples Complete\n"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "import os\n",
    "\n",
    "# Load the HAAR face classifier\n",
    "face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n",
    "\n",
    "def face_extractor(img):\n",
    "    # Detect faces and return the cropped face region.\n",
    "    # Returns None when no face is detected.\n",
    "    faces = face_classifier.detectMultiScale(img, 1.3, 5)\n",
    "\n",
    "    if len(faces) == 0:\n",
    "        return None\n",
    "\n",
    "    cropped_face = None\n",
    "    # Crop the last face found, with a small margin around the detection box;\n",
    "    # clamp the shifted corner so the slice never goes negative\n",
    "    for (x, y, w, h) in faces:\n",
    "        x = max(x - 10, 0)\n",
    "        y = max(y - 10, 0)\n",
    "        cropped_face = img[y:y + h + 50, x:x + w + 50]\n",
    "\n",
    "    return cropped_face\n",
    "\n",
    "# Make sure the output directory exists\n",
    "os.makedirs('./Images', exist_ok=True)\n",
    "\n",
    "# Initialize the webcam\n",
    "cap = cv2.VideoCapture(0)\n",
    "count = 0\n",
    "\n",
    "# Collect 100 samples of your face from webcam input\n",
    "while True:\n",
    "    ret, frame = cap.read()\n",
    "    if not ret:\n",
    "        break\n",
    "\n",
    "    face = face_extractor(frame)\n",
    "    if face is not None:\n",
    "        count += 1\n",
    "        face = cv2.resize(face, (400, 400))\n",
    "\n",
    "        # Save the file in the specified directory with a unique name\n",
    "        file_name_path = './Images/' + str(count) + '.jpg'\n",
    "        cv2.imwrite(file_name_path, face)\n",
    "\n",
    "        # Put the count on the image and display a live count\n",
    "        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)\n",
    "        cv2.imshow('Face Cropper', face)\n",
    "    else:\n",
    "        print(\"Face not found\")\n",
    "\n",
    "    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter key\n",
    "        break\n",
    "\n",
    "cap.release()\n",
    "cv2.destroyAllWindows()\n",
    "print(\"Collecting Samples Complete\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Deep-Learning-Face-Recognition

Deep-learning face recognition in three steps: collect face images from a webcam (FaceSeamlessnetry.ipynb), fine-tune a VGG16 classifier on them (face_Recognition.py), and run live recognition (facefrontend.py).
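
The notebook above saves every crop into a flat ./Images/ folder, while face_Recognition.py below expects one sub-folder per person under Datasets/Train and Datasets/Test. A minimal sketch of that reorganization step follows; the script name, the 'Krish' label, and the 80/20 split are assumptions for illustration, not part of this repo:

# organize_dataset.py (hypothetical helper, not part of this repo)
import os
import random
import shutil

PERSON = 'Krish'   # assumed class label; use one sub-folder per person
SRC = './Images'   # where the notebook saved the face crops
SPLIT = 0.8        # assumed 80/20 train/test split

images = os.listdir(SRC)
random.shuffle(images)
cut = int(len(images) * SPLIT)

for subset, files in [('Train', images[:cut]), ('Test', images[cut:])]:
    dest = os.path.join('Datasets', subset, PERSON)
    os.makedirs(dest, exist_ok=True)
    for f in files:
        shutil.copy(os.path.join(SRC, f), os.path.join(dest, f))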
--------------------------------------------------------------------------------
/face_Recognition.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
@author: Krish.Naik
"""

from keras.layers import Dense, Flatten
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from glob import glob
import matplotlib.pyplot as plt

# re-size all the images to this
IMAGE_SIZE = [224, 224]

train_path = 'Datasets/Train'
valid_path = 'Datasets/Test'

# Load VGG16 pre-trained on ImageNet, without its top classification layers
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# don't train the existing convolutional weights
for layer in vgg.layers:
    layer.trainable = False

# one folder per person; useful for getting the number of classes
folders = glob('Datasets/Train/*')

# our layers - you can add more if you want
x = Flatten()(vgg.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=vgg.input, outputs=prediction)

# view the structure of the model
model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('Datasets/Train',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')

test_set = test_datagen.flow_from_directory('Datasets/Test',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')

# fit the model
r = model.fit_generator(
    training_set,
    validation_data=test_set,
    epochs=5,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)

# loss; save the figure before plt.show(), which clears the current figure
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()

# accuracies ('acc'/'val_acc' in this Keras version; newer releases use 'accuracy')
plt.plot(r.history['acc'], label='train acc')
plt.plot(r.history['val_acc'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()

model.save('facefeatures_new_model.h5')
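
One detail worth verifying before moving on: facefrontend.py below hard-codes output index 3 in pred[0][3], and flow_from_directory assigns class indices alphabetically by folder name. A small self-contained check (the script name is hypothetical; it assumes the Datasets/Train layout used above) prints which softmax index belongs to which person:

# check_classes.py (hypothetical snippet)
from keras.preprocessing.image import ImageDataGenerator

gen = ImageDataGenerator(rescale=1./255).flow_from_directory(
    'Datasets/Train', target_size=(224, 224), batch_size=32,
    class_mode='categorical')

# class_indices maps folder name -> softmax output index
for name, idx in sorted(gen.class_indices.items(), key=lambda kv: kv[1]):
    print(idx, name)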
--------------------------------------------------------------------------------
/facefrontend.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 14:49:19 2019

@author: krish.naik
"""

# Face Recognition

# Importing the libraries
from PIL import Image
import cv2
from keras.models import load_model
import numpy as np

model = load_model('facefeatures_new_model_final.h5')

# Loading the cascades
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_extractor(img):
    # Detect faces and return the cropped face region.
    # Returns None when no face is detected.
    faces = face_cascade.detectMultiScale(img, 1.3, 5)

    if len(faces) == 0:
        return None

    cropped_face = None
    # Crop the last face found and draw its bounding box on the frame
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        cropped_face = img[y:y + h, x:x + w]

    return cropped_face

# Doing some face recognition with the webcam
video_capture = cv2.VideoCapture(0)
while True:
    _, frame = video_capture.read()

    face = face_extractor(frame)
    if face is not None:
        # Resize to 224x224 because the model was trained with this image size
        face = cv2.resize(face, (224, 224))
        # OpenCV frames are BGR; convert to RGB to match the images Keras
        # loaded during training
        face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(face, 'RGB')
        img_array = np.array(im)
        # The model expects a 4D tensor (batch x height x width x channels),
        # so expand 224x224x3 into 1x224x224x3
        img_array = np.expand_dims(img_array, axis=0)
        # Match the training-time preprocessing (ImageDataGenerator rescale=1./255)
        img_array = img_array / 255.0
        pred = model.predict(img_array)
        print(pred)

        name = "None matching"

        # Index 3 is the position of the target person in the softmax output;
        # verify it with class_indices (see the snippet above)
        if pred[0][3] > 0.5:
            name = 'Krish'
        cv2.putText(frame, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    else:
        cv2.putText(frame, "No face found", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
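
Putting the pieces together: collect face crops with FaceSeamlessnetry.ipynb, arrange them into per-person folders (for example with the helper sketched after the README), train and save the model with face_Recognition.py, then run facefrontend.py for live webcam recognition. The expected dataset layout, assuming a single person named Krish, is:

Datasets
├── Train
│   └── Krish
└── Test
    └── Krish

One caveat: face_Recognition.py saves facefeatures_new_model.h5 while facefrontend.py loads facefeatures_new_model_final.h5, so rename one of the two before running the frontend.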