# -*- coding: utf-8 -*-
"""Preview the default webcam feed in a window until the user presses 'q'.

Created on Tue Feb 12 12:51:30 2019
@author: ARPIT DHAMIJA
"""

import cv2

capture = cv2.VideoCapture(0)

while True:
    grabbed, image = capture.read()
    if not grabbed:
        # Camera hiccup -- skip this iteration and try the next frame.
        continue

    cv2.imshow("video frame", image)

    # waitKey returns -1 on timeout; mask to a byte before comparing.
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
import cv2
import numpy as np

# Live face-detection demo: draws a green box around the largest detected
# face and shows a 100x100 crop of it in a second window.  Press 'q' to quit.

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")

while True:
    ret, frame = cap.read()

    # Bug fix: validate the read BEFORE touching the frame --
    # cv2.cvtColor(None, ...) raises when the camera misses a frame.
    if ret == False:
        continue

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # scaleFactor=1.3, minNeighbors=5
    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
    if len(faces) == 0:
        continue

    # Only the first detection is shown, as in the original demo.
    for face in faces[:1]:
        x, y, w, h = face

        # Bug fix: clamp the padded crop to the frame.  A negative slice
        # start wraps around in Python and hands cv2.resize an empty or
        # garbage crop whenever the face sits near the frame border.
        offset = 10
        y0 = max(y - offset, 0)
        x0 = max(x - offset, 0)
        face_offset = frame[y0:y + h + offset, x0:x + w + offset]
        face_selection = cv2.resize(face_offset, (100, 100))

        cv2.imshow("Face", face_selection)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("faces", frame)

    key_pressed = cv2.waitKey(1) & 0xFF
    if key_pressed == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np

# Capture training samples from the webcam: every 10th time a face is
# detected, the largest face is cropped, resized to 100x100 and appended
# to the dataset, which is finally saved as <name>.npy in ./face_dataset/.
# Press 'q' to stop capturing.

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")

skip = 0
face_data = []
dataset_path = "./face_dataset/"

file_name = input("Enter the name of person : ")


while True:
    ret, frame = cap.read()

    # Bug fix: validate the read BEFORE touching the frame --
    # cv2.cvtColor(None, ...) raises when the camera misses a frame.
    if ret == False:
        continue

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)
    if len(faces) == 0:
        continue

    k = 1

    # Largest face (w*h) first so we always sample the dominant face.
    faces = sorted(faces, key=lambda f: f[2] * f[3], reverse=True)

    skip += 1

    for face in faces[:1]:
        x, y, w, h = face

        # Bug fix: clamp the padded crop to the frame.  A negative slice
        # start wraps around in Python and hands cv2.resize an empty or
        # garbage crop whenever the face sits near the frame border.
        offset = 5
        y0 = max(y - offset, 0)
        x0 = max(x - offset, 0)
        face_offset = frame[y0:y + h + offset, x0:x + w + offset]
        face_selection = cv2.resize(face_offset, (100, 100))

        # Sample only every 10th detection to get varied poses.
        if skip % 10 == 0:
            face_data.append(face_selection)
            print(len(face_data))

        cv2.imshow(str(k), face_selection)
        k += 1

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("faces", frame)

    key_pressed = cv2.waitKey(1) & 0xFF
    if key_pressed == ord('q'):
        break

# Bug fix: the original unconditionally reshaped, which raises
# "cannot reshape array of size 0" when the user quits before any
# sample was captured.  Save only when there is data.
if len(face_data) > 0:
    face_data = np.array(face_data)
    # Flatten each 100x100x3 face into one row vector.
    face_data = face_data.reshape((face_data.shape[0], -1))
    print(face_data.shape)

    np.save(dataset_path + file_name, face_data)
    print("Dataset saved at : {}".format(dataset_path + file_name + '.npy'))
else:
    print("No faces captured; nothing saved.")

cap.release()
cv2.destroyAllWindows()
import numpy as np
import os

########## KNN CODE ############
def distance(v1, v2):
    """Return the Euclidean distance between two flat vectors."""
    return np.sqrt(((v1 - v2) ** 2).sum())


def knn(train, test, k=5):
    """Classify *test* by majority vote among its k nearest rows of *train*.

    train -- 2-D array whose rows are [features..., label]; the label is
             stored in the last column.
    test  -- flat feature vector (length == train row length - 1).
    k     -- number of nearest neighbours that vote (default 5).
    Returns the winning label value.
    """
    dist = []
    for i in range(train.shape[0]):
        ix = train[i, :-1]  # feature vector
        iy = train[i, -1]   # label
        dist.append([distance(test, ix), iy])
    # Sort by distance, keep the k closest, extract their labels.
    dk = sorted(dist, key=lambda x: x[0])[:k]
    labels = np.array(dk)[:, -1]
    # Majority vote: most frequent label among the k neighbours.
    output = np.unique(labels, return_counts=True)
    index = np.argmax(output[1])
    return output[0][index]
################################


def main():
    """Run live webcam face recognition against the saved .npy dataset."""
    # cv2 is only needed for the live demo; importing it here keeps the
    # knn helpers above usable without OpenCV installed.
    import cv2

    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")

    dataset_path = "./face_dataset/"

    face_data = []
    labels = []
    class_id = 0
    names = {}  # class_id -> person name (taken from the file name)

    # Dataset preparation: one .npy file per person, rows = flattened faces.
    for fx in os.listdir(dataset_path):
        if fx.endswith('.npy'):
            names[class_id] = fx[:-4]
            data_item = np.load(dataset_path + fx)
            face_data.append(data_item)

            target = class_id * np.ones((data_item.shape[0],))
            class_id += 1
            labels.append(target)

    # Bug fix: fail with a clear message instead of np.concatenate's
    # cryptic "need at least one array" error when no dataset exists yet.
    if not face_data:
        print("No .npy files found in {} -- run face_data.py first.".format(dataset_path))
        return

    face_dataset = np.concatenate(face_data, axis=0)
    face_labels = np.concatenate(labels, axis=0).reshape((-1, 1))
    print(face_labels.shape)
    print(face_dataset.shape)

    # Train matrix: features + label column, as expected by knn().
    trainset = np.concatenate((face_dataset, face_labels), axis=1)
    print(trainset.shape)

    while True:
        ret, frame = cap.read()
        if ret == False:
            continue

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect all faces in the frame.
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)

        for face in faces:
            x, y, w, h = face

            # Bug fix: clamp the padded crop to the frame.  A negative
            # slice start wraps around and would feed cv2.resize an
            # empty crop whenever the face sits near the frame border.
            offset = 5
            y0 = max(y - offset, 0)
            x0 = max(x - offset, 0)
            face_section = frame[y0:y + h + offset, x0:x + w + offset]
            face_section = cv2.resize(face_section, (100, 100))

            out = knn(trainset, face_section.flatten())

            # Label and box the face on the original image.
            cv2.putText(frame, names[int(out)], (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 2)

        cv2.imshow("Faces", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()