├── READ.me
├── README.md
├── create_data.py
├── create_database.py
├── face_recognize.py
├── haarcascade_frontalface_default.xml
├── real_time.py
└── trainer.py

/READ.me:
--------------------------------------------------------------------------------
To install OpenCV run the script

$ sh ./install_opencv.sh


Run create_database.py with the person's name as a parameter:

$ python create_database.py your_name

Create a dataset for at least two people.

Then train the model and run the recognizer:

$ python trainer.py
$ python real_time.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Real-time-face-recognition-in-python-using-opencv-
Real-time face recognition in Python using OpenCV. Tested with OpenCV 2.4.10, Python 2.7.10 and NumPy 1.11.0.

Make a folder named "datasets" (create_data.py and face_recognize.py read from it).

Run create_data.py; it captures 30 face images for one person. Repeat for each person you want to recognize (change sub_data to that person's name).

Now run face_recognize.py.
--------------------------------------------------------------------------------
/create_data.py:
--------------------------------------------------------------------------------
# creating the dataset
import cv2, sys, numpy, os

haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'   # all face data is stored under this folder
sub_data = 'aquib'      # sub-folder for one person; use that person's name

path = os.path.join(datasets, sub_data)
if not os.path.isdir(path):
    os.mkdir(path)
(width, height) = (130, 100)    # size of the stored face images

face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)    # 0 is the default webcam; use 1 for an external camera

# The program loops until it has 30 images of the face.
count = 1
while count < 31:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        cv2.imwrite('%s/%s.png' % (path, count), face_resize)
    count += 1

    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
--------------------------------------------------------------------------------
/create_database.py:
--------------------------------------------------------------------------------
# create_database.py
import cv2, sys, numpy, os, time

count = 0
size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'database'
fn_name = sys.argv[1]   # name of the person
path = os.path.join(fn_dir, fn_name)
if not os.path.isdir(path):
    os.mkdir(path)
(im_width, im_height) = (112, 92)
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)

print "-----------------------Taking pictures----------------------"
print "--------------------Give some expressions---------------------"

# The program loops until it has 45 images of the face.
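# Each pass downscales the frame by 'size' for faster Haar detection, takes the
# first detection (sorted by height), scales its coordinates back to full size,
# crops and resizes the face to 112x92, and saves it as the next numbered PNG
# in database/<name>/, pausing briefly between captures.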
while count < 45:
    (rval, im) = webcam.read()
    im = cv2.flip(im, 1, 0)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
    faces = haar_cascade.detectMultiScale(mini)
    faces = sorted(faces, key=lambda x: x[3])
    if faces:
        face_i = faces[0]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        pin = sorted([int(n[:n.find('.')]) for n in os.listdir(path)
                      if n[0] != '.'] + [0])[-1] + 1
        cv2.imwrite('%s/%s.png' % (path, pin), face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
        cv2.putText(im, fn_name, (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN,
                    1, (0, 255, 0))
        time.sleep(0.38)
        count += 1

    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break

print str(count) + " images taken and saved to " + fn_name + " folder in database"

--------------------------------------------------------------------------------
/face_recognize.py:
--------------------------------------------------------------------------------
# face_recognize.py
import cv2, sys, numpy, os

size = 4
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'

# Part 1: Create the FisherFace recognizer
print('Training...')

# Create a list of images and a list of corresponding labels
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(width, height) = (130, 100)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

# OpenCV trains a model from the images
# NOTE: this is the OpenCV 2.4 API; OpenCV 3+ moves the recognizers into cv2.face
model = cv2.createFisherFaceRecognizer()
model.train(images, labels)

# Part 2: Use the FisherFace recognizer on the camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
while True:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)

        if prediction[1] < 500:
            cv2.putText(im, '%s - %.0f' % (names[prediction[0]], prediction[1]),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(im, 'not recognized',
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))

    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
--------------------------------------------------------------------------------
/real_time.py:
--------------------------------------------------------------------------------
import cv2, os

# Import numpy for matrix calculations
import numpy as np

# Create the Local Binary Patterns Histograms (LBPH) recognizer for face recognition
recognizer = cv2.createLBPHFaceRecognizer()
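# NOTE: this script targets the OpenCV 2.4 API; with OpenCV 3+ the recognizer
# lives in the contrib "face" module (roughly cv2.face.LBPHFaceRecognizer_create())
# and load()/save() become read()/write() -- check the docs for your version.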

# Load the trained model
recognizer.load('trainer.yml')
fn_dir = 'database'

# Load the prebuilt Haar cascade for frontal faces
cascadePath = "haarcascade_frontalface_default.xml"
(im_width, im_height) = (112, 92)

# Build the id -> name mapping from the folder names in the database
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1

# Use the recognizer on the camera stream
face_cascade = cv2.CascadeClassifier(cascadePath)
webcam = cv2.VideoCapture(0)
while True:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = recognizer.predict(face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)

        if prediction[1] < 500:
            cv2.putText(im, '%s - %.0f' % (names[prediction[0]], prediction[1]),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(im, 'not recognized',
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))

    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
--------------------------------------------------------------------------------
/trainer.py:
--------------------------------------------------------------------------------
# trainer.py
import cv2, sys, numpy, os

size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'database'

recognizer = cv2.createLBPHFaceRecognizer()

# Part 1: Train the LBPH recognizer on the saved face images
print('Training...')

# Create a list of images and a list of corresponding labels
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

# Train the recognizer and save the model to trainer.yml
recognizer.train(images, labels)
recognizer.save('trainer.yml')

--------------------------------------------------------------------------------
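A quick way to sanity-check the trained model is to run a prediction on one of the saved face crops. The sketch below is illustrative only and not part of the repository; it assumes the OpenCV 2.4 API used above, a trainer.yml produced by trainer.py, and a placeholder image path under database/.

# verify_model.py -- illustrative sketch, not part of the original repository
import cv2

(im_width, im_height) = (112, 92)

# Load the LBPH model trained by trainer.py (OpenCV 2.4 API, as above)
recognizer = cv2.createLBPHFaceRecognizer()
recognizer.load('trainer.yml')

# Read one saved face crop in grayscale and resize it to the training size
# ('database/your_name/1.png' is a placeholder path)
sample = cv2.imread('database/your_name/1.png', 0)
sample = cv2.resize(sample, (im_width, im_height))

# predict() returns (label, confidence); for LBPH a lower confidence means a closer match
(label, confidence) = recognizer.predict(sample)
print 'label: %d  confidence: %.1f' % (label, confidence)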