├── 01_face_dataset.py ├── 02_face_training.py ├── 03_face_recognition.py ├── FaceRecogBlock.png ├── README.md ├── faceDetection.py ├── faceEyeDetection.py ├── faceSmileDetection.py ├── faceSmileEyeDetection.py ├── haarcascade_eye.xml ├── haarcascade_frontalface_default.xml └── haarcascade_smile.xml /01_face_dataset.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Capture multiple Faces from multiple users to be stored on a DataBase (dataset directory) 3 | ==> Faces will be stored on a directory: dataset/ (if does not exist, pls create one) 4 | ==> Each face will have a unique numeric integer ID as 1, 2, 3, etc 5 | 6 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 7 | 8 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 9 | 10 | ''' 11 | 12 | import cv2 13 | import os 14 | 15 | cam = cv2.VideoCapture(0) 16 | cam.set(3, 640) # set video width 17 | cam.set(4, 480) # set video height 18 | 19 | face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 20 | 21 | # For each person, enter one numeric face id 22 | face_id = input('\n enter user id end press ==> ') 23 | 24 | print("\n [INFO] Initializing face capture. Look the camera and wait ...") 25 | # Initialize individual sampling face count 26 | count = 0 27 | 28 | while(True): 29 | 30 | ret, img = cam.read() 31 | img = cv2.flip(img, -1) # flip video image vertically 32 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 33 | faces = face_detector.detectMultiScale(gray, 1.3, 5) 34 | 35 | for (x,y,w,h) in faces: 36 | 37 | cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) 38 | count += 1 39 | 40 | # Save the captured image into the datasets folder 41 | cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w]) 42 | 43 | cv2.imshow('image', img) 44 | 45 | k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video 46 | if k == 27: 47 | break 48 | elif count >= 30: # Take 30 face sample and stop video 49 | break 50 | 51 | # Do a bit of cleanup 52 | print("\n [INFO] Exiting Program and cleanup stuff") 53 | cam.release() 54 | cv2.destroyAllWindows() 55 | 56 | 57 | -------------------------------------------------------------------------------- /02_face_training.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Training Multiple Faces stored on a DataBase: 3 | ==> Each face should have a unique numeric integer ID as 1, 2, 3, etc 4 | ==> LBPH computed model will be saved on trainer/ directory. (if it does not exist, pls create one) 5 | ==> for using PIL, install pillow library with "pip install pillow" 6 | 7 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 8 | 9 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 10 | 11 | ''' 12 | 13 | import cv2 14 | import numpy as np 15 | from PIL import Image 16 | import os 17 | 18 | # Path for face image database 19 | path = 'dataset' 20 | 21 | recognizer = cv2.face.LBPHFaceRecognizer_create() 22 | detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml"); 23 | 24 | # function to get the images and label data 25 | def getImagesAndLabels(path): 26 | 27 | imagePaths = [os.path.join(path,f) for f in os.listdir(path)] 28 | faceSamples=[] 29 | ids = [] 30 | 31 | for imagePath in imagePaths: 32 | 33 | PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale 34 | img_numpy = np.array(PIL_img,'uint8') 35 | 36 | id = int(os.path.split(imagePath)[-1].split(".")[1]) 37 | faces = detector.detectMultiScale(img_numpy) 38 | 39 | for (x,y,w,h) in faces: 40 | faceSamples.append(img_numpy[y:y+h,x:x+w]) 41 | ids.append(id) 42 | 43 | return faceSamples,ids 44 | 45 | print ("\n [INFO] Training faces. It will take a few seconds. Wait ...") 46 | faces,ids = getImagesAndLabels(path) 47 | recognizer.train(faces, np.array(ids)) 48 | 49 | # Save the model into trainer/trainer.yml 50 | recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi 51 | 52 | # Print the numer of faces trained and end program 53 | print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids)))) 54 | -------------------------------------------------------------------------------- /03_face_recognition.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Real Time Face Recogition 3 | ==> Each face stored on dataset/ dir, should have a unique numeric integer ID as 1, 2, 3, etc 4 | ==> LBPH computed model (trained faces) should be on trainer/ dir 5 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 6 | 7 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 8 | 9 | ''' 10 | 11 | import cv2 12 | import numpy as np 13 | import os 14 | 15 | recognizer = cv2.face.LBPHFaceRecognizer_create() 16 | recognizer.read('trainer/trainer.yml') 17 | cascadePath = "haarcascade_frontalface_default.xml" 18 | faceCascade = cv2.CascadeClassifier(cascadePath); 19 | 20 | font = cv2.FONT_HERSHEY_SIMPLEX 21 | 22 | #iniciate id counter 23 | id = 0 24 | 25 | # names related to ids: example ==> Marcelo: id=1, etc 26 | names = ['None', 'Marcelo', 'Paula', 'Ilza', 'Z', 'W'] 27 | 28 | # Initialize and start realtime video capture 29 | cam = cv2.VideoCapture(0) 30 | cam.set(3, 640) # set video widht 31 | cam.set(4, 480) # set video height 32 | 33 | # Define min window size to be recognized as a face 34 | minW = 0.1*cam.get(3) 35 | minH = 0.1*cam.get(4) 36 | 37 | while True: 38 | 39 | ret, img =cam.read() 40 | img = cv2.flip(img, -1) # Flip vertically 41 | 42 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 43 | 44 | faces = faceCascade.detectMultiScale( 45 | gray, 46 | scaleFactor = 1.2, 47 | minNeighbors = 5, 48 | minSize = (int(minW), int(minH)), 49 | ) 50 | 51 | for(x,y,w,h) in faces: 52 | 53 | cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2) 54 | 55 | id, confidence = recognizer.predict(gray[y:y+h,x:x+w]) 56 | 57 | # Check if confidence is less them 100 ==> "0" is perfect match 58 | if (confidence < 100): 59 | id = names[id] 60 | confidence = " {0}%".format(round(100 - confidence)) 61 | else: 62 | id = "unknown" 63 | confidence = " {0}%".format(round(100 - confidence)) 64 | 65 | cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2) 66 | cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1) 67 | 68 | cv2.imshow('camera',img) 69 | 70 | k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video 71 | if k == 27: 72 | break 73 | 74 | # Do a bit of cleanup 75 | print("\n [INFO] Exiting Program and cleanup stuff") 76 | cam.release() 77 | cv2.destroyAllWindows() 78 | -------------------------------------------------------------------------------- /FaceRecogBlock.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/KrithicrosonR/face-recognition-using-opencv/d6a03d84652ebf8f33d0686c7edc1a455f494908/FaceRecogBlock.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenCV-Face-Recognition 2 | Real-time face recognition project with OpenCV and Python 3 | 4 | -------------------------------------------------------------------------------- /faceDetection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Haar Cascade Face detection with OpenCV 3 | Based on tutorial by pythonprogramming.net 4 | Visit original post: https://pythonprogramming.net/haar-cascade-face-eye-detection-python-opencv-tutorial/ 5 | Adapted by Marcelo Rovai - MJRoBot.org @ 7Feb2018 6 | ''' 7 | 8 | import numpy as np 9 | import cv2 10 | 11 | # multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades 12 | faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml') 13 | 14 | cap = cv2.VideoCapture(0) 15 | cap.set(3,640) # set Width 16 | cap.set(4,480) # set Height 17 | 18 | while True: 19 | ret, img = cap.read() 20 | img = cv2.flip(img, -1) 21 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 22 | faces = faceCascade.detectMultiScale( 23 | gray, 24 | 25 | scaleFactor=1.2, 26 | minNeighbors=5 27 | , 28 | minSize=(20, 20) 29 | ) 30 | 31 | for (x,y,w,h) in faces: 32 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 33 | roi_gray = gray[y:y+h, x:x+w] 34 | roi_color = img[y:y+h, x:x+w] 35 | 36 | 37 | cv2.imshow('video',img) 38 | 39 | k = cv2.waitKey(30) & 0xff 40 | if k == 27: # press 'ESC' to quit 41 | break 42 | 43 | cap.release() 44 | cv2.destroyAllWindows() 45 | -------------------------------------------------------------------------------- /faceEyeDetection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Haar Cascade Face and Eye detection with OpenCV 3 | Based on tutorial by pythonprogramming.net 4 | Visit original post: https://pythonprogramming.net/haar-cascade-face-eye-detection-python-opencv-tutorial/ 5 | 6 | Adapted by Marcelo Rovai - MJRoBot.org @ 22Feb2018 7 | ''' 8 | 9 | import numpy as np 10 | import cv2 11 | 12 | # multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades 13 | faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml') 14 | eyeCascade = cv2.CascadeClassifier('Cascades/haarcascade_eye.xml') 15 | 16 | cap = cv2.VideoCapture(0) 17 | cap.set(3,640) # set Width 18 | cap.set(4,480) # set Height 19 | 20 | while True: 21 | ret, img = cap.read() 22 | img = cv2.flip(img, -1) 23 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 24 | faces = faceCascade.detectMultiScale( 25 | gray, 26 | scaleFactor=1.3, 27 | minNeighbors=5, 28 | minSize=(30, 30) 29 | ) 30 | 31 | for (x,y,w,h) in faces: 32 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 33 | roi_gray = gray[y:y+h, x:x+w] 34 | roi_color = img[y:y+h, x:x+w] 35 | 36 | eyes = eyeCascade.detectMultiScale( 37 | roi_gray, 38 | scaleFactor= 1.5, 39 | minNeighbors=10, 40 | minSize=(5, 5), 41 | ) 42 | 43 | for (ex, ey, ew, eh) in eyes: 44 | cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2) 45 | 46 | cv2.imshow('video', img) 47 | 48 | k = cv2.waitKey(30) & 0xff 49 | if k == 27: # press 'ESC' to quit 50 | break 51 | 52 | cap.release() 53 | cv2.destroyAllWindows() 54 | -------------------------------------------------------------------------------- /faceSmileDetection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Haar Cascade Face and Smile detection with OpenCV 3 | 4 | Developed by Marcelo Rovai - MJRoBot.org @ 22Feb2018 5 | ''' 6 | 7 | import numpy as np 8 | import cv2 9 | 10 | # multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades 11 | faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml') 12 | smileCascade = cv2.CascadeClassifier('Cascades/haarcascade_smile.xml') 13 | 14 | cap = cv2.VideoCapture(0) 15 | cap.set(3,640) # set Width 16 | cap.set(4,480) # set Height 17 | 18 | while True: 19 | ret, img = cap.read() 20 | img = cv2.flip(img, -1) 21 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 22 | faces = faceCascade.detectMultiScale( 23 | gray, 24 | scaleFactor=1.3, 25 | minNeighbors=5, 26 | minSize=(30, 30) 27 | ) 28 | 29 | for (x,y,w,h) in faces: 30 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 31 | roi_gray = gray[y:y+h, x:x+w] 32 | roi_color = img[y:y+h, x:x+w] 33 | 34 | smile = smileCascade.detectMultiScale( 35 | roi_gray, 36 | scaleFactor= 1.5, 37 | minNeighbors=15, 38 | minSize=(25, 25), 39 | ) 40 | 41 | for (xx, yy, ww, hh) in smile: 42 | cv2.rectangle(roi_color, (xx, yy), (xx + ww, yy + hh), (0, 255, 0), 2) 43 | 44 | cv2.imshow('video', img) 45 | 46 | k = cv2.waitKey(30) & 0xff 47 | if k == 27: # press 'ESC' to quit 48 | break 49 | 50 | cap.release() 51 | cv2.destroyAllWindows() 52 | -------------------------------------------------------------------------------- /faceSmileEyeDetection.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Haar Cascade Face, Smile and Eye detection with OpenCV 3 | 4 | Developed by Marcelo Rovai - MJRoBot.org @ 22Feb2018 5 | ''' 6 | 7 | import numpy as np 8 | import cv2 9 | 10 | # multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades 11 | faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml') 12 | eyeCascade = cv2.CascadeClassifier('Cascades/haarcascade_eye.xml') 13 | smileCascade = cv2.CascadeClassifier('Cascades/haarcascade_smile.xml') 14 | 15 | cap = cv2.VideoCapture(0) 16 | cap.set(3,640) # set Width 17 | cap.set(4,480) # set Height 18 | 19 | while True: 20 | ret, img = cap.read() 21 | img = cv2.flip(img, -1) 22 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 23 | faces = faceCascade.detectMultiScale( 24 | gray, 25 | scaleFactor=1.3, 26 | minNeighbors=5, 27 | minSize=(30, 30) 28 | ) 29 | 30 | for (x,y,w,h) in faces: 31 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 32 | roi_gray = gray[y:y+h, x:x+w] 33 | roi_color = img[y:y+h, x:x+w] 34 | 35 | eyes = eyeCascade.detectMultiScale( 36 | roi_gray, 37 | scaleFactor= 1.5, 38 | minNeighbors=5, 39 | minSize=(5, 5), 40 | ) 41 | 42 | for (ex, ey, ew, eh) in eyes: 43 | cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2) 44 | 45 | 46 | smile = smileCascade.detectMultiScale( 47 | roi_gray, 48 | scaleFactor= 1.5, 49 | minNeighbors=15, 50 | minSize=(25, 25), 51 | ) 52 | 53 | for (xx, yy, ww, hh) in smile: 54 | cv2.rectangle(roi_color, (xx, yy), (xx + ww, yy + hh), (0, 255, 0), 2) 55 | 56 | cv2.imshow('video', img) 57 | 58 | k = cv2.waitKey(30) & 0xff 59 | if k == 27: # press 'ESC' to quit 60 | break 61 | 62 | cap.release() 63 | cv2.destroyAllWindows() 64 | --------------------------------------------------------------------------------