├── .gitignore
├── 01_face_dataset.py
├── 02_face_training.py
├── 03_face_recognition.py
├── CamTest.py
├── FaceDetection.py
├── FaceEyeDetection.py
├── FaceSmileDetection.py
├── README.md
└── haarcascade_frontalface_default.xml

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

--------------------------------------------------------------------------------
/01_face_dataset.py:
--------------------------------------------------------------------------------
import cv2

cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
face_id = input('\n Enter user ID and press <return> ==> ')
print("\n Initializing face capture. Look at the camera and wait ...")
# Initialize individual face sample count
count = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        count += 1
        # Save the captured face region into the dataset folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' to exit the video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face samples and stop the video
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/02_face_training.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from PIL import Image
import os

# Path for the face image database
path = 'dataset'

recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml")

# Function to get the images and label data
def getImagesAndLabels(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y+h, x:x+w])
            ids.append(id)
    return faceSamples, ids

print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('/home/pi/FaceRecognition/trainer/trainer.yml')  # recognizer.save() worked on Mac, but not on Pi
# Print the number of faces trained and end the program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
Exiting Program".format(len(np.unique(ids)))) 32 | -------------------------------------------------------------------------------- /03_face_recognition.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import os 4 | 5 | os.chdir("/home/pi/opencv-3.4.1/data/haarcascades") 6 | recognizer = cv2.face.LBPHFaceRecognizer_create() 7 | recognizer.read('/home/pi/FaceRecognition/trainer/trainer.yml') 8 | cascadePath = "/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml" 9 | faceCascade = cv2.CascadeClassifier(cascadePath); 10 | 11 | font = cv2.FONT_HERSHEY_SIMPLEX 12 | 13 | #iniciate id counter 14 | id = 0 15 | 16 | # names related to ids: example ==> KUNAL: id=1, etc 17 | names = ['None', 'Kunal', 'Kaushik', 'Atharv', 'Z', 'W'] 18 | 19 | # Initialize and start realtime video capture 20 | cam = cv2.VideoCapture(0) 21 | cam.set(3, 640) # set video widht 22 | cam.set(4, 480) # set video height 23 | 24 | # Define min window size to be recognized as a face 25 | minW = 0.1*cam.get(3) 26 | minH = 0.1*cam.get(4) 27 | 28 | while True: 29 | ret, img =cam.read() 30 | #img = cv2.flip(img, -1) # Flip vertically 31 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 32 | 33 | faces = faceCascade.detectMultiScale( 34 | gray, 35 | scaleFactor = 1.2, 36 | minNeighbors = 5, 37 | minSize = (int(minW), int(minH)), 38 | ) 39 | 40 | for(x,y,w,h) in faces: 41 | cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2) 42 | id, confidence = recognizer.predict(gray[y:y+h,x:x+w]) 43 | 44 | # Check if confidence is less them 100 ==> "0" is perfect match 45 | if (confidence < 100): 46 | id = names[id] 47 | confidence = " {0}%".format(round(100 - confidence)) 48 | else: 49 | id = "unknown" 50 | confidence = " {0}%".format(round(100 - confidence)) 51 | 52 | cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2) 53 | cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1) 54 | 55 | cv2.imshow('camera',img) 56 | 57 | k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video 58 | if k == 27: 59 | break 60 | 61 | # Do a bit of cleanup 62 | print("\n [INFO] Exiting Program and cleanup stuff") 63 | cam.release() 64 | cv2.destroyAllWindows() 65 | -------------------------------------------------------------------------------- /CamTest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | cap = cv2.VideoCapture(0) 4 | cap.set(3,640) # set Width 5 | cap.set(4,480) # set Height 6 | while(True): 7 | ret, frame = cap.read() 8 | #frame = cv2.flip(frame, -1) #Flip camera vertically 9 | if ret: 10 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 11 | cv2.imshow('frame', frame) 12 | cv2.imshow('gray', gray) 13 | 14 | k = cv2.waitKey(30) & 0xff 15 | if k == 27: # press 'ESC' to quit 16 | break 17 | cap.release() 18 | cv2.destroyAllWindows() 19 | -------------------------------------------------------------------------------- /FaceDetection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | faceCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml') 4 | cap = cv2.VideoCapture(0) 5 | cap.set(3,640) # set Width 6 | cap.set(4,480) # set Height 7 | while True: 8 | ret, img = cap.read() 9 | #img = cv2.flip(img, -1) 10 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 11 | faces = faceCascade.detectMultiScale( 12 | gray, 13 | 
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break
cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/FaceEyeDetection.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2

# Multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
faceCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml')
eyeCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_eye.xml')

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height

while True:
    ret, img = cap.read()
    img = cv2.flip(img, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(30, 30)
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]

        eyes = eyeCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.5,
            minNeighbors=10,
            minSize=(5, 5),
        )

        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    cv2.imshow('video', img)

    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break

cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/FaceSmileDetection.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2

# Multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
faceCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml')
smileCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_smile.xml')

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height

while True:
    ret, img = cap.read()
    img = cv2.flip(img, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(30, 30)
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]

        smile = smileCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.5,
            minNeighbors=15,
            minSize=(25, 25),
        )

        for (xx, yy, ww, hh) in smile:
            cv2.rectangle(roi_color, (xx, yy), (xx + ww, yy + hh), (0, 255, 0), 2)

    cv2.imshow('video', img)

    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break

cap.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Face-Recognition-using-Raspberry-Pi
## Description
A small project that does real-time face detection and recognition on a Raspberry Pi using the OpenCV library.
## Contents
1. [What's Raspberry Pi?](#whats-raspberry-pi)
2. [What is Face Recognition?](#what-is-face-recognition)
3. [OpenCV Library](#opencv-library)
4. [Project](#project)
   * [Requirements](#requirements)
   * [Procedure](#3-procedure)

## What's Raspberry Pi?
> The Raspberry Pi is a credit-card-sized mini computer capable of doing various tasks, though with less computational power, as it doesn't have powerful hardware. The Raspberry Pi is slower than a modern laptop or desktop but is still a complete Linux computer, providing all the expected abilities that implies, at a low power-consumption level.
[Explore more](https://www.raspberrypi.org/)

## What is Face Recognition?
> Face recognition (facial recognition) is a category of biometric software that identifies people by their faces. A face is captured by a digital camera, the system is trained on it, and the system is then capable of identifying that person.

## OpenCV Library
> OpenCV (Open Source Computer Vision Library) is an open-source computer vision and machine learning software library. OpenCV was built to provide a common infrastructure for computer vision applications and to accelerate the use of machine perception in commercial products. It has C++, Python and Java interfaces and supports Windows, Linux, Mac OS, iOS and Android.
[Read more](https://opencv.org)
## Project
> In this project we will learn how to recognize faces in real time with a PiCam.

### Requirements
**Hardware**
1. [Raspberry Pi 3 Model B](https://www.raspberrypi.org/products/raspberry-pi-3-model-b/)
2. [Raspberry Pi camera module](https://www.amazon.in/gp/product/B00L1FOIIS/ref=oh_aui_detailpage_o07_s00?ie=UTF8&psc=1)

**Software**
1. [OpenCV Library](https://opencv.org)
2. [Python 3](https://www.python.org)
3. [A text editor](https://atom.io)

## 1] Introduction:
> This project was done with the "Open Source Computer Vision Library", OpenCV. We will be using a Raspberry Pi (so, Raspbian as the OS) and Python. OpenCV was designed for computational efficiency and with a strong focus on real-time applications, so it is a good fit for real-time face recognition with a camera.

## 2] Project Structure:
1. Face Detection and Data Gathering.
2. Train the System.
3. Face Recognition.

## 3] Procedure:
**Step 1: Installing the OpenCV library**
> I am using a Raspberry Pi 3 updated to the latest version of Raspbian (Stretch), so the best way to install OpenCV is to follow the excellent tutorial by Adrian Rosebrock: [Raspbian Stretch: Install OpenCV 3 + Python on your Raspberry Pi](https://www.pyimagesearch.com/2017/09/04/raspbian-stretch-install-opencv-3-python-on-your-raspberry-pi/)

> Once you have finished the tutorial, you should have an OpenCV virtual environment ready for running our experiments on your Pi.
Let's go into our virtual environment and confirm that OpenCV 3 is correctly installed.

Run the `source` command each time you open a new terminal, to ensure your environment variables are set up correctly:
`source ~/.profile`

Next, let's enter our virtual environment:
`workon cv`

If you see the text (cv) preceding your prompt, then you are in the cv virtual environment:
`(cv) pi@raspberry:~$`

Now, start your Python interpreter:
`python`

then import the OpenCV library:
`>>> import cv2`

If no error message appears, OpenCV is correctly installed in your Python virtual environment.

You can also check the OpenCV version installed:
`>>> cv2.__version__`

**Step 2: Testing the camera**
> Once you have OpenCV installed on your Raspberry Pi, let's confirm that your camera is working properly.
I am assuming that you already have a PiCam installed on your Raspberry Pi.

> You must have enabled the camera when you ran through Adrian's tutorial; otherwise the drivers will not have been installed correctly.

If you get an error like "OpenCV Error: Assertion failed", you can try to solve the issue with the command:
`sudo modprobe bcm2835-v4l2`

Once you have all drivers correctly installed, enter the Python code below in your IDE or any text editor:
```python
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height
while True:
    ret, frame = cap.read()
    if ret:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', frame)
        cv2.imshow('gray', gray)

    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break
cap.release()
cv2.destroyAllWindows()
```
The above code captures the video stream generated by your PiCam and displays it both in BGR color and in grayscale.
You can also download the code above as [CamTest.py](CamTest.py).
Now, run the program:
`python CamTest.py`

Some may find issues when trying to open the camera ("Assertion failed" error messages). That can happen if the camera was not enabled during the OpenCV installation, so the camera drivers did not install correctly. As noted above, the fix is:
`sudo modprobe bcm2835-v4l2`
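If you run your Pi headless (no monitor attached), `cv2.imshow` has nothing to draw on. As a workaround, here is a minimal sketch of my own (not part of the original tutorial) that grabs a single frame and writes it to disk so you can inspect it over SSH/SFTP; the file name `test_frame.jpg` is just an example:

```python
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height
ret, frame = cap.read()
if ret:
    # Save one frame to disk instead of displaying it in a window
    cv2.imwrite('test_frame.jpg', frame)
    print('Saved test_frame.jpg')
else:
    print('Could not read a frame -- check the camera connection/drivers')
cap.release()
```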
**Step 3: Face Detection**
> The most basic task in face recognition is, of course, face detection. Before anything else, you must "capture" a face in order to recognize it when it is compared with a new face captured in the future.

> The most common way to detect a face (or any object) is to use a "Haar cascade classifier".

> Object detection using Haar feature-based cascade classifiers is an effective object detection method proposed by Paul Viola and Michael Jones in their 2001 paper, "Rapid Object Detection using a Boosted Cascade of Simple Features". It is a machine-learning-based approach in which a cascade function is trained on a large number of positive and negative images. It is then used to detect objects in other images.

> Here we will work with face detection. Initially, the algorithm needs a lot of positive images (images of faces) and negative images (images without faces) to train the classifier. Then we need to extract features from them. The good news is that OpenCV comes with a trainer as well as a detector. If you want to train your own classifier for any object, like cars or planes, you can use OpenCV to create one. Its full details are given here: [Cascade Classifier Training](https://docs.opencv.org/3.3.0/dc/d88/tutorial_traincascade.html).

Enough theory, let's create a face detector with OpenCV!

Download the file: [FaceDetection.py](FaceDetection.py)

```python
import numpy as np
import cv2

faceCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # press 'ESC' to quit
        break
cap.release()
cv2.destroyAllWindows()
```

If you compare this with the code we used to test the camera, you will see that a few parts were added. Note the line below:
`faceCascade = cv2.CascadeClassifier('/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml')`
This is the line that loads the classifier. You can point it at the full path of the XML file, as here, or copy the file into a directory under your project (for example "Cascades/") and load it with a relative path.

Then we set up our camera and, inside the loop, convert the input video to grayscale (the same as before).

Now we call our classifier function, passing it some very important parameters: the scale factor, the number of neighbors, and the minimum size of a detected face.

```python
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.2,
    minNeighbors=5,
    minSize=(20, 20)
)
```

Here,

* `gray` is the input grayscale image.
* `scaleFactor` specifies how much the image size is reduced at each image scale; it is used to build the scale pyramid.
* `minNeighbors` specifies how many neighbors each candidate rectangle should have in order to be retained. A higher number gives fewer false positives.
* `minSize` is the minimum rectangle size to be considered a face.

The function detects faces in the image. Next, we "mark" the faces in the image with, for example, a blue rectangle. This is done with this portion of the code:
```python
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
```
If faces are found, the function returns the positions of the detected faces as rectangles with top-left corner (x,y), width "w" and height "h" ==> (x,y,w,h).

Now, run the above Python script in your Python environment, from the Raspberry Pi terminal:
`python FaceDetection.py`

After executing the above code you should see a window pop up with a rectangle drawn around your face.
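You can also sanity-check the detector without the live stream. The sketch below is my own addition, assuming a test photo on disk (for example, the `test_frame.jpg` saved in the camera step above); it runs the same classifier on a single still image and writes out an annotated copy:

```python
import cv2

# Assumes the cascade XML is in the current directory and test_frame.jpg exists
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('test_frame.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(20, 20))
print("Found {0} face(s)".format(len(faces)))
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
cv2.imwrite('faces_marked.jpg', img)  # open this file to check the rectangles
```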
You can also include classifiers for "eye detection" or even "smile detection". In those cases, you include the classifier call and the rectangle drawing inside the face loop, because it would make no sense to detect an eye or a smile outside of a face.

> * [FaceEyeDetection.py](FaceEyeDetection.py)
> * [FaceSmileDetection.py](FaceSmileDetection.py)
> * [FaceEyeSmileDetection.py](FaceEyeSmileDetection.py)

**Step 4: Data Gathering**
> Let's start the first phase of our project. Starting from face detection, we will create a dataset that stores, for each id, a group of grayscale photos of the portion of the image that was used for face detection.

First, create a directory in which you will develop your project, for example, FaceRecognition:
`mkdir FaceRecognition`

In this directory, besides the 3 Python scripts that we will create for our project, we must also save the facial classifier. You can download it from above: [haarcascade_frontalface_default.xml](haarcascade_frontalface_default.xml)

Next, create a subdirectory in which we will store our facial samples, and name it "dataset":
`mkdir dataset`

> Download [01_face_dataset.py](01_face_dataset.py)

```python
import cv2

cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
face_id = input('\n Enter user ID and press <return> ==> ')
print("\n Initializing face capture. Look at the camera and wait ...")
# Initialize individual face sample count
count = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        count += 1
        # Save the captured face region into the dataset folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])
    cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' to exit the video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face samples and stop the video
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
```
The code is very similar to the code that we saw for face detection. What we added is an input prompt to capture a user id, which should be an integer (1, 2, 3, etc.):
`face_id = input('\n Enter user ID and press <return> ==> ')`

For each captured frame, we save the detected face region as a file in the "dataset" directory:
`cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])`

Each file's name follows the structure:
`User.face_id.count.jpg`

For example, for a user with face_id = 1, the 4th sample file in the dataset/ directory will be something like:
`User.1.4.jpg`

In the code you can see that there's a `count` variable that counts the number of images captured. It is capped at `30`; the more samples you capture, the more accurately the system will work.
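Before moving on to training, it is worth checking what was actually collected. The sketch below is my own addition (not part of the original tutorial): it parses the `User.<face_id>.<count>.jpg` naming scheme described above and prints how many samples each id has:

```python
import os
from collections import Counter

samples = Counter()
for f in os.listdir('dataset'):
    parts = f.split('.')  # ['User', '<face_id>', '<count>', 'jpg']
    if len(parts) == 4 and parts[0] == 'User':
        samples[parts[1]] += 1

for face_id, n in sorted(samples.items()):
    print('id {0}: {1} samples'.format(face_id, n))
```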
**Step 5: Trainer**
> In this second phase, we take all the user data from our dataset and "train" the OpenCV recognizer. This is done directly by a specific OpenCV function. The result will be a `.yml` file saved in a "trainer/" directory.

So, let's create the trainer directory first:
`mkdir trainer`

Download [02_face_training.py](02_face_training.py)
```python
import cv2
import numpy as np
from PIL import Image
import os

# Path for the face image database
path = 'dataset'

recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml")

# Function to get the images and label data
def getImagesAndLabels(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y+h, x:x+w])
            ids.append(id)
    return faceSamples, ids

print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('/home/pi/FaceRecognition/trainer/trainer.yml')
# Print the number of faces trained and end the program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
```
Confirm that you have the PIL library installed on your Raspberry Pi. If not, run the command below in the terminal:
`pip install pillow`

As our recognizer we will use the LBPH (Local Binary Patterns Histograms) face recognizer, included in the OpenCV package (note that the `cv2.face` module ships with the contrib build of OpenCV). It is created by the following line:
`recognizer = cv2.face.LBPHFaceRecognizer_create()`

The function `getImagesAndLabels(path)` takes all photos in the "dataset/" directory and returns 2 arrays, "ids" and "faces". With those arrays as input, we train our recognizer:
`recognizer.train(faces, np.array(ids))`

As a result, a file named "trainer.yml" will be saved in the trainer directory that we previously created.
> _Note: Whenever you collect a new dataset (i.e., run program 1), you must run program 2 again to retrain the recognizer._
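Before going live, you can smoke-test the freshly trained model on one of the saved samples. This sketch is my own addition; it assumes the paths used earlier in this tutorial (`dataset/` and `/home/pi/FaceRecognition/trainer/trainer.yml`) and a non-empty dataset, so adjust it to your setup:

```python
import os
import cv2

recognizer = cv2.face.LBPHFaceRecognizer_create()  # needs the opencv-contrib 'face' module
recognizer.read('/home/pi/FaceRecognition/trainer/trainer.yml')

sample = sorted(os.listdir('dataset'))[0]  # any saved face crop
face = cv2.imread(os.path.join('dataset', sample), cv2.IMREAD_GRAYSCALE)
id, confidence = recognizer.predict(face)
print('{0} -> predicted id {1} (confidence {2:.1f}, lower is better)'.format(sample, id, confidence))
```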
**Step 6: Recognizer**
> Now we have reached the final phase of our project. Here, we capture a fresh face with our camera, and if this person had their face captured and trained before, our recognizer makes a "prediction", returning its id and an index that shows how confident the recognizer is about this match.

Download [03_face_recognition.py](03_face_recognition.py)
```python
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('/home/pi/FaceRecognition/trainer/trainer.yml')
cascadePath = "/home/pi/opencv-3.4.1/data/haarcascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

font = cv2.FONT_HERSHEY_SIMPLEX

# Initiate id counter
id = 0

# Names related to ids: example ==> KUNAL: id=1, etc.
names = ['None', 'Kunal', 'Kaushik', 'Atharv', 'Z', 'W']

# Initialize and start real-time video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    ret, img = cam.read()
    # img = cv2.flip(img, -1)  # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])

        # Check if confidence is less than 100 ==> "0" is a perfect match
        if confidence < 100:
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            id = "unknown"
            confidence = " {0}%".format(round(100 - confidence))

        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)

    cv2.imshow('camera', img)

    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
```
We include here a new array, so we can display names instead of numbered ids:

`names = ['None', 'Kunal', 'Kaushik', 'Atharv', 'Z', 'W']`

So, for example, Kunal is the user with id = 1, Kaushik has id = 2, and so on.

Next, we detect a face the same way we did before, with the Haar cascade classifier. With a detected face in hand, we can call the most important function in the above code:
`id, confidence = recognizer.predict(gray portion of the face)`

`recognizer.predict()` takes as a parameter the captured portion of the face to be analyzed and returns its probable owner, indicating its id and how confident the recognizer is about this match.

> _Note: the confidence index returns **zero** for a perfect match; it is a distance, so lower values mean a better match._
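Since the confidence value is a distance, you can make the script stricter about rejecting strangers by lowering the fixed `< 100` cut-off used above. The helper below is my own sketch, not part of the original code; the threshold of 70 is a hypothetical starting point that you should tune on your own data:

```python
def label_face(recognizer, names, face_gray, threshold=70):
    """Return (label, confidence) for a grayscale face crop.
    threshold=70 is a hypothetical value; tune it on your own data."""
    id, confidence = recognizer.predict(face_gray)
    if confidence < threshold:
        return names[id], confidence  # confident enough: use the known name
    return "unknown", confidence      # too far from every trained face
```

For example, in the main loop you could replace the fixed `if (confidence < 100):` block with a call like `label, confidence = label_face(recognizer, names, gray[y:y+h, x:x+w])`.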
**Below is the final result image; you should get a similar output.**

*(Result screenshots were embedded here in the original README.)*
## Conclusion:
> I hope this project can help others find their way into the exciting world of IoT!

For more projects, follow me on GitHub: [Kunal Yelne](https://github.com/kunalyelne) :+1:
Thank you :heart:

## Credits
[Kunal Yelne](https://github.com/kunalyelne)
3rd Year, CSE Department,
[Indian Institute of Information Technology Nagpur](https://github.com/iiit-nagpur)
--------------------------------------------------------------------------------