├── Screen Shot 2019-12-12 at 8.01.02 PM.jpg
├── README.md
├── LICENSE
├── 2. face training.py
├── 1. face dataset.py
├── 3. face recognition.py
├── 2.ipynb
├── 1.ipynb
└── 3.ipynb

/Screen Shot 2019-12-12 at 8.01.02 PM.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chandrikadeb7/Face-Recognition-in-Python/HEAD/Screen Shot 2019-12-12 at 8.01.02 PM.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Face-Recognition-in-Python
A real-time face recognition system developed in Python using Pillow, OpenCV and NumPy.

The steps to run this project are as follows:
1. After unzipping, create two empty folders named 'dataset' and 'trainer' in the project folder, and make sure 'haarcascade_frontalface_default.xml' (bundled with OpenCV) is in the same folder as the scripts; see the setup sketch below.
2. Run "1. face dataset.py" and enter user id 1 to capture the first face.
3. Run "1. face dataset.py" again with user id 2 and another face.
4. Run "2. face training.py", then run "3. face recognition.py".
5. Tadaa!! The face recognition works.

To recognize more than two faces, capture a dataset for each additional numeric id and append the matching name to the `names` list in "3. face recognition.py".
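
A minimal setup sketch for step 1, assuming OpenCV was installed with pip (`cv2.data.haarcascades` points at the wheel's bundled cascade files; adjust the path for other builds):

```python
import os
import shutil

import cv2

# Create the folders the scripts expect.
for folder in ("dataset", "trainer"):
    os.makedirs(folder, exist_ok=True)

# Copy OpenCV's bundled Haar cascade next to the scripts.
# Note: cv2.data may be absent in non-pip builds of OpenCV.
cascade = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
shutil.copy(cascade, ".")
```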

Thanks!!

![alt text](https://github.com/chandrikadeb7/Face-Recognition-in-Python/blob/master/Screen%20Shot%202019-12-12%20at%208.01.02%20PM.jpg)

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 Chandrika Deb

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/2. face training.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from PIL import Image  # Pillow package
import os

# Path for face image database
path = 'dataset'
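
# cv2.face lives in OpenCV's contrib modules; the plain opencv-python wheel
# does not ship it. A hedged guard (assumes a pip-based install):
if not hasattr(cv2, "face"):
    raise ImportError("cv2.face not found; try: pip install opencv-contrib-python")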

recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Function to read the dataset images and build the label data
def getImagesAndLabels(path):

    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    ids = []

    for imagePath in imagePaths:

        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale
        img_numpy = np.array(PIL_img, 'uint8')

        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)

        for (x, y, w, h) in faces:
            faceSamples.append(img_numpy[y:y+h, x:x+w])
            ids.append(id)

    return faceSamples, ids

print("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))

# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml')  # recognizer.save() worked on Mac, but not on Pi

# Print the number of faces trained and end the program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))

--------------------------------------------------------------------------------
/1. face dataset.py:
--------------------------------------------------------------------------------
import cv2
import os

cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

# Make sure 'haarcascade_frontalface_default.xml' is in the same folder as this code
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# For each person, enter one numeric face id (numbering must start from 1; id 1 is the label of person 1)
face_id = input('\n enter user id and press <return> ==> ')

print("\n [INFO] Initializing face capture. Look at the camera and wait ...")
# Initialize individual sampling face count
count = 0

# Start detecting faces and take 30 pictures
while(True):

    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)

    for (x, y, w, h) in faces:

        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        count += 1

        # Save the captured image into the dataset folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])

        cv2.imshow('image', img)

    k = cv2.waitKey(100) & 0xff  # Press 'ESC' to exit the video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face samples and stop the video
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
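
# Filenames encode the label as dataset/User.<id>.<count>.jpg; the training
# script recovers the id by splitting on '.'. A quick sanity check (sketch,
# assumes the id entered above is numeric):
sample = "User." + str(face_id) + ".1.jpg"
assert int(sample.split(".")[1]) == int(face_id)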

--------------------------------------------------------------------------------
/3. face recognition.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import os

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')  # load the trained model
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

font = cv2.FONT_HERSHEY_SIMPLEX

# Initiate the id counter with the number of persons you want to include
id = 2  # two persons

# Key in the names starting from the second entry; ids begin at 1, so leave the first entry empty
names = ['', 'Chandrika', 'Aditya']

# Initialize and start real-time video capture
cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height

# Define the minimum window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:

    ret, img = cam.read()

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.2,
        minNeighbors = 5,
        minSize = (int(minW), int(minH)),
    )

    for (x, y, w, h) in faces:

        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)

        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])
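
        # LBPH "confidence" is a distance: 0 is a perfect match and larger
        # values are worse matches. The cutoff of 100 below is an untuned
        # heuristic; adjust it for your camera and lighting. Note that
        # 100 - confidence can go negative for weak matches, so the
        # "unknown" branch may print odd percentages.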
        # Check whether the confidence is less than 100 ==> "0" is a perfect match
        if (confidence < 100):
            id = names[id]
            confidence = " {0}%".format(round(100 - confidence))
        else:
            id = "unknown"
            confidence = " {0}%".format(round(100 - confidence))

        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)

    cv2.imshow('camera', img)

    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

--------------------------------------------------------------------------------
/2.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Path for face image database\n",
    "path = 'dataset'\n",
    "\n",
    "recognizer = cv2.face.LBPHFaceRecognizer_create()\n",
    "detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      " [INFO] Training faces. It will take a few seconds. Wait ...\n",
      "\n",
      " [INFO] 1 faces trained. Exiting Program\n"
     ]
    }
   ],
   "source": [
    "# function to get the images and label data\n",
    "def getImagesAndLabels(path):\n",
    "\n",
    "    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]\n",
    "    faceSamples = []\n",
    "    ids = []\n",
    "\n",
    "    for imagePath in imagePaths:\n",
    "\n",
    "        PIL_img = Image.open(imagePath).convert('L')  # convert it to grayscale\n",
    "        img_numpy = np.array(PIL_img, 'uint8')\n",
    "\n",
    "        id = int(os.path.split(imagePath)[-1].split(\".\")[1])\n",
    "        faces = detector.detectMultiScale(img_numpy)\n",
    "\n",
    "        for (x, y, w, h) in faces:\n",
    "            faceSamples.append(img_numpy[y:y+h, x:x+w])\n",
    "            ids.append(id)\n",
    "\n",
    "    return faceSamples, ids\n",
    "\n",
    "print(\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\n",
    "faces, ids = getImagesAndLabels(path)\n",
    "recognizer.train(faces, np.array(ids))\n",
    "\n",
    "# Save the model into trainer/trainer.yml\n",
    "recognizer.write('trainer/trainer.yml')  # recognizer.save() worked on Mac, but not on Pi\n",
    "\n",
    "# Print the number of faces trained and end the program\n",
    "print(\"\\n [INFO] {0} faces trained. Exiting Program\".format(len(np.unique(ids))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
--------------------------------------------------------------------------------
/1.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import cv2\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "cam = cv2.VideoCapture(0)\n",
    "cam.set(3, 640)  # set video width\n",
    "cam.set(4, 480)  # set video height\n",
    "\n",
    "face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### The next cell will take a few seconds! Be prepared to have your photo taken!\n",
    "### Remember to change your facial expression a little from time to time!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      " enter user id and press <return> ==> 1\n",
      "\n",
      " [INFO] Initializing face capture. Look at the camera and wait ...\n",
      "\n",
      " [INFO] Exiting Program and cleanup stuff\n"
     ]
    }
   ],
   "source": [
    "# For each person, enter one numeric face id\n",
    "face_id = input('\\n enter user id and press <return> ==> ')\n",
    "\n",
    "print(\"\\n [INFO] Initializing face capture. Look at the camera and wait ...\")\n",
    "# Initialize individual sampling face count\n",
    "count = 0\n",
    "\n",
    "while(True):\n",
    "\n",
    "    ret, img = cam.read()\n",
    "    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
    "    faces = face_detector.detectMultiScale(gray, 1.3, 5)\n",
    "\n",
    "    for (x, y, w, h) in faces:\n",
    "\n",
    "        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n",
    "        count += 1\n",
    "\n",
    "        # Save the captured image into the dataset folder\n",
    "        cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' + str(count) + \".jpg\", gray[y:y+h, x:x+w])\n",
    "\n",
    "        cv2.imshow('image', img)\n",
    "\n",
    "    k = cv2.waitKey(100) & 0xff  # Press 'ESC' to exit the video\n",
    "    if k == 27:\n",
    "        break\n",
    "    elif count >= 80:  # Take 80 face samples and stop the video\n",
    "        break\n",
    "\n",
    "# Do a bit of cleanup\n",
    "print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n",
    "cam.release()\n",
    "cv2.destroyAllWindows()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Now you can go to the 'dataset' folder and see your selfies!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
--------------------------------------------------------------------------------
/3.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "recognizer = cv2.face.LBPHFaceRecognizer_create()\n",
    "recognizer.read('trainer/trainer.yml')\n",
    "cascadePath = \"haarcascade_frontalface_default.xml\"\n",
    "faceCascade = cv2.CascadeClassifier(cascadePath)\n",
    "\n",
    "font = cv2.FONT_HERSHEY_SIMPLEX"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Remember to change the information below!!!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Initiate the id counter\n",
    "id = 1\n",
    "\n",
    "# names related to ids: example ==> xie yuan: id=1, etc.\n",
    "names = ['', 'xie yuan']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      " [INFO] Exiting Program and cleanup stuff\n"
     ]
    }
   ],
   "source": [
    "# Initialize and start real-time video capture\n",
    "cam = cv2.VideoCapture(0)\n",
    "cam.set(3, 640)  # set video width\n",
    "cam.set(4, 480)  # set video height\n",
    "\n",
    "# Define the minimum window size to be recognized as a face\n",
    "minW = 0.1*cam.get(3)\n",
    "minH = 0.1*cam.get(4)\n",
    "\n",
    "while True:\n",
    "\n",
    "    ret, img = cam.read()\n",
    "\n",
    "    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
    "\n",
    "    faces = faceCascade.detectMultiScale(\n",
    "        gray,\n",
    "        scaleFactor = 1.2,\n",
    "        minNeighbors = 5,\n",
    "        minSize = (int(minW), int(minH)),\n",
    "    )\n",
    "\n",
    "    for (x, y, w, h) in faces:\n",
    "\n",
    "        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n",
    "\n",
    "        id, confidence = recognizer.predict(gray[y:y+h, x:x+w])\n",
    "\n",
    "        # Check whether the confidence is less than 100 ==> \"0\" is a perfect match\n",
    "        if (confidence < 100):\n",
    "            id = names[id]\n",
    "            confidence = \" {0}%\".format(round(100 - confidence))\n",
    "        else:\n",
    "            id = \"unknown\"\n",
    "            confidence = \" {0}%\".format(round(100 - confidence))\n",
    "\n",
    "        cv2.putText(img, str(id), (x+5, y-5), font, 1, (255, 255, 255), 2)\n",
    "        cv2.putText(img, str(confidence), (x+5, y+h-5), font, 1, (255, 255, 0), 1)\n",
    "\n",
    "    cv2.imshow('camera', img)\n",
    "\n",
    "    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video\n",
    "    if k == 27:\n",
    "        break\n",
    "\n",
    "# Do a bit of cleanup\n",
    "print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n",
    "cam.release()\n",
    "cv2.destroyAllWindows()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [default]",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
--------------------------------------------------------------------------------