├── 1. face dataset.py ├── 1.ipynb ├── 2. face training.py ├── 2.ipynb ├── 3. face recognition.py ├── 3.ipynb ├── README.md ├── folder.png ├── haarcascade_frontalface_default.xml └── xie yuan and his friend.png /1. face dataset.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Capture multiple Faces from multiple users to be stored on a DataBase (dataset directory) 3 | ==> Faces will be stored on a directory: dataset/ (if does not exist, pls create one) 4 | ==> Each face will have a unique numeric integer ID as 1, 2, 3, etc 5 | 6 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 7 | 8 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 9 | 10 | ''' 11 | 12 | import cv2 13 | import os 14 | 15 | cam = cv2.VideoCapture(0) 16 | cam.set(3, 640) # set video width 17 | cam.set(4, 480) # set video height 18 | 19 | #make sure 'haarcascade_frontalface_default.xml' is in the same folder as this code 20 | face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 21 | 22 | # For each person, enter one numeric face id (must enter number start from 1, this is the lable of person 1) 23 | face_id = input('\n enter user id end press ==> ') 24 | 25 | print("\n [INFO] Initializing face capture. Look the camera and wait ...") 26 | # Initialize individual sampling face count 27 | count = 0 28 | 29 | #start detect your face and take 30 pictures 30 | while(True): 31 | 32 | ret, img = cam.read() 33 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 34 | faces = face_detector.detectMultiScale(gray, 1.3, 5) 35 | 36 | for (x,y,w,h) in faces: 37 | 38 | cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) 39 | count += 1 40 | 41 | # Save the captured image into the datasets folder 42 | cv2.imwrite("dataset/User." + str(face_id) + '.' 
+ str(count) + ".jpg", gray[y:y+h,x:x+w]) 43 | 44 | cv2.imshow('image', img) 45 | 46 | k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video 47 | if k == 27: 48 | break 49 | elif count >= 30: # Take 30 face sample and stop video 50 | break 51 | 52 | # Do a bit of cleanup 53 | print("\n [INFO] Exiting Program and cleanup stuff") 54 | cam.release() 55 | cv2.destroyAllWindows() 56 | 57 | 58 | -------------------------------------------------------------------------------- /1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import cv2\n", 12 | "import os" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 2, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "cam = cv2.VideoCapture(0)\n", 24 | "cam.set(3, 640) # set video width\n", 25 | "cam.set(4, 480) # set video height\n", 26 | "\n", 27 | "face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "### Next cell will take few seconds ! Be prepared taking photos!\n", 35 | "### Remmember change your facial expression a little bit sometimes!" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 3, 41 | "metadata": { 42 | "collapsed": false 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "\n", 50 | " enter user id end press ==> 1\n", 51 | "\n", 52 | " [INFO] Initializing face capture. 
Look the camera and wait ...\n", 53 | "\n", 54 | " [INFO] Exiting Program and cleanup stuff\n" 55 | ] 56 | } 57 | ], 58 | "source": [ 59 | "# For each person, enter one numeric face id\n", 60 | "face_id = input('\\n enter user id end press ==> ')\n", 61 | "\n", 62 | "print(\"\\n [INFO] Initializing face capture. Look the camera and wait ...\")\n", 63 | "# Initialize individual sampling face count\n", 64 | "count = 0\n", 65 | "\n", 66 | "while(True):\n", 67 | "\n", 68 | " ret, img = cam.read()\n", 69 | " gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n", 70 | " faces = face_detector.detectMultiScale(gray, 1.3, 5)\n", 71 | "\n", 72 | " for (x,y,w,h) in faces:\n", 73 | "\n", 74 | " cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2) \n", 75 | " count += 1\n", 76 | "\n", 77 | " # Save the captured image into the datasets folder\n", 78 | " cv2.imwrite(\"dataset/User.\" + str(face_id) + '.' + str(count) + \".jpg\", gray[y:y+h,x:x+w])\n", 79 | "\n", 80 | " cv2.imshow('image', img)\n", 81 | "\n", 82 | " k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video\n", 83 | " if k == 27:\n", 84 | " break\n", 85 | " elif count >= 80: # Take 80 face sample and stop video\n", 86 | " break\n", 87 | "\n", 88 | "# Do a bit of cleanup\n", 89 | "print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n", 90 | "cam.release()\n", 91 | "cv2.destroyAllWindows()" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "## Now you can go to 'dataset' folder see your selfies!" 
99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": { 105 | "collapsed": true 106 | }, 107 | "outputs": [], 108 | "source": [] 109 | } 110 | ], 111 | "metadata": { 112 | "anaconda-cloud": {}, 113 | "kernelspec": { 114 | "display_name": "Python [default]", 115 | "language": "python", 116 | "name": "python3" 117 | }, 118 | "language_info": { 119 | "codemirror_mode": { 120 | "name": "ipython", 121 | "version": 3 122 | }, 123 | "file_extension": ".py", 124 | "mimetype": "text/x-python", 125 | "name": "python", 126 | "nbconvert_exporter": "python", 127 | "pygments_lexer": "ipython3", 128 | "version": "3.5.2" 129 | } 130 | }, 131 | "nbformat": 4, 132 | "nbformat_minor": 1 133 | } 134 | -------------------------------------------------------------------------------- /2. face training.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Training Multiple Faces stored on a DataBase: 3 | ==> Each face should have a unique numeric integer ID as 1, 2, 3, etc 4 | ==> LBPH computed model will be saved on trainer/ directory. 
(if it does not exist, please create one) 5 | ==> for using PIL, install pillow library with "pip install pillow" 6 | 7 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 8 | 9 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 10 | 11 | ''' 12 | 13 | import cv2 14 | import numpy as np 15 | from PIL import Image #pillow package 16 | import os 17 | 18 | # Path for face image database 19 | path = 'dataset' 20 | 21 | recognizer = cv2.face.LBPHFaceRecognizer_create() 22 | detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml"); 23 | 24 | # function to get the images and label data 25 | def getImagesAndLabels(path): 26 | 27 | imagePaths = [os.path.join(path,f) for f in os.listdir(path)] 28 | faceSamples=[] 29 | ids = [] 30 | 31 | for imagePath in imagePaths: 32 | 33 | PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale 34 | img_numpy = np.array(PIL_img,'uint8') 35 | 36 | id = int(os.path.split(imagePath)[-1].split(".")[1]) 37 | faces = detector.detectMultiScale(img_numpy) 38 | 39 | for (x,y,w,h) in faces: 40 | faceSamples.append(img_numpy[y:y+h,x:x+w]) 41 | ids.append(id) 42 | 43 | return faceSamples,ids 44 | 45 | print ("\n [INFO] Training faces. It will take a few seconds. Wait ...") 46 | faces,ids = getImagesAndLabels(path) 47 | recognizer.train(faces, np.array(ids)) 48 | 49 | # Save the model into trainer/trainer.yml 50 | recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi 51 | 52 | # Print the number of faces trained and end program 53 | print("\n [INFO] {0} faces trained. 
Exiting Program".format(len(np.unique(ids)))) 54 | -------------------------------------------------------------------------------- /2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import cv2\n", 12 | "import numpy as np\n", 13 | "from PIL import Image\n", 14 | "import os" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 2, 20 | "metadata": { 21 | "collapsed": false, 22 | "scrolled": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "# Path for face image database\n", 27 | "path = 'dataset'\n", 28 | "\n", 29 | "recognizer = cv2.face.LBPHFaceRecognizer_create()\n", 30 | "detector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\");\n" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 3, 36 | "metadata": { 37 | "collapsed": false 38 | }, 39 | "outputs": [ 40 | { 41 | "name": "stdout", 42 | "output_type": "stream", 43 | "text": [ 44 | "\n", 45 | " [INFO] Training faces. It will take a few seconds. Wait ...\n", 46 | "\n", 47 | " [INFO] 1 faces trained. 
Exiting Program\n" 48 | ] 49 | } 50 | ], 51 | "source": [ 52 | "# function to get the images and label data\n", 53 | "def getImagesAndLabels(path):\n", 54 | "\n", 55 | " imagePaths = [os.path.join(path,f) for f in os.listdir(path)] \n", 56 | " faceSamples=[]\n", 57 | " ids = []\n", 58 | "\n", 59 | " for imagePath in imagePaths:\n", 60 | "\n", 61 | " PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale\n", 62 | " img_numpy = np.array(PIL_img,'uint8')\n", 63 | "\n", 64 | " id = int(os.path.split(imagePath)[-1].split(\".\")[1])\n", 65 | " faces = detector.detectMultiScale(img_numpy)\n", 66 | "\n", 67 | " for (x,y,w,h) in faces:\n", 68 | " faceSamples.append(img_numpy[y:y+h,x:x+w])\n", 69 | " ids.append(id)\n", 70 | "\n", 71 | " return faceSamples,ids\n", 72 | "\n", 73 | "print (\"\\n [INFO] Training faces. It will take a few seconds. Wait ...\")\n", 74 | "faces,ids = getImagesAndLabels(path)\n", 75 | "recognizer.train(faces, np.array(ids))\n", 76 | "\n", 77 | "# Save the model into trainer/trainer.yml\n", 78 | "recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi\n", 79 | "\n", 80 | "# Print the numer of faces trained and end program\n", 81 | "print(\"\\n [INFO] {0} faces trained. 
Exiting Program\".format(len(np.unique(ids))))" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "metadata": { 88 | "collapsed": true 89 | }, 90 | "outputs": [], 91 | "source": [] 92 | } 93 | ], 94 | "metadata": { 95 | "anaconda-cloud": {}, 96 | "kernelspec": { 97 | "display_name": "Python [default]", 98 | "language": "python", 99 | "name": "python3" 100 | }, 101 | "language_info": { 102 | "codemirror_mode": { 103 | "name": "ipython", 104 | "version": 3 105 | }, 106 | "file_extension": ".py", 107 | "mimetype": "text/x-python", 108 | "name": "python", 109 | "nbconvert_exporter": "python", 110 | "pygments_lexer": "ipython3", 111 | "version": "3.5.2" 112 | } 113 | }, 114 | "nbformat": 4, 115 | "nbformat_minor": 1 116 | } 117 | -------------------------------------------------------------------------------- /3. face recognition.py: -------------------------------------------------------------------------------- 1 | '''' 2 | Real Time Face Recogition 3 | ==> Each face stored on dataset/ dir, should have a unique numeric integer ID as 1, 2, 3, etc 4 | ==> LBPH computed model (trained faces) should be on trainer/ dir 5 | Based on original code by Anirban Kar: https://github.com/thecodacus/Face-Recognition 6 | 7 | Developed by Marcelo Rovai - MJRoBot.org @ 21Feb18 8 | 9 | ''' 10 | 11 | import cv2 12 | import numpy as np 13 | import os 14 | 15 | recognizer = cv2.face.LBPHFaceRecognizer_create() 16 | recognizer.read('trainer/trainer.yml') #load trained model 17 | cascadePath = "haarcascade_frontalface_default.xml" 18 | faceCascade = cv2.CascadeClassifier(cascadePath); 19 | 20 | font = cv2.FONT_HERSHEY_SIMPLEX 21 | 22 | #iniciate id counter, the number of persons you want to include 23 | id = 2 #two persons (e.g. 
Jacob, Jack) 24 | 25 | 26 | names = ['','Jacob','Jack'] #key in names, start from the second place, leave first empty 27 | 28 | # Initialize and start realtime video capture 29 | cam = cv2.VideoCapture(0) 30 | cam.set(3, 640) # set video width 31 | cam.set(4, 480) # set video height 32 | 33 | # Define min window size to be recognized as a face 34 | minW = 0.1*cam.get(3) 35 | minH = 0.1*cam.get(4) 36 | 37 | while True: 38 | 39 | ret, img =cam.read() 40 | 41 | gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 42 | 43 | faces = faceCascade.detectMultiScale( 44 | gray, 45 | scaleFactor = 1.2, 46 | minNeighbors = 5, 47 | minSize = (int(minW), int(minH)), 48 | ) 49 | 50 | for(x,y,w,h) in faces: 51 | 52 | cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2) 53 | 54 | id, confidence = recognizer.predict(gray[y:y+h,x:x+w]) 55 | 56 | # Check if confidence is less than 100 ==> "0" is perfect match 57 | if (confidence < 100): 58 | id = names[id] 59 | confidence = " {0}%".format(round(100 - confidence)) 60 | else: 61 | id = "unknown" 62 | confidence = " {0}%".format(round(100 - confidence)) 63 | 64 | cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2) 65 | cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1) 66 | 67 | cv2.imshow('camera',img) 68 | 69 | k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video 70 | if k == 27: 71 | break 72 | 73 | # Do a bit of cleanup 74 | print("\n [INFO] Exiting Program and cleanup stuff") 75 | cam.release() 76 | cv2.destroyAllWindows() 77 | -------------------------------------------------------------------------------- /3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import cv2\n", 12 | "import numpy as np\n", 13 | "import os " 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | 
"metadata": { 20 | "collapsed": true 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "recognizer = cv2.face.LBPHFaceRecognizer_create()\n", 25 | "recognizer.read('trainer/trainer.yml')\n", 26 | "cascadePath = \"haarcascade_frontalface_default.xml\"\n", 27 | "faceCascade = cv2.CascadeClassifier(cascadePath);\n", 28 | "\n", 29 | "font = cv2.FONT_HERSHEY_SIMPLEX" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## Remember to change the information below!!! " 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 3, 42 | "metadata": { 43 | "collapsed": true 44 | }, 45 | "outputs": [], 46 | "source": [ 47 | "#iniciate id counter\n", 48 | "id = 1\n", 49 | "\n", 50 | "# names related to ids: example ==> Marcelo: id=1, etc\n", 51 | "names = ['','xie yuan'] " 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 4, 57 | "metadata": { 58 | "collapsed": false 59 | }, 60 | "outputs": [ 61 | { 62 | "name": "stdout", 63 | "output_type": "stream", 64 | "text": [ 65 | "\n", 66 | " [INFO] Exiting Program and cleanup stuff\n" 67 | ] 68 | } 69 | ], 70 | "source": [ 71 | "# Initialize and start realtime video capture\n", 72 | "cam = cv2.VideoCapture(0)\n", 73 | "cam.set(3, 640) # set video widht\n", 74 | "cam.set(4, 480) # set video height\n", 75 | "\n", 76 | "# Define min window size to be recognized as a face\n", 77 | "minW = 0.1*cam.get(3)\n", 78 | "minH = 0.1*cam.get(4)\n", 79 | "\n", 80 | "while True:\n", 81 | "\n", 82 | " ret, img =cam.read()\n", 83 | "\n", 84 | " gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n", 85 | "\n", 86 | " faces = faceCascade.detectMultiScale( \n", 87 | " gray,\n", 88 | " scaleFactor = 1.2,\n", 89 | " minNeighbors = 5,\n", 90 | " minSize = (int(minW), int(minH)),\n", 91 | " )\n", 92 | "\n", 93 | " for(x,y,w,h) in faces:\n", 94 | "\n", 95 | " cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n", 96 | "\n", 97 | " id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n", 
98 | "\n", 99 | " # Check if confidence is less them 100 ==> \"0\" is perfect match \n", 100 | " if (confidence < 100):\n", 101 | " id = names[id]\n", 102 | " confidence = \" {0}%\".format(round(100 - confidence))\n", 103 | " else:\n", 104 | " id = \"unknown\"\n", 105 | " confidence = \" {0}%\".format(round(100 - confidence))\n", 106 | " \n", 107 | " cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)\n", 108 | " cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1) \n", 109 | " \n", 110 | " cv2.imshow('camera',img) \n", 111 | "\n", 112 | " k = cv2.waitKey(10) & 0xff # Press 'ESC' for exiting video\n", 113 | " if k == 27:\n", 114 | " break\n", 115 | "\n", 116 | "# Do a bit of cleanup\n", 117 | "print(\"\\n [INFO] Exiting Program and cleanup stuff\")\n", 118 | "cam.release()\n", 119 | "cv2.destroyAllWindows()" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [] 130 | } 131 | ], 132 | "metadata": { 133 | "anaconda-cloud": {}, 134 | "kernelspec": { 135 | "display_name": "Python [default]", 136 | "language": "python", 137 | "name": "python3" 138 | }, 139 | "language_info": { 140 | "codemirror_mode": { 141 | "name": "ipython", 142 | "version": 3 143 | }, 144 | "file_extension": ".py", 145 | "mimetype": "text/x-python", 146 | "name": "python", 147 | "nbconvert_exporter": "python", 148 | "pygments_lexer": "ipython3", 149 | "version": "3.5.2" 150 | } 151 | }, 152 | "nbformat": 4, 153 | "nbformat_minor": 1 154 | } 155 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # easy-real-time-face-recognition-python 2 | 3 | ## packages needed 4 | 1. opencv 5 | 2. numpy 6 | 3. pillow 7 | 8 | ## There are three steps for face recognition: 9 | 1. 
Collecting face data (your face pictures) and labels and save to dataset folder. (code 1) 10 | 2. Input face data and labels into model to train a recognition model. (code 2) 11 | 3. Open up your webcam to start real time face recognition. (code 3) 12 | 13 | ## Instructions: 14 | 1. Download this repository as a zip file and unzip it to a folder. 15 | 16 | 2. Create two empty folders named 'dataset' and 'trainer' in the above folder. 17 | 18 | 19 | 20 | 3. Run the first code, enter number '1' (for first person), then the computer will take your face pictures and save into 'dataset' folder. 21 | When it finishes taking pictures, "[INFO] Exiting Program and cleanup stuff" pops up. 22 | 23 | (3.5. Run the first code again, enter number '2' (for second person), follow the same procedure.) 24 | 25 | 4. Run the second code, just wait several seconds to train the model. 26 | 27 | 5. Run the third code, it will open your camera and start real time face recognition. 28 | 29 | ## 6. enjoy! 30 | ### hahahaha 31 | 32 | 33 | -------------------------------------------------------------------------------- /folder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Jacob12138xieyuan/easy-real-time-face-recognition-python/f5c6ad9589f14124ebb93a519adfacd6e00d0dfc/folder.png -------------------------------------------------------------------------------- /xie yuan and his friend.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Jacob12138xieyuan/easy-real-time-face-recognition-python/f5c6ad9589f14124ebb93a519adfacd6e00d0dfc/xie yuan and his friend.png --------------------------------------------------------------------------------