├── README.md ├── Sri_K_FaceRecProject.zip ├── creator.py ├── detector.py ├── haarcascade_frontalface_default.xml └── trainer.py /README.md: -------------------------------------------------------------------------------- 1 | # Machine-Learning-Face-Recognition-using-openCV 2 | Using the Python OpenCV module to train and recognize up to 5 faces and generate automated voice feedback 3 | 4 | # Project contains 3 python files: 5 | Creator.py --> Uses your webcam to take multiple pictures of your face, crops them, and converts them to grayscale to be analyzed later 6 | 7 | Trainer.py --> Uses the saved pictures to start analyzing trends and picks up on differentiating features of each face to generate a .YML file 8 | 9 | Detector.py --> Will use the .YML file to run the actual face detection program. It will provide AUDIO FEEDBACK based on who it recognizes. It will also print the percent confidence of each detection. (You can change this in the detector.py program { variable called conf }) 10 | 11 | # REQUIRED: 12 | - All 3 python files 13 | 14 | - 2 folders with "dataSet" and "trainer" names 15 | 16 | - A webcam set to default and good lighting conditions 17 | 18 | # STEPS: 19 | STEP 1: Run creator.py — it will ask for a number, which is the ID of each of the 5 possible faces. Choose 1-5 accordingly, hit enter, and wait until the picture taking process is completed 20 | 21 | STEP 2: Run the trainer.py to analyze the JPG pictures and create the YML file. 22 | 23 | STEP 3 (OPTIONAL): save .MP3 files to the same folder with audio that you want to play for each unique face detected successfully 24 | 25 | STEP 4: Edit detector.py to change the mouse click location to the trigger button in the Alexa app to automate any smart home appliance! 26 | 27 | STEP 5: Run the detector.py to begin facial analysis/detection 28 | 29 | ## ENJOY! 
30 | ### SRI KANIPAKLA 31 | -------------------------------------------------------------------------------- /Sri_K_FaceRecProject.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/skanipakala/Machine-Learning-Face-Recognition-using-openCV/c258e3db5af02fafdfb08f94489066170210219b/Sri_K_FaceRecProject.zip -------------------------------------------------------------------------------- /creator.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | igniter = 8880 # just a random number to avoid overwriting file names 5 | detector= cv2.CascadeClassifier('haarcascade_frontalface_default.xml') 6 | cap = cv2.VideoCapture(1) 7 | 8 | id =input('enter the ID #') 9 | sampleNum = 1; 10 | while(True): 11 | ret, img = cap.read() 12 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 13 | faces = detector.detectMultiScale(gray, 1.3, 5) 14 | for (x,y,w,h) in faces: 15 | 16 | sampleNum = sampleNum+1 17 | cv2.imwrite("dataSet/User." + str(id) + "." 
+ str(sampleNum*igniter) + ".jpg", gray[y:y+h, x:x+w]) 18 | cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) 19 | cv2.waitKey(100) 20 | cv2.imshow('frame',img) 21 | cv2.waitKey(1) 22 | if(sampleNum > 210): 23 | break 24 | cap.release() 25 | cv2.destroyAllWindows() 26 | 27 | print('collection complete!!!') 28 | -------------------------------------------------------------------------------- /detector.py: -------------------------------------------------------------------------------- 1 | 2 | ### SRI KANIPAKALA COVID-19 project 2020 3 | 4 | import cv2 5 | import numpy as np 6 | import time 7 | import os 8 | from pynput.mouse import Button, Controller 9 | 10 | 11 | ##recognizer = cv2.createLBPHFaceRecognizer_create() 12 | recognizer = cv2.face.LBPHFaceRecognizer_create() 13 | recognizer.read('trainner/trainner.yml') 14 | cascadePath = "haarcascade_frontalface_default.xml" 15 | faceCascade = cv2.CascadeClassifier(cascadePath); 16 | 17 | os.system("START.mp3") # play when starting face scan 18 | cam = cv2.VideoCapture(1) 19 | #font = cv2.FONT_HERSHEY_SIMPLEX 20 | ##font = cv2.InitFont(cv2.FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1) 21 | font = cv2.FONT_HERSHEY_SIMPLEX 22 | ##cv2.putText() 23 | font2 = cv2.FONT_HERSHEY_DUPLEX 24 | 25 | readingA =0 26 | readingB =0 27 | readingC =0 28 | readingD =0 29 | response="" 30 | 31 | def sayIt(name): 32 | ##os.system("start rashmi_good.mp3") 33 | print('[+] playing audio', name) 34 | os.system("start " + name) 35 | 36 | def light(): 37 | mouse = Controller() 38 | mouse.position = (1529,205) 39 | mouse.click(Button.left, 1) 40 | print("[+] Changing light to Green") 41 | time.sleep(3) 42 | mouse.position = (1529,205) 43 | mouse.click(Button.left, 1) 44 | print("[+] Resetting Light") 45 | while True: 46 | color = (0,0,255) ## RED 47 | ret, im =cam.read() 48 | gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) 49 | faces=faceCascade.detectMultiScale(gray, 1.2,5) 50 | 51 | ## cv2.putText(im, status, (100, 100), font2, 2, color , 2) ## my print status 52 
| status= "Denied" 53 | 54 | check=0 55 | for(x,y,w,h) in faces: 56 | cv2.rectangle(im,(x,y),(x+w,y+h),(225,0,0),2) 57 | Id, conf = recognizer.predict(gray[y:y+h,x:x+w]) 58 | 59 | print("[+] Confidence Level = " + str(conf)) 60 | 61 | #print("Confidence = " + str(conf)) 62 | 63 | if(conf<61): 64 | status = "ID Match" 65 | color = (0,255,0) ## update GREEN 66 | if(Id==1): 67 | 68 | Id="MASTER" 69 | if(conf < 52): 70 | 71 | response = "START.mp3" 72 | ##os.system("start adiAllow.mp3") 73 | check=1 74 | readingA = readingA +1 75 | ##time.sleep(3) 76 | 77 | 78 | 79 | elif(Id==2): 80 | Id="USER1" 81 | ## readingA= 0; 82 | readingA = readingA +1 83 | response = "USER1.mp3" 84 | check =1 85 | 86 | elif(Id==3): 87 | Id="USER2" 88 | 89 | readingA = readingA +1 90 | response = "USER2.mp3" 91 | check =1 92 | elif(Id==4): 93 | Id="USER3" 94 | ##readingA= 0; 95 | readingA = readingA +1 96 | response = "USER3.mp3" 97 | check =1 98 | elif(Id==5): 99 | Id="USER4" 100 | ##readingA= 0; 101 | readingA = readingA +1 102 | response = "USER4.mp3" 103 | check =1 104 | elif(Id==6): 105 | check=2 106 | Id="USER5" 107 | readingA= 0; 108 | 109 | readingB = readingB +1 110 | response = "USER5.mp3" 111 | elif(Id==7): 112 | check=2 113 | Id="USER6" 114 | readingA= 0; 115 | 116 | readingB = readingB +1 117 | response = "USER6.mp3" 118 | 119 | else: 120 | Id="Unknown" 121 | readingA=0 122 | readingB=0 123 | readingC=0 124 | readingD=0 125 | 126 | cv2.putText(im, Id, (x - 1, y - 1), font,2,(0, 255, 0),7) 127 | 128 | 129 | cv2.imshow('Face Recognition', im ) 130 | ##cv2.PutText(cv2.fromarray(im),str(Id), (x,y+h),font, 255) 131 | progress = str( (readingA/10)*100 ) 132 | cv2.putText(im, "Scaning%: " + progress , (900, 100), font2, 1, (255,0,0) , 2) 133 | cv2.putText(im, status, (100, 100), font2, 2, color , 2) ## my print status 134 | 135 | 136 | 137 | if(check==2 and readingB == 5): 138 | sayIt(response) 139 | if(check==1 and readingA>=10): 140 | time.sleep(1) 141 | print('[!!!] 
Security Bypassed!') 142 | sayIt(response) 143 | light() 144 | ##time.sleep(2) 145 | readingA=0 146 | cv2.imshow('Face Recognition', im ) 147 | 148 | if cv2.waitKey(10) ==ord('q'): 149 | break 150 | 151 | 152 | 153 | 154 | cam.release() 155 | cv2.destroyAllWindows() 156 | -------------------------------------------------------------------------------- /trainer.py: -------------------------------------------------------------------------------- 1 | import cv2,os 2 | import numpy as np 3 | from PIL import Image 4 | import time 5 | 6 | ##recognizer = cv2.createLBPHFaceRecognizer() 7 | recognizer = cv2.face.LBPHFaceRecognizer_create() 8 | detector= cv2.CascadeClassifier("haarcascade_frontalface_default.xml"); 9 | 10 | def getImagesAndLabels(path): 11 | 12 | imagePaths=[os.path.join(path,f) for f in os.listdir(path)] 13 | 14 | faceSamples=[] 15 | 16 | Ids=[] 17 | 18 | for imagePath in imagePaths: 19 | 20 | if(os.path.split(imagePath)[-1].split(".")[-1]!='jpg'): 21 | continue 22 | 23 | pilImage=Image.open(imagePath).convert('L') 24 | print(imagePath) 25 | imageNp=np.array(pilImage,'uint8') 26 | Id=int(os.path.split(imagePath)[-1].split(".")[1]) 27 | 28 | faces=detector.detectMultiScale(imageNp) 29 | print('Extracting Face...') 30 | for (x,y,w,h) in faces: 31 | print('adding cropped image to face sample archive') 32 | faceSamples.append(imageNp[y:y+h,x:x+w]) 33 | Ids.append(Id) 34 | return faceSamples,Ids 35 | 36 | 37 | faces,Ids = getImagesAndLabels('dataSet') 38 | print('[+] Analysis in progress...') 39 | recognizer.train(faces, np.array(Ids)) 40 | recognizer.save('trainer/trainner.yml') 41 | print('[!!!] Image Analysis Complete!') 42 | time.sleep(2) 43 | --------------------------------------------------------------------------------