├── README.md
├── face_datasets.py
├── face_recognition.py
├── haarcascade_frontalface_default.xml
├── samplePiCamOpenCV
└── training.py

/README.md:
--------------------------------------------------------------------------------
# Raspberry-Face-Recognition
Use Python and OpenCV on a Raspberry Pi to recognize multiple faces and show each person's name.

## Sample: getting video from the PiCam

```python
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# allow the camera to warm up
time.sleep(0.1)

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array

    # show the frame
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
```
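## Setup

The capture and training scripts below save face samples to `dataset/` and the trained model to `trainer/trainer.yml`, but neither folder is created automatically. A minimal setup sketch (folder names taken from `face_datasets.py` and `training.py`):

```python
import os

# Create the folders that face_datasets.py and training.py expect.
for folder in ("dataset", "trainer"):
    os.makedirs(folder, exist_ok=True)
```

Typical workflow: run `face_datasets.py` to collect face samples, then `training.py` to build the model, then `face_recognition.py` to recognize faces live.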
--------------------------------------------------------------------------------
/face_datasets.py:
--------------------------------------------------------------------------------
# Import OpenCV for image processing
import cv2

# Start capturing video from the default camera
vid_cam = cv2.VideoCapture(0)

# Detect faces in the video stream using the Haar cascade frontal face model
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Each person gets one face ID
face_id = 5

# Counter for the captured face samples
count = 0

while True:

    # Capture a video frame
    _, image_frame = vid_cam.read()

    # Convert the frame to grayscale
    gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)

    # Detect faces of different sizes; returns a list of face rectangles
    faces = face_detector.detectMultiScale(gray, 1.3, 5)

    # Loop over each detected face
    for (x, y, w, h) in faces:

        # Draw a rectangle around the face
        cv2.rectangle(image_frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

        # Increment the sample counter
        count += 1

        # Save the cropped grayscale face into the dataset folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])

    # Display the video frame with a rectangle around each face
    cv2.imshow('frame', image_frame)

    # To stop capturing, hold 'q' for at least 100 ms
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break

    # Stop once 100 samples have been taken
    elif count > 100:
        break

# Release the camera
vid_cam.release()

# Close all OpenCV windows
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/face_recognition.py:
--------------------------------------------------------------------------------
# Import OpenCV for image processing
import cv2

# Import numpy for matrix calculations
import numpy as np

# Create the Local Binary Patterns Histograms recognizer for face recognition
# (createLBPHFaceRecognizer() in OpenCV builds older than 3.3)
recognizer = cv2.face.LBPHFaceRecognizer_create()

# Load the trained model
# (recognizer.load() in older OpenCV builds)
recognizer.read('trainer/trainer.yml')

# Path to the prebuilt frontal face model
cascadePath = "haarcascade_frontalface_default.xml"

# Create the classifier from the prebuilt model
faceCascade = cv2.CascadeClassifier(cascadePath)

# Set the font style
font = cv2.FONT_HERSHEY_SIMPLEX

# Initialize and start the video frame capture
cam = cv2.VideoCapture(0)

while True:
    # Read a video frame
    ret, im = cam.read()

    # Convert the captured frame to grayscale
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # Get all faces from the video frame
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)

    # For each face in faces
    for (x, y, w, h) in faces:

        # Draw a rectangle around the face
        cv2.rectangle(im, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)

        # Ask the recognizer which ID the face belongs to;
        # predict() returns a (label, confidence) pair
        Id, confidence = recognizer.predict(gray[y:y+h, x:x+w])

        # Map the ID to a name; unrecognized IDs are labelled "Unknown"
        if Id == 1:
            Id = "Jacky"
        elif Id == 2:
            Id = "Jenifer"
        else:
            print(Id)
            Id = "Unknown"

        # Put text describing who is in the picture
        cv2.rectangle(im, (x-22, y-90), (x+w+22, y-22), (0, 255, 0), -1)
        cv2.putText(im, str(Id), (x, y-40), font, 2, (255, 255, 255), 3)

    # Display the video frame with the bounding rectangles
    cv2.imshow('im', im)

    # If 'q' is pressed, close the program
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# Stop the camera
cam.release()

# Close all windows
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/samplePiCamOpenCV:
--------------------------------------------------------------------------------
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# allow the camera to warm up
time.sleep(0.1)

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array

    # show the frame
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
--------------------------------------------------------------------------------
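The PiCam sample above and `face_datasets.py` use the same OpenCV calls, so the two combine naturally on a Raspberry Pi. A minimal sketch (not part of the repository) that runs the Haar detector on PiCam frames, assuming the same 640x480 setup:

```python
# Hypothetical bridge between samplePiCamOpenCV and face_datasets.py:
# detect faces on frames coming from the Pi camera.
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)  # let the camera warm up

detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    image = frame.array
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Same detector parameters as face_datasets.py
    for (x, y, w, h) in detector.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)

    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)  # reset the stream for the next frame
    if key == ord("q"):
        break

camera.close()
cv2.destroyAllWindows()
```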
/training.py:
--------------------------------------------------------------------------------
# Import OpenCV for image processing and os for file paths
import cv2, os

# Import numpy for matrix calculations
import numpy as np

# Import the Python Imaging Library (PIL)
from PIL import Image

# Create the Local Binary Patterns Histograms recognizer for face recognition
# (createLBPHFaceRecognizer() in OpenCV builds older than 3.3)
recognizer = cv2.face.LBPHFaceRecognizer_create()

# Use the prebuilt frontal face model for face detection
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Get the images and label data
def getImagesAndLabels(path):

    # Get all file paths
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]

    # Initialize the empty list of face samples
    faceSamples = []

    # Initialize the empty list of IDs
    ids = []

    # Loop over all the file paths
    for imagePath in imagePaths:

        # Open the image and convert it to grayscale
        PIL_img = Image.open(imagePath).convert('L')

        # PIL image to numpy array
        img_numpy = np.array(PIL_img, 'uint8')

        # Get the image ID from the file name (dataset/User.<id>.<count>.jpg)
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        print(id)

        # Detect the face in the training image
        faces = detector.detectMultiScale(img_numpy)

        # For each face, append the crop and its ID
        for (x, y, w, h) in faces:

            # Add the face crop to the samples
            faceSamples.append(img_numpy[y:y+h, x:x+w])

            # Add the ID to the IDs
            ids.append(id)

    # Return the face and ID arrays
    return faceSamples, ids

# Get the faces and IDs
faces, ids = getImagesAndLabels('dataset')

# Train the model using the faces and IDs
recognizer.train(faces, np.array(ids))

# Save the model into trainer/trainer.yml
# (recognizer.save() in older OpenCV builds)
recognizer.write('trainer/trainer.yml')
--------------------------------------------------------------------------------
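A quick way to check the result of `training.py` is to reload the saved model and predict on one of the collected samples; the images written by `face_datasets.py` are already cropped faces, so they can be fed to `predict()` directly. A minimal smoke-test sketch (the sample path is hypothetical and follows the `dataset/User.<id>.<count>.jpg` naming used above):

```python
import cv2

# Reload the trained LBPH model saved by training.py.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')

# Any sample written by face_datasets.py, e.g. face_id 5, first capture.
sample = cv2.imread('dataset/User.5.1.jpg', cv2.IMREAD_GRAYSCALE)

# predict() returns (label, confidence); lower confidence means a closer match.
label, confidence = recognizer.predict(sample)
print('predicted id:', label, '| confidence (lower is better):', confidence)
```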