├── .idea
│   ├── vcs.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── dictionaries
│   │   └── anuj5.xml
│   └── Opencv.iml
└── OpenCVDemo
    ├── face_recog_dir
    │   ├── train_model.py
    │   ├── get_training_dataset.py
    │   ├── face_normalisation.py
    │   ├── recognise_in_live_video.py
    │   ├── capture_training_dat.py
    │   └── make_predictions.py
    ├── basic_image_face.py
    └── basic_camera.py
/OpenCVDemo/face_recog_dir/train_model.py:
--------------------------------------------------------------------------------
import cv2
from OpenCVDemo.face_recog_dir.get_training_dataset import collect_dataset

images, labels, labels_dict = collect_dataset()

# Note: the cv2.face module ships with opencv-contrib-python, not the base opencv-python package.

# Eigen faces recogniser
rec_eigen = cv2.face.EigenFaceRecognizer_create()
rec_eigen.train(images, labels)

# Fisher faces recogniser
rec_fisher = cv2.face.FisherFaceRecognizer_create()
rec_fisher.train(images, labels)

# LBPH face recogniser
rec_lbph = cv2.face.LBPHFaceRecognizer_create()
rec_lbph.train(images, labels)

print("Trained successfully.")
--------------------------------------------------------------------------------
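
The three recognisers above are retrained from scratch every time this module is imported (recognise_in_live_video.py and make_predictions.py both pull them in via a star import). If that becomes slow as the dataset grows, OpenCV's FaceRecognizer objects can be persisted to disk and reloaded; a minimal sketch, where the filename lbph_model.yml is illustrative and not part of the repo:

import cv2
from OpenCVDemo.face_recog_dir.get_training_dataset import collect_dataset

images, labels, labels_dict = collect_dataset()

# Train once and serialise the model state to a YAML file.
rec_lbph = cv2.face.LBPHFaceRecognizer_create()
rec_lbph.train(images, labels)
rec_lbph.write("lbph_model.yml")  # illustrative filename, not used by the original scripts

# Later (e.g. in the live-recognition script), reload it without retraining.
rec_loaded = cv2.face.LBPHFaceRecognizer_create()
rec_loaded.read("lbph_model.yml")
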
/OpenCVDemo/face_recog_dir/get_training_dataset.py:
--------------------------------------------------------------------------------
import os
import cv2
import numpy as np


def collect_dataset():
    """Load every image under people/<person>/ as grayscale and label it by person."""
    images = []
    labels = []
    labels_dict = {}
    people = os.listdir("people/")

    for i, person in enumerate(people):
        labels_dict[i] = person

        for image in os.listdir("people/" + person):
            # Read as grayscale (flag 0); the recognisers expect single-channel input.
            images.append(cv2.imread("people/" + person + "/" + image, 0))
            labels.append(i)

    return images, np.array(labels), labels_dict
--------------------------------------------------------------------------------
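
collect_dataset() assumes a people/ directory next to the scripts with one sub-folder per person holding that person's face crops, which is the layout capture_training_dat.py produces. The names and file counts below are only illustrative:

people
├── alice
│   ├── img_0.jpg
│   └── img_1.jpg
└── bob
    ├── img_0.jpg
    └── img_1.jpg
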
/OpenCVDemo/basic_image_face.py:
--------------------------------------------------------------------------------
import cv2

# Path to the Haar cascade bundled with the local OpenCV install; adjust for your machine.
faceCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\Programs\Python\Python36\Lib\site-packages\cv2'
                                    r'\data\haarcascade_frontalface_alt2.xml')

img = cv2.imread('images/image1.jpg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = faceCascade.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=5)
count = 0

for (x, y, w, h) in faces:
    roi_gray = gray_img[y: y+h, x: x+w]

    # Save the crop before drawing: roi_gray is a view into gray_img, so drawing
    # first would leave the rectangle border in the saved image.
    out_path = 'output_folder/img_' + str(count) + ".png"
    count += 1
    cv2.imwrite(out_path, roi_gray)

    color = (255, 0, 0)
    stroke = 2
    cv2.rectangle(gray_img, (x, y), (x+w, y+h), color, stroke)
    print(x, y, w, h)

cv2.imshow('gray_img', gray_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
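
All of these scripts hard-code absolute Windows paths to the Haar cascade XML files, so they only run on the author's machine. Pip builds of OpenCV expose the directory containing the bundled cascades as cv2.data.haarcascades; a more portable lookup (a sketch, not how the original scripts load their cascades) would be:

import cv2

# cv2.data.haarcascades is the directory where pip-installed OpenCV keeps its cascade files.
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml'
faceCascade = cv2.CascadeClassifier(cascade_path)

# CascadeClassifier fails silently on a bad path, so it is worth checking explicitly.
if faceCascade.empty():
    raise IOError('Could not load cascade from ' + cascade_path)
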
/OpenCVDemo/basic_camera.py:
--------------------------------------------------------------------------------
import cv2

cap = cv2.VideoCapture(0)

# Path to the Haar cascade bundled with the local OpenCV install; adjust for your machine.
faceCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\Programs\Python\Python36\Lib\site-packages\cv2'
                                    r'\data\haarcascade_frontalface_alt2.xml')

while True:
    ret, frame = cap.read()
    if not ret:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

    # count resets every frame, so the saved crops are overwritten on each new frame.
    count = 0
    for (x, y, w, h) in faces:
        roi_gray = gray[y: y+h, x: x+w]
        roi_color = frame[y: y+h, x: x+w]

        # Save the colour crop before drawing so the rectangle border is not written into it.
        img = 'img_' + str(count) + ".png"
        count += 1
        cv2.imwrite(img, roi_color)

        color = (255, 0, 0)
        stroke = 2
        cv2.rectangle(frame, (x, y), (x+w, y+h), color, stroke)
        print(x, y, w, h)

    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/OpenCVDemo/face_recog_dir/face_normalisation.py:
--------------------------------------------------------------------------------
import cv2


def cut_face(image, face_coord):
    """Crop each detected face, trimming 10% off each side to drop background."""
    faces = []
    for (x, y, w, h) in face_coord:
        per = int(0.2 * w / 2)
        faces.append(image[y: y+h, x+per: x+w-per])
    return faces


def normalise_intensity(images):
    """Convert to grayscale if needed and equalise the histogram of each face."""
    norm_image = []
    for image in images:
        if len(image.shape) == 3:  # colour image: convert before equalising
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        norm_image.append(cv2.equalizeHist(image))
    return norm_image


def resize(images, size=(128, 128)):
    """Resize every face to a fixed size, picking the interpolation by direction."""
    norm_image = []
    for image in images:
        if image.shape < size:
            # image smaller than the target: enlarging, so use INTER_CUBIC
            image = cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
        else:
            # image at least as large as the target: shrinking, so use INTER_AREA
            image = cv2.resize(image, size, interpolation=cv2.INTER_AREA)
        norm_image.append(image)
    return norm_image


def get_normalised_faces(frame, face_coord):
    faces = cut_face(frame, face_coord)
    faces = normalise_intensity(faces)
    faces = resize(faces)

    return faces
--------------------------------------------------------------------------------
/OpenCVDemo/face_recog_dir/recognise_in_live_video.py:
--------------------------------------------------------------------------------
from OpenCVDemo.face_recog_dir.face_normalisation import get_normalised_faces
from OpenCVDemo.face_recog_dir.train_model import *

cap = cv2.VideoCapture(0)

faceCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\conda\conda\envs\face_recog_env\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

# Load the label dictionary once, outside the capture loop, rather than per detected face.
images, labels, labels_dict = collect_dataset()

while True:
    ret, frame = cap.read()
    if not ret:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    face_coord = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)

    if len(face_coord):
        faces = get_normalised_faces(gray, face_coord)

        for i, face in enumerate(faces):
            pred, conf = rec_fisher.predict(face)

            threshold = 1000
            if conf < threshold:
                per = int((threshold - conf) / threshold * 100)
                cv2.putText(frame, labels_dict[pred].capitalize() + " " + str(per) + "%",
                            (face_coord[i][0], face_coord[i][1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)
            else:
                cv2.putText(frame, "Unknown",
                            (face_coord[i][0], face_coord[i][1] - 10),
                            cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 1)

            cv2.rectangle(frame, (face_coord[i][0], face_coord[i][1]),
                          (face_coord[i][0] + face_coord[i][2], face_coord[i][1] + face_coord[i][3]),
                          (255, 0, 0), 1)

    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/OpenCVDemo/face_recog_dir/capture_training_dat.py:
--------------------------------------------------------------------------------
import os
import cv2

from OpenCVDemo.face_recog_dir.face_normalisation import get_normalised_faces

cap = cv2.VideoCapture(0)

faceCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\conda\conda\envs\face_recog_env\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')

number_of_images_for_training = 20

folder = "people/" + input('Name: ').lower()
if not os.path.exists(folder):
    os.makedirs(folder)  # also creates people/ if it does not exist yet
    counter = 0
    timer = 0

    while counter < number_of_images_for_training:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces_coord = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)

        for i, face in enumerate(faces_coord):
            # Only capture roughly every tenth frame (timer advances by 10 per frame).
            if timer % 100 == 0:
                if len(faces_coord) == 1:
                    # Exactly one face: mark it green and save the normalised crop.
                    cv2.rectangle(frame, (faces_coord[i][0], faces_coord[i][1]),
                                  (faces_coord[i][0] + faces_coord[i][2], faces_coord[i][1] + faces_coord[i][3]),
                                  (0, 255, 0), 1)
                    faces = get_normalised_faces(gray, faces_coord)
                    image_path = folder + "/img_" + str(counter) + ".jpg"
                    cv2.imwrite(image_path, faces[0])
                    print(counter, image_path)
                    counter += 1
                else:
                    # More than one face in view: mark them red and skip this frame.
                    cv2.rectangle(frame, (faces_coord[i][0], faces_coord[i][1]),
                                  (faces_coord[i][0] + faces_coord[i][2], faces_coord[i][1] + faces_coord[i][3]),
                                  (0, 0, 255), 1)

        cv2.imshow('frame', frame)
        cv2.waitKey(10)
        timer += 10
else:
    print("folder " + folder + " already exists")

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/OpenCVDemo/face_recog_dir/make_predictions.py:
--------------------------------------------------------------------------------
from OpenCVDemo.face_recog_dir.face_normalisation import *
from OpenCVDemo.face_recog_dir.train_model import *

cap = cv2.VideoCapture(0)

faceCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\Programs\Python\Python36\Lib\site-packages\cv2'
                                    r'\data\haarcascade_frontalface_alt2.xml')
eyeCascade = cv2.CascadeClassifier(r'C:\Users\anuj5\AppData\Local\conda\conda\envs\face_recog_env\Lib\site-packages\cv2\data\haarcascade_eye.xml')


def get_face():
    """Show the camera feed until 'q' is pressed with exactly one face in view, then return the normalised face."""
    print("Press 'q' to take the photo")
    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        face_coord = faceCascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)

        for (x, y, w, h) in face_coord:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 1)

            # Draw eye rectangles only when they fall inside the current face rectangle.
            eyes_coord = eyeCascade.detectMultiScale(gray)
            for (ex, ey, ew, eh) in eyes_coord:
                if ex > x and ey > y and ex + ew < x + w and ey + eh < y + h:
                    cv2.rectangle(frame, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)

        cv2.imshow('frame', frame)
        if cv2.waitKey(20) & 0xFF == ord('q') and len(face_coord) > 0:
            if len(face_coord) > 1:
                print("Multiple faces detected!")
            else:
                cap.release()
                cv2.destroyAllWindows()
                return get_normalised_faces(gray, face_coord)[0]


# Only labels_dict is needed below; the recognisers were already trained on import of train_model.
images, labels, labels_dict = collect_dataset()
face = get_face()

e_pred, e_conf = rec_eigen.predict(face)
f_pred, f_conf = rec_fisher.predict(face)
l_pred, l_conf = rec_lbph.predict(face)

# For all three recognisers a lower confidence value means a closer match.
print("Eigen ", labels_dict[e_pred], e_conf)
print("Fisher ", labels_dict[f_pred], f_conf)
print("LBPH ", labels_dict[l_pred], l_conf)
--------------------------------------------------------------------------------