├── README.md
├── images
│   ├── Multicamera_singletrack.png
│   ├── Multiple camera single camera.png
│   ├── Trial.png
│   ├── YouCut_20190303_085257625.mp4
│   ├── ezgif.com-video-to-gif.gif
│   ├── person.png
│   └── update.jpg
└── src2
    ├── Continuous.py
    ├── Face_cascade.xml
    ├── Multicamera_ob.py
    ├── Multiple_tracking.py
    ├── Multiple_trial.py
    ├── accucary.py
    ├── anna.py
    ├── detect_face_live.py
    ├── e2.py
    ├── explorerVideo.py
    ├── fe.py
    ├── get_points.py
    ├── kernal.py
    ├── logfiles.txt
    ├── main.py
    └── track.py

/README.md:
--------------------------------------------------------------------------------
# Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera

`Currently a repository for documentation; implementation details will be published in the future.`

A surveillance system for CCTV cameras that recognizes selected target individuals and tracks them in real time across multiple cameras, built from detection, recognition, and kernel-based tracking modules. The pipeline has three stages:

1. Acquisition: frames are captured from multiple static CCTV cameras.
2. Face detection and recognition: faces are detected in each frame and the individuals are recognized.
3. Multiple-person tracking: the recognized target individuals are tracked across all cameras.

Facial recognition uses HOG features for detection and image embeddings (OpenFace) for identification. The system performs simultaneous recognition and tracking of multiple individuals across multiple cameras in real time.

Winning project, Smart India Hackathon 2019.

![method](/images/person.png)

![res1](images/Trial.png)

![res2GIF](images/ezgif.com-video-to-gif.gif)
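For orientation, here is a minimal sketch of the per-camera recognition loop that the scripts in `src2/` implement (an illustration, not the published implementation; it assumes the `face_recognition` and OpenCV packages and a hypothetical `known/` folder holding one reference photo per person):

```python
import os
import cv2
import face_recognition

# Build one 128-d reference encoding per person ("known/" holds <name>.jpg photos).
known_encodings, known_names = [], []
for fname in os.listdir("known"):
    image = face_recognition.load_image_file(os.path.join("known", fname))
    known_encodings.append(face_recognition.face_encodings(image)[0])
    known_names.append(os.path.splitext(fname)[0])

camera = cv2.VideoCapture(0)  # one capture object per CCTV feed
while True:
    ret, frame = camera.read()
    if not ret:
        break
    rgb = frame[:, :, ::-1]  # OpenCV frames are BGR; face_recognition expects RGB
    # HOG-based face detector, then one embedding per detected face.
    locations = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, locations)
    for (top, right, bottom, left), encoding in zip(locations, encodings):
        matches = face_recognition.compare_faces(known_encodings, encoding, tolerance=0.5)
        name = known_names[matches.index(True)] if True in matches else "Unknown"
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(frame, name, (left, bottom + 20), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)
    cv2.imshow("Camera", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
camera.release()
cv2.destroyAllWindows()
```

Each camera runs a loop like this; once a target is recognized, the tracking scripts hand the bounding box to a dlib correlation tracker so the person can be followed without re-running recognition on every frame.

Several scripts in `src2/` load a `trained_knn_model.clf` that the repository does not yet build. A sketch of how such a classifier could be trained, assuming a hypothetical `train/<name>/<photo>.jpg` layout:

```python
import os
import pickle
import face_recognition
from sklearn.neighbors import KNeighborsClassifier

X, y = [], []
for person in os.listdir("train"):
    for photo in os.listdir(os.path.join("train", person)):
        image = face_recognition.load_image_file(os.path.join("train", person, photo))
        boxes = face_recognition.face_locations(image)
        if len(boxes) == 1:  # keep only photos with exactly one face
            X.append(face_recognition.face_encodings(image, known_face_locations=boxes)[0])
            y.append(person)

knn_clf = KNeighborsClassifier(n_neighbors=2, algorithm="ball_tree", weights="distance")
knn_clf.fit(X, y)
with open("trained_knn_model.clf", "wb") as f:
    pickle.dump(knn_clf, f)
```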
--------------------------------------------------------------------------------
/images/Multicamera_singletrack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/Multicamera_singletrack.png
--------------------------------------------------------------------------------
/images/Multiple camera single camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/Multiple camera single camera.png
--------------------------------------------------------------------------------
/images/Trial.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/Trial.png
--------------------------------------------------------------------------------
/images/YouCut_20190303_085257625.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/YouCut_20190303_085257625.mp4
--------------------------------------------------------------------------------
/images/ezgif.com-video-to-gif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/ezgif.com-video-to-gif.gif
--------------------------------------------------------------------------------
/images/person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/person.png
--------------------------------------------------------------------------------
/images/update.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/deeptibhegde/Real-Time-Multiple-Person-Recognition-and-Tracking-for-CCTV-Camera/3d6902d673331981ada07a8575ecd5dabce5dbf6/images/update.jpg
--------------------------------------------------------------------------------
/src2/Continuous.py:
--------------------------------------------------------------------------------
import datetime
import face_recognition
import cv2
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as mse
from skimage.measure import compare_ssim as ssim
import os
import math

camnum = 1

def face_distance_to_conf(face_distance, face_match_threshold=0.6):
    # Map a face distance to a confidence value in [0, 1].
    if face_distance > face_match_threshold:
        range = (1.0 - face_match_threshold)
        linear_val = (1.0 - face_distance) / (range * 2.0)
        return linear_val
    else:
        range = face_match_threshold
        linear_val = 1.0 - (face_distance / (range * 2.0))
        return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))

acc = 1
path = '/home/ise/SIH/app/known/'
video_capture = cv2.VideoCapture(0)

# Build one encoding per reference photo in the known-faces folder.
known_face_encodings = []
known_face_names = []
for i in os.listdir(path):
    Known_image = face_recognition.load_image_file(os.path.join(path, i))
    Known_face_encoding = face_recognition.face_encodings(Known_image)[0]
    known_face_encodings.append(Known_face_encoding)
    l = i.split('.')
    known_face_names.append(l[0])

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
acclist = []
while True:
    ret, frame = video_capture.read()
    # Work on a quarter-size frame for speed; scale the boxes back up later.
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
            name = "Unknown"
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
                #acc = accuracy_score(known_face_encodings[first_match_index], face_encoding)
                #acc = mse(known_face_encodings[first_match_index], face_encoding)
                acc = ssim(known_face_encodings[first_match_index], face_encoding)
                acc = round(acc, 4)
            face_names.append(name)

    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Detections were made on a quarter-size frame; scale back up.
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        if name == "Unknown":
            acc = 0
            acclist = [1]
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        else:
            cv2.putText(frame, name + " " + str(acc), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            acclist.append(acc)

        # Append a detection record to the shared log files.
        with open('/run/user/1000/gvfs/smb-share:server=10.2.0.10,share=temp/sihlogfiles/logfiles.txt', 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))
        filename = name + ".txt"
        with open("/run/user/1000/gvfs/smb-share:server=10.2.0.10,share=temp/sihlogfiles/" + filename, 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print(name)
        print(sum(acclist) / len(acclist))
        break

video_capture.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/Multicamera_ob.py:
--------------------------------------------------------------------------------
import face_recognition
import cv2
import pickle
import dlib
import get_points

def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.4):
    # Detect faces in the frame and classify each one with a trained KNN model.
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either through knn_clf or model_path")
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    X_face_locations = face_recognition.face_locations(X_img_path, number_of_times_to_upsample=0)
    if len(X_face_locations) == 0:
        return []
    faces_encodings = face_recognition.face_encodings(X_img_path, known_face_locations=X_face_locations)
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]

def equi(x1, y1, x2, y2):
    # Euclidean distance between two points.
    return (((x1 - x2) ** 2) + ((y1 - y2) ** 2)) ** 0.5

video1 = cv2.VideoCapture(0)
video2 = cv2.VideoCapture(1)
ret = True
l1 = []
nae1 = []
l2 = []
nae2 = []

def track(img1, l1, nae1, img2, l2, nae2):
    # One correlation tracker per recognized person, per camera.
    tracker1 = [dlib.correlation_tracker() for _ in range(len(l1))]
    [tracker1[i].start_track(img1, dlib.rectangle(*rect)) for i, rect in enumerate(l1)]
    tracker2 = [dlib.correlation_tracker() for _ in range(len(l2))]
    [tracker2[i].start_track(img2, dlib.rectangle(*rect)) for i, rect in enumerate(l2)]
    ret = True
    if "unknown" in nae1:
        return True
    while ret:
        la1 = []
        na1 = []
        la2 = []
        na2 = []

        ret, img1 = video1.read()
        rgb_frame1 = img1[:, :, ::-1]
        ret, img2 = video2.read()
        rgb_frame2 = img2[:, :, ::-1]

        # face_locations gives (top, right, bottom, left); reorder to
        # (left, top, right, bottom) for dlib.rectangle.
        predictions1 = predict(rgb_frame1, model_path="trained_knn_model.clf")
        for name, (x, y, w, h) in predictions1:
            la1.append((h, x, y, w))
            na1.append(name)

        predictions2 = predict(rgb_frame2, model_path="trained_knn_model.clf")
        for name, (x, y, w, h) in predictions2:
            la2.append((h, x, y, w))
            na2.append(name)

        # Add newly recognized people to camera 1's tracker list, unless the
        # detection lies within 75 px of an already-tracked box.
        for j in na1:
            if j == "unknown":
                continue
            if j not in nae1:
                dist = []
                (L, T, R, B) = la1[na1.index(j)]
                for k in range(len(l1)):
                    (lef, top, rig, bot) = l1[k]
                    dist.append(equi(L, T, lef, top))
                if min(dist) < 75.0:
                    continue
                else:
                    nae1.append(j)
                    l1.append(la1[na1.index(j)])
                    tracker1 = [dlib.correlation_tracker() for _ in range(len(l1))]
                    [tracker1[i].start_track(img1, dlib.rectangle(*rect)) for i, rect in enumerate(l1)]

        for j in na2:
            if j == "unknown":
                continue
            if j not in nae2:
                dist = []
                (L, T, R, B) = la2[na2.index(j)]
                for k in range(len(l2)):
                    (lef, top, rig, bot) = l2[k]
                    dist.append(equi(L, T, lef, top))
                if min(dist) < 75.0:
                    continue
                else:
                    nae2.append(j)
                    l2.append(la2[na2.index(j)])
                    tracker2 = [dlib.correlation_tracker() for _ in range(len(l2))]
                    [tracker2[i].start_track(img2, dlib.rectangle(*rect)) for i, rect in enumerate(l2)]

        i = 0
        for i in range(len(tracker1)):
            m, n, _ = img1.shape
            tracker1[i].update(img1)
            rect = tracker1[i].get_position()
            l1[i] = (int(rect.left()), int(rect.top()), int(rect.right()), int(rect.bottom()))
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))
            cv2.rectangle(img1, pt1, pt2, (0, 0, 255), 3)
            cv2.putText(img1, nae1[i], (int(rect.left()) + 6, int(rect.bottom()) - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (0, 0, 255), 1)
            print("Object {} tracked at [{}, {}] \r".format(i, pt1, pt2))

        i = 0
        for i in range(len(tracker2)):
            tracker2[i].update(img2)
            rect = tracker2[i].get_position()
            l2[i] = (int(rect.left()), int(rect.top()), int(rect.right()), int(rect.bottom()))
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))
            cv2.rectangle(img2, pt1, pt2, (0, 0, 255), 3)
            cv2.putText(img2, nae2[i], (int(rect.left()) + 6, int(rect.bottom()) - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (0, 0, 255), 1)
            print("Object {} tracked at [{}, {}] \r".format(i, pt1, pt2))

        cv2.namedWindow("Camera 1", cv2.WINDOW_NORMAL)
        cv2.imshow("Camera 1", img1)
        cv2.namedWindow("Camera 2", cv2.WINDOW_NORMAL)
        cv2.imshow("Camera 2", img2)
        # Continue until the user presses the ESC key
        if cv2.waitKey(1) == 27:
            break
    return False

while ret:
    ret, frame1 = video1.read()
    rgb_frame1 = frame1[:, :, ::-1]

    ret, frame2 = video2.read()
    rgb_frame2 = frame2[:, :, ::-1]

    predictions1 = predict(rgb_frame1, model_path="trained_knn_model.clf")
    predictions2 = predict(rgb_frame2, model_path="trained_knn_model.clf")

    for name, (top, right, bottom, left) in predictions1:
        (x, y, w, h) = predictions1[0][1]
        l1 = [(h, x, y, w)]
        nae1.append(name)

    for name, (top, right, bottom, left) in predictions2:
        (x, y, w, h) = predictions2[0][1]
        l2 = [(h, x, y, w)]
        nae2.append(name)

    track(frame1, l1, nae1, frame2, l2, nae2)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        ret = False

# Release handles to the webcams
video1.release()
video2.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/Multiple_tracking.py:
--------------------------------------------------------------------------------
import face_recognition
import cv2
import pickle
import get_points
import dlib

def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.45):
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either through knn_clf or model_path")
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    X_face_locations = face_recognition.face_locations(X_img_path, number_of_times_to_upsample=0)
    if len(X_face_locations) == 0:
        return []
    faces_encodings = face_recognition.face_encodings(X_img_path, known_face_locations=X_face_locations)
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
    return [(pred, loc) if rec else ("UNKNOWN", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]

video1 = cv2.VideoCapture(0)
video2 = cv2.VideoCapture(1)
ret = True

def track(img1, l1, name1, img2, l2, name2):
    # Start one correlation tracker per camera on the recognized face.
    tracker1 = dlib.correlation_tracker()
    tracker1.start_track(img1, dlib.rectangle(*l1[0]))
    tracker2 = dlib.correlation_tracker()
    tracker2.start_track(img2, dlib.rectangle(*l2[0]))
    ret = True
    while ret:
        ret, img1 = video1.read()
        ret, img2 = video2.read()

        tracker1.update(img1)
        tracker2.update(img2)

        rect1 = tracker1.get_position()
        rect2 = tracker2.get_position()

        pt11 = (int(rect1.left()), int(rect1.top()))
        pt21 = (int(rect1.right()), int(rect1.bottom()))

        pt12 = (int(rect2.left()), int(rect2.top()))
        pt22 = (int(rect2.right()), int(rect2.bottom()))

        cv2.rectangle(img1, pt11, pt21, (255, 255, 255), 3)
        cv2.rectangle(img2, pt12, pt22, (255, 255, 255), 3)

        font = cv2.FONT_HERSHEY_DUPLEX

        cv2.putText(img1, name1, (int(rect1.left()) + 6, int(rect1.bottom()) - 6), font, 1.0, (0, 0, 255), 1)
        cv2.namedWindow("Image1", cv2.WINDOW_NORMAL)

        cv2.putText(img2, name2, (int(rect2.left()) + 6, int(rect2.bottom()) - 6), font, 1.0, (0, 0, 255), 1)
        cv2.namedWindow("Image2", cv2.WINDOW_NORMAL)

        cv2.imshow("Image1", img1)
        cv2.imshow("Image2", img2)

        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            ret = False

while ret:
    ret, frame1 = video1.read()
    rgb_frame1 = frame1[:, :, ::-1]
    ret, frame2 = video2.read()
    rgb_frame2 = frame2[:, :, ::-1]
    predictions1 = predict(rgb_frame1, model_path="trained_knn_model.clf")
    predictions2 = predict(rgb_frame2, model_path="trained_knn_model.clf")
    for name1, (top, right, bottom, left) in predictions1:
        (x, y, w, h) = predictions1[0][1]
        l1 = [(h, x, y, w)]

    for name2, (top, right, bottom, left) in predictions2:
        (x, y, w, h) = predictions2[0][1]
        l2 = [(h, x, y, w)]
        track(frame1, l1, name1, frame2, l2, name2)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        ret = False

# Release handles to the webcams
video1.release()
video2.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/Multiple_trial.py:
--------------------------------------------------------------------------------
import face_recognition
import cv2
import pickle

def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.475):
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either through knn_clf or model_path")
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)
    X_face_locations = face_recognition.face_locations(X_img_path, number_of_times_to_upsample=0)
    if len(X_face_locations) == 0:
        return []
    faces_encodings = face_recognition.face_encodings(X_img_path, known_face_locations=X_face_locations)
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
    return [(pred, loc) if rec else ("UNKNOWN", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]

video1 = cv2.VideoCapture(0)
video2 = cv2.VideoCapture(1)
ret = True
while ret:
    ret, frame1 = video1.read()
    rgb_frame1 = frame1[:, :, ::-1]
    ret, frame2 = video2.read()
    rgb_frame2 = frame2[:, :, ::-1]
    predictions1 = predict(rgb_frame1, model_path="trained_knn_model.clf")
    predictions2 = predict(rgb_frame2, model_path="trained_knn_model.clf")
    for name, (top, right, bottom, left) in predictions1:
        cv2.rectangle(frame1, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame1, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame1, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    for name, (top, right, bottom, left) in predictions2:
        cv2.rectangle(frame2, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame2, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame2, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    cv2.imshow('Video1', frame1)
    cv2.imshow('Video2', frame2)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        ret = False

# Release handles to the webcams
video1.release()
video2.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/accucary.py:
--------------------------------------------------------------------------------
import datetime
import face_recognition
import cv2
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as mse
from skimage.measure import compare_ssim as ssim

import math

camnum = 1

def face_distance_to_conf(face_distance, face_match_threshold=0.6):
    if face_distance > face_match_threshold:
        range = (1.0 - face_match_threshold)
        linear_val = (1.0 - face_distance) / (range * 2.0)
        return linear_val
    else:
        range = face_match_threshold
        linear_val = 1.0 - (face_distance / (range * 2.0))
        return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))

acc = 1
video_capture = cv2.VideoCapture(0)

vinni_image = face_recognition.load_image_file("/home/dikshit/Files/final_app/app/known/vinni.jpg")
vinni_face_encoding = face_recognition.face_encodings(vinni_image)[0]

shan_image = face_recognition.load_image_file("/home/dikshit/Files/final_app/app/known/shan.jpg")
shan_face_encoding = face_recognition.face_encodings(shan_image)[0]

deep_image = face_recognition.load_image_file("/home/dikshit/Files/final_app/app/known/deep.jpg")
deep_face_encoding = face_recognition.face_encodings(deep_image)[0]

madam = face_recognition.load_image_file("/home/dikshit/Files/final_app/app/known/madam.jpg")
madam_encoding = face_recognition.face_encodings(madam)[0]

#sus_image = face_recognition.load_image_file("/home/dikshit/Files/final_app/app/known/suspect1.jpg")
#sus_face_encoding = face_recognition.face_encodings(sus_image)[0]

#sus2_image = face_recognition.load_image_file("/home/ise/SIH/app/known/530.jpg")
#sus2_face_encoding = face_recognition.face_encodings(sus2_image)[0]

#sir = face_recognition.load_image_file("/home/ise/app/known/sir.jpg")
#sir_encoding = face_recognition.face_encodings(sir)[0]

known_face_encodings = [vinni_face_encoding, shan_face_encoding, deep_face_encoding, madam_encoding]
known_face_names = ["vinni", "shan", "deeps", "madam"]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
acclist = []
while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
            name = "Unknown"
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
                #acc = accuracy_score(known_face_encodings[first_match_index], face_encoding)
                #acc = mse(known_face_encodings[first_match_index], face_encoding)
                acc = ssim(known_face_encodings[first_match_index], face_encoding)
                acc = round(acc, 4)
            face_names.append(name)

    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        if name == "Unknown":
            acclist = [1]
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        else:
            cv2.putText(frame, name + " " + str(acc), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            acclist.append(acc)

        with open('logfiles.txt', 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))
        filename = name + ".txt"
        with open(filename, 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print(sum(acclist) / len(acclist))
        break

video_capture.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/anna.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 16:05:30 2019

@author: ise
"""
import datetime
import face_recognition
import cv2
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as mse
from skimage.measure import compare_ssim as ssim
import smtplib, ssl

import math

camnum = 1
counterr = 0

def notif():
    # Send an e-mail alert once too many unknown detections accumulate.
    port = 465  # For SSL
    smtp_server = "smtp.gmail.com"
    sender_email = "vellalavineethkumar@gmail.com"  # Enter your address
    receiver_email = "samankaranth8@gmail.com"  # Enter receiver address
    password = "vellala@2999"
    message = """\
Subject: Hi there

An Unknown detected in your office grounds."""

    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, receiver_email, message)

def face_distance_to_conf(face_distance, face_match_threshold=0.6):
    if face_distance > face_match_threshold:
        range = (1.0 - face_match_threshold)
        linear_val = (1.0 - face_distance) / (range * 2.0)
        return linear_val
    else:
        range = face_match_threshold
        linear_val = 1.0 - (face_distance / (range * 2.0))
        return linear_val + ((1.0 - linear_val) * math.pow((linear_val - 0.5) * 2, 0.2))

acc = 1
video_capture = cv2.VideoCapture(0)

vinni_image = face_recognition.load_image_file("/home/ise/SIH/app/known/vinni.jpg")
vinni_face_encoding = face_recognition.face_encodings(vinni_image)[0]

shan_image = face_recognition.load_image_file("/home/ise/SIH/app/known/shan.jpg")
shan_face_encoding = face_recognition.face_encodings(shan_image)[0]

deep_image = face_recognition.load_image_file("/home/ise/SIH/app/known/deep.jpg")
deep_face_encoding = face_recognition.face_encodings(deep_image)[0]

madam = face_recognition.load_image_file("/home/ise/SIH/app/known/madam.jpg")
madam_encoding = face_recognition.face_encodings(madam)[0]

#sir = face_recognition.load_image_file("/home/ise/app/known/sir.jpg")
#sir_encoding = face_recognition.face_encodings(sir)[0]

known_face_encodings = [vinni_face_encoding, shan_face_encoding, deep_face_encoding, madam_encoding]
known_face_names = ["vinni", "shan", "deeps", "madam"]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
acclist = []
while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
            name = "Unknown"
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
                #acc = accuracy_score(known_face_encodings[first_match_index], face_encoding)
                #acc = mse(known_face_encodings[first_match_index], face_encoding)
                acc = ssim(known_face_encodings[first_match_index], face_encoding)
                acc = round(acc, 4)
            face_names.append(name)

    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        if name == "Unknown":
            acc = 0
            counterr = counterr + 1
            acclist = [1]
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        else:
            cv2.putText(frame, name + " " + str(acc), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            acclist.append(acc)

        with open('logfiles.txt', 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))
        filename = name + ".txt"
        with open(filename, 'a') as the_file:
            stringto = name + " with ssim " + str(acc) + " was detected in camera " + str(camnum) + " at time " + str(datetime.datetime.now()) + "\n"
            the_file.write(str(stringto))

    if counterr > 15:
        notif()
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print(sum(acclist) / len(acclist))
        break

video_capture.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/detect_face_live.py:
--------------------------------------------------------------------------------
import numpy as np
from random import randint
import cv2
import sys
import os

count = 0
CASCADE = "Face_cascade.xml"
FACE_CASCADE = cv2.CascadeClassifier(CASCADE)

def detect_faces(image):
    print("please be patient while we extract your face\n")
    print("you look awesome today\n")
    image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = FACE_CASCADE.detectMultiScale(image_grey, scaleFactor=1.16, minNeighbors=5, minSize=(25, 25), flags=0)

    for x, y, w, h in faces:
        # Crop the face with a small margin and save it for later training.
        sub_img = image[y - 10:y + h + 10, x - 10:x + w + 10]
        os.chdir("Extracted")
        cv2.imwrite(str(randint(0, 10000)) + ".jpg", sub_img)
        os.chdir("../")
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)

    cv2.imshow('Video', frame)
    if (cv2.waitKey(500) & 0xFF == ord('q')) or (cv2.waitKey(2000) & 0xFF == ord('Q')):
        cv2.destroyAllWindows()
        return

#cap = cv2.VideoCapture("rtsp://admin:admin@10.2.3.177")
#cap = cv2.VideoCapture("rtsp://user@user12345@10.2.3.178")
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    count = count + 1
    # Only run detection on every tenth frame.
    if count % 10 == 0:
        detect_faces(frame)
    else:
        continue

    if cv2.waitKey(500) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/src2/e2.py:
--------------------------------------------------------------------------------
import sys
import subprocess

def fn_get_txt_sysarg():
    """Harvest a single (the only expected) command line argument"""
    try:
        return sys.argv[1]  # str() would be redundant here
    except:
        ErrorMsg = 'Message from fn_get_txt_sysarg() in Script (' + sys.argv[0] + '):\n' + '\tThe Script did not receive a command line argument'
        sys.exit(ErrorMsg)

def Open_Win_Explorer_and_Select_Fil(filepath):
    # harvested from: https://stackoverflow.com/questions/281888/open-explorer-on-a-file
    Popen_arg = 'explorer /select,"' + filepath + '"'  # str() is redundant here also
    subprocess.Popen(Popen_arg)

if __name__ == '__main__':
    filepath = fn_get_txt_sysarg()
    Open_Win_Explorer_and_Select_Fil(filepath)
--------------------------------------------------------------------------------
/src2/explorerVideo.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

import gi
gi.require_version("Gtk", "3.0")  # PyGObject requires selecting a GTK version
from gi.repository import Gtk
import os

class FileChooserWindow(Gtk.Window):

    def __init__(self):
        Gtk.Window.__init__(self, title="FileChooser Example")

        box = Gtk.Box(spacing=6)
        self.add(box)

        button1 = Gtk.Button("Choose File")
        button1.connect("clicked", self.on_file_clicked)
        box.add(button1)

        button2 = Gtk.Button("Choose Folder")
        button2.connect("clicked", self.on_folder_clicked)
        box.add(button2)

    def on_file_clicked(self, widget):
        dialog = Gtk.FileChooserDialog("Please choose a file", self,
                                       Gtk.FileChooserAction.OPEN,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))

        self.add_filters(dialog)

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            print("Open clicked")
            print("File selected: " + dialog.get_filename())
            # Play the chosen file in VLC, then return to the control panel.
            video = "vlc " + dialog.get_filename()
            os.system(video)
            os.system("python button.py")
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")

        dialog.destroy()

    def add_filters(self, dialog):
        filter_text = Gtk.FileFilter()
        filter_text.set_name("Text files")
        filter_text.add_mime_type("text/plain")
        dialog.add_filter(filter_text)

        filter_py = Gtk.FileFilter()
        filter_py.set_name("Python files")
        filter_py.add_mime_type("text/x-python")
        dialog.add_filter(filter_py)

        filter_any = Gtk.FileFilter()
        filter_any.set_name("Any files")
        filter_any.add_pattern("*")
        dialog.add_filter(filter_any)

    def on_folder_clicked(self, widget):
        dialog = Gtk.FileChooserDialog("Please choose a folder", self,
                                       Gtk.FileChooserAction.SELECT_FOLDER,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        "Select", Gtk.ResponseType.OK))
        dialog.set_default_size(800, 400)

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            print("Select clicked")
            print("Folder selected: " + dialog.get_filename())
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")

        dialog.destroy()

win = FileChooserWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
--------------------------------------------------------------------------------
/src2/fe.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

import gi
gi.require_version("Gtk", "3.0")  # PyGObject requires selecting a GTK version
from gi.repository import Gtk
import os

class FileChooserWindow(Gtk.Window):

    def __init__(self):
        Gtk.Window.__init__(self, title="FileChooser Example")

        box = Gtk.Box(spacing=6)
        self.add(box)

        button1 = Gtk.Button("Choose File")
        button1.connect("clicked", self.on_file_clicked)
        box.add(button1)

        button2 = Gtk.Button("Choose Folder")
        button2.connect("clicked", self.on_folder_clicked)
        box.add(button2)

    def on_file_clicked(self, widget):
        dialog = Gtk.FileChooserDialog("Please choose a file", self,
                                       Gtk.FileChooserAction.OPEN,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))

        self.add_filters(dialog)

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            print("Open clicked")
            print("File selected: " + dialog.get_filename())
            # Play the chosen file in VLC, then return to the control panel.
            video = "vlc " + dialog.get_filename()
            os.system(video)
            os.system("python button.py")
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")

        dialog.destroy()

    def add_filters(self, dialog):
        filter_text = Gtk.FileFilter()
        filter_text.set_name("Text files")
        filter_text.add_mime_type("text/plain")
        dialog.add_filter(filter_text)

        filter_py = Gtk.FileFilter()
        filter_py.set_name("Python files")
        filter_py.add_mime_type("text/x-python")
        dialog.add_filter(filter_py)

        filter_any = Gtk.FileFilter()
        filter_any.set_name("Any files")
        filter_any.add_pattern("*")
        dialog.add_filter(filter_any)

    def on_folder_clicked(self, widget):
        dialog = Gtk.FileChooserDialog("Please choose a folder", self,
                                       Gtk.FileChooserAction.SELECT_FOLDER,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        "Select", Gtk.ResponseType.OK))
        dialog.set_default_size(800, 400)

        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            print("Select clicked")
            print("Folder selected: " + dialog.get_filename())
        elif response == Gtk.ResponseType.CANCEL:
            print("Cancel clicked")

        dialog.destroy()

win = FileChooserWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
--------------------------------------------------------------------------------
/src2/get_points.py:
--------------------------------------------------------------------------------
# Import the required modules
import cv2
import argparse

def run(im, multi=False):
    im_disp = im.copy()
    im_draw = im.copy()
    window_name = "Select objects to be tracked here."
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, im_draw)

    # Lists containing the top-left and bottom-right points used to crop the image.
    pts_1 = []
    pts_2 = []

    rects = []
    run.mouse_down = False

    def callback(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            if multi == False and len(pts_2) == 1:
                print("WARN: Cannot select another object in SINGLE OBJECT TRACKING MODE.")
                print("Delete the previously selected object using key `d` to mark a new location.")
                return
            run.mouse_down = True
            pts_1.append((x, y))
        elif event == cv2.EVENT_LBUTTONUP and run.mouse_down == True:
            run.mouse_down = False
            pts_2.append((x, y))
            print("Object selected at [{}, {}]".format(pts_1[-1], pts_2[-1]))
        elif event == cv2.EVENT_MOUSEMOVE and run.mouse_down == True:
            im_draw = im.copy()
            cv2.rectangle(im_draw, pts_1[-1], (x, y), (255, 255, 255), 3)
            cv2.imshow(window_name, im_draw)

    print("Press and release mouse around the object to be tracked. \n You can also select multiple objects.")
    cv2.setMouseCallback(window_name, callback)

    print("Press key `p` to continue with the selected points.")
    print("Press key `d` to discard the last object selected.")
    print("Press key `q` to quit the program.")

    while True:
        # Draw the rectangular boxes on the image
        window_name_2 = "Objects to be tracked."
        for pt1, pt2 in zip(pts_1, pts_2):
            rects.append([pt1[0], pt2[0], pt1[1], pt2[1]])
            cv2.rectangle(im_disp, pt1, pt2, (255, 255, 255), 3)
        # Display the cropped images
        cv2.namedWindow(window_name_2, cv2.WINDOW_NORMAL)
        cv2.imshow(window_name_2, im_disp)
        key = cv2.waitKey(30)
        if key == ord('p'):
            # Press key `p` to return the selected points
            cv2.destroyAllWindows()
            point = [(tl + br) for tl, br in zip(pts_1, pts_2)]
            corrected_point = check_point(point)
            return corrected_point
        elif key == ord('q'):
            # Press key `q` to quit the program
            print("Quitting without saving.")
            exit()
        elif key == ord('d'):
            # Press key `d` to delete the last rectangular region
            if run.mouse_down == False and pts_1:
                print("Object deleted at [{}, {}]".format(pts_1[-1], pts_2[-1]))
                pts_1.pop()
                pts_2.pop()
                im_disp = im.copy()
            else:
                print("No object to delete.")
    cv2.destroyAllWindows()
    point = [(tl + br) for tl, br in zip(pts_1, pts_2)]
    corrected_point = check_point(point)
    return corrected_point

def check_point(points):
    # Reorder each (x1, y1, x2, y2) so the top-left corner comes first.
    out = []
    for point in points:
        # to find min and max x coordinates
        if point[0] < point[2]:
            minx = point[0]
            maxx = point[2]
        else:
            minx = point[2]
            maxx = point[0]
        # to find min and max y coordinates
        if point[1] < point[3]:
            miny = point[1]
            maxy = point[3]
        else:
            miny = point[3]
            maxy = point[1]
        out.append((minx, miny, maxx, maxy))
    return out

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Select objects to be tracked in an image.")
    parser.add_argument("image", help="Path to the image")
    args = parser.parse_args()
    points = run(cv2.imread(args.image), multi=True)
    print("Points selected --> ", points)
--------------------------------------------------------------------------------
/src2/kernal.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 3 00:07:16 2019

@author: samanvitha
"""

import face_recognition
import cv2
import numpy as np
import os
from skimage.measure import compare_ssim as ssim

acc = 1
video_capture = cv2.VideoCapture(0)

vinni_image = face_recognition.load_image_file("/home/ise/app/known/vinni.jpg")
vinni_face_encoding = face_recognition.face_encodings(vinni_image)[0]

shan_image = face_recognition.load_image_file("/home/ise/app/known/shan.jpg")
shan_face_encoding = face_recognition.face_encodings(shan_image)[0]

deep_image = face_recognition.load_image_file("/home/ise/app/known/deep.jpg")
deep_face_encoding = face_recognition.face_encodings(deep_image)[0]

#sam_image = face_recognition.load_image_file("/home/samanvitha/1 SIH/data/train/Samanvitha/266.jpg")
#sam_face_encoding = face_recognition.face_encodings(sam_image)[0]

dikshit_image = face_recognition.load_image_file("/home/ise/app/known/dikshit.jpg")
dikshit_face_encoding = face_recognition.face_encodings(dikshit_image)[0]

madam = face_recognition.load_image_file("/home/ise/app/known/madam.jpg")
madam_encoding = face_recognition.face_encodings(madam)[0]

#sir = face_recognition.load_image_file("/home/ise/app/known/sir.jpg")
#sir_encoding = face_recognition.face_encodings(sir)[0]

#shriya_image = face_recognition.load_image_file("/home/samanvitha/1 SIH/data/train/Shriya/39.jpg")
#shriya_face_encoding = face_recognition.face_encodings(shriya_image)[0]

known_face_encodings = [vinni_face_encoding, shan_face_encoding, deep_face_encoding, dikshit_face_encoding, madam_encoding]
known_face_names = ["vinni", "shan", "deeps", "dikshit", "ma'am"]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
acclist = []

nameList = []
# Keep the handle open so track.py can read the detections from this file.
f = open("demofile.txt", "a+")

while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []

        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
            name = "Unknown"

            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
                acc = ssim(known_face_encodings[first_match_index], face_encoding)
                acc = round(acc, 4)

            face_names.append(name)

    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX

        if name not in nameList:
            # Write a "name left top right bottom" record once per person for track.py.
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            l = [top, right, bottom, left]
            nameList.append(name)
            f.write(name)
            f.write(" ")
            stop = str(top)
            sright = str(right)
            sbottom = str(bottom)
            sleft = str(left)
            f.write(sleft)
            f.write(" ")
            f.write(stop)
            f.write(" ")
            f.write(sright)
            f.write(" ")
            f.write(sbottom)
            f.write("\n")
            f.flush()

        if name == "Unknown":
            acclist = [1]
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        else:
            cv2.putText(frame, name + " " + str(acc), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            acclist.append(acc)

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print(name)
        print(sum(acclist) / len(acclist))
        cv2.destroyAllWindows()
        break

# Hand over to the tracking stage once recognition is done.
print("sam ")
stringg = "/home/ise/app/track.py"
os.system("python " + stringg)
print("sam le ni ")
--------------------------------------------------------------------------------
/src2/main.py:
--------------------------------------------------------------------------------
#main.py
from flask import Flask
from flask import url_for, jsonify, render_template
import os

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/foo', methods=['POST'])
def foo():
    # Run the recognition script.
    os.system("python accucary.py")
    return jsonify({"message": "you're a superstar"})

@app.route('/capture', methods=['POST'])
def capture():
    os.system("python detect_face_live.py")
    return jsonify({"message": "detection phase completed"})

if __name__ == "__main__":
    app.run(port=8080, debug=True)
--------------------------------------------------------------------------------
/src2/track.py:
--------------------------------------------------------------------------------
import face_recognition
import cv2
import pickle
import dlib
import get_points
import os

#video_capture = cv2.VideoCapture("rtsp://admin:admin@10.2.3.163")
video_capture = cv2.VideoCapture(0)
a = 0
ne = 0
l = []
nam = []
nae = []
filepath = 'demofile.txt'

def deleteContent(pfile):
    # Empty the detections file so each entry is consumed only once.
    pfile.seek(0)
    pfile.truncate()
    pfile.seek(0)  # I believe this seek is redundant
    return pfile

# Read the "name left top right bottom" records written by kernal.py;
# open in r+ mode so the file can be truncated afterwards.
with open(filepath, "r+") as fp:
    line = fp.readline()
    cnt = 1
    while line:
        print("Line {}: {}".format(cnt, line.strip()))
        p = list(line.split())
        print(p)
        cnt += 1
        nae.append(p[0])
        pt = (int(p[1]), int(p[2]), int(p[3]), int(p[4]))
        l.append(pt)
        line = fp.readline()
    deleteContent(fp)

print(l)
print(nae)

def track(img, l, nae):
    tracker = [dlib.correlation_tracker() for _ in range(len(l))]
    # Provide the tracker the initial position of each object
    [tracker[i].start_track(img, dlib.rectangle(*rect)) for i, rect in enumerate(l)]

    while True:
        # Read a frame from the device or file
        retval, img = video_capture.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATION :( ")
            exit()
        # Update each tracker
        for i in range(len(tracker)):
            tracker[i].update(img)
            # Get the position of the object, draw a
            # bounding box around it and display it.
            rect = tracker[i].get_position()
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))
            cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
            print("Object {} tracked at [{}, {}] \r".format(i, pt1, pt2))

        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        # Continue until the user presses the ESC key
        if cv2.waitKey(1) == 27:
            break

    # Release the VideoCapture object
    video_capture.release()

ret = True
while ret:
    ret, frame = video_capture.read()
    rgb_frame = frame[:, :, ::-1]

    #a = len(predictions)
    ret = track(frame, l, nae)
--------------------------------------------------------------------------------