├── README.md
├── __init__.py
├── config.py
├── driver
├── __init__.py
├── __init__.pyc
├── cam.py
├── cam.pyc
├── face_search.py
├── face_search.pyc
└── haarcascade_frontalface_alt.xml
├── font
└── fzyy.TTF
├── images
├── black.jpg
├── mark.jpg
├── mask.jpg
└── yellow.png
├── models
├── cnn_deploy.prototxt
└── cnn_iter_3560000.caffemodel
├── tmp
└── buffer
└── ui
├── __init__.py
├── frame.py
├── frame.pyc
├── frame.ui
├── launcher.py
├── launcher_sing_face.py
├── save_face.py
├── save_face.pyc
├── save_face.ui
├── sign_face.py
├── sign_face.pyc
└── sign_face.ui
/README.md:
--------------------------------------------------------------------------------
1 | ## 基于CNN的人脸识别考勤demo
2 |
3 | ### 是什么
4 | - 帮朋友写的一个基于CNN的人脸识别考勤项目
5 | - 可以进行 人脸+姓名录入
6 | - 可以识别已经录入的人脸,返回人脸对应的姓名和相似度
7 |
8 | ### 环境
9 | - python2.7
10 | - linux
11 | - caffe+gpu
12 | - python opencv
13 | - pyqt5
14 | - python pil
15 |
16 | ### 如何运行
17 | - 运行 launcher_sing_face.py, 摄像头录入人脸,标记人脸姓名
18 | - 运行 launcher.py,摄像头识别人脸,并找到与已知人脸相似度最高的人脸的姓名
19 |
20 |
21 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/__init__.py
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
# coding: utf-8

import os


# Absolute path of the project root (the directory containing this file).
BASE_PATH = os.path.dirname(os.path.abspath(__file__))

# Caffe model definition (prototxt) and trained weights live here.
MODEL_PATH = os.path.join(BASE_PATH, "models")

# Enrolled face images, one "<person name>.jpg" per person.
DATA_PATH = os.path.join(BASE_PATH, "data")

# Static UI images (black placeholder, overlay marks, ...).
IMG_PATH = os.path.join(BASE_PATH, "images")

# Frame-size divisor used to derive the minimum face-detection window
# in Cam.get_photo (driver/cam.py).
FACE_POINT = 8

# Scratch directory for the raw-pixel "buffer" file written during enrollment.
BUFFER_DIR = os.path.join(BASE_PATH, "tmp")

# TrueType fonts used to render CJK text onto camera frames.
FONT_PATH = os.path.join(BASE_PATH, "font")

# Cosine-similarity threshold above which a face counts as a match.
TH = 0.7
--------------------------------------------------------------------------------
/driver/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/driver/__init__.py
--------------------------------------------------------------------------------
/driver/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/driver/__init__.pyc
--------------------------------------------------------------------------------
/driver/cam.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | import cv2
4 | import config
5 | import os
6 | import numpy
7 | from PIL import Image, ImageFont, ImageDraw
8 | ttfont = ImageFont.truetype(os.path.join(config.FONT_PATH, "fzyy.TTF"), 26)
9 |
10 |
class Cam(object):
    """Webcam wrapper: grabs frames and detects the first frontal face."""

    # Haar cascade shipped next to this module.
    CONFIG_PATH = "haarcascade_frontalface_alt.xml"

    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.classifier = cv2.CascadeClassifier(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), self.CONFIG_PATH))
        # Translucent overlay pasted behind the caption in draw_text().
        self.mark = Image.open(os.path.join(config.IMG_PATH, "yellow.png"))

    def get_photo(self):
        """Capture one frame from the camera.

        Returns:
            (frame, rect): frame is the BGR image, or None when the camera
            is closed or the read failed; rect is the first detected face as
            an OpenCV (x, y, w, h) rectangle, or None when no face was found.
        """
        if self.cap.isOpened():
            ret, img = self.cap.read()
            if ret:
                h, w = img.shape[:2]
                # minSize is (width, height); the original passed the
                # height-derived value first, swapping the two.
                min_size = (int(w / config.FACE_POINT), int(h / config.FACE_POINT))
                face_rects = self.classifier.detectMultiScale(
                    img, 1.2, 2, cv2.CASCADE_SCALE_IMAGE, min_size)
                if len(face_rects) > 0:
                    return img, face_rects[0]
                return img, None
        return None, None

    def draw_text(self, img, face_rects, text=u"welcome admin"):
        """Paste the highlight overlay around *face_rects* and draw *text*.

        Args:
            img: BGR frame (not modified; a new frame is returned).
            face_rects: OpenCV (x, y, w, h) face rectangle.
            text: caption rendered in red above the face.

        Returns:
            A new BGR frame with the overlay and caption drawn.
        """
        # detectMultiScale returns (x, y, w, h); the original unpacked the
        # last two in the wrong order, swapping width and height.
        x, y, w, h = face_rects
        image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(image)
        out = self.mark.resize((w + 50, h + 20))
        image.paste(out, (x - 20, y - 15), mask=out)
        draw.text((int(x + 20), int(y - 20)), text, fill="red", font=ttfont)
        return cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)

    def show_image(self, image):
        # Placeholder; display is handled by the Qt UI instead.
        pass
48 |
49 |
cams = Cam()  # module-level singleton; importing this module opens the camera


if __name__ == "__main__":
    # Standalone preview loop: show raw frames until the camera read
    # fails or the user presses 'q'.
    while True:
        photo, _face = cams.get_photo()
        if photo is None:
            break
        cv2.imshow("img", photo)
        if cv2.waitKey(1) == ord('q'):
            break
--------------------------------------------------------------------------------
/driver/cam.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/driver/cam.pyc
--------------------------------------------------------------------------------
/driver/face_search.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import config
4 | import caffe
5 | import cv2
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | import scipy
9 | import re
10 |
11 | cnn_model = os.path.join(config.MODEL_PATH, "cnn_iter_3560000.caffemodel")
12 | pro_txt = os.path.join(config.MODEL_PATH, "cnn_deploy.prototxt")
13 |
14 |
def load_cnn_net():
    """Return the face-feature CNN loaded from pro_txt/cnn_model (CPU, test phase)."""
    caffe.set_mode_cpu()
    return caffe.Net(pro_txt, cnn_model, caffe.TEST)
19 |
20 |
def load_image_paths(p):
    """Yield the full path of every entry directly inside directory *p*.

    Yields nothing when *p* is not a directory.
    """
    if os.path.isdir(p):
        for name in os.listdir(p):
            # Join against *p* itself (the original joined against
            # config.DATA_PATH, ignoring its own argument) and drop the
            # dead `res` accumulator, which a generator can never return.
            yield os.path.join(p, name)
29 |
30 |
def load_feats(net, images):
    """Extract the 256-d 'eltwise_fc1' feature of every image in *images*.

    Args:
        net: loaded caffe net with a 'data' input and 'eltwise_fc1' output.
        images: iterable of face image file paths.

    Returns:
        (feats, inp): feats is a float32 array of shape (len(images), 256);
        inp is the list of preprocessed 128x128 grayscale images.
    """
    rows = []
    inp = []
    for image in images:
        face = cv2.imread(image, 0)  # 0 -> read as grayscale
        face = cv2.resize(face, (128, 128), interpolation=cv2.INTER_CUBIC)
        inp.append(face)
        blob = face[np.newaxis, np.newaxis, :, :] / 255.0
        net.blobs['data'].reshape(*blob.shape)
        net.blobs['data'].data[...] = blob
        net.forward()
        # Copy: the blob's buffer is overwritten by the next forward pass.
        rows.append(net.blobs['eltwise_fc1'].data.copy())
    # Size the result to the input instead of the fixed (100, 256) buffer
    # the original used, which raised IndexError past 100 images and
    # zero-padded below that.
    if rows:
        feats = np.vstack(rows).astype(np.float32)
    else:
        feats = np.zeros((0, 256), dtype=np.float32)
    return feats, inp
46 |
47 |
def load_feat(image, net):
    """Extract the 256-d feature for a single grayscale face image.

    Args:
        image: 2-D grayscale face crop (any size; resized to 128x128).
        net: loaded caffe net with a 'data' input and 'eltwise_fc1' output.

    Returns:
        The 'eltwise_fc1' blob holding the feature.  This is a view into
        the net's buffer; copy it before the next forward pass if kept.
    """
    # Removed the no-op `net.blobs['data'].data.shape` statement and the
    # unused `out` binding; renamed `input` so it no longer shadows the
    # builtin.
    face = cv2.resize(image, (128, 128), interpolation=cv2.INTER_CUBIC)
    blob = face[np.newaxis, np.newaxis, :, :] / 255.0
    net.blobs['data'].reshape(*blob.shape)
    net.blobs['data'].data[...] = blob
    net.forward()
    return net.blobs['eltwise_fc1'].data
58 |
59 |
def search_one(feat, feats, images):
    """Return the enrolled face most similar to *feat* by cosine similarity.

    Args:
        feat: probe feature vector (256-d).
        feats: array whose row i is the feature of images[i].
        images: list of enrolled image paths; the path's stem is the
            person's name (e.g. ".../bob.jpg" -> "bob").

    Returns:
        (max_s, max_item, name): best similarity in [-1, 1], its index,
        and the matching person's name.

    Raises:
        ValueError: when *images* is empty (the original indexed
        images[None] and died with an opaque TypeError).
    """
    if not images:
        raise ValueError("no enrolled faces to search")

    # `import scipy` alone (as done at module level) does not load
    # scipy.spatial; import the function explicitly so the lookup cannot
    # fail at call time.
    from scipy.spatial.distance import cosine

    max_s = None
    max_item = 0
    for index in range(len(images)):
        similar = 1.0 - cosine(feats[index, :], feat)
        # Track the best row even when every similarity is <= 0; the
        # original started at 0 and could finish with max_item = None.
        if max_s is None or similar > max_s:
            max_s = similar
            max_item = index

    return max_s, max_item, re.split(r"[\./]", images[max_item])[-2]
73 |
# Import-time side effects: load the CNN once and precompute the feature
# matrix for every enrolled face image in config.DATA_PATH.
net = load_cnn_net()
images = list(load_image_paths(config.DATA_PATH))
feats, inps =load_feats(net, images)
77 |
78 |
--------------------------------------------------------------------------------
/driver/face_search.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/driver/face_search.pyc
--------------------------------------------------------------------------------
/font/fzyy.TTF:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/font/fzyy.TTF
--------------------------------------------------------------------------------
/images/black.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/images/black.jpg
--------------------------------------------------------------------------------
/images/mark.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/images/mark.jpg
--------------------------------------------------------------------------------
/images/mask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/images/mask.jpg
--------------------------------------------------------------------------------
/images/yellow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/images/yellow.png
--------------------------------------------------------------------------------
/models/cnn_deploy.prototxt:
--------------------------------------------------------------------------------
1 | name: "LCNN_net"
2 | input: "data"
3 | input_dim: 1
4 | input_dim: 1
5 | input_dim: 128
6 | input_dim: 128
7 | layer{
8 | name: "conv1"
9 | type: "Convolution"
10 | bottom: "data"
11 | top: "conv1"
12 | param{
13 | lr_mult:1
14 | decay_mult:1
15 | }
16 | param{
17 | lr_mult:2
18 | decay_mult:0
19 | }
20 | convolution_param {
21 | num_output: 96
22 | kernel_size: 5
23 | stride: 1
24 | pad: 2
25 | weight_filler {
26 | type: "xavier"
27 | }
28 | bias_filler {
29 | type: "constant"
30 | value: 0.1
31 | }
32 | }
33 | }
34 | layer{
35 | name: "slice1"
36 | type:"Slice"
37 | slice_param {
38 | slice_dim: 1
39 | }
40 | bottom: "conv1"
41 | top: "slice1_1"
42 | top: "slice1_2"
43 | }
44 | layer{
45 | name: "etlwise1"
46 | type: "Eltwise"
47 | bottom: "slice1_1"
48 | bottom: "slice1_2"
49 | top: "eltwise1"
50 | eltwise_param {
51 | operation: MAX
52 | }
53 | }
54 | layer{
55 | name: "pool1"
56 | type: "Pooling"
57 | pooling_param {
58 | pool: MAX
59 | kernel_size: 2
60 | stride: 2
61 | }
62 | bottom: "eltwise1"
63 | top: "pool1"
64 | }
65 |
66 | layer{
67 | name: "conv2a"
68 | type: "Convolution"
69 | bottom: "pool1"
70 | top: "conv2a"
71 | param{
72 | lr_mult:1
73 | decay_mult:1
74 | }
75 | param{
76 | lr_mult:2
77 | decay_mult:0
78 | }
79 | convolution_param {
80 | num_output: 96
81 | kernel_size: 1
82 | stride: 1
83 | weight_filler {
84 | type: "xavier"
85 | }
86 | bias_filler {
87 | type: "constant"
88 | value: 0.1
89 | }
90 | }
91 | }
92 | layer{
93 | name: "slice2a"
94 | type:"Slice"
95 | slice_param {
96 | slice_dim: 1
97 | }
98 | bottom: "conv2a"
99 | top: "slice2a_1"
100 | top: "slice2a_2"
101 | }
102 | layer{
103 | name: "etlwise2a"
104 | type: "Eltwise"
105 | bottom: "slice2a_1"
106 | bottom: "slice2a_2"
107 | top: "eltwise2a"
108 | eltwise_param {
109 | operation: MAX
110 | }
111 | }
112 |
113 | layer{
114 | name: "conv2"
115 | type: "Convolution"
116 | bottom: "eltwise2a"
117 | top: "conv2"
118 | param{
119 | lr_mult:1
120 | decay_mult:1
121 | }
122 | param{
123 | lr_mult:2
124 | decay_mult:0
125 | }
126 | convolution_param {
127 | num_output: 192
128 | kernel_size: 3
129 | stride: 1
130 | pad: 1
131 | weight_filler {
132 | type: "xavier"
133 | }
134 | bias_filler {
135 | type: "constant"
136 | value: 0.1
137 | }
138 | }
139 |
140 | }
141 | layer{
142 | name: "slice2"
143 | type:"Slice"
144 | slice_param {
145 | slice_dim: 1
146 | }
147 | bottom: "conv2"
148 | top: "slice2_1"
149 | top: "slice2_2"
150 | }
151 | layer{
152 | name: "etlwise2"
153 | type: "Eltwise"
154 | bottom: "slice2_1"
155 | bottom: "slice2_2"
156 | top: "eltwise2"
157 | eltwise_param {
158 | operation: MAX
159 | }
160 | }
161 | layer{
162 | name: "pool2"
163 | type: "Pooling"
164 | pooling_param {
165 | pool: MAX
166 | kernel_size: 2
167 | stride: 2
168 | }
169 | bottom: "eltwise2"
170 | top: "pool2"
171 | }
172 |
173 | layer{
174 | name: "conv3a"
175 | type: "Convolution"
176 | bottom: "pool2"
177 | top: "conv3a"
178 | param{
179 | lr_mult:1
180 | decay_mult:1
181 | }
182 | param{
183 | lr_mult:2
184 | decay_mult:0
185 | }
186 | convolution_param {
187 | num_output: 192
188 | kernel_size: 1
189 | stride: 1
190 | weight_filler {
191 | type: "xavier"
192 | }
193 | bias_filler {
194 | type: "constant"
195 | value: 0.1
196 | }
197 | }
198 | }
199 | layer{
200 | name: "slice3a"
201 | type:"Slice"
202 | slice_param {
203 | slice_dim: 1
204 | }
205 | bottom: "conv3a"
206 | top: "slice3a_1"
207 | top: "slice3a_2"
208 | }
209 | layer{
210 | name: "etlwise3a"
211 | type: "Eltwise"
212 | bottom: "slice3a_1"
213 | bottom: "slice3a_2"
214 | top: "eltwise3a"
215 | eltwise_param {
216 | operation: MAX
217 | }
218 | }
219 |
220 | layer{
221 | name: "conv3"
222 | type: "Convolution"
223 | bottom: "eltwise3a"
224 | top: "conv3"
225 | param{
226 | lr_mult:1
227 | decay_mult:1
228 | }
229 | param{
230 | lr_mult:2
231 | decay_mult:0
232 | }
233 | convolution_param {
234 | num_output: 384
235 | kernel_size: 3
236 | stride: 1
237 | pad: 1
238 | weight_filler {
239 | type: "xavier"
240 | }
241 | bias_filler {
242 | type: "constant"
243 | value: 0.1
244 | }
245 | }
246 | }
247 | layer{
248 | name: "slice3"
249 | type:"Slice"
250 | slice_param {
251 | slice_dim: 1
252 | }
253 | bottom: "conv3"
254 | top: "slice3_1"
255 | top: "slice3_2"
256 | }
257 | layer{
258 | name: "etlwise3"
259 | type: "Eltwise"
260 | bottom: "slice3_1"
261 | bottom: "slice3_2"
262 | top: "eltwise3"
263 | eltwise_param {
264 | operation: MAX
265 | }
266 | }
267 | layer{
268 | name: "pool3"
269 | type: "Pooling"
270 | pooling_param {
271 | pool: MAX
272 | kernel_size: 2
273 | stride: 2
274 | }
275 | bottom: "eltwise3"
276 | top: "pool3"
277 | }
278 |
279 | layer{
280 | name: "conv4a"
281 | type: "Convolution"
282 | bottom: "pool3"
283 | top: "conv4a"
284 | param{
285 | lr_mult:1
286 | decay_mult:1
287 | }
288 | param{
289 | lr_mult:2
290 | decay_mult:0
291 | }
292 | convolution_param{
293 | num_output: 384
294 | kernel_size: 1
295 | stride: 1
296 | weight_filler{
297 | type:"xavier"
298 | }
299 | bias_filler{
300 | type: "constant"
301 | value: 0.1
302 | }
303 | }
304 | }
305 | layer{
306 | name: "slice4a"
307 | type:"Slice"
308 | slice_param {
309 | slice_dim: 1
310 | }
311 | bottom: "conv4a"
312 | top: "slice4a_1"
313 | top: "slice4a_2"
314 | }
315 | layer{
316 | name: "etlwise4a"
317 | type: "Eltwise"
318 | bottom: "slice4a_1"
319 | bottom: "slice4a_2"
320 | top: "eltwise4a"
321 | eltwise_param {
322 | operation: MAX
323 | }
324 | }
325 | layer{
326 | name: "conv4"
327 | type: "Convolution"
328 | bottom: "eltwise4a"
329 | top: "conv4"
330 | param{
331 | lr_mult:1
332 | decay_mult:1
333 | }
334 | param{
335 | lr_mult:2
336 | decay_mult:0
337 | }
338 | convolution_param{
339 | num_output: 256
340 | kernel_size: 3
341 | stride: 1
342 | pad: 1
343 | weight_filler{
344 | type:"xavier"
345 | }
346 | bias_filler{
347 | type: "constant"
348 | value: 0.1
349 | }
350 | }
351 | }
352 | layer{
353 | name: "slice4"
354 | type:"Slice"
355 | slice_param {
356 | slice_dim: 1
357 | }
358 | bottom: "conv4"
359 | top: "slice4_1"
360 | top: "slice4_2"
361 | }
362 | layer{
363 | name: "etlwise4"
364 | type: "Eltwise"
365 | bottom: "slice4_1"
366 | bottom: "slice4_2"
367 | top: "eltwise4"
368 | eltwise_param {
369 | operation: MAX
370 | }
371 | }
372 |
373 | layer{
374 | name: "conv5a"
375 | type: "Convolution"
376 | bottom: "eltwise4"
377 | top: "conv5a"
378 | param{
379 | lr_mult:1
380 | decay_mult:1
381 | }
382 | param{
383 | lr_mult:2
384 | decay_mult:0
385 | }
386 | convolution_param{
387 | num_output: 256
388 | kernel_size: 1
389 | stride: 1
390 | weight_filler{
391 | type:"xavier"
392 | }
393 | bias_filler{
394 | type: "constant"
395 | value: 0.1
396 | }
397 | }
398 | }
399 | layer{
400 | name: "slice5a"
401 | type:"Slice"
402 | slice_param {
403 | slice_dim: 1
404 | }
405 | bottom: "conv5a"
406 | top: "slice5a_1"
407 | top: "slice5a_2"
408 | }
409 | layer{
410 | name: "etlwise5a"
411 | type: "Eltwise"
412 | bottom: "slice5a_1"
413 | bottom: "slice5a_2"
414 | top: "eltwise5a"
415 | eltwise_param {
416 | operation: MAX
417 | }
418 | }
419 | layer{
420 | name: "conv5"
421 | type: "Convolution"
422 | bottom: "eltwise5a"
423 | top: "conv5"
424 | param{
425 | lr_mult:1
426 | decay_mult:1
427 | }
428 | param{
429 | lr_mult:2
430 | decay_mult:0
431 | }
432 | convolution_param{
433 | num_output: 256
434 | kernel_size: 3
435 | stride: 1
436 | pad: 1
437 | weight_filler{
438 | type:"xavier"
439 | }
440 | bias_filler{
441 | type: "constant"
442 | value: 0.1
443 | }
444 | }
445 | }
446 | layer{
447 | name: "slice5"
448 | type:"Slice"
449 | slice_param {
450 | slice_dim: 1
451 | }
452 | bottom: "conv5"
453 | top: "slice5_1"
454 | top: "slice5_2"
455 | }
456 | layer{
457 | name: "etlwise5"
458 | type: "Eltwise"
459 | bottom: "slice5_1"
460 | bottom: "slice5_2"
461 | top: "eltwise5"
462 | eltwise_param {
463 | operation: MAX
464 | }
465 | }
466 |
467 | layer{
468 | name: "pool4"
469 | type: "Pooling"
470 | pooling_param {
471 | pool: MAX
472 | kernel_size: 2
473 | stride: 2
474 | }
475 | bottom: "eltwise5"
476 | top: "pool4"
477 | }
478 |
479 | layer{
480 | name: "fc1"
481 | type: "InnerProduct"
482 | bottom: "pool4"
483 | top: "fc1"
484 | param{
485 | lr_mult:1
486 | decay_mult:1
487 | }
488 | param{
489 | lr_mult:2
490 | decay_mult:0
491 | }
492 | inner_product_param {
493 | num_output: 512
494 | weight_filler {
495 | type: "xavier"
496 | }
497 | bias_filler {
498 | type: "constant"
499 | value: 0.1
500 | }
501 | }
502 | }
503 | layer{
504 | name: "slice_fc1"
505 | type:"Slice"
506 | slice_param {
507 | slice_dim: 1
508 | }
509 | bottom: "fc1"
510 | top: "slice_fc1_1"
511 | top: "slice_fc1_2"
512 | }
513 | layer{
514 | name: "eltwise_fc1"
515 | type: "Eltwise"
516 | bottom: "slice_fc1_1"
517 | bottom: "slice_fc1_2"
518 | top: "eltwise_fc1"
519 | eltwise_param {
520 | operation: MAX
521 | }
522 | }
523 | # don't need dropout when testing.
524 |
--------------------------------------------------------------------------------
/models/cnn_iter_3560000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/models/cnn_iter_3560000.caffemodel
--------------------------------------------------------------------------------
/tmp/buffer:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/tmp/buffer
--------------------------------------------------------------------------------
/ui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/ui/__init__.py
--------------------------------------------------------------------------------
/ui/frame.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'frame.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.10.1
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 | import os
11 | from config import IMG_PATH
12 |
class Ui_MainWindow(object):
    """Qt-Designer-generated main window for the recognition UI (frame.ui).

    NOTE(review): hand-edited after generation — it loads a placeholder
    pixmap from IMG_PATH and calls MainWindow.show() inside setupUi().
    """

    def setupUi(self, MainWindow):
        """Build the widget tree: greeting label, calendar, clock, video area."""
        MainWindow.setObjectName("MainWindow")

        MainWindow.resize(835, 517)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Greeting / status banner along the bottom of the window.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setEnabled(False)
        self.label.setGeometry(QtCore.QRect(50, 400, 721, 51))


        font = QtGui.QFont()
        font.setFamily("TlwgTypewriter")
        font.setPointSize(24)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        font.setStrikeOut(False)
        font.setKerning(True)
        self.label.setFont(font)
        self.label.setAccessibleName("")
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        # Read-only calendar showing today's date.
        self.calendarWidget = QtWidgets.QCalendarWidget(self.centralwidget)
        self.calendarWidget.setEnabled(False)
        self.calendarWidget.setGeometry(QtCore.QRect(350, 130, 451, 201))
        self.calendarWidget.setNavigationBarVisible(False)
        self.calendarWidget.setDateEditEnabled(False)
        self.calendarWidget.setObjectName("calendarWidget")
        # Read-only clock; refreshed by the owning window (launcher.py).
        self.timeEdit = QtWidgets.QTimeEdit(self.centralwidget)
        self.timeEdit.setEnabled(False)
        self.timeEdit.setGeometry(QtCore.QRect(530, 60, 118, 27))
        self.timeEdit.setObjectName("timeEdit")
        # Camera feed area; starts with a black placeholder image.
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(40, 60, 291, 311))
        self.label_2.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        self.label_2.setScaledContents(True)
        self.label_2.setPixmap(QtGui.QPixmap(os.path.join(IMG_PATH, "black.jpg")))
        MainWindow.setCentralWidget(self.centralwidget)


        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

        MainWindow.show()

        # Last QImage displayed; reassigned each frame by the owning window.
        self.image = None
    def retranslateUi(self, MainWindow):
        """Apply the translated (Chinese) window title and greeting text."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "人脸识别智能考勤"))
        self.label.setText(_translate("MainWindow", "欢迎使用"))
68 |
69 |
70 |
71 |
--------------------------------------------------------------------------------
/ui/frame.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/ui/frame.pyc
--------------------------------------------------------------------------------
/ui/frame.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | MainWindow
4 |
5 |
6 | false
7 |
8 |
9 |
10 | 0
11 | 0
12 | 835
13 | 517
14 |
15 |
16 |
17 | 人脸识别智能考勤
18 |
19 |
20 |
21 |
22 | false
23 |
24 |
25 |
26 | 50
27 | 400
28 | 721
29 | 51
30 |
31 |
32 |
33 |
34 | TlwgTypewriter
35 | 24
36 | 50
37 | false
38 | false
39 | false
40 | true
41 |
42 |
43 |
44 |
45 |
46 |
47 | Qt::LeftToRight
48 |
49 |
50 | 欢迎使用
51 |
52 |
53 | Qt::AlignCenter
54 |
55 |
56 |
57 |
58 | false
59 |
60 |
61 |
62 | 350
63 | 130
64 | 451
65 | 201
66 |
67 |
68 |
69 | false
70 |
71 |
72 | false
73 |
74 |
75 |
76 |
77 | false
78 |
79 |
80 |
81 | 530
82 | 60
83 | 118
84 | 27
85 |
86 |
87 |
88 |
89 |
90 |
91 | 40
92 | 60
93 | 291
94 | 311
95 |
96 |
97 |
98 | false
99 |
100 |
101 | Qt::DefaultContextMenu
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 | toolBar
111 |
112 |
113 | TopToolBarArea
114 |
115 |
116 | false
117 |
118 |
119 |
120 |
121 | toolBar_2
122 |
123 |
124 | TopToolBarArea
125 |
126 |
127 | false
128 |
129 |
130 |
131 |
132 | 登陆
133 |
134 |
135 |
136 |
137 | 设置
138 |
139 |
140 |
141 |
142 | 考勤
143 |
144 |
145 |
146 |
147 |
148 |
149 |
--------------------------------------------------------------------------------
/ui/launcher.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from PyQt5.QtCore import *
4 | from PyQt5.QtWidgets import *
5 | from PyQt5.QtGui import *
6 | from frame import Ui_MainWindow
7 | import time
8 | import sys
9 | from driver.cam import cams
10 | import cv2
11 | from driver.face_search import search_one, load_feat, feats, images, net
12 | import traceback
13 | import config
14 |
15 |
16 | class MainWindow(QMainWindow, Ui_MainWindow):
17 | def __init__(self, parent=None):
18 | super(MainWindow, self).__init__(parent=parent)
19 | self.setupUi(self)
20 | # 定时器
21 | self.timer = QTimer()
22 |
23 | self.timer.setInterval(100)
24 | self.timer.timeout.connect(self.flush_image)
25 | self.timer.start()
26 |
27 | # 时间刷新
28 | self.timer_time = QTimer()
29 |
30 | self.timer_time.setInterval(60000) #60000
31 | self.timer_time.timeout.connect(self.flush_time)
32 | self.timer_time.start()
33 | self.flush_time()
34 | self.cnt_timer = 5 # 延时刷新
35 |
36 | def flush_image(self):
37 | print "timer running!"
38 | photo, face = cams.get_photo()
39 | if photo is not None:
40 | if face is not None:
41 | x, y, h, w = face
42 |
43 | gray = cv2.cvtColor(photo, cv2.COLOR_BGR2GRAY)
44 | roi = gray[y:y + h, x:x + w]
45 | faces = cv2.resize(roi, (128, 128), interpolation=cv2.INTER_CUBIC)
46 | try:
47 | max_s, index, name = search_one(load_feat(faces, net), feats, images)
48 | print "max_s:{max_s}|index:{index}|name:{name}".format(**vars())
49 | except:
50 | print traceback.format_exc()
51 | else:
52 | uname = "no name!"
53 | if max_s > config.TH:
54 | uname = name
55 |
56 | self.cnt_timer = 5
57 | self.label.setText("欢迎您, {uname}".format(uname=uname))
58 | photo = cams.draw_text(photo, face_rects=face, text=u"welcome {} p:{:.2f}".format(unicode(uname, "utf-8"), max_s))
59 | else:
60 |
61 | self.cnt_timer -= 1
62 |
63 | if self.cnt_timer <= 0:
64 | self.label.setText("欢迎使用")
65 | photo = cams.draw_text(photo, face_rects=face, text=u"Sorry {name}".format(name=uname))
66 | else:
67 | self.cnt_timer -= 1
68 |
69 | if self.cnt_timer == 0:
70 | self.label.setText("欢迎使用")
71 |
72 | height, width, bytesPerComponent = photo.shape
73 | bytesPerLine = bytesPerComponent * width
74 | cv2.cvtColor(photo, cv2.COLOR_BGR2RGB, photo)
75 | image = QImage(photo.data, width, height, bytesPerLine, QImage.Format_RGB888)
76 | self.image = image
77 | self.label_2.setPixmap(QPixmap.fromImage(image).scaled(self.label_2.width(), self.label_2.height()))
78 | cv2.waitKey(1)
79 |
80 | else:
81 | print "err"
82 |
83 | def flush_time(self):
84 | now_day = time.strftime("%Y-%m-%d", time.localtime())
85 | self.calendarWidget.setSelectedDate(QDate.fromString(now_day, 'yyyy-MM-dd'))
86 | self.timeEdit.setTime(QTime.currentTime())
87 |
88 |
if __name__ == "__main__":
    # Launch the recognition / attendance window.
    application = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
94 |
95 |
--------------------------------------------------------------------------------
/ui/launcher_sing_face.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 |
3 | from PyQt5.QtCore import *
4 | from PyQt5.QtWidgets import *
5 | from PyQt5.QtGui import *
6 | from sign_face import Ui_SignFaceWindow
7 | from save_face import Ui_Dialog
8 | import time
9 | import sys
10 | from driver.cam import cams
11 | import cv2
12 | import config
13 | import os
14 |
15 |
16 | class MainWindow(QMainWindow, Ui_SignFaceWindow):
17 | def __init__(self, parent=None):
18 | super(MainWindow, self).__init__(parent=parent)
19 | self.setupUi(self)
20 | # 定时器
21 | self.timer = QTimer()
22 |
23 | self.timer.setInterval(100)
24 | self.timer.timeout.connect(self.flush_image)
25 | self.timer.start()
26 | self.face = None
27 |
28 | def flush_image(self):
29 | photo, face = cams.get_photo()
30 | if photo is not None:
31 | if face is not None:
32 | x, y, h, w = face
33 | roi = photo[y:y + h, x:x + w]
34 | img = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
35 | self.face = img
36 |
37 | photo = cams.draw_text(photo, face_rects=face, text=u"scaning")
38 | height, width, bytesPerComponent = photo.shape
39 | bytesPerLine = bytesPerComponent * width
40 | cv2.cvtColor(photo, cv2.COLOR_BGR2RGB, photo)
41 | image = QImage(photo.data, width, height, bytesPerLine, QImage.Format_RGB888)
42 |
43 | self.cam_label.setPixmap(QPixmap.fromImage(image).scaled(self.cam_label.width(), self.cam_label.height()))
44 | cv2.waitKey(1)
45 | else:
46 | print "err"
47 |
48 | def show_save_diglog(self):
49 | if self.face is not None:
50 | save_dialog = SaveDialog(self)
51 | save_dialog.show()
52 |
53 |
class SaveDialog(QDialog, Ui_Dialog):
    """Dialog that previews the captured face and saves it under a name."""

    def __init__(self, parent=None):
        # *parent* is the enrollment MainWindow; parent.face holds the RGB
        # face crop (a numpy array) to preview and save.
        super(SaveDialog, self).__init__(parent=parent)
        self.setupUi(self)

        self.cancel_btn.clicked.connect(self.close)
        self.save_btn.clicked.connect(self.save)

        photo = parent.face
        self.photo = photo
        height, width, bytesPerComponent = photo.shape
        bytesPerLine = bytesPerComponent * width

        # Round-trip the raw pixels through a scratch file; the bytes read
        # back become the QImage's backing buffer.
        with open(os.path.join(config.BUFFER_DIR, "buffer"), "wb") as f:
            photo.tofile(f)

        with open(os.path.join(config.BUFFER_DIR, "buffer"), "rb") as f:
            image = QImage(f.read(), width, height, bytesPerLine, QImage.Format_RGB888)

        self.face_label.setPixmap(QPixmap.fromImage(image).scaled(self.face_label.width(), self.face_label.height()))
        # NOTE(review): QImage built from a buffer does not copy it; keeping
        # this reference presumably keeps the image usable for save_image()
        # — verify the buffer's lifetime.
        self.image = image

    def save(self):
        # Defer the actual save to the event loop via a one-shot timer
        # (start() with no argument uses the default interval).
        timer = QTimer(self)
        timer.setSingleShot(True)
        timer.timeout.connect(self.save_image)
        timer.start()

    def save_image(self):
        # Validate the entered name, then write the face as <name>.jpg into
        # config.DATA_PATH, where face_search picks it up on next startup.
        t = self.name_input.text()
        if len(t) >= 2:
            self.image.save(os.path.join(config.DATA_PATH, t + ".jpg"), "jpg", 100)
            button = QMessageBox.warning(
                self,
                u"提示",
                u"录入成功",
                QMessageBox.Close
            )
            self.close()

        else:
            QMessageBox.warning(
                self,
                u"错误",
                u"姓名长度必须大于1!",
                QMessageBox.Yes
            )
102 |
if __name__ == "__main__":
    # Launch the face-enrollment window.
    application = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
--------------------------------------------------------------------------------
/ui/save_face.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'save_face.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.10.1
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 |
class Ui_Dialog(object):
    """Qt-Designer-generated "save face" dialog (from save_face.ui).

    Widgets: face preview label, name input, save and cancel buttons.
    """

    def setupUi(self, Dialog):
        """Build the dialog's widget tree and apply translations."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(368, 546)
        # Preview of the captured face.
        self.face_label = QtWidgets.QLabel(Dialog)
        self.face_label.setGeometry(QtCore.QRect(70, 70, 231, 241))
        self.face_label.setText("")
        self.face_label.setObjectName("face_label")
        self.save_btn = QtWidgets.QPushButton(Dialog)
        self.save_btn.setGeometry(QtCore.QRect(80, 470, 99, 27))
        self.save_btn.setObjectName("save_btn")
        self.cancel_btn = QtWidgets.QPushButton(Dialog)
        self.cancel_btn.setGeometry(QtCore.QRect(200, 470, 99, 27))
        self.cancel_btn.setObjectName("cancel_btn")

        # Person-name input; the entered text becomes the saved file name.
        self.name_input = QtWidgets.QLineEdit(Dialog)
        self.name_input.setGeometry(QtCore.QRect(180, 360, 113, 27))
        self.name_input.setObjectName("name_input")
        self.label = QtWidgets.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(70, 360, 71, 31))
        self.label.setObjectName("label")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply the translated (Chinese) texts to the dialog widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "录入"))
        self.save_btn.setText(_translate("Dialog", "录入"))
        self.cancel_btn.setText(_translate("Dialog", "取消"))
        self.name_input.setPlaceholderText(_translate("Dialog", "请输入姓名"))
        self.label.setText(_translate("Dialog", " 姓名"))
43 |
44 |
--------------------------------------------------------------------------------
/ui/save_face.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/ui/save_face.pyc
--------------------------------------------------------------------------------
/ui/save_face.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | Dialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 368
10 | 546
11 |
12 |
13 |
14 | 录入
15 |
16 |
17 |
18 |
19 | 70
20 | 70
21 | 231
22 | 241
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 | 80
33 | 470
34 | 99
35 | 27
36 |
37 |
38 |
39 | 录入
40 |
41 |
42 |
43 |
44 |
45 | 200
46 | 470
47 | 99
48 | 27
49 |
50 |
51 |
52 | 取消
53 |
54 |
55 |
56 |
57 |
58 | 180
59 | 360
60 | 113
61 | 27
62 |
63 |
64 |
65 | 请输入姓名
66 |
67 |
68 |
69 |
70 |
71 | 70
72 | 360
73 | 71
74 | 31
75 |
76 |
77 |
78 | 姓名
79 |
80 |
81 |
82 |
83 |
84 |
85 |
--------------------------------------------------------------------------------
/ui/sign_face.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | # Form implementation generated from reading ui file 'sign_face.ui'
4 | #
5 | # Created by: PyQt5 UI code generator 5.10.1
6 | #
7 | # WARNING! All changes made in this file will be lost!
8 |
9 | from PyQt5 import QtCore, QtGui, QtWidgets
10 | import time
11 |
class Ui_SignFaceWindow(object):
    """UI builder for the face-enrollment main window (ui/sign_face.ui).

    Provides a camera preview label, a capture button and a status bar.
    A launcher subclass is expected to override show_save_diglog() to
    pop up the save dialog after a face has been captured.
    """

    def setupUi(self, SignFaceWindow):
        """Create the window's widgets and wire the capture button."""
        # Capture-in-progress flag; reset by save_dialog().
        self.catching = False
        SignFaceWindow.setObjectName("SignFaceWindow")
        SignFaceWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(SignFaceWindow)
        self.centralwidget.setObjectName("centralwidget")

        # Live camera preview area.
        self.cam_label = QtWidgets.QLabel(self.centralwidget)
        self.cam_label.setGeometry(QtCore.QRect(250, 110, 231, 271))
        self.cam_label.setText("")
        self.cam_label.setObjectName("cam_label")

        # Capture button; the delay spin-box from the .ui file is
        # intentionally disabled in this version.
        self.catch_btn = QtWidgets.QPushButton(self.centralwidget)
        self.catch_btn.setGeometry(QtCore.QRect(320, 470, 99, 27))
        self.catch_btn.setObjectName("catch_btn")
        self.catch_btn.clicked.connect(self.catch_clicked)

        SignFaceWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(SignFaceWindow)
        self.statusbar.setObjectName("statusbar")
        SignFaceWindow.setStatusBar(self.statusbar)

        self.retranslateUi(SignFaceWindow)
        # Auto-connect slots named on_<objectName>_<signal> in subclasses.
        QtCore.QMetaObject.connectSlotsByName(SignFaceWindow)

    def retranslateUi(self, SignFaceWindow):
        """Install the (translatable) user-visible captions."""
        tr = QtCore.QCoreApplication.translate
        SignFaceWindow.setWindowTitle(tr("SignFaceWindow", "人脸数据录入"))
        self.catch_btn.setText(tr("SignFaceWindow", "抓取"))

    def catch_clicked(self):
        """Slot for the capture button.

        Schedules save_dialog() via a parented single-shot QTimer with
        the default 0 ms interval, i.e. it runs on the next event-loop
        turn rather than synchronously inside the click handler.
        """
        deferred = QtCore.QTimer(self)
        deferred.setSingleShot(True)
        deferred.timeout.connect(self.save_dialog)
        deferred.start()

    def save_dialog(self):
        """Show the save dialog, then drop the captured frame and reset state."""
        self.show_save_diglog()
        self.face = None
        self.catching = False

    def show_save_diglog(self):
        """Subclass hook: display the save-face dialog.

        NOTE(review): the 'diglog' spelling is a typo, but it is kept
        because launcher subclasses override this exact name.
        """
        pass
--------------------------------------------------------------------------------
/ui/sign_face.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/reece15/cnn_face_detection/d4274440050ae31f345eaa298ab2477b77c16cae/ui/sign_face.pyc
--------------------------------------------------------------------------------
/ui/sign_face.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | SignFaceWindow
4 |
5 |
6 |
7 | 0
8 | 0
9 | 800
10 | 600
11 |
12 |
13 |
14 | 人脸数据录入
15 |
16 |
17 |
18 |
19 |
20 | 250
21 | 110
22 | 231
23 | 271
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 | 420
34 | 470
35 | 99
36 | 27
37 |
38 |
39 |
40 | 抓取
41 |
42 |
43 |
44 |
45 |
46 | 290
47 | 470
48 | 48
49 | 27
50 |
51 |
52 |
53 | 5
54 |
55 |
56 |
57 |
58 |
59 | 220
60 | 470
61 | 67
62 | 31
63 |
64 |
65 |
66 | 延时
67 |
68 |
69 |
70 |
71 |
72 | 350
73 | 470
74 | 67
75 | 31
76 |
77 |
78 |
79 | 秒
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
--------------------------------------------------------------------------------