├── .DS_Store
├── .idea
│   ├── .gitignore
│   ├── Perception-System-of-Students-Classroom-Performance.iml
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   ├── modules.xml
│   └── vcs.xml
├── Multi_Face_Detection
│   ├── .gitignore
│   ├── README.md
│   ├── infer_Video.py
│   ├── infer_camera.py
│   ├── infer_models
│   │   ├── ONet.pth
│   │   ├── PNet.pth
│   │   └── RNet.pth
│   ├── infer_path.py
│   ├── models
│   │   ├── Loss.py
│   │   ├── ONet.py
│   │   ├── PNet.py
│   │   ├── RNet.py
│   │   └── __init__.py
│   ├── requirements.txt
│   └── utils
│       ├── __init__.py
│       ├── data.py
│       ├── data_format_converter.py
│       └── utils.py
├── README.assets
│   └── image-20230724183025706.png
├── README.md
├── __pycache__
│   └── web.cpython-311.pyc
├── block_1_mutil_face
│   └── mutil_face.py
├── block_2_FaceDetectionandExpressionRecognition
│   ├── .DS_Store
│   ├── README.md
│   └── facial-expression-recognition
│       ├── .DS_Store
│       ├── README.md
│       ├── face_test.py
│       ├── haar_cascade_face_detection.xml
│       ├── network-5Labels.h5
│       ├── requirements.txt
│       └── test.py
├── block_3_fatigue
│   └── .idea
│       ├── .gitignore
│       ├── dbnavigator.xml
│       ├── facevideo.iml
│       ├── inspectionProfiles
│       │   └── profiles_settings.xml
│       ├── misc.xml
│       └── modules.xml
└── web.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/.DS_Store
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/Perception-System-of-Students-Classroom-Performance.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 | <component name="InspectionProjectProfileManager">
2 |   <settings>
3 |     <option name="USE_PROJECT_PROFILE" value="false" />
4 |     <version value="1.0" />
5 |   </settings>
6 | </component>
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="ProjectModuleManager">
4 |     <modules>
5 |       <module fileurl="file://$PROJECT_DIR$/.idea/Perception-System-of-Students-Classroom-Performance.iml" filepath="$PROJECT_DIR$/.idea/Perception-System-of-Students-Classroom-Performance.iml" />
6 |     </modules>
7 |   </component>
8 | </project>
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="VcsDirectoryMappings">
4 |     <mapping directory="$PROJECT_DIR$" vcs="Git" />
5 |   </component>
6 | </project>
--------------------------------------------------------------------------------
/Multi_Face_Detection/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | *.pyc
3 | models/__pycache__/
4 | utils/__pycache__/
5 | dataset/*
6 | !dataset/wider_face_train.txt
7 | create_dataset.py
8 | !dataset/test.jpg
9 | result.jpg
10 | test.py
--------------------------------------------------------------------------------
/Multi_Face_Detection/README.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | MTCNN (Multi-task Convolutional Neural Network) handles face detection and facial-landmark localization jointly. It is a multi-task model for face detection proposed in 2016 by the Shenzhen institute of the Chinese Academy of Sciences, built from three cascaded networks and a candidate-window-plus-classifier design for fast, accurate detection: P-Net quickly generates candidate windows, R-Net filters those candidates with higher precision, and O-Net produces the final bounding boxes and facial landmarks. Like many CNNs for image tasks, it also relies on image pyramids, bounding-box regression, and non-maximum suppression.
4 |
5 |
6 | # Environment
7 | - PyTorch 1.8.1
8 | - Python 3.7
9 |
10 | # Files
11 | - `models/Loss.py`: the loss functions used by MTCNN, covering classification, bounding-box regression, and landmark regression
12 | - `models/PNet.py`: the PNet architecture
13 | - `models/RNet.py`: the RNet architecture
14 | - `models/ONet.py`: the ONet architecture
15 | - `utils/data_format_converter.py`: packs a large set of images into a single file
16 | - `utils/data.py`: training-data reader
17 | - `utils/utils.py`: assorted utility functions
18 | - `infer_path.py`: detects the face boxes and landmarks in an image given its path, and displays them
19 | - `infer_camera.py`: detects the face boxes and landmarks in camera frames and displays them in real time
20 | - `infer_Video.py`: detects the face boxes and landmarks in a video given its path, and displays the results
21 |
22 | # Inference
23 |
24 | - `python3 infer_path.py`: takes an image path, detects the face boxes and landmarks, and displays the result
25 |
26 |
27 | - `python3 infer_camera.py`: captures images from the camera, detects the face boxes and landmarks, and displays the result
28 |
29 | - `python3 infer_Video.py`: takes a video path, detects the face boxes and landmarks, and displays the results; it also creates a `face` folder storing the faces detected each second, splits the video into frames saved under `path_to_save_frames`, and writes an output video annotated with boxes and landmarks, as sketched below
30 |
31 |
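32 | # Pipeline sketch
33 |
34 | The three stages are chained exactly as in `infer_image` (see `infer_path.py`): PNet proposes candidate boxes over an image pyramid, RNet filters them, and ONet outputs the final boxes plus five landmarks. A minimal sketch using the functions defined in `infer_path.py` (the image path is only an example; each stage returns `None` when no face survives, which the real code checks before continuing):
35 |
36 | ```python
37 | im = cv2.imread('dataset/path_test/test.png')
38 | boxes = detect_pnet(im, min_face_size=20, scale_factor=0.79, thresh=0.9)
39 | boxes = detect_rnet(im, boxes, thresh=0.6)
40 | boxes, landmarks = detect_onet(im, boxes, thresh=0.7)
41 | # boxes: [n, 5] holding (x1, y1, x2, y2, score); landmarks: [n, 10] holding five (x, y) pairs
42 | ```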
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_Video.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import cv2
5 | import numpy as np
6 | import torch
7 | import keyboard
8 |
9 | from utils.utils import generate_bbox, py_nms, convert_to_square
10 | from utils.utils import pad, calibrate_box, processed_image
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument('--model_path', type=str, default='infer_models', help='folder containing the PNet, RNet and ONet model files')
14 | args = parser.parse_args()
15 |
16 | device = torch.device("cuda")
17 |
18 | # Load the PNet model
19 | pnet = torch.jit.load(os.path.join(args.model_path, 'PNet.pth'))
20 | pnet.to(device)
21 | softmax_p = torch.nn.Softmax(dim=0)
22 | pnet.eval()
23 |
24 | # Load the RNet model
25 | rnet = torch.jit.load(os.path.join(args.model_path, 'RNet.pth'))
26 | rnet.to(device)
27 | softmax_r = torch.nn.Softmax(dim=-1)
28 | rnet.eval()
29 |
30 | # Load the ONet model
31 | onet = torch.jit.load(os.path.join(args.model_path, 'ONet.pth'))
32 | onet.to(device)
33 | softmax_o = torch.nn.Softmax(dim=-1)
34 | onet.eval()
35 |
36 | # Folder where detected face crops are saved
37 | output_face_folder = 'face'
38 | if not os.path.exists(output_face_folder):
39 | os.makedirs(output_face_folder)
40 |
41 | def save_detected_faces(frame, boxes_c, landmarks, frame_count, fps):
42 | seconds = frame_count / fps
43 | # Create a folder for the faces detected during the current second
44 | frame_face_folder = os.path.join(output_face_folder, f'second_{int(seconds):04d}')
45 | os.makedirs(frame_face_folder, exist_ok=True)
46 |
47 | for i in range(boxes_c.shape[0]):
48 | bbox = boxes_c[i, :4]
49 | corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
50 | face_img = frame[corpbbox[1]:corpbbox[3], corpbbox[0]:corpbbox[2]]
51 | # Check that the crop is not empty
52 | if face_img.size > 0:
53 | # Save the detected face crop
54 | face_filename = os.path.join(frame_face_folder, f"face_{i:02d}.png")
55 | cv2.imwrite(face_filename, face_img)
56 |
57 | # Run PNet inference
58 | def predict_pnet(infer_data):
59 | # Convert the input image to a tensor
60 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
61 | infer_data = torch.unsqueeze(infer_data, dim=0)
62 | # Run the forward pass
63 | cls_prob, bbox_pred, _ = pnet(infer_data)
64 | cls_prob = torch.squeeze(cls_prob)
65 | cls_prob = softmax_p(cls_prob)
66 | bbox_pred = torch.squeeze(bbox_pred)
67 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
68 |
69 |
70 | # Run RNet inference
71 | def predict_rnet(infer_data):
72 | # Convert the input image to a tensor
73 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
74 | # Run the forward pass
75 | cls_prob, bbox_pred, _ = rnet(infer_data)
76 | cls_prob = softmax_r(cls_prob)
77 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
78 |
79 |
80 | # Run ONet inference
81 | def predict_onet(infer_data):
82 | # Convert the input image to a tensor
83 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
84 | # Run the forward pass
85 | cls_prob, bbox_pred, landmark_pred = onet(infer_data)
86 | cls_prob = softmax_o(cls_prob)
87 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy(), landmark_pred.detach().cpu().numpy()
88 |
89 |
90 | # Get the PNet detections
91 | def detect_pnet(im, min_face_size, scale_factor, thresh):
92 | """Filter boxes and landmarks with PNet.
93 | Args:
94 | im: input image, [h, w, 3]
95 | """
96 | net_size = 12
97 | # Scale ratio between the network input and the minimum face size
98 | current_scale = float(net_size) / min_face_size
99 | im_resized = processed_image(im, current_scale)
100 | _, current_height, current_width = im_resized.shape
101 | all_boxes = list()
102 | # Image pyramid
103 | while min(current_height, current_width) > net_size:
104 | # Class scores and box offsets
105 | cls_cls_map, reg = predict_pnet(im_resized)
106 | boxes = generate_bbox(cls_cls_map[1, :, :], reg, current_scale, thresh)
107 | current_scale *= scale_factor  # keep shrinking the image for the next pyramid level
108 | im_resized = processed_image(im, current_scale)
109 | _, current_height, current_width = im_resized.shape
110 |
111 | if boxes.size == 0:
112 | continue
113 | # Non-maximum suppression keeps the boxes with low overlap
114 | keep = py_nms(boxes[:, :5], 0.5, mode='Union')
115 | boxes = boxes[keep]
116 | all_boxes.append(boxes)
117 | if len(all_boxes) == 0:
118 | return None
119 | all_boxes = np.vstack(all_boxes)
120 | # Apply NMS again to the boxes gathered across pyramid levels
121 | keep = py_nms(all_boxes[:, 0:5], 0.7, mode='Union')
122 | all_boxes = all_boxes[keep]
123 | # Box widths and heights
124 | bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
125 | bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
126 | # Box coordinates and scores mapped back to the original image
127 | boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
128 | all_boxes[:, 1] + all_boxes[:, 6] * bbh,
129 | all_boxes[:, 2] + all_boxes[:, 7] * bbw,
130 | all_boxes[:, 3] + all_boxes[:, 8] * bbh,
131 | all_boxes[:, 4]])
132 | boxes_c = boxes_c.T
133 |
134 | return boxes_c
135 |
136 |
137 | # Get the RNet detections
138 | def detect_rnet(im, dets, thresh):
139 | """Filter boxes with RNet.
140 | Args:
141 | im: input image
142 | dets: boxes selected by PNet, absolute coordinates in the original image
143 | Returns:
144 | boxes in absolute coordinates
145 | """
146 | h, w, c = im.shape
147 | # Expand the PNet boxes into enclosing squares to avoid losing information
148 | dets = convert_to_square(dets)
149 | dets[:, 0:4] = np.round(dets[:, 0:4])
150 | # Clip boxes that extend beyond the image
151 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
152 | delete_size = np.ones_like(tmpw) * 20
153 | ones = np.ones_like(tmpw)
154 | zeros = np.zeros_like(tmpw)
155 | num_boxes = np.sum(np.where((np.minimum(tmpw, tmph) >= delete_size), ones, zeros))
156 | cropped_ims = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32)
157 | for i in range(int(num_boxes)):
158 | # Crop each PNet box from the original image, zero-padding the parts outside it
159 | if tmph[i] < 20 or tmpw[i] < 20:
160 | continue
161 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
162 | try:
163 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
164 | img = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_LINEAR)
165 | img = img.transpose((2, 0, 1))
166 | img = (img - 127.5) / 128
167 | cropped_ims[i, :, :, :] = img
168 | except:
169 | continue
170 | cls_scores, reg = predict_rnet(cropped_ims)
171 | cls_scores = cls_scores[:, 1]
172 | keep_inds = np.where(cls_scores > thresh)[0]
173 | if len(keep_inds) > 0:
174 | boxes = dets[keep_inds]
175 | boxes[:, 4] = cls_scores[keep_inds]
176 | reg = reg[keep_inds]
177 | else:
178 | return None
179 |
180 | keep = py_nms(boxes, 0.6, mode='Union')
181 | boxes = boxes[keep]
182 | # Calibrate the cropped boxes, producing RNet face boxes in absolute coordinates of the original image
183 | boxes_c = calibrate_box(boxes, reg[keep])
184 | return boxes_c
185 |
186 |
187 | # Get the ONet predictions
188 | def detect_onet(im, dets, thresh):
189 | """Refine the boxes with ONet; much like the RNet stage, but also returns landmarks."""
190 | h, w, c = im.shape
191 | dets = convert_to_square(dets)
192 | dets[:, 0:4] = np.round(dets[:, 0:4])
193 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
194 | num_boxes = dets.shape[0]
195 | cropped_ims = np.zeros((num_boxes, 3, 48, 48), dtype=np.float32)
196 | for i in range(num_boxes):
197 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
198 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
199 | img = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_LINEAR)
200 | img = img.transpose((2, 0, 1))
201 | img = (img - 127.5) / 128
202 | cropped_ims[i, :, :, :] = img
203 |
204 | cls_scores, reg, landmark = predict_onet(cropped_ims)
205 |
206 | cls_scores = cls_scores[:, 1]
207 | keep_inds = np.where(cls_scores > thresh)[0]
208 | if len(keep_inds) > 0:
209 | boxes = dets[keep_inds]
210 | boxes[:, 4] = cls_scores[keep_inds]
211 | reg = reg[keep_inds]
212 | landmark = landmark[keep_inds]
213 | else:
214 | return None, None
215 |
216 | w = boxes[:, 2] - boxes[:, 0] + 1
217 |
218 | h = boxes[:, 3] - boxes[:, 1] + 1
219 | landmark[:, 0::2] = (np.tile(w, (5, 1)) * landmark[:, 0::2].T + np.tile(boxes[:, 0], (5, 1)) - 1).T
220 | landmark[:, 1::2] = (np.tile(h, (5, 1)) * landmark[:, 1::2].T + np.tile(boxes[:, 1], (5, 1)) - 1).T
221 | boxes_c = calibrate_box(boxes, reg)
222 |
223 | keep = py_nms(boxes_c, 0.6, mode='Minimum')
224 | boxes_c = boxes_c[keep]
225 | landmark = landmark[keep]
226 | return boxes_c, landmark
227 |
228 |
229 | # Run the full cascade on an image
230 | def infer_image(im):
231 | # Run the first stage (PNet)
232 | boxes_c = detect_pnet(im, 20, 0.79, 0.9)
233 | if boxes_c is None:
234 | return None, None
235 | # Run the second stage (RNet)
236 | boxes_c = detect_rnet(im, boxes_c, 0.6)
237 | if boxes_c is None:
238 | return None, None
239 | # Run the third stage (ONet)
240 | boxes_c, landmark = detect_onet(im, boxes_c, 0.7)
241 | if boxes_c is None:
242 | return None, None
243 |
244 | return boxes_c, landmark
245 |
246 |
247 | # Draw the face boxes and landmarks
248 | def draw_face(img, boxes_c, landmarks):
249 | for i in range(boxes_c.shape[0]):
250 | bbox = boxes_c[i, :4]
251 | score = boxes_c[i, 4]
252 | corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
253 | # Draw the face box
254 | cv2.rectangle(img, (corpbbox[0], corpbbox[1]),
255 | (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
256 | # Confidence that this is a face
257 | cv2.putText(img, '{:.2f}'.format(score),
258 | (corpbbox[0], corpbbox[1] - 2),
259 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
260 | # Draw the landmarks
261 | for i in range(landmarks.shape[0]):
262 | for j in range(len(landmarks[i]) // 2):
263 | cv2.circle(img, (int(landmarks[i][2 * j]), int(int(landmarks[i][2 * j + 1]))), 2, (0, 0, 255))
264 | cv2.imshow('result', img)
265 | cv2.waitKey(1)
266 |
267 | def detect_faces_in_video(video_path, output_path,output_folder):
268 |
269 | cap = cv2.VideoCapture(video_path)
270 |
271 | # Video codec settings
272 | fourcc = cv2.VideoWriter_fourcc(*'XVID')
273 | frame_width = int(cap.get(3))
274 | frame_height = int(cap.get(4))
275 | fps = cap.get(cv2.CAP_PROP_FPS)
276 | # print(fps)
277 | out = cv2.VideoWriter(output_path, fourcc, 30.0, (frame_width, frame_height))  # 30.0 is the output frame rate; larger values make the output video play faster
278 |
279 | frame_count = 0
280 | seconds_count = 0
281 | frame_skip = 90  # process one frame every frame_skip (90) frames
282 |
283 | # Make sure the output folder exists
284 | if not os.path.exists(output_folder):
285 | os.makedirs(output_folder)
286 |
287 | while cap.isOpened():
288 | ret, frame = cap.read()
289 | if not ret:
290 | break
291 |
292 | # Detect faces, but only when the frame count is a multiple of frame_skip (90)
293 | if frame_count % frame_skip == 0:
294 | boxes_c, landmarks = infer_image(frame)
295 | if boxes_c is not None:
296 | # Draw the face boxes and landmarks on the frame
297 | for i in range(boxes_c.shape[0]):
298 | bbox = boxes_c[i, :4]
299 | corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
300 | cv2.rectangle(frame, (corpbbox[0], corpbbox[1]), (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
301 | for j in range(len(landmarks[i]) // 2):
302 | cv2.circle(frame, (int(landmarks[i][2 * j]), int(landmarks[i][2 * j + 1])), 2, (0, 0, 255))
303 |
304 | # Save the detected face crops (seconds_count advances in steps of 3, so roughly every three seconds)
305 | if int(frame_count / fps) > seconds_count:
306 | save_detected_faces(frame, boxes_c, landmarks, frame_count, fps)
307 | seconds_count += 3
308 |
309 | frame_filename = os.path.join(output_folder, f"frame_{frame_count:04d}.png")
310 | cv2.imwrite(frame_filename, frame)
311 |
312 | # Write the frame to the output video
313 | out.write(frame)
314 | frame_count += 1
315 |
316 | # # Show the frame
317 | # cv2.imshow('Video', frame)
318 | # if cv2.waitKey(1) & 0xFF == ord('q'):
319 | # break
320 | print(f"Finished converting {frame_count} frames.")
321 | cap.release()
322 | out.release()
323 | cv2.destroyAllWindows()
324 |
325 | def video_to_frames(video_path, output_folder, frame_skip=1):
326 | """
327 | 将视频拆分成帧并保存到指定文件夹
328 | :param video_path: 要拆分的视频路径
329 | :param output_folder: 保存帧的文件夹路径
330 | :param frame_skip: 每隔多少帧保存一帧,默认为1(保存所有帧)
331 | :return: None
332 | """
333 | # Make sure the output folder exists
334 | if not os.path.exists(output_folder):
335 | os.makedirs(output_folder)
336 |
337 | # Open the video
338 | cap = cv2.VideoCapture(video_path)
339 | if not cap.isOpened():
340 | print("Error: Cannot open video.")
341 | return
342 |
343 | # Total number of frames in the video
344 | total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
345 |
346 | print(f"Converting {total_frames} frames from video {video_path} to images...")
347 |
348 | frame_count = 0
349 | while True:
350 | ret, frame = cap.read()
351 | # Exit the loop when no frames remain
352 | if not ret:
353 | break
354 |
355 | # Decide from frame_skip whether to save this frame
356 | if frame_count % frame_skip == 0:
357 | # Save the frame to the output folder
358 | frame_filename = os.path.join(output_folder, f"frame_{frame_count:04d}.png")
359 | cv2.imwrite(frame_filename, frame)
360 |
361 | frame_count += 1
362 |
363 | cap.release()
364 | print(f"Finished converting {frame_count} frames.")
365 |
366 | # def multi_face_Detection(videoUrl ):
367 | # video_path = 'students-full.mp4'
368 | # output_path = ('2video.avi')
369 | # detect_faces_in_video(video_path, output_path)
370 |
371 | if __name__ == '__main__':
372 | video_path = 'dataset/video_test/test.mp4'  # input video path
373 | output_path = 'dataset/video_test/output_testvideo.avi'  # output annotated video
374 | # Folder for the extracted frames
375 | output_folder = 'path_to_save_frames'  # replace with the folder where frames should be saved
376 | detect_faces_in_video(video_path, output_path,output_folder)
377 |
378 | # video_to_frames(output_path, 'output_frames_folder', frame_skip=300)  # split the video into frames and save them; frame_skip: save one frame every frame_skip frames (1 saves every frame)
379 |
380 |
381 |
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_camera.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import cv2
5 | import numpy as np
6 | import torch
7 |
8 | from utils.utils import generate_bbox, py_nms, convert_to_square
9 | from utils.utils import pad, calibrate_box, processed_image
10 |
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument('--model_path', type=str, default='infer_models', help='folder containing the PNet, RNet and ONet model files')
13 | args = parser.parse_args()
14 |
15 | device = torch.device("cuda")
16 |
17 | # Load the PNet model
18 | pnet = torch.jit.load(os.path.join(args.model_path, 'PNet.pth'))
19 | pnet.to(device)
20 | softmax_p = torch.nn.Softmax(dim=0)
21 | pnet.eval()
22 |
23 | # Load the RNet model
24 | rnet = torch.jit.load(os.path.join(args.model_path, 'RNet.pth'))
25 | rnet.to(device)
26 | softmax_r = torch.nn.Softmax(dim=-1)
27 | rnet.eval()
28 |
29 | # Load the ONet model
30 | onet = torch.jit.load(os.path.join(args.model_path, 'ONet.pth'))
31 | onet.to(device)
32 | softmax_o = torch.nn.Softmax(dim=-1)
33 | onet.eval()
34 |
35 |
36 | # Run PNet inference
37 | def predict_pnet(infer_data):
38 | # Convert the input image to a tensor
39 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
40 | infer_data = torch.unsqueeze(infer_data, dim=0)
41 | # Run the forward pass
42 | cls_prob, bbox_pred, _ = pnet(infer_data)
43 | cls_prob = torch.squeeze(cls_prob)
44 | cls_prob = softmax_p(cls_prob)
45 | bbox_pred = torch.squeeze(bbox_pred)
46 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
47 |
48 |
49 | # Run RNet inference
50 | def predict_rnet(infer_data):
51 | # Convert the input image to a tensor
52 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
53 | # Run the forward pass
54 | cls_prob, bbox_pred, _ = rnet(infer_data)
55 | cls_prob = softmax_r(cls_prob)
56 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
57 |
58 |
59 | # Run ONet inference
60 | def predict_onet(infer_data):
61 | # Convert the input image to a tensor
62 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
63 | # Run the forward pass
64 | cls_prob, bbox_pred, landmark_pred = onet(infer_data)
65 | cls_prob = softmax_o(cls_prob)
66 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy(), landmark_pred.detach().cpu().numpy()
67 |
68 |
69 | # Get the PNet detections
70 | def detect_pnet(im, min_face_size, scale_factor, thresh):
71 | """Filter boxes and landmarks with PNet.
72 | Args:
73 | im: input image, [h, w, 3]
74 | """
75 | net_size = 12
76 | # Scale ratio between the network input and the minimum face size
77 | current_scale = float(net_size) / min_face_size
78 | im_resized = processed_image(im, current_scale)
79 | _, current_height, current_width = im_resized.shape
80 | all_boxes = list()
81 | # Image pyramid
82 | while min(current_height, current_width) > net_size:
83 | # Class scores and box offsets
84 | cls_cls_map, reg = predict_pnet(im_resized)
85 | boxes = generate_bbox(cls_cls_map[1, :, :], reg, current_scale, thresh)
86 | current_scale *= scale_factor  # keep shrinking the image for the next pyramid level
87 | im_resized = processed_image(im, current_scale)
88 | _, current_height, current_width = im_resized.shape
89 |
90 | if boxes.size == 0:
91 | continue
92 | # Non-maximum suppression keeps the boxes with low overlap
93 | keep = py_nms(boxes[:, :5], 0.5, mode='Union')
94 | boxes = boxes[keep]
95 | all_boxes.append(boxes)
96 | if len(all_boxes) == 0:
97 | return None
98 | all_boxes = np.vstack(all_boxes)
99 | # Apply NMS again to the boxes gathered across pyramid levels
100 | keep = py_nms(all_boxes[:, 0:5], 0.7, mode='Union')
101 | all_boxes = all_boxes[keep]
102 | # Box widths and heights
103 | bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
104 | bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
105 | # Box coordinates and scores mapped back to the original image
106 | boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
107 | all_boxes[:, 1] + all_boxes[:, 6] * bbh,
108 | all_boxes[:, 2] + all_boxes[:, 7] * bbw,
109 | all_boxes[:, 3] + all_boxes[:, 8] * bbh,
110 | all_boxes[:, 4]])
111 | boxes_c = boxes_c.T
112 |
113 | return boxes_c
114 |
115 |
116 | # Get the RNet detections
117 | def detect_rnet(im, dets, thresh):
118 | """Filter boxes with RNet.
119 | Args:
120 | im: input image
121 | dets: boxes selected by PNet, absolute coordinates in the original image
122 | Returns:
123 | boxes in absolute coordinates
124 | """
125 | h, w, c = im.shape
126 | # Expand the PNet boxes into enclosing squares to avoid losing information
127 | dets = convert_to_square(dets)
128 | dets[:, 0:4] = np.round(dets[:, 0:4])
129 | # Clip boxes that extend beyond the image
130 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
131 | delete_size = np.ones_like(tmpw) * 20
132 | ones = np.ones_like(tmpw)
133 | zeros = np.zeros_like(tmpw)
134 | num_boxes = np.sum(np.where((np.minimum(tmpw, tmph) >= delete_size), ones, zeros))
135 | cropped_ims = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32)
136 | for i in range(int(num_boxes)):
137 | # Crop each PNet box from the original image, zero-padding the parts outside it
138 | if tmph[i] < 20 or tmpw[i] < 20:
139 | continue
140 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
141 | try:
142 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
143 | img = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_LINEAR)
144 | img = img.transpose((2, 0, 1))
145 | img = (img - 127.5) / 128
146 | cropped_ims[i, :, :, :] = img
147 | except:
148 | continue
149 | cls_scores, reg = predict_rnet(cropped_ims)
150 | cls_scores = cls_scores[:, 1]
151 | keep_inds = np.where(cls_scores > thresh)[0]
152 | if len(keep_inds) > 0:
153 | boxes = dets[keep_inds]
154 | boxes[:, 4] = cls_scores[keep_inds]
155 | reg = reg[keep_inds]
156 | else:
157 | return None
158 |
159 | keep = py_nms(boxes, 0.6, mode='Union')
160 | boxes = boxes[keep]
161 | # Calibrate the cropped boxes, producing RNet face boxes in absolute coordinates of the original image
162 | boxes_c = calibrate_box(boxes, reg[keep])
163 | return boxes_c
164 |
165 |
166 | # Get the ONet predictions
167 | def detect_onet(im, dets, thresh):
168 | """Refine the boxes with ONet; much like the RNet stage, but also returns landmarks."""
169 | h, w, c = im.shape
170 | dets = convert_to_square(dets)
171 | dets[:, 0:4] = np.round(dets[:, 0:4])
172 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
173 | num_boxes = dets.shape[0]
174 | cropped_ims = np.zeros((num_boxes, 3, 48, 48), dtype=np.float32)
175 | for i in range(num_boxes):
176 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
177 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
178 | img = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_LINEAR)
179 | img = img.transpose((2, 0, 1))
180 | img = (img - 127.5) / 128
181 | cropped_ims[i, :, :, :] = img
182 |
183 | cls_scores, reg, landmark = predict_onet(cropped_ims)
184 |
185 | cls_scores = cls_scores[:, 1]
186 | keep_inds = np.where(cls_scores > thresh)[0]
187 | if len(keep_inds) > 0:
188 | boxes = dets[keep_inds]
189 | boxes[:, 4] = cls_scores[keep_inds]
190 | reg = reg[keep_inds]
191 | landmark = landmark[keep_inds]
192 | else:
193 | return None, None
194 |
195 | w = boxes[:, 2] - boxes[:, 0] + 1
196 |
197 | h = boxes[:, 3] - boxes[:, 1] + 1
198 | landmark[:, 0::2] = (np.tile(w, (5, 1)) * landmark[:, 0::2].T + np.tile(boxes[:, 0], (5, 1)) - 1).T
199 | landmark[:, 1::2] = (np.tile(h, (5, 1)) * landmark[:, 1::2].T + np.tile(boxes[:, 1], (5, 1)) - 1).T
200 | boxes_c = calibrate_box(boxes, reg)
201 |
202 | keep = py_nms(boxes_c, 0.6, mode='Minimum')
203 | boxes_c = boxes_c[keep]
204 | landmark = landmark[keep]
205 | return boxes_c, landmark
206 |
207 |
208 | # Run the full cascade on an image
209 | def infer_image(im):
210 | # Run the first stage (PNet)
211 | boxes_c = detect_pnet(im, 20, 0.79, 0.9)
212 | if boxes_c is None:
213 | return None, None
214 | # Run the second stage (RNet)
215 | boxes_c = detect_rnet(im, boxes_c, 0.6)
216 | if boxes_c is None:
217 | return None, None
218 | # Run the third stage (ONet)
219 | boxes_c, landmark = detect_onet(im, boxes_c, 0.7)
220 | if boxes_c is None:
221 | return None, None
222 |
223 | return boxes_c, landmark
224 |
225 |
226 | # Draw the face boxes and landmarks
227 | def draw_face(img, boxes_c, landmarks):
228 | for i in range(boxes_c.shape[0]):
229 | bbox = boxes_c[i, :4]
230 | score = boxes_c[i, 4]
231 | corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
232 | # Draw the face box
233 | cv2.rectangle(img, (corpbbox[0], corpbbox[1]),
234 | (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
235 | # Confidence that this is a face
236 | cv2.putText(img, '{:.2f}'.format(score),
237 | (corpbbox[0], corpbbox[1] - 2),
238 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
239 | # Draw the landmarks
240 | for i in range(landmarks.shape[0]):
241 | for j in range(len(landmarks[i]) // 2):
242 | cv2.circle(img, (int(landmarks[i][2 * j]), int(int(landmarks[i][2 * j + 1]))), 2, (0, 0, 255))
243 | cv2.imshow('result', img)
244 | cv2.waitKey(1)
245 |
246 |
247 | if __name__ == '__main__':
248 | cap = cv2.VideoCapture(0)
249 | while True:
250 | ret, img = cap.read()
251 | if ret:
252 | # Detect the face boxes and landmarks in the frame
253 | boxes_c, landmarks = infer_image(img)
254 | # Draw the boxes and landmarks
255 | if boxes_c is not None:
256 | draw_face(img=img, boxes_c=boxes_c, landmarks=landmarks)
257 | else:
258 | cv2.imshow('result', img)
259 | cv2.waitKey(1)
260 |
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_models/ONet.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/Multi_Face_Detection/infer_models/ONet.pth
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_models/PNet.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/Multi_Face_Detection/infer_models/PNet.pth
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_models/RNet.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/Multi_Face_Detection/infer_models/RNet.pth
--------------------------------------------------------------------------------
/Multi_Face_Detection/infer_path.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import cv2
5 | import numpy as np
6 | import torch
7 |
8 | from utils.utils import generate_bbox, py_nms, convert_to_square
9 | from utils.utils import pad, calibrate_box, processed_image
10 |
11 | parser = argparse.ArgumentParser()
12 | parser.add_argument('--model_path', type=str, default='infer_models', help='folder containing the PNet, RNet and ONet model files')
13 | parser.add_argument('--image_path', type=str, default='dataset/path_test/test.png', help='path of the image to run inference on')
14 | args = parser.parse_args()
15 |
16 |
17 | device = torch.device("cuda")
18 |
19 | # Load the PNet model
20 | pnet = torch.jit.load(os.path.join(args.model_path, 'PNet.pth'))
21 | pnet.to(device)
22 | softmax_p = torch.nn.Softmax(dim=0)
23 | pnet.eval()
24 |
25 | # Load the RNet model
26 | rnet = torch.jit.load(os.path.join(args.model_path, 'RNet.pth'))
27 | rnet.to(device)
28 | softmax_r = torch.nn.Softmax(dim=-1)
29 | rnet.eval()
30 |
31 | # Load the ONet model
32 | onet = torch.jit.load(os.path.join(args.model_path, 'ONet.pth'))
33 | onet.to(device)
34 | softmax_o = torch.nn.Softmax(dim=-1)
35 | onet.eval()
36 |
37 |
38 | # Run PNet inference
39 | def predict_pnet(infer_data):
40 | # Convert the input image to a tensor
41 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
42 | infer_data = torch.unsqueeze(infer_data, dim=0)
43 | # Run the forward pass
44 | cls_prob, bbox_pred, _ = pnet(infer_data)
45 | cls_prob = torch.squeeze(cls_prob)
46 | cls_prob = softmax_p(cls_prob)
47 | bbox_pred = torch.squeeze(bbox_pred)
48 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
49 |
50 |
51 | # Run RNet inference
52 | def predict_rnet(infer_data):
53 | # Convert the input image to a tensor
54 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
55 | # Run the forward pass
56 | cls_prob, bbox_pred, _ = rnet(infer_data)
57 | cls_prob = softmax_r(cls_prob)
58 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy()
59 |
60 |
61 | # Run ONet inference
62 | def predict_onet(infer_data):
63 | # Convert the input image to a tensor
64 | infer_data = torch.tensor(infer_data, dtype=torch.float32, device=device)
65 | # Run the forward pass
66 | cls_prob, bbox_pred, landmark_pred = onet(infer_data)
67 | cls_prob = softmax_o(cls_prob)
68 | return cls_prob.detach().cpu().numpy(), bbox_pred.detach().cpu().numpy(), landmark_pred.detach().cpu().numpy()
69 |
70 |
71 | # Get the PNet detections
72 | def detect_pnet(im, min_face_size, scale_factor, thresh):
73 | """Filter boxes and landmarks with PNet.
74 | Args:
75 | im: input image, [h, w, 3]
76 | """
77 | net_size = 12
78 | # Scale ratio between the network input and the minimum face size
79 | current_scale = float(net_size) / min_face_size
80 | im_resized = processed_image(im, current_scale)
81 | _, current_height, current_width = im_resized.shape
82 | all_boxes = list()
83 | # Image pyramid
84 | while min(current_height, current_width) > net_size:
85 | # Class scores and box offsets
86 | cls_cls_map, reg = predict_pnet(im_resized)
87 | boxes = generate_bbox(cls_cls_map[1, :, :], reg, current_scale, thresh)
88 | current_scale *= scale_factor  # keep shrinking the image for the next pyramid level
89 | im_resized = processed_image(im, current_scale)
90 | _, current_height, current_width = im_resized.shape
91 |
92 | if boxes.size == 0:
93 | continue
94 | # Non-maximum suppression keeps the boxes with low overlap
95 | keep = py_nms(boxes[:, :5], 0.5, mode='Union')
96 | boxes = boxes[keep]
97 | all_boxes.append(boxes)
98 | if len(all_boxes) == 0:
99 | return None
100 | all_boxes = np.vstack(all_boxes)
101 | # Apply NMS again to the boxes gathered across pyramid levels
102 | keep = py_nms(all_boxes[:, 0:5], 0.7, mode='Union')
103 | all_boxes = all_boxes[keep]
104 | # Box widths and heights
105 | bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
106 | bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
107 | # Box coordinates and scores mapped back to the original image
108 | boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
109 | all_boxes[:, 1] + all_boxes[:, 6] * bbh,
110 | all_boxes[:, 2] + all_boxes[:, 7] * bbw,
111 | all_boxes[:, 3] + all_boxes[:, 8] * bbh,
112 | all_boxes[:, 4]])
113 | boxes_c = boxes_c.T
114 |
115 | return boxes_c
116 |
117 |
118 | # Get the RNet detections
119 | def detect_rnet(im, dets, thresh):
120 | """Filter boxes with RNet.
121 | Args:
122 | im: input image
123 | dets: boxes selected by PNet, absolute coordinates in the original image
124 | Returns:
125 | boxes in absolute coordinates
126 | """
127 | h, w, c = im.shape
128 | # Expand the PNet boxes into enclosing squares to avoid losing information
129 | dets = convert_to_square(dets)
130 | dets[:, 0:4] = np.round(dets[:, 0:4])
131 | # Clip boxes that extend beyond the image
132 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
133 | delete_size = np.ones_like(tmpw) * 20
134 | ones = np.ones_like(tmpw)
135 | zeros = np.zeros_like(tmpw)
136 | num_boxes = np.sum(np.where((np.minimum(tmpw, tmph) >= delete_size), ones, zeros))
137 | cropped_ims = np.zeros((num_boxes, 3, 24, 24), dtype=np.float32)
138 | for i in range(int(num_boxes)):
139 | # Crop each PNet box from the original image, zero-padding the parts outside it
140 | if tmph[i] < 20 or tmpw[i] < 20:
141 | continue
142 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
143 | try:
144 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
145 | img = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_LINEAR)
146 | img = img.transpose((2, 0, 1))
147 | img = (img - 127.5) / 128
148 | cropped_ims[i, :, :, :] = img
149 | except:
150 | continue
151 | cls_scores, reg = predict_rnet(cropped_ims)
152 | cls_scores = cls_scores[:, 1]
153 | keep_inds = np.where(cls_scores > thresh)[0]
154 | if len(keep_inds) > 0:
155 | boxes = dets[keep_inds]
156 | boxes[:, 4] = cls_scores[keep_inds]
157 | reg = reg[keep_inds]
158 | else:
159 | return None
160 |
161 | keep = py_nms(boxes, 0.4, mode='Union')
162 | boxes = boxes[keep]
163 | # Calibrate the cropped boxes, producing RNet face boxes in absolute coordinates of the original image
164 | boxes_c = calibrate_box(boxes, reg[keep])
165 | return boxes_c
166 |
167 |
168 | # Get the ONet predictions
169 | def detect_onet(im, dets, thresh):
170 | """Refine the boxes with ONet; much like the RNet stage, but also returns landmarks."""
171 | h, w, c = im.shape
172 | dets = convert_to_square(dets)
173 | dets[:, 0:4] = np.round(dets[:, 0:4])
174 | [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(dets, w, h)
175 | num_boxes = dets.shape[0]
176 | cropped_ims = np.zeros((num_boxes, 3, 48, 48), dtype=np.float32)
177 | for i in range(num_boxes):
178 | tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
179 | tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
180 | img = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_LINEAR)
181 | img = img.transpose((2, 0, 1))
182 | img = (img - 127.5) / 128
183 | cropped_ims[i, :, :, :] = img
184 | cls_scores, reg, landmark = predict_onet(cropped_ims)
185 |
186 | cls_scores = cls_scores[:, 1]
187 | keep_inds = np.where(cls_scores > thresh)[0]
188 | if len(keep_inds) > 0:
189 | boxes = dets[keep_inds]
190 | boxes[:, 4] = cls_scores[keep_inds]
191 | reg = reg[keep_inds]
192 | landmark = landmark[keep_inds]
193 | else:
194 | return None, None
195 |
196 | w = boxes[:, 2] - boxes[:, 0] + 1
197 |
198 | h = boxes[:, 3] - boxes[:, 1] + 1
199 | landmark[:, 0::2] = (np.tile(w, (5, 1)) * landmark[:, 0::2].T + np.tile(boxes[:, 0], (5, 1)) - 1).T
200 | landmark[:, 1::2] = (np.tile(h, (5, 1)) * landmark[:, 1::2].T + np.tile(boxes[:, 1], (5, 1)) - 1).T
201 | boxes_c = calibrate_box(boxes, reg)
202 |
203 | keep = py_nms(boxes_c, 0.6, mode='Minimum')
204 | boxes_c = boxes_c[keep]
205 | landmark = landmark[keep]
206 | return boxes_c, landmark
207 |
208 |
209 | # Run the full cascade on an image
210 | def infer_image(image_path):
211 | im = cv2.imread(image_path)
212 | # Run the first stage (PNet)
213 | boxes_c = detect_pnet(im, 20, 0.79, 0.9)
214 | if boxes_c is None:
215 | return None, None
216 | # Run the second stage (RNet)
217 | boxes_c = detect_rnet(im, boxes_c, 0.6)
218 | if boxes_c is None:
219 | return None, None
220 | # Run the third stage (ONet)
221 | boxes_c, landmark = detect_onet(im, boxes_c, 0.7)
222 | if boxes_c is None:
223 | return None, None
224 |
225 | return boxes_c, landmark
226 |
227 |
228 | # Draw the face boxes and landmarks
229 | def draw_face(image_path, boxes_c, landmarks):
230 | img = cv2.imread(image_path)
231 | for i in range(boxes_c.shape[0]):
232 | bbox = boxes_c[i, :4]
233 | score = boxes_c[i, 4]
234 | corpbbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
235 | # Draw the face box
236 | cv2.rectangle(img, (corpbbox[0], corpbbox[1]),
237 | (corpbbox[2], corpbbox[3]), (255, 0, 0), 1)
238 | # Confidence that this is a face
239 | cv2.putText(img, '{:.2f}'.format(score),
240 | (corpbbox[0], corpbbox[1] - 2),
241 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
242 | # Draw the landmarks
243 | for i in range(landmarks.shape[0]):
244 | for j in range(len(landmarks[i]) // 2):
245 | cv2.circle(img, (int(landmarks[i][2 * j]), int(int(landmarks[i][2 * j + 1]))), 2, (0, 0, 255))
246 | cv2.imshow('result', img)
247 | cv2.waitKey(0)
248 |
249 |
250 | if __name__ == '__main__':
251 | # Detect the face boxes and landmarks in the image
252 | boxes_c, landmarks = infer_image(args.image_path)
253 | print(boxes_c)
254 | print(landmarks)
255 | # Draw the boxes and landmarks
256 | if boxes_c is not None:
257 | draw_face(image_path=args.image_path, boxes_c=boxes_c, landmarks=landmarks)
258 | else:
259 | print('no face detected in the image')
260 |
--------------------------------------------------------------------------------
/Multi_Face_Detection/models/Loss.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import numpy as np
4 |
5 |
6 | class ClassLoss(nn.Module):
7 | def __init__(self):
8 | super(ClassLoss, self).__init__()
9 | self.entropy_loss = nn.CrossEntropyLoss(ignore_index=-100, reduction='none')
10 | self.keep_ratio = 0.7
11 |
12 | def forward(self, class_out, label):
13 | # Keep neg (0) and pos (1) samples; ignore part (-1) and landmark (-2) samples
14 | label = torch.where(label < 0, -100, label)
15 | # Count the neg/pos samples and take 70% of them
16 | valid_label = torch.where(label >= 0, 1, 0)
17 | num_valid = torch.sum(valid_label)
18 | keep_num = int((num_valid * self.keep_ratio).cpu().numpy())
19 | label = torch.squeeze(label)
20 | # Cross-entropy loss
21 | loss = self.entropy_loss(input=class_out, target=label)
22 | # Keep the 70% hardest valid samples for the loss (online hard example mining)
23 | loss, _ = torch.topk(torch.squeeze(loss), k=keep_num)
24 | return torch.mean(loss)
25 |
26 |
27 | class BBoxLoss(nn.Module):
28 | def __init__(self):
29 | super(BBoxLoss, self).__init__()
30 | self.square_loss = nn.MSELoss(reduction='none')
31 | self.keep_ratio = 1.0
32 |
33 | def forward(self, bbox_out, bbox_target, label):
34 | # Keep pos (1) and part (-1) samples
35 | valid_label = torch.where(torch.abs(label) == 1, 1, 0)
36 | valid_label = torch.squeeze(valid_label)
37 | # Number of valid samples
38 | keep_num = int(torch.sum(valid_label).cpu().numpy() * self.keep_ratio)
39 | loss = self.square_loss(input=bbox_out, target=bbox_target)
40 | loss = torch.sum(loss, dim=1)
41 | loss = loss.cuda() * valid_label
42 | # Compute the loss over the valid samples
43 | loss, _ = torch.topk(loss, k=keep_num, dim=0)
44 | return torch.mean(loss)
45 |
46 |
47 | class LandmarkLoss(nn.Module):
48 | def __init__(self):
49 | super(LandmarkLoss, self).__init__()
50 | self.square_loss = nn.MSELoss(reduction='none')
51 | self.keep_ratio = 1.0
52 |
53 | def forward(self, landmark_out, landmark_target, label):
54 | # Keep only landmark samples (-2)
55 | valid_label = torch.where(label == -2, 1, 0)
56 | valid_label = torch.squeeze(valid_label)
57 | # Number of valid samples
58 | keep_num = int(torch.sum(valid_label).cpu().numpy() * self.keep_ratio)
59 | loss = self.square_loss(input=landmark_out, target=landmark_target)
60 | loss = torch.sum(loss, dim=1)
61 | loss = loss.cuda() * valid_label
62 | # Compute the loss over the valid samples
63 | loss, _ = torch.topk(loss, k=keep_num, dim=0)
64 | return torch.mean(loss)
65 |
66 |
67 | # Accuracy during training
68 | def accuracy(class_out, label):
69 | # Find where the neg (0) and pos (1) samples are
70 | class_out = class_out.detach().cpu().numpy()
71 | label = label.cpu().numpy()
72 | label = np.squeeze(label)
73 | zeros = np.zeros(label.shape)
74 | cond = np.greater_equal(label, zeros)
75 | picked = np.where(cond)
76 | valid_label = label[picked]
77 | valid_class_out = class_out[picked]
78 | # Accuracy over the neg/pos samples
79 | acc = np.sum(np.argmax(valid_class_out, axis=1) == valid_label, dtype='float')
80 | acc = acc / valid_label.shape[0]
81 | return acc
82 |
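83 | # Usage sketch (illustration only; the training script is not part of this repo).
84 | # With a batch label vector using 1/0/-1/-2 for pos/neg/part/landmark samples,
85 | # the three losses can be combined as in the MTCNN paper (det:box:landmark
86 | # weights of 1:0.5:0.5 for PNet/RNet, and 1:0.5:1 for ONet):
87 | #   loss = ClassLoss()(class_out, label) \
88 | #          + 0.5 * BBoxLoss()(bbox_out, bbox_target, label) \
89 | #          + 0.5 * LandmarkLoss()(landmark_out, landmark_target, label)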
--------------------------------------------------------------------------------
/Multi_Face_Detection/models/ONet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class ONet(nn.Module):
5 | def __init__(self):
6 | super(ONet, self).__init__()
7 | self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3))
8 | self.prelu1 = nn.PReLU()
9 | self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
10 | self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
11 | self.prelu2 = nn.PReLU()
12 | self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
13 | self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3))
14 | self.prelu3 = nn.PReLU()
15 | self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
16 | self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(2, 2))
17 | self.prelu4 = nn.PReLU()
18 | self.flatten = nn.Flatten()
19 | self.fc = nn.Linear(in_features=1152, out_features=256)
20 | self.class_fc = nn.Linear(in_features=256, out_features=2)
21 | self.bbox_fc = nn.Linear(in_features=256, out_features=4)
22 | self.landmark_fc = nn.Linear(in_features=256, out_features=10)
23 |
24 | for m in self.modules():
25 | if isinstance(m, nn.Conv2d):
26 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
27 |
28 | def forward(self, x):
29 | x = self.prelu1(self.conv1(x))
30 | x = self.pool1(x)
31 | x = self.prelu2(self.conv2(x))
32 | x = self.pool2(x)
33 | x = self.prelu3(self.conv3(x))
34 | x = self.pool3(x)
35 | x = self.prelu4(self.conv4(x))
36 | x = self.flatten(x)
37 | x = self.fc(x)
38 | # Face / non-face classification output
39 | class_out = self.class_fc(x)
40 | # Face bounding-box regression output
41 | bbox_out = self.bbox_fc(x)
42 | # Regression output for the five landmarks
43 | landmark_out = self.landmark_fc(x)
44 | return class_out, bbox_out, landmark_out
45 |
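46 | # Feature-map sizes for the 48x48 input, which is why fc expects 1152 features:
47 | # 48 -> conv1(3x3) 46 -> pool1(3,2,ceil) 23 -> conv2(3x3) 21 -> pool2(3,2,ceil) 10
48 | # -> conv3(3x3) 8 -> pool3(2,2) 4 -> conv4(2x2) 3, giving 128 * 3 * 3 = 1152.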
--------------------------------------------------------------------------------
/Multi_Face_Detection/models/PNet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 |
5 | class PNet(nn.Module):
6 | def __init__(self):
7 | super(PNet, self).__init__()
8 | self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=(3, 3))
9 | self.prelu1 = nn.PReLU()
10 | self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
11 | self.conv2 = nn.Conv2d(in_channels=10, out_channels=16, kernel_size=(3, 3))
12 | self.prelu2 = nn.PReLU()
13 | self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3))
14 | self.prelu3 = nn.PReLU()
15 | self.conv4_1 = nn.Conv2d(in_channels=32, out_channels=2, kernel_size=(1, 1))
16 | self.conv4_2 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=(1, 1))
17 | self.conv4_3 = nn.Conv2d(in_channels=32, out_channels=10, kernel_size=(1, 1))
18 |
19 | for m in self.modules():
20 | if isinstance(m, nn.Conv2d):
21 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
22 |
23 | def forward(self, x):
24 | x = self.prelu1(self.conv1(x))
25 | x = self.pool1(x)
26 | x = self.prelu2(self.conv2(x))
27 | x = self.prelu3(self.conv3(x))
28 | # Face / non-face classification output
29 | class_out = self.conv4_1(x)
30 | class_out = torch.squeeze(class_out, dim=2)
31 | class_out = torch.squeeze(class_out, dim=2)
32 | # Face bounding-box regression output
33 | bbox_out = self.conv4_2(x)
34 | bbox_out = torch.squeeze(bbox_out, dim=2)
35 | bbox_out = torch.squeeze(bbox_out, dim=2)
36 | # Regression output for the five landmarks
37 | landmark_out = self.conv4_3(x)
38 | landmark_out = torch.squeeze(landmark_out, dim=2)
39 | landmark_out = torch.squeeze(landmark_out, dim=2)
40 | return class_out, bbox_out, landmark_out
41 |
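42 | # For the 12x12 training input the feature map shrinks to 1x1
43 | # (12 -> conv1 10 -> pool1 5 -> conv2 3 -> conv3 1), so the outputs squeeze to
44 | # [N, 2], [N, 4] and [N, 10]; at inference PNet runs fully convolutionally on
45 | # whole pyramid images, producing a score map instead.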
--------------------------------------------------------------------------------
/Multi_Face_Detection/models/RNet.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 |
4 |
5 | class RNet(nn.Module):
6 | def __init__(self):
7 | super(RNet, self).__init__()
8 | self.conv1 = nn.Conv2d(in_channels=3, out_channels=28, kernel_size=(3, 3))
9 | self.prelu1 = nn.PReLU()
10 | self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
11 | self.conv2 = nn.Conv2d(in_channels=28, out_channels=48, kernel_size=(3, 3))
12 | self.prelu2 = nn.PReLU()
13 | self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
14 | self.conv3 = nn.Conv2d(in_channels=48, out_channels=64, kernel_size=(2, 2))
15 | self.prelu3 = nn.PReLU()
16 | self.flatten = nn.Flatten()
17 | self.fc = nn.Linear(in_features=576, out_features=128)
18 | self.class_fc = nn.Linear(in_features=128, out_features=2)
19 | self.bbox_fc = nn.Linear(in_features=128, out_features=4)
20 | self.landmark_fc = nn.Linear(in_features=128, out_features=10)
21 |
22 | for m in self.modules():
23 | if isinstance(m, nn.Conv2d):
24 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
25 |
26 | def forward(self, x):
27 | x = self.prelu1(self.conv1(x))
28 | x = self.pool1(x)
29 | x = self.prelu2(self.conv2(x))
30 | x = self.pool2(x)
31 | x = self.prelu3(self.conv3(x))
32 | x = self.flatten(x)
33 | x = self.fc(x)
34 | # Face / non-face classification output
35 | class_out = self.class_fc(x)
36 | # Face bounding-box regression output
37 | bbox_out = self.bbox_fc(x)
38 | # Regression output for the five landmarks
39 | landmark_out = self.landmark_fc(x)
40 | return class_out, bbox_out, landmark_out
41 |
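42 | # Feature-map sizes for the 24x24 input, which is why fc expects 576 features:
43 | # 24 -> conv1(3x3) 22 -> pool1(3,2,ceil) 11 -> conv2(3x3) 9 -> pool2(3,2,ceil) 4
44 | # -> conv3(2x2) 3, giving 64 * 3 * 3 = 576.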
--------------------------------------------------------------------------------
/Multi_Face_Detection/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/Multi_Face_Detection/models/__init__.py
--------------------------------------------------------------------------------
/Multi_Face_Detection/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python~=4.5.2.52
2 | numpy~=1.19.2
3 | tqdm~=4.59.0
4 | torchsummary~=1.5.1
5 | torch==1.8.1
--------------------------------------------------------------------------------
/Multi_Face_Detection/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/Multi_Face_Detection/utils/__init__.py
--------------------------------------------------------------------------------
/Multi_Face_Detection/utils/data.py:
--------------------------------------------------------------------------------
1 | import mmap
2 |
3 | import cv2
4 | import numpy as np
5 | from torch.utils.data import Dataset
6 |
7 |
8 | class ImageData(object):
9 | def __init__(self, data_path):
10 | self.offset_dict = {}
11 | for line in open(data_path + '.header', 'rb'):
12 | key, val_pos, val_len = line.split('\t'.encode('ascii'))
13 | self.offset_dict[key] = (int(val_pos), int(val_len))
14 | self.fp = open(data_path + '.data', 'rb')
15 | self.m = mmap.mmap(self.fp.fileno(), 0, access=mmap.ACCESS_READ)
16 | print('Loading data labels...')
17 | # Load the labels
18 | self.label = {}
19 | self.box = {}
20 | self.landmark = {}
21 | label_path = data_path + '.label'
22 | for line in open(label_path, 'rb'):
23 | key, bbox, landmark, label = line.split(b'\t')
24 | self.label[key] = int(label)
25 | self.box[key] = [float(x) for x in bbox.split()]
26 | self.landmark[key] = [float(x) for x in landmark.split()]
27 | print('Data loaded, %d samples in total' % len(self.label))
28 |
29 | # Get the image bytes for a key
30 | def get_img(self, key):
31 | p = self.offset_dict.get(key, None)
32 | if p is None:
33 | return None
34 | val_pos, val_len = p
35 | return self.m[val_pos:val_pos + val_len]
36 |
37 | # Get the label
38 | def get_label(self, key):
39 | return self.label.get(key)
40 |
41 | # Get the face box
42 | def get_bbox(self, key):
43 | return self.box.get(key)
44 |
45 | # Get the landmarks
46 | def get_landmark(self, key):
47 | return self.landmark.get(key)
48 |
49 | # Get all keys
50 | def get_keys(self):
51 | return self.label.keys()
52 |
53 |
54 | def process(image):
55 | image = np.frombuffer(image, dtype=np.uint8)
56 | image = cv2.imdecode(image, cv2.IMREAD_COLOR)
57 | assert (image is not None), 'image is None'
58 | # Convert the image to a numpy array
59 | image = np.array(image).astype(np.float32)
60 | # HWC to CHW
61 | image = image.transpose((2, 0, 1))
62 | # Normalize
63 | image = (image - 127.5) / 128
64 | return image
65 |
66 |
67 | # Data loader
68 | class CustomDataset(Dataset):
69 | def __init__(self, data_path):
70 | super(CustomDataset, self).__init__()
71 | self.imageData = ImageData(data_path)
72 | self.keys = self.imageData.get_keys()
73 | self.keys = list(self.keys)
74 | np.random.shuffle(self.keys)
75 |
76 | def __getitem__(self, idx):
77 | key = self.keys[idx]
78 | img = self.imageData.get_img(key)
79 | assert (img is not None)
80 | label = self.imageData.get_label(key)
81 | assert (label is not None)
82 | bbox = self.imageData.get_bbox(key)
83 | landmark = self.imageData.get_landmark(key)
84 | img = process(img)
85 | label = np.array([label], np.int64)
86 | bbox = np.array(bbox, np.float32)
87 | landmark = np.array(landmark, np.float32)
88 | return img, label, bbox, landmark
89 |
90 | def __len__(self):
91 | return len(self.keys)
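92 |
93 | # Usage sketch (the path is an assumption; pass the prefix that
94 | # utils/data_format_converter.py wrote, i.e. one with .data/.header/.label files):
95 | #   from torch.utils.data import DataLoader
96 | #   loader = DataLoader(CustomDataset('dataset/train'), batch_size=384, shuffle=True)
97 | #   for img, label, bbox, landmark in loader:
98 | #       ...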
--------------------------------------------------------------------------------
/Multi_Face_Detection/utils/data_format_converter.py:
--------------------------------------------------------------------------------
1 | import os
2 | import struct
3 | import uuid
4 | from tqdm import tqdm
5 | import cv2
6 |
7 |
8 | class DataSetWriter(object):
9 | def __init__(self, prefix):
10 | # Create the corresponding data files
11 | self.data_file = open(prefix + '.data', 'wb')
12 | self.header_file = open(prefix + '.header', 'wb')
13 | self.label_file = open(prefix + '.label', 'wb')
14 | self.offset = 0
15 | self.header = ''
16 |
17 | def add_img(self, key, img):
18 | # Write the image data
19 | self.data_file.write(struct.pack('I', len(key)))
20 | self.data_file.write(key.encode('ascii'))
21 | self.data_file.write(struct.pack('I', len(img)))
22 | self.data_file.write(img)
23 | self.offset += 4 + len(key) + 4
24 | self.header = key + '\t' + str(self.offset) + '\t' + str(len(img)) + '\n'
25 | self.header_file.write(self.header.encode('ascii'))
26 | self.offset += len(img)
27 |
28 | def add_label(self, label):
29 | # Write the label data
30 | self.label_file.write(label.encode('ascii') + '\n'.encode('ascii'))
31 |
32 |
33 | # Convert face training data into the packed format
34 | def convert_data(data_folder, output_prefix):
35 | # Read the full data list
36 | data_list_path = os.path.join(data_folder, 'all_data_list.txt')
37 | train_list = open(data_list_path, "r").readlines()
38 | train_image_list = []
39 | for i, item in enumerate(train_list):
40 | sample = item.split(' ')
41 | # Image path
42 | image = sample[0]
43 | # Image label
44 | label = int(sample[1])
45 | # Pre-fill the box and landmarks with zeros
46 | bbox = [0, 0, 0, 0]
47 | landmark = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
48 | # If only a box is given, the landmarks stay zero
49 | if len(sample) == 6:
50 | bbox = [float(i) for i in sample[2:]]
51 | # If only landmarks are given, the box stays zero
52 | if len(sample) == 12:
53 | landmark = [float(i) for i in sample[2:]]
54 | # Append to the data list
55 | train_image_list.append((image, label, bbox, landmark))
56 | print("训练数据大小:", len(train_image_list))
57 |
58 | # Start writing the data
59 | writer = DataSetWriter(output_prefix)
60 | for image, label, bbox, landmark in tqdm(train_image_list):
61 | try:
62 | key = str(uuid.uuid1())
63 | img = cv2.imread(image)
64 | _, img = cv2.imencode('.bmp', img)
65 | # Write the record
66 | writer.add_img(key, img.tobytes())
67 | label_str = str(label)
68 | bbox_str = ' '.join([str(x) for x in bbox])
69 | landmark_str = ' '.join([str(x) for x in landmark])
70 | writer.add_label('\t'.join([key, bbox_str, landmark_str, label_str]))
71 | except:
72 | continue
73 |
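74 | # Usage sketch (paths are assumptions): data_folder must contain the
75 | # all_data_list.txt produced by combine_data_list; the packed dataset is
76 | # written as <output_prefix>.data/.header/.label:
77 | #   convert_data('dataset/12', 'dataset/train')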
--------------------------------------------------------------------------------
/Multi_Face_Detection/utils/utils.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import shutil
3 | import numpy as np
4 | import random
5 | import os
6 | import cv2
7 | from tqdm import tqdm
8 |
9 |
10 | class BBox:
11 | # A face bounding box
12 | def __init__(self, box):
13 | self.left = box[0]
14 | self.top = box[1]
15 | self.right = box[2]
16 | self.bottom = box[3]
17 |
18 | self.x = box[0]
19 | self.y = box[1]
20 | self.w = box[2] - box[0]
21 | self.h = box[3] - box[1]
22 |
23 | def project(self, point):
24 | """将关键点的绝对值转换为相对于左上角坐标偏移并归一化
25 | 参数:
26 | point:某一关键点坐标(x,y)
27 | 返回值:
28 | 处理后偏移
29 | """
30 | x = (point[0] - self.x) / self.w
31 | y = (point[1] - self.y) / self.h
32 | return np.asarray([x, y])
33 |
34 | def reproject(self, point):
35 | """将关键点的相对值转换为绝对值,与project相反
36 | 参数:
37 | point:某一关键点的相对归一化坐标
38 | 返回值:
39 | 处理后的绝对坐标
40 | """
41 | x = self.x + self.w * point[0]
42 | y = self.y + self.h * point[1]
43 | return np.asarray([x, y])
44 |
45 | def reprojectLandmark(self, landmark):
46 | """对所有关键点进行reproject操作"""
47 | p = np.zeros((len(landmark), 2))
48 | for i in range(len(landmark)):
49 | p[i] = self.reproject(landmark[i])
50 | return p
51 |
52 | def projectLandmark(self, landmark):
53 | """对所有关键点进行project操作"""
54 | p = np.zeros((len(landmark), 2))
55 | for i in range(len(landmark)):
56 | p[i] = self.project(landmark[i])
57 | return p
58 |
59 |
60 | # Preprocess the data: rescale the image and normalize the pixels
61 | def processed_image(img, scale):
62 | height, width, channels = img.shape
63 | new_height = int(height * scale)
64 | new_width = int(width * scale)
65 | new_dim = (new_width, new_height)
66 | img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR)
67 | # Convert the image to a numpy array
68 | image = np.array(img_resized).astype(np.float32)
69 | # HWC to CHW
70 | image = image.transpose((2, 0, 1))
71 | # Normalize
72 | image = (image - 127.5) / 128
73 | return image
74 |
75 |
76 | def IOU(box, boxes):
77 | """裁剪的box和图片所有人脸box的iou值
78 | 参数:
79 | box:裁剪的box,当box维度为4时表示box左上右下坐标,维度为5时,最后一维为box的置信度
80 | boxes:图片所有人脸box,[n,4]
81 | 返回值:
82 | iou值,[n,]
83 | """
84 | # Area of the box
85 | box_area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
86 | # Areas of the boxes, [n,]
87 | area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
88 | # Top-left and bottom-right coordinates of the overlap
89 | xx1 = np.maximum(box[0], boxes[:, 0])
90 | yy1 = np.maximum(box[1], boxes[:, 1])
91 | xx2 = np.minimum(box[2], boxes[:, 2])
92 | yy2 = np.minimum(box[3], boxes[:, 3])
93 |
94 | # Width and height of the overlap
95 | w = np.maximum(0, xx2 - xx1 + 1)
96 | h = np.maximum(0, yy2 - yy1 + 1)
97 | # Area of the overlap
98 | inter = w * h
99 | return inter / (box_area + area - inter + 1e-10)
100 |
101 |
102 | def get_landmark_from_lfw_neg(txt, data_path, with_landmark=True):
103 | """获取txt中的图像路径,人脸box,人脸关键点
104 | 参数:
105 | txt:数据txt文件
106 | data_path:数据存储目录
107 | with_landmark:是否留有关键点
108 | 返回值:
109 | result包含(图像路径,人脸box,关键点)
110 | """
111 | with open(txt, 'r') as f:
112 | lines = f.readlines()
113 | result = []
114 | for line in lines:
115 | line = line.strip()
116 | components = line.split(' ')
117 | # Image path
118 | img_path = os.path.join(data_path, components[0]).replace('\\', '/')
119 | # Face box
120 | box = (components[1], components[3], components[2], components[4])
121 | box = [float(_) for _ in box]
122 | box = list(map(int, box))
123 |
124 | if not with_landmark:
125 | result.append((img_path, BBox(box)))
126 | continue
127 | # Five landmarks (x, y)
128 | landmark = np.zeros((5, 2))
129 | for index in range(5):
130 | rv = (float(components[5 + 2 * index]), float(components[5 + 2 * index + 1]))
131 | landmark[index] = rv
132 | result.append((img_path, BBox(box), landmark))
133 | return result
134 |
135 |
136 | def get_landmark_from_celeba(data_path, with_landmark=True):
137 | """获取celeba的脸box,人脸关键点
138 | 参数:
139 | bbox_txt:数据bbox文件
140 | landmarks_txt:数据landmarks文件
141 | data_path:数据存储目录
142 | with_landmark:是否留有关键点
143 | 返回值:
144 | result包含(图像路径,人脸box,关键点)
145 | """
146 | bbox_txt = os.path.join(data_path, 'list_bbox_celeba.txt')
147 | landmarks_txt = os.path.join(data_path, 'list_landmarks_celeba.txt')
148 | # Read the image paths, boxes, and landmarks
149 | if not os.path.exists(bbox_txt):
150 | return []
151 | with open(bbox_txt, 'r') as f:
152 | bbox_lines = f.readlines()
153 | with open(landmarks_txt, 'r') as f:
154 | landmarks_lines = f.readlines()
155 | result = []
156 | for i in range(2, len(bbox_lines)):
157 | bbox_line = bbox_lines[i]
158 | landmarks_line = landmarks_lines[i]
159 | bbox_components = bbox_line.strip().split()
160 | landmarks_components = landmarks_line.strip().split()
161 | # Image path
162 | img_path = os.path.join(data_path, 'img_celeba', bbox_components[0]).replace('\\', '/')
163 | # Face box
164 | box = (bbox_components[1], bbox_components[2], bbox_components[3], bbox_components[4])
165 | box = [float(_) for _ in box]
166 | box = list(map(int, box))
167 | box = [box[0], box[1], box[2] + box[0], box[3] + box[1]]
168 |
169 | if not with_landmark:
170 | result.append((img_path, BBox(box)))
171 | continue
172 | # Five landmarks (x, y)
173 | landmark = np.zeros((5, 2))
174 | for index in range(5):
175 | rv = (float(landmarks_components[1 + 2 * index]), float(landmarks_components[1 + 2 * index + 1]))
176 | landmark[index] = rv
177 | result.append((img_path, BBox(box), landmark))
178 | return result
179 |
180 |
181 | def combine_data_list(data_dir):
182 | """把每个数据列表放在同一个文件上
183 | 参数:
184 | data_dir:已经裁剪后的文件夹
185 | """
186 | npr = np.random
187 | with open(os.path.join(data_dir, 'positive.txt'), 'r') as f:
188 | pos = f.readlines()
189 | with open(os.path.join(data_dir, 'negative.txt'), 'r') as f:
190 | neg = f.readlines()
191 | with open(os.path.join(data_dir, 'part.txt'), 'r') as f:
192 | part = f.readlines()
193 | with open(os.path.join(data_dir, 'landmark.txt'), 'r') as f:
194 | landmark = f.readlines()
195 | with open(os.path.join(data_dir, 'all_data_list.txt'), 'w') as f:
196 | base_num = len(pos) // 1000 * 1000
197 | s1 = 'Before merging: neg: {} pos: {} part: {} landmark: {} base: {}'.format(len(neg), len(pos), len(part),
198 | len(landmark), base_num)
199 | print(s1)
200 | # shuffle the order of the written data; the size arguments set the dataset ratio, which the paper gives as 3:1:1:2
201 | neg_keep = npr.choice(len(neg), size=base_num * 3, replace=base_num * 3 > len(neg))
202 | part_keep = npr.choice(len(part), size=base_num, replace=base_num > len(part))
203 | pos_keep = npr.choice(len(pos), size=base_num, replace=base_num > len(pos))
204 | landmark_keep = npr.choice(len(landmark), size=base_num * 2, replace=base_num * 2 > len(landmark))
205 |
206 | s2 = 'After merging: neg: {} pos: {} part: {} landmark: {}'.format(len(neg_keep), len(pos_keep),
207 | len(part_keep), len(landmark_keep))
208 | print(s2)
209 | with open(os.path.join(data_dir, 'temp.txt'), 'a', encoding='utf-8') as f_temp:
210 | f_temp.write('%s\n' % s1)
211 | f_temp.write('%s\n' % s2)
212 | f_temp.flush()
213 |
214 | # write the merged list
215 | for i in pos_keep:
216 | f.write(pos[i].replace('\\', '/'))
217 | for i in neg_keep:
218 | f.write(neg[i].replace('\\', '/'))
219 | for i in part_keep:
220 | f.write(part[i].replace('\\', '/'))
221 | for i in landmark_keep:
222 | f.write(landmark[i].replace('\\', '/'))
223 |
224 |
225 | def crop_landmark_image(data_dir, data_list, size, argument=True):
226 | """裁剪并保存带有人脸关键点的图片
227 | 参数:
228 | data_dir:数据目录
229 | size:裁剪图片的大小
230 | argument:是否进行数据增强
231 | """
232 | npr = np.random
233 | image_id = 0
234 |
235 | # data output path
236 | output = os.path.join(data_dir, str(size))
237 | if not os.path.exists(output):
238 | os.makedirs(output)
239 |
240 | # output directory for the processed images
241 | dstdir = os.path.join(output, 'landmark')
242 | if not os.path.exists(dstdir):
243 | os.mkdir(dstdir)
244 |
245 | # txt file recording the labels
246 | f = open(os.path.join(output, 'landmark.txt'), 'w')
247 | idx = 0
248 | for (imgPath, box, landmarkGt) in tqdm(data_list):
249 | # store the face images and landmarks
250 | F_imgs = []
251 | F_landmarks = []
252 | img = cv2.imread(imgPath)
253 |
254 | img_h, img_w, img_c = img.shape
255 | # convert to a numpy array
256 | gt_box = np.array([box.left, box.top, box.right, box.bottom])
257 | # crop the face region
258 | f_face = img[box.top:box.bottom + 1, box.left:box.right + 1]
259 | try:
260 | # resize to the network input size
261 | f_face = cv2.resize(f_face, (size, size))
262 | except Exception as e:
263 | print(e)
264 | print('failed to resize to network input size, skipping')
265 | continue
266 |
267 | # create an empty landmark array
268 | landmark = np.zeros((5, 2))
269 | for index, one in enumerate(landmarkGt):
270 | # offsets of the landmarks relative to the top-left corner, normalized so they always lie inside the box
271 | rv = ((one[0] - gt_box[0]) / (gt_box[2] - gt_box[0]), (one[1] - gt_box[1]) / (gt_box[3] - gt_box[1]))
272 | landmark[index] = rv
273 |
274 | F_imgs.append(f_face)
275 | F_landmarks.append(landmark.reshape(10))
276 |
277 | # data augmentation
278 | if argument:
279 | landmark = np.zeros((5, 2))
280 | # transform the image
281 | idx = idx + 1
282 | x1, y1, x2, y2 = gt_box
283 | gt_w = x2 - x1 + 1
284 | gt_h = y2 - y1 + 1
285 | # skip images that are too small
286 | if max(gt_w, gt_h) < 40 or x1 < 0 or y1 < 0:
287 | continue
288 | for i in range(10):
289 | # random crop size
290 | box_size = npr.randint(int(min(gt_w, gt_h) * 0.8), np.ceil(1.25 * max(gt_w, gt_h)))
291 | # random offset of the top-left corner
292 | try:
293 | delta_x = npr.randint(-gt_w * 0.2, gt_w * 0.2)
294 | delta_y = npr.randint(-gt_h * 0.2, gt_h * 0.2)
295 | except Exception as e:
296 | print(e)
297 | print('failed to pick a random crop, skipping')
298 | continue
299 | # compute the top-left corner
300 | nx1 = int(max(x1 + gt_w / 2 - box_size / 2 + delta_x, 0))
301 | ny1 = int(max(y1 + gt_h / 2 - box_size / 2 + delta_y, 0))
302 | nx2 = nx1 + box_size
303 | ny2 = ny1 + box_size
304 | # skip crops that exceed the image boundary
305 | if nx2 > img_w or ny2 > img_h:
306 | continue
307 | # crop box and image patch
308 | crop_box = np.array([nx1, ny1, nx2, ny2])
309 | cropped_im = img[ny1:ny2 + 1, nx1:nx2 + 1, :]
310 | resized_im = cv2.resize(cropped_im, (size, size))
311 | # compute the IoU
312 | iou = IOU(crop_box, np.expand_dims(gt_box, 0))
313 |
314 | # keep only pos samples
315 | if iou > 0.65:
316 | F_imgs.append(resized_im)
317 | # landmark offsets relative to the crop
318 | for index, one in enumerate(landmarkGt):
319 | rv = ((one[0] - nx1) / box_size, (one[1] - ny1) / box_size)
320 | landmark[index] = rv
321 | F_landmarks.append(landmark.reshape(10))
322 | landmark = np.zeros((5, 2))
323 | landmark_ = F_landmarks[-1].reshape(-1, 2)
324 | box = BBox([nx1, ny1, nx2, ny2])
325 | # mirror
326 | if random.choice([0, 1]) > 0:
327 | face_flipped, landmark_flipped = flip(resized_im, landmark_)
328 | face_flipped = cv2.resize(face_flipped, (size, size))
329 | F_imgs.append(face_flipped)
330 | F_landmarks.append(landmark_flipped.reshape(10))
331 | # rotate counterclockwise
332 | if random.choice([0, 1]) > 0:
333 | face_rotated_by_alpha, landmark_rorated = rotate(img, box, box.reprojectLandmark(landmark_), 5)
334 | # project the landmarks back into the box
335 | landmark_rorated = box.projectLandmark(landmark_rorated)
336 | face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
337 | F_imgs.append(face_rotated_by_alpha)
338 | F_landmarks.append(landmark_rorated.reshape(10))
339 |
340 | # horizontal flip
341 | face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rorated)
342 | face_flipped = cv2.resize(face_flipped, (size, size))
343 | F_imgs.append(face_flipped)
344 | F_landmarks.append(landmark_flipped.reshape(10))
345 | # rotate clockwise
346 | if random.choice([0, 1]) > 0:
347 | face_rotated_by_alpha, landmark_rorated = rotate(img, box, box.reprojectLandmark(landmark_), -5)
348 | # project the landmarks back into the box
349 | landmark_rorated = box.projectLandmark(landmark_rorated)
350 | face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
351 | F_imgs.append(face_rotated_by_alpha)
352 | F_landmarks.append(landmark_rorated.reshape(10))
353 |
354 | # horizontal flip
355 | face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rorated)
356 | face_flipped = cv2.resize(face_flipped, (size, size))
357 | F_imgs.append(face_flipped)
358 | F_landmarks.append(landmark_flipped.reshape(10))
359 | F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
360 |
361 | # save the cropped images and their annotations
362 | for i in range(len(F_imgs)):
363 | # keep only samples whose landmark offsets all lie strictly within (0, 1)
364 | if np.sum(np.where(F_landmarks[i] <= 0, 1, 0)) > 0:
365 | continue
366 | if np.sum(np.where(F_landmarks[i] >= 1, 1, 0)) > 0:
367 | continue
368 | # save the cropped image with landmarks
369 | cv2.imwrite(os.path.join(dstdir, '%d.jpg' % (image_id)), F_imgs[i])
370 | # append the image path, label and landmarks to the data list
371 | landmarks = list(map(str, list(F_landmarks[i])))
372 | f.write(os.path.join(dstdir, '%d.jpg' % (image_id)) + ' -2 ' + ' '.join(landmarks) + '\n')
373 | image_id += 1
374 | f.close()
375 |
376 |
377 | # mirroring
378 | def flip(face, landmark):
379 | face_flipped_by_x = cv2.flip(face, 1)
380 | landmark_ = np.asarray([(1 - x, y) for (x, y) in landmark])
381 | landmark_[[0, 1]] = landmark_[[1, 0]]
382 | landmark_[[3, 4]] = landmark_[[4, 3]]
383 | return face_flipped_by_x, landmark_
384 |
385 |
386 | # rotation
387 | def rotate(img, box, landmark, alpha):
388 | center = ((box.left + box.right) / 2, (box.top + box.bottom) / 2)
389 | rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)
390 | img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, (img.shape[1], img.shape[0]))
391 | landmark_ = np.asarray([(rot_mat[0][0] * x + rot_mat[0][1] * y + rot_mat[0][2],
392 | rot_mat[1][0] * x + rot_mat[1][1] * y + rot_mat[1][2]) for (x, y) in landmark])
393 | face = img_rotated_by_alpha[box.top:box.bottom + 1, box.left:box.right + 1]
394 | return face, landmark_
395 |
396 |
397 | def convert_to_square(box):
398 | """将box转换成更大的正方形
399 | 参数:
400 | box:预测的box,[n,5]
401 | 返回值:
402 | 调整后的正方形box,[n,5]
403 | """
404 | square_box = box.copy()
405 | h = box[:, 3] - box[:, 1] + 1
406 | w = box[:, 2] - box[:, 0] + 1
407 | # the square's side is the larger of w and h
408 | max_side = np.maximum(w, h)
409 |
410 | square_box[:, 0] = box[:, 0] + w * 0.5 - max_side * 0.5
411 | square_box[:, 1] = box[:, 1] + h * 0.5 - max_side * 0.5
412 | square_box[:, 2] = square_box[:, 0] + max_side - 1
413 | square_box[:, 3] = square_box[:, 1] + max_side - 1
414 | return square_box
415 |
416 |
417 | def read_annotation(data_path, label_path):
418 | """
419 | Read image paths and annotated boxes from the original annotations
420 | :param data_path: data root directory
421 | :param label_path: annotation file
422 | :return:
423 | """
424 | data = dict()
425 | images = []
426 | bboxes = []
427 | with open(label_path, 'r') as f:
428 | lines = f.readlines()
429 | for line in lines:
430 | labels = line.strip().split(' ')
431 | # image path
432 | imagepath = labels[0]
433 | # stop reading when an empty line is encountered
434 | if not imagepath:
435 | break
436 | # build the full image path
437 | imagepath = data_path + 'WIDER_train/images/' + imagepath + '.jpg'
438 | images.append(imagepath)
439 | # read all boxes for the faces in this image
440 | one_image_bboxes = []
441 | for i in range(0, len(labels) - 1, 4):
442 | xmin = float(labels[1 + i])
443 | ymin = float(labels[2 + i])
444 | xmax = float(labels[3 + i])
445 | ymax = float(labels[4 + i])
446 |
447 | one_image_bboxes.append([xmin, ymin, xmax, ymax])
448 |
449 | bboxes.append(one_image_bboxes)
450 |
451 | data['images'] = images
452 | data['bboxes'] = bboxes
453 | return data
454 |
455 |
456 | def pad(bboxes, w, h):
457 | """将超出图像的box进行处理
458 | 参数:
459 | bboxes:人脸框
460 | w,h:图像长宽
461 | 返回值:
462 | dy, dx : 为调整后的box的左上角坐标相对于原box左上角的坐标
463 | edy, edx : n为调整后的box右下角相对原box左上角的相对坐标
464 | y, x : 调整后的box在原图上左上角的坐标
465 | ex, ex : 调整后的box在原图上右下角的坐标
466 | tmph, tmpw: 原始box的长宽
467 | """
468 | # box widths and heights
469 | tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
470 | num_box = bboxes.shape[0]
471 |
472 | dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
473 | edx, edy = tmpw.copy() - 1, tmph.copy() - 1
474 | # top-left and bottom-right coordinates of the boxes
475 | x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
476 | # for boxes past the bottom-right boundary, clamp ex, ey to the image w, h
477 | # edx, edy: bottom-right of the clipped box relative to the original box's top-left corner
478 | tmp_index = np.where(ex > w - 1)
479 | edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
480 | ex[tmp_index] = w - 1
481 |
482 | tmp_index = np.where(ey > h - 1)
483 | edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
484 | ey[tmp_index] = h - 1
485 | # for boxes past the top-left boundary, clamp x, y to 0
486 | # dx, dy: top-left of the clipped box relative to the original box's top-left corner
487 | tmp_index = np.where(x < 0)
488 | dx[tmp_index] = 0 - x[tmp_index]
489 | x[tmp_index] = 0
490 |
491 | tmp_index = np.where(y < 0)
492 | dy[tmp_index] = 0 - y[tmp_index]
493 | y[tmp_index] = 0
494 |
495 | return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
496 | return_list = [item.astype(np.int32) for item in return_list]
497 |
498 | return return_list
499 |
500 |
501 | def calibrate_box(bbox, reg):
502 | """校准box
503 | 参数:
504 | bbox:pnet生成的box
505 |
506 | reg:rnet生成的box偏移值
507 | 返回值:
508 | 调整后的box是针对原图的绝对坐标
509 | """
510 |
511 | bbox_c = bbox.copy()
512 | w = bbox[:, 2] - bbox[:, 0] + 1
513 | w = np.expand_dims(w, 1)
514 | h = bbox[:, 3] - bbox[:, 1] + 1
515 | h = np.expand_dims(h, 1)
516 | reg_m = np.hstack([w, h, w, h])
517 | aug = reg_m * reg
518 | bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug
519 | return bbox_c
520 |
521 |
522 | def py_nms(dets, thresh, mode="Union"):
523 | """
524 | Greedy strategy (non-maximum suppression) for selecting face boxes
525 | keep boxes with overlap <= thresh
526 | rule out boxes with overlap > thresh
527 | :param dets: [[x1, y1, x2, y2, score]]
528 | :param thresh: retain overlap <= thresh
529 | :return: indexes to keep
530 | """
531 | x1 = dets[:, 0]
532 | y1 = dets[:, 1]
533 | x2 = dets[:, 2]
534 | y2 = dets[:, 3]
535 | scores = dets[:, 4]
536 |
537 | areas = (x2 - x1 + 1) * (y2 - y1 + 1)
538 | # sort the scores in descending order
539 | order = scores.argsort()[::-1]
540 |
541 | keep = []
542 | while order.size > 0:
543 | i = order[0]
544 | keep.append(i)
545 | xx1 = np.maximum(x1[i], x1[order[1:]])
546 | yy1 = np.maximum(y1[i], y1[order[1:]])
547 | xx2 = np.minimum(x2[i], x2[order[1:]])
548 | yy2 = np.minimum(y2[i], y2[order[1:]])
549 |
550 | w = np.maximum(0.0, xx2 - xx1 + 1)
551 | h = np.maximum(0.0, yy2 - yy1 + 1)
552 | inter = w * h
553 | if mode == "Union":
554 | ovr = inter / (areas[i] + areas[order[1:]] - inter)
555 | elif mode == "Minimum":
556 | ovr = inter / np.minimum(areas[i], areas[order[1:]])
557 | # keep indices with overlap below the threshold; order[0] was taken out for comparison, so inds + 1 maps back to the original indices
558 | inds = np.where(ovr <= thresh)[0]
559 | order = order[inds + 1]
560 |
561 | return keep
562 |
563 |
564 | def generate_bbox(cls_map, reg, scale, threshold):
565 | """
566 | Map PNet outputs to box coordinates in the original image, with classification scores and box offsets
567 | """
568 | # PNet roughly halves the image size (stride 2)
569 | stride = 2
570 |
571 | cellsize = 12
572 |
573 | # keep the high-confidence cells
574 | t_index = np.where(cls_map > threshold)
575 |
576 | # no face found
577 | if t_index[0].size == 0:
578 | return np.array([])
579 | # offsets
580 | dx1, dy1, dx2, dy2 = [reg[i, t_index[0], t_index[1]] for i in range(4)]
581 |
582 | reg = np.array([dx1, dy1, dx2, dy2])
583 | score = cls_map[t_index[0], t_index[1]]
584 | # box coordinates in the original image, classification scores, box offsets
585 | boundingbox = np.vstack([np.round((stride * t_index[1]) / scale),
586 | np.round((stride * t_index[0]) / scale),
587 | np.round((stride * t_index[1] + cellsize) / scale),
588 | np.round((stride * t_index[0] + cellsize) / scale),
589 | score,
590 | reg])
591 | # shape[n,9]
592 | return boundingbox.T
593 |
594 |
595 | # delete the original files after merging the images
596 | def delete_old_img(old_image_folder, image_size):
597 | shutil.rmtree(os.path.join(old_image_folder, str(image_size), 'positive'), ignore_errors=True)
598 | shutil.rmtree(os.path.join(old_image_folder, str(image_size), 'negative'), ignore_errors=True)
599 | shutil.rmtree(os.path.join(old_image_folder, str(image_size), 'part'), ignore_errors=True)
600 | shutil.rmtree(os.path.join(old_image_folder, str(image_size), 'landmark'), ignore_errors=True)
601 |
602 | # delete the original data list files
603 | os.remove(os.path.join(old_image_folder, str(image_size), 'positive.txt'))
604 | os.remove(os.path.join(old_image_folder, str(image_size), 'negative.txt'))
605 | os.remove(os.path.join(old_image_folder, str(image_size), 'part.txt'))
606 | os.remove(os.path.join(old_image_folder, str(image_size), 'landmark.txt'))
607 |
608 |
609 | def save_hard_example(data_path, save_size):
610 | """
611 | Crop the annotated training images needed by the next network from the prediction results
612 | :param data_path: data root directory
613 | :param save_size: crop size
614 | :return:
615 | """
616 | # read the annotations of the original dataset
617 | filename = os.path.join(data_path, 'wider_face_train.txt')
618 | data = read_annotation(data_path, filename)
619 |
620 | # image paths and annotations of the original dataset
621 | im_idx_list = data['images']
622 | gt_boxes_list = data['bboxes']
623 |
624 | # directories for the cropped images
625 | pos_save_dir = os.path.join(data_path, '%d/positive' % save_size)
626 | part_save_dir = os.path.join(data_path, '%d/part' % save_size)
627 | neg_save_dir = os.path.join(data_path, '%d/negative' % save_size)
628 |
629 | # create the directories
630 | if not os.path.exists(data_path):
631 | os.makedirs(data_path)
632 | if not os.path.exists(pos_save_dir):
633 | os.mkdir(pos_save_dir)
634 | if not os.path.exists(part_save_dir):
635 | os.mkdir(part_save_dir)
636 | if not os.path.exists(neg_save_dir):
637 | os.mkdir(neg_save_dir)
638 |
639 | # list files for the image data
640 | neg_file = open(os.path.join(data_path, '%d/negative.txt' % save_size), 'w')
641 | pos_file = open(os.path.join(data_path, '%d/positive.txt' % save_size), 'w')
642 | part_file = open(os.path.join(data_path, '%d/part.txt' % save_size), 'w')
643 |
644 | # load the prediction results
645 | det_boxes = pickle.load(open(os.path.join(data_path, '%d/detections.pkl' % save_size), 'rb'))
646 |
647 | # make sure the number of predictions matches the local data
648 | assert len(det_boxes) == len(im_idx_list), "number of predictions does not match the ground-truth data"
649 |
650 | # image naming counters
651 | n_idx = 0
652 | p_idx = 0
653 | d_idx = 0
654 |
655 | # start cropping the training images for the next network
656 | pbar = tqdm(total=len(im_idx_list))
657 | for im_idx, dets, gts in zip(im_idx_list, det_boxes, gt_boxes_list):
658 | pbar.update(1)
659 | # reshape the original annotations into boxes of 4 values each
660 | gts = np.array(gts, dtype=np.float32).reshape(-1, 4)
661 |
662 | # skip this iteration if nothing was detected
663 | if dets.shape[0] == 0:
664 | continue
665 |
666 | # read the original image
667 | img = cv2.imread(im_idx)
668 |
669 | # convert the predicted boxes to squares
670 | dets = convert_to_square(dets)
671 | dets[:, 0:4] = np.round(dets[:, 0:4])
672 |
673 | neg_num = 0
674 | for box in dets:
675 | # coordinates plus width and height of one predicted face in this image
676 | x_left, y_top, x_right, y_bottom, _ = box.astype(int)
677 | width = x_right - x_left + 1
678 | height = y_bottom - y_top + 1
679 |
680 | # skip boxes that are too small or out of bounds
681 | if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:
682 | continue
683 |
684 | # compute the IoU
685 | Iou = IOU(box, gts)
686 |
687 | # crop and resize the image patch
688 | cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]
689 | resized_im = cv2.resize(cropped_im, (save_size, save_size), interpolation=cv2.INTER_LINEAR)
690 |
691 | # assign the category
692 | if np.max(Iou) < 0.3 and neg_num < 60:
693 | # save a negative sample while avoiding producing too many negatives
694 | save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
695 | # label 0
696 | neg_file.write(save_file + ' 0\n')
697 | cv2.imwrite(save_file, resized_im)
698 | n_idx += 1
699 | neg_num += 1
700 | else:
701 | # get the ground-truth box with the highest IoU
702 | idx = np.argmax(Iou)
703 | assigned_gt = gts[idx]
704 | x1, y1, x2, y2 = assigned_gt
705 |
706 | # compute the offsets
707 | offset_x1 = (x1 - x_left) / float(width)
708 | offset_y1 = (y1 - y_top) / float(height)
709 | offset_x2 = (x2 - x_right) / float(width)
710 | offset_y2 = (y2 - y_bottom) / float(height)
711 |
712 | # pos and part
713 | if np.max(Iou) >= 0.65:
714 | # save a positive sample while avoiding producing too many positives
715 | save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
716 | # label 1
717 | pos_file.write(
718 | save_file + ' 1 %.2f %.2f %.2f %.2f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
719 | cv2.imwrite(save_file, resized_im)
720 | p_idx += 1
721 |
722 | elif np.max(Iou) >= 0.4:
723 | # save a part sample while avoiding producing too many part samples
724 | save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
725 | # label -1
726 | part_file.write(
727 | save_file + ' -1 %.2f %.2f %.2f %.2f\n' % (offset_x1, offset_y1, offset_x2, offset_y2))
728 | cv2.imwrite(save_file, resized_im)
729 | d_idx += 1
730 | pbar.close()
731 | neg_file.close()
732 | part_file.close()
733 | pos_file.close()
734 |
--------------------------------------------------------------------------------
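The helpers above are easiest to see end to end on a toy input. Below is a minimal sketch, assuming this file is importable as `utils.utils` (as the repo layout suggests): it computes the IoU of the top-scoring detection against the others, then runs the greedy NMS to drop the near-duplicate.

```python
# Minimal sketch of IOU + py_nms from utils/utils.py on toy detections.
# Assumes the module is importable as utils.utils; adjust the path if not.
import numpy as np
from utils.utils import IOU, py_nms

# Candidate detections: [x1, y1, x2, y2, score]
dets = np.array([
    [10., 10., 60., 60., 0.95],
    [12., 12., 62., 62., 0.90],      # near-duplicate of the first box
    [100., 100., 150., 150., 0.80],
])

# IoU of the top-scoring box against the remaining boxes
print(IOU(dets[0], dets[1:, :4]))    # roughly [0.86, 0.0]

# Greedy NMS at a 0.7 Union threshold keeps boxes 0 and 2
print(py_nms(dets, thresh=0.7, mode="Union"))   # [0, 2]
```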
/README.assets/image-20230724183025706.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/README.assets/image-20230724183025706.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Perception System of Students' Classroom Performance
3 |
4 | ## Module Overview
5 | The system evaluates students' classroom performance through several modules, each responsible for a different aspect of detection and assessment:
6 |
7 | - ✅**Module 1**: multi-target face detection
8 | - ✅**Module 2**: classroom facial expression recognition
9 | - ✅**Module 3**: fatigue detection
10 | - **Module 4**: head pose estimation
11 | - **Module 5**: body tilt detection
12 | - **Module 6**: a fuzzy comprehensive evaluation algorithm that combines the data from Modules 2-5 to compute each student's attention level
13 |
14 | ### System Framework Diagram
15 | ![System framework](README.assets/image-20230724183025706.png)
16 |
17 |
18 | ## Tech Stack
19 | - FastAPI
20 | - Uvicorn
21 |
22 | ## Quick Start
23 |
24 | ### Install Dependencies
25 | First, install the required packages:
26 |
27 | ```bash
28 | pip install fastapi
29 | pip install uvicorn[standard]
30 | pip install python-multipart
31 | ```
32 |
33 | ### Start the Service
34 | Start the service with:
35 |
36 | ```bash
37 | uvicorn web:app --reload
38 | ```
39 |
40 | ### Backend Service Reference
41 | [Visit the GoBackendOfPcsSystem GitHub repository](https://github.com/Pokezoom/GoBackendOfPcsSystem)
42 |
43 |
44 | ---
45 |
--------------------------------------------------------------------------------
/__pycache__/web.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/__pycache__/web.cpython-311.pyc
--------------------------------------------------------------------------------
/block_1_mutil_face/mutil_face.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from mtcnn import MTCNN
3 | import os
4 |
5 | # open the video
6 | cap = cv2.VideoCapture('6_1690187711.mp4')
7 | detector = MTCNN()
8 |
9 | # directory for the saved face crops
10 | if not os.path.exists('faces'):
11 | os.makedirs('faces')
12 |
13 | i = 0
14 | face_count = 0
15 | while cap.isOpened():
16 | ret, frame = cap.read()
17 |
18 | if not ret:
19 | break
20 |
21 | # detect faces every 10 frames (0.3 s, assuming the video runs at 30 fps)
22 | if i % 10 == 0:
23 | # run face detection
24 | result = detector.detect_faces(frame)
25 | for person in result:
26 | bounding_box = person['box']
27 | keypoints = person['keypoints']
28 |
29 | cv2.rectangle(frame,
30 | (bounding_box[0], bounding_box[1]),
31 | (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),
32 | (0,155,255),
33 | 2)
34 |
35 | # save the face crop
36 | roi_gray = frame[bounding_box[1]:bounding_box[1] + bounding_box[3], bounding_box[0]:bounding_box[0] + bounding_box[2]]
37 | cv2.imwrite('faces/face_' + str(face_count) + '.jpg', roi_gray)
38 | face_count += 1
39 |
40 | # show the result
41 | cv2.imshow('frame',frame)
42 | if cv2.waitKey(1) & 0xFF == ord('q'): break  # imshow only refreshes when waitKey runs
43 | i += 1
44 |
45 | cap.release()
46 | cv2.destroyAllWindows()
47 |
--------------------------------------------------------------------------------
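The script above samples every 10th frame under a hard-coded 30 fps assumption. A small variant, sketched below, reads the actual frame rate from the capture instead; `CAP_PROP_FPS` can report 0 for some containers, so a fallback is kept. The video filename is the same placeholder used above.

```python
# Hedged sketch: derive the detection interval from the video's real fps
# instead of assuming 30 fps.
import cv2

cap = cv2.VideoCapture('6_1690187711.mp4')   # same sample video as above
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0      # some codecs report 0; fall back
interval = max(1, round(fps * 0.3))          # detect roughly every 0.3 s

i = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    if i % interval == 0:
        pass   # run detector.detect_faces(frame) here, as in the script above
    i += 1
cap.release()
```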
/block_2_FaceDetectionandExpressionRecognition/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/block_2_FaceDetectionandExpressionRecognition/.DS_Store
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/block_2_FaceDetectionandExpressionRecognition/README.md
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/.DS_Store
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/README.md:
--------------------------------------------------------------------------------
1 | # Facial Expression Recognition
2 |
3 | ## Setup
4 |
5 | > **Virtual Env**
6 | >
7 | > Maybe you want to create a virtual environment using [miniconda](https://docs.conda.io/en/latest/miniconda.html) before running `test.py`.
8 | >
9 | > **Create Env with conda**
10 | >
11 | > ```conda create -n facial-expression-recognition python=3.8```
12 | >
13 | > ```conda activate facial-expression-recognition```
14 |
15 | ```bash
16 | pip install -r requirements.txt
17 | ```
18 |
19 | ## Run
20 | ```bash
21 | python3 test.py
22 | ```
23 |
24 | ## Categorical model test
25 |
26 | | Name | Link |
27 | |----------|:-------------|
28 | | Test 1 | [Link #1](https://www.youtube.com/watch?v=t6C-5M997eM) |
29 | | Test the Voice 1 | [Link the Voice #1](https://www.youtube.com/watch?v=zbtljjdheJ4) |
30 | | Test the Voice 2 | [Link the Voice #2](https://www.youtube.com/watch?v=3M9dKjkc3kA) |
31 | | Test the Voice 3 | [Link the Voice #3](https://www.youtube.com/watch?v=HypKzNChOkc) |
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/face_test.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import tensorflow as tf
5 | from collections import Counter
6 | # load the model
7 | model = tf.keras.models.load_model('network-5Labels.h5')
8 | labels = ['Surprise', 'Neutral', 'Anger', 'Happy', 'Sad']
9 |
10 | # dictionary holding all prediction results
11 | predictions_dict = {}
12 |
13 | # path to the face folders on the desktop
14 | base_path = "/users/pengkezhong/Desktop/face/"
15 |
16 | # make sure the path exists
17 | if not os.path.exists(base_path):
18 | raise Exception(f"The path {base_path} does not exist.")
19 |
20 | # iterate over all folders under base_path
21 | for folder in sorted(os.listdir(base_path)):
22 | folder_path = os.path.join(base_path, folder)
23 | if os.path.isdir(folder_path):
24 | predictions_dict[folder] = []
25 |
26 | # iterate over all images in the current folder
27 | for image_name in os.listdir(folder_path):
28 | image_path = os.path.join(folder_path, image_name)
29 | if os.path.isfile(image_path) and image_name.lower().endswith(('.png', '.jpg', '.jpeg')):
30 | img = cv2.imread(image_path)
31 | if img is not None:
32 | # each image is known to contain a single face, so it can be resized directly
33 | face = cv2.resize(img, (48, 48))
34 | face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
35 | face = face / 255.0
36 | prediction = model.predict(np.array([face.reshape((48, 48, 1))])).argmax()
37 | state = labels[prediction]
38 | predictions_dict[folder].append((image_name, state))
39 | else:
40 | print(f"Failed to read image {image_name}.")
41 | else:
42 | print(f"Skipped file {image_name}, not an image.")
43 |
44 | # print the raw results
45 | # for second, results in predictions_dict.items():
46 | # print(f"Results for {second}: {results}")
47 |
48 | # dictionary counting each expression per folder
49 | expressions_count = {}
50 |
51 | for folder, results in predictions_dict.items():
52 | # use Counter to tally each expression
53 | count = Counter([expression for _, expression in results])
54 | expressions_count[folder] = dict(count)
55 |
56 | # print the expression counts for each folder
57 | for folder, counts in expressions_count.items():
58 | print(f"Counts for {folder}: {counts}")
59 |
--------------------------------------------------------------------------------
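One performance note on the loop above: it calls `model.predict` once per image, which pays the Keras dispatch overhead for every file. A minimal sketch of batching the already-preprocessed faces of one folder into a single forward pass (the function and variable names here are illustrative, not part of the repo):

```python
# Sketch: batch all preprocessed faces of a folder into one predict() call.
import numpy as np

def predict_folder(model, faces, labels):
    """faces: list of (48, 48) grayscale arrays already scaled to [0, 1]."""
    if not faces:
        return []
    batch = np.stack(faces).reshape((-1, 48, 48, 1))   # (n, 48, 48, 1)
    indices = model.predict(batch).argmax(axis=1)      # one forward pass
    return [labels[i] for i in indices]
```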
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/network-5Labels.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Pokezoom/Perception-System-of-Students-Classroom-Performance/1d1ec0fffe093267b24f70e3d2659087bfd78a34/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/network-5Labels.h5
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.19.4
2 | opencv-contrib-python==4.5.2.52
3 | opencv-python==4.4.0.46
4 | opencv-python-headless==4.5.1.48
5 | tensorflow==2.6.0
6 | tensorflow-estimator==2.6.0
7 | keras==2.6.0
8 | Keras-Preprocessing==1.1.2
--------------------------------------------------------------------------------
/block_2_FaceDetectionandExpressionRecognition/facial-expression-recognition/test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import tensorflow as tf
4 |
5 | face_detection = cv2.CascadeClassifier('haar_cascade_face_detection.xml')
6 |
7 | camera = cv2.VideoCapture(0)
8 | camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
9 | camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)
10 | settings = {
11 | 'scaleFactor': 1.3,
12 | 'minNeighbors': 5,
13 | 'minSize': (50, 50)
14 | }
15 |
16 | labels = ['Surprise', 'Neutral', 'Anger', 'Happy', 'Sad']
17 |
18 | model = tf.keras.models.load_model('network-5Labels.h5')
19 |
20 | while True:
21 | ret, img = camera.read()
22 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
23 | detected = face_detection.detectMultiScale(gray, **settings)
24 |
25 | for x, y, w, h in detected:
26 | cv2.rectangle(img, (x, y), (x+w, y+h), (245, 135, 66), 2)
27 | cv2.rectangle(img, (x, y), (x+w//3, y+20), (245, 135, 66), -1)
28 | face = gray[y+5:y+h-5, x+20:x+w-20]
29 | face = cv2.resize(face, (48,48))
30 | face = face/255.0
31 |
32 | predictions = model.predict(np.array([face.reshape((48,48,1))])).argmax()
33 | state = labels[predictions]
34 | font = cv2.FONT_HERSHEY_SIMPLEX
35 | cv2.putText(img,state,(x+10,y+15), font, 0.5, (255,255,255), 2, cv2.LINE_AA)
36 |
37 | cv2.imshow('Facial Expression', img)
38 |
39 | if cv2.waitKey(5) != -1:
40 | break
41 |
42 | camera.release()
43 | cv2.destroyAllWindows()
44 |
--------------------------------------------------------------------------------
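A small robustness caveat for the webcam loop in `test.py`: `camera.read()` can return `ret == False` (camera busy or unplugged), in which case `img` is `None` and `cv2.cvtColor` raises. A guarded sketch of the same loop skeleton:

```python
# Defensive capture loop; detection and drawing go where the comment indicates.
import cv2

camera = cv2.VideoCapture(0)
while True:
    ret, img = camera.read()
    if not ret:   # camera busy or disconnected; img would be None
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ... face detection and drawing as in test.py above ...
    cv2.imshow('Facial Expression', img)
    if cv2.waitKey(5) != -1:
        break
camera.release()
cv2.destroyAllWindows()
```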
/block_3_fatigue/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # 默认忽略的文件
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/block_3_fatigue/.idea/dbnavigator.xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/block_3_fatigue/.idea/facevideo.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/block_3_fatigue/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/block_3_fatigue/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/block_3_fatigue/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/web.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import numpy as np
4 | import tensorflow as tf
5 | from collections import Counter
6 | from fastapi import FastAPI
7 |
8 | # create the FastAPI instance
9 | app = FastAPI()
10 |
11 | # load the model
12 | model = tf.keras.models.load_model('network-5Labels.h5')
13 | labels = ['Surprise', 'Neutral', 'Anger', 'Happy', 'Sad']
14 |
15 | # function that runs the expression prediction
16 | def predict_emotions():
17 | # dictionary holding all prediction results
18 | predictions_dict = {}
19 |
20 | # path to the face folders on the desktop
21 | base_path = "/users/pengkezhong/Desktop/face/"
22 |
23 | # make sure the path exists
24 | if not os.path.exists(base_path):
25 | raise Exception(f"The path {base_path} does not exist.")
26 |
27 | # iterate over all folders under base_path
28 | for folder in sorted(os.listdir(base_path)):
29 | folder_path = os.path.join(base_path, folder)
30 | if os.path.isdir(folder_path):
31 | predictions_dict[folder] = []
32 |
33 | # iterate over all images in the current folder
34 | for image_name in os.listdir(folder_path):
35 | image_path = os.path.join(folder_path, image_name)
36 | if os.path.isfile(image_path) and image_name.lower().endswith(('.png', '.jpg', '.jpeg')):
37 | img = cv2.imread(image_path)
38 | if img is not None:
39 | # each image is known to contain a single face, so it can be resized directly
40 | face = cv2.resize(img, (48, 48))
41 | face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
42 | face = face / 255.0
43 | prediction = model.predict(np.array([face.reshape((48, 48, 1))])).argmax()
44 | state = labels[prediction]
45 | predictions_dict[folder].append((image_name, state))
46 | else:
47 | print(f"Failed to read image {image_name}.")
48 | else:
49 | print(f"Skipped file {image_name}, not an image.")
50 |
51 | # dictionary counting each expression per folder
52 | expressions_count = {}
53 |
54 | for folder, results in predictions_dict.items():
55 | # use Counter to tally each expression
56 | count = Counter([expression for _, expression in results])
57 | expressions_count[folder] = dict(count)
58 |
59 | return expressions_count  # return the expression counts
60 |
61 | # expose the route "/predict_emotions" via GET
62 | @app.get("/predict_emotions")
63 | def get_emotions():
64 | return predict_emotions()  # run the prediction and return the result
65 |
66 |
67 | # start the FastAPI server
68 | if __name__ == "__main__":
69 | import uvicorn
70 | uvicorn.run(app, host="0.0.0.0", port=8000)
71 |
--------------------------------------------------------------------------------
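For completeness, a minimal client sketch for the endpoint above, assuming the service was started with `uvicorn web:app --reload` on the default port 8000; only the standard library is used.

```python
# Query the /predict_emotions endpoint and print the per-folder counts.
import json
from urllib.request import urlopen

with urlopen("http://127.0.0.1:8000/predict_emotions") as resp:
    counts = json.load(resp)

# e.g. {"1": {"Neutral": 12, "Happy": 3}, "2": {...}}
print(counts)
```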