├── LICENSE
├── README.md
├── datasets
│   └── coco.py
├── func.py
├── get_train.py
├── image
│   ├── 1.jpg
│   ├── 11.jpg
│   ├── 12.jpg
│   ├── 2.jpg
│   ├── 3.jpg
│   └── 4.jpg
├── main.py
├── models
│   └── with_mobilenet.py
├── modules
│   ├── conv.py
│   ├── get_parameters.py
│   ├── keypoints.py
│   ├── load_state.py
│   ├── loss.py
│   ├── one_euro_filter.py
│   └── pose.py
├── openpose+RandomForest.ipynb
├── openpose_train_data.csv
├── requirement.txt
├── sound
│   ├── eyes.mp3
│   └── yawn.mp3
└── val.py
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 以太とTerra
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Driver State Detection System Based on OpenPose and the Random Forest Algorithm
2 |
3 | > Demo
4 |
5 |
6 |
7 | main.py is the program's entry point.
8 |
9 | get_train.py extracts pose features with OpenPose; the dataset is State Farm Distracted Driver Detection from Kaggle.
10 | Address: https://www.kaggle.com/c/state-farm-distracted-driver-detection
11 | Its output is the file openpose_train_data.csv.
12 |
13 | func.py is the library of support functions for the program, with detailed inline comments.
14 |
15 | val.py, modules and datasets are Light-OpenPose support files.
16 |
17 | The models folder must hold the three pretrained model files: OpenPose, dlib and the random forest.
18 | Model downloads:
19 |
20 | Link: https://pan.baidu.com/s/1qMid2zZWTuaPjE2nIkLWkw?pwd=0vzs
21 | Extraction code: 0vzs
22 |
23 | The video folder holds the side-camera and front-camera videos.
24 |
25 | The sound folder holds the voice alerts for fatigue detection.
26 |
27 | The image folder holds screenshots of the program in action.
28 |
29 | Quite a few libraries are required; if any are missing, install them one by one following the import errors.
30 |
--------------------------------------------------------------------------------
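There is no training script for the random forest in the repository; openpose_train_data.csv plus the model file loaded by func.py imply a straightforward scikit-learn fit. The sketch below is one plausible reconstruction, not the authors' documented procedure: the six dropped columns mirror the indices that clac_keras in func.py pops before predicting, the mean imputation mirrors its `avg` fallbacks, and the hyperparameters are illustrative assumptions.

```python
import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Features written by get_train.py: 17 distances (d1-d17) and 17 angles (a1-a17),
# indexed by image name, with a 0-9 class label taken from the folder name.
df = pd.read_csv('openpose_train_data.csv', index_col=0)
X = df.drop(columns=['label'])
X = X.fillna(X.mean())  # mean imputation, matching the avg fallbacks in func.py
# func.py pops indices 26, 24, 18, 16, 13, 4 from the 34-feature vector,
# which corresponds to dropping these six columns:
X = X.drop(columns=['d5', 'd14', 'd17', 'a2', 'a8', 'a10'])
y = df['label'].astype(int)  # func.py indexes text_all with the prediction, so labels must be ints

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0)  # illustrative hyperparameters
clf.fit(X_train, y_train)
print('held-out accuracy:', clf.score(X_test, y_test))
joblib.dump(clf, 'models/RandomForestClassifier_model.pkl')  # filename func.py expects
```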
/datasets/coco.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import json
3 | import math
4 | import os
5 | import pickle
6 |
7 | import cv2
8 | import numpy as np
9 | import pycocotools.mask  # plain `import pycocotools` does not expose the mask submodule
10 |
11 | from torch.utils.data.dataset import Dataset
12 |
13 | BODY_PARTS_KPT_IDS = [[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16],
14 | [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17]]
15 |
16 |
17 | def get_mask(segmentations, mask):
18 | for segmentation in segmentations:
19 | rle = pycocotools.mask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
20 | mask[pycocotools.mask.decode(rle) > 0.5] = 0
21 | return mask
22 |
23 |
24 | class CocoTrainDataset(Dataset):
25 | def __init__(self, labels, images_folder, stride, sigma, paf_thickness, transform=None):
26 | super().__init__()
27 | self._images_folder = images_folder
28 | self._stride = stride
29 | self._sigma = sigma
30 | self._paf_thickness = paf_thickness
31 | self._transform = transform
32 | with open(labels, 'rb') as f:
33 | self._labels = pickle.load(f)
34 |
35 | def __getitem__(self, idx):
36 | label = copy.deepcopy(self._labels[idx]) # label modified in transform
37 | image = cv2.imread(os.path.join(self._images_folder, label['img_paths']), cv2.IMREAD_COLOR)
38 | mask = np.ones(shape=(label['img_height'], label['img_width']), dtype=np.float32)
39 | mask = get_mask(label['segmentations'], mask)
40 | sample = {
41 | 'label': label,
42 | 'image': image,
43 | 'mask': mask
44 | }
45 | if self._transform:
46 | sample = self._transform(sample)
47 |
48 | mask = cv2.resize(sample['mask'], dsize=None, fx=1/self._stride, fy=1/self._stride, interpolation=cv2.INTER_AREA)
49 | keypoint_maps = self._generate_keypoint_maps(sample)
50 | sample['keypoint_maps'] = keypoint_maps
51 | keypoint_mask = np.zeros(shape=keypoint_maps.shape, dtype=np.float32)
52 | for idx in range(keypoint_mask.shape[0]):
53 | keypoint_mask[idx] = mask
54 | sample['keypoint_mask'] = keypoint_mask
55 |
56 | paf_maps = self._generate_paf_maps(sample)
57 | sample['paf_maps'] = paf_maps
58 | paf_mask = np.zeros(shape=paf_maps.shape, dtype=np.float32)
59 | for idx in range(paf_mask.shape[0]):
60 | paf_mask[idx] = mask
61 | sample['paf_mask'] = paf_mask
62 |
63 | image = sample['image'].astype(np.float32)
64 | image = (image - 128) / 256
65 | sample['image'] = image.transpose((2, 0, 1))
66 | del sample['label']
67 | return sample
68 |
69 | def __len__(self):
70 | return len(self._labels)
71 |
72 | def _generate_keypoint_maps(self, sample):
73 | n_keypoints = 18
74 | n_rows, n_cols, _ = sample['image'].shape
75 | keypoint_maps = np.zeros(shape=(n_keypoints + 1,
76 | n_rows // self._stride, n_cols // self._stride), dtype=np.float32) # +1 for bg
77 |
78 | label = sample['label']
79 | for keypoint_idx in range(n_keypoints):
80 | keypoint = label['keypoints'][keypoint_idx]
81 | if keypoint[2] <= 1:
82 | self._add_gaussian(keypoint_maps[keypoint_idx], keypoint[0], keypoint[1], self._stride, self._sigma)
83 | for another_annotation in label['processed_other_annotations']:
84 | keypoint = another_annotation['keypoints'][keypoint_idx]
85 | if keypoint[2] <= 1:
86 | self._add_gaussian(keypoint_maps[keypoint_idx], keypoint[0], keypoint[1], self._stride, self._sigma)
87 | keypoint_maps[-1] = 1 - keypoint_maps.max(axis=0)
88 | return keypoint_maps
89 |
90 | def _add_gaussian(self, keypoint_map, x, y, stride, sigma):
91 | n_sigma = 4
92 | tl = [int(x - n_sigma * sigma), int(y - n_sigma * sigma)]
93 | tl[0] = max(tl[0], 0)
94 | tl[1] = max(tl[1], 0)
95 |
96 | br = [int(x + n_sigma * sigma), int(y + n_sigma * sigma)]
97 | map_h, map_w = keypoint_map.shape
98 | br[0] = min(br[0], map_w * stride)
99 | br[1] = min(br[1], map_h * stride)
100 |
101 | shift = stride / 2 - 0.5
102 | for map_y in range(tl[1] // stride, br[1] // stride):
103 | for map_x in range(tl[0] // stride, br[0] // stride):
104 | d2 = (map_x * stride + shift - x) * (map_x * stride + shift - x) + \
105 | (map_y * stride + shift - y) * (map_y * stride + shift - y)
106 | exponent = d2 / 2 / sigma / sigma
107 | if exponent > 4.6052: # threshold, ln(100), ~0.01
108 | continue
109 | keypoint_map[map_y, map_x] += math.exp(-exponent)
110 | if keypoint_map[map_y, map_x] > 1:
111 | keypoint_map[map_y, map_x] = 1
112 |
113 | def _generate_paf_maps(self, sample):
114 | n_pafs = len(BODY_PARTS_KPT_IDS)
115 | n_rows, n_cols, _ = sample['image'].shape
116 | paf_maps = np.zeros(shape=(n_pafs * 2, n_rows // self._stride, n_cols // self._stride), dtype=np.float32)
117 |
118 | label = sample['label']
119 | for paf_idx in range(n_pafs):
120 | keypoint_a = label['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][0]]
121 | keypoint_b = label['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][1]]
122 | if keypoint_a[2] <= 1 and keypoint_b[2] <= 1:
123 | self._set_paf(paf_maps[paf_idx * 2:paf_idx * 2 + 2],
124 | keypoint_a[0], keypoint_a[1], keypoint_b[0], keypoint_b[1],
125 | self._stride, self._paf_thickness)
126 | for another_annotation in label['processed_other_annotations']:
127 | keypoint_a = another_annotation['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][0]]
128 | keypoint_b = another_annotation['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][1]]
129 | if keypoint_a[2] <= 1 and keypoint_b[2] <= 1:
130 | self._set_paf(paf_maps[paf_idx * 2:paf_idx * 2 + 2],
131 | keypoint_a[0], keypoint_a[1], keypoint_b[0], keypoint_b[1],
132 | self._stride, self._paf_thickness)
133 | return paf_maps
134 |
135 | def _set_paf(self, paf_map, x_a, y_a, x_b, y_b, stride, thickness):
136 | x_a /= stride
137 | y_a /= stride
138 | x_b /= stride
139 | y_b /= stride
140 | x_ba = x_b - x_a
141 | y_ba = y_b - y_a
142 | _, h_map, w_map = paf_map.shape
143 | x_min = int(max(min(x_a, x_b) - thickness, 0))
144 | x_max = int(min(max(x_a, x_b) + thickness, w_map))
145 | y_min = int(max(min(y_a, y_b) - thickness, 0))
146 | y_max = int(min(max(y_a, y_b) + thickness, h_map))
147 | norm_ba = (x_ba * x_ba + y_ba * y_ba) ** 0.5
148 | if norm_ba < 1e-7: # Same points, no paf
149 | return
150 | x_ba /= norm_ba
151 | y_ba /= norm_ba
152 |
153 | for y in range(y_min, y_max):
154 | for x in range(x_min, x_max):
155 | x_ca = x - x_a
156 | y_ca = y - y_a
157 | d = math.fabs(x_ca * y_ba - y_ca * x_ba)
158 | if d <= thickness:
159 | paf_map[0, y, x] = x_ba
160 | paf_map[1, y, x] = y_ba
161 |
162 |
163 | class CocoValDataset(Dataset):
164 | def __init__(self, labels, images_folder):
165 | super().__init__()
166 | with open(labels, 'r') as f:
167 | self._labels = json.load(f)
168 | self._images_folder = images_folder
169 |
170 | def __getitem__(self, idx):
171 | file_name = self._labels['images'][idx]['file_name']
172 | img = cv2.imread(os.path.join(self._images_folder, file_name), cv2.IMREAD_COLOR)
173 | return {
174 | 'img': img,
175 | 'file_name': file_name
176 | }
177 |
178 | def __len__(self):
179 | return len(self._labels['images'])
180 |
--------------------------------------------------------------------------------
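Nothing in this repository instantiates these datasets (training happens upstream in Lightweight-OpenPose), but for orientation, here is a minimal sketch of how CocoTrainDataset would be consumed. The label pickle and image folder are hypothetical paths, the stride/sigma/paf_thickness values are illustrative, and batches larger than 1 would additionally need a transform that crops images to a fixed size.

```python
import torch
from datasets.coco import CocoTrainDataset

# Hypothetical inputs: a pickled list of per-image annotations and a COCO image folder.
dataset = CocoTrainDataset('prepared_train_annotation.pkl', 'train2017/',
                           stride=8, sigma=7, paf_thickness=1)
loader = torch.utils.data.DataLoader(dataset, batch_size=1)

sample = next(iter(loader))
print(sample['image'].shape)          # (1, 3, H, W), normalized to roughly [-0.5, 0.5)
print(sample['keypoint_maps'].shape)  # (1, 19, H//8, W//8): 18 keypoints + background
print(sample['paf_maps'].shape)       # (1, 38, H//8, W//8): 19 limbs x (x, y) components
```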
/func.py:
--------------------------------------------------------------------------------
1 | import dlib
2 | import joblib
3 | import numpy as np
4 | import copy
5 | import pandas as pd
6 | import pygame
7 | from imutils import face_utils
8 | from scipy.spatial import distance
9 | from tkinter import *
10 | from PIL import Image, ImageTk
11 | import tkinter.ttk
12 | import numpy
13 | from PIL import Image, ImageDraw, ImageFont
14 | import math
15 | import cv2
16 | import torch
17 | from modules.pose import Pose, track_poses
18 | from val import normalize, pad_width
19 | from modules.keypoints import extract_keypoints, group_keypoints
20 | from models.with_mobilenet import PoseEstimationWithMobileNet
21 | from modules.load_state import load_state
22 |
23 | # Mean values of the training features, used to fill in undetected keypoints
24 | avg = [108.3920476, 28.25560194, 23.83509614, 59.88074356, 38.39867032, 58.68210028, 169.0982822, 150.9596774,
25 | 59.14090161, 107.8679241, 97.66179061, 229.0530985, 190.5525496, 109.007621, 207.7391332, 154.0163144,
26 | 108.5794621, 2.335975213, -2.26903988, 1.427404258, -3.187685543, -1.970117366, -3.31392059, -4.290557895,
27 | -2.154595849, -4.042274581, -1.943534425, 2.783517288, -0.181217392, -0.706798676, -1.652686336, -0.950017573,
28 | 1.550229334, 1.209989161]
29 |
30 | switch = 0
31 | yawn = False
32 | yawn_flag = 0
33 | eye_close = False
34 | eye_flag = 0
35 | flag = 0
36 | t = 0
37 |
38 | # Load the OpenPose model
39 | net = PoseEstimationWithMobileNet()  # build the network structure
40 | checkpoint = torch.load('models/checkpoint_iter_370000.pth', map_location='cpu')  # load the trained weights
41 | load_state(net, checkpoint)  # attach the weights to the structure
42 |
43 | thresh_eye = 0.17  # eye aspect ratio threshold
44 | thresh_mouth = 0.85  # mouth aspect ratio threshold
45 | frame_check = 25  # frames of closed eyes before a warning (unit: frames)
46 | detect = dlib.get_frontal_face_detector()  # face detector
47 | predict = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat")  # 68-point facial landmark model
48 |
49 | # Landmark indices for the eyes
50 | (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"] # 42~47
51 | (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"] # 36~41
52 | # Landmark indices for the mouth
53 | (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"] # 48~67
54 | pygame.mixer.init()  # initialize the audio module
55 | # Load the classification model
56 | etc = joblib.load(
57 | 'models/RandomForestClassifier_model.pkl')
58 |
59 | datas = pd.DataFrame(
60 | columns=['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10', 'd11', 'd12', 'd13', 'd14', 'd15', 'd16','d17',
61 | 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16','a17'])
62 |
63 | # Draw Chinese text on an OpenCV image
64 | def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
65 |     if (isinstance(img, numpy.ndarray)):  # check whether this is an OpenCV image (numpy array)
66 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
67 |     # Create an object that can draw on the given image
68 | draw = ImageDraw.Draw(img)
69 |     # Font settings
70 | fontStyle = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8")
71 |     # Draw the text
72 | draw.text((left, top), text, textColor, font=fontStyle)
73 |     # Convert back to OpenCV format
74 | return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)
75 | # Compute the eye aspect ratio
76 | def eye_aspect_ratio(eye):
77 | A = distance.euclidean(eye[1], eye[5])
78 | B = distance.euclidean(eye[2], eye[4])
79 | C = distance.euclidean(eye[0], eye[3])
80 | ratio = (A + B) / (2.0 * C)
81 | return ratio
82 | # Compute the mouth aspect ratio
83 | def mouth_aspect_ratio(mouth):
84 | A = distance.euclidean(mouth[2], mouth[10])
85 | B = distance.euclidean(mouth[4], mouth[8])
86 | C = distance.euclidean(mouth[0], mouth[6])
87 | ratio = (A + B) / (2.0 * C)
88 | return ratio
89 | # Resize and pad the image, then run the pose network
90 | def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
91 | pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1 / 256):
92 | height, width, _ = img.shape
93 | scale = net_input_height_size / height
94 |
95 | scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
96 |     scaled_img = normalize(scaled_img, img_mean, img_scale)  # normalize the image
97 |
98 | min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
99 | padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
100 |
101 | tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
102 |
103 | if not cpu:
104 | tensor_img = tensor_img.cuda()
105 |
106 |     stages_output = net(tensor_img)  # network forward pass
107 |
108 | stage2_heatmaps = stages_output[-2]
109 | heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
110 | heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
111 |
112 | stage2_pafs = stages_output[-1]
113 | pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
114 | pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
115 |
116 | return heatmaps, pafs, scale, pad
117 | # Extract the skeleton keypoints from an image
118 | def run_demo(net, img, height_size, cpu, track, smooth):
119 |     net = net.eval()  # switch the network to inference mode
120 | if not cpu:
121 |         net = net.cuda()  # move the network to the GPU
122 |
123 | stride = 8
124 | upsample_ratio = 4
125 |     num_keypoints = Pose.num_kpts  # 18 keypoints
126 |     previous_poses = []  # pose collection
127 |
128 | orig_img = img.copy()
129 | heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
130 |
131 | total_keypoints_num = 0
132 | all_keypoints_by_type = []
133 | for kpt_idx in range(num_keypoints): # 19th for bg
134 | total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
135 |
136 | pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
137 | for kpt_id in range(all_keypoints.shape[0]):
138 | all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
139 | all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
140 | current_poses = []
141 | for n in range(len(pose_entries)):
142 | if len(pose_entries[n]) == 0:
143 | continue
144 | pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
145 | for kpt_id in range(num_keypoints):
146 | if pose_entries[n][kpt_id] != -1.0: # keypoint was found
147 | pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
148 | pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
149 | pose = Pose(pose_keypoints, pose_entries[n][18])
150 | current_poses.append(pose)
151 | if track:
152 | track_poses(previous_poses, current_poses, smooth=smooth)
153 | previous_poses = current_poses
154 |
155 | if len(previous_poses) == 0:
156 | return []
157 | else:
158 | return previous_poses[0].keypoints
159 | # Display an OpenCV image on a Tk canvas
160 | def Showimage(imgCV_in, canva, layout="null"):
161 | global imgTK
162 | canvawidth = int(canva.winfo_reqwidth())
163 | canvaheight = int(canva.winfo_reqheight())
164 | sp = imgCV_in.shape
165 | cvheight = sp[0] # height(rows) of image
166 |     cvwidth = sp[1]  # width (columns) of image
167 | if (layout == "fill"):
168 | imgCV = cv2.resize(imgCV_in, (canvawidth, canvaheight), interpolation=cv2.INTER_AREA)
169 | elif (layout == "fit"):
170 | if (float(cvwidth / cvheight) > float(canvawidth / canvaheight)):
171 | imgCV = cv2.resize(imgCV_in, (canvawidth, int(canvawidth * cvheight / cvwidth)),
172 | interpolation=cv2.INTER_AREA)
173 | else:
174 | imgCV = cv2.resize(imgCV_in, (int(canvaheight * cvwidth / cvheight), canvaheight),
175 | interpolation=cv2.INTER_AREA)
176 | else:
177 | imgCV = imgCV_in
178 |     imgCV2 = cv2.cvtColor(imgCV, cv2.COLOR_BGR2RGBA)  # convert color from BGR to RGBA
179 |     current_image = Image.fromarray(imgCV2)  # convert the array into a PIL Image
180 |     imgTK = ImageTk.PhotoImage(image=current_image)  # wrap the Image in an ImageTk PhotoImage
181 | canva.create_image(0, 0, anchor=NW, image=imgTK)
182 | # Distance between two skeleton keypoints
183 | def clac_distance(a, b):
184 | dis_square = (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
185 | return math.sqrt(dis_square)
186 | # Angle at keypoint b formed by keypoints a and c
187 | def clac_angel(a, b, c):
188 | return math.atan2((a[0] - b[0]), (a[1] - b[1])) - math.atan2((c[0] - b[0]), (c[1] - b[1]))
189 | # Build the feature vector fed to the classifier
190 | def clac_keras(key_point):
191 | distance_all = []
192 | angel_all = []
193 |     # Compute distances
194 |     # Nose to neck
195 | if key_point[0][0] != -1 and key_point[1][0] != -1:
196 | distance_all.append(clac_distance(key_point[0], key_point[1]))
197 | else:
198 | distance_all.append(avg[0])
199 |     # Nose to right eye
200 | if key_point[0][0] != -1 and key_point[14][0] != -1:
201 | distance_all.append(clac_distance(key_point[0], key_point[14]))
202 | else:
203 | distance_all.append(avg[1])
204 |     # Nose to left eye
205 | if key_point[0][0] != -1 and key_point[15][0] != -1:
206 | distance_all.append(clac_distance(key_point[0], key_point[15]))
207 | else:
208 | distance_all.append(avg[2])
209 |     # Right eye to right ear
210 | if key_point[14][0] != -1 and key_point[16][0] != -1:
211 | distance_all.append(clac_distance(key_point[14], key_point[16]))
212 | else:
213 | distance_all.append(avg[3])
214 |     # Left eye to left ear
215 | if key_point[15][0] != -1 and key_point[17][0] != -1:
216 | distance_all.append(clac_distance(key_point[15], key_point[17]))
217 | else:
218 | distance_all.append(avg[4])
219 |     # Neck to right shoulder
220 | if key_point[1][0] != -1 and key_point[2][0] != -1:
221 | distance_all.append(clac_distance(key_point[1], key_point[2]))
222 | else:
223 | distance_all.append(avg[5])
224 |     # Right shoulder to right elbow
225 | if key_point[2][0] != -1 and key_point[3][0] != -1:
226 | distance_all.append(clac_distance(key_point[2], key_point[3]))
227 | else:
228 | distance_all.append(avg[6])
229 |     # Right elbow to right wrist
230 | if key_point[3][0] != -1 and key_point[4][0] != -1:
231 | distance_all.append(clac_distance(key_point[3], key_point[4]))
232 | else:
233 | distance_all.append(avg[7])
234 |     # Neck to left shoulder
235 | if key_point[1][0] != -1 and key_point[5][0] != -1:
236 | distance_all.append(clac_distance(key_point[1], key_point[5]))
237 | else:
238 | distance_all.append(avg[8])
239 |     # Left shoulder to left elbow
240 | if key_point[5][0] != -1 and key_point[6][0] != -1:
241 | distance_all.append(clac_distance(key_point[5], key_point[6]))
242 | else:
243 | distance_all.append(avg[9])
244 |     # Left elbow to left wrist
245 | if key_point[6][0] != -1 and key_point[7][0] != -1:
246 | distance_all.append(clac_distance(key_point[6], key_point[7]))
247 | else:
248 | distance_all.append(avg[10])
249 |     # Neck to right hip
250 | if key_point[1][0] != -1 and key_point[8][0] != -1:
251 | distance_all.append(clac_distance(key_point[1], key_point[8]))
252 | else:
253 | distance_all.append(avg[11])
254 |     # Right hip to right knee
255 | if key_point[8][0] != -1 and key_point[9][0] != -1:
256 | distance_all.append(clac_distance(key_point[8], key_point[9]))
257 | else:
258 | distance_all.append(avg[12])
259 |     # Right knee to right ankle
260 | if key_point[9][0] != -1 and key_point[10][0] != -1:
261 | distance_all.append(clac_distance(key_point[9], key_point[10]))
262 | else:
263 | distance_all.append(avg[13])
264 |     # Neck to left hip
265 | if key_point[1][0] != -1 and key_point[11][0] != -1:
266 | distance_all.append(clac_distance(key_point[1], key_point[11]))
267 | else:
268 | distance_all.append(avg[14])
269 |     # Left hip to left knee (keypoints 11 and 12; the original comment was mislabeled)
270 | if key_point[11][0] != -1 and key_point[12][0] != -1:
271 | distance_all.append(clac_distance(key_point[11], key_point[12]))
272 | else:
273 | distance_all.append(avg[15])
274 |     # Left knee to left ankle (keypoints 12 and 13; the original comment was mislabeled)
275 | if key_point[12][0] != -1 and key_point[13][0] != -1:
276 | distance_all.append(clac_distance(key_point[12], key_point[13]))
277 | else:
278 | distance_all.append(avg[16])
279 |     # Compute angles
280 |     # Nose-right eye-right ear
281 | if key_point[0][0] != -1 and key_point[14][0] != -1 and key_point[16][0] != -1:
282 | angel_all.append(clac_angel(key_point[0], key_point[14], key_point[16]))
283 | else:
284 | angel_all.append(avg[17])
285 |     # Nose-left eye-left ear
286 | if key_point[0][0] != -1 and key_point[15][0] != -1 and key_point[17][0] != -1:
287 | angel_all.append(clac_angel(key_point[0], key_point[15], key_point[17]))
288 | else:
289 | angel_all.append(avg[18])
290 |     # Neck-right shoulder-right elbow
291 | if key_point[1][0] != -1 and key_point[2][0] != -1 and key_point[3][0] != -1:
292 | angel_all.append(clac_angel(key_point[1], key_point[2], key_point[3]))
293 | else:
294 | angel_all.append(avg[19])
295 |     # Right shoulder-right elbow-right wrist
296 | if key_point[2][0] != -1 and key_point[3][0] != -1 and key_point[4][0] != -1:
297 | angel_all.append(clac_angel(key_point[2], key_point[3], key_point[4]))
298 | else:
299 | angel_all.append(avg[20])
300 |     # Neck-left shoulder-left elbow
301 | if key_point[1][0] != -1 and key_point[5][0] != -1 and key_point[6][0] != -1:
302 | angel_all.append(clac_angel(key_point[1], key_point[5], key_point[6]))
303 | else:
304 | angel_all.append(avg[21])
305 |     # Left shoulder-left elbow-left wrist
306 | if key_point[5][0] != -1 and key_point[6][0] != -1 and key_point[7][0] != -1:
307 | angel_all.append(clac_angel(key_point[5], key_point[6], key_point[7]))
308 | else:
309 | angel_all.append(avg[22])
310 |     # Neck-right hip-right knee
311 | if key_point[1][0] != -1 and key_point[8][0] != -1 and key_point[9][0] != -1:
312 | angel_all.append(clac_angel(key_point[1], key_point[8], key_point[9]))
313 | else:
314 | angel_all.append(avg[23])
315 |     # Right hip-right knee-right ankle
316 | if key_point[8][0] != -1 and key_point[9][0] != -1 and key_point[10][0] != -1:
317 | angel_all.append(clac_angel(key_point[8], key_point[9], key_point[10]))
318 | else:
319 | angel_all.append(avg[24])
320 |     # Neck-left hip-left knee
321 | if key_point[1][0] != -1 and key_point[11][0] != -1 and key_point[12][0] != -1:
322 | angel_all.append(clac_angel(key_point[1], key_point[11], key_point[12]))
323 | else:
324 | angel_all.append(avg[25])
325 |     # Left hip-left knee-left ankle
326 | if key_point[11][0] != -1 and key_point[12][0] != -1 and key_point[13][0] != -1:
327 | angel_all.append(clac_angel(key_point[11], key_point[12], key_point[13]))
328 | else:
329 | angel_all.append(avg[26])
330 |     # Nose-neck-right shoulder
331 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[2][0] != -1:
332 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[2]))
333 | else:
334 | angel_all.append(avg[27])
335 |     # Nose-neck-left shoulder
336 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[5][0] != -1:
337 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[5]))
338 | else:
339 | angel_all.append(avg[28])
340 |     # Right eye-nose-left eye
341 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[15][0] != -1:
342 | angel_all.append(clac_angel(key_point[14], key_point[0], key_point[15]))
343 | else:
344 | angel_all.append(avg[29])
345 |     # Right eye-nose-neck
346 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1:
347 | angel_all.append(clac_angel(key_point[14], key_point[0], key_point[1]))
348 | else:
349 | angel_all.append(avg[30])
350 |     # Left eye-nose-neck
351 | if key_point[15][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1:
352 | angel_all.append(clac_angel(key_point[15], key_point[0], key_point[1]))
353 | else:
354 | angel_all.append(avg[31])
355 |     # Nose-neck-right hip
356 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[8][0] != -1:
357 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[8]))
358 | else:
359 | angel_all.append(avg[32])
360 |     # Nose-neck-left hip
361 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[11][0] != -1:
362 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[11]))
363 | else:
364 | angel_all.append(avg[33])
365 | data = distance_all + angel_all
366 | datas.loc[0] = data
367 |     for i in [10 + 16, 8 + 16, 2 + 16, 16, 13, 4]:  # drop six unused features, popping from the highest index down
368 | data.pop(i)
369 | return data
370 | # Fatigue detection loop
371 | def main_detect(cap):
372 | while switch == 1:
373 | start = cv2.getTickCount()
374 | result_show.grid_forget()
375 | canva_r.delete("all")
376 |         global t, eye_close, yawn, yawn_flag, eye_flag, flag  # flag and eye_flag are assigned below, so they must be declared global too
377 |         ret, frame = cap.read()  # read a frame from the capture
378 | frame = frame[0:1080, 0:1920 - 480]
379 | frame = cv2.resize(frame, (int(frame.shape[1] / 2.25), int(frame.shape[0] / 2.25)))
380 |         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
381 | subjects = detect(gray, 0)
382 | for subject in subjects:
383 | shape = predict(gray, subject)
384 |             shape = face_utils.shape_to_np(shape)  # coordinates of the 68 facial landmarks
385 |
386 |             # Average eye aspect ratio over both eyes
387 | leftEye = shape[lStart:lEnd]
388 | rightEye = shape[rStart:rEnd]
389 | leftRatio = eye_aspect_ratio(leftEye)
390 | rightRatio = eye_aspect_ratio(rightEye)
391 | EyeRatio = (leftRatio + rightRatio) / 2.0
392 |
393 |             # Mouth aspect ratio
394 | mouth = shape[mStart:mEnd]
395 | mouthRatio = mouth_aspect_ratio(mouth)
396 |
397 |             # Draw the convex hulls (left commented out)
398 | # leftEyeHull = cv2.convexHull(leftEye)
399 | # rightEyeHull = cv2.convexHull(rightEye)
400 | # mouthHull = cv2.convexHull(mouth)
401 | # cv2.drawContours(frame, [leftEyeHull], -1, (50, 50, 250), 2)
402 | # cv2.drawContours(frame, [rightEyeHull], -1, (50, 50, 250), 2)
403 | # cv2.drawContours(frame, [mouthHull], -1, (150, 50, 150), 2)
404 |
405 |             # Check for yawning
406 | if mouthRatio > thresh_mouth:
407 | yawn = True
408 | yawn_flag = 0
409 | if yawn == True and yawn_flag < 40:
410 | canva_r.create_text(200, 200, text="检测到您打了一个哈欠,\n请注意不要疲劳驾驶!", font=("Lucida Console", 15), fill="red")
411 | if yawn == True and t == 0:
412 | t = 1
413 | pygame.mixer.music.stop()
414 | pygame.mixer.music.load('sound\\yawn.mp3')
415 | pygame.mixer.music.play()
416 | yawn_flag = yawn_flag + 1
417 | elif yawn == True and yawn_flag == 40:
418 | yawn = False
419 | yawn_flag = 0
420 | t = 0
421 |
422 |             # Check whether the eyes are closed
423 | if EyeRatio < thresh_eye:
424 | flag = flag + 1
425 | if flag >= frame_check:
426 | eye_close = True
427 | eye_flag = 0
428 | else:
429 | flag = 0
430 | if eye_close == True and eye_flag < 40:
431 | # WARNING
432 | canva_r.create_text(200, 200, text="警告!!!\n检测到您的眼睛已经闭合,\n请注意不要疲劳驾驶!", justify=LEFT,
433 | font=("Lucida Console", 15), fill="red")
434 | if eye_close == True and t == 0:
435 | t = 1
436 | pygame.mixer.music.stop()
437 | pygame.mixer.music.load('sound\\eyes.mp3')
438 | pygame.mixer.music.play()
439 | eye_flag = eye_flag + 1
440 | elif eye_close == True and eye_flag == 40:
441 | eye_close = False
442 | eye_flag = 0
443 | t = 0
444 | end = cv2.getTickCount()
445 | during1 = (end - start) / cv2.getTickFrequency()
446 |         # Time spent on this frame, in seconds (tick count divided by tick frequency)
447 |
448 | FPS.set("FPS:" + str(round(1 / during1, 2)))
449 | Showimage(frame, canva_l, "fit")
450 | root.update()
451 | # Driving-state classification loop
452 | def main_class(vc):
453 |     # Video reading parameters
454 |     c = 0  # starting frame index
455 |     timeF = 100  # frame sampling interval (c is never incremented below, so every frame is processed)
456 | result_show.grid(row=1, column=1)
457 | while switch == 0:
458 | # image = cv2.imread('img_59235.jpg')
459 | rval, image = vc.read()
460 | if c % timeF == 0:
461 | start = cv2.getTickCount()
462 | image = image[:, 240:1920 - 240, :]
463 | image = cv2.resize(image, (int(image.shape[1] / 2.25), int(image.shape[0] / 2.25)))
464 |
465 | key_point = run_demo(net, image, image.shape[0] / 3, False, 1, 1)
466 | data = clac_keras(key_point)
467 |
468 | y_pred = etc.predict([data])
469 | y_pred_proba = etc.predict_proba([data])
470 |
471 | canvas = copy.deepcopy(image)
472 | Showimage(canvas, canva_l, "fill")
473 |
474 | canva_r.delete("all")
475 |             # Class labels for the bar chart
476 | text_all = ("安全驾驶 ", "用右手发短信 ", "用右手打电话 ", "用左手发短信 ", "用左手打电话 ",
477 | "调音乐播放器 ", "喝水 ", "后面拿东西 ", "弄头发或化妆 ", "与乘客交谈 ")
478 | for i in range(10):
479 | canva_r.create_text(70, 36 * i + 20, text=text_all[i], font=("Lucida Console", 10))
480 | canva_r.create_rectangle(150, 15 + 36 * i, 150 + 100 * y_pred_proba[0][i], 25 + 36 * i, fill='cyan')
481 | canva_r.create_text(300, 36 * i + 20, text=y_pred_proba[0][i], justify=LEFT)
482 |
483 | end = cv2.getTickCount()
484 | during1 = (end - start) / cv2.getTickFrequency()
485 |             # Time spent on this frame, in seconds
486 | FPS.set("FPS:" + str(round(1 / during1, 2)))
487 | result.set("识别结果为:" + text_all[y_pred[0]])
488 | root.update()
489 | # c = c + 1
490 | # Toggle the view switch
491 | def swi():
492 | global switch
493 | switch = not switch
494 | # GUI initialization
495 | def GUI_init():
496 |     global result_show, canva_r, canva_l, FPS, root, result, switch
497 |     # Create the main window
498 | root = Tk()
499 | root.title("驾驶员检测")
500 | root.minsize(710, 410)
501 |     # Canvas for the video feed
502 | canva_l = Canvas(root, width=480, height=360, bg="white")
503 | canva_l.grid(row=0, column=0)
504 |     # Canvas for the probability bar chart
505 | canva_r = Canvas(root, width=350, height=360, bg="white")
506 | canva_r.grid(row=0, column=1)
507 |     # FPS display
508 | FPS = tkinter.StringVar()
509 | FPS_show = tkinter.Label(root, textvariable=FPS, bg="white", font=("Lucida Console", 10))
510 | FPS_show.grid(row=1, column=0)
511 |     # Recognition result display
512 | result = tkinter.StringVar()
513 | result_show = tkinter.Label(root, textvariable=result, bg="white", font=("Lucida Console", 14))
514 | result_show.grid(row=1, column=1)
515 |     # View-switch button
516 | cut = tkinter.Button(root, text="切换视角", command=swi, font=("Lucida Console", 14))
517 | cut.place(x=350, y=366)
--------------------------------------------------------------------------------
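To make the eye aspect ratio concrete: the fatigue logic above hinges on the ratio dropping below thresh_eye = 0.17 for frame_check consecutive frames. Below is a quick worked example with made-up landmark coordinates; the formula is restated locally so the snippet does not trigger func.py's module-level model loading.

```python
from scipy.spatial import distance

def eye_aspect_ratio(eye):  # same formula as in func.py
    A = distance.euclidean(eye[1], eye[5])
    B = distance.euclidean(eye[2], eye[4])
    C = distance.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)

# Six eye landmarks ordered p1..p6, as in the dlib 68-point scheme.
open_eye = [(0, 3), (2, 5), (4, 5), (6, 3), (4, 1), (2, 1)]
nearly_closed = [(0, 3), (2, 3.4), (4, 3.4), (6, 3), (4, 2.6), (2, 2.6)]

print(eye_aspect_ratio(open_eye))       # (4 + 4) / (2 * 6) = 0.667, well above 0.17
print(eye_aspect_ratio(nearly_closed))  # (0.8 + 0.8) / (2 * 6) = 0.133, below 0.17
```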
/get_train.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import cv2
4 | import numpy as np
5 | import torch
6 |
7 | from models.with_mobilenet import PoseEstimationWithMobileNet
8 | from modules.keypoints import extract_keypoints, group_keypoints
9 | from modules.load_state import load_state
10 | from modules.pose import Pose, track_poses
11 | from val import normalize, pad_width
12 |
13 |
14 | def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
15 | pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
16 | height, width, _ = img.shape
17 | scale = net_input_height_size / height
18 |
19 | scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
20 |     scaled_img = normalize(scaled_img, img_mean, img_scale)  # normalize the image
21 |
22 | min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
23 | padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
24 |
25 | tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
26 |
27 | if not cpu:
28 | tensor_img = tensor_img.cuda()
29 |
30 |     stages_output = net(tensor_img)  # network forward pass
31 |
32 | stage2_heatmaps = stages_output[-2]
33 | heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
34 | heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
35 |
36 | stage2_pafs = stages_output[-1]
37 | pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
38 | pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
39 |
40 | return heatmaps, pafs, scale, pad
41 |
42 | def run_demo(net, img, height_size, cpu, track, smooth):
43 |     net = net.eval()  # switch the network to inference mode
44 | if not cpu:
45 |         net = net.cuda()  # move the network to the GPU
46 |
47 | stride = 8
48 | upsample_ratio = 4
49 |     num_keypoints = Pose.num_kpts  # 18 keypoints
50 |     previous_poses = []  # pose collection
51 |
52 | orig_img = img.copy()
53 | heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu)
54 |
55 | total_keypoints_num = 0
56 | all_keypoints_by_type = []
57 | for kpt_idx in range(num_keypoints): # 19th for bg
58 | total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
59 |
60 | pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
61 | for kpt_id in range(all_keypoints.shape[0]):
62 | all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
63 | all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
64 | current_poses = []
65 | for n in range(len(pose_entries)):
66 | if len(pose_entries[n]) == 0:
67 | continue
68 | pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
69 | for kpt_id in range(num_keypoints):
70 | if pose_entries[n][kpt_id] != -1.0: # keypoint was found
71 | pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
72 | pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
73 | pose = Pose(pose_keypoints, pose_entries[n][18])
74 | current_poses.append(pose)
75 | if track:
76 | track_poses(previous_poses, current_poses, smooth=smooth)
77 | previous_poses = current_poses
78 |
79 | if len(previous_poses)==0:
80 | return []
81 | else:
82 | return previous_poses[0].keypoints
83 |
84 |
85 |
86 | BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
87 | "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
88 | "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
89 | "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }
90 |
91 | BODY_PARTS = { "鼻子": 0, "脖子": 1,
92 | "右肩": 2, "右肘": 3, "右腕": 4,
93 | "左肩": 5, "左肘": 6, "左腕": 7,
94 | "右臀": 8, "右膝": 9, "右踝": 10,
95 | "左臀": 11,"左膝": 12,"左踝": 13,
96 | "右眼": 14,"左眼": 15,
97 | "右耳": 16,"左耳": 17 }
98 | import math
99 | import pandas as pd
100 | import os
101 |
102 | # Distance between two keypoints
103 | def distance(a,b):
104 | dis_square = (a[0]-b[0])**2 + (a[1]-b[1])**2
105 | return math.sqrt(dis_square)
106 |
107 | # Angle at keypoint b formed by keypoints a and c
108 | def angel(a,b,c):
109 | return math.atan2((a[0]-b[0]),(a[1]-b[1]))-math.atan2((c[0]-b[0]),(c[1]-b[1]))
110 |
111 |
112 | if __name__ == '__main__':
113 |     net = PoseEstimationWithMobileNet()  # build the network structure
114 |     checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu')  # load the trained weights
115 |     load_state(net, checkpoint)  # attach the weights to the structure
116 |
117 | datas = pd.DataFrame(
118 | columns=['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10', 'd11', 'd12', 'd13', 'd14', 'd15', 'd16',
119 | 'd17',
120 | 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16',
121 | 'a17', 'label'])
122 |
123 | for label_name in sorted(os.listdir("../data/imgs/train")):
124 | print(label_name)
125 | for image_name in sorted(os.listdir("../data/imgs/train/" + label_name)):
126 | print(image_name)
127 | image = cv2.imread(os.path.join("../data/imgs/train/" + label_name, image_name))
128 |
129 | # print (image.shape) # (480, 640, 3)
130 | key_point = run_demo(net, image, image.shape[0]/3, False, 1, 1)
131 | distance_all=[]
132 | angel_all=[]
133 | if len(key_point)==0:
134 | continue
135 | else:
136 |                 # Compute distances
137 |                 # Nose to neck
138 | if key_point[0][0]!=-1 and key_point[1][0]!=-1:
139 | distance_all.append(distance(key_point[0], key_point[1]))
140 | else:
141 | distance_all.append(np.nan)
142 |                 # Nose to right eye
143 | if key_point[0][0]!=-1 and key_point[14][0]!=-1:
144 | distance_all.append(distance(key_point[0], key_point[14]))
145 | else:
146 | distance_all.append(np.nan)
147 |                 # Nose to left eye
148 | if key_point[0][0]!=-1 and key_point[15][0]!=-1:
149 | distance_all.append(distance(key_point[0], key_point[15]))
150 | else:
151 | distance_all.append(np.nan)
152 |                 # Right eye to right ear
153 | if key_point[14][0]!=-1 and key_point[16][0]!=-1:
154 | distance_all.append(distance(key_point[14], key_point[16]))
155 | else:
156 | distance_all.append(np.nan)
157 |                 # Left eye to left ear
158 | if key_point[15][0]!=-1 and key_point[17][0]!=-1:
159 | distance_all.append(distance(key_point[15], key_point[17]))
160 | else:
161 | distance_all.append(np.nan)
162 |                 # Neck to right shoulder
163 | if key_point[1][0]!=-1 and key_point[2][0]!=-1:
164 | distance_all.append(distance(key_point[1], key_point[2]))
165 | else:
166 | distance_all.append(np.nan)
167 |                 # Right shoulder to right elbow
168 | if key_point[2][0]!=-1 and key_point[3][0]!=-1:
169 | distance_all.append(distance(key_point[2], key_point[3]))
170 | else:
171 | distance_all.append(np.nan)
172 |                 # Right elbow to right wrist
173 | if key_point[3][0]!=-1 and key_point[4][0]!=-1:
174 | distance_all.append(distance(key_point[3], key_point[4]))
175 | else:
176 | distance_all.append(np.nan)
177 |                 # Neck to left shoulder
178 | if key_point[1][0]!=-1 and key_point[5][0]!=-1:
179 | distance_all.append(distance(key_point[1], key_point[5]))
180 | else:
181 | distance_all.append(np.nan)
182 |                 # Left shoulder to left elbow
183 | if key_point[5][0]!=-1 and key_point[6][0]!=-1:
184 | distance_all.append(distance(key_point[5], key_point[6]))
185 | else:
186 | distance_all.append(np.nan)
187 |                 # Left elbow to left wrist
188 | if key_point[6][0]!=-1 and key_point[7][0]!=-1:
189 | distance_all.append(distance(key_point[6], key_point[7]))
190 | else:
191 | distance_all.append(np.nan)
192 |                 # Neck to right hip
193 | if key_point[1][0]!=-1 and key_point[8][0]!=-1:
194 | distance_all.append(distance(key_point[1], key_point[8]))
195 | else:
196 | distance_all.append(np.nan)
197 |                 # Right hip to right knee
198 | if key_point[8][0]!=-1 and key_point[9][0]!=-1:
199 | distance_all.append(distance(key_point[8], key_point[9]))
200 | else:
201 | distance_all.append(np.nan)
202 |                 # Right knee to right ankle
203 | if key_point[9][0]!=-1 and key_point[10][0]!=-1:
204 | distance_all.append(distance(key_point[9], key_point[10]))
205 | else:
206 | distance_all.append(np.nan)
207 |                 # Neck to left hip
208 | if key_point[1][0]!=-1 and key_point[11][0]!=-1:
209 | distance_all.append(distance(key_point[1], key_point[11]))
210 | else:
211 | distance_all.append(np.nan)
212 |                 # Left hip to left knee (keypoints 11 and 12; the original comment was mislabeled)
213 | if key_point[11][0]!=-1 and key_point[12][0]!=-1:
214 | distance_all.append(distance(key_point[11], key_point[12]))
215 | else:
216 | distance_all.append(np.nan)
217 |                 # Left knee to left ankle (keypoints 12 and 13; the original comment was mislabeled)
218 | if key_point[12][0]!=-1 and key_point[13][0]!=-1:
219 | distance_all.append(distance(key_point[12], key_point[13]))
220 | else:
221 | distance_all.append(np.nan)
222 |                 # Compute angles
223 |                 # Nose-right eye-right ear
224 | if key_point[0][0]!=-1 and key_point[14][0]!=-1 and key_point[16][0]!=-1:
225 | angel_all.append(angel(key_point[0],key_point[14],key_point[16]))
226 | else:
227 | angel_all.append(np.nan)
228 |                 # Nose-left eye-left ear
229 | if key_point[0][0] != -1 and key_point[15][0] != -1 and key_point[17][0] != -1:
230 | angel_all.append(angel(key_point[0], key_point[15], key_point[17]))
231 | else:
232 | angel_all.append(np.nan)
233 |                 # Neck-right shoulder-right elbow
234 | if key_point[1][0] != -1 and key_point[2][0] != -1 and key_point[3][0] != -1:
235 | angel_all.append(angel(key_point[1], key_point[2], key_point[3]))
236 | else:
237 | angel_all.append(np.nan)
238 |                 # Right shoulder-right elbow-right wrist
239 | if key_point[2][0] != -1 and key_point[3][0] != -1 and key_point[4][0] != -1:
240 | angel_all.append(angel(key_point[2], key_point[3], key_point[4]))
241 | else:
242 | angel_all.append(np.nan)
243 |                 # Neck-left shoulder-left elbow
244 | if key_point[1][0] != -1 and key_point[5][0] != -1 and key_point[6][0] != -1:
245 | angel_all.append(angel(key_point[1], key_point[5], key_point[6]))
246 | else:
247 | angel_all.append(np.nan)
248 |                 # Left shoulder-left elbow-left wrist
249 | if key_point[5][0] != -1 and key_point[6][0] != -1 and key_point[7][0] != -1:
250 | angel_all.append(angel(key_point[5], key_point[6], key_point[7]))
251 | else:
252 | angel_all.append(np.nan)
253 |                 # Neck-right hip-right knee
254 | if key_point[1][0] != -1 and key_point[8][0] != -1 and key_point[9][0] != -1:
255 | angel_all.append(angel(key_point[1], key_point[8], key_point[9]))
256 | else:
257 | angel_all.append(np.nan)
258 |                 # Right hip-right knee-right ankle
259 | if key_point[8][0] != -1 and key_point[9][0] != -1 and key_point[10][0] != -1:
260 | angel_all.append(angel(key_point[8], key_point[9], key_point[10]))
261 | else:
262 | angel_all.append(np.nan)
263 |                 # Neck-left hip-left knee
264 | if key_point[1][0] != -1 and key_point[11][0] != -1 and key_point[12][0] != -1:
265 | angel_all.append(angel(key_point[1], key_point[11], key_point[12]))
266 | else:
267 | angel_all.append(np.nan)
268 |                 # Left hip-left knee-left ankle
269 | if key_point[11][0] != -1 and key_point[12][0] != -1 and key_point[13][0] != -1:
270 | angel_all.append(angel(key_point[11], key_point[12], key_point[13]))
271 | else:
272 | angel_all.append(np.nan)
273 |                 # Nose-neck-right shoulder
274 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[2][0] != -1:
275 | angel_all.append(angel(key_point[0], key_point[1], key_point[2]))
276 | else:
277 | angel_all.append(np.nan)
278 |                 # Nose-neck-left shoulder
279 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[5][0] != -1:
280 | angel_all.append(angel(key_point[0], key_point[1], key_point[5]))
281 | else:
282 | angel_all.append(np.nan)
283 |                 # Right eye-nose-left eye
284 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[15][0] != -1:
285 | angel_all.append(angel(key_point[14], key_point[0], key_point[15]))
286 | else:
287 | angel_all.append(np.nan)
288 |                 # Right eye-nose-neck
289 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1:
290 | angel_all.append(angel(key_point[14], key_point[0], key_point[1]))
291 | else:
292 | angel_all.append(np.nan)
293 |                 # Left eye-nose-neck
294 | if key_point[15][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1:
295 | angel_all.append(angel(key_point[15], key_point[0], key_point[1]))
296 | else:
297 | angel_all.append(np.nan)
298 |                 # Nose-neck-right hip
299 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[8][0] != -1:
300 | angel_all.append(angel(key_point[0], key_point[1], key_point[8]))
301 | else:
302 | angel_all.append(np.nan)
303 |                 # Nose-neck-left hip
304 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[11][0] != -1:
305 | angel_all.append(angel(key_point[0], key_point[1], key_point[11]))
306 | else:
307 | angel_all.append(np.nan)
308 |
309 | data = distance_all + angel_all + [label_name[1]]
310 | datas.loc[image_name] = data
311 | datas.to_csv("openpose_train_data.csv",sep=',')
312 |
--------------------------------------------------------------------------------
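A plausible origin for the hard-coded `avg` list at the top of func.py is the per-column mean of this script's output, since get_train.py writes np.nan wherever a keypoint went undetected. The following sketch regenerates such a list; this is an assumption about how those numbers were derived, not something documented in the repository.

```python
import pandas as pd

df = pd.read_csv('openpose_train_data.csv', index_col=0)
# Per-column means over all training rows; NaNs from undetected keypoints are skipped.
# Column order matches func.py's avg list: d1..d17, then a1..a17.
avg = df.drop(columns=['label']).mean().tolist()
print([round(v, 7) for v in avg])
```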
/image/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/1.jpg
--------------------------------------------------------------------------------
/image/11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/11.jpg
--------------------------------------------------------------------------------
/image/12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/12.jpg
--------------------------------------------------------------------------------
/image/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/2.jpg
--------------------------------------------------------------------------------
/image/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/3.jpg
--------------------------------------------------------------------------------
/image/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/4.jpg
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | from func import main_detect, main_class, GUI_init
4 |
5 |
6 | # Load the test videos
7 | vc = cv2.VideoCapture('video/dxandcar.mp4')
8 | cap = cv2.VideoCapture('video/dxha.mp4')
9 |
10 | if __name__ == '__main__':
11 | GUI_init() # 初始化GUI界面
12 | while 1:
13 | main_detect(cap)
14 | main_class(vc)
15 |
--------------------------------------------------------------------------------
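main.py reads two pre-recorded clips from the hard-coded paths above (the video folder the README mentions). To try the system against live cameras instead, the two captures could plausibly be swapped for device indices; this substitution is hypothetical and untested:

```python
import cv2

cap = cv2.VideoCapture(0)  # front (face) camera, consumed by main_detect
vc = cv2.VideoCapture(1)   # side (posture) camera, consumed by main_class
```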
/models/with_mobilenet.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | from modules.conv import conv, conv_dw, conv_dw_no_bn
5 |
6 |
7 | class Cpm(nn.Module):
8 | def __init__(self, in_channels, out_channels):
9 | super().__init__()
10 | self.align = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False)
11 | self.trunk = nn.Sequential(
12 | conv_dw_no_bn(out_channels, out_channels),
13 | conv_dw_no_bn(out_channels, out_channels),
14 | conv_dw_no_bn(out_channels, out_channels)
15 | )
16 | self.conv = conv(out_channels, out_channels, bn=False)
17 |
18 | def forward(self, x):
19 | x = self.align(x)
20 | x = self.conv(x + self.trunk(x))
21 | return x
22 |
23 |
24 | class InitialStage(nn.Module):
25 | def __init__(self, num_channels, num_heatmaps, num_pafs):
26 | super().__init__()
27 | self.trunk = nn.Sequential(
28 | conv(num_channels, num_channels, bn=False),
29 | conv(num_channels, num_channels, bn=False),
30 | conv(num_channels, num_channels, bn=False)
31 | )
32 | self.heatmaps = nn.Sequential(
33 | conv(num_channels, 512, kernel_size=1, padding=0, bn=False),
34 | conv(512, num_heatmaps, kernel_size=1, padding=0, bn=False, relu=False)
35 | )
36 | self.pafs = nn.Sequential(
37 | conv(num_channels, 512, kernel_size=1, padding=0, bn=False),
38 | conv(512, num_pafs, kernel_size=1, padding=0, bn=False, relu=False)
39 | )
40 |
41 | def forward(self, x):
42 | trunk_features = self.trunk(x)
43 | heatmaps = self.heatmaps(trunk_features)
44 | pafs = self.pafs(trunk_features)
45 | return [heatmaps, pafs]
46 |
47 |
48 | class RefinementStageBlock(nn.Module):
49 | def __init__(self, in_channels, out_channels):
50 | super().__init__()
51 | self.initial = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False)
52 | self.trunk = nn.Sequential(
53 | conv(out_channels, out_channels),
54 | conv(out_channels, out_channels, dilation=2, padding=2)
55 | )
56 |
57 | def forward(self, x):
58 | initial_features = self.initial(x)
59 | trunk_features = self.trunk(initial_features)
60 | return initial_features + trunk_features
61 |
62 |
63 | class RefinementStage(nn.Module):
64 | def __init__(self, in_channels, out_channels, num_heatmaps, num_pafs):
65 | super().__init__()
66 | self.trunk = nn.Sequential(
67 | RefinementStageBlock(in_channels, out_channels),
68 | RefinementStageBlock(out_channels, out_channels),
69 | RefinementStageBlock(out_channels, out_channels),
70 | RefinementStageBlock(out_channels, out_channels),
71 | RefinementStageBlock(out_channels, out_channels)
72 | )
73 | self.heatmaps = nn.Sequential(
74 | conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False),
75 | conv(out_channels, num_heatmaps, kernel_size=1, padding=0, bn=False, relu=False)
76 | )
77 | self.pafs = nn.Sequential(
78 | conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False),
79 | conv(out_channels, num_pafs, kernel_size=1, padding=0, bn=False, relu=False)
80 | )
81 |
82 | def forward(self, x):
83 | trunk_features = self.trunk(x)
84 | heatmaps = self.heatmaps(trunk_features)
85 | pafs = self.pafs(trunk_features)
86 | return [heatmaps, pafs]
87 |
88 |
89 | class PoseEstimationWithMobileNet(nn.Module):
90 | def __init__(self, num_refinement_stages=1, num_channels=128, num_heatmaps=19, num_pafs=38):
91 | super().__init__()
92 | self.model = nn.Sequential(
93 | conv( 3, 32, stride=2, bias=False),
94 | conv_dw( 32, 64),
95 | conv_dw( 64, 128, stride=2),
96 | conv_dw(128, 128),
97 | conv_dw(128, 256, stride=2),
98 | conv_dw(256, 256),
99 | conv_dw(256, 512), # conv4_2
100 | conv_dw(512, 512, dilation=2, padding=2),
101 | conv_dw(512, 512),
102 | conv_dw(512, 512),
103 | conv_dw(512, 512),
104 | conv_dw(512, 512) # conv5_5
105 | )
106 | self.cpm = Cpm(512, num_channels)
107 |
108 | self.initial_stage = InitialStage(num_channels, num_heatmaps, num_pafs)
109 | self.refinement_stages = nn.ModuleList()
110 | for idx in range(num_refinement_stages):
111 | self.refinement_stages.append(RefinementStage(num_channels + num_heatmaps + num_pafs, num_channels,
112 | num_heatmaps, num_pafs))
113 |
114 | def forward(self, x):
115 | backbone_features = self.model(x)
116 | backbone_features = self.cpm(backbone_features)
117 |
118 | stages_output = self.initial_stage(backbone_features)
119 | for refinement_stage in self.refinement_stages:
120 | stages_output.extend(
121 | refinement_stage(torch.cat([backbone_features, stages_output[-2], stages_output[-1]], dim=1)))
122 |
123 | return stages_output
124 |
--------------------------------------------------------------------------------
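A quick smoke test of the network's output contract: the backbone downsamples by a factor of 8 (three stride-2 convolutions), and each stage emits a 19-channel heatmap tensor plus a 38-channel PAF tensor, so with the default single refinement stage the forward pass returns four tensors. A minimal sketch:

```python
import torch
from models.with_mobilenet import PoseEstimationWithMobileNet

net = PoseEstimationWithMobileNet().eval()
x = torch.randn(1, 3, 256, 344)  # height and width divisible by the stride of 8
with torch.no_grad():
    out = net(x)

print(len(out))       # 4: [heatmaps, pafs] from the initial stage, then the refinement stage
print(out[-2].shape)  # torch.Size([1, 19, 32, 43]) -> 18 keypoints + background
print(out[-1].shape)  # torch.Size([1, 38, 32, 43]) -> 19 limbs x (x, y) components
```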
/modules/conv.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | def conv(in_channels, out_channels, kernel_size=3, padding=1, bn=True, dilation=1, stride=1, relu=True, bias=True):
5 | modules = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)]
6 | if bn:
7 | modules.append(nn.BatchNorm2d(out_channels))
8 | if relu:
9 | modules.append(nn.ReLU(inplace=True))
10 | return nn.Sequential(*modules)
11 |
12 |
13 | def conv_dw(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1):
14 | return nn.Sequential(
15 | nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation=dilation, groups=in_channels, bias=False),
16 | nn.BatchNorm2d(in_channels),
17 | nn.ReLU(inplace=True),
18 |
19 | nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
20 | nn.BatchNorm2d(out_channels),
21 | nn.ReLU(inplace=True),
22 | )
23 |
24 |
25 | def conv_dw_no_bn(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1):
26 | return nn.Sequential(
27 | nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation=dilation, groups=in_channels, bias=False),
28 | nn.ELU(inplace=True),
29 |
30 | nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
31 | nn.ELU(inplace=True),
32 | )
33 |
--------------------------------------------------------------------------------
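conv_dw is the standard MobileNet depthwise-separable block: a per-channel 3x3 convolution followed by a 1x1 pointwise convolution, each with BatchNorm and ReLU. The parameter savings over a dense 3x3 convolution are easy to check:

```python
import torch.nn as nn
from modules.conv import conv_dw

def n_params(m):
    return sum(p.numel() for p in m.parameters())

dense = nn.Conv2d(128, 256, 3, padding=1, bias=False)
separable = conv_dw(128, 256)

print(n_params(dense))      # 128 * 256 * 9 = 294912
print(n_params(separable))  # 128*9 (depthwise) + 128*256 (pointwise) + 768 (two BatchNorms) = 34688
```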
/modules/get_parameters.py:
--------------------------------------------------------------------------------
1 | from torch import nn
2 |
3 |
4 | def get_parameters(model, predicate):
5 | for module in model.modules():
6 | for param_name, param in module.named_parameters():
7 | if predicate(module, param_name):
8 | yield param
9 |
10 |
11 | def get_parameters_conv(model, name):
12 | return get_parameters(model, lambda m, p: isinstance(m, nn.Conv2d) and m.groups == 1 and p == name)
13 |
14 |
15 | def get_parameters_conv_depthwise(model, name):
16 | return get_parameters(model, lambda m, p: isinstance(m, nn.Conv2d)
17 | and m.groups == m.in_channels
18 | and m.in_channels == m.out_channels
19 | and p == name)
20 |
21 |
22 | def get_parameters_bn(model, name):
23 | return get_parameters(model, lambda m, p: isinstance(m, nn.BatchNorm2d) and p == name)
24 |
--------------------------------------------------------------------------------
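These selectors only make sense from a training script: they let each parameter family (dense conv, depthwise conv, BatchNorm; weights vs. biases) receive its own learning rate and weight decay. A minimal sketch in the spirit of the upstream Lightweight-OpenPose trainer; the learning rates and decay values here are illustrative, not the original settings:

```python
from torch import optim
from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.get_parameters import (get_parameters_conv, get_parameters_conv_depthwise,
                                    get_parameters_bn)

net = PoseEstimationWithMobileNet()
base_lr = 4e-5  # illustrative
optimizer = optim.Adam([
    {'params': get_parameters_conv(net, 'weight')},
    {'params': get_parameters_conv(net, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
    {'params': get_parameters_conv_depthwise(net, 'weight'), 'weight_decay': 0},
    {'params': get_parameters_bn(net, 'weight'), 'weight_decay': 0},
    {'params': get_parameters_bn(net, 'bias'), 'lr': base_lr * 2, 'weight_decay': 0},
], lr=base_lr, weight_decay=5e-4)
```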
/modules/keypoints.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | from operator import itemgetter
4 |
5 | BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
6 | [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
7 | BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
8 | [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27])
9 |
10 |
11 | def linspace2d(start, stop, n=10):
12 | points = 1 / (n - 1) * (stop - start)
13 | return points[:, None] * np.arange(n) + start[:, None]
14 |
15 |
16 | def extract_keypoints(heatmap, all_keypoints, total_keypoint_num):
17 | heatmap[heatmap < 0.1] = 0
18 | heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode='constant')
19 | heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 1:heatmap_with_borders.shape[1]-1]
20 | heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 2:heatmap_with_borders.shape[1]]
21 | heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 0:heatmap_with_borders.shape[1]-2]
22 | heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1]-1]
23 | heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0]-2, 1:heatmap_with_borders.shape[1]-1]
24 |
25 | heatmap_peaks = (heatmap_center > heatmap_left) &\
26 | (heatmap_center > heatmap_right) &\
27 | (heatmap_center > heatmap_up) &\
28 | (heatmap_center > heatmap_down)
29 | heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0]-1, 1:heatmap_center.shape[1]-1]
30 | keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h)
31 | keypoints = sorted(keypoints, key=itemgetter(0))
32 |
33 | suppressed = np.zeros(len(keypoints), np.uint8)
34 | keypoints_with_score_and_id = []
35 | keypoint_num = 0
36 | for i in range(len(keypoints)):
37 | if suppressed[i]:
38 | continue
39 | for j in range(i+1, len(keypoints)):
40 | if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 +
41 | (keypoints[i][1] - keypoints[j][1]) ** 2) < 6:
42 | suppressed[j] = 1
43 | keypoint_with_score_and_id = (keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]],
44 | total_keypoint_num + keypoint_num)
45 | keypoints_with_score_and_id.append(keypoint_with_score_and_id)
46 | keypoint_num += 1
47 | all_keypoints.append(keypoints_with_score_and_id)
48 | return keypoint_num
49 |
50 |
51 | def group_keypoints(all_keypoints_by_type, pafs, pose_entry_size=20, min_paf_score=0.05, demo=False):
52 | pose_entries = []
53 | all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
54 | for part_id in range(len(BODY_PARTS_PAF_IDS)):
55 | part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
56 | kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
57 | kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
58 | num_kpts_a = len(kpts_a)
59 | num_kpts_b = len(kpts_b)
60 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
61 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
62 |
63 | if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part
64 | continue
65 | elif num_kpts_a == 0: # body part has just 'b' keypoints
66 | for i in range(num_kpts_b):
67 | num = 0
68 | for j in range(len(pose_entries)): # check if already in some pose, was added by another body part
69 | if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
70 | num += 1
71 | continue
72 | if num == 0:
73 | pose_entry = np.ones(pose_entry_size) * -1
74 | pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx
75 | pose_entry[-1] = 1 # num keypoints in pose
76 | pose_entry[-2] = kpts_b[i][2] # pose score
77 | pose_entries.append(pose_entry)
78 | continue
79 | elif num_kpts_b == 0: # body part has just 'a' keypoints
80 | for i in range(num_kpts_a):
81 | num = 0
82 | for j in range(len(pose_entries)):
83 | if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
84 | num += 1
85 | continue
86 | if num == 0:
87 | pose_entry = np.ones(pose_entry_size) * -1
88 | pose_entry[kpt_a_id] = kpts_a[i][3]
89 | pose_entry[-1] = 1
90 | pose_entry[-2] = kpts_a[i][2]
91 | pose_entries.append(pose_entry)
92 | continue
93 |
94 | connections = []
95 | for i in range(num_kpts_a):
96 | kpt_a = np.array(kpts_a[i][0:2])
97 | for j in range(num_kpts_b):
98 | kpt_b = np.array(kpts_b[j][0:2])
99 | mid_point = [(), ()]
100 | mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
101 | int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
102 | mid_point[1] = mid_point[0]
103 |
104 | vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
105 | vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
106 | if vec_norm == 0:
107 | continue
108 | vec[0] /= vec_norm
109 | vec[1] /= vec_norm
110 | cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
111 | vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
112 |
113 | height_n = pafs.shape[0] // 2
114 | success_ratio = 0
115 |                 point_num = 10  # number of sample points to integrate the paf over
116 | if cur_point_score > -100:
117 | passed_point_score = 0
118 | passed_point_num = 0
119 | x, y = linspace2d(kpt_a, kpt_b)
120 | for point_idx in range(point_num):
121 | if not demo:
122 | px = int(round(x[point_idx]))
123 | py = int(round(y[point_idx]))
124 | else:
125 | px = int(x[point_idx])
126 | py = int(y[point_idx])
127 | paf = part_pafs[py, px, 0:2]
128 | cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
129 | if cur_point_score > min_paf_score:
130 | passed_point_score += cur_point_score
131 | passed_point_num += 1
132 | success_ratio = passed_point_num / point_num
133 | ratio = 0
134 | if passed_point_num > 0:
135 | ratio = passed_point_score / passed_point_num
136 | ratio += min(height_n / vec_norm - 1, 0)
137 | if ratio > 0 and success_ratio > 0.8:
138 | score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
139 | connections.append([i, j, ratio, score_all])
140 | if len(connections) > 0:
141 | connections = sorted(connections, key=itemgetter(2), reverse=True)
142 |
143 | num_connections = min(num_kpts_a, num_kpts_b)
144 | has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
145 | has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
146 | filtered_connections = []
147 | for row in range(len(connections)):
148 | if len(filtered_connections) == num_connections:
149 | break
150 | i, j, cur_point_score = connections[row][0:3]
151 | if not has_kpt_a[i] and not has_kpt_b[j]:
152 | filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
153 | has_kpt_a[i] = 1
154 | has_kpt_b[j] = 1
155 | connections = filtered_connections
156 | if len(connections) == 0:
157 | continue
158 |
159 | if part_id == 0:
160 | pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
161 | for i in range(len(connections)):
162 | pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
163 | pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
164 | pose_entries[i][-1] = 2
165 | pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
166 | elif part_id == 17 or part_id == 18:
167 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
168 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
169 | for i in range(len(connections)):
170 | for j in range(len(pose_entries)):
171 | if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
172 | pose_entries[j][kpt_b_id] = connections[i][1]
173 | elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
174 | pose_entries[j][kpt_a_id] = connections[i][0]
175 | continue
176 | else:
177 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
178 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
179 | for i in range(len(connections)):
180 | num = 0
181 | for j in range(len(pose_entries)):
182 | if pose_entries[j][kpt_a_id] == connections[i][0]:
183 | pose_entries[j][kpt_b_id] = connections[i][1]
184 | num += 1
185 | pose_entries[j][-1] += 1
186 | pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
187 | if num == 0:
188 | pose_entry = np.ones(pose_entry_size) * -1
189 | pose_entry[kpt_a_id] = connections[i][0]
190 | pose_entry[kpt_b_id] = connections[i][1]
191 | pose_entry[-1] = 2
192 | pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
193 | pose_entries.append(pose_entry)
194 |
195 | filtered_entries = []
196 | for i in range(len(pose_entries)):
197 | if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
198 | continue
199 | filtered_entries.append(pose_entries[i])
200 | pose_entries = np.asarray(filtered_entries)
201 | return pose_entries, all_keypoints
202 |
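For orientation: `group_keypoints` scores each candidate limb by sampling the part affinity field at `point_num` points along the segment between two keypoints, projecting each sampled PAF vector onto the limb direction, and keeping the connection only when more than 80% of the samples clear `min_paf_score`. A minimal standalone sketch of that scoring step (it omits the `height_n / vec_norm` length penalty applied above):

    import numpy as np

    def connection_score(kpt_a, kpt_b, part_paf, min_paf_score=0.05, point_num=10):
        # Unit vector pointing from keypoint a to keypoint b.
        vec = np.asarray(kpt_b, np.float32) - np.asarray(kpt_a, np.float32)
        norm = np.linalg.norm(vec)
        if norm == 0:
            return 0.0
        vec /= norm
        # Sample point_num points on the segment and project the PAF onto vec.
        xs = np.round(np.linspace(kpt_a[0], kpt_b[0], point_num)).astype(int)
        ys = np.round(np.linspace(kpt_a[1], kpt_b[1], point_num)).astype(int)
        scores = part_paf[ys, xs, 0] * vec[0] + part_paf[ys, xs, 1] * vec[1]
        passed = scores > min_paf_score
        # Require most sample points to agree with the limb direction.
        if passed.mean() <= 0.8:
            return 0.0
        return float(scores[passed].mean())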
--------------------------------------------------------------------------------
/modules/load_state.py:
--------------------------------------------------------------------------------
1 | import collections
2 |
3 |
4 | def load_state(net, checkpoint):
5 | source_state = checkpoint['state_dict']
6 | target_state = net.state_dict()
7 | new_target_state = collections.OrderedDict()
8 | for target_key, target_value in target_state.items():
9 | if target_key in source_state and source_state[target_key].size() == target_state[target_key].size():
10 | new_target_state[target_key] = source_state[target_key]
11 | else:
12 | new_target_state[target_key] = target_state[target_key]
13 |             print('[WARNING] No pre-trained parameters found for {}'.format(target_key))
14 |
15 | net.load_state_dict(new_target_state)
16 |
17 |
18 | def load_from_mobilenet(net, checkpoint):
19 | source_state = checkpoint['state_dict']
20 | target_state = net.state_dict()
21 | new_target_state = collections.OrderedDict()
22 | for target_key, target_value in target_state.items():
23 | k = target_key
24 | if k.find('model') != -1:
25 | k = k.replace('model', 'module.model')
26 | if k in source_state and source_state[k].size() == target_state[target_key].size():
27 | new_target_state[target_key] = source_state[k]
28 | else:
29 | new_target_state[target_key] = target_state[target_key]
30 |             print('[WARNING] No pre-trained parameters found for {}'.format(target_key))
31 |
32 | net.load_state_dict(new_target_state)
33 |
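Both loaders copy a checkpoint tensor only when the key exists and its shape matches the network's, falling back to the freshly initialized weights otherwise, so partially compatible checkpoints load without raising. An illustrative call, assuming the `PoseEstimationWithMobileNet` class defined in models/with_mobilenet.py; the checkpoint path is a placeholder for whichever file sits in the model folder:

    import torch
    from models.with_mobilenet import PoseEstimationWithMobileNet
    from modules.load_state import load_state

    net = PoseEstimationWithMobileNet()
    # The checkpoint is expected to hold its weights under the 'state_dict' key.
    checkpoint = torch.load('model/openpose_checkpoint.pth', map_location='cpu')
    load_state(net, checkpoint)  # copies only the shape-matching tensors
    net.eval()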
--------------------------------------------------------------------------------
/modules/loss.py:
--------------------------------------------------------------------------------
1 | def l2_loss(input, target, mask, batch_size):
2 | loss = (input - target) * mask
3 | loss = (loss * loss) / 2 / batch_size
4 |
5 | return loss.sum()
6 |
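In effect this is a masked sum-of-squared-errors: with prediction x, target t, mask m and batch size B it returns sum(((x - t) * m) ** 2) / (2 * B), so pixels where the mask is zero (e.g. unlabeled regions) contribute nothing to the gradient. A quick sanity check of that formula:

    import torch
    from modules.loss import l2_loss

    pred = torch.ones(2, 19, 8, 8)
    target = torch.zeros_like(pred)
    mask = torch.zeros_like(pred)
    mask[:, :, :4, :] = 1  # supervise only the top half of each map
    # 2 * 19 * 4 * 8 masked ones -> 1216 * 1 / (2 * 2) = 304
    print(l2_loss(pred, target, mask, batch_size=2))  # tensor(304.)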
--------------------------------------------------------------------------------
/modules/one_euro_filter.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 |
4 | def get_alpha(rate=30, cutoff=1):
5 | tau = 1 / (2 * math.pi * cutoff)
6 | te = 1 / rate
7 | return 1 / (1 + tau / te)
8 |
9 |
10 | class LowPassFilter:
11 | def __init__(self):
12 | self.x_previous = None
13 |
14 | def __call__(self, x, alpha=0.5):
15 | if self.x_previous is None:
16 | self.x_previous = x
17 | return x
18 | x_filtered = alpha * x + (1 - alpha) * self.x_previous
19 | self.x_previous = x_filtered
20 | return x_filtered
21 |
22 |
23 | class OneEuroFilter:
24 | def __init__(self, freq=15, mincutoff=1, beta=0.05, dcutoff=1):
25 | self.freq = freq
26 | self.mincutoff = mincutoff
27 | self.beta = beta
28 | self.dcutoff = dcutoff
29 | self.filter_x = LowPassFilter()
30 | self.filter_dx = LowPassFilter()
31 | self.x_previous = None
32 | self.dx = None
33 |
34 | def __call__(self, x):
35 | if self.dx is None:
36 | self.dx = 0
37 | else:
38 | self.dx = (x - self.x_previous) * self.freq
39 | dx_smoothed = self.filter_dx(self.dx, get_alpha(self.freq, self.dcutoff))
40 | cutoff = self.mincutoff + self.beta * abs(dx_smoothed)
41 | x_filtered = self.filter_x(x, get_alpha(self.freq, cutoff))
42 | self.x_previous = x
43 | return x_filtered
44 |
45 |
46 | if __name__ == '__main__':
47 |     one_euro_filter = OneEuroFilter(freq=15, beta=0.1)
48 |     for val in range(10):
49 |         x = val + (-1) ** (val % 2)
50 |         x_filtered = one_euro_filter(x)
51 | print(x_filtered, x)
52 |
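For reference, `get_alpha` is the standard One Euro filter smoothing factor from Casiez et al. (CHI 2012): alpha = 1 / (1 + tau / te), with tau = 1 / (2 * pi * cutoff) and sampling period te = 1 / rate. In `OneEuroFilter.__call__` the cutoff is raised with the smoothed speed, cutoff = mincutoff + beta * |dx_smoothed|, so slow motion (jitter) is smoothed aggressively while fast motion tracks with little lag.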
--------------------------------------------------------------------------------
/modules/pose.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | from modules.keypoints import BODY_PARTS_KPT_IDS, BODY_PARTS_PAF_IDS
5 | from modules.one_euro_filter import OneEuroFilter
6 |
7 |
8 | class Pose:
9 | num_kpts = 18
10 | kpt_names = ['nose', 'neck',
11 | 'r_sho', 'r_elb', 'r_wri', 'l_sho', 'l_elb', 'l_wri',
12 | 'r_hip', 'r_knee', 'r_ank', 'l_hip', 'l_knee', 'l_ank',
13 | 'r_eye', 'l_eye',
14 | 'r_ear', 'l_ear']
15 | sigmas = np.array([.26, .79, .79, .72, .62, .79, .72, .62, 1.07, .87, .89, 1.07, .87, .89, .25, .25, .35, .35],
16 | dtype=np.float32) / 10.0
17 | vars = (sigmas * 2) ** 2
18 | last_id = -1
19 | color = [0, 224, 255]
20 |
21 | def __init__(self, keypoints, confidence):
22 | super().__init__()
23 | self.keypoints = keypoints
24 | self.confidence = confidence
25 | self.bbox = Pose.get_bbox(self.keypoints)
26 | self.id = None
27 | self.filters = [[OneEuroFilter(), OneEuroFilter()] for _ in range(Pose.num_kpts)]
28 |
29 | @staticmethod
30 | def get_bbox(keypoints):
31 | found_keypoints = np.zeros((np.count_nonzero(keypoints[:, 0] != -1), 2), dtype=np.int32)
32 | found_kpt_id = 0
33 | for kpt_id in range(Pose.num_kpts):
34 | if keypoints[kpt_id, 0] == -1:
35 | continue
36 | found_keypoints[found_kpt_id] = keypoints[kpt_id]
37 | found_kpt_id += 1
38 | bbox = cv2.boundingRect(found_keypoints)
39 | return bbox
40 |
41 | def update_id(self, id=None):
42 | self.id = id
43 | if self.id is None:
44 | self.id = Pose.last_id + 1
45 | Pose.last_id += 1
46 |
47 | def draw(self, img):
48 | assert self.keypoints.shape == (Pose.num_kpts, 2)
49 |
50 | for part_id in range(len(BODY_PARTS_PAF_IDS) - 2):
51 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
52 | global_kpt_a_id = self.keypoints[kpt_a_id, 0]
53 | if global_kpt_a_id != -1:
54 | x_a, y_a = self.keypoints[kpt_a_id]
55 | cv2.circle(img, (int(x_a), int(y_a)), 3, Pose.color, -1)
56 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
57 | global_kpt_b_id = self.keypoints[kpt_b_id, 0]
58 | if global_kpt_b_id != -1:
59 | x_b, y_b = self.keypoints[kpt_b_id]
60 | cv2.circle(img, (int(x_b), int(y_b)), 3, Pose.color, -1)
61 | if global_kpt_a_id != -1 and global_kpt_b_id != -1:
62 | cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), Pose.color, 2)
63 |
64 |
65 | def get_similarity(a, b, threshold=0.5):
66 | num_similar_kpt = 0
67 | for kpt_id in range(Pose.num_kpts):
68 | if a.keypoints[kpt_id, 0] != -1 and b.keypoints[kpt_id, 0] != -1:
69 | distance = np.sum((a.keypoints[kpt_id] - b.keypoints[kpt_id]) ** 2)
70 | area = max(a.bbox[2] * a.bbox[3], b.bbox[2] * b.bbox[3])
71 | similarity = np.exp(-distance / (2 * (area + np.spacing(1)) * Pose.vars[kpt_id]))
72 | if similarity > threshold:
73 | num_similar_kpt += 1
74 | return num_similar_kpt
75 |
76 |
77 | def track_poses(previous_poses, current_poses, threshold=3, smooth=False):
78 |     """Propagate pose ids from the previous frame's results. An id is propagated
79 |     if at least `threshold` keypoints are similar between a pose from the previous frame and a current one.
80 |     Once a correspondence between a previous and a current pose is established, the pose keypoints are smoothed.
81 |
82 | :param previous_poses: poses from previous frame with ids
83 | :param current_poses: poses from current frame to assign ids
84 | :param threshold: minimal number of similar keypoints between poses
85 | :param smooth: smooth pose keypoints between frames
86 | :return: None
87 | """
88 | current_poses = sorted(current_poses, key=lambda pose: pose.confidence, reverse=True) # match confident poses first
89 | mask = np.ones(len(previous_poses), dtype=np.int32)
90 | for current_pose in current_poses:
91 | best_matched_id = None
92 | best_matched_pose_id = None
93 | best_matched_iou = 0
94 | for id, previous_pose in enumerate(previous_poses):
95 | if not mask[id]:
96 | continue
97 | iou = get_similarity(current_pose, previous_pose)
98 | if iou > best_matched_iou:
99 | best_matched_iou = iou
100 | best_matched_pose_id = previous_pose.id
101 | best_matched_id = id
102 | if best_matched_iou >= threshold:
103 | mask[best_matched_id] = 0
104 | else: # pose not similar to any previous
105 | best_matched_pose_id = None
106 | current_pose.update_id(best_matched_pose_id)
107 |
108 | if smooth:
109 | for kpt_id in range(Pose.num_kpts):
110 | if current_pose.keypoints[kpt_id, 0] == -1:
111 | continue
112 | # reuse filter if previous pose has valid filter
113 | if (best_matched_pose_id is not None
114 | and previous_poses[best_matched_id].keypoints[kpt_id, 0] != -1):
115 | current_pose.filters[kpt_id] = previous_poses[best_matched_id].filters[kpt_id]
116 | current_pose.keypoints[kpt_id, 0] = current_pose.filters[kpt_id][0](current_pose.keypoints[kpt_id, 0])
117 | current_pose.keypoints[kpt_id, 1] = current_pose.filters[kpt_id][1](current_pose.keypoints[kpt_id, 1])
118 | current_pose.bbox = Pose.get_bbox(current_pose.keypoints)
119 |
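`get_similarity` counts keypoints whose object-keypoint-similarity term exp(-d^2 / (2 * area * (2 * sigma)^2)) exceeds the 0.5 threshold, and `track_poses` then matches each current pose (most confident first) to the previous pose with the highest such count, propagating the id whenever at least `threshold` keypoints agree. A minimal per-frame tracking loop, where `video_frames` and `infer` are placeholders for the capture loop and whatever produces the list of `Pose` objects:

    from modules.pose import track_poses

    previous_poses = []
    for frame in video_frames:          # placeholder: any iterable of images
        current_poses = infer(frame)    # placeholder: returns a list of Pose objects
        track_poses(previous_poses, current_poses, smooth=True)
        previous_poses = current_poses
        for pose in current_poses:
            pose.draw(frame)            # ids and keypoints are now stable across frames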
--------------------------------------------------------------------------------
/openpose+RandomForest.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "9001513f",
6 | "metadata": {},
7 | "source": [
8 |     "## Classification with LightGBM"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c29f7d65",
14 | "metadata": {},
15 | "source": [
16 |     "### 1. Import libraries"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 32,
22 | "id": "ebdebfdf",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 |     "import pandas as pd\n",
27 |     "from sklearn.model_selection import train_test_split  # split the dataset\n",
28 |     "from sklearn.ensemble import RandomForestClassifier  # random forest\n",
29 |     "import lightgbm as lgb\n",
30 |     "from sklearn.model_selection import GridSearchCV  # grid search\n",
31 |     "from sklearn.metrics import accuracy_score, recall_score, log_loss  # model evaluation\n",
32 |     "import joblib  # model persistence"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "id": "2c40a63f",
38 | "metadata": {},
39 | "source": [
40 |     "### 2. Data loading and preprocessing"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 34,
46 | "id": "2ae24d75",
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "data": {
51 | "text/html": [
52 |        "[HTML table output omitted: preview of the raw feature table, 22396 rows × 36 columns; columns: Unnamed: 0 (image filename), d1..d9, ..., a9..a17, label]",
53 |        "[HTML table output omitted: preview after dropping sparse feature columns, 5 rows × 30 columns]",
54 |        "[HTML table output omitted: preview after also dropping the filename column, 5 rows × 29 columns]"