├── LICENSE
├── README.md
├── datasets
│   └── coco.py
├── func.py
├── get_train.py
├── image
│   ├── 1.jpg
│   ├── 11.jpg
│   ├── 12.jpg
│   ├── 2.jpg
│   ├── 3.jpg
│   └── 4.jpg
├── main.py
├── models
│   └── with_mobilenet.py
├── modules
│   ├── conv.py
│   ├── get_parameters.py
│   ├── keypoints.py
│   ├── load_state.py
│   ├── loss.py
│   ├── one_euro_filter.py
│   └── pose.py
├── openpose+RandomForest.ipynb
├── openpose_train_data.csv
├── requirement.txt
├── sound
│   ├── eyes.mp3
│   └── yawn.mp3
└── val.py
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 以太とTerra
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | ## Driver State Detection Based on OpenPose and a Random Forest Classifier
2 | 
3 | > Demo
4 | 
5 | 
6 | 
7 | main.py is the entry point of the program.
8 | 
9 | get_train.py extracts pose features with OpenPose. The dataset is State Farm Distracted Driver Detection from Kaggle:
10 | https://www.kaggle.com/c/state-farm-distracted-driver-detection
11 | Its output is the openpose_train_data.csv file.
12 | 
13 | func.py is the program's supporting function library, with detailed Chinese comments.
14 | 
15 | val.py, modules and datasets are Light-OpenPose support files.
16 | 
17 | The models folder must contain the three pretrained model files: OpenPose, dlib and the random forest.
18 | Model download:
19 | 
20 | Link: https://pan.baidu.com/s/1qMid2zZWTuaPjE2nIkLWkw?pwd=0vzs
21 | Extraction code: 0vzs
22 | 
23 | The video folder holds the side-camera and front-camera test videos.
24 | 
25 | The sound folder contains the fatigue-warning voice prompts.
26 | 
27 | The image folder contains screenshots of the program in action.
28 | 
29 | Quite a few libraries are required; if any are missing, install them one by one as the import errors indicate.
-------------------------------------------------------------------------------- /datasets/coco.py: --------------------------------------------------------------------------------
1 | import copy
2 | import json
3 | import math
4 | import os
5 | import pickle
6 | 
7 | import cv2
8 | import numpy as np
9 | import pycocotools
10 | 
11 | from torch.utils.data.dataset import Dataset
12 | 
13 | BODY_PARTS_KPT_IDS = [[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16],
14 |                       [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17]]
15 | 
16 | 
17 | def get_mask(segmentations, mask):
18 |     for segmentation in segmentations:
19 |         rle = pycocotools.mask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
20 |         mask[pycocotools.mask.decode(rle) > 0.5] = 0
21 |     return mask
22 | 
23 | 
24 | class CocoTrainDataset(Dataset):
25 |     def __init__(self, labels, images_folder, stride, sigma, paf_thickness, transform=None):
26 |         super().__init__()
27 |         self._images_folder =
images_folder 28 | self._stride = stride 29 | self._sigma = sigma 30 | self._paf_thickness = paf_thickness 31 | self._transform = transform 32 | with open(labels, 'rb') as f: 33 | self._labels = pickle.load(f) 34 | 35 | def __getitem__(self, idx): 36 | label = copy.deepcopy(self._labels[idx]) # label modified in transform 37 | image = cv2.imread(os.path.join(self._images_folder, label['img_paths']), cv2.IMREAD_COLOR) 38 | mask = np.ones(shape=(label['img_height'], label['img_width']), dtype=np.float32) 39 | mask = get_mask(label['segmentations'], mask) 40 | sample = { 41 | 'label': label, 42 | 'image': image, 43 | 'mask': mask 44 | } 45 | if self._transform: 46 | sample = self._transform(sample) 47 | 48 | mask = cv2.resize(sample['mask'], dsize=None, fx=1/self._stride, fy=1/self._stride, interpolation=cv2.INTER_AREA) 49 | keypoint_maps = self._generate_keypoint_maps(sample) 50 | sample['keypoint_maps'] = keypoint_maps 51 | keypoint_mask = np.zeros(shape=keypoint_maps.shape, dtype=np.float32) 52 | for idx in range(keypoint_mask.shape[0]): 53 | keypoint_mask[idx] = mask 54 | sample['keypoint_mask'] = keypoint_mask 55 | 56 | paf_maps = self._generate_paf_maps(sample) 57 | sample['paf_maps'] = paf_maps 58 | paf_mask = np.zeros(shape=paf_maps.shape, dtype=np.float32) 59 | for idx in range(paf_mask.shape[0]): 60 | paf_mask[idx] = mask 61 | sample['paf_mask'] = paf_mask 62 | 63 | image = sample['image'].astype(np.float32) 64 | image = (image - 128) / 256 65 | sample['image'] = image.transpose((2, 0, 1)) 66 | del sample['label'] 67 | return sample 68 | 69 | def __len__(self): 70 | return len(self._labels) 71 | 72 | def _generate_keypoint_maps(self, sample): 73 | n_keypoints = 18 74 | n_rows, n_cols, _ = sample['image'].shape 75 | keypoint_maps = np.zeros(shape=(n_keypoints + 1, 76 | n_rows // self._stride, n_cols // self._stride), dtype=np.float32) # +1 for bg 77 | 78 | label = sample['label'] 79 | for keypoint_idx in range(n_keypoints): 80 | keypoint = label['keypoints'][keypoint_idx] 81 | if keypoint[2] <= 1: 82 | self._add_gaussian(keypoint_maps[keypoint_idx], keypoint[0], keypoint[1], self._stride, self._sigma) 83 | for another_annotation in label['processed_other_annotations']: 84 | keypoint = another_annotation['keypoints'][keypoint_idx] 85 | if keypoint[2] <= 1: 86 | self._add_gaussian(keypoint_maps[keypoint_idx], keypoint[0], keypoint[1], self._stride, self._sigma) 87 | keypoint_maps[-1] = 1 - keypoint_maps.max(axis=0) 88 | return keypoint_maps 89 | 90 | def _add_gaussian(self, keypoint_map, x, y, stride, sigma): 91 | n_sigma = 4 92 | tl = [int(x - n_sigma * sigma), int(y - n_sigma * sigma)] 93 | tl[0] = max(tl[0], 0) 94 | tl[1] = max(tl[1], 0) 95 | 96 | br = [int(x + n_sigma * sigma), int(y + n_sigma * sigma)] 97 | map_h, map_w = keypoint_map.shape 98 | br[0] = min(br[0], map_w * stride) 99 | br[1] = min(br[1], map_h * stride) 100 | 101 | shift = stride / 2 - 0.5 102 | for map_y in range(tl[1] // stride, br[1] // stride): 103 | for map_x in range(tl[0] // stride, br[0] // stride): 104 | d2 = (map_x * stride + shift - x) * (map_x * stride + shift - x) + \ 105 | (map_y * stride + shift - y) * (map_y * stride + shift - y) 106 | exponent = d2 / 2 / sigma / sigma 107 | if exponent > 4.6052: # threshold, ln(100), ~0.01 108 | continue 109 | keypoint_map[map_y, map_x] += math.exp(-exponent) 110 | if keypoint_map[map_y, map_x] > 1: 111 | keypoint_map[map_y, map_x] = 1 112 | 113 | def _generate_paf_maps(self, sample): 114 | n_pafs = len(BODY_PARTS_KPT_IDS) 115 | n_rows, n_cols, _ = 
sample['image'].shape 116 | paf_maps = np.zeros(shape=(n_pafs * 2, n_rows // self._stride, n_cols // self._stride), dtype=np.float32) 117 | 118 | label = sample['label'] 119 | for paf_idx in range(n_pafs): 120 | keypoint_a = label['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][0]] 121 | keypoint_b = label['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][1]] 122 | if keypoint_a[2] <= 1 and keypoint_b[2] <= 1: 123 | self._set_paf(paf_maps[paf_idx * 2:paf_idx * 2 + 2], 124 | keypoint_a[0], keypoint_a[1], keypoint_b[0], keypoint_b[1], 125 | self._stride, self._paf_thickness) 126 | for another_annotation in label['processed_other_annotations']: 127 | keypoint_a = another_annotation['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][0]] 128 | keypoint_b = another_annotation['keypoints'][BODY_PARTS_KPT_IDS[paf_idx][1]] 129 | if keypoint_a[2] <= 1 and keypoint_b[2] <= 1: 130 | self._set_paf(paf_maps[paf_idx * 2:paf_idx * 2 + 2], 131 | keypoint_a[0], keypoint_a[1], keypoint_b[0], keypoint_b[1], 132 | self._stride, self._paf_thickness) 133 | return paf_maps 134 | 135 | def _set_paf(self, paf_map, x_a, y_a, x_b, y_b, stride, thickness): 136 | x_a /= stride 137 | y_a /= stride 138 | x_b /= stride 139 | y_b /= stride 140 | x_ba = x_b - x_a 141 | y_ba = y_b - y_a 142 | _, h_map, w_map = paf_map.shape 143 | x_min = int(max(min(x_a, x_b) - thickness, 0)) 144 | x_max = int(min(max(x_a, x_b) + thickness, w_map)) 145 | y_min = int(max(min(y_a, y_b) - thickness, 0)) 146 | y_max = int(min(max(y_a, y_b) + thickness, h_map)) 147 | norm_ba = (x_ba * x_ba + y_ba * y_ba) ** 0.5 148 | if norm_ba < 1e-7: # Same points, no paf 149 | return 150 | x_ba /= norm_ba 151 | y_ba /= norm_ba 152 | 153 | for y in range(y_min, y_max): 154 | for x in range(x_min, x_max): 155 | x_ca = x - x_a 156 | y_ca = y - y_a 157 | d = math.fabs(x_ca * y_ba - y_ca * x_ba) 158 | if d <= thickness: 159 | paf_map[0, y, x] = x_ba 160 | paf_map[1, y, x] = y_ba 161 | 162 | 163 | class CocoValDataset(Dataset): 164 | def __init__(self, labels, images_folder): 165 | super().__init__() 166 | with open(labels, 'r') as f: 167 | self._labels = json.load(f) 168 | self._images_folder = images_folder 169 | 170 | def __getitem__(self, idx): 171 | file_name = self._labels['images'][idx]['file_name'] 172 | img = cv2.imread(os.path.join(self._images_folder, file_name), cv2.IMREAD_COLOR) 173 | return { 174 | 'img': img, 175 | 'file_name': file_name 176 | } 177 | 178 | def __len__(self): 179 | return len(self._labels['images']) 180 | -------------------------------------------------------------------------------- /func.py: -------------------------------------------------------------------------------- 1 | import dlib 2 | import joblib 3 | import numpy as np 4 | import copy 5 | import pandas as pd 6 | import pygame 7 | from imutils import face_utils 8 | from scipy.spatial import distance 9 | from tkinter import * 10 | from PIL import Image, ImageTk 11 | import tkinter.ttk 12 | import numpy 13 | from PIL import Image, ImageDraw, ImageFont 14 | import math 15 | import cv2 16 | import torch 17 | from modules.pose import Pose, track_poses 18 | from val import normalize, pad_width 19 | from modules.keypoints import extract_keypoints, group_keypoints 20 | from models.with_mobilenet import PoseEstimationWithMobileNet 21 | from modules.load_state import load_state 22 | 23 | # 均值填充 24 | avg = [108.3920476, 28.25560194, 23.83509614, 59.88074356, 38.39867032, 58.68210028, 169.0982822, 150.9596774, 25 | 59.14090161, 107.8679241, 97.66179061, 229.0530985, 190.5525496, 109.007621, 207.7391332, 
154.0163144, 26 | 108.5794621, 2.335975213, -2.26903988, 1.427404258, -3.187685543, -1.970117366, -3.31392059, -4.290557895, 27 | -2.154595849, -4.042274581, -1.943534425, 2.783517288, -0.181217392, -0.706798676, -1.652686336, -0.950017573, 28 | 1.550229334, 1.209989161] 29 | 30 | switch = 0 31 | yawn = False 32 | yawn_flag = 0 33 | eye_close = False 34 | eye_flag = 0 35 | flag = 0 36 | t = 0 37 | 38 | # 加载OpenPose模型 39 | net = PoseEstimationWithMobileNet() # 加载网络结构 40 | checkpoint = torch.load('models/checkpoint_iter_370000.pth', map_location='cpu') # 加载模型参数 41 | load_state(net, checkpoint) # 拼接结构与参数 42 | 43 | thresh_eye = 0.17 # 眼睛宽高比阈值 44 | thresh_mouth = 0.85 # 嘴巴宽高比阈值 45 | frame_check = 25 # 超时警告(单位:帧) 46 | detect = dlib.get_frontal_face_detector() # 获取面部 47 | predict = dlib.shape_predictor("models/shape_predictor_68_face_landmarks.dat") # 面部68个特征点数据集 48 | 49 | # 获取眼睛特征点序号 50 | (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["left_eye"] # 42~47 51 | (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["right_eye"] # 36~41 52 | # 获取嘴巴特征点序号 53 | (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"] # 48~67 54 | pygame.mixer.init() # 语音模块初始化 55 | # 载入分类模型 56 | etc = joblib.load( 57 | 'models/RandomForestClassifier_model.pkl') 58 | 59 | datas = pd.DataFrame( 60 | columns=['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10', 'd11', 'd12', 'd13', 'd14', 'd15', 'd16','d17', 61 | 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16','a17']) 62 | 63 | # 中文显示函数 64 | def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): 65 | if (isinstance(img, numpy.ndarray)): # 判断是否OpenCV图片类型 66 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 67 | # 创建一个可以在给定图像上绘图的对象 68 | draw = ImageDraw.Draw(img) 69 | # 字体的格式 70 | fontStyle = ImageFont.truetype("font/simsun.ttc", textSize, encoding="utf-8") 71 | # 绘制文本 72 | draw.text((left, top), text, textColor, font=fontStyle) 73 | # 转换回OpenCV格式 74 | return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR) 75 | # 计算眼睛宽高比 76 | def eye_aspect_ratio(eye): 77 | A = distance.euclidean(eye[1], eye[5]) 78 | B = distance.euclidean(eye[2], eye[4]) 79 | C = distance.euclidean(eye[0], eye[3]) 80 | ratio = (A + B) / (2.0 * C) 81 | return ratio 82 | # 计算嘴巴宽高比 83 | def mouth_aspect_ratio(mouth): 84 | A = distance.euclidean(mouth[2], mouth[10]) 85 | B = distance.euclidean(mouth[4], mouth[8]) 86 | C = distance.euclidean(mouth[0], mouth[6]) 87 | ratio = (A + B) / (2.0 * C) 88 | return ratio 89 | # 骨架缩放函数 90 | def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu, 91 | pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1 / 256): 92 | height, width, _ = img.shape 93 | scale = net_input_height_size / height 94 | 95 | scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) 96 | scaled_img = normalize(scaled_img, img_mean, img_scale) # 标准化图片 97 | 98 | min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)] 99 | padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims) 100 | 101 | tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float() 102 | 103 | if not cpu: 104 | tensor_img = tensor_img.cuda() 105 | 106 | stages_output = net(tensor_img) # 神经网络输出 107 | 108 | stage2_heatmaps = stages_output[-2] 109 | heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0)) 110 | heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, 
interpolation=cv2.INTER_CUBIC) 111 | 112 | stage2_pafs = stages_output[-1] 113 | pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0)) 114 | pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC) 115 | 116 | return heatmaps, pafs, scale, pad 117 | # 获取骨架 118 | def run_demo(net, img, height_size, cpu, track, smooth): 119 | net = net.eval() # 锁定网络参数 120 | if not cpu: 121 | net = net.cuda() # 启动GPU 122 | 123 | stride = 8 124 | upsample_ratio = 4 125 | num_keypoints = Pose.num_kpts # 18个采样点 126 | previous_poses = [] # 预测集合 127 | 128 | orig_img = img.copy() 129 | heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu) 130 | 131 | total_keypoints_num = 0 132 | all_keypoints_by_type = [] 133 | for kpt_idx in range(num_keypoints): # 19th for bg 134 | total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num) 135 | 136 | pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True) 137 | for kpt_id in range(all_keypoints.shape[0]): 138 | all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale 139 | all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale 140 | current_poses = [] 141 | for n in range(len(pose_entries)): 142 | if len(pose_entries[n]) == 0: 143 | continue 144 | pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1 145 | for kpt_id in range(num_keypoints): 146 | if pose_entries[n][kpt_id] != -1.0: # keypoint was found 147 | pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0]) 148 | pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1]) 149 | pose = Pose(pose_keypoints, pose_entries[n][18]) 150 | current_poses.append(pose) 151 | if track: 152 | track_poses(previous_poses, current_poses, smooth=smooth) 153 | previous_poses = current_poses 154 | 155 | if len(previous_poses) == 0: 156 | return [] 157 | else: 158 | return previous_poses[0].keypoints 159 | # 将cv图像显示在Tk组件上 160 | def Showimage(imgCV_in, canva, layout="null"): 161 | global imgTK 162 | canvawidth = int(canva.winfo_reqwidth()) 163 | canvaheight = int(canva.winfo_reqheight()) 164 | sp = imgCV_in.shape 165 | cvheight = sp[0] # height(rows) of image 166 | cvwidth = sp[1] # width(colums) of image 167 | if (layout == "fill"): 168 | imgCV = cv2.resize(imgCV_in, (canvawidth, canvaheight), interpolation=cv2.INTER_AREA) 169 | elif (layout == "fit"): 170 | if (float(cvwidth / cvheight) > float(canvawidth / canvaheight)): 171 | imgCV = cv2.resize(imgCV_in, (canvawidth, int(canvawidth * cvheight / cvwidth)), 172 | interpolation=cv2.INTER_AREA) 173 | else: 174 | imgCV = cv2.resize(imgCV_in, (int(canvaheight * cvwidth / cvheight), canvaheight), 175 | interpolation=cv2.INTER_AREA) 176 | else: 177 | imgCV = imgCV_in 178 | imgCV2 = cv2.cvtColor(imgCV, cv2.COLOR_BGR2RGBA) # 转换颜色从BGR到RGBA 179 | current_image = Image.fromarray(imgCV2) # 将图像转换成Image对象 180 | imgTK = ImageTk.PhotoImage(image=current_image) # 将image对象转换为imageTK对象 181 | canva.create_image(0, 0, anchor=NW, image=imgTK) 182 | # 骨架计算距离函数 183 | def clac_distance(a, b): 184 | dis_square = (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 185 | return math.sqrt(dis_square) 186 | # 骨架计算角度函数 187 | def clac_angel(a, b, c): 188 | return math.atan2((a[0] - b[0]), (a[1] - b[1])) - math.atan2((c[0] - b[0]), (c[1] - b[1])) 189 | # 获取模型输入张量 190 | def clac_keras(key_point): 191 | distance_all = [] 
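    # The model's input vector has 34 entries: 17 pairwise keypoint distances
    # (columns d1..d17 of openpose_train_data.csv) followed by 17 joint angles
    # (a1..a17). Whenever a required keypoint was not detected (its x coordinate
    # is -1), the corresponding entry falls back to the training-set mean stored
    # in `avg` above.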
192 | angel_all = [] 193 | # 计算距离 194 | # 鼻子到脖子 195 | if key_point[0][0] != -1 and key_point[1][0] != -1: 196 | distance_all.append(clac_distance(key_point[0], key_point[1])) 197 | else: 198 | distance_all.append(avg[0]) 199 | # 鼻子到右眼 200 | if key_point[0][0] != -1 and key_point[14][0] != -1: 201 | distance_all.append(clac_distance(key_point[0], key_point[14])) 202 | else: 203 | distance_all.append(avg[1]) 204 | # 鼻子到左眼 205 | if key_point[0][0] != -1 and key_point[15][0] != -1: 206 | distance_all.append(clac_distance(key_point[0], key_point[15])) 207 | else: 208 | distance_all.append(avg[2]) 209 | # 右眼到右耳 210 | if key_point[14][0] != -1 and key_point[16][0] != -1: 211 | distance_all.append(clac_distance(key_point[14], key_point[16])) 212 | else: 213 | distance_all.append(avg[3]) 214 | # 左眼到左耳 215 | if key_point[15][0] != -1 and key_point[17][0] != -1: 216 | distance_all.append(clac_distance(key_point[15], key_point[17])) 217 | else: 218 | distance_all.append(avg[4]) 219 | # 脖子到右肩 220 | if key_point[1][0] != -1 and key_point[2][0] != -1: 221 | distance_all.append(clac_distance(key_point[1], key_point[2])) 222 | else: 223 | distance_all.append(avg[5]) 224 | # 右肩到右肘 225 | if key_point[2][0] != -1 and key_point[3][0] != -1: 226 | distance_all.append(clac_distance(key_point[2], key_point[3])) 227 | else: 228 | distance_all.append(avg[6]) 229 | # 右肘到右腕 230 | if key_point[3][0] != -1 and key_point[4][0] != -1: 231 | distance_all.append(clac_distance(key_point[3], key_point[4])) 232 | else: 233 | distance_all.append(avg[7]) 234 | # 脖子到左肩 235 | if key_point[1][0] != -1 and key_point[5][0] != -1: 236 | distance_all.append(clac_distance(key_point[1], key_point[5])) 237 | else: 238 | distance_all.append(avg[8]) 239 | # 左肩到左肘 240 | if key_point[5][0] != -1 and key_point[6][0] != -1: 241 | distance_all.append(clac_distance(key_point[5], key_point[6])) 242 | else: 243 | distance_all.append(avg[9]) 244 | # 左肘到左腕 245 | if key_point[6][0] != -1 and key_point[7][0] != -1: 246 | distance_all.append(clac_distance(key_point[6], key_point[7])) 247 | else: 248 | distance_all.append(avg[10]) 249 | # 脖子到右臀 250 | if key_point[1][0] != -1 and key_point[8][0] != -1: 251 | distance_all.append(clac_distance(key_point[1], key_point[8])) 252 | else: 253 | distance_all.append(avg[11]) 254 | # 右臀到右膝 255 | if key_point[8][0] != -1 and key_point[9][0] != -1: 256 | distance_all.append(clac_distance(key_point[8], key_point[9])) 257 | else: 258 | distance_all.append(avg[12]) 259 | # 右膝到右踝 260 | if key_point[9][0] != -1 and key_point[10][0] != -1: 261 | distance_all.append(clac_distance(key_point[9], key_point[10])) 262 | else: 263 | distance_all.append(avg[13]) 264 | # 脖子到左臀 265 | if key_point[1][0] != -1 and key_point[11][0] != -1: 266 | distance_all.append(clac_distance(key_point[1], key_point[11])) 267 | else: 268 | distance_all.append(avg[14]) 269 | # 右臀到左膝 270 | if key_point[11][0] != -1 and key_point[12][0] != -1: 271 | distance_all.append(clac_distance(key_point[11], key_point[12])) 272 | else: 273 | distance_all.append(avg[15]) 274 | # 右膝到左踝 275 | if key_point[12][0] != -1 and key_point[13][0] != -1: 276 | distance_all.append(clac_distance(key_point[12], key_point[13])) 277 | else: 278 | distance_all.append(avg[16]) 279 | # 计算角度 280 | # 鼻子-右眼-右耳 281 | if key_point[0][0] != -1 and key_point[14][0] != -1 and key_point[16][0] != -1: 282 | angel_all.append(clac_angel(key_point[0], key_point[14], key_point[16])) 283 | else: 284 | angel_all.append(avg[17]) 285 | # 鼻子-左眼-左耳 286 | if key_point[0][0] != -1 and key_point[15][0] != 
-1 and key_point[17][0] != -1: 287 | angel_all.append(clac_angel(key_point[0], key_point[15], key_point[17])) 288 | else: 289 | angel_all.append(avg[18]) 290 | # 脖子-右肩-右肘 291 | if key_point[1][0] != -1 and key_point[2][0] != -1 and key_point[3][0] != -1: 292 | angel_all.append(clac_angel(key_point[1], key_point[2], key_point[3])) 293 | else: 294 | angel_all.append(avg[19]) 295 | # 右肩-右肘-右腕 296 | if key_point[2][0] != -1 and key_point[3][0] != -1 and key_point[4][0] != -1: 297 | angel_all.append(clac_angel(key_point[2], key_point[3], key_point[4])) 298 | else: 299 | angel_all.append(avg[20]) 300 | # 脖子-左肩-左肘 301 | if key_point[1][0] != -1 and key_point[5][0] != -1 and key_point[6][0] != -1: 302 | angel_all.append(clac_angel(key_point[1], key_point[5], key_point[6])) 303 | else: 304 | angel_all.append(avg[21]) 305 | # 左肩-左肘-左腕 306 | if key_point[5][0] != -1 and key_point[6][0] != -1 and key_point[7][0] != -1: 307 | angel_all.append(clac_angel(key_point[5], key_point[6], key_point[7])) 308 | else: 309 | angel_all.append(avg[22]) 310 | # 脖子-右臀-右膝 311 | if key_point[1][0] != -1 and key_point[8][0] != -1 and key_point[9][0] != -1: 312 | angel_all.append(clac_angel(key_point[1], key_point[8], key_point[9])) 313 | else: 314 | angel_all.append(avg[23]) 315 | # 右臀-右膝-右踝 316 | if key_point[8][0] != -1 and key_point[9][0] != -1 and key_point[10][0] != -1: 317 | angel_all.append(clac_angel(key_point[8], key_point[9], key_point[10])) 318 | else: 319 | angel_all.append(avg[24]) 320 | # 脖子-左臀-左膝 321 | if key_point[1][0] != -1 and key_point[11][0] != -1 and key_point[12][0] != -1: 322 | angel_all.append(clac_angel(key_point[1], key_point[11], key_point[12])) 323 | else: 324 | angel_all.append(avg[25]) 325 | # 左臀-左膝-左踝 326 | if key_point[11][0] != -1 and key_point[12][0] != -1 and key_point[13][0] != -1: 327 | angel_all.append(clac_angel(key_point[11], key_point[12], key_point[13])) 328 | else: 329 | angel_all.append(avg[26]) 330 | # 鼻子-脖子-右肩 331 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[2][0] != -1: 332 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[2])) 333 | else: 334 | angel_all.append(avg[27]) 335 | # 鼻子-脖子-左肩 336 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[5][0] != -1: 337 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[5])) 338 | else: 339 | angel_all.append(avg[28]) 340 | # 右眼-鼻子-左眼 341 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[15][0] != -1: 342 | angel_all.append(clac_angel(key_point[14], key_point[0], key_point[15])) 343 | else: 344 | angel_all.append(avg[29]) 345 | # 右眼-鼻子-脖子 346 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1: 347 | angel_all.append(clac_angel(key_point[14], key_point[0], key_point[1])) 348 | else: 349 | angel_all.append(avg[30]) 350 | # 左眼-鼻子-脖子 351 | if key_point[15][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1: 352 | angel_all.append(clac_angel(key_point[15], key_point[0], key_point[1])) 353 | else: 354 | angel_all.append(avg[31]) 355 | # 鼻子-脖子-右臀 356 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[8][0] != -1: 357 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[8])) 358 | else: 359 | angel_all.append(avg[32]) 360 | # 鼻子-脖子-左臀 361 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[11][0] != -1: 362 | angel_all.append(clac_angel(key_point[0], key_point[1], key_point[11])) 363 | else: 364 | angel_all.append(avg[33]) 365 | data = distance_all + angel_all 366 | 
datas.loc[0] = data  # stash the full 34-column vector in the module-level DataFrame
367 |     for i in [10 + 16, 8 + 16, 2 + 16, 16, 13, 4]:  # indices of d5, d14, d17, a2, a8, a10
368 |         data.pop(i)  # drop the six features the deployed model does not use; descending indices keep positions valid
369 |     return data
370 | # Fatigue detection (dlib facial landmarks, front camera)
371 | def main_detect(cap):
372 |     while switch == 1:
373 |         start = cv2.getTickCount()
374 |         result_show.grid_forget()
375 |         canva_r.delete("all")
376 |         global t, eye_close, eye_flag, yawn, yawn_flag, flag  # flag and eye_flag are assigned below, so they must be declared global too
377 |         ret, frame = cap.read()  # read a frame from the camera
378 |         frame = frame[0:1080, 0:1920 - 480]
379 |         frame = cv2.resize(frame, (int(frame.shape[1] / 2.25), int(frame.shape[0] / 2.25)))
380 |         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
381 |         subjects = detect(gray, 0)
382 |         for subject in subjects:
383 |             shape = predict(gray, subject)
384 |             shape = face_utils.shape_to_np(shape)  # coordinates of the 68 facial landmarks
385 | 
386 |             # average eye aspect ratio over both eyes
387 |             leftEye = shape[lStart:lEnd]
388 |             rightEye = shape[rStart:rEnd]
389 |             leftRatio = eye_aspect_ratio(leftEye)
390 |             rightRatio = eye_aspect_ratio(rightEye)
391 |             EyeRatio = (leftRatio + rightRatio) / 2.0
392 | 
393 |             # mouth aspect ratio
394 |             mouth = shape[mStart:mEnd]
395 |             mouthRatio = mouth_aspect_ratio(mouth)
396 | 
397 |             # draw the convex hulls (debug)
398 |             # leftEyeHull = cv2.convexHull(leftEye)
399 |             # rightEyeHull = cv2.convexHull(rightEye)
400 |             # mouthHull = cv2.convexHull(mouth)
401 |             # cv2.drawContours(frame, [leftEyeHull], -1, (50, 50, 250), 2)
402 |             # cv2.drawContours(frame, [rightEyeHull], -1, (50, 50, 250), 2)
403 |             # cv2.drawContours(frame, [mouthHull], -1, (150, 50, 150), 2)
404 | 
405 |             # is the driver yawning?
406 |             if mouthRatio > thresh_mouth:
407 |                 yawn = True
408 |                 yawn_flag = 0
409 |             if yawn == True and yawn_flag < 40:
410 |                 canva_r.create_text(200, 200, text="检测到您打了一个哈欠,\n请注意不要疲劳驾驶!", font=("Lucida Console", 15), fill="red")  # "Yawn detected - please do not drive while fatigued!"
411 |                 if yawn == True and t == 0:
412 |                     t = 1
413 |                     pygame.mixer.music.stop()
414 |                     pygame.mixer.music.load('sound\\yawn.mp3')
415 |                     pygame.mixer.music.play()
416 |                 yawn_flag = yawn_flag + 1
417 |             elif yawn == True and yawn_flag == 40:
418 |                 yawn = False
419 |                 yawn_flag = 0
420 |                 t = 0
421 | 
422 |             # are the eyes closed?
423 |             if EyeRatio < thresh_eye:
424 |                 flag = flag + 1
425 |                 if flag >= frame_check:
426 |                     eye_close = True
427 |                     eye_flag = 0
428 |             else:
429 |                 flag = 0
430 |             if eye_close == True and eye_flag < 40:
431 |                 # WARNING: "Your eyes have closed - please do not drive while fatigued!"
432 |                 canva_r.create_text(200, 200, text="警告!!!\n检测到您的眼睛已经闭合,\n请注意不要疲劳驾驶!", justify=LEFT,
433 |                                     font=("Lucida Console", 15), fill="red")
434 |                 if eye_close == True and t == 0:
435 |                     t = 1
436 |                     pygame.mixer.music.stop()
437 |                     pygame.mixer.music.load('sound\\eyes.mp3')
438 |                     pygame.mixer.music.play()
439 |                 eye_flag = eye_flag + 1
440 |             elif eye_close == True and eye_flag == 40:
441 |                 eye_close = False
442 |                 eye_flag = 0
443 |                 t = 0
444 |         end = cv2.getTickCount()
445 |         during1 = (end - start) / cv2.getTickFrequency()
446 |         # elapsed time of this iteration: tick difference divided by tick frequency
447 | 
448 |         FPS.set("FPS:" + str(round(1 / during1, 2)))
449 |         Showimage(frame, canva_l, "fit")
450 |         root.update()
451 | # Driving-behaviour classification (side camera)
452 | def main_class(vc):
453 |     # video sampling parameters
454 |     c = 0  # starting frame counter
455 |     timeF = 100  # frame sampling interval
456 |     result_show.grid(row=1, column=1)
457 |     while switch == 0:
458 |         # image = cv2.imread('img_59235.jpg')
459 |         rval, image = vc.read()
460 |         if c % timeF == 0:
461 |             start = cv2.getTickCount()
462 |             image = image[:, 240:1920 - 240, :]
463 |             image = cv2.resize(image, (int(image.shape[1] / 2.25), int(image.shape[0] / 2.25)))
464 | 
465 |             key_point = run_demo(net, image, image.shape[0] / 3, False, 1, 1)
466 |             data = clac_keras(key_point)
467 | 
468 |             y_pred = etc.predict([data])
469 |             y_pred_proba = etc.predict_proba([data])
470 | 
471 |             canvas = copy.deepcopy(image)
472 | 
Showimage(canvas, canva_l, "fill") 473 | 474 | canva_r.delete("all") 475 | # 创建分类标签 476 | text_all = ("安全驾驶 ", "用右手发短信 ", "用右手打电话 ", "用左手发短信 ", "用左手打电话 ", 477 | "调音乐播放器 ", "喝水 ", "后面拿东西 ", "弄头发或化妆 ", "与乘客交谈 ") 478 | for i in range(10): 479 | canva_r.create_text(70, 36 * i + 20, text=text_all[i], font=("Lucida Console", 10)) 480 | canva_r.create_rectangle(150, 15 + 36 * i, 150 + 100 * y_pred_proba[0][i], 25 + 36 * i, fill='cyan') 481 | canva_r.create_text(300, 36 * i + 20, text=y_pred_proba[0][i], justify=LEFT) 482 | 483 | end = cv2.getTickCount() 484 | during1 = (end - start) / cv2.getTickFrequency() 485 | # 计算代码运行的时间消耗,其中最后一个参数是时钟周期 486 | FPS.set("FPS:" + str(round(1 / during1, 2))) 487 | result.set("识别结果为:" + text_all[y_pred[0]]) 488 | root.update() 489 | # c = c + 1 490 | # 按钮状态 491 | def swi(): 492 | global switch 493 | switch = not switch 494 | # GUI初始化 495 | def GUI_init(): 496 | global result_show, canva_r, canva_l, FPS, root, result,switch 497 | # 创建GUI 498 | root = Tk() 499 | root.title("驾驶员检测") 500 | root.minsize(710, 410) 501 | # 创建视频幕布 502 | canva_l = Canvas(root, width=480, height=360, bg="white") 503 | canva_l.grid(row=0, column=0) 504 | # 创建概率直方图幕布 505 | canva_r = Canvas(root, width=350, height=360, bg="white") 506 | canva_r.grid(row=0, column=1) 507 | # 显示FPS 508 | FPS = tkinter.StringVar() 509 | FPS_show = tkinter.Label(root, textvariable=FPS, bg="white", font=("Lucida Console", 10)) 510 | FPS_show.grid(row=1, column=0) 511 | # 显示识别结果 512 | result = tkinter.StringVar() 513 | result_show = tkinter.Label(root, textvariable=result, bg="white", font=("Lucida Console", 14)) 514 | result_show.grid(row=1, column=1) 515 | # 创建切换按钮 516 | cut = tkinter.Button(root, text="切换视角", command=swi, font=("Lucida Console", 14)) 517 | cut.place(x=350, y=366) -------------------------------------------------------------------------------- /get_train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import cv2 4 | import numpy as np 5 | import torch 6 | 7 | from models.with_mobilenet import PoseEstimationWithMobileNet 8 | from modules.keypoints import extract_keypoints, group_keypoints 9 | from modules.load_state import load_state 10 | from modules.pose import Pose, track_poses 11 | from val import normalize, pad_width 12 | 13 | 14 | def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu, 15 | pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256): 16 | height, width, _ = img.shape 17 | scale = net_input_height_size / height 18 | 19 | scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) 20 | scaled_img = normalize(scaled_img, img_mean, img_scale) # 标准化图片 21 | 22 | min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)] 23 | padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims) 24 | 25 | tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float() 26 | 27 | if not cpu: 28 | tensor_img = tensor_img.cuda() 29 | 30 | stages_output = net(tensor_img) # 神经网络输出 31 | 32 | stage2_heatmaps = stages_output[-2] 33 | heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0)) 34 | heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC) 35 | 36 | stage2_pafs = stages_output[-1] 37 | pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0)) 38 | pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, 
interpolation=cv2.INTER_CUBIC) 39 | 40 | return heatmaps, pafs, scale, pad 41 | 42 | def run_demo(net, img, height_size, cpu, track, smooth): 43 | net = net.eval() # 锁定网络参数 44 | if not cpu: 45 | net = net.cuda() # 启动GPU 46 | 47 | stride = 8 48 | upsample_ratio = 4 49 | num_keypoints = Pose.num_kpts # 18个采样点 50 | previous_poses = [] # 预测集合 51 | 52 | orig_img = img.copy() 53 | heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride, upsample_ratio, cpu) 54 | 55 | total_keypoints_num = 0 56 | all_keypoints_by_type = [] 57 | for kpt_idx in range(num_keypoints): # 19th for bg 58 | total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num) 59 | 60 | pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True) 61 | for kpt_id in range(all_keypoints.shape[0]): 62 | all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale 63 | all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale 64 | current_poses = [] 65 | for n in range(len(pose_entries)): 66 | if len(pose_entries[n]) == 0: 67 | continue 68 | pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1 69 | for kpt_id in range(num_keypoints): 70 | if pose_entries[n][kpt_id] != -1.0: # keypoint was found 71 | pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0]) 72 | pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1]) 73 | pose = Pose(pose_keypoints, pose_entries[n][18]) 74 | current_poses.append(pose) 75 | if track: 76 | track_poses(previous_poses, current_poses, smooth=smooth) 77 | previous_poses = current_poses 78 | 79 | if len(previous_poses)==0: 80 | return [] 81 | else: 82 | return previous_poses[0].keypoints 83 | 84 | 85 | 86 | BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4, 87 | "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9, 88 | "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14, 89 | "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 } 90 | 91 | BODY_PARTS = { "鼻子": 0, "脖子": 1, 92 | "右肩": 2, "右肘": 3, "右腕": 4, 93 | "左肩": 5, "左肘": 6, "左腕": 7, 94 | "右臀": 8, "右膝": 9, "右踝": 10, 95 | "左臀": 11,"左膝": 12,"左踝": 13, 96 | "右眼": 14,"左眼": 15, 97 | "右耳": 16,"左耳": 17 } 98 | import math 99 | import pandas as pd 100 | import os 101 | 102 | # 计算距离函数 103 | def distance(a,b): 104 | dis_square = (a[0]-b[0])**2 + (a[1]-b[1])**2 105 | return math.sqrt(dis_square) 106 | 107 | # 计算角度函数 108 | def angel(a,b,c): 109 | return math.atan2((a[0]-b[0]),(a[1]-b[1]))-math.atan2((c[0]-b[0]),(c[1]-b[1])) 110 | 111 | 112 | if __name__ == '__main__': 113 | net = PoseEstimationWithMobileNet() # 加载网络结构 114 | checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu') # 加载模型参数 115 | load_state(net, checkpoint) # 拼接结构与参数 116 | 117 | datas = pd.DataFrame( 118 | columns=['d1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'd10', 'd11', 'd12', 'd13', 'd14', 'd15', 'd16', 119 | 'd17', 120 | 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13', 'a14', 'a15', 'a16', 121 | 'a17', 'label']) 122 | 123 | for label_name in sorted(os.listdir("../data/imgs/train")): 124 | print(label_name) 125 | for image_name in sorted(os.listdir("../data/imgs/train/" + label_name)): 126 | print(image_name) 127 | image = cv2.imread(os.path.join("../data/imgs/train/" + label_name, image_name)) 128 | 129 | # print (image.shape) # (480, 640, 3) 130 | key_point = run_demo(net, 
image, image.shape[0]/3, False, 1, 1) 131 | distance_all=[] 132 | angel_all=[] 133 | if len(key_point)==0: 134 | continue 135 | else: 136 | # 计算距离 137 | # 鼻子到脖子 138 | if key_point[0][0]!=-1 and key_point[1][0]!=-1: 139 | distance_all.append(distance(key_point[0], key_point[1])) 140 | else: 141 | distance_all.append(np.nan) 142 | # 鼻子到右眼 143 | if key_point[0][0]!=-1 and key_point[14][0]!=-1: 144 | distance_all.append(distance(key_point[0], key_point[14])) 145 | else: 146 | distance_all.append(np.nan) 147 | # 鼻子到左眼 148 | if key_point[0][0]!=-1 and key_point[15][0]!=-1: 149 | distance_all.append(distance(key_point[0], key_point[15])) 150 | else: 151 | distance_all.append(np.nan) 152 | # 右眼到右耳 153 | if key_point[14][0]!=-1 and key_point[16][0]!=-1: 154 | distance_all.append(distance(key_point[14], key_point[16])) 155 | else: 156 | distance_all.append(np.nan) 157 | # 左眼到左耳 158 | if key_point[15][0]!=-1 and key_point[17][0]!=-1: 159 | distance_all.append(distance(key_point[15], key_point[17])) 160 | else: 161 | distance_all.append(np.nan) 162 | # 脖子到右肩 163 | if key_point[1][0]!=-1 and key_point[2][0]!=-1: 164 | distance_all.append(distance(key_point[1], key_point[2])) 165 | else: 166 | distance_all.append(np.nan) 167 | # 右肩到右肘 168 | if key_point[2][0]!=-1 and key_point[3][0]!=-1: 169 | distance_all.append(distance(key_point[2], key_point[3])) 170 | else: 171 | distance_all.append(np.nan) 172 | # 右肘到右腕 173 | if key_point[3][0]!=-1 and key_point[4][0]!=-1: 174 | distance_all.append(distance(key_point[3], key_point[4])) 175 | else: 176 | distance_all.append(np.nan) 177 | # 脖子到左肩 178 | if key_point[1][0]!=-1 and key_point[5][0]!=-1: 179 | distance_all.append(distance(key_point[1], key_point[5])) 180 | else: 181 | distance_all.append(np.nan) 182 | # 左肩到左肘 183 | if key_point[5][0]!=-1 and key_point[6][0]!=-1: 184 | distance_all.append(distance(key_point[5], key_point[6])) 185 | else: 186 | distance_all.append(np.nan) 187 | # 左肘到左腕 188 | if key_point[6][0]!=-1 and key_point[7][0]!=-1: 189 | distance_all.append(distance(key_point[6], key_point[7])) 190 | else: 191 | distance_all.append(np.nan) 192 | # 脖子到右臀 193 | if key_point[1][0]!=-1 and key_point[8][0]!=-1: 194 | distance_all.append(distance(key_point[1], key_point[8])) 195 | else: 196 | distance_all.append(np.nan) 197 | # 右臀到右膝 198 | if key_point[8][0]!=-1 and key_point[9][0]!=-1: 199 | distance_all.append(distance(key_point[8], key_point[9])) 200 | else: 201 | distance_all.append(np.nan) 202 | # 右膝到右踝 203 | if key_point[9][0]!=-1 and key_point[10][0]!=-1: 204 | distance_all.append(distance(key_point[9], key_point[10])) 205 | else: 206 | distance_all.append(np.nan) 207 | # 脖子到左臀 208 | if key_point[1][0]!=-1 and key_point[11][0]!=-1: 209 | distance_all.append(distance(key_point[1], key_point[11])) 210 | else: 211 | distance_all.append(np.nan) 212 | # 右臀到左膝 213 | if key_point[11][0]!=-1 and key_point[12][0]!=-1: 214 | distance_all.append(distance(key_point[11], key_point[12])) 215 | else: 216 | distance_all.append(np.nan) 217 | # 右膝到左踝 218 | if key_point[12][0]!=-1 and key_point[13][0]!=-1: 219 | distance_all.append(distance(key_point[12], key_point[13])) 220 | else: 221 | distance_all.append(np.nan) 222 | #计算角度 223 | # 鼻子-右眼-右耳 224 | if key_point[0][0]!=-1 and key_point[14][0]!=-1 and key_point[16][0]!=-1: 225 | angel_all.append(angel(key_point[0],key_point[14],key_point[16])) 226 | else: 227 | angel_all.append(np.nan) 228 | # 鼻子-左眼-左耳 229 | if key_point[0][0] != -1 and key_point[15][0] != -1 and key_point[17][0] != -1: 230 | 
angel_all.append(angel(key_point[0], key_point[15], key_point[17])) 231 | else: 232 | angel_all.append(np.nan) 233 | # 脖子-右肩-右肘 234 | if key_point[1][0] != -1 and key_point[2][0] != -1 and key_point[3][0] != -1: 235 | angel_all.append(angel(key_point[1], key_point[2], key_point[3])) 236 | else: 237 | angel_all.append(np.nan) 238 | # 右肩-右肘-右腕 239 | if key_point[2][0] != -1 and key_point[3][0] != -1 and key_point[4][0] != -1: 240 | angel_all.append(angel(key_point[2], key_point[3], key_point[4])) 241 | else: 242 | angel_all.append(np.nan) 243 | # 脖子-左肩-左肘 244 | if key_point[1][0] != -1 and key_point[5][0] != -1 and key_point[6][0] != -1: 245 | angel_all.append(angel(key_point[1], key_point[5], key_point[6])) 246 | else: 247 | angel_all.append(np.nan) 248 | # 左肩-左肘-左腕 249 | if key_point[5][0] != -1 and key_point[6][0] != -1 and key_point[7][0] != -1: 250 | angel_all.append(angel(key_point[5], key_point[6], key_point[7])) 251 | else: 252 | angel_all.append(np.nan) 253 | # 脖子-右臀-右膝 254 | if key_point[1][0] != -1 and key_point[8][0] != -1 and key_point[9][0] != -1: 255 | angel_all.append(angel(key_point[1], key_point[8], key_point[9])) 256 | else: 257 | angel_all.append(np.nan) 258 | # 右臀-右膝-右踝 259 | if key_point[8][0] != -1 and key_point[9][0] != -1 and key_point[10][0] != -1: 260 | angel_all.append(angel(key_point[8], key_point[9], key_point[10])) 261 | else: 262 | angel_all.append(np.nan) 263 | # 脖子-左臀-左膝 264 | if key_point[1][0] != -1 and key_point[11][0] != -1 and key_point[12][0] != -1: 265 | angel_all.append(angel(key_point[1], key_point[11], key_point[12])) 266 | else: 267 | angel_all.append(np.nan) 268 | # 左臀-左膝-左踝 269 | if key_point[11][0] != -1 and key_point[12][0] != -1 and key_point[13][0] != -1: 270 | angel_all.append(angel(key_point[11], key_point[12], key_point[13])) 271 | else: 272 | angel_all.append(np.nan) 273 | # 鼻子-脖子-右肩 274 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[2][0] != -1: 275 | angel_all.append(angel(key_point[0], key_point[1], key_point[2])) 276 | else: 277 | angel_all.append(np.nan) 278 | # 鼻子-脖子-左肩 279 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[5][0] != -1: 280 | angel_all.append(angel(key_point[0], key_point[1], key_point[5])) 281 | else: 282 | angel_all.append(np.nan) 283 | # 右眼-鼻子-左眼 284 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[15][0] != -1: 285 | angel_all.append(angel(key_point[14], key_point[0], key_point[15])) 286 | else: 287 | angel_all.append(np.nan) 288 | # 右眼-鼻子-脖子 289 | if key_point[14][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1: 290 | angel_all.append(angel(key_point[14], key_point[0], key_point[1])) 291 | else: 292 | angel_all.append(np.nan) 293 | # 左眼-鼻子-脖子 294 | if key_point[15][0] != -1 and key_point[0][0] != -1 and key_point[1][0] != -1: 295 | angel_all.append(angel(key_point[15], key_point[0], key_point[1])) 296 | else: 297 | angel_all.append(np.nan) 298 | # 鼻子-脖子-右臀 299 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[8][0] != -1: 300 | angel_all.append(angel(key_point[0], key_point[1], key_point[8])) 301 | else: 302 | angel_all.append(np.nan) 303 | # 鼻子-脖子-左臀 304 | if key_point[0][0] != -1 and key_point[1][0] != -1 and key_point[11][0] != -1: 305 | angel_all.append(angel(key_point[0], key_point[1], key_point[11])) 306 | else: 307 | angel_all.append(np.nan) 308 | 309 | data = distance_all + angel_all + [label_name[1]] 310 | datas.loc[image_name] = data 311 | datas.to_csv("openpose_train_data.csv",sep=',') 312 | 
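The CSV produced above is the training set that openpose+RandomForest.ipynb turns into models/RandomForestClassifier_model.pkl, which func.py later loads with joblib. Below is a minimal training sketch under stated assumptions: scikit-learn with placeholder hyperparameters (the authors' actual settings live in the notebook and may differ), mean imputation of missing keypoints to mirror the `avg` fallback in func.py, and dropping the same six columns that clac_keras removes before prediction.

```python
# Sketch: train the random-forest classifier from openpose_train_data.csv.
# Hyperparameters here are assumptions, not the authors' recorded settings.
import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

df = pd.read_csv('openpose_train_data.csv', index_col=0)
X = df.drop(columns=['label'])
X = X.drop(columns=['d5', 'd14', 'd17', 'a2', 'a8', 'a10'])  # same pruning as clac_keras in func.py
X = X.fillna(X.mean())       # mean-fill undetected keypoints, like `avg` in func.py
y = df['label'].astype(int)  # class ids 0..9, taken from the c0..c9 folder names

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, y_train)
print('validation accuracy:', clf.score(X_val, y_val))

joblib.dump(clf, 'models/RandomForestClassifier_model.pkl')
```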
-------------------------------------------------------------------------------- /image/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/1.jpg -------------------------------------------------------------------------------- /image/11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/11.jpg -------------------------------------------------------------------------------- /image/12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/12.jpg -------------------------------------------------------------------------------- /image/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/2.jpg -------------------------------------------------------------------------------- /image/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/3.jpg -------------------------------------------------------------------------------- /image/4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/image/4.jpg -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | from func import main_detect, main_class, GUI_init 4 | 5 | 6 | # 载入测试视频 7 | vc = cv2.VideoCapture('video/dxandcar.mp4') 8 | cap = cv2.VideoCapture('video/dxha.mp4') 9 | 10 | if __name__ == '__main__': 11 | GUI_init() # 初始化GUI界面 12 | while 1: 13 | main_detect(cap) 14 | main_class(vc) 15 | -------------------------------------------------------------------------------- /models/with_mobilenet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from modules.conv import conv, conv_dw, conv_dw_no_bn 5 | 6 | 7 | class Cpm(nn.Module): 8 | def __init__(self, in_channels, out_channels): 9 | super().__init__() 10 | self.align = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False) 11 | self.trunk = nn.Sequential( 12 | conv_dw_no_bn(out_channels, out_channels), 13 | conv_dw_no_bn(out_channels, out_channels), 14 | conv_dw_no_bn(out_channels, out_channels) 15 | ) 16 | self.conv = conv(out_channels, out_channels, bn=False) 17 | 18 | def forward(self, x): 19 | x = self.align(x) 20 | x = self.conv(x + self.trunk(x)) 21 | return x 22 | 23 | 24 | class InitialStage(nn.Module): 25 | def __init__(self, num_channels, num_heatmaps, num_pafs): 26 | super().__init__() 27 | self.trunk = nn.Sequential( 28 | conv(num_channels, num_channels, bn=False), 29 | conv(num_channels, num_channels, bn=False), 30 | 
conv(num_channels, num_channels, bn=False) 31 | ) 32 | self.heatmaps = nn.Sequential( 33 | conv(num_channels, 512, kernel_size=1, padding=0, bn=False), 34 | conv(512, num_heatmaps, kernel_size=1, padding=0, bn=False, relu=False) 35 | ) 36 | self.pafs = nn.Sequential( 37 | conv(num_channels, 512, kernel_size=1, padding=0, bn=False), 38 | conv(512, num_pafs, kernel_size=1, padding=0, bn=False, relu=False) 39 | ) 40 | 41 | def forward(self, x): 42 | trunk_features = self.trunk(x) 43 | heatmaps = self.heatmaps(trunk_features) 44 | pafs = self.pafs(trunk_features) 45 | return [heatmaps, pafs] 46 | 47 | 48 | class RefinementStageBlock(nn.Module): 49 | def __init__(self, in_channels, out_channels): 50 | super().__init__() 51 | self.initial = conv(in_channels, out_channels, kernel_size=1, padding=0, bn=False) 52 | self.trunk = nn.Sequential( 53 | conv(out_channels, out_channels), 54 | conv(out_channels, out_channels, dilation=2, padding=2) 55 | ) 56 | 57 | def forward(self, x): 58 | initial_features = self.initial(x) 59 | trunk_features = self.trunk(initial_features) 60 | return initial_features + trunk_features 61 | 62 | 63 | class RefinementStage(nn.Module): 64 | def __init__(self, in_channels, out_channels, num_heatmaps, num_pafs): 65 | super().__init__() 66 | self.trunk = nn.Sequential( 67 | RefinementStageBlock(in_channels, out_channels), 68 | RefinementStageBlock(out_channels, out_channels), 69 | RefinementStageBlock(out_channels, out_channels), 70 | RefinementStageBlock(out_channels, out_channels), 71 | RefinementStageBlock(out_channels, out_channels) 72 | ) 73 | self.heatmaps = nn.Sequential( 74 | conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False), 75 | conv(out_channels, num_heatmaps, kernel_size=1, padding=0, bn=False, relu=False) 76 | ) 77 | self.pafs = nn.Sequential( 78 | conv(out_channels, out_channels, kernel_size=1, padding=0, bn=False), 79 | conv(out_channels, num_pafs, kernel_size=1, padding=0, bn=False, relu=False) 80 | ) 81 | 82 | def forward(self, x): 83 | trunk_features = self.trunk(x) 84 | heatmaps = self.heatmaps(trunk_features) 85 | pafs = self.pafs(trunk_features) 86 | return [heatmaps, pafs] 87 | 88 | 89 | class PoseEstimationWithMobileNet(nn.Module): 90 | def __init__(self, num_refinement_stages=1, num_channels=128, num_heatmaps=19, num_pafs=38): 91 | super().__init__() 92 | self.model = nn.Sequential( 93 | conv( 3, 32, stride=2, bias=False), 94 | conv_dw( 32, 64), 95 | conv_dw( 64, 128, stride=2), 96 | conv_dw(128, 128), 97 | conv_dw(128, 256, stride=2), 98 | conv_dw(256, 256), 99 | conv_dw(256, 512), # conv4_2 100 | conv_dw(512, 512, dilation=2, padding=2), 101 | conv_dw(512, 512), 102 | conv_dw(512, 512), 103 | conv_dw(512, 512), 104 | conv_dw(512, 512) # conv5_5 105 | ) 106 | self.cpm = Cpm(512, num_channels) 107 | 108 | self.initial_stage = InitialStage(num_channels, num_heatmaps, num_pafs) 109 | self.refinement_stages = nn.ModuleList() 110 | for idx in range(num_refinement_stages): 111 | self.refinement_stages.append(RefinementStage(num_channels + num_heatmaps + num_pafs, num_channels, 112 | num_heatmaps, num_pafs)) 113 | 114 | def forward(self, x): 115 | backbone_features = self.model(x) 116 | backbone_features = self.cpm(backbone_features) 117 | 118 | stages_output = self.initial_stage(backbone_features) 119 | for refinement_stage in self.refinement_stages: 120 | stages_output.extend( 121 | refinement_stage(torch.cat([backbone_features, stages_output[-2], stages_output[-1]], dim=1))) 122 | 123 | return stages_output 124 | 
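For orientation, forward returns a flat list of stage outputs — with the default single refinement stage that is [initial heatmaps, initial PAFs, refined heatmaps, refined PAFs] — and the infer_fast helpers in func.py and get_train.py consume the last two. A quick shape check follows; the dummy input size is an arbitrary choice, any height and width divisible by the backbone stride of 8 works.

```python
# Sketch: inspect the output shapes of PoseEstimationWithMobileNet.
import torch
from models.with_mobilenet import PoseEstimationWithMobileNet

net = PoseEstimationWithMobileNet().eval()
x = torch.zeros(1, 3, 256, 344)  # NCHW dummy input; H and W divisible by 8
with torch.no_grad():
    out = net(x)                 # [heatmaps, pafs, heatmaps, pafs]

for i, t in enumerate(out):
    print(i, tuple(t.shape))
# (1, 19, 32, 43) for heatmaps (18 keypoints + background) and
# (1, 38, 32, 43) for PAFs (19 limbs x 2 channels), at 1/8 input resolution.
```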
-------------------------------------------------------------------------------- /modules/conv.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | def conv(in_channels, out_channels, kernel_size=3, padding=1, bn=True, dilation=1, stride=1, relu=True, bias=True): 5 | modules = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=bias)] 6 | if bn: 7 | modules.append(nn.BatchNorm2d(out_channels)) 8 | if relu: 9 | modules.append(nn.ReLU(inplace=True)) 10 | return nn.Sequential(*modules) 11 | 12 | 13 | def conv_dw(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1): 14 | return nn.Sequential( 15 | nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation=dilation, groups=in_channels, bias=False), 16 | nn.BatchNorm2d(in_channels), 17 | nn.ReLU(inplace=True), 18 | 19 | nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False), 20 | nn.BatchNorm2d(out_channels), 21 | nn.ReLU(inplace=True), 22 | ) 23 | 24 | 25 | def conv_dw_no_bn(in_channels, out_channels, kernel_size=3, padding=1, stride=1, dilation=1): 26 | return nn.Sequential( 27 | nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation=dilation, groups=in_channels, bias=False), 28 | nn.ELU(inplace=True), 29 | 30 | nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False), 31 | nn.ELU(inplace=True), 32 | ) 33 | -------------------------------------------------------------------------------- /modules/get_parameters.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | 4 | def get_parameters(model, predicate): 5 | for module in model.modules(): 6 | for param_name, param in module.named_parameters(): 7 | if predicate(module, param_name): 8 | yield param 9 | 10 | 11 | def get_parameters_conv(model, name): 12 | return get_parameters(model, lambda m, p: isinstance(m, nn.Conv2d) and m.groups == 1 and p == name) 13 | 14 | 15 | def get_parameters_conv_depthwise(model, name): 16 | return get_parameters(model, lambda m, p: isinstance(m, nn.Conv2d) 17 | and m.groups == m.in_channels 18 | and m.in_channels == m.out_channels 19 | and p == name) 20 | 21 | 22 | def get_parameters_bn(model, name): 23 | return get_parameters(model, lambda m, p: isinstance(m, nn.BatchNorm2d) and p == name) 24 | -------------------------------------------------------------------------------- /modules/keypoints.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | from operator import itemgetter 4 | 5 | BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], 6 | [11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]] 7 | BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5], 8 | [6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]) 9 | 10 | 11 | def linspace2d(start, stop, n=10): 12 | points = 1 / (n - 1) * (stop - start) 13 | return points[:, None] * np.arange(n) + start[:, None] 14 | 15 | 16 | def extract_keypoints(heatmap, all_keypoints, total_keypoint_num): 17 | heatmap[heatmap < 0.1] = 0 18 | heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode='constant') 19 | heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 1:heatmap_with_borders.shape[1]-1] 20 | heatmap_left = 
heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 2:heatmap_with_borders.shape[1]] 21 | heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0]-1, 0:heatmap_with_borders.shape[1]-2] 22 | heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1]-1] 23 | heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0]-2, 1:heatmap_with_borders.shape[1]-1] 24 | 25 | heatmap_peaks = (heatmap_center > heatmap_left) &\ 26 | (heatmap_center > heatmap_right) &\ 27 | (heatmap_center > heatmap_up) &\ 28 | (heatmap_center > heatmap_down) 29 | heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0]-1, 1:heatmap_center.shape[1]-1] 30 | keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h) 31 | keypoints = sorted(keypoints, key=itemgetter(0)) 32 | 33 | suppressed = np.zeros(len(keypoints), np.uint8) 34 | keypoints_with_score_and_id = [] 35 | keypoint_num = 0 36 | for i in range(len(keypoints)): 37 | if suppressed[i]: 38 | continue 39 | for j in range(i+1, len(keypoints)): 40 | if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 + 41 | (keypoints[i][1] - keypoints[j][1]) ** 2) < 6: 42 | suppressed[j] = 1 43 | keypoint_with_score_and_id = (keypoints[i][0], keypoints[i][1], heatmap[keypoints[i][1], keypoints[i][0]], 44 | total_keypoint_num + keypoint_num) 45 | keypoints_with_score_and_id.append(keypoint_with_score_and_id) 46 | keypoint_num += 1 47 | all_keypoints.append(keypoints_with_score_and_id) 48 | return keypoint_num 49 | 50 | 51 | def group_keypoints(all_keypoints_by_type, pafs, pose_entry_size=20, min_paf_score=0.05, demo=False): 52 | pose_entries = [] 53 | all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist]) 54 | for part_id in range(len(BODY_PARTS_PAF_IDS)): 55 | part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]] 56 | kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]] 57 | kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]] 58 | num_kpts_a = len(kpts_a) 59 | num_kpts_b = len(kpts_b) 60 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] 61 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] 62 | 63 | if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part 64 | continue 65 | elif num_kpts_a == 0: # body part has just 'b' keypoints 66 | for i in range(num_kpts_b): 67 | num = 0 68 | for j in range(len(pose_entries)): # check if already in some pose, was added by another body part 69 | if pose_entries[j][kpt_b_id] == kpts_b[i][3]: 70 | num += 1 71 | continue 72 | if num == 0: 73 | pose_entry = np.ones(pose_entry_size) * -1 74 | pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx 75 | pose_entry[-1] = 1 # num keypoints in pose 76 | pose_entry[-2] = kpts_b[i][2] # pose score 77 | pose_entries.append(pose_entry) 78 | continue 79 | elif num_kpts_b == 0: # body part has just 'a' keypoints 80 | for i in range(num_kpts_a): 81 | num = 0 82 | for j in range(len(pose_entries)): 83 | if pose_entries[j][kpt_a_id] == kpts_a[i][3]: 84 | num += 1 85 | continue 86 | if num == 0: 87 | pose_entry = np.ones(pose_entry_size) * -1 88 | pose_entry[kpt_a_id] = kpts_a[i][3] 89 | pose_entry[-1] = 1 90 | pose_entry[-2] = kpts_a[i][2] 91 | pose_entries.append(pose_entry) 92 | continue 93 | 94 | connections = [] 95 | for i in range(num_kpts_a): 96 | kpt_a = np.array(kpts_a[i][0:2]) 97 | for j in range(num_kpts_b): 98 | kpt_b = np.array(kpts_b[j][0:2]) 99 | mid_point = [(), ()] 100 | mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)), 101 | int(round((kpt_a[1] + 
kpt_b[1]) * 0.5))) 102 | mid_point[1] = mid_point[0] 103 | 104 | vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]] 105 | vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2) 106 | if vec_norm == 0: 107 | continue 108 | vec[0] /= vec_norm 109 | vec[1] /= vec_norm 110 | cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] + 111 | vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1]) 112 | 113 | height_n = pafs.shape[0] // 2 114 | success_ratio = 0 115 | point_num = 10 # number of points to integration over paf 116 | if cur_point_score > -100: 117 | passed_point_score = 0 118 | passed_point_num = 0 119 | x, y = linspace2d(kpt_a, kpt_b) 120 | for point_idx in range(point_num): 121 | if not demo: 122 | px = int(round(x[point_idx])) 123 | py = int(round(y[point_idx])) 124 | else: 125 | px = int(x[point_idx]) 126 | py = int(y[point_idx]) 127 | paf = part_pafs[py, px, 0:2] 128 | cur_point_score = vec[0] * paf[0] + vec[1] * paf[1] 129 | if cur_point_score > min_paf_score: 130 | passed_point_score += cur_point_score 131 | passed_point_num += 1 132 | success_ratio = passed_point_num / point_num 133 | ratio = 0 134 | if passed_point_num > 0: 135 | ratio = passed_point_score / passed_point_num 136 | ratio += min(height_n / vec_norm - 1, 0) 137 | if ratio > 0 and success_ratio > 0.8: 138 | score_all = ratio + kpts_a[i][2] + kpts_b[j][2] 139 | connections.append([i, j, ratio, score_all]) 140 | if len(connections) > 0: 141 | connections = sorted(connections, key=itemgetter(2), reverse=True) 142 | 143 | num_connections = min(num_kpts_a, num_kpts_b) 144 | has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32) 145 | has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32) 146 | filtered_connections = [] 147 | for row in range(len(connections)): 148 | if len(filtered_connections) == num_connections: 149 | break 150 | i, j, cur_point_score = connections[row][0:3] 151 | if not has_kpt_a[i] and not has_kpt_b[j]: 152 | filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score]) 153 | has_kpt_a[i] = 1 154 | has_kpt_b[j] = 1 155 | connections = filtered_connections 156 | if len(connections) == 0: 157 | continue 158 | 159 | if part_id == 0: 160 | pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))] 161 | for i in range(len(connections)): 162 | pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0] 163 | pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1] 164 | pose_entries[i][-1] = 2 165 | pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2] 166 | elif part_id == 17 or part_id == 18: 167 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] 168 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] 169 | for i in range(len(connections)): 170 | for j in range(len(pose_entries)): 171 | if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1: 172 | pose_entries[j][kpt_b_id] = connections[i][1] 173 | elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1: 174 | pose_entries[j][kpt_a_id] = connections[i][0] 175 | continue 176 | else: 177 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0] 178 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1] 179 | for i in range(len(connections)): 180 | num = 0 181 | for j in range(len(pose_entries)): 182 | if pose_entries[j][kpt_a_id] == connections[i][0]: 183 | pose_entries[j][kpt_b_id] = connections[i][1] 184 | num += 1 185 | pose_entries[j][-1] += 1 186 | pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2] 187 | if num == 0: 188 | 
--------------------------------------------------------------------------------
/modules/load_state.py:
--------------------------------------------------------------------------------
1 | import collections
2 |
3 |
4 | def load_state(net, checkpoint):
5 | source_state = checkpoint['state_dict']
6 | target_state = net.state_dict()
7 | new_target_state = collections.OrderedDict()
8 | for target_key, target_value in target_state.items():
9 | if target_key in source_state and source_state[target_key].size() == target_state[target_key].size():
10 | new_target_state[target_key] = source_state[target_key]
11 | else:
12 | new_target_state[target_key] = target_state[target_key]
13 | print('[WARNING] No pre-trained parameters found for {}'.format(target_key))
14 |
15 | net.load_state_dict(new_target_state)
16 |
17 |
18 | def load_from_mobilenet(net, checkpoint):
19 | source_state = checkpoint['state_dict']
20 | target_state = net.state_dict()
21 | new_target_state = collections.OrderedDict()
22 | for target_key, target_value in target_state.items():
23 | k = target_key
24 | if k.find('model') != -1:
25 | k = k.replace('model', 'module.model')
26 | if k in source_state and source_state[k].size() == target_state[target_key].size():
27 | new_target_state[target_key] = source_state[k]
28 | else:
29 | new_target_state[target_key] = target_state[target_key]
30 | print('[WARNING] No pre-trained parameters found for {}'.format(target_key))
31 |
32 | net.load_state_dict(new_target_state)
33 |
--------------------------------------------------------------------------------
/modules/loss.py:
--------------------------------------------------------------------------------
1 | def l2_loss(input, target, mask, batch_size):
2 | loss = (input - target) * mask
3 | loss = (loss * loss) / 2 / batch_size
4 |
5 | return loss.sum()
6 |
--------------------------------------------------------------------------------
/modules/one_euro_filter.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 |
4 | def get_alpha(rate=30, cutoff=1):
5 | tau = 1 / (2 * math.pi * cutoff)
6 | te = 1 / rate
7 | return 1 / (1 + tau / te)
8 |
9 |
10 | class LowPassFilter:
11 | def __init__(self):
12 | self.x_previous = None
13 |
14 | def __call__(self, x, alpha=0.5):
15 | if self.x_previous is None:
16 | self.x_previous = x
17 | return x
18 | x_filtered = alpha * x + (1 - alpha) * self.x_previous
19 | self.x_previous = x_filtered
20 | return x_filtered
21 |
22 |
23 | class OneEuroFilter:
24 | def __init__(self, freq=15, mincutoff=1, beta=0.05, dcutoff=1):
25 | self.freq = freq
26 | self.mincutoff = mincutoff
27 | self.beta = beta
28 | self.dcutoff = dcutoff
29 | self.filter_x = LowPassFilter()
30 | self.filter_dx = LowPassFilter()
31 | self.x_previous = None
32 | self.dx = None
33 |
34 | def __call__(self, x):
35 | if self.dx is None:
36 | self.dx = 0
37 | else:
38 | self.dx = (x - self.x_previous) * self.freq
39 | dx_smoothed = self.filter_dx(self.dx, get_alpha(self.freq, self.dcutoff))
40 | cutoff = self.mincutoff + self.beta * abs(dx_smoothed) # faster motion -> higher cutoff -> less lag
41 | x_filtered = self.filter_x(x, get_alpha(self.freq, cutoff))
42 | self.x_previous = x
43 | return x_filtered
44 |
45 |
46 | if __name__ == '__main__':
47 | one_euro_filter = OneEuroFilter(freq=15, beta=0.1)
48 | for val in range(10):
49 | x = val + (-1)**(val % 2)
50 | x_filtered = one_euro_filter(x)
51 | print(x_filtered, x)
52 |
--------------------------------------------------------------------------------
/modules/pose.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | from modules.keypoints import BODY_PARTS_KPT_IDS, BODY_PARTS_PAF_IDS
5 | from modules.one_euro_filter import OneEuroFilter
6 |
7 |
8 | class Pose:
9 | num_kpts = 18
10 | kpt_names = ['nose', 'neck',
11 | 'r_sho', 'r_elb', 'r_wri', 'l_sho', 'l_elb', 'l_wri',
12 | 'r_hip', 'r_knee', 'r_ank', 'l_hip', 'l_knee', 'l_ank',
13 | 'r_eye', 'l_eye',
14 | 'r_ear', 'l_ear']
15 | sigmas = np.array([.26, .79, .79, .72, .62, .79, .72, .62, 1.07, .87, .89, 1.07, .87, .89, .25, .25, .35, .35],
16 | dtype=np.float32) / 10.0
17 | vars = (sigmas * 2) ** 2
18 | last_id = -1
19 | color = [0, 224, 255]
20 |
21 | def __init__(self, keypoints, confidence):
22 | super().__init__()
23 | self.keypoints = keypoints
24 | self.confidence = confidence
25 | self.bbox = Pose.get_bbox(self.keypoints)
26 | self.id = None
27 | self.filters = [[OneEuroFilter(), OneEuroFilter()] for _ in range(Pose.num_kpts)]
28 |
29 | @staticmethod
30 | def get_bbox(keypoints):
31 | found_keypoints = np.zeros((np.count_nonzero(keypoints[:, 0] != -1), 2), dtype=np.int32)
32 | found_kpt_id = 0
33 | for kpt_id in range(Pose.num_kpts):
34 | if keypoints[kpt_id, 0] == -1:
35 | continue
36 | found_keypoints[found_kpt_id] = keypoints[kpt_id]
37 | found_kpt_id += 1
38 | bbox = cv2.boundingRect(found_keypoints)
39 | return bbox
40 |
41 | def update_id(self, id=None):
42 | self.id = id
43 | if self.id is None:
44 | self.id = Pose.last_id + 1
45 | Pose.last_id += 1
46 |
47 | def draw(self, img):
48 | assert self.keypoints.shape == (Pose.num_kpts, 2)
49 |
50 | for part_id in range(len(BODY_PARTS_PAF_IDS) - 2):
51 | kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
52 | global_kpt_a_id = self.keypoints[kpt_a_id, 0]
53 | if global_kpt_a_id != -1:
54 | x_a, y_a = self.keypoints[kpt_a_id]
55 | cv2.circle(img, (int(x_a), int(y_a)), 3, Pose.color, -1)
56 | kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
57 | global_kpt_b_id = self.keypoints[kpt_b_id, 0]
58 | if global_kpt_b_id != -1:
59 | x_b, y_b = self.keypoints[kpt_b_id]
60 | cv2.circle(img, (int(x_b), int(y_b)), 3, Pose.color, -1)
61 | if global_kpt_a_id != -1 and global_kpt_b_id != -1:
62 | cv2.line(img, (int(x_a), int(y_a)), (int(x_b), int(y_b)), Pose.color, 2)
63 |
64 |
65 | def get_similarity(a, b, threshold=0.5):
66 | num_similar_kpt = 0
67 | for kpt_id in range(Pose.num_kpts):
68 | if a.keypoints[kpt_id, 0] != -1 and b.keypoints[kpt_id, 0] != -1:
69 | distance = np.sum((a.keypoints[kpt_id] - b.keypoints[kpt_id]) ** 2)
70 | area = max(a.bbox[2] * a.bbox[3], b.bbox[2] * b.bbox[3])
71 | similarity = np.exp(-distance / (2 * (area + np.spacing(1)) * Pose.vars[kpt_id])) # COCO OKS-style term
72 | if similarity > threshold:
73 | num_similar_kpt += 1
74 | return num_similar_kpt
75 |
76 |
77 | def track_poses(previous_poses, current_poses, threshold=3, smooth=False):
78 | """Propagate pose ids from the previous frame. An id is propagated
79 | if there are at least `threshold` similar keypoints between a pose from the previous frame and the current one.
80 | If a correspondence between a previous and a current pose is established, the pose keypoints are smoothed.
81 |
82 | :param previous_poses: poses from previous frame with ids
83 | :param current_poses: poses from current frame to assign ids
84 | :param threshold: minimal number of similar keypoints between poses
85 | :param smooth: smooth pose keypoints between frames
86 | :return: None
87 | """
88 | current_poses = sorted(current_poses, key=lambda pose: pose.confidence, reverse=True) # match confident poses first
89 | mask = np.ones(len(previous_poses), dtype=np.int32)
90 | for current_pose in current_poses:
91 | best_matched_id = None
92 | best_matched_pose_id = None
93 | best_matched_iou = 0
94 | for id, previous_pose in enumerate(previous_poses):
95 | if not mask[id]:
96 | continue
97 | iou = get_similarity(current_pose, previous_pose) # a count of similar keypoints, despite the name
98 | if iou > best_matched_iou:
99 | best_matched_iou = iou
100 | best_matched_pose_id = previous_pose.id
101 | best_matched_id = id
102 | if best_matched_iou >= threshold:
103 | mask[best_matched_id] = 0
104 | else: # pose not similar to any previous
105 | best_matched_pose_id = None
106 | current_pose.update_id(best_matched_pose_id)
107 |
108 | if smooth:
109 | for kpt_id in range(Pose.num_kpts):
110 | if current_pose.keypoints[kpt_id, 0] == -1:
111 | continue
112 | # reuse filter if previous pose has valid filter
113 | if (best_matched_pose_id is not None
114 | and previous_poses[best_matched_id].keypoints[kpt_id, 0] != -1):
115 | current_pose.filters[kpt_id] = previous_poses[best_matched_id].filters[kpt_id]
116 | current_pose.keypoints[kpt_id, 0] = current_pose.filters[kpt_id][0](current_pose.keypoints[kpt_id, 0])
117 | current_pose.keypoints[kpt_id, 1] = current_pose.filters[kpt_id][1](current_pose.keypoints[kpt_id, 1])
118 | current_pose.bbox = Pose.get_bbox(current_pose.keypoints)
119 |
--------------------------------------------------------------------------------
/openpose+RandomForest.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "9001513f",
6 | "metadata": {},
7 | "source": [
8 | "## Classification prediction with a random forest"
9 | ]
10 | },
11 | {
12 | "cell_type": "markdown",
13 | "id": "c29f7d65",
14 | "metadata": {},
15 | "source": [
16 | "### 1. Import libraries"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 32,
22 | "id": "ebdebfdf",
23 | "metadata": {},
24 | "outputs": [],
25 | "source": [
26 | "import pandas as pd\n",
27 | "from sklearn.model_selection import train_test_split # dataset splitting\n",
28 | "from sklearn.ensemble import RandomForestClassifier # random forest\n",
29 | "import lightgbm as lgb # not used below\n",
30 | "from sklearn.model_selection import GridSearchCV # grid search\n",
31 | "from sklearn.metrics import accuracy_score,recall_score,log_loss # model evaluation\n",
32 | "import joblib # model saving"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "id": "2c40a63f",
38 | "metadata": {},
39 | "source": [
40 | "### 2. Load and preprocess the data"
41 | ]
42 | },
43 | {
44 | "cell_type": "code",
45 | "execution_count": 34,
46 | "id": "2ae24d75",
47 | "metadata": {},
48 | "outputs": [
49 | {
50 | "data": {
51 | "text/html": [
52 | "
\n", 53 | "\n", 66 | "\n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | " \n", 167 | " \n", 168 | " \n", 169 | " \n", 170 | " \n", 171 | " \n", 172 | " \n", 173 | " \n", 174 | " \n", 175 | " \n", 176 | " \n", 177 | " \n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | " \n", 183 | " \n", 184 | " \n", 185 | " \n", 186 | " \n", 187 | " \n", 188 | " \n", 189 | " \n", 190 | " \n", 191 | " \n", 192 | " \n", 193 | " \n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | " \n", 221 | " \n", 222 | " \n", 223 | " \n", 224 | " \n", 225 | " \n", 226 | " \n", 227 | " \n", 228 | " \n", 229 | " \n", 230 | " \n", 231 | " \n", 232 | " \n", 233 | " \n", 234 | " \n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | " \n", 249 | " \n", 250 | " \n", 251 | " \n", 252 | " \n", 253 | " \n", 254 | " \n", 255 | " \n", 256 | " \n", 257 | " \n", 258 | " \n", 259 | " \n", 260 | " \n", 261 | " \n", 262 | " \n", 263 | " \n", 264 | " \n", 265 | " \n", 266 | " \n", 267 | " \n", 268 | " \n", 269 | " \n", 270 | " \n", 271 | " \n", 272 | " \n", 273 | " \n", 274 | " \n", 275 | " \n", 276 | " \n", 277 | " \n", 278 | " \n", 279 | " \n", 280 | " \n", 281 | " \n", 282 | " \n", 283 | " \n", 284 | " \n", 285 | " \n", 286 | " \n", 287 | " \n", 288 | " \n", 289 | " \n", 290 | " \n", 291 | " \n", 292 | " \n", 293 | " \n", 294 | " \n", 295 | " \n", 296 | " \n", 297 | " \n", 298 | " \n", 299 | " \n", 300 | " \n", 301 | " \n", 302 | " \n", 303 | " \n", 304 | " \n", 305 | " \n", 306 | " \n", 307 | " \n", 308 | " \n", 309 | " \n", 310 | " \n", 311 | " \n", 312 | " \n", 313 | " \n", 314 | " \n", 315 | " \n", 316 | " \n", 317 | " \n", 318 | " \n", 319 | " \n", 320 | " \n", 321 | " \n", 322 | " \n", 323 | " \n", 324 | " \n", 325 | " \n", 326 | " \n", 327 | " \n", 328 | " \n", 329 | " \n", 330 | " \n", 331 | " \n", 332 | " \n", 333 | " \n", 334 | " \n", 335 | " \n", 336 | " \n", 337 | " \n", 338 | " \n", 339 | " \n", 340 | " 
\n", 341 | " \n", 342 | " \n", 343 | " \n", 344 | " \n", 345 | " \n", 346 | " \n", 347 | " \n", 348 | " \n", 349 | " \n", 350 | " \n", 351 | " \n", 352 | " \n", 353 | " \n", 354 | " \n", 355 | " \n", 356 | " \n", 357 | " \n", 358 | " \n", 359 | "
Unnamed: 0d1d2d3d4d5d6d7d8d9...a9a10a11a12a13a14a15a16a17label
0img_100026.jpg127.13772124.73863418.97366661.773781NaN64.621978157.035028142.61837263.780875...-4.055314NaN3.1854240.3821551.004067-1.479100-2.4831672.3412702.0195200
1img_10003.jpg80.04998418.97366626.83281653.665631NaN50.911688133.221620118.18629450.911688...-4.414870NaN3.7001920.5585990.785398-1.665748-2.4511462.5023832.1990500
2img_100050.jpg96.93296718.973666NaN43.266615NaN46.861498164.754363151.78932850.911688...-3.937187NaN3.4558250.404892NaN-1.512041NaN2.2112331.9119450
3img_100074.jpg108.16653837.94733230.59411764.899923NaN59.093147197.362610174.00000048.373546...-4.416948NaN3.5043180.4636481.051650-1.837048-2.8886992.4850202.3006960
4img_10012.jpg84.21401324.73863418.00000053.665631NaN51.613952163.768129189.73666051.613952...NaNNaN3.6905350.548942-4.957368-1.7444683.2129002.4414892.1378090
..................................................................
223915.jpg98.40731721.63330818.97366654.332311NaN43.680659115.412304122.37646856.920998...-4.449394NaN3.7783940.593350-4.978641-1.5031033.4755382.1206961.8423962
223926.jpg89.19641225.45584418.97366654.000000NaN48.000000114.629839131.72699054.000000...-4.468285NaN3.8794080.737815-5.176037-1.5232133.6528231.9868611.6596162
223937.jpg101.82337625.45584430.00000048.373546NaN37.947332121.490740128.82546343.680659...-4.251517NaN3.6052400.507099-5.497787-1.5707963.9269912.0005591.6685232
223948.jpg80.72174421.63330821.63330848.373546NaN48.373546114.000000134.16407954.000000...-4.156744NaN3.8552190.837981-4.712389-1.4259843.2864052.0951571.7382562
223959.jpg101.82337616.97056318.00000054.000000NaN48.37354699.859902113.84199651.264022...-4.371704NaN3.4078450.426627-5.497787-1.5707963.9269912.0258971.6724852
\n", 360 | "

22396 rows × 36 columns

\n", 361 | "
" 362 | ], 363 | "text/plain": [ 364 | " Unnamed: 0 d1 d2 d3 d4 d5 \\\n", 365 | "0 img_100026.jpg 127.137721 24.738634 18.973666 61.773781 NaN \n", 366 | "1 img_10003.jpg 80.049984 18.973666 26.832816 53.665631 NaN \n", 367 | "2 img_100050.jpg 96.932967 18.973666 NaN 43.266615 NaN \n", 368 | "3 img_100074.jpg 108.166538 37.947332 30.594117 64.899923 NaN \n", 369 | "4 img_10012.jpg 84.214013 24.738634 18.000000 53.665631 NaN \n", 370 | "... ... ... ... ... ... .. \n", 371 | "22391 5.jpg 98.407317 21.633308 18.973666 54.332311 NaN \n", 372 | "22392 6.jpg 89.196412 25.455844 18.973666 54.000000 NaN \n", 373 | "22393 7.jpg 101.823376 25.455844 30.000000 48.373546 NaN \n", 374 | "22394 8.jpg 80.721744 21.633308 21.633308 48.373546 NaN \n", 375 | "22395 9.jpg 101.823376 16.970563 18.000000 54.000000 NaN \n", 376 | "\n", 377 | " d6 d7 d8 d9 ... a9 a10 \\\n", 378 | "0 64.621978 157.035028 142.618372 63.780875 ... -4.055314 NaN \n", 379 | "1 50.911688 133.221620 118.186294 50.911688 ... -4.414870 NaN \n", 380 | "2 46.861498 164.754363 151.789328 50.911688 ... -3.937187 NaN \n", 381 | "3 59.093147 197.362610 174.000000 48.373546 ... -4.416948 NaN \n", 382 | "4 51.613952 163.768129 189.736660 51.613952 ... NaN NaN \n", 383 | "... ... ... ... ... ... ... ... \n", 384 | "22391 43.680659 115.412304 122.376468 56.920998 ... -4.449394 NaN \n", 385 | "22392 48.000000 114.629839 131.726990 54.000000 ... -4.468285 NaN \n", 386 | "22393 37.947332 121.490740 128.825463 43.680659 ... -4.251517 NaN \n", 387 | "22394 48.373546 114.000000 134.164079 54.000000 ... -4.156744 NaN \n", 388 | "22395 48.373546 99.859902 113.841996 51.264022 ... -4.371704 NaN \n", 389 | "\n", 390 | " a11 a12 a13 a14 a15 a16 a17 \\\n", 391 | "0 3.185424 0.382155 1.004067 -1.479100 -2.483167 2.341270 2.019520 \n", 392 | "1 3.700192 0.558599 0.785398 -1.665748 -2.451146 2.502383 2.199050 \n", 393 | "2 3.455825 0.404892 NaN -1.512041 NaN 2.211233 1.911945 \n", 394 | "3 3.504318 0.463648 1.051650 -1.837048 -2.888699 2.485020 2.300696 \n", 395 | "4 3.690535 0.548942 -4.957368 -1.744468 3.212900 2.441489 2.137809 \n", 396 | "... ... ... ... ... ... ... ... \n", 397 | "22391 3.778394 0.593350 -4.978641 -1.503103 3.475538 2.120696 1.842396 \n", 398 | "22392 3.879408 0.737815 -5.176037 -1.523213 3.652823 1.986861 1.659616 \n", 399 | "22393 3.605240 0.507099 -5.497787 -1.570796 3.926991 2.000559 1.668523 \n", 400 | "22394 3.855219 0.837981 -4.712389 -1.425984 3.286405 2.095157 1.738256 \n", 401 | "22395 3.407845 0.426627 -5.497787 -1.570796 3.926991 2.025897 1.672485 \n", 402 | "\n", 403 | " label \n", 404 | "0 0 \n", 405 | "1 0 \n", 406 | "2 0 \n", 407 | "3 0 \n", 408 | "4 0 \n", 409 | "... ... 
\n", 410 | "22391 2 \n", 411 | "22392 2 \n", 412 | "22393 2 \n", 413 | "22394 2 \n", 414 | "22395 2 \n", 415 | "\n", 416 | "[22396 rows x 36 columns]" 417 | ] 418 | }, 419 | "execution_count": 34, 420 | "metadata": {}, 421 | "output_type": "execute_result" 422 | } 423 | ], 424 | "source": [ 425 | "data = pd.read_csv('openpose_train_data.csv')\n", 426 | "data" 427 | ] 428 | }, 429 | { 430 | "cell_type": "code", 431 | "execution_count": 35, 432 | "id": "d21f134e", 433 | "metadata": {}, 434 | "outputs": [ 435 | { 436 | "name": "stdout", 437 | "output_type": "stream", 438 | "text": [ 439 | "\n", 440 | "RangeIndex: 22396 entries, 0 to 22395\n", 441 | "Data columns (total 36 columns):\n", 442 | " # Column Non-Null Count Dtype \n", 443 | "--- ------ -------------- ----- \n", 444 | " 0 Unnamed: 0 22396 non-null object \n", 445 | " 1 d1 22169 non-null float64\n", 446 | " 2 d2 22059 non-null float64\n", 447 | " 3 d3 19920 non-null float64\n", 448 | " 4 d4 21651 non-null float64\n", 449 | " 5 d5 2643 non-null float64\n", 450 | " 6 d6 22349 non-null float64\n", 451 | " 7 d7 21747 non-null float64\n", 452 | " 8 d8 19946 non-null float64\n", 453 | " 9 d9 22345 non-null float64\n", 454 | " 10 d10 21391 non-null float64\n", 455 | " 11 d11 20228 non-null float64\n", 456 | " 12 d12 21356 non-null float64\n", 457 | " 13 d13 19525 non-null float64\n", 458 | " 14 d14 4759 non-null float64\n", 459 | " 15 d15 21668 non-null float64\n", 460 | " 16 d16 18118 non-null float64\n", 461 | " 17 d17 1804 non-null float64\n", 462 | " 18 a1 21651 non-null float64\n", 463 | " 19 a2 2643 non-null float64\n", 464 | " 20 a3 21742 non-null float64\n", 465 | " 21 a4 19946 non-null float64\n", 466 | " 22 a5 21381 non-null float64\n", 467 | " 23 a6 20228 non-null float64\n", 468 | " 24 a7 19523 non-null float64\n", 469 | " 25 a8 4759 non-null float64\n", 470 | " 26 a9 18114 non-null float64\n", 471 | " 27 a10 1804 non-null float64\n", 472 | " 28 a11 22162 non-null float64\n", 473 | " 29 a12 22157 non-null float64\n", 474 | " 30 a13 19861 non-null float64\n", 475 | " 31 a14 22059 non-null float64\n", 476 | " 32 a15 19916 non-null float64\n", 477 | " 33 a16 21272 non-null float64\n", 478 | " 34 a17 21594 non-null float64\n", 479 | " 35 label 22396 non-null int64 \n", 480 | "dtypes: float64(34), int64(1), object(1)\n", 481 | "memory usage: 6.2+ MB\n" 482 | ] 483 | } 484 | ], 485 | "source": [ 486 | "data.info()" 487 | ] 488 | }, 489 | { 490 | "cell_type": "markdown", 491 | "id": "6cc7f299", 492 | "metadata": {}, 493 | "source": [ 494 | "#### 删除缺失值过多的列" 495 | ] 496 | }, 497 | { 498 | "cell_type": "code", 499 | "execution_count": 36, 500 | "id": "298a848b", 501 | "metadata": {}, 502 | "outputs": [ 503 | { 504 | "name": "stdout", 505 | "output_type": "stream", 506 | "text": [ 507 | "\n", 508 | "RangeIndex: 22396 entries, 0 to 22395\n", 509 | "Data columns (total 30 columns):\n", 510 | " # Column Non-Null Count Dtype \n", 511 | "--- ------ -------------- ----- \n", 512 | " 0 Unnamed: 0 22396 non-null object \n", 513 | " 1 d1 22169 non-null float64\n", 514 | " 2 d2 22059 non-null float64\n", 515 | " 3 d3 19920 non-null float64\n", 516 | " 4 d4 21651 non-null float64\n", 517 | " 5 d6 22349 non-null float64\n", 518 | " 6 d7 21747 non-null float64\n", 519 | " 7 d8 19946 non-null float64\n", 520 | " 8 d9 22345 non-null float64\n", 521 | " 9 d10 21391 non-null float64\n", 522 | " 10 d11 20228 non-null float64\n", 523 | " 11 d12 21356 non-null float64\n", 524 | " 12 d13 19525 non-null float64\n", 525 | " 13 d15 21668 non-null float64\n", 526 | 
" 14 d16 18118 non-null float64\n", 527 | " 15 a1 21651 non-null float64\n", 528 | " 16 a3 21742 non-null float64\n", 529 | " 17 a4 19946 non-null float64\n", 530 | " 18 a5 21381 non-null float64\n", 531 | " 19 a6 20228 non-null float64\n", 532 | " 20 a7 19523 non-null float64\n", 533 | " 21 a9 18114 non-null float64\n", 534 | " 22 a11 22162 non-null float64\n", 535 | " 23 a12 22157 non-null float64\n", 536 | " 24 a13 19861 non-null float64\n", 537 | " 25 a14 22059 non-null float64\n", 538 | " 26 a15 19916 non-null float64\n", 539 | " 27 a16 21272 non-null float64\n", 540 | " 28 a17 21594 non-null float64\n", 541 | " 29 label 22396 non-null int64 \n", 542 | "dtypes: float64(28), int64(1), object(1)\n", 543 | "memory usage: 5.1+ MB\n" 544 | ] 545 | } 546 | ], 547 | "source": [ 548 | "for l in data.columns:\n", 549 | " if data[l].count() < 5000:\n", 550 | " data = data.drop([l], axis=1)\n", 551 | "data.info()" 552 | ] 553 | }, 554 | { 555 | "cell_type": "markdown", 556 | "id": "17df314e", 557 | "metadata": {}, 558 | "source": [ 559 | "#### 缺失值进行同类均值补充" 560 | ] 561 | }, 562 | { 563 | "cell_type": "code", 564 | "execution_count": 37, 565 | "id": "169896f1", 566 | "metadata": {}, 567 | "outputs": [ 568 | { 569 | "data": { 570 | "text/html": [ 571 | "
\n", 572 | "\n", 585 | "\n", 586 | " \n", 587 | " \n", 588 | " \n", 589 | " \n", 590 | " \n", 591 | " \n", 592 | " \n", 593 | " \n", 594 | " \n", 595 | " \n", 596 | " \n", 597 | " \n", 598 | " \n", 599 | " \n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | " \n", 616 | " \n", 617 | " \n", 618 | " \n", 619 | " \n", 620 | " \n", 621 | " \n", 622 | " \n", 623 | " \n", 624 | " \n", 625 | " \n", 626 | " \n", 627 | " \n", 628 | " \n", 629 | " \n", 630 | " \n", 631 | " \n", 632 | " \n", 633 | " \n", 634 | " \n", 635 | " \n", 636 | " \n", 637 | " \n", 638 | " \n", 639 | " \n", 640 | " \n", 641 | " \n", 642 | " \n", 643 | " \n", 644 | " \n", 645 | " \n", 646 | " \n", 647 | " \n", 648 | " \n", 649 | " \n", 650 | " \n", 651 | " \n", 652 | " \n", 653 | " \n", 654 | " \n", 655 | " \n", 656 | " \n", 657 | " \n", 658 | " \n", 659 | " \n", 660 | " \n", 661 | " \n", 662 | " \n", 663 | " \n", 664 | " \n", 665 | " \n", 666 | " \n", 667 | " \n", 668 | " \n", 669 | " \n", 670 | " \n", 671 | " \n", 672 | " \n", 673 | " \n", 674 | " \n", 675 | " \n", 676 | " \n", 677 | " \n", 678 | " \n", 679 | " \n", 680 | " \n", 681 | " \n", 682 | " \n", 683 | " \n", 684 | " \n", 685 | " \n", 686 | " \n", 687 | " \n", 688 | " \n", 689 | " \n", 690 | " \n", 691 | " \n", 692 | " \n", 693 | " \n", 694 | " \n", 695 | " \n", 696 | " \n", 697 | " \n", 698 | " \n", 699 | " \n", 700 | " \n", 701 | " \n", 702 | " \n", 703 | " \n", 704 | " \n", 705 | " \n", 706 | " \n", 707 | " \n", 708 | " \n", 709 | " \n", 710 | " \n", 711 | " \n", 712 | " \n", 713 | " \n", 714 | " \n", 715 | " \n", 716 | " \n", 717 | " \n", 718 | " \n", 719 | " \n", 720 | " \n", 721 | " \n", 722 | " \n", 723 | " \n", 724 | " \n", 725 | " \n", 726 | " \n", 727 | " \n", 728 | " \n", 729 | " \n", 730 | " \n", 731 | " \n", 732 | " \n", 733 | " \n", 734 | "
Unnamed: 0d1d2d3d4d6d7d8d9d10...a7a9a11a12a13a14a15a16a17label
0img_100026.jpg127.13772124.73863418.97366661.77378164.621978157.035028142.61837263.780875120.149906...-4.369365-4.0553143.1854240.3821551.004067-1.479100-2.4831672.3412702.0195200
1img_10003.jpg80.04998418.97366626.83281653.66563150.911688133.221620118.18629450.91168893.145048...-4.515336-4.4148703.7001920.5585990.785398-1.665748-2.4511462.5023832.1990500
2img_100050.jpg96.93296718.97366622.04460843.26661546.861498164.754363151.78932850.91168897.672924...-4.384618-3.9371873.4558250.4048920.245618-1.512041-1.8265602.2112331.9119450
3img_100074.jpg108.16653837.94733230.59411764.89992359.093147197.362610174.00000048.373546109.489726...-4.540093-4.4169483.5043180.4636481.051650-1.837048-2.8886992.4850202.3006960
4img_10012.jpg84.21401324.73863418.00000053.66563151.613952163.768129189.73666051.613952124.851912...-4.233964-4.0836153.6905350.548942-4.957368-1.7444683.2129002.4414892.1378090
\n", 735 | "

5 rows × 30 columns

\n", 736 | "
" 737 | ], 738 | "text/plain": [ 739 | " Unnamed: 0 d1 d2 d3 d4 d6 \\\n", 740 | "0 img_100026.jpg 127.137721 24.738634 18.973666 61.773781 64.621978 \n", 741 | "1 img_10003.jpg 80.049984 18.973666 26.832816 53.665631 50.911688 \n", 742 | "2 img_100050.jpg 96.932967 18.973666 22.044608 43.266615 46.861498 \n", 743 | "3 img_100074.jpg 108.166538 37.947332 30.594117 64.899923 59.093147 \n", 744 | "4 img_10012.jpg 84.214013 24.738634 18.000000 53.665631 51.613952 \n", 745 | "\n", 746 | " d7 d8 d9 d10 ... a7 a9 \\\n", 747 | "0 157.035028 142.618372 63.780875 120.149906 ... -4.369365 -4.055314 \n", 748 | "1 133.221620 118.186294 50.911688 93.145048 ... -4.515336 -4.414870 \n", 749 | "2 164.754363 151.789328 50.911688 97.672924 ... -4.384618 -3.937187 \n", 750 | "3 197.362610 174.000000 48.373546 109.489726 ... -4.540093 -4.416948 \n", 751 | "4 163.768129 189.736660 51.613952 124.851912 ... -4.233964 -4.083615 \n", 752 | "\n", 753 | " a11 a12 a13 a14 a15 a16 a17 label \n", 754 | "0 3.185424 0.382155 1.004067 -1.479100 -2.483167 2.341270 2.019520 0 \n", 755 | "1 3.700192 0.558599 0.785398 -1.665748 -2.451146 2.502383 2.199050 0 \n", 756 | "2 3.455825 0.404892 0.245618 -1.512041 -1.826560 2.211233 1.911945 0 \n", 757 | "3 3.504318 0.463648 1.051650 -1.837048 -2.888699 2.485020 2.300696 0 \n", 758 | "4 3.690535 0.548942 -4.957368 -1.744468 3.212900 2.441489 2.137809 0 \n", 759 | "\n", 760 | "[5 rows x 30 columns]" 761 | ] 762 | }, 763 | "execution_count": 37, 764 | "metadata": {}, 765 | "output_type": "execute_result" 766 | } 767 | ], 768 | "source": [ 769 | "for l in data['label'].value_counts().index:\n", 770 | " mean_val = data[data.label==l].mean()\n", 771 | " data[data.label==l] = data[data.label==l].fillna(mean_val)\n", 772 | "data.head()" 773 | ] 774 | }, 775 | { 776 | "cell_type": "markdown", 777 | "id": "5c52cd52", 778 | "metadata": {}, 779 | "source": [ 780 | "#### 处理第一列名称" 781 | ] 782 | }, 783 | { 784 | "cell_type": "code", 785 | "execution_count": 38, 786 | "id": "e79c6678", 787 | "metadata": {}, 788 | "outputs": [ 789 | { 790 | "data": { 791 | "text/html": [ 792 | "
\n", 793 | "\n", 806 | "\n", 807 | " \n", 808 | " \n", 809 | " \n", 810 | " \n", 811 | " \n", 812 | " \n", 813 | " \n", 814 | " \n", 815 | " \n", 816 | " \n", 817 | " \n", 818 | " \n", 819 | " \n", 820 | " \n", 821 | " \n", 822 | " \n", 823 | " \n", 824 | " \n", 825 | " \n", 826 | " \n", 827 | " \n", 828 | " \n", 829 | " \n", 830 | " \n", 831 | " \n", 832 | " \n", 833 | " \n", 834 | " \n", 835 | " \n", 836 | " \n", 837 | " \n", 838 | " \n", 839 | " \n", 840 | " \n", 841 | " \n", 842 | " \n", 843 | " \n", 844 | " \n", 845 | " \n", 846 | " \n", 847 | " \n", 848 | " \n", 849 | " \n", 850 | " \n", 851 | " \n", 852 | " \n", 853 | " \n", 854 | " \n", 855 | " \n", 856 | " \n", 857 | " \n", 858 | " \n", 859 | " \n", 860 | " \n", 861 | " \n", 862 | " \n", 863 | " \n", 864 | " \n", 865 | " \n", 866 | " \n", 867 | " \n", 868 | " \n", 869 | " \n", 870 | " \n", 871 | " \n", 872 | " \n", 873 | " \n", 874 | " \n", 875 | " \n", 876 | " \n", 877 | " \n", 878 | " \n", 879 | " \n", 880 | " \n", 881 | " \n", 882 | " \n", 883 | " \n", 884 | " \n", 885 | " \n", 886 | " \n", 887 | " \n", 888 | " \n", 889 | " \n", 890 | " \n", 891 | " \n", 892 | " \n", 893 | " \n", 894 | " \n", 895 | " \n", 896 | " \n", 897 | " \n", 898 | " \n", 899 | " \n", 900 | " \n", 901 | " \n", 902 | " \n", 903 | " \n", 904 | " \n", 905 | " \n", 906 | " \n", 907 | " \n", 908 | " \n", 909 | " \n", 910 | " \n", 911 | " \n", 912 | " \n", 913 | " \n", 914 | " \n", 915 | " \n", 916 | " \n", 917 | " \n", 918 | " \n", 919 | " \n", 920 | " \n", 921 | " \n", 922 | " \n", 923 | " \n", 924 | " \n", 925 | " \n", 926 | " \n", 927 | " \n", 928 | " \n", 929 | " \n", 930 | " \n", 931 | " \n", 932 | " \n", 933 | " \n", 934 | " \n", 935 | " \n", 936 | " \n", 937 | " \n", 938 | " \n", 939 | " \n", 940 | " \n", 941 | " \n", 942 | " \n", 943 | " \n", 944 | " \n", 945 | " \n", 946 | " \n", 947 | " \n", 948 | " \n", 949 | " \n", 950 | " \n", 951 | " \n", 952 | " \n", 953 | " \n", 954 | " \n", 955 | "
d1d2d3d4d6d7d8d9d10d11...a7a9a11a12a13a14a15a16a17label
0127.13772124.73863418.97366661.77378164.621978157.035028142.61837263.780875120.149906108.664622...-4.369365-4.0553143.1854240.3821551.004067-1.479100-2.4831672.3412702.0195200
180.04998418.97366626.83281653.66563150.911688133.221620118.18629450.91168893.14504884.000000...-4.515336-4.4148703.7001920.5585990.785398-1.665748-2.4511462.5023832.1990500
296.93296718.97366622.04460843.26661546.861498164.754363151.78932850.91168897.67292472.993150...-4.384618-3.9371873.4558250.4048920.245618-1.512041-1.8265602.2112331.9119450
3108.16653837.94733230.59411764.89992359.093147197.362610174.00000048.373546109.489726116.498927...-4.540093-4.4169483.5043180.4636481.051650-1.837048-2.8886992.4850202.3006960
484.21401324.73863418.00000053.66563151.613952163.768129189.73666051.613952124.851912117.204537...-4.233964-4.0836153.6905350.548942-4.957368-1.7444683.2129002.4414892.1378090
\n", 956 | "

5 rows × 29 columns

\n", 957 | "
" 958 | ], 959 | "text/plain": [ 960 | " d1 d2 d3 d4 d6 d7 \\\n", 961 | "0 127.137721 24.738634 18.973666 61.773781 64.621978 157.035028 \n", 962 | "1 80.049984 18.973666 26.832816 53.665631 50.911688 133.221620 \n", 963 | "2 96.932967 18.973666 22.044608 43.266615 46.861498 164.754363 \n", 964 | "3 108.166538 37.947332 30.594117 64.899923 59.093147 197.362610 \n", 965 | "4 84.214013 24.738634 18.000000 53.665631 51.613952 163.768129 \n", 966 | "\n", 967 | " d8 d9 d10 d11 ... a7 a9 \\\n", 968 | "0 142.618372 63.780875 120.149906 108.664622 ... -4.369365 -4.055314 \n", 969 | "1 118.186294 50.911688 93.145048 84.000000 ... -4.515336 -4.414870 \n", 970 | "2 151.789328 50.911688 97.672924 72.993150 ... -4.384618 -3.937187 \n", 971 | "3 174.000000 48.373546 109.489726 116.498927 ... -4.540093 -4.416948 \n", 972 | "4 189.736660 51.613952 124.851912 117.204537 ... -4.233964 -4.083615 \n", 973 | "\n", 974 | " a11 a12 a13 a14 a15 a16 a17 label \n", 975 | "0 3.185424 0.382155 1.004067 -1.479100 -2.483167 2.341270 2.019520 0 \n", 976 | "1 3.700192 0.558599 0.785398 -1.665748 -2.451146 2.502383 2.199050 0 \n", 977 | "2 3.455825 0.404892 0.245618 -1.512041 -1.826560 2.211233 1.911945 0 \n", 978 | "3 3.504318 0.463648 1.051650 -1.837048 -2.888699 2.485020 2.300696 0 \n", 979 | "4 3.690535 0.548942 -4.957368 -1.744468 3.212900 2.441489 2.137809 0 \n", 980 | "\n", 981 | "[5 rows x 29 columns]" 982 | ] 983 | }, 984 | "execution_count": 38, 985 | "metadata": {}, 986 | "output_type": "execute_result" 987 | } 988 | ], 989 | "source": [ 990 | "data = data.drop(['Unnamed: 0'], axis=1)\n", 991 | "data.head()" 992 | ] 993 | }, 994 | { 995 | "cell_type": "markdown", 996 | "id": "376cc618", 997 | "metadata": {}, 998 | "source": [ 999 | "### 3.切分数据输入(特征)与输出(预测目标变量)" 1000 | ] 1001 | }, 1002 | { 1003 | "cell_type": "code", 1004 | "execution_count": 39, 1005 | "id": "fbe2aadf", 1006 | "metadata": {}, 1007 | "outputs": [], 1008 | "source": [ 1009 | "label = data.label\n", 1010 | "feature = data.drop(['label'], axis=1)" 1011 | ] 1012 | }, 1013 | { 1014 | "cell_type": "markdown", 1015 | "id": "d3515fa8", 1016 | "metadata": {}, 1017 | "source": [ 1018 | "### 4.切分训练集与测试集(0.8:0.2)" 1019 | ] 1020 | }, 1021 | { 1022 | "cell_type": "code", 1023 | "execution_count": 43, 1024 | "id": "9e364eb3", 1025 | "metadata": {}, 1026 | "outputs": [], 1027 | "source": [ 1028 | "X_train, X_test, y_train, y_test = train_test_split(feature, label, test_size=0.00000001)" 1029 | ] 1030 | }, 1031 | { 1032 | "cell_type": "markdown", 1033 | "id": "aa69eee2", 1034 | "metadata": {}, 1035 | "source": [ 1036 | "### 5.模型训练" 1037 | ] 1038 | }, 1039 | { 1040 | "cell_type": "code", 1041 | "execution_count": 44, 1042 | "id": "e44b8a9b", 1043 | "metadata": {}, 1044 | "outputs": [ 1045 | { 1046 | "data": { 1047 | "text/plain": [ 1048 | "RandomForestClassifier(n_estimators=50, random_state=0)" 1049 | ] 1050 | }, 1051 | "execution_count": 44, 1052 | "metadata": {}, 1053 | "output_type": "execute_result" 1054 | } 1055 | ], 1056 | "source": [ 1057 | "RFC = RandomForestClassifier(n_estimators=50, random_state=0)\n", 1058 | "RFC.fit(X_train, y_train)" 1059 | ] 1060 | }, 1061 | { 1062 | "cell_type": "markdown", 1063 | "id": "02656cec", 1064 | "metadata": {}, 1065 | "source": [ 1066 | "### 6.模型的存储与加载" 1067 | ] 1068 | }, 1069 | { 1070 | "cell_type": "code", 1071 | "execution_count": 45, 1072 | "id": "bbcbb2a6", 1073 | "metadata": {}, 1074 | "outputs": [], 1075 | "source": [ 1076 | "# 模型存储\n", 1077 | "joblib.dump(RFC, 'RandomForestClassifier_model.pkl')\n", 1078 | "# 
1061 | {
1062 | "cell_type": "markdown",
1063 | "id": "02656cec",
1064 | "metadata": {},
1065 | "source": [
1066 | "### 6. Saving and loading the model"
1067 | ]
1068 | },
1069 | {
1070 | "cell_type": "code",
1071 | "execution_count": 45,
1072 | "id": "bbcbb2a6",
1073 | "metadata": {},
1074 | "outputs": [],
1075 | "source": [
1076 | "# Save the model\n",
1077 | "joblib.dump(RFC, 'RandomForestClassifier_model.pkl')\n",
1078 | "# Load the model\n",
1079 | "RFC = joblib.load('RandomForestClassifier_model.pkl')"
1080 | ]
1081 | },
1082 | {
1083 | "cell_type": "markdown",
1084 | "id": "a7ce0cd1",
1085 | "metadata": {},
1086 | "source": [
1087 | "### 7. Prediction and evaluation"
1088 | ]
1089 | },
1090 | {
1091 | "cell_type": "code",
1092 | "execution_count": 42,
1093 | "id": "dc03e3d7",
1094 | "metadata": {},
1095 | "outputs": [
1096 | {
1097 | "name": "stdout",
1098 | "output_type": "stream",
1099 | "text": [
1100 | "Accuracy: 0.9392748705125915\n",
1101 | "Log loss: 0.3425187971418098\n"
1102 | ]
1103 | }
1104 | ],
1105 | "source": [
1106 | "# Predict on the test set\n",
1107 | "y_pred = RFC.predict(X_test)\n",
1108 | "y_pred_proba = RFC.predict_proba(X_test)\n",
1109 | "\n",
1110 | "# Evaluate\n",
1111 | "print('Accuracy:', accuracy_score(y_test, y_pred))\n",
1112 | "print(\"Log loss:\", log_loss(y_test, y_pred_proba))"
1113 | ]
1114 | },
1115 | {
1116 | "cell_type": "code",
1117 | "execution_count": 61,
1118 | "id": "45ef3287",
1119 | "metadata": {},
1120 | "outputs": [
1121 | {
1122 | "data": {
1123 | "image/png": "<base64 PNG omitted: line plot of test accuracy against the number of trees (n_estimators)>",
1124 | "text/plain": [
1125 | "<Figure size 720x432 with 1 Axes>"
1126 | ]
1127 | },
1128 | "metadata": {
1129 | "needs_background": "light"
1130 | },
1131 | "output_type": "display_data"
1132 | }
1133 | ],
1134 | "source": [
1135 | "import matplotlib.pyplot as plt\n",
1136 | "accuracy=[]\n",
1137 | "for i in range(1,50):\n",
1138 | " RFC = RandomForestClassifier(n_estimators=i, random_state=0)\n",
1139 | " RFC.fit(X_train, y_train)\n",
1140 | " y_pred = RFC.predict(X_test)\n",
1141 | " accuracy.append(accuracy_score(y_test, y_pred))\n",
1142 | "plt.figure(figsize=(10,6))\n",
1143 | "plt.plot(accuracy)\n",
1144 | "plt.ylabel('Accuracy',fontsize=19)\n",
1145 | "plt.xlabel('N_estimators',fontsize=19)\n",
1146 | "plt.xticks(fontsize=15)\n",
1147 | "plt.yticks(fontsize=15)\n",
1148 | "plt.ylim([0.87,1])\n",
1149 | "plt.xlim([0,48])\n",
1150 | "plt.grid()"
1151 | ]
1152 | },
" 1126 | ] 1127 | }, 1128 | "metadata": { 1129 | "needs_background": "light" 1130 | }, 1131 | "output_type": "display_data" 1132 | } 1133 | ], 1134 | "source": [ 1135 | "import matplotlib.pyplot as plt\n", 1136 | "accuracy=[]\n", 1137 | "for i in range(1,50):\n", 1138 | " RFC = RandomForestClassifier(n_estimators=i, random_state=0)\n", 1139 | " RFC.fit(X_train, y_train)\n", 1140 | " y_pred = RFC.predict(X_test)\n", 1141 | " accuracy.append(accuracy_score(y_test, y_pred))\n", 1142 | "plt.figure(figsize=(10,6))\n", 1143 | "plt.plot(accuracy)\n", 1144 | "plt.ylabel('Accuracy',fontsize=19)\n", 1145 | "plt.xlabel('N_estimators',fontsize=19)\n", 1146 | "plt.xticks(fontsize=15)\n", 1147 | "plt.yticks(fontsize=15)\n", 1148 | "plt.ylim([0.87,1])\n", 1149 | "plt.xlim([0,48])\n", 1150 | "plt.grid()" 1151 | ] 1152 | }, 1153 | { 1154 | "cell_type": "code", 1155 | "execution_count": 57, 1156 | "id": "9edccc96", 1157 | "metadata": {}, 1158 | "outputs": [ 1159 | { 1160 | "name": "stdout", 1161 | "output_type": "stream", 1162 | "text": [ 1163 | " precision recall f1-score support\n", 1164 | "\n", 1165 | " 0 0.94 0.98 0.96 518\n", 1166 | " 1 0.97 1.00 0.98 454\n", 1167 | " 2 0.97 0.99 0.98 474\n", 1168 | " 3 0.98 0.98 0.98 451\n", 1169 | " 4 0.99 0.99 0.99 446\n", 1170 | " 5 0.99 0.98 0.99 459\n", 1171 | " 6 0.98 0.97 0.97 500\n", 1172 | " 7 1.00 0.99 0.99 399\n", 1173 | " 8 0.97 0.90 0.94 378\n", 1174 | " 9 0.96 0.94 0.95 397\n", 1175 | "\n", 1176 | " accuracy 0.97 4476\n", 1177 | " macro avg 0.97 0.97 0.97 4476\n", 1178 | "weighted avg 0.97 0.97 0.97 4476\n", 1179 | "\n", 1180 | "Precision is 0.9738\n", 1181 | "Recall is 0.9736\n", 1182 | "f1 score is 0.9735\n", 1183 | "accuracy score is 0.9736\n" 1184 | ] 1185 | } 1186 | ], 1187 | "source": [ 1188 | " from sklearn.metrics import confusion_matrix, f1_score, recall_score, precision_score, classification_report, accuracy_score\n", 1189 | " f1 = f1_score(y_test, y_pred, average='weighted')\n", 1190 | " precision = precision_score(y_test, y_pred, average='weighted')\n", 1191 | " recall = recall_score(y_test, y_pred, average='weighted')\n", 1192 | " acc = accuracy_score(y_test, y_pred)\n", 1193 | " print(classification_report(y_test, y_pred))\n", 1194 | " print(\"Precision is \", round(precision,4))\n", 1195 | " print(\"Recall is \", round(recall,4))\n", 1196 | " print(\"f1 score is \", round(f1,4))\n", 1197 | " print(\"accuracy score is \", round(acc,4))" 1198 | ] 1199 | } 1200 | ], 1201 | "metadata": { 1202 | "kernelspec": { 1203 | "display_name": "Python 3", 1204 | "language": "python", 1205 | "name": "python3" 1206 | }, 1207 | "language_info": { 1208 | "codemirror_mode": { 1209 | "name": "ipython", 1210 | "version": 3 1211 | }, 1212 | "file_extension": ".py", 1213 | "mimetype": "text/x-python", 1214 | "name": "python", 1215 | "nbconvert_exporter": "python", 1216 | "pygments_lexer": "ipython3", 1217 | "version": "3.6.6" 1218 | } 1219 | }, 1220 | "nbformat": 4, 1221 | "nbformat_minor": 5 1222 | } 1223 | -------------------------------------------------------------------------------- /requirement.txt: -------------------------------------------------------------------------------- 1 | opencv==4.5.5 2 | numpy==1.19.3 3 | pandas==1.1.5 4 | torch==1.8.1 5 | dlib==19.23.0 6 | joblib==1.0.1 7 | pygame==2.1.2 8 | imutils==0.5.4 9 | scipy==1.5.4 10 | pillow==8.2.0 11 | pycocotools==2.0.0 -------------------------------------------------------------------------------- /sound/eyes.mp3: -------------------------------------------------------------------------------- 
/requirement.txt:
--------------------------------------------------------------------------------
1 | opencv-python==4.5.5.64  # cv2 wheel on PyPI; plain "opencv" is not a pip package
2 | numpy==1.19.3
3 | pandas==1.1.5
4 | torch==1.8.1
5 | dlib==19.23.0
6 | joblib==1.0.1
7 | pygame==2.1.2
8 | imutils==0.5.4
9 | scipy==1.5.4
10 | pillow==8.2.0
11 | pycocotools==2.0.0
--------------------------------------------------------------------------------
/sound/eyes.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/sound/eyes.mp3
--------------------------------------------------------------------------------
/sound/yawn.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dou-noki/Driver-detection-based-on-OpenPose-and-RandomForest/11d31a55932e2c8d7bc1ec5c23a86de7527b3fa8/sound/yawn.mp3
--------------------------------------------------------------------------------
/val.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import cv2
3 | import json
4 | import math
5 | import numpy as np
6 | from pycocotools.coco import COCO
7 | from pycocotools.cocoeval import COCOeval
8 | 
9 | import torch
10 | 
11 | from datasets.coco import CocoValDataset
12 | from models.with_mobilenet import PoseEstimationWithMobileNet
13 | from modules.keypoints import extract_keypoints, group_keypoints
14 | from modules.load_state import load_state
15 | 
16 | 
17 | def run_coco_eval(gt_file_path, dt_file_path):
18 |     annotation_type = 'keypoints'
19 |     print('Running test for {} results.'.format(annotation_type))
20 | 
21 |     coco_gt = COCO(gt_file_path)
22 |     coco_dt = coco_gt.loadRes(dt_file_path)
23 | 
24 |     result = COCOeval(coco_gt, coco_dt, annotation_type)
25 |     result.evaluate()
26 |     result.accumulate()
27 |     result.summarize()
28 | 
29 | 
30 | def normalize(img, img_mean, img_scale):
31 |     img = np.array(img, dtype=np.float32)
32 |     img = (img - img_mean) * img_scale
33 |     return img
34 | 
35 | 
36 | def pad_width(img, stride, pad_value, min_dims):
37 |     h, w, _ = img.shape
38 |     h = min(min_dims[0], h)
39 |     min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride  # round height up to a stride multiple
40 |     min_dims[1] = max(min_dims[1], w)
41 |     min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride  # round width up to a stride multiple
42 |     pad = []
43 |     pad.append(int(math.floor((min_dims[0] - h) / 2.0)))
44 |     pad.append(int(math.floor((min_dims[1] - w) / 2.0)))
45 |     pad.append(int(min_dims[0] - h - pad[0]))
46 |     pad.append(int(min_dims[1] - w - pad[1]))
47 |     padded_img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3],
48 |                                     cv2.BORDER_CONSTANT, value=pad_value)
49 |     return padded_img, pad
50 | 
51 | 
52 | def convert_to_coco_format(pose_entries, all_keypoints):
53 |     coco_keypoints = []
54 |     scores = []
55 |     for n in range(len(pose_entries)):
56 |         if len(pose_entries[n]) == 0:
57 |             continue
58 |         keypoints = [0] * 17 * 3
59 |         to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
60 |         person_score = pose_entries[n][-2]
61 |         position_id = -1
62 |         for keypoint_id in pose_entries[n][:-2]:
63 |             position_id += 1
64 |             if position_id == 1:  # no 'neck' in COCO
65 |                 continue
66 | 
67 |             cx, cy, score, visibility = 0, 0, 0, 0  # keypoint not found
68 |             if keypoint_id != -1:
69 |                 cx, cy, score = all_keypoints[int(keypoint_id), 0:3]
70 |                 cx = cx + 0.5
71 |                 cy = cy + 0.5
72 |                 visibility = 1
73 |             keypoints[to_coco_map[position_id] * 3 + 0] = cx
74 |             keypoints[to_coco_map[position_id] * 3 + 1] = cy
75 |             keypoints[to_coco_map[position_id] * 3 + 2] = visibility
76 |         coco_keypoints.append(keypoints)
77 |         scores.append(person_score * max(0, (pose_entries[n][-1] - 1)))  # -1 for 'neck'
78 |     return coco_keypoints, scores
79 | 
80 | 
81 | def infer(net, img, scales, base_height, stride, pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
82 |     normed_img = normalize(img, img_mean, img_scale)
83 |     height, width, _ = normed_img.shape
84 |     scales_ratios = [scale * base_height / float(height) for scale in scales]
85 |     avg_heatmaps = np.zeros((height, width, 19), dtype=np.float32)  # 18 keypoints + background
86 |     avg_pafs = np.zeros((height, width, 38), dtype=np.float32)  # 19 limbs x (x, y) components
87 | 
88 |     for ratio in scales_ratios:
89 |         scaled_img = cv2.resize(normed_img, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
90 |         min_dims = [base_height, max(scaled_img.shape[1], base_height)]
91 |         padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
92 | 
93 |         tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float().cuda()
94 |         stages_output = net(tensor_img)
95 | 
96 |         stage2_heatmaps = stages_output[-2]
97 |         heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
98 |         heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
99 |         heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3], :]
100 |         heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
101 |         avg_heatmaps = avg_heatmaps + heatmaps / len(scales_ratios)  # running average over scales
102 | 
103 |         stage2_pafs = stages_output[-1]
104 |         pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
105 |         pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
106 |         pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
107 |         pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
108 |         avg_pafs = avg_pafs + pafs / len(scales_ratios)
109 | 
110 |     return avg_heatmaps, avg_pafs
111 | 
112 | 
113 | def evaluate(labels, output_name, images_folder, net, multiscale=False, visualize=False):
114 |     net = net.cuda().eval()
115 |     base_height = 368
116 |     scales = [1]
117 |     if multiscale:
118 |         scales = [0.5, 1.0, 1.5, 2.0]
119 |     stride = 8
120 | 
121 |     dataset = CocoValDataset(labels, images_folder)
122 |     coco_result = []
123 |     for sample in dataset:
124 |         file_name = sample['file_name']
125 |         img = sample['img']
126 | 
127 |         avg_heatmaps, avg_pafs = infer(net, img, scales, base_height, stride)
128 | 
129 |         total_keypoints_num = 0
130 |         all_keypoints_by_type = []
131 |         for kpt_idx in range(18):  # 19th for bg
132 |             total_keypoints_num += extract_keypoints(avg_heatmaps[:, :, kpt_idx], all_keypoints_by_type, total_keypoints_num)
133 | 
134 |         pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, avg_pafs)
135 | 
136 |         coco_keypoints, scores = convert_to_coco_format(pose_entries, all_keypoints)
137 | 
138 |         image_id = int(file_name[0:file_name.rfind('.')])
139 |         for idx in range(len(coco_keypoints)):
140 |             coco_result.append({
141 |                 'image_id': image_id,
142 |                 'category_id': 1,  # person
143 |                 'keypoints': coco_keypoints[idx],
144 |                 'score': scores[idx]
145 |             })
146 | 
147 |         if visualize:
148 |             for keypoints in coco_keypoints:
149 |                 for idx in range(len(keypoints) // 3):
150 |                     cv2.circle(img, (int(keypoints[idx * 3]), int(keypoints[idx * 3 + 1])),
151 |                                3, (255, 0, 255), -1)
152 |             cv2.imshow('keypoints', img)
153 |             key = cv2.waitKey()
154 |             if key == 27:  # esc
155 |                 return
156 | 
157 |     with open(output_name, 'w') as f:
158 |         json.dump(coco_result, f, indent=4)
159 | 
160 |     run_coco_eval(labels, output_name)
--------------------------------------------------------------------------------
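val.py as dumped here ends at run_coco_eval() and contains no __main__ block, although argparse is imported. Below is a minimal driver sketch showing one way to call evaluate(); it is not the repo's own CLI, the checkpoint and annotation paths are hypothetical placeholders, and a CUDA-capable GPU is required because evaluate() calls net.cuda().

import torch

from models.with_mobilenet import PoseEstimationWithMobileNet
from modules.load_state import load_state
from val import evaluate

net = PoseEstimationWithMobileNet()
# Hypothetical checkpoint file: substitute whichever trained OpenPose weights you have.
checkpoint = torch.load('checkpoint_iter_370000.pth', map_location='cpu')
load_state(net, checkpoint)

# Hypothetical paths: COCO-format keypoint annotations and the matching image folder.
evaluate(labels='val_labels.json',
         output_name='detections.json',  # predictions are dumped here, then scored
         images_folder='val_images/',
         net=net,
         multiscale=False)               # True averages over scales [0.5, 1.0, 1.5, 2.0]

Running this writes detections.json in COCO results format and prints the standard COCOeval keypoint AP summary via run_coco_eval().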