├── utils ├── __init__.py └── cvfpscalc.py ├── README.md ├── sample_facedetection.py ├── sample_objectron.py ├── sample_facemesh.py ├── LICENSE ├── sample_hand.py ├── sample_pose.py └── sample_holistic.py /utils/__init__.py: -------------------------------------------------------------------------------- 1 | from utils.cvfpscalc import CvFpsCalc -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mediapipe-python 2 | Execution method 3 | 4 | ## hand pose 5 | YouTube:https://youtu.be/itG2aLR0qOU 6 | ## facemesh 7 | YouTube:https://youtu.be/MbSGLWelu0g 8 | ## holistic 9 | YouTube:https://youtu.be/Zs_rQl0te00 10 | ## objectron 11 | YouTube:https://youtu.be/YgwTU2s_Rg8 12 | 13 | ## Environment 14 | pip install opencv-python 15 | pip install mediapipe 16 | -------------------------------------------------------------------------------- /utils/cvfpscalc.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | import cv2 as cv 3 | 4 | 5 | class CvFpsCalc(object): 6 | def __init__(self, buffer_len=1): 7 | self._start_tick = cv.getTickCount() 8 | self._freq = 1000.0 / cv.getTickFrequency() 9 | self._difftimes = deque(maxlen=buffer_len) 10 | 11 | def get(self): 12 | current_tick = cv.getTickCount() 13 | different_time = (current_tick - self._start_tick) * self._freq 14 | self._start_tick = current_tick 15 | 16 | self._difftimes.append(different_time) 17 | 18 | fps = 1000.0 / (sum(self._difftimes) / len(self._difftimes)) 19 | fps_rounded = round(fps, 2) 20 | 21 | return fps_rounded 22 | -------------------------------------------------------------------------------- /sample_facedetection.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import copy 4 | import argparse 5 | 6 | import cv2 as cv 7 
| import numpy as np 8 | import mediapipe as mp 9 | 10 | from utils import CvFpsCalc 11 | 12 | 13 | def get_args(): 14 | parser = argparse.ArgumentParser() 15 | 16 | parser.add_argument("--device", type=int, default=0) 17 | parser.add_argument("--width", help='cap width', type=int, default=960) 18 | parser.add_argument("--height", help='cap height', type=int, default=540) 19 | 20 | parser.add_argument("--min_detection_confidence", 21 | help='min_detection_confidence', 22 | type=float, 23 | default=0.7) 24 | 25 | parser.add_argument('--use_brect', action='store_true') 26 | 27 | args = parser.parse_args() 28 | 29 | return args 30 | 31 | 32 | def main(): 33 | # 引数解析 ################################################################# 34 | args = get_args() 35 | 36 | cap_device = args.device 37 | cap_width = args.width 38 | cap_height = args.height 39 | 40 | min_detection_confidence = args.min_detection_confidence 41 | 42 | use_brect = args.use_brect 43 | 44 | # カメラ準備 ############################################################### 45 | cap = cv.VideoCapture(cap_device) 46 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width) 47 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height) 48 | 49 | # モデルロード ############################################################# 50 | mp_face_detection = mp.solutions.face_detection 51 | face_detection = mp_face_detection.FaceDetection( 52 | min_detection_confidence=min_detection_confidence) 53 | 54 | # FPS計測モジュール ######################################################## 55 | cvFpsCalc = CvFpsCalc(buffer_len=10) 56 | 57 | while True: 58 | display_fps = cvFpsCalc.get() 59 | 60 | # カメラキャプチャ ##################################################### 61 | ret, image = cap.read() 62 | if not ret: 63 | break 64 | image = cv.flip(image, 1) # ミラー表示 65 | debug_image = copy.deepcopy(image) 66 | 67 | # 検出実施 ############################################################# 68 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB) 69 | results = face_detection.process(image) 70 | 71 | # 
描画 ################################################################ 72 | if results.detections is not None: 73 | for detection in results.detections: 74 | # 描画 75 | debug_image = draw_detection(debug_image, detection) 76 | 77 | cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30), 78 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA) 79 | 80 | # キー処理(ESC:終了) ################################################# 81 | key = cv.waitKey(1) 82 | if key == 27: # ESC 83 | break 84 | 85 | # 画面反映 ############################################################# 86 | cv.imshow('MediaPipe Face Detection Demo', debug_image) 87 | 88 | cap.release() 89 | cv.destroyAllWindows() 90 | 91 | 92 | def draw_detection(image, detection): 93 | image_width, image_height = image.shape[1], image.shape[0] 94 | 95 | print(detection) 96 | print(detection.location_data.relative_keypoints[0]) 97 | print(detection.location_data.relative_keypoints[1]) 98 | print(detection.location_data.relative_keypoints[2]) 99 | print(detection.location_data.relative_keypoints[3]) 100 | print(detection.location_data.relative_keypoints[4]) 101 | print(detection.location_data.relative_keypoints[5]) 102 | 103 | # バウンディングボックス 104 | bbox = detection.location_data.relative_bounding_box 105 | bbox.xmin = int(bbox.xmin * image_width) 106 | bbox.ymin = int(bbox.ymin * image_height) 107 | bbox.width = int(bbox.width * image_width) 108 | bbox.height = int(bbox.height * image_height) 109 | 110 | cv.rectangle(image, (int(bbox.xmin), int(bbox.ymin)), 111 | (int(bbox.xmin + bbox.width), int(bbox.ymin + bbox.height)), 112 | (0, 255, 0), 2) 113 | 114 | # スコア・ラベルID 115 | cv.putText( 116 | image, 117 | str(detection.label_id[0]) + ":" + str(round(detection.score[0], 3)), 118 | (int(bbox.xmin), int(bbox.ymin) - 20), cv.FONT_HERSHEY_SIMPLEX, 1.0, 119 | (0, 255, 0), 2, cv.LINE_AA) 120 | 121 | # キーポイント:右目 122 | keypoint0 = detection.location_data.relative_keypoints[0] 123 | keypoint0.x = int(keypoint0.x * image_width) 124 | 
keypoint0.y = int(keypoint0.y * image_height) 125 | 126 | cv.circle(image, (int(keypoint0.x), int(keypoint0.y)), 5, (0, 255, 0), 2) 127 | 128 | # キーポイント:左目 129 | keypoint1 = detection.location_data.relative_keypoints[1] 130 | keypoint1.x = int(keypoint1.x * image_width) 131 | keypoint1.y = int(keypoint1.y * image_height) 132 | 133 | cv.circle(image, (int(keypoint1.x), int(keypoint1.y)), 5, (0, 255, 0), 2) 134 | 135 | # キーポイント:鼻 136 | keypoint2 = detection.location_data.relative_keypoints[2] 137 | keypoint2.x = int(keypoint2.x * image_width) 138 | keypoint2.y = int(keypoint2.y * image_height) 139 | 140 | cv.circle(image, (int(keypoint2.x), int(keypoint2.y)), 5, (0, 255, 0), 2) 141 | 142 | # キーポイント:口 143 | keypoint3 = detection.location_data.relative_keypoints[3] 144 | keypoint3.x = int(keypoint3.x * image_width) 145 | keypoint3.y = int(keypoint3.y * image_height) 146 | 147 | cv.circle(image, (int(keypoint3.x), int(keypoint3.y)), 5, (0, 255, 0), 2) 148 | 149 | # キーポイント:右耳 150 | keypoint4 = detection.location_data.relative_keypoints[4] 151 | keypoint4.x = int(keypoint4.x * image_width) 152 | keypoint4.y = int(keypoint4.y * image_height) 153 | 154 | cv.circle(image, (int(keypoint4.x), int(keypoint4.y)), 5, (0, 255, 0), 2) 155 | 156 | # キーポイント:左耳 157 | keypoint5 = detection.location_data.relative_keypoints[5] 158 | keypoint5.x = int(keypoint5.x * image_width) 159 | keypoint5.y = int(keypoint5.y * image_height) 160 | 161 | cv.circle(image, (int(keypoint5.x), int(keypoint5.y)), 5, (0, 255, 0), 2) 162 | 163 | return image 164 | 165 | 166 | if __name__ == '__main__': 167 | main() 168 | -------------------------------------------------------------------------------- /sample_objectron.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import copy 4 | import argparse 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | import mediapipe as mp 9 | 10 | from utils import CvFpsCalc 11 | 12 | 
def get_args():
    """Parse command-line arguments for the Objectron demo."""
    parser = argparse.ArgumentParser()

    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--width", help='cap width', type=int, default=960)
    parser.add_argument("--height", help='cap height', type=int, default=540)

    parser.add_argument('--static_image_mode', action='store_true')
    parser.add_argument("--max_num_objects",
                        help='max_num_objects',
                        type=int,
                        default=5)
    parser.add_argument("--min_detection_confidence",
                        help='min_detection_confidence',
                        type=float,
                        default=0.5)
    parser.add_argument("--min_tracking_confidence",
                        help='min_tracking_confidence',
                        # BUG FIX: was type=int — argparse would reject any
                        # fractional value such as 0.5 passed on the CLI.
                        type=float,
                        default=0.99)
    parser.add_argument("--model_name",
                        help='model_name',
                        type=str,
                        default='Cup')  # {'Shoe', 'Chair', 'Cup', 'Camera'}

    args = parser.parse_args()

    return args


def main():
    # Parse arguments ########################################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    static_image_mode = args.static_image_mode
    max_num_objects = args.max_num_objects
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence
    model_name = args.model_name

    # Camera setup ###########################################################
    cap = cv.VideoCapture(cap_device)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)

    # Load model #############################################################
    mp_objectron = mp.solutions.objectron
    objectron = mp_objectron.Objectron(
        static_image_mode=static_image_mode,
        max_num_objects=max_num_objects,
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence,
        model_name=model_name,
    )
72 | mp_drawing = mp.solutions.drawing_utils 73 | 74 | # FPS計測モジュール ######################################################## 75 | cvFpsCalc = CvFpsCalc(buffer_len=10) 76 | 77 | while True: 78 | display_fps = cvFpsCalc.get() 79 | 80 | # カメラキャプチャ ##################################################### 81 | ret, image = cap.read() 82 | if not ret: 83 | break 84 | image = cv.flip(image, 1) # ミラー表示 85 | debug_image = copy.deepcopy(image) 86 | 87 | # 検出実施 ############################################################# 88 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB) 89 | results = objectron.process(image) 90 | 91 | # 描画 ################################################################ 92 | if results.detected_objects is not None: 93 | for detected_object in results.detected_objects: 94 | mp_drawing.draw_landmarks(debug_image, 95 | detected_object.landmarks_2d, 96 | mp_objectron.BOX_CONNECTIONS) 97 | mp_drawing.draw_axis(debug_image, detected_object.rotation, 98 | detected_object.translation) 99 | 100 | # キーポイント確認用 101 | draw_landmarks(debug_image, detected_object.landmarks_2d) 102 | 103 | cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30), 104 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA) 105 | 106 | # キー処理(ESC:終了) ################################################# 107 | key = cv.waitKey(1) 108 | if key == 27: # ESC 109 | break 110 | 111 | # 画面反映 ############################################################# 112 | cv.imshow('MediaPipe Objectron Demo', debug_image) 113 | 114 | cap.release() 115 | cv.destroyAllWindows() 116 | 117 | 118 | def draw_landmarks(image, landmarks): 119 | image_width, image_height = image.shape[1], image.shape[0] 120 | 121 | landmark_point = [] 122 | 123 | for index, landmark in enumerate(landmarks.landmark): 124 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 125 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 126 | landmark_point.append([(landmark_x, landmark_y)]) 127 | 128 | if index == 
0: # 重心 129 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 130 | if index == 1: # 131 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 132 | if index == 2: # 133 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 134 | if index == 3: # 135 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 136 | if index == 4: # 137 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 138 | if index == 5: # 139 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 140 | if index == 6: # 141 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 142 | if index == 7: # 143 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 144 | if index == 8: # 145 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 146 | 147 | return image 148 | 149 | 150 | def draw_bounding_rect(use_brect, image, brect): 151 | if use_brect: 152 | # 外接矩形 153 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]), 154 | (0, 255, 0), 2) 155 | 156 | return image 157 | 158 | 159 | if __name__ == '__main__': 160 | main() 161 | -------------------------------------------------------------------------------- /sample_facemesh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import copy 4 | import argparse 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | import mediapipe as mp 9 | 10 | from utils import CvFpsCalc 11 | 12 | 13 | def get_args(): 14 | parser = argparse.ArgumentParser() 15 | 16 | parser.add_argument("--device", type=int, default=0) 17 | parser.add_argument("--width", help='cap width', type=int, default=960) 18 | parser.add_argument("--height", help='cap height', type=int, default=540) 19 | 20 | parser.add_argument("--max_num_faces", type=int, default=1) 21 | parser.add_argument("--min_detection_confidence", 22 | help='min_detection_confidence', 23 | type=float, 24 | default=0.7) 25 | 
parser.add_argument("--min_tracking_confidence", 26 | help='min_tracking_confidence', 27 | type=int, 28 | default=0.5) 29 | 30 | parser.add_argument('--use_brect', action='store_true') 31 | 32 | args = parser.parse_args() 33 | 34 | return args 35 | 36 | 37 | def main(): 38 | # 引数解析 ################################################################# 39 | args = get_args() 40 | 41 | cap_device = args.device 42 | cap_width = args.width 43 | cap_height = args.height 44 | 45 | max_num_faces = args.max_num_faces 46 | min_detection_confidence = args.min_detection_confidence 47 | min_tracking_confidence = args.min_tracking_confidence 48 | 49 | use_brect = args.use_brect 50 | 51 | # カメラ準備 ############################################################### 52 | cap = cv.VideoCapture(cap_device) 53 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width) 54 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height) 55 | 56 | # モデルロード ############################################################# 57 | mp_face_mesh = mp.solutions.face_mesh 58 | face_mesh = mp_face_mesh.FaceMesh( 59 | max_num_faces=max_num_faces, 60 | min_detection_confidence=min_detection_confidence, 61 | min_tracking_confidence=min_tracking_confidence, 62 | ) 63 | 64 | # FPS計測モジュール ######################################################## 65 | cvFpsCalc = CvFpsCalc(buffer_len=10) 66 | 67 | while True: 68 | display_fps = cvFpsCalc.get() 69 | 70 | # カメラキャプチャ ##################################################### 71 | ret, image = cap.read() 72 | if not ret: 73 | break 74 | image = cv.flip(image, 1) # ミラー表示 75 | debug_image = copy.deepcopy(image) 76 | 77 | # 検出実施 ############################################################# 78 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB) 79 | results = face_mesh.process(image) 80 | 81 | # 描画 ################################################################ 82 | if results.multi_face_landmarks is not None: 83 | for face_landmarks in results.multi_face_landmarks: 84 | # 外接矩形の計算 85 | brect = 
calc_bounding_rect(debug_image, face_landmarks) 86 | # 描画 87 | debug_image = draw_landmarks(debug_image, face_landmarks) 88 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 89 | 90 | cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30), 91 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA) 92 | 93 | # キー処理(ESC:終了) ################################################# 94 | key = cv.waitKey(1) 95 | if key == 27: # ESC 96 | break 97 | 98 | # 画面反映 ############################################################# 99 | cv.imshow('MediaPipe Face Mesh Demo', debug_image) 100 | 101 | cap.release() 102 | cv.destroyAllWindows() 103 | 104 | 105 | def calc_bounding_rect(image, landmarks): 106 | image_width, image_height = image.shape[1], image.shape[0] 107 | 108 | landmark_array = np.empty((0, 2), int) 109 | 110 | for _, landmark in enumerate(landmarks.landmark): 111 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 112 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 113 | 114 | landmark_point = [np.array((landmark_x, landmark_y))] 115 | 116 | landmark_array = np.append(landmark_array, landmark_point, axis=0) 117 | 118 | x, y, w, h = cv.boundingRect(landmark_array) 119 | 120 | return [x, y, x + w, y + h] 121 | 122 | 123 | def draw_landmarks(image, landmarks): 124 | image_width, image_height = image.shape[1], image.shape[0] 125 | 126 | landmark_point = [] 127 | 128 | for index, landmark in enumerate(landmarks.landmark): 129 | if landmark.visibility < 0 or landmark.presence < 0: 130 | continue 131 | 132 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 133 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 134 | 135 | landmark_point.append((landmark_x, landmark_y)) 136 | 137 | cv.circle(image, (landmark_x, landmark_y), 1, (0, 255, 0), 1) 138 | 139 | if len(landmark_point) > 0: 140 | # 参考:https://github.com/tensorflow/tfjs-models/blob/master/facemesh/mesh_map.jpg 141 | 142 | # 
左眉毛(55:内側、46:外側) 143 | cv.line(image, landmark_point[55], landmark_point[65], (0, 255, 0), 2) 144 | cv.line(image, landmark_point[65], landmark_point[52], (0, 255, 0), 2) 145 | cv.line(image, landmark_point[52], landmark_point[53], (0, 255, 0), 2) 146 | cv.line(image, landmark_point[53], landmark_point[46], (0, 255, 0), 2) 147 | 148 | # 右眉毛(285:内側、276:外側) 149 | cv.line(image, landmark_point[285], landmark_point[295], (0, 255, 0), 150 | 2) 151 | cv.line(image, landmark_point[295], landmark_point[282], (0, 255, 0), 152 | 2) 153 | cv.line(image, landmark_point[282], landmark_point[283], (0, 255, 0), 154 | 2) 155 | cv.line(image, landmark_point[283], landmark_point[276], (0, 255, 0), 156 | 2) 157 | 158 | # 左目 (133:目頭、246:目尻) 159 | cv.line(image, landmark_point[133], landmark_point[173], (0, 255, 0), 160 | 2) 161 | cv.line(image, landmark_point[173], landmark_point[157], (0, 255, 0), 162 | 2) 163 | cv.line(image, landmark_point[157], landmark_point[158], (0, 255, 0), 164 | 2) 165 | cv.line(image, landmark_point[158], landmark_point[159], (0, 255, 0), 166 | 2) 167 | cv.line(image, landmark_point[159], landmark_point[160], (0, 255, 0), 168 | 2) 169 | cv.line(image, landmark_point[160], landmark_point[161], (0, 255, 0), 170 | 2) 171 | cv.line(image, landmark_point[161], landmark_point[246], (0, 255, 0), 172 | 2) 173 | 174 | cv.line(image, landmark_point[246], landmark_point[163], (0, 255, 0), 175 | 2) 176 | cv.line(image, landmark_point[163], landmark_point[144], (0, 255, 0), 177 | 2) 178 | cv.line(image, landmark_point[144], landmark_point[145], (0, 255, 0), 179 | 2) 180 | cv.line(image, landmark_point[145], landmark_point[153], (0, 255, 0), 181 | 2) 182 | cv.line(image, landmark_point[153], landmark_point[154], (0, 255, 0), 183 | 2) 184 | cv.line(image, landmark_point[154], landmark_point[155], (0, 255, 0), 185 | 2) 186 | cv.line(image, landmark_point[155], landmark_point[133], (0, 255, 0), 187 | 2) 188 | 189 | # 右目 (362:目頭、466:目尻) 190 | cv.line(image, 
landmark_point[362], landmark_point[398], (0, 255, 0), 191 | 2) 192 | cv.line(image, landmark_point[398], landmark_point[384], (0, 255, 0), 193 | 2) 194 | cv.line(image, landmark_point[384], landmark_point[385], (0, 255, 0), 195 | 2) 196 | cv.line(image, landmark_point[385], landmark_point[386], (0, 255, 0), 197 | 2) 198 | cv.line(image, landmark_point[386], landmark_point[387], (0, 255, 0), 199 | 2) 200 | cv.line(image, landmark_point[387], landmark_point[388], (0, 255, 0), 201 | 2) 202 | cv.line(image, landmark_point[388], landmark_point[466], (0, 255, 0), 203 | 2) 204 | 205 | cv.line(image, landmark_point[466], landmark_point[390], (0, 255, 0), 206 | 2) 207 | cv.line(image, landmark_point[390], landmark_point[373], (0, 255, 0), 208 | 2) 209 | cv.line(image, landmark_point[373], landmark_point[374], (0, 255, 0), 210 | 2) 211 | cv.line(image, landmark_point[374], landmark_point[380], (0, 255, 0), 212 | 2) 213 | cv.line(image, landmark_point[380], landmark_point[381], (0, 255, 0), 214 | 2) 215 | cv.line(image, landmark_point[381], landmark_point[382], (0, 255, 0), 216 | 2) 217 | cv.line(image, landmark_point[382], landmark_point[362], (0, 255, 0), 218 | 2) 219 | 220 | # 口 (308:右端、78:左端) 221 | cv.line(image, landmark_point[308], landmark_point[415], (0, 255, 0), 222 | 2) 223 | cv.line(image, landmark_point[415], landmark_point[310], (0, 255, 0), 224 | 2) 225 | cv.line(image, landmark_point[310], landmark_point[311], (0, 255, 0), 226 | 2) 227 | cv.line(image, landmark_point[311], landmark_point[312], (0, 255, 0), 228 | 2) 229 | cv.line(image, landmark_point[312], landmark_point[13], (0, 255, 0), 2) 230 | cv.line(image, landmark_point[13], landmark_point[82], (0, 255, 0), 2) 231 | cv.line(image, landmark_point[82], landmark_point[81], (0, 255, 0), 2) 232 | cv.line(image, landmark_point[81], landmark_point[80], (0, 255, 0), 2) 233 | cv.line(image, landmark_point[80], landmark_point[191], (0, 255, 0), 2) 234 | cv.line(image, landmark_point[191], landmark_point[78], (0, 
255, 0), 2) 235 | 236 | cv.line(image, landmark_point[78], landmark_point[95], (0, 255, 0), 2) 237 | cv.line(image, landmark_point[95], landmark_point[88], (0, 255, 0), 2) 238 | cv.line(image, landmark_point[88], landmark_point[178], (0, 255, 0), 2) 239 | cv.line(image, landmark_point[178], landmark_point[87], (0, 255, 0), 2) 240 | cv.line(image, landmark_point[87], landmark_point[14], (0, 255, 0), 2) 241 | cv.line(image, landmark_point[14], landmark_point[317], (0, 255, 0), 2) 242 | cv.line(image, landmark_point[317], landmark_point[402], (0, 255, 0), 243 | 2) 244 | cv.line(image, landmark_point[402], landmark_point[318], (0, 255, 0), 245 | 2) 246 | cv.line(image, landmark_point[318], landmark_point[324], (0, 255, 0), 247 | 2) 248 | cv.line(image, landmark_point[324], landmark_point[308], (0, 255, 0), 249 | 2) 250 | 251 | return image 252 | 253 | 254 | def draw_bounding_rect(use_brect, image, brect): 255 | if use_brect: 256 | # 外接矩形 257 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]), 258 | (0, 255, 0), 2) 259 | 260 | return image 261 | 262 | 263 | if __name__ == '__main__': 264 | main() 265 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /sample_hand.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import copy 4 | import argparse 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | import mediapipe as mp 9 | 10 | from utils import CvFpsCalc 11 | 12 | 13 | def get_args(): 14 | parser = argparse.ArgumentParser() 15 | 16 | parser.add_argument("--device", type=int, default=0) 17 | parser.add_argument("--width", help='cap width', type=int, default=960) 18 | parser.add_argument("--height", help='cap height', type=int, default=540) 19 | 20 | parser.add_argument("--max_num_hands", type=int, default=2) 21 | parser.add_argument("--min_detection_confidence", 22 | help='min_detection_confidence', 23 | type=float, 24 | default=0.7) 25 | parser.add_argument("--min_tracking_confidence", 26 | help='min_tracking_confidence', 27 | type=int, 28 | default=0.5) 29 | 30 | parser.add_argument('--use_brect', action='store_true') 31 | 32 | args = parser.parse_args() 33 | 34 | return args 35 | 36 | 37 | def 
def main():
    # Parse command-line arguments #############################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    max_num_hands = args.max_num_hands
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence

    use_brect = args.use_brect

    # Camera setup #############################################################
    cap = cv.VideoCapture(cap_device)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)

    # Model load ###############################################################
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(
        max_num_hands=max_num_hands,
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence,
    )

    # FPS measurement module ###################################################
    cvFpsCalc = CvFpsCalc(buffer_len=10)

    while True:
        display_fps = cvFpsCalc.get()

        # Camera capture #######################################################
        ret, image = cap.read()
        if not ret:
            break
        image = cv.flip(image, 1)  # mirror display
        debug_image = copy.deepcopy(image)

        # Run detection ########################################################
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        results = hands.process(image)

        # Drawing ##############################################################
        if results.multi_hand_landmarks is not None:
            for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
                                                  results.multi_handedness):
                # Palm centroid
                cx, cy = calc_palm_moment(debug_image, hand_landmarks)
                # Bounding rectangle
                brect = calc_bounding_rect(debug_image, hand_landmarks)
                # Draw
                debug_image = draw_landmarks(debug_image, cx, cy,
                                             hand_landmarks, handedness)
                debug_image = draw_bounding_rect(use_brect, debug_image, brect)

        cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
                   cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)

        # Key handling (ESC: quit) #############################################
        key = cv.waitKey(1)
        if key == 27:  # ESC
            break

        # Screen update ########################################################
        cv.imshow('MediaPipe Hand Demo', debug_image)

    cap.release()
    cv.destroyAllWindows()


# Landmark indices that approximate the palm region:
# 0/1 = wrist, 5/9/13/17 = bases of index/middle/ring/little fingers.
PALM_INDEXES = (0, 1, 5, 9, 13, 17)

# (start, end) landmark-index pairs forming the hand skeleton.
HAND_CONNECTIONS = (
    (2, 3), (3, 4),                             # thumb
    (5, 6), (6, 7), (7, 8),                     # index finger
    (9, 10), (10, 11), (11, 12),                # middle finger
    (13, 14), (14, 15), (15, 16),               # ring finger
    (17, 18), (18, 19), (19, 20),               # little finger
    (0, 1), (1, 2), (2, 5), (5, 9),             # palm outline
    (9, 13), (13, 17), (17, 0),
)

# Fingertip landmark indices (drawn with a larger ring).
FINGERTIP_INDEXES = (4, 8, 12, 16, 20)


def calc_palm_moment(image, landmarks):
    """Return (cx, cy), the centroid of the palm landmarks in pixel coords.

    Uses the image moments of the wrist and finger-base points; returns
    (0, 0) if the moment area is zero (degenerate point set).
    """
    image_width, image_height = image.shape[1], image.shape[0]

    # Collect only the palm-region points, clamped to the image bounds.
    palm_points = []
    for index, landmark in enumerate(landmarks.landmark):
        if index not in PALM_INDEXES:
            continue
        landmark_x = min(int(landmark.x * image_width), image_width - 1)
        landmark_y = min(int(landmark.y * image_height), image_height - 1)
        palm_points.append((landmark_x, landmark_y))

    M = cv.moments(np.array(palm_points, dtype=int))
    cx, cy = 0, 0
    if M['m00'] != 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

    return cx, cy


def calc_bounding_rect(image, landmarks):
    """Return [x1, y1, x2, y2] of the axis-aligned box enclosing all landmarks."""
    image_width, image_height = image.shape[1], image.shape[0]

    landmark_array = np.array(
        [[min(int(landmark.x * image_width), image_width - 1),
          min(int(landmark.y * image_height), image_height - 1)]
         for landmark in landmarks.landmark],
        dtype=int)

    x, y, w, h = cv.boundingRect(landmark_array)

    return [x, y, x + w, y + h]


def draw_landmarks(image, cx, cy, landmarks, handedness):
    """Draw hand keypoints, skeleton lines, palm centroid and handedness label.

    Returns the image with the overlays drawn in place.
    """
    image_width, image_height = image.shape[1], image.shape[0]

    landmark_point = []

    # Keypoints
    for index, landmark in enumerate(landmarks.landmark):
        if landmark.visibility < 0 or landmark.presence < 0:
            continue

        landmark_x = min(int(landmark.x * image_width), image_width - 1)
        landmark_y = min(int(landmark.y * image_height), image_height - 1)

        landmark_point.append((landmark_x, landmark_y))

        cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index in FINGERTIP_INDEXES:
            # Fingertips get an additional, larger ring.
            cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)

    # Skeleton lines
    if len(landmark_point) > 0:
        for start, end in HAND_CONNECTIONS:
            cv.line(image, landmark_point[start], landmark_point[end],
                    (0, 255, 0), 2)

    # Centroid + handedness (first letter of the label only, e.g. 'L'/'R')
    if len(landmark_point) > 0:
        # handedness.classification[0].index
        # handedness.classification[0].score

        cv.circle(image, (cx, cy), 12, (0, 255, 0), 2)
        cv.putText(image, handedness.classification[0].label[0],
                   (cx - 6, cy + 6), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0),
                   2, cv.LINE_AA)

    return image


def draw_bounding_rect(use_brect, image, brect):
    """Draw the bounding rectangle [x1, y1, x2, y2] if use_brect is truthy."""
    if use_brect:
        cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
                     (0, 255, 0), 2)

    return image


if __name__ == '__main__':
    main()
import copy
import argparse

import cv2 as cv
import numpy as np
import mediapipe as mp

from utils import CvFpsCalc


def get_args():
    """Parse and return command-line arguments for the pose demo."""
    parser = argparse.ArgumentParser()

    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--width", help='cap width', type=int, default=960)
    parser.add_argument("--height", help='cap height', type=int, default=540)

    parser.add_argument('--upper_body_only', action='store_true')
    parser.add_argument("--min_detection_confidence",
                        help='min_detection_confidence',
                        type=float,
                        default=0.5)
    # BUGFIX: was type=int, which rejects fractional values like 0.5 on the
    # command line (int("0.5") raises ValueError) and truncates intent.
    parser.add_argument("--min_tracking_confidence",
                        help='min_tracking_confidence',
                        type=float,
                        default=0.5)

    parser.add_argument('--use_brect', action='store_true')

    args = parser.parse_args()

    return args


def main():
    # Parse command-line arguments #############################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    upper_body_only = args.upper_body_only
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence

    use_brect = args.use_brect

    # Camera setup #############################################################
    cap = cv.VideoCapture(cap_device)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)

    # Model load ###############################################################
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose(
        upper_body_only=upper_body_only,
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence,
    )

    # FPS measurement module ###################################################
    cvFpsCalc = CvFpsCalc(buffer_len=10)

    while True:
        display_fps = cvFpsCalc.get()

        # Camera capture #######################################################
        ret, image = cap.read()
        if not ret:
            break
        image = cv.flip(image, 1)  # mirror display
        debug_image = copy.deepcopy(image)

        # Run detection ########################################################
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        results = pose.process(image)

        # Drawing ##############################################################
        if results.pose_landmarks is not None:
            # Bounding rectangle
            brect = calc_bounding_rect(debug_image, results.pose_landmarks)
            # Draw
            debug_image = draw_landmarks(debug_image, results.pose_landmarks,
                                         upper_body_only)
            debug_image = draw_bounding_rect(use_brect, debug_image, brect)

        cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
                   cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)

        # Key handling (ESC: quit) #############################################
        key = cv.waitKey(1)
        if key == 27:  # ESC
            break

        # Screen update ########################################################
        cv.imshow('MediaPipe Pose Demo', debug_image)

    cap.release()
    cv.destroyAllWindows()


def calc_bounding_rect(image, landmarks):
    """Return [x1, y1, x2, y2] of the axis-aligned box enclosing all landmarks."""
    image_width, image_height = image.shape[1], image.shape[0]

    landmark_array = np.array(
        [[min(int(landmark.x * image_width), image_width - 1),
          min(int(landmark.y * image_height), image_height - 1)]
         for landmark in landmarks.landmark],
        dtype=int)

    x, y, w, h = cv.boundingRect(landmark_array)

    return [x, y, x + w, y + h]


# (start, end) landmark-index pairs for the upper-body pose skeleton.
# Index meanings follow MediaPipe Pose: 0 nose, 1-6 eyes, 7/8 ears,
# 9/10 mouth corners, 11/12 shoulders, 13/14 elbows, 15/16 wrists,
# 17-22 hands, 23/24 hips.
POSE_CONNECTIONS = (
    (1, 2), (2, 3),                          # right eye
    (4, 5), (5, 6),                          # left eye
    (9, 10),                                 # mouth
    (11, 12),                                # shoulders
    (11, 13), (13, 15),                      # right arm
    (12, 14), (14, 16),                      # left arm
    (15, 17), (17, 19), (19, 21), (21, 15),  # right hand
    (16, 18), (18, 20), (20, 22), (22, 16),  # left hand
    (11, 23), (12, 24), (23, 24),            # torso
)

# Leg connections, only drawn when the full-body model (33 landmarks) runs:
# 25/26 knees, 27/28 ankles, 29/30 heels, 31/32 foot tips.
POSE_LEG_CONNECTIONS = (
    (23, 25), (25, 27), (27, 29), (29, 31),  # right leg
    (24, 26), (26, 28), (28, 30), (30, 32),  # left leg
)


def draw_landmarks(image, landmarks, upper_body_only, visibility_th=0.5):
    """Draw pose keypoints and skeleton lines on the image.

    A keypoint/line is only drawn when the landmark visibility exceeds
    visibility_th.  The per-landmark z value is annotated unless the
    upper-body-only model is in use.  Returns the image.
    """
    image_width, image_height = image.shape[1], image.shape[0]

    landmark_point = []

    for index, landmark in enumerate(landmarks.landmark):
        landmark_x = min(int(landmark.x * image_width), image_width - 1)
        landmark_y = min(int(landmark.y * image_height), image_height - 1)
        landmark_z = landmark.z
        landmark_point.append([landmark.visibility, (landmark_x, landmark_y)])

        if landmark.visibility < visibility_th:
            continue

        cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)

        if not upper_body_only:
            cv.putText(image, "z:" + str(round(landmark_z, 3)),
                       (landmark_x - 10, landmark_y - 10),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
                       cv.LINE_AA)

    def _draw_connection(a, b):
        # Draw a line only if both endpoints are sufficiently visible.
        if (landmark_point[a][0] > visibility_th
                and landmark_point[b][0] > visibility_th):
            cv.line(image, landmark_point[a][1], landmark_point[b][1],
                    (0, 255, 0), 2)

    if len(landmark_point) > 0:
        for a, b in POSE_CONNECTIONS:
            _draw_connection(a, b)

    if len(landmark_point) > 25:
        for a, b in POSE_LEG_CONNECTIONS:
            _draw_connection(a, b)

    return image


def draw_bounding_rect(use_brect, image, brect):
    """Draw the bounding rectangle [x1, y1, x2, y2] if use_brect is truthy."""
    if use_brect:
        cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
                     (0, 255, 0), 2)

    return image


if __name__ == '__main__':
    main()
utf-8 -*- 3 | import copy 4 | import argparse 5 | 6 | import cv2 as cv 7 | import numpy as np 8 | import mediapipe as mp 9 | 10 | from utils import CvFpsCalc 11 | 12 | 13 | def get_args(): 14 | parser = argparse.ArgumentParser() 15 | 16 | parser.add_argument("--device", type=int, default=0) 17 | parser.add_argument("--width", help='cap width', type=int, default=960) 18 | parser.add_argument("--height", help='cap height', type=int, default=540) 19 | 20 | parser.add_argument('--upper_body_only', action='store_true') 21 | parser.add_argument("--min_detection_confidence", 22 | help='face mesh min_detection_confidence', 23 | type=float, 24 | default=0.5) 25 | parser.add_argument("--min_tracking_confidence", 26 | help='face mesh min_tracking_confidence', 27 | type=int, 28 | default=0.5) 29 | 30 | parser.add_argument('--use_brect', action='store_true') 31 | 32 | args = parser.parse_args() 33 | 34 | return args 35 | 36 | 37 | def main(): 38 | # 引数解析 ################################################################# 39 | args = get_args() 40 | 41 | cap_device = args.device 42 | cap_width = args.width 43 | cap_height = args.height 44 | 45 | upper_body_only = args.upper_body_only 46 | min_detection_confidence = args.min_detection_confidence 47 | min_tracking_confidence = args.min_tracking_confidence 48 | 49 | use_brect = args.use_brect 50 | 51 | # カメラ準備 ############################################################### 52 | cap = cv.VideoCapture(cap_device) 53 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width) 54 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height) 55 | 56 | # モデルロード ############################################################# 57 | mp_holistic = mp.solutions.holistic 58 | holistic = mp_holistic.Holistic( 59 | upper_body_only=upper_body_only, 60 | min_detection_confidence=min_detection_confidence, 61 | min_tracking_confidence=min_tracking_confidence, 62 | ) 63 | 64 | # FPS計測モジュール ######################################################## 65 | cvFpsCalc = 
CvFpsCalc(buffer_len=10) 66 | 67 | while True: 68 | display_fps = cvFpsCalc.get() 69 | 70 | # カメラキャプチャ ##################################################### 71 | ret, image = cap.read() 72 | if not ret: 73 | break 74 | image = cv.flip(image, 1) # ミラー表示 75 | debug_image = copy.deepcopy(image) 76 | 77 | # 検出実施 ############################################################# 78 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB) 79 | 80 | image.flags.writeable = False 81 | results = holistic.process(image) 82 | image.flags.writeable = True 83 | 84 | # Face Mesh ########################################################### 85 | face_landmarks = results.face_landmarks 86 | if face_landmarks is not None: 87 | # 外接矩形の計算 88 | brect = calc_bounding_rect(debug_image, face_landmarks) 89 | # 描画 90 | debug_image = draw_face_landmarks(debug_image, face_landmarks) 91 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 92 | 93 | # Pose ############################################################### 94 | pose_landmarks = results.pose_landmarks 95 | if pose_landmarks is not None: 96 | # 外接矩形の計算 97 | brect = calc_bounding_rect(debug_image, pose_landmarks) 98 | # 描画 99 | debug_image = draw_pose_landmarks(debug_image, pose_landmarks, 100 | upper_body_only) 101 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 102 | 103 | # Hands ############################################################### 104 | left_hand_landmarks = results.left_hand_landmarks 105 | right_hand_landmarks = results.right_hand_landmarks 106 | # 左手 107 | if left_hand_landmarks is not None: 108 | # 手の平重心計算 109 | cx, cy = calc_palm_moment(debug_image, left_hand_landmarks) 110 | # 外接矩形の計算 111 | brect = calc_bounding_rect(debug_image, left_hand_landmarks) 112 | # 描画 113 | debug_image = draw_hands_landmarks(debug_image, cx, cy, 114 | left_hand_landmarks, 115 | upper_body_only, 'R') 116 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 117 | # 右手 118 | if right_hand_landmarks is not None: 
119 | # 手の平重心計算 120 | cx, cy = calc_palm_moment(debug_image, right_hand_landmarks) 121 | # 外接矩形の計算 122 | brect = calc_bounding_rect(debug_image, right_hand_landmarks) 123 | # 描画 124 | debug_image = draw_hands_landmarks(debug_image, cx, cy, 125 | right_hand_landmarks, 126 | upper_body_only, 'L') 127 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 128 | 129 | cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30), 130 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA) 131 | 132 | # キー処理(ESC:終了) ################################################# 133 | key = cv.waitKey(1) 134 | if key == 27: # ESC 135 | break 136 | 137 | # 画面反映 ############################################################# 138 | cv.imshow('MediaPipe Holistic Demo', debug_image) 139 | 140 | cap.release() 141 | cv.destroyAllWindows() 142 | 143 | 144 | def calc_palm_moment(image, landmarks): 145 | image_width, image_height = image.shape[1], image.shape[0] 146 | 147 | palm_array = np.empty((0, 2), int) 148 | 149 | for index, landmark in enumerate(landmarks.landmark): 150 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 151 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 152 | 153 | landmark_point = [np.array((landmark_x, landmark_y))] 154 | 155 | if index == 0: # 手首1 156 | palm_array = np.append(palm_array, landmark_point, axis=0) 157 | if index == 1: # 手首2 158 | palm_array = np.append(palm_array, landmark_point, axis=0) 159 | if index == 5: # 人差指:付け根 160 | palm_array = np.append(palm_array, landmark_point, axis=0) 161 | if index == 9: # 中指:付け根 162 | palm_array = np.append(palm_array, landmark_point, axis=0) 163 | if index == 13: # 薬指:付け根 164 | palm_array = np.append(palm_array, landmark_point, axis=0) 165 | if index == 17: # 小指:付け根 166 | palm_array = np.append(palm_array, landmark_point, axis=0) 167 | M = cv.moments(palm_array) 168 | cx, cy = 0, 0 169 | if M['m00'] != 0: 170 | cx = int(M['m10'] / M['m00']) 171 | cy = int(M['m01'] / 
M['m00']) 172 | 173 | return cx, cy 174 | 175 | 176 | def calc_bounding_rect(image, landmarks): 177 | image_width, image_height = image.shape[1], image.shape[0] 178 | 179 | landmark_array = np.empty((0, 2), int) 180 | 181 | for _, landmark in enumerate(landmarks.landmark): 182 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 183 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 184 | 185 | landmark_point = [np.array((landmark_x, landmark_y))] 186 | 187 | landmark_array = np.append(landmark_array, landmark_point, axis=0) 188 | 189 | x, y, w, h = cv.boundingRect(landmark_array) 190 | 191 | return [x, y, x + w, y + h] 192 | 193 | 194 | def draw_hands_landmarks(image, 195 | cx, 196 | cy, 197 | landmarks, 198 | upper_body_only, 199 | handedness_str='R'): 200 | image_width, image_height = image.shape[1], image.shape[0] 201 | 202 | landmark_point = [] 203 | 204 | # キーポイント 205 | for index, landmark in enumerate(landmarks.landmark): 206 | if landmark.visibility < 0 or landmark.presence < 0: 207 | continue 208 | 209 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 210 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 211 | landmark_z = landmark.z 212 | 213 | landmark_point.append((landmark_x, landmark_y)) 214 | 215 | if index == 0: # 手首1 216 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 217 | if index == 1: # 手首2 218 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 219 | if index == 2: # 親指:付け根 220 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 221 | if index == 3: # 親指:第1関節 222 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 223 | if index == 4: # 親指:指先 224 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 225 | cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2) 226 | if index == 5: # 人差指:付け根 227 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 228 | if index == 6: # 人差指:第2関節 229 | 
cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 230 | if index == 7: # 人差指:第1関節 231 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 232 | if index == 8: # 人差指:指先 233 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 234 | cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2) 235 | if index == 9: # 中指:付け根 236 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 237 | if index == 10: # 中指:第2関節 238 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 239 | if index == 11: # 中指:第1関節 240 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 241 | if index == 12: # 中指:指先 242 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 243 | cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2) 244 | if index == 13: # 薬指:付け根 245 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 246 | if index == 14: # 薬指:第2関節 247 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 248 | if index == 15: # 薬指:第1関節 249 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 250 | if index == 16: # 薬指:指先 251 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 252 | cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2) 253 | if index == 17: # 小指:付け根 254 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 255 | if index == 18: # 小指:第2関節 256 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 257 | if index == 19: # 小指:第1関節 258 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 259 | if index == 20: # 小指:指先 260 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 261 | cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2) 262 | 263 | if not upper_body_only: 264 | cv.putText(image, "z:" + str(round(landmark_z, 3)), 265 | (landmark_x - 10, landmark_y - 10), 266 | cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, 267 | cv.LINE_AA) 268 | 269 | # 接続線 270 | if len(landmark_point) > 0: 271 | # 親指 272 
| cv.line(image, landmark_point[2], landmark_point[3], (0, 255, 0), 2) 273 | cv.line(image, landmark_point[3], landmark_point[4], (0, 255, 0), 2) 274 | 275 | # 人差指 276 | cv.line(image, landmark_point[5], landmark_point[6], (0, 255, 0), 2) 277 | cv.line(image, landmark_point[6], landmark_point[7], (0, 255, 0), 2) 278 | cv.line(image, landmark_point[7], landmark_point[8], (0, 255, 0), 2) 279 | 280 | # 中指 281 | cv.line(image, landmark_point[9], landmark_point[10], (0, 255, 0), 2) 282 | cv.line(image, landmark_point[10], landmark_point[11], (0, 255, 0), 2) 283 | cv.line(image, landmark_point[11], landmark_point[12], (0, 255, 0), 2) 284 | 285 | # 薬指 286 | cv.line(image, landmark_point[13], landmark_point[14], (0, 255, 0), 2) 287 | cv.line(image, landmark_point[14], landmark_point[15], (0, 255, 0), 2) 288 | cv.line(image, landmark_point[15], landmark_point[16], (0, 255, 0), 2) 289 | 290 | # 小指 291 | cv.line(image, landmark_point[17], landmark_point[18], (0, 255, 0), 2) 292 | cv.line(image, landmark_point[18], landmark_point[19], (0, 255, 0), 2) 293 | cv.line(image, landmark_point[19], landmark_point[20], (0, 255, 0), 2) 294 | 295 | # 手の平 296 | cv.line(image, landmark_point[0], landmark_point[1], (0, 255, 0), 2) 297 | cv.line(image, landmark_point[1], landmark_point[2], (0, 255, 0), 2) 298 | cv.line(image, landmark_point[2], landmark_point[5], (0, 255, 0), 2) 299 | cv.line(image, landmark_point[5], landmark_point[9], (0, 255, 0), 2) 300 | cv.line(image, landmark_point[9], landmark_point[13], (0, 255, 0), 2) 301 | cv.line(image, landmark_point[13], landmark_point[17], (0, 255, 0), 2) 302 | cv.line(image, landmark_point[17], landmark_point[0], (0, 255, 0), 2) 303 | 304 | # 重心 + 左右 305 | if len(landmark_point) > 0: 306 | cv.circle(image, (cx, cy), 12, (0, 255, 0), 2) 307 | cv.putText(image, handedness_str, (cx - 6, cy + 6), 308 | cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2, cv.LINE_AA) 309 | 310 | return image 311 | 312 | 313 | def draw_face_landmarks(image, landmarks): 314 
| image_width, image_height = image.shape[1], image.shape[0] 315 | 316 | landmark_point = [] 317 | 318 | for index, landmark in enumerate(landmarks.landmark): 319 | if landmark.visibility < 0 or landmark.presence < 0: 320 | continue 321 | 322 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 323 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 324 | landmark_z = landmark.z 325 | 326 | landmark_point.append((landmark_x, landmark_y)) 327 | 328 | cv.circle(image, (landmark_x, landmark_y), 1, (0, 255, 0), 1) 329 | 330 | if len(landmark_point) > 0: 331 | # 参考:https://github.com/tensorflow/tfjs-models/blob/master/facemesh/mesh_map.jpg 332 | 333 | # 左眉毛(55:内側、46:外側) 334 | cv.line(image, landmark_point[55], landmark_point[65], (0, 255, 0), 2) 335 | cv.line(image, landmark_point[65], landmark_point[52], (0, 255, 0), 2) 336 | cv.line(image, landmark_point[52], landmark_point[53], (0, 255, 0), 2) 337 | cv.line(image, landmark_point[53], landmark_point[46], (0, 255, 0), 2) 338 | 339 | # 右眉毛(285:内側、276:外側) 340 | cv.line(image, landmark_point[285], landmark_point[295], (0, 255, 0), 341 | 2) 342 | cv.line(image, landmark_point[295], landmark_point[282], (0, 255, 0), 343 | 2) 344 | cv.line(image, landmark_point[282], landmark_point[283], (0, 255, 0), 345 | 2) 346 | cv.line(image, landmark_point[283], landmark_point[276], (0, 255, 0), 347 | 2) 348 | 349 | # 左目 (133:目頭、246:目尻) 350 | cv.line(image, landmark_point[133], landmark_point[173], (0, 255, 0), 351 | 2) 352 | cv.line(image, landmark_point[173], landmark_point[157], (0, 255, 0), 353 | 2) 354 | cv.line(image, landmark_point[157], landmark_point[158], (0, 255, 0), 355 | 2) 356 | cv.line(image, landmark_point[158], landmark_point[159], (0, 255, 0), 357 | 2) 358 | cv.line(image, landmark_point[159], landmark_point[160], (0, 255, 0), 359 | 2) 360 | cv.line(image, landmark_point[160], landmark_point[161], (0, 255, 0), 361 | 2) 362 | cv.line(image, landmark_point[161], landmark_point[246], (0, 
255, 0), 363 | 2) 364 | 365 | cv.line(image, landmark_point[246], landmark_point[163], (0, 255, 0), 366 | 2) 367 | cv.line(image, landmark_point[163], landmark_point[144], (0, 255, 0), 368 | 2) 369 | cv.line(image, landmark_point[144], landmark_point[145], (0, 255, 0), 370 | 2) 371 | cv.line(image, landmark_point[145], landmark_point[153], (0, 255, 0), 372 | 2) 373 | cv.line(image, landmark_point[153], landmark_point[154], (0, 255, 0), 374 | 2) 375 | cv.line(image, landmark_point[154], landmark_point[155], (0, 255, 0), 376 | 2) 377 | cv.line(image, landmark_point[155], landmark_point[133], (0, 255, 0), 378 | 2) 379 | 380 | # 右目 (362:目頭、466:目尻) 381 | cv.line(image, landmark_point[362], landmark_point[398], (0, 255, 0), 382 | 2) 383 | cv.line(image, landmark_point[398], landmark_point[384], (0, 255, 0), 384 | 2) 385 | cv.line(image, landmark_point[384], landmark_point[385], (0, 255, 0), 386 | 2) 387 | cv.line(image, landmark_point[385], landmark_point[386], (0, 255, 0), 388 | 2) 389 | cv.line(image, landmark_point[386], landmark_point[387], (0, 255, 0), 390 | 2) 391 | cv.line(image, landmark_point[387], landmark_point[388], (0, 255, 0), 392 | 2) 393 | cv.line(image, landmark_point[388], landmark_point[466], (0, 255, 0), 394 | 2) 395 | 396 | cv.line(image, landmark_point[466], landmark_point[390], (0, 255, 0), 397 | 2) 398 | cv.line(image, landmark_point[390], landmark_point[373], (0, 255, 0), 399 | 2) 400 | cv.line(image, landmark_point[373], landmark_point[374], (0, 255, 0), 401 | 2) 402 | cv.line(image, landmark_point[374], landmark_point[380], (0, 255, 0), 403 | 2) 404 | cv.line(image, landmark_point[380], landmark_point[381], (0, 255, 0), 405 | 2) 406 | cv.line(image, landmark_point[381], landmark_point[382], (0, 255, 0), 407 | 2) 408 | cv.line(image, landmark_point[382], landmark_point[362], (0, 255, 0), 409 | 2) 410 | 411 | # 口 (308:右端、78:左端) 412 | cv.line(image, landmark_point[308], landmark_point[415], (0, 255, 0), 413 | 2) 414 | cv.line(image, 
landmark_point[415], landmark_point[310], (0, 255, 0), 415 | 2) 416 | cv.line(image, landmark_point[310], landmark_point[311], (0, 255, 0), 417 | 2) 418 | cv.line(image, landmark_point[311], landmark_point[312], (0, 255, 0), 419 | 2) 420 | cv.line(image, landmark_point[312], landmark_point[13], (0, 255, 0), 2) 421 | cv.line(image, landmark_point[13], landmark_point[82], (0, 255, 0), 2) 422 | cv.line(image, landmark_point[82], landmark_point[81], (0, 255, 0), 2) 423 | cv.line(image, landmark_point[81], landmark_point[80], (0, 255, 0), 2) 424 | cv.line(image, landmark_point[80], landmark_point[191], (0, 255, 0), 2) 425 | cv.line(image, landmark_point[191], landmark_point[78], (0, 255, 0), 2) 426 | 427 | cv.line(image, landmark_point[78], landmark_point[95], (0, 255, 0), 2) 428 | cv.line(image, landmark_point[95], landmark_point[88], (0, 255, 0), 2) 429 | cv.line(image, landmark_point[88], landmark_point[178], (0, 255, 0), 2) 430 | cv.line(image, landmark_point[178], landmark_point[87], (0, 255, 0), 2) 431 | cv.line(image, landmark_point[87], landmark_point[14], (0, 255, 0), 2) 432 | cv.line(image, landmark_point[14], landmark_point[317], (0, 255, 0), 2) 433 | cv.line(image, landmark_point[317], landmark_point[402], (0, 255, 0), 434 | 2) 435 | cv.line(image, landmark_point[402], landmark_point[318], (0, 255, 0), 436 | 2) 437 | cv.line(image, landmark_point[318], landmark_point[324], (0, 255, 0), 438 | 2) 439 | cv.line(image, landmark_point[324], landmark_point[308], (0, 255, 0), 440 | 2) 441 | 442 | return image 443 | 444 | 445 | def draw_pose_landmarks(image, landmarks, upper_body_only, visibility_th=0.5): 446 | image_width, image_height = image.shape[1], image.shape[0] 447 | 448 | landmark_point = [] 449 | 450 | for index, landmark in enumerate(landmarks.landmark): 451 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 452 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 453 | landmark_z = landmark.z 454 | 
landmark_point.append([landmark.visibility, (landmark_x, landmark_y)]) 455 | 456 | if landmark.visibility < visibility_th: 457 | continue 458 | 459 | if index == 0: # 鼻 460 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 461 | if index == 1: # 右目:目頭 462 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 463 | if index == 2: # 右目:瞳 464 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 465 | if index == 3: # 右目:目尻 466 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 467 | if index == 4: # 左目:目頭 468 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 469 | if index == 5: # 左目:瞳 470 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 471 | if index == 6: # 左目:目尻 472 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 473 | if index == 7: # 右耳 474 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 475 | if index == 8: # 左耳 476 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 477 | if index == 9: # 口:左端 478 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 479 | if index == 10: # 口:左端 480 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 481 | if index == 11: # 右肩 482 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 483 | if index == 12: # 左肩 484 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 485 | if index == 13: # 右肘 486 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 487 | if index == 14: # 左肘 488 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 489 | if index == 15: # 右手首 490 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 491 | if index == 16: # 左手首 492 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 493 | if index == 17: # 右手1(外側端) 494 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 495 | if index == 18: # 左手1(外側端) 496 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 497 | if index == 19: # 
右手2(先端) 498 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 499 | if index == 20: # 左手2(先端) 500 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 501 | if index == 21: # 右手3(内側端) 502 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 503 | if index == 22: # 左手3(内側端) 504 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 505 | if index == 23: # 腰(右側) 506 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 507 | if index == 24: # 腰(左側) 508 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 509 | if index == 25: # 右ひざ 510 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 511 | if index == 26: # 左ひざ 512 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 513 | if index == 27: # 右足首 514 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 515 | if index == 28: # 左足首 516 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 517 | if index == 29: # 右かかと 518 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 519 | if index == 30: # 左かかと 520 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 521 | if index == 31: # 右つま先 522 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 523 | if index == 32: # 左つま先 524 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 525 | 526 | if not upper_body_only: 527 | cv.putText(image, "z:" + str(round(landmark_z, 3)), 528 | (landmark_x - 10, landmark_y - 10), 529 | cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, 530 | cv.LINE_AA) 531 | 532 | if len(landmark_point) > 0: 533 | # 右目 534 | if landmark_point[1][0] > visibility_th and landmark_point[2][ 535 | 0] > visibility_th: 536 | cv.line(image, landmark_point[1][1], landmark_point[2][1], 537 | (0, 255, 0), 2) 538 | if landmark_point[2][0] > visibility_th and landmark_point[3][ 539 | 0] > visibility_th: 540 | cv.line(image, landmark_point[2][1], landmark_point[3][1], 541 | (0, 255, 0), 2) 542 | 543 | # 左目 544 | if 
landmark_point[4][0] > visibility_th and landmark_point[5][ 545 | 0] > visibility_th: 546 | cv.line(image, landmark_point[4][1], landmark_point[5][1], 547 | (0, 255, 0), 2) 548 | if landmark_point[5][0] > visibility_th and landmark_point[6][ 549 | 0] > visibility_th: 550 | cv.line(image, landmark_point[5][1], landmark_point[6][1], 551 | (0, 255, 0), 2) 552 | 553 | # 口 554 | if landmark_point[9][0] > visibility_th and landmark_point[10][ 555 | 0] > visibility_th: 556 | cv.line(image, landmark_point[9][1], landmark_point[10][1], 557 | (0, 255, 0), 2) 558 | 559 | # 肩 560 | if landmark_point[11][0] > visibility_th and landmark_point[12][ 561 | 0] > visibility_th: 562 | cv.line(image, landmark_point[11][1], landmark_point[12][1], 563 | (0, 255, 0), 2) 564 | 565 | # 右腕 566 | if landmark_point[11][0] > visibility_th and landmark_point[13][ 567 | 0] > visibility_th: 568 | cv.line(image, landmark_point[11][1], landmark_point[13][1], 569 | (0, 255, 0), 2) 570 | if landmark_point[13][0] > visibility_th and landmark_point[15][ 571 | 0] > visibility_th: 572 | cv.line(image, landmark_point[13][1], landmark_point[15][1], 573 | (0, 255, 0), 2) 574 | 575 | # 左腕 576 | if landmark_point[12][0] > visibility_th and landmark_point[14][ 577 | 0] > visibility_th: 578 | cv.line(image, landmark_point[12][1], landmark_point[14][1], 579 | (0, 255, 0), 2) 580 | if landmark_point[14][0] > visibility_th and landmark_point[16][ 581 | 0] > visibility_th: 582 | cv.line(image, landmark_point[14][1], landmark_point[16][1], 583 | (0, 255, 0), 2) 584 | 585 | # 右手 586 | if landmark_point[15][0] > visibility_th and landmark_point[17][ 587 | 0] > visibility_th: 588 | cv.line(image, landmark_point[15][1], landmark_point[17][1], 589 | (0, 255, 0), 2) 590 | if landmark_point[17][0] > visibility_th and landmark_point[19][ 591 | 0] > visibility_th: 592 | cv.line(image, landmark_point[17][1], landmark_point[19][1], 593 | (0, 255, 0), 2) 594 | if landmark_point[19][0] > visibility_th and landmark_point[21][ 595 
| 0] > visibility_th: 596 | cv.line(image, landmark_point[19][1], landmark_point[21][1], 597 | (0, 255, 0), 2) 598 | if landmark_point[21][0] > visibility_th and landmark_point[15][ 599 | 0] > visibility_th: 600 | cv.line(image, landmark_point[21][1], landmark_point[15][1], 601 | (0, 255, 0), 2) 602 | 603 | # 左手 604 | if landmark_point[16][0] > visibility_th and landmark_point[18][ 605 | 0] > visibility_th: 606 | cv.line(image, landmark_point[16][1], landmark_point[18][1], 607 | (0, 255, 0), 2) 608 | if landmark_point[18][0] > visibility_th and landmark_point[20][ 609 | 0] > visibility_th: 610 | cv.line(image, landmark_point[18][1], landmark_point[20][1], 611 | (0, 255, 0), 2) 612 | if landmark_point[20][0] > visibility_th and landmark_point[22][ 613 | 0] > visibility_th: 614 | cv.line(image, landmark_point[20][1], landmark_point[22][1], 615 | (0, 255, 0), 2) 616 | if landmark_point[22][0] > visibility_th and landmark_point[16][ 617 | 0] > visibility_th: 618 | cv.line(image, landmark_point[22][1], landmark_point[16][1], 619 | (0, 255, 0), 2) 620 | 621 | # 胴体 622 | if landmark_point[11][0] > visibility_th and landmark_point[23][ 623 | 0] > visibility_th: 624 | cv.line(image, landmark_point[11][1], landmark_point[23][1], 625 | (0, 255, 0), 2) 626 | if landmark_point[12][0] > visibility_th and landmark_point[24][ 627 | 0] > visibility_th: 628 | cv.line(image, landmark_point[12][1], landmark_point[24][1], 629 | (0, 255, 0), 2) 630 | if landmark_point[23][0] > visibility_th and landmark_point[24][ 631 | 0] > visibility_th: 632 | cv.line(image, landmark_point[23][1], landmark_point[24][1], 633 | (0, 255, 0), 2) 634 | 635 | if len(landmark_point) > 25: 636 | # 右足 637 | if landmark_point[23][0] > visibility_th and landmark_point[25][ 638 | 0] > visibility_th: 639 | cv.line(image, landmark_point[23][1], landmark_point[25][1], 640 | (0, 255, 0), 2) 641 | if landmark_point[25][0] > visibility_th and landmark_point[27][ 642 | 0] > visibility_th: 643 | cv.line(image, 
landmark_point[25][1], landmark_point[27][1], 644 | (0, 255, 0), 2) 645 | if landmark_point[27][0] > visibility_th and landmark_point[29][ 646 | 0] > visibility_th: 647 | cv.line(image, landmark_point[27][1], landmark_point[29][1], 648 | (0, 255, 0), 2) 649 | if landmark_point[29][0] > visibility_th and landmark_point[31][ 650 | 0] > visibility_th: 651 | cv.line(image, landmark_point[29][1], landmark_point[31][1], 652 | (0, 255, 0), 2) 653 | 654 | # 左足 655 | if landmark_point[24][0] > visibility_th and landmark_point[26][ 656 | 0] > visibility_th: 657 | cv.line(image, landmark_point[24][1], landmark_point[26][1], 658 | (0, 255, 0), 2) 659 | if landmark_point[26][0] > visibility_th and landmark_point[28][ 660 | 0] > visibility_th: 661 | cv.line(image, landmark_point[26][1], landmark_point[28][1], 662 | (0, 255, 0), 2) 663 | if landmark_point[28][0] > visibility_th and landmark_point[30][ 664 | 0] > visibility_th: 665 | cv.line(image, landmark_point[28][1], landmark_point[30][1], 666 | (0, 255, 0), 2) 667 | if landmark_point[30][0] > visibility_th and landmark_point[32][ 668 | 0] > visibility_th: 669 | cv.line(image, landmark_point[30][1], landmark_point[32][1], 670 | (0, 255, 0), 2) 671 | return image 672 | 673 | 674 | def draw_bounding_rect(use_brect, image, brect): 675 | if use_brect: 676 | # 外接矩形 677 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]), 678 | (0, 255, 0), 2) 679 | 680 | return image 681 | 682 | 683 | if __name__ == '__main__': 684 | main() 685 | --------------------------------------------------------------------------------