├── mediapipe test.py
├── utils
│   ├── __init__.py
│   └── cvfpscalc.py
├── model
│   ├── keypoint_classifier
│   │   ├── keypoint_classifier_label.csv
│   │   ├── keypoint_classifier.hdf5
│   │   ├── keypoint_classifier.tflite
│   │   └── keypoint_classifier.py
│   ├── point_history_classifier
│   │   ├── point_history_classifier_label.csv
│   │   ├── point_history_classifier.hdf5
│   │   ├── point_history_classifier.tflite
│   │   └── point_history_classifier.py
│   └── __init__.py
├── README.md
├── app.py
├── keypoint_classification.ipynb
└── keypoint_classification_EN.ipynb
/mediapipe test.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from utils.cvfpscalc import CvFpsCalc
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier_label.csv:
--------------------------------------------------------------------------------
1 | Open
2 | Close
3 | Pointer
4 | OK
5 |
--------------------------------------------------------------------------------
/model/point_history_classifier/point_history_classifier_label.csv:
--------------------------------------------------------------------------------
1 | Stop
2 | Clockwise
3 | Counter Clockwise
4 | Move
5 |
--------------------------------------------------------------------------------
/model/__init__.py:
--------------------------------------------------------------------------------
1 | from model.keypoint_classifier.keypoint_classifier import KeyPointClassifier
2 | from model.point_history_classifier.point_history_classifier import PointHistoryClassifier
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/keypoint_classifier/keypoint_classifier.hdf5
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/keypoint_classifier/keypoint_classifier.tflite
--------------------------------------------------------------------------------
/model/point_history_classifier/point_history_classifier.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/point_history_classifier/point_history_classifier.hdf5
--------------------------------------------------------------------------------
/model/point_history_classifier/point_history_classifier.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/point_history_classifier/point_history_classifier.tflite
--------------------------------------------------------------------------------
/utils/cvfpscalc.py:
--------------------------------------------------------------------------------
1 | from collections import deque
2 | import cv2 as cv
3 |
4 |
5 | class CvFpsCalc(object):
6 | def __init__(self, buffer_len=1):
7 | self._start_tick = cv.getTickCount()
8 | self._freq = 1000.0 / cv.getTickFrequency()
9 | self._difftimes = deque(maxlen=buffer_len)
10 |
11 | def get(self):
12 | current_tick = cv.getTickCount()
13 | different_time = (current_tick - self._start_tick) * self._freq
14 | self._start_tick = current_tick
15 |
16 | self._difftimes.append(different_time)
17 |
18 | fps = 1000.0 / (sum(self._difftimes) / len(self._difftimes))
19 | fps_rounded = round(fps, 2)
20 |
21 | return fps_rounded
22 |
--------------------------------------------------------------------------------
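A minimal usage sketch for CvFpsCalc, mirroring how app.py uses it; the capture loop below is illustrative and not part of the repository:

```python
import cv2 as cv

from utils import CvFpsCalc

cvFpsCalc = CvFpsCalc(buffer_len=10)  # rolling average over the last 10 frames

cap = cv.VideoCapture(0)
while True:
    fps = cvFpsCalc.get()  # per-frame tick delta in ms -> averaged FPS

    ret, frame = cap.read()
    if not ret:
        break
    cv.putText(frame, "FPS:" + str(fps), (10, 30),
               cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2, cv.LINE_AA)
    cv.imshow('FPS demo', frame)
    if cv.waitKey(1) == 27:  # ESC to quit
        break
cap.release()
cv.destroyAllWindows()
```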
/model/keypoint_classifier/keypoint_classifier.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 |
7 | class KeyPointClassifier(object):
8 | def __init__(
9 | self,
10 | model_path='model/keypoint_classifier/keypoint_classifier.tflite',
11 | num_threads=1,
12 | ):
13 | self.interpreter = tf.lite.Interpreter(model_path=model_path,
14 | num_threads=num_threads)
15 |
16 | self.interpreter.allocate_tensors()
17 | self.input_details = self.interpreter.get_input_details()
18 | self.output_details = self.interpreter.get_output_details()
19 |
20 | def __call__(
21 | self,
22 | landmark_list,
23 | ):
24 | input_details_tensor_index = self.input_details[0]['index']
25 | self.interpreter.set_tensor(
26 | input_details_tensor_index,
27 | np.array([landmark_list], dtype=np.float32))
28 | self.interpreter.invoke()
29 |
30 | output_details_tensor_index = self.output_details[0]['index']
31 |
32 | result = self.interpreter.get_tensor(output_details_tensor_index)
33 |
34 | result_index = np.argmax(np.squeeze(result))
35 |
36 | return result_index
37 |
--------------------------------------------------------------------------------
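A minimal usage sketch for KeyPointClassifier. It assumes the 42-element input (21 landmarks × (x, y), wrist-relative and scaled to [-1, 1]) produced by pre_process_landmark() in app.py; the zero vector below is dummy data, not a real hand:

```python
from model import KeyPointClassifier

keypoint_classifier = KeyPointClassifier()  # loads keypoint_classifier.tflite

dummy_landmark_list = [0.0] * (21 * 2)  # placeholder for a pre-processed hand
hand_sign_id = keypoint_classifier(dummy_landmark_list)

# hand_sign_id indexes a row of keypoint_classifier_label.csv (Open, Close, ...)
print(hand_sign_id)
```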
/model/point_history_classifier/point_history_classifier.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 |
7 | class PointHistoryClassifier(object):
8 | def __init__(
9 | self,
10 | model_path='model/point_history_classifier/point_history_classifier.tflite',
11 | score_th=0.5,
12 | invalid_value=0,
13 | num_threads=1,
14 | ):
15 | self.interpreter = tf.lite.Interpreter(model_path=model_path,
16 | num_threads=num_threads)
17 |
18 | self.interpreter.allocate_tensors()
19 | self.input_details = self.interpreter.get_input_details()
20 | self.output_details = self.interpreter.get_output_details()
21 |
22 | self.score_th = score_th
23 | self.invalid_value = invalid_value
24 |
25 | def __call__(
26 | self,
27 | point_history,
28 | ):
29 | input_details_tensor_index = self.input_details[0]['index']
30 | self.interpreter.set_tensor(
31 | input_details_tensor_index,
32 | np.array([point_history], dtype=np.float32))
33 | self.interpreter.invoke()
34 |
35 | output_details_tensor_index = self.output_details[0]['index']
36 |
37 | result = self.interpreter.get_tensor(output_details_tensor_index)
38 |
39 | result_index = np.argmax(np.squeeze(result))
40 |
41 | if np.squeeze(result)[result_index] < self.score_th:
42 | result_index = self.invalid_value
43 |
44 | return result_index
45 |
--------------------------------------------------------------------------------
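A minimal usage sketch for PointHistoryClassifier. It assumes the 32-element input (16 history points × (x, y), normalized by the image width/height) produced by pre_process_point_history() in app.py; the zero vector is dummy data:

```python
from model import PointHistoryClassifier

point_history_classifier = PointHistoryClassifier()  # score_th=0.5, invalid_value=0

dummy_point_history = [0.0] * (16 * 2)  # placeholder fingertip trajectory
finger_gesture_id = point_history_classifier(dummy_point_history)

# When the best class score is below score_th, invalid_value (0) is returned,
# which maps to the first row of point_history_classifier_label.csv (Stop).
print(finger_gesture_id)
```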
/README.md:
--------------------------------------------------------------------------------
1 | # hand-gesture-recognition-using-mediapipe
2 | Estimate hand pose using MediaPipe (Python version). This is a sample
3 | program that recognizes hand signs and finger gestures with a simple MLP using the detected key points.
4 |
❗ **This is the English-translated version of the [original repo](https://github.com/Kazuhito00/hand-gesture-recognition-using-mediapipe). All content, including the comments and notebooks, has been translated to English.** ❗
5 |
6 | 
7 |
8 | This repository contains the following:
9 | * Sample program
10 | * Hand sign recognition model (TFLite)
11 | * Finger gesture recognition model (TFLite)
12 | * Learning data for hand sign recognition and a notebook for training
13 | * Learning data for finger gesture recognition and a notebook for training
14 |
15 | # Requirements
16 | * mediapipe 0.8.1
17 | * OpenCV 3.4.2 or Later
18 | * TensorFlow 2.3.0 or Later
tf-nightly 2.5.0.dev or later (only when creating a TFLite for an LSTM model)
19 | * scikit-learn 0.23.2 or Later (Only if you want to display the confusion matrix)
20 | * matplotlib 3.3.2 or Later (Only if you want to display the confusion matrix)
21 |
22 | # Demo
23 | Here's how to run the demo using your webcam.
24 | ```bash
25 | python app.py
26 | ```
27 |
28 | The following options can be specified when running the demo (see the example below).
29 | * --device
Specifying the camera device number (Default: 0)
30 | * --width
Width at the time of camera capture (Default: 960)
31 | * --height
Height at the time of camera capture (Default: 540)
32 | * --use_static_image_mode
Whether to use the static_image_mode option for MediaPipe inference (Default: unspecified)
33 | * --min_detection_confidence
34 | Detection confidence threshold (Default: 0.7)
35 | * --min_tracking_confidence
36 | Tracking confidence threshold (Default: 0.5)
37 |
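For example, to run at a different capture size (the option values here are only illustrative):
```bash
python app.py --device 0 --width 1280 --height 720 --min_detection_confidence 0.7 --min_tracking_confidence 0.5
```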
38 | # Directory
39 |
40 | │  app.py
41 | │  keypoint_classification.ipynb
42 | │  point_history_classification.ipynb
43 | │
44 | ├─model
45 | │  ├─keypoint_classifier
46 | │  │  │  keypoint.csv
47 | │  │  │  keypoint_classifier.hdf5
48 | │  │  │  keypoint_classifier.py
49 | │  │  │  keypoint_classifier.tflite
50 | │  │  └─ keypoint_classifier_label.csv
51 | │  │
52 | │  └─point_history_classifier
53 | │      │  point_history.csv
54 | │      │  point_history_classifier.hdf5
55 | │      │  point_history_classifier.py
56 | │      │  point_history_classifier.tflite
57 | │      └─ point_history_classifier_label.csv
58 | │
59 | └─utils
60 |     └─cvfpscalc.py
61 |
62 | ### app.py
63 | This is a sample program for inference.


104 | 
108 |
109 | #### 2. Model training
110 | Open "[keypoint_classification.ipynb](keypoint_classification.ipynb)" in Jupyter Notebook and execute from top to bottom.



129 |
130 | #### 2. Model training
131 | Open "[point_history_classification.ipynb](point_history_classification.ipynb)" in Jupyter Notebook and execute from top to bottom.
139 |
140 | # Reference
141 | * [MediaPipe](https://mediapipe.dev/)
142 |
143 | # Author
144 | Kazuhito Takahashi (https://twitter.com/KzhtTkhs)
145 |
146 | # Translation and other improvements
147 | Nikita Kiselov(https://github.com/kinivi)
148 |
149 | # License
150 | hand-gesture-recognition-using-mediapipe is under [Apache v2 license](LICENSE).
151 |
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import csv
4 | import copy
5 | import argparse
6 | import itertools
7 | from collections import Counter
8 | from collections import deque
9 |
10 | import cv2 as cv
11 | import numpy as np
12 | import mediapipe as mp
13 |
14 | from utils import CvFpsCalc
15 | from model import KeyPointClassifier
16 | from model import PointHistoryClassifier
17 |
18 |
19 | def get_args():
20 | parser = argparse.ArgumentParser()
21 |
22 | parser.add_argument("--device", type=int, default=0)
23 | parser.add_argument("--width", help='cap width', type=int, default=960)
24 | parser.add_argument("--height", help='cap height', type=int, default=540)
25 |
26 | parser.add_argument('--use_static_image_mode', action='store_true')
27 | parser.add_argument("--min_detection_confidence",
28 | help='min_detection_confidence',
29 | type=float,
30 | default=0.7)
31 | parser.add_argument("--min_tracking_confidence",
32 | help='min_tracking_confidence',
33 | type=float,
34 | default=0.5)
35 |
36 | args = parser.parse_args()
37 |
38 | return args
39 |
40 |
41 | def main():
42 | # Argument parsing #################################################################
43 | args = get_args()
44 |
45 | cap_device = args.device
46 | cap_width = args.width
47 | cap_height = args.height
48 |
49 | use_static_image_mode = args.use_static_image_mode
50 | min_detection_confidence = args.min_detection_confidence
51 | min_tracking_confidence = args.min_tracking_confidence
52 |
53 | use_brect = True
54 |
55 | # Camera preparation ###############################################################
56 | cap = cv.VideoCapture(cap_device)
57 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
58 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
59 |
60 | # Model load #############################################################
61 | mp_hands = mp.solutions.hands
62 | hands = mp_hands.Hands(
63 | static_image_mode=use_static_image_mode,
64 | max_num_hands=1,
65 | min_detection_confidence=min_detection_confidence,
66 | min_tracking_confidence=min_tracking_confidence,
67 | )
68 |
69 | keypoint_classifier = KeyPointClassifier()
70 |
71 | point_history_classifier = PointHistoryClassifier()
72 |
73 | # Read labels ###########################################################
74 | with open('model/keypoint_classifier/keypoint_classifier_label.csv',
75 | encoding='utf-8-sig') as f:
76 | keypoint_classifier_labels = csv.reader(f)
77 | keypoint_classifier_labels = [
78 | row[0] for row in keypoint_classifier_labels
79 | ]
80 | with open(
81 | 'model/point_history_classifier/point_history_classifier_label.csv',
82 | encoding='utf-8-sig') as f:
83 | point_history_classifier_labels = csv.reader(f)
84 | point_history_classifier_labels = [
85 | row[0] for row in point_history_classifier_labels
86 | ]
87 |
88 | # FPS Measurement ########################################################
89 | cvFpsCalc = CvFpsCalc(buffer_len=10)
90 |
91 | # Coordinate history #################################################################
92 | history_length = 16
93 | point_history = deque(maxlen=history_length)
94 |
95 | # Finger gesture history ################################################
96 | finger_gesture_history = deque(maxlen=history_length)
97 |
98 | # ########################################################################
99 | mode = 0
100 |
101 | while True:
102 | fps = cvFpsCalc.get()
103 |
104 | # Process Key (ESC: end) #################################################
105 | key = cv.waitKey(10)
106 | if key == 27: # ESC
107 | break
108 | number, mode = select_mode(key, mode)
109 |
110 | # Camera capture #####################################################
111 | ret, image = cap.read()
112 | if not ret:
113 | break
114 | image = cv.flip(image, 1) # Mirror display
115 | debug_image = copy.deepcopy(image)
116 |
117 | # Detection implementation #############################################################
118 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
119 |
120 | image.flags.writeable = False
121 | results = hands.process(image)
122 | image.flags.writeable = True
123 |
124 | # ####################################################################
125 | if results.multi_hand_landmarks is not None:
126 | for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
127 | results.multi_handedness):
128 | # Bounding box calculation
129 | brect = calc_bounding_rect(debug_image, hand_landmarks)
130 | # Landmark calculation
131 | landmark_list = calc_landmark_list(debug_image, hand_landmarks)
132 |
133 | # Conversion to relative coordinates / normalized coordinates
134 | pre_processed_landmark_list = pre_process_landmark(
135 | landmark_list)
136 | pre_processed_point_history_list = pre_process_point_history(
137 | debug_image, point_history)
138 | # Write to the dataset file
139 | logging_csv(number, mode, pre_processed_landmark_list,
140 | pre_processed_point_history_list)
141 |
142 | # Hand sign classification
143 | hand_sign_id = keypoint_classifier(pre_processed_landmark_list)
144 | if hand_sign_id == 2: # Point gesture
145 | point_history.append(landmark_list[8])
146 | else:
147 | point_history.append([0, 0])
148 |
149 | # Finger gesture classification
150 | finger_gesture_id = 0
151 | point_history_len = len(pre_processed_point_history_list)
152 | if point_history_len == (history_length * 2):
153 | finger_gesture_id = point_history_classifier(
154 | pre_processed_point_history_list)
155 |
156 | # Calculates the gesture IDs in the latest detection
157 | finger_gesture_history.append(finger_gesture_id)
158 | most_common_fg_id = Counter(
159 | finger_gesture_history).most_common()
160 |
161 | # Drawing part
162 | debug_image = draw_bounding_rect(use_brect, debug_image, brect)
163 | debug_image = draw_landmarks(debug_image, landmark_list)
164 | debug_image = draw_info_text(
165 | debug_image,
166 | brect,
167 | handedness,
168 | keypoint_classifier_labels[hand_sign_id],
169 | point_history_classifier_labels[most_common_fg_id[0][0]],
170 | )
171 | else:
172 | point_history.append([0, 0])
173 |
174 | debug_image = draw_point_history(debug_image, point_history)
175 | debug_image = draw_info(debug_image, fps, mode, number)
176 |
177 | # Screen reflection #############################################################
178 | cv.imshow('Hand Gesture Recognition', debug_image)
179 |
180 | cap.release()
181 | cv.destroyAllWindows()
182 |
183 |
184 | def select_mode(key, mode):
185 | number = -1
186 | if 48 <= key <= 57: # 0 ~ 9
187 | number = key - 48
188 | if key == 110: # n
189 | mode = 0
190 | if key == 107: # k
191 | mode = 1
192 | if key == 104: # h
193 | mode = 2
194 | return number, mode
195 |
196 |
197 | def calc_bounding_rect(image, landmarks):
198 | image_width, image_height = image.shape[1], image.shape[0]
199 |
200 | landmark_array = np.empty((0, 2), int)
201 |
202 | for _, landmark in enumerate(landmarks.landmark):
203 | landmark_x = min(int(landmark.x * image_width), image_width - 1)
204 | landmark_y = min(int(landmark.y * image_height), image_height - 1)
205 |
206 | landmark_point = [np.array((landmark_x, landmark_y))]
207 |
208 | landmark_array = np.append(landmark_array, landmark_point, axis=0)
209 |
210 | x, y, w, h = cv.boundingRect(landmark_array)
211 |
212 | return [x, y, x + w, y + h]
213 |
214 |
215 | def calc_landmark_list(image, landmarks):
216 | image_width, image_height = image.shape[1], image.shape[0]
217 |
218 | landmark_point = []
219 |
220 | # Keypoint
221 | for _, landmark in enumerate(landmarks.landmark):
222 | landmark_x = min(int(landmark.x * image_width), image_width - 1)
223 | landmark_y = min(int(landmark.y * image_height), image_height - 1)
224 | # landmark_z = landmark.z
225 |
226 | landmark_point.append([landmark_x, landmark_y])
227 |
228 | return landmark_point
229 |
230 |
231 | def pre_process_landmark(landmark_list):
232 | temp_landmark_list = copy.deepcopy(landmark_list)
233 |
234 | # Convert to relative coordinates
235 | base_x, base_y = 0, 0
236 | for index, landmark_point in enumerate(temp_landmark_list):
237 | if index == 0:
238 | base_x, base_y = landmark_point[0], landmark_point[1]
239 |
240 | temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
241 | temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
242 |
243 | # Convert to a one-dimensional list
244 | temp_landmark_list = list(
245 | itertools.chain.from_iterable(temp_landmark_list))
246 |
247 | # Normalization
248 | max_value = max(list(map(abs, temp_landmark_list)))
249 |
250 | def normalize_(n):
251 | return n / max_value
252 |
253 | temp_landmark_list = list(map(normalize_, temp_landmark_list))
254 |
255 | return temp_landmark_list
256 |
257 |
258 | def pre_process_point_history(image, point_history):
259 | image_width, image_height = image.shape[1], image.shape[0]
260 |
261 | temp_point_history = copy.deepcopy(point_history)
262 |
263 | # Convert to relative coordinates
264 | base_x, base_y = 0, 0
265 | for index, point in enumerate(temp_point_history):
266 | if index == 0:
267 | base_x, base_y = point[0], point[1]
268 |
269 | temp_point_history[index][0] = (temp_point_history[index][0] -
270 | base_x) / image_width
271 | temp_point_history[index][1] = (temp_point_history[index][1] -
272 | base_y) / image_height
273 |
274 | # Convert to a one-dimensional list
275 | temp_point_history = list(
276 | itertools.chain.from_iterable(temp_point_history))
277 |
278 | return temp_point_history
279 |
280 |
281 | def logging_csv(number, mode, landmark_list, point_history_list):
282 | if mode == 0:
283 | pass
284 | if mode == 1 and (0 <= number <= 9):
285 | csv_path = 'model/keypoint_classifier/keypoint.csv'
286 | with open(csv_path, 'a', newline="") as f:
287 | writer = csv.writer(f)
288 | writer.writerow([number, *landmark_list])
289 | if mode == 2 and (0 <= number <= 9):
290 | csv_path = 'model/point_history_classifier/point_history.csv'
291 | with open(csv_path, 'a', newline="") as f:
292 | writer = csv.writer(f)
293 | writer.writerow([number, *point_history_list])
294 | return
295 |
296 |
297 | def draw_landmarks(image, landmark_point):
298 | if len(landmark_point) > 0:
299 | # Thumb
300 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
301 | (0, 0, 0), 6)
302 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]),
303 | (255, 255, 255), 2)
304 | cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
305 | (0, 0, 0), 6)
306 | cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]),
307 | (255, 255, 255), 2)
308 |
309 | # Index finger
310 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
311 | (0, 0, 0), 6)
312 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]),
313 | (255, 255, 255), 2)
314 | cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
315 | (0, 0, 0), 6)
316 | cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]),
317 | (255, 255, 255), 2)
318 | cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
319 | (0, 0, 0), 6)
320 | cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]),
321 | (255, 255, 255), 2)
322 |
323 | # Middle finger
324 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
325 | (0, 0, 0), 6)
326 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]),
327 | (255, 255, 255), 2)
328 | cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
329 | (0, 0, 0), 6)
330 | cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]),
331 | (255, 255, 255), 2)
332 | cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
333 | (0, 0, 0), 6)
334 | cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]),
335 | (255, 255, 255), 2)
336 |
337 | # Ring finger
338 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
339 | (0, 0, 0), 6)
340 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]),
341 | (255, 255, 255), 2)
342 | cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
343 | (0, 0, 0), 6)
344 | cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]),
345 | (255, 255, 255), 2)
346 | cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
347 | (0, 0, 0), 6)
348 | cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]),
349 | (255, 255, 255), 2)
350 |
351 | # Little finger
352 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
353 | (0, 0, 0), 6)
354 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]),
355 | (255, 255, 255), 2)
356 | cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
357 | (0, 0, 0), 6)
358 | cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]),
359 | (255, 255, 255), 2)
360 | cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
361 | (0, 0, 0), 6)
362 | cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]),
363 | (255, 255, 255), 2)
364 |
365 | # Palm
366 | cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
367 | (0, 0, 0), 6)
368 | cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]),
369 | (255, 255, 255), 2)
370 | cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
371 | (0, 0, 0), 6)
372 | cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]),
373 | (255, 255, 255), 2)
374 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
375 | (0, 0, 0), 6)
376 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]),
377 | (255, 255, 255), 2)
378 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
379 | (0, 0, 0), 6)
380 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]),
381 | (255, 255, 255), 2)
382 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
383 | (0, 0, 0), 6)
384 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]),
385 | (255, 255, 255), 2)
386 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
387 | (0, 0, 0), 6)
388 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]),
389 | (255, 255, 255), 2)
390 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
391 | (0, 0, 0), 6)
392 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]),
393 | (255, 255, 255), 2)
394 |
395 | # Key Points
396 | for index, landmark in enumerate(landmark_point):
397 | if index == 0: # Wrist 1
398 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
399 | -1)
400 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
401 | if index == 1: # Wrist 2
402 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
403 | -1)
404 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
405 | if index == 2: # Thumb: base
406 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
407 | -1)
408 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
409 | if index == 3: # Thumb: first joint
410 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
411 | -1)
412 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
413 | if index == 4: # Thumb: fingertip
414 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
415 | -1)
416 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
417 | if index == 5: # Index finger: base
418 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
419 | -1)
420 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
421 | if index == 6: # Index finger: second joint
422 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
423 | -1)
424 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
425 | if index == 7: # Index finger: first joint
426 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
427 | -1)
428 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
429 | if index == 8: # Index finger: fingertip
430 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
431 | -1)
432 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
433 | if index == 9: # Middle finger: base
434 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
435 | -1)
436 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
437 | if index == 10: # Middle finger: second joint
438 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
439 | -1)
440 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
441 | if index == 11: # Middle finger: first joint
442 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
443 | -1)
444 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
445 | if index == 12: # Middle finger: fingertip
446 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
447 | -1)
448 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
449 | if index == 13: # Ring finger: base
450 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
451 | -1)
452 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
453 | if index == 14: # Ring finger: second joint
454 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
455 | -1)
456 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
457 | if index == 15: # Ring finger: first joint
458 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
459 | -1)
460 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
461 | if index == 16: # Ring finger: fingertip
462 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
463 | -1)
464 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
465 | if index == 17: # Little finger: base
466 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
467 | -1)
468 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
469 | if index == 18: # Little finger: second joint
470 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
471 | -1)
472 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
473 | if index == 19: # Little finger: first joint
474 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255),
475 | -1)
476 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1)
477 | if index == 20: # Little finger: fingertip
478 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255),
479 | -1)
480 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1)
481 |
482 | return image
483 |
484 |
485 | def draw_bounding_rect(use_brect, image, brect):
486 | if use_brect:
487 | # Outer rectangle
488 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
489 | (0, 0, 0), 1)
490 |
491 | return image
492 |
493 |
494 | def draw_info_text(image, brect, handedness, hand_sign_text,
495 | finger_gesture_text):
496 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22),
497 | (0, 0, 0), -1)
498 |
499 | info_text = handedness.classification[0].label[0:]
500 | if hand_sign_text != "":
501 | info_text = info_text + ':' + hand_sign_text
502 | cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4),
503 | cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv.LINE_AA)
504 |
505 | if finger_gesture_text != "":
506 | cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60),
507 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 4, cv.LINE_AA)
508 | cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60),
509 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2,
510 | cv.LINE_AA)
511 |
512 | return image
513 |
514 |
515 | def draw_point_history(image, point_history):
516 | for index, point in enumerate(point_history):
517 | if point[0] != 0 and point[1] != 0:
518 | cv.circle(image, (point[0], point[1]), 1 + int(index / 2),
519 | (152, 251, 152), 2)
520 |
521 | return image
522 |
523 |
524 | def draw_info(image, fps, mode, number):
525 | cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
526 | 1.0, (0, 0, 0), 4, cv.LINE_AA)
527 | cv.putText(image, "FPS:" + str(fps), (10, 30), cv.FONT_HERSHEY_SIMPLEX,
528 | 1.0, (255, 255, 255), 2, cv.LINE_AA)
529 |
530 | mode_string = ['Logging Key Point', 'Logging Point History']
531 | if 1 <= mode <= 2:
532 | cv.putText(image, "MODE:" + mode_string[mode - 1], (10, 90),
533 | cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
534 | cv.LINE_AA)
535 | if 0 <= number <= 9:
536 | cv.putText(image, "NUM:" + str(number), (10, 110),
537 | cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
538 | cv.LINE_AA)
539 | return image
540 |
541 |
542 | if __name__ == '__main__':
543 | main()
544 |
--------------------------------------------------------------------------------
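A worked example of the pre_process_landmark() normalization used in app.py, condensed for illustration: three hypothetical pixel-coordinate landmarks are made wrist-relative, flattened, and divided by the largest absolute value so every feature lands in [-1, 1]:

```python
import copy
import itertools

def pre_process_landmark(landmark_list):
    temp = copy.deepcopy(landmark_list)
    base_x, base_y = temp[0]
    temp = [[x - base_x, y - base_y] for x, y in temp]  # wrist-relative coords
    flat = list(itertools.chain.from_iterable(temp))    # flatten to 1-D
    max_value = max(map(abs, flat))                     # scale into [-1, 1]
    return [v / max_value for v in flat]

print(pre_process_landmark([[100, 200], [150, 180], [60, 260]]))
# [0.0, 0.0, 0.8333..., -0.3333..., -0.6666..., 1.0]
```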
/keypoint_classification.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import csv\n",
10 | "\n",
11 | "import numpy as np\n",
12 | "import tensorflow as tf\n",
13 | "from sklearn.model_selection import train_test_split\n",
14 | "\n",
15 | "RANDOM_SEED = 42"
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "# Specify each path"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 2,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "dataset = 'model/keypoint_classifier/keypoint.csv'\n",
32 | "model_save_path = 'model/keypoint_classifier/keypoint_classifier.hdf5'"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "# Set the number of classes"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": 3,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "NUM_CLASSES = 3"
49 | ]
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "# Load the training data"
56 | ]
57 | },
58 | {
59 | "cell_type": "code",
60 | "execution_count": 4,
61 | "metadata": {},
62 | "outputs": [],
63 | "source": [
64 | "X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (21 * 2) + 1)))"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 5,
70 | "metadata": {},
71 | "outputs": [],
72 | "source": [
73 | "y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 6,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)"
83 | ]
84 | },
85 | {
86 | "cell_type": "markdown",
87 | "metadata": {},
88 | "source": [
89 | "# Build the model"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 7,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
98 | "model = tf.keras.models.Sequential([\n",
99 | " tf.keras.layers.Input((21 * 2, )),\n",
100 | " tf.keras.layers.Dropout(0.2),\n",
101 | " tf.keras.layers.Dense(20, activation='relu'),\n",
102 | " tf.keras.layers.Dropout(0.4),\n",
103 | " tf.keras.layers.Dense(10, activation='relu'),\n",
104 | " tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n",
105 | "])"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 8,
111 | "metadata": {},
112 | "outputs": [
113 | {
114 | "name": "stdout",
115 | "output_type": "stream",
116 | "text": [
117 | "Model: \"sequential\"\n",
118 | "_________________________________________________________________\n",
119 | "Layer (type) Output Shape Param # \n",
120 | "=================================================================\n",
121 | "dropout (Dropout) (None, 42) 0 \n",
122 | "_________________________________________________________________\n",
123 | "dense (Dense) (None, 20) 860 \n",
124 | "_________________________________________________________________\n",
125 | "dropout_1 (Dropout) (None, 20) 0 \n",
126 | "_________________________________________________________________\n",
127 | "dense_1 (Dense) (None, 10) 210 \n",
128 | "_________________________________________________________________\n",
129 | "dense_2 (Dense) (None, 3) 33 \n",
130 | "=================================================================\n",
131 | "Total params: 1,103\n",
132 | "Trainable params: 1,103\n",
133 | "Non-trainable params: 0\n",
134 | "_________________________________________________________________\n"
135 | ]
136 | }
137 | ],
138 | "source": [
139 | "model.summary() # tf.keras.utils.plot_model(model, show_shapes=True)"
140 | ]
141 | },
142 | {
143 | "cell_type": "code",
144 | "execution_count": 9,
145 | "metadata": {},
146 | "outputs": [],
147 | "source": [
148 | "# Model checkpoint callback\n",
149 | "cp_callback = tf.keras.callbacks.ModelCheckpoint(\n",
150 | " model_save_path, verbose=1, save_weights_only=False)\n",
151 | "# Early-stopping callback\n",
152 | "es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)"
153 | ]
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": 10,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": [
161 | "# Compile the model\n",
162 | "model.compile(\n",
163 | " optimizer='adam',\n",
164 | " loss='sparse_categorical_crossentropy',\n",
165 | " metrics=['accuracy']\n",
166 | ")"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "# Train the model"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": 11,
179 | "metadata": {
180 | "scrolled": true
181 | },
182 | "outputs": [
183 | {
184 | "name": "stdout",
185 | "output_type": "stream",
186 | "text": [
187 | "Epoch 1/1000\n",
188 | " 1/27 [>.............................] - ETA: 0s - loss: 1.1295 - accuracy: 0.3203\n",
189 | "Epoch 00001: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
190 | "27/27 [==============================] - 0s 11ms/step - loss: 1.1004 - accuracy: 0.3602 - val_loss: 1.0431 - val_accuracy: 0.5220\n",
191 | "Epoch 2/1000\n",
192 | " 1/27 [>.............................] - ETA: 0s - loss: 1.0440 - accuracy: 0.4844\n",
193 | "Epoch 00002: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
194 | "27/27 [==============================] - 0s 3ms/step - loss: 1.0503 - accuracy: 0.4297 - val_loss: 0.9953 - val_accuracy: 0.6397\n",
195 | "Epoch 3/1000\n",
196 | " 1/27 [>.............................] - ETA: 0s - loss: 1.0043 - accuracy: 0.5312\n",
197 | "Epoch 00003: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
198 | "27/27 [==============================] - 0s 4ms/step - loss: 1.0210 - accuracy: 0.4582 - val_loss: 0.9545 - val_accuracy: 0.6523\n",
199 | "Epoch 4/1000\n",
200 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9503 - accuracy: 0.5625\n",
201 | "Epoch 00004: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
202 | "27/27 [==============================] - 0s 4ms/step - loss: 0.9906 - accuracy: 0.5022 - val_loss: 0.9168 - val_accuracy: 0.6721\n",
203 | "Epoch 5/1000\n",
204 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9562 - accuracy: 0.5469\n",
205 | "Epoch 00005: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
206 | "27/27 [==============================] - 0s 3ms/step - loss: 0.9654 - accuracy: 0.5340 - val_loss: 0.8791 - val_accuracy: 0.7017\n",
207 | "Epoch 6/1000\n",
208 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9184 - accuracy: 0.5938\n",
209 | "Epoch 00006: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
210 | "27/27 [==============================] - 0s 3ms/step - loss: 0.9256 - accuracy: 0.5577 - val_loss: 0.8344 - val_accuracy: 0.7269\n",
211 | "Epoch 7/1000\n",
212 | "27/27 [==============================] - ETA: 0s - loss: 0.9050 - accuracy: 0.5715\n",
213 | "Epoch 00007: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
214 | "27/27 [==============================] - 0s 4ms/step - loss: 0.9050 - accuracy: 0.5715 - val_loss: 0.7887 - val_accuracy: 0.7646\n",
215 | "Epoch 8/1000\n",
216 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9135 - accuracy: 0.5547\n",
217 | "Epoch 00008: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
218 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8642 - accuracy: 0.5993 - val_loss: 0.7414 - val_accuracy: 0.7996\n",
219 | "Epoch 9/1000\n",
220 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8002 - accuracy: 0.6172\n",
221 | "Epoch 00009: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
222 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8258 - accuracy: 0.6263 - val_loss: 0.6881 - val_accuracy: 0.8149\n",
223 | "Epoch 10/1000\n",
224 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8056 - accuracy: 0.6328\n",
225 | "Epoch 00010: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
226 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8008 - accuracy: 0.6341 - val_loss: 0.6461 - val_accuracy: 0.8239\n",
227 | "Epoch 11/1000\n",
228 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7746 - accuracy: 0.6719\n",
229 | "Epoch 00011: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
230 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7771 - accuracy: 0.6491 - val_loss: 0.6143 - val_accuracy: 0.8266\n",
231 | "Epoch 12/1000\n",
232 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7242 - accuracy: 0.7109\n",
233 | "Epoch 00012: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
234 | "27/27 [==============================] - 0s 4ms/step - loss: 0.7490 - accuracy: 0.6650 - val_loss: 0.5740 - val_accuracy: 0.8320\n",
235 | "Epoch 13/1000\n",
236 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8363 - accuracy: 0.6328\n",
237 | "Epoch 00013: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
238 | "27/27 [==============================] - 0s 4ms/step - loss: 0.7397 - accuracy: 0.6731 - val_loss: 0.5465 - val_accuracy: 0.8446\n",
239 | "Epoch 14/1000\n",
240 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7634 - accuracy: 0.6172\n",
241 | "Epoch 00014: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
242 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7190 - accuracy: 0.6883 - val_loss: 0.5202 - val_accuracy: 0.8589\n",
243 | "Epoch 15/1000\n",
244 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6699 - accuracy: 0.6875\n",
245 | "Epoch 00015: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
246 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7077 - accuracy: 0.6973 - val_loss: 0.4944 - val_accuracy: 0.8652\n",
247 | "Epoch 16/1000\n",
248 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6447 - accuracy: 0.7500\n",
249 | "Epoch 00016: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
250 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6903 - accuracy: 0.6928 - val_loss: 0.4781 - val_accuracy: 0.8805\n",
251 | "Epoch 17/1000\n",
252 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7165 - accuracy: 0.6875\n",
253 | "Epoch 00017: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
254 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6919 - accuracy: 0.6973 - val_loss: 0.4696 - val_accuracy: 0.8895\n",
255 | "Epoch 18/1000\n",
256 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6268 - accuracy: 0.7422\n",
257 | "Epoch 00018: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
258 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6498 - accuracy: 0.7303 - val_loss: 0.4440 - val_accuracy: 0.8967\n",
259 | "Epoch 19/1000\n",
260 | "27/27 [==============================] - ETA: 0s - loss: 0.6499 - accuracy: 0.7261\n",
261 | "Epoch 00019: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
262 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6499 - accuracy: 0.7261 - val_loss: 0.4254 - val_accuracy: 0.9039\n",
263 | "Epoch 20/1000\n",
264 | "26/27 [===========================>..] - ETA: 0s - loss: 0.6386 - accuracy: 0.7236\n",
265 | "Epoch 00020: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
266 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6415 - accuracy: 0.7228 - val_loss: 0.4082 - val_accuracy: 0.9093\n",
267 | "Epoch 21/1000\n",
268 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5631 - accuracy: 0.7500\n",
269 | "Epoch 00021: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
270 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6484 - accuracy: 0.7180 - val_loss: 0.4114 - val_accuracy: 0.9173\n",
271 | "Epoch 22/1000\n",
272 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5775 - accuracy: 0.7812\n",
273 | "Epoch 00022: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
274 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6154 - accuracy: 0.7480 - val_loss: 0.3907 - val_accuracy: 0.9218\n",
275 | "Epoch 23/1000\n",
276 | "25/27 [==========================>...] - ETA: 0s - loss: 0.5967 - accuracy: 0.7588\n",
277 | "Epoch 00023: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
278 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5971 - accuracy: 0.7582 - val_loss: 0.3763 - val_accuracy: 0.9227\n",
279 | "Epoch 24/1000\n",
280 | "26/27 [===========================>..] - ETA: 0s - loss: 0.6064 - accuracy: 0.7569\n",
281 | "Epoch 00024: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
282 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6066 - accuracy: 0.7567 - val_loss: 0.3714 - val_accuracy: 0.9254\n",
283 | "Epoch 25/1000\n",
284 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6211 - accuracy: 0.7422\n",
285 | "Epoch 00025: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
286 | "27/27 [==============================] - 0s 7ms/step - loss: 0.5954 - accuracy: 0.7579 - val_loss: 0.3611 - val_accuracy: 0.9353\n",
287 | "Epoch 26/1000\n",
288 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5621 - accuracy: 0.7812\n",
289 | "Epoch 00026: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
290 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5818 - accuracy: 0.7737 - val_loss: 0.3498 - val_accuracy: 0.9380\n",
291 | "Epoch 27/1000\n",
292 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6431 - accuracy: 0.7500\n",
293 | "Epoch 00027: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
294 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5882 - accuracy: 0.7648 - val_loss: 0.3355 - val_accuracy: 0.9416\n"
295 | ]
296 | },
297 | {
298 | "name": "stdout",
299 | "output_type": "stream",
300 | "text": [
301 | "Epoch 28/1000\n",
302 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5633 - accuracy: 0.8203\n",
303 | "Epoch 00028: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
304 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5746 - accuracy: 0.7702 - val_loss: 0.3273 - val_accuracy: 0.9425\n",
305 | "Epoch 29/1000\n",
306 | "27/27 [==============================] - ETA: 0s - loss: 0.5856 - accuracy: 0.7651\n",
307 | "Epoch 00029: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
308 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5856 - accuracy: 0.7651 - val_loss: 0.3237 - val_accuracy: 0.9434\n",
309 | "Epoch 30/1000\n",
310 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5524 - accuracy: 0.7812\n",
311 | "Epoch 00030: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
312 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5576 - accuracy: 0.7788 - val_loss: 0.3203 - val_accuracy: 0.9452\n",
313 | "Epoch 31/1000\n",
314 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5643 - accuracy: 0.7578\n",
315 | "Epoch 00031: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
316 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5450 - accuracy: 0.7773 - val_loss: 0.3111 - val_accuracy: 0.9443\n",
317 | "Epoch 32/1000\n",
318 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5507 - accuracy: 0.7812\n",
319 | "Epoch 00032: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
320 | "27/27 [==============================] - 0s 6ms/step - loss: 0.5574 - accuracy: 0.7860 - val_loss: 0.3017 - val_accuracy: 0.9434\n",
321 | "Epoch 33/1000\n",
322 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5302 - accuracy: 0.8125\n",
323 | "Epoch 00033: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
324 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5444 - accuracy: 0.7905 - val_loss: 0.2917 - val_accuracy: 0.9479\n",
325 | "Epoch 34/1000\n",
326 | "27/27 [==============================] - ETA: 0s - loss: 0.5421 - accuracy: 0.7848\n",
327 | "Epoch 00034: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
328 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5421 - accuracy: 0.7848 - val_loss: 0.2863 - val_accuracy: 0.9470\n",
329 | "Epoch 35/1000\n",
330 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4633 - accuracy: 0.8125\n",
331 | "Epoch 00035: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
332 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5331 - accuracy: 0.7980 - val_loss: 0.2804 - val_accuracy: 0.9506\n",
333 | "Epoch 36/1000\n",
334 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5972 - accuracy: 0.7812\n",
335 | "Epoch 00036: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
336 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5255 - accuracy: 0.7968 - val_loss: 0.2774 - val_accuracy: 0.9479\n",
337 | "Epoch 37/1000\n",
338 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5851 - accuracy: 0.7578\n",
339 | "Epoch 00037: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
340 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5284 - accuracy: 0.7947 - val_loss: 0.2681 - val_accuracy: 0.9497\n",
341 | "Epoch 38/1000\n",
342 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4775 - accuracy: 0.7812\n",
343 | "Epoch 00038: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
344 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5143 - accuracy: 0.7995 - val_loss: 0.2678 - val_accuracy: 0.9479\n",
345 | "Epoch 39/1000\n",
346 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5495 - accuracy: 0.7812\n",
347 | "Epoch 00039: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
348 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5250 - accuracy: 0.7935 - val_loss: 0.2657 - val_accuracy: 0.9470\n",
349 | "Epoch 40/1000\n",
350 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5244 - accuracy: 0.8203\n",
351 | "Epoch 00040: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
352 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5195 - accuracy: 0.8001 - val_loss: 0.2606 - val_accuracy: 0.9524\n",
353 | "Epoch 41/1000\n",
354 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6996 - accuracy: 0.6953\n",
355 | "Epoch 00041: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
356 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5304 - accuracy: 0.7956 - val_loss: 0.2572 - val_accuracy: 0.9515\n",
357 | "Epoch 42/1000\n",
358 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4843 - accuracy: 0.8281\n",
359 | "Epoch 00042: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
360 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5115 - accuracy: 0.8091 - val_loss: 0.2513 - val_accuracy: 0.9524\n",
361 | "Epoch 43/1000\n",
362 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3830 - accuracy: 0.8594\n",
363 | "Epoch 00043: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
364 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4939 - accuracy: 0.8133 - val_loss: 0.2423 - val_accuracy: 0.9551\n",
365 | "Epoch 44/1000\n",
366 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4531 - accuracy: 0.7969\n",
367 | "Epoch 00044: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
368 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4969 - accuracy: 0.8094 - val_loss: 0.2437 - val_accuracy: 0.9497\n",
369 | "Epoch 45/1000\n",
370 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5198 - accuracy: 0.7812\n",
371 | "Epoch 00045: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
372 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4961 - accuracy: 0.8106 - val_loss: 0.2441 - val_accuracy: 0.9533\n",
373 | "Epoch 46/1000\n",
374 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5912 - accuracy: 0.7812\n",
375 | "Epoch 00046: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
376 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4816 - accuracy: 0.8271 - val_loss: 0.2482 - val_accuracy: 0.9542\n",
377 | "Epoch 47/1000\n",
378 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5292 - accuracy: 0.8047\n",
379 | "Epoch 00047: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
380 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4789 - accuracy: 0.8127 - val_loss: 0.2313 - val_accuracy: 0.9569\n",
381 | "Epoch 48/1000\n",
382 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4428 - accuracy: 0.8125\n",
383 | "Epoch 00048: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
384 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4901 - accuracy: 0.8079 - val_loss: 0.2319 - val_accuracy: 0.9560\n",
385 | "Epoch 49/1000\n",
386 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4565 - accuracy: 0.8281\n",
387 | "Epoch 00049: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
388 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4893 - accuracy: 0.8154 - val_loss: 0.2300 - val_accuracy: 0.9533\n",
389 | "Epoch 50/1000\n",
390 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5185 - accuracy: 0.7812\n",
391 | "Epoch 00050: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
392 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5079 - accuracy: 0.8112 - val_loss: 0.2386 - val_accuracy: 0.9524\n",
393 | "Epoch 51/1000\n",
394 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4280 - accuracy: 0.8203\n",
395 | "Epoch 00051: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
396 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4692 - accuracy: 0.8205 - val_loss: 0.2332 - val_accuracy: 0.9578\n",
397 | "Epoch 52/1000\n",
398 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5299 - accuracy: 0.8203\n",
399 | "Epoch 00052: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
400 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4907 - accuracy: 0.8145 - val_loss: 0.2336 - val_accuracy: 0.9569\n",
401 | "Epoch 53/1000\n",
402 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5856 - accuracy: 0.7969\n",
403 | "Epoch 00053: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
404 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4882 - accuracy: 0.8181 - val_loss: 0.2306 - val_accuracy: 0.9605\n",
405 | "Epoch 54/1000\n",
406 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4466 - accuracy: 0.8047\n",
407 | "Epoch 00054: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
408 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4641 - accuracy: 0.8235 - val_loss: 0.2218 - val_accuracy: 0.9596\n"
409 | ]
410 | },
411 | {
412 | "name": "stdout",
413 | "output_type": "stream",
414 | "text": [
415 | "Epoch 55/1000\n",
416 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3760 - accuracy: 0.8672\n",
417 | "Epoch 00055: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
418 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4630 - accuracy: 0.8241 - val_loss: 0.2242 - val_accuracy: 0.9578\n",
419 | "Epoch 56/1000\n",
420 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4607 - accuracy: 0.7734\n",
421 | "Epoch 00056: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
422 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4546 - accuracy: 0.8277 - val_loss: 0.2168 - val_accuracy: 0.9605\n",
423 | "Epoch 57/1000\n",
424 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4946 - accuracy: 0.7969\n",
425 | "Epoch 00057: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
426 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4675 - accuracy: 0.8214 - val_loss: 0.2211 - val_accuracy: 0.9578\n",
427 | "Epoch 58/1000\n",
428 | "25/27 [==========================>...] - ETA: 0s - loss: 0.4393 - accuracy: 0.8334\n",
429 | "Epoch 00058: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
430 | "27/27 [==============================] - 0s 4ms/step - loss: 0.4418 - accuracy: 0.8325 - val_loss: 0.2115 - val_accuracy: 0.9632\n",
431 | "Epoch 59/1000\n",
432 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4987 - accuracy: 0.7969\n",
433 | "Epoch 00059: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
434 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4646 - accuracy: 0.8217 - val_loss: 0.2116 - val_accuracy: 0.9596\n",
435 | "Epoch 60/1000\n",
436 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4904 - accuracy: 0.7812\n",
437 | "Epoch 00060: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
438 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4576 - accuracy: 0.8232 - val_loss: 0.2108 - val_accuracy: 0.9569\n",
439 | "Epoch 61/1000\n",
440 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5147 - accuracy: 0.8281\n",
441 | "Epoch 00061: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
442 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4646 - accuracy: 0.8253 - val_loss: 0.2174 - val_accuracy: 0.9587\n",
443 | "Epoch 62/1000\n",
444 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3386 - accuracy: 0.8750\n",
445 | "Epoch 00062: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
446 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4433 - accuracy: 0.8310 - val_loss: 0.2145 - val_accuracy: 0.9560\n",
447 | "Epoch 63/1000\n",
448 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4222 - accuracy: 0.8594\n",
449 | "Epoch 00063: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
450 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4624 - accuracy: 0.8283 - val_loss: 0.2099 - val_accuracy: 0.9569\n",
451 | "Epoch 64/1000\n",
452 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4376 - accuracy: 0.8203\n",
453 | "Epoch 00064: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
454 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4601 - accuracy: 0.8235 - val_loss: 0.2075 - val_accuracy: 0.9641\n",
455 | "Epoch 65/1000\n",
456 | "27/27 [==============================] - ETA: 0s - loss: 0.4676 - accuracy: 0.8265\n",
457 | "Epoch 00065: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
458 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4676 - accuracy: 0.8265 - val_loss: 0.2172 - val_accuracy: 0.9551\n",
459 | "Epoch 66/1000\n",
460 | "26/27 [===========================>..] - ETA: 0s - loss: 0.4434 - accuracy: 0.8368\n",
461 | "Epoch 00066: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
462 | "27/27 [==============================] - 1s 20ms/step - loss: 0.4429 - accuracy: 0.8370 - val_loss: 0.2154 - val_accuracy: 0.9578\n",
463 | "Epoch 67/1000\n",
464 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4827 - accuracy: 0.8125\n",
465 | "Epoch 00067: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
466 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4434 - accuracy: 0.8358 - val_loss: 0.2090 - val_accuracy: 0.9587\n",
467 | "Epoch 68/1000\n",
468 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5810 - accuracy: 0.7656\n",
469 | "Epoch 00068: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
470 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4892 - accuracy: 0.8049 - val_loss: 0.2160 - val_accuracy: 0.9578\n",
471 | "Epoch 69/1000\n",
472 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4375 - accuracy: 0.7812\n",
473 | "Epoch 00069: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
474 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4597 - accuracy: 0.8250 - val_loss: 0.2100 - val_accuracy: 0.9605\n",
475 | "Epoch 70/1000\n",
476 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3782 - accuracy: 0.8359\n",
477 | "Epoch 00070: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
478 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4408 - accuracy: 0.8349 - val_loss: 0.2087 - val_accuracy: 0.9596\n",
479 | "Epoch 71/1000\n",
480 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4578 - accuracy: 0.8438\n",
481 | "Epoch 00071: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
482 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4425 - accuracy: 0.8355 - val_loss: 0.2075 - val_accuracy: 0.9587\n",
483 | "Epoch 72/1000\n",
484 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4267 - accuracy: 0.8438\n",
485 | "Epoch 00072: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
486 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4581 - accuracy: 0.8292 - val_loss: 0.2059 - val_accuracy: 0.9623\n",
487 | "Epoch 73/1000\n",
488 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4450 - accuracy: 0.8750\n",
489 | "Epoch 00073: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
490 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4508 - accuracy: 0.8403 - val_loss: 0.2083 - val_accuracy: 0.9614\n",
491 | "Epoch 74/1000\n",
492 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3858 - accuracy: 0.8906\n",
493 | "Epoch 00074: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
494 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4472 - accuracy: 0.8361 - val_loss: 0.2043 - val_accuracy: 0.9650\n",
495 | "Epoch 75/1000\n",
496 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4439 - accuracy: 0.8359\n",
497 | "Epoch 00075: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
498 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4515 - accuracy: 0.8325 - val_loss: 0.2138 - val_accuracy: 0.9632\n",
499 | "Epoch 76/1000\n",
500 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3983 - accuracy: 0.8203\n",
501 | "Epoch 00076: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
502 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4462 - accuracy: 0.8334 - val_loss: 0.2065 - val_accuracy: 0.9623\n",
503 | "Epoch 77/1000\n",
504 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5020 - accuracy: 0.8047\n",
505 | "Epoch 00077: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
506 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4325 - accuracy: 0.8388 - val_loss: 0.2061 - val_accuracy: 0.9605\n",
507 | "Epoch 78/1000\n",
508 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3215 - accuracy: 0.8672\n",
509 | "Epoch 00078: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
510 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4394 - accuracy: 0.8391 - val_loss: 0.2054 - val_accuracy: 0.9578\n",
511 | "Epoch 79/1000\n",
512 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4025 - accuracy: 0.8359\n",
513 | "Epoch 00079: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
514 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4370 - accuracy: 0.8310 - val_loss: 0.2031 - val_accuracy: 0.9605\n",
515 | "Epoch 80/1000\n",
516 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4505 - accuracy: 0.8125\n",
517 | "Epoch 00080: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
518 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4419 - accuracy: 0.8340 - val_loss: 0.2010 - val_accuracy: 0.9596\n",
519 | "Epoch 81/1000\n",
520 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5287 - accuracy: 0.7891\n",
521 | "Epoch 00081: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
522 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4369 - accuracy: 0.8304 - val_loss: 0.2081 - val_accuracy: 0.9578\n"
523 | ]
524 | },
525 | {
526 | "name": "stdout",
527 | "output_type": "stream",
528 | "text": [
529 | "Epoch 82/1000\n",
530 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5132 - accuracy: 0.8047\n",
531 | "Epoch 00082: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
532 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4360 - accuracy: 0.8460 - val_loss: 0.2045 - val_accuracy: 0.9605\n",
533 | "Epoch 83/1000\n",
534 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4239 - accuracy: 0.8125\n",
535 | "Epoch 00083: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
536 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4511 - accuracy: 0.8313 - val_loss: 0.1984 - val_accuracy: 0.9605\n",
537 | "Epoch 84/1000\n",
538 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4853 - accuracy: 0.8203\n",
539 | "Epoch 00084: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
540 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4357 - accuracy: 0.8304 - val_loss: 0.2024 - val_accuracy: 0.9623\n",
541 | "Epoch 85/1000\n",
542 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4782 - accuracy: 0.8125\n",
543 | "Epoch 00085: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
544 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4320 - accuracy: 0.8424 - val_loss: 0.2015 - val_accuracy: 0.9587\n",
545 | "Epoch 86/1000\n",
546 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3985 - accuracy: 0.8828\n",
547 | "Epoch 00086: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
548 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4422 - accuracy: 0.8349 - val_loss: 0.2087 - val_accuracy: 0.9587\n",
549 | "Epoch 87/1000\n",
550 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4810 - accuracy: 0.8359\n",
551 | "Epoch 00087: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
552 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4393 - accuracy: 0.8316 - val_loss: 0.2105 - val_accuracy: 0.9605\n",
553 | "Epoch 88/1000\n",
554 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4819 - accuracy: 0.8125\n",
555 | "Epoch 00088: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
556 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4381 - accuracy: 0.8400 - val_loss: 0.2070 - val_accuracy: 0.9623\n",
557 | "Epoch 89/1000\n",
558 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5002 - accuracy: 0.8281\n",
559 | "Epoch 00089: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
560 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4428 - accuracy: 0.8343 - val_loss: 0.2044 - val_accuracy: 0.9605\n",
561 | "Epoch 90/1000\n",
562 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3314 - accuracy: 0.9062\n",
563 | "Epoch 00090: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
564 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4156 - accuracy: 0.8406 - val_loss: 0.2026 - val_accuracy: 0.9578\n",
565 | "Epoch 91/1000\n",
566 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3850 - accuracy: 0.8594\n",
567 | "Epoch 00091: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
568 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4125 - accuracy: 0.8439 - val_loss: 0.2058 - val_accuracy: 0.9551\n",
569 | "Epoch 92/1000\n",
570 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4976 - accuracy: 0.7734\n",
571 | "Epoch 00092: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
572 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4217 - accuracy: 0.8415 - val_loss: 0.1999 - val_accuracy: 0.9623\n",
573 | "Epoch 93/1000\n",
574 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4155 - accuracy: 0.8516\n",
575 | "Epoch 00093: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
576 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4166 - accuracy: 0.8412 - val_loss: 0.1947 - val_accuracy: 0.9614\n",
577 | "Epoch 94/1000\n",
578 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3642 - accuracy: 0.8750\n",
579 | "Epoch 00094: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
580 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4204 - accuracy: 0.8418 - val_loss: 0.2008 - val_accuracy: 0.9569\n",
581 | "Epoch 95/1000\n",
582 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3773 - accuracy: 0.8594\n",
583 | "Epoch 00095: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
584 | "27/27 [==============================] - 0s 4ms/step - loss: 0.4171 - accuracy: 0.8421 - val_loss: 0.1945 - val_accuracy: 0.9596\n",
585 | "Epoch 96/1000\n",
586 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4086 - accuracy: 0.8672\n",
587 | "Epoch 00096: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
588 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4124 - accuracy: 0.8520 - val_loss: 0.1930 - val_accuracy: 0.9614\n",
589 | "Epoch 97/1000\n",
590 | " 1/27 [>.............................] - ETA: 0s - loss: 0.2914 - accuracy: 0.8906\n",
591 | "Epoch 00097: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
592 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4303 - accuracy: 0.8367 - val_loss: 0.1958 - val_accuracy: 0.9569\n",
593 | "Epoch 98/1000\n",
594 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4562 - accuracy: 0.8672\n",
595 | "Epoch 00098: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
596 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4191 - accuracy: 0.8400 - val_loss: 0.1950 - val_accuracy: 0.9596\n",
597 | "Epoch 99/1000\n",
598 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3967 - accuracy: 0.8438\n",
599 | "Epoch 00099: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
600 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4260 - accuracy: 0.8418 - val_loss: 0.2044 - val_accuracy: 0.9551\n",
601 | "Epoch 100/1000\n",
602 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4173 - accuracy: 0.8516\n",
603 | "Epoch 00100: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
604 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4200 - accuracy: 0.8442 - val_loss: 0.2066 - val_accuracy: 0.9560\n",
605 | "Epoch 101/1000\n",
606 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3892 - accuracy: 0.8438\n",
607 | "Epoch 00101: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
608 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4245 - accuracy: 0.8418 - val_loss: 0.2058 - val_accuracy: 0.9578\n",
609 | "Epoch 102/1000\n",
610 | " 1/27 [>.............................] - ETA: 0s - loss: 0.2965 - accuracy: 0.8984\n",
611 | "Epoch 00102: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
612 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4100 - accuracy: 0.8445 - val_loss: 0.2093 - val_accuracy: 0.9578\n",
613 | "Epoch 103/1000\n",
614 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4241 - accuracy: 0.8125\n",
615 | "Epoch 00103: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
616 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4189 - accuracy: 0.8403 - val_loss: 0.1928 - val_accuracy: 0.9659\n",
617 | "Epoch 104/1000\n",
618 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6486 - accuracy: 0.7891\n",
619 | "Epoch 00104: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
620 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4223 - accuracy: 0.8424 - val_loss: 0.1964 - val_accuracy: 0.9596\n",
621 | "Epoch 105/1000\n",
622 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4589 - accuracy: 0.8281\n",
623 | "Epoch 00105: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
624 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4199 - accuracy: 0.8418 - val_loss: 0.1971 - val_accuracy: 0.9623\n",
625 | "Epoch 106/1000\n",
626 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4395 - accuracy: 0.8203\n",
627 | "Epoch 00106: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
628 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4028 - accuracy: 0.8511 - val_loss: 0.1912 - val_accuracy: 0.9641\n",
629 | "Epoch 107/1000\n",
630 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4061 - accuracy: 0.8594\n",
631 | "Epoch 00107: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
632 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4093 - accuracy: 0.8556 - val_loss: 0.1854 - val_accuracy: 0.9668\n",
633 | "Epoch 108/1000\n",
634 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4142 - accuracy: 0.8438\n",
635 | "Epoch 00108: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
636 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4249 - accuracy: 0.8367 - val_loss: 0.2008 - val_accuracy: 0.9614\n"
637 | ]
638 | },
639 | {
640 | "name": "stdout",
641 | "output_type": "stream",
642 | "text": [
643 | "Epoch 109/1000\n",
644 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5373 - accuracy: 0.7969\n",
645 | "Epoch 00109: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
646 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4198 - accuracy: 0.8379 - val_loss: 0.1955 - val_accuracy: 0.9659\n",
647 | "Epoch 110/1000\n",
648 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3872 - accuracy: 0.8281\n",
649 | "Epoch 00110: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
650 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4077 - accuracy: 0.8505 - val_loss: 0.2020 - val_accuracy: 0.9614\n",
651 | "Epoch 111/1000\n",
652 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3474 - accuracy: 0.8828\n",
653 | "Epoch 00111: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
654 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4132 - accuracy: 0.8433 - val_loss: 0.1984 - val_accuracy: 0.9632\n",
655 | "Epoch 112/1000\n",
656 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4205 - accuracy: 0.8672\n",
657 | "Epoch 00112: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
658 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4182 - accuracy: 0.8397 - val_loss: 0.1973 - val_accuracy: 0.9614\n",
659 | "Epoch 113/1000\n",
660 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4995 - accuracy: 0.8125\n",
661 | "Epoch 00113: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
662 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4042 - accuracy: 0.8478 - val_loss: 0.1922 - val_accuracy: 0.9650\n",
663 | "Epoch 114/1000\n",
664 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3640 - accuracy: 0.8750\n",
665 | "Epoch 00114: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
666 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4072 - accuracy: 0.8430 - val_loss: 0.1868 - val_accuracy: 0.9596\n",
667 | "Epoch 115/1000\n",
668 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5042 - accuracy: 0.8203\n",
669 | "Epoch 00115: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
670 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3999 - accuracy: 0.8535 - val_loss: 0.1966 - val_accuracy: 0.9605\n",
671 | "Epoch 116/1000\n",
672 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5355 - accuracy: 0.7422\n",
673 | "Epoch 00116: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
674 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4174 - accuracy: 0.8433 - val_loss: 0.1919 - val_accuracy: 0.9659\n",
675 | "Epoch 117/1000\n",
676 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3729 - accuracy: 0.8750\n",
677 | "Epoch 00117: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
678 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4101 - accuracy: 0.8451 - val_loss: 0.1932 - val_accuracy: 0.9578\n",
679 | "Epoch 118/1000\n",
680 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3193 - accuracy: 0.8828\n",
681 | "Epoch 00118: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
682 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4260 - accuracy: 0.8379 - val_loss: 0.1865 - val_accuracy: 0.9641\n",
683 | "Epoch 119/1000\n",
684 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3852 - accuracy: 0.8438\n",
685 | "Epoch 00119: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
686 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3797 - accuracy: 0.8622 - val_loss: 0.1900 - val_accuracy: 0.9677\n",
687 | "Epoch 120/1000\n",
688 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3636 - accuracy: 0.8594\n",
689 | "Epoch 00120: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
690 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4017 - accuracy: 0.8460 - val_loss: 0.1908 - val_accuracy: 0.9659\n",
691 | "Epoch 121/1000\n",
692 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4521 - accuracy: 0.8359\n",
693 | "Epoch 00121: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
694 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4023 - accuracy: 0.8538 - val_loss: 0.1935 - val_accuracy: 0.9659\n",
695 | "Epoch 122/1000\n",
696 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4849 - accuracy: 0.8203\n",
697 | "Epoch 00122: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
698 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4186 - accuracy: 0.8457 - val_loss: 0.1937 - val_accuracy: 0.9659\n",
699 | "Epoch 123/1000\n",
700 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4021 - accuracy: 0.8516\n",
701 | "Epoch 00123: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
702 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4156 - accuracy: 0.8478 - val_loss: 0.1907 - val_accuracy: 0.9632\n",
703 | "Epoch 124/1000\n",
704 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3601 - accuracy: 0.8906\n",
705 | "Epoch 00124: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
706 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3948 - accuracy: 0.8550 - val_loss: 0.1862 - val_accuracy: 0.9605\n",
707 | "Epoch 125/1000\n",
708 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4446 - accuracy: 0.7891\n",
709 | "Epoch 00125: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
710 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4152 - accuracy: 0.8520 - val_loss: 0.1888 - val_accuracy: 0.9623\n",
711 | "Epoch 126/1000\n",
712 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3733 - accuracy: 0.8438\n",
713 | "Epoch 00126: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
714 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3913 - accuracy: 0.8550 - val_loss: 0.1937 - val_accuracy: 0.9632\n",
715 | "Epoch 127/1000\n",
716 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3000 - accuracy: 0.8828\n",
717 | "Epoch 00127: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n",
718 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3820 - accuracy: 0.8583 - val_loss: 0.1867 - val_accuracy: 0.9632\n",
719 | "Epoch 00127: early stopping\n"
720 | ]
721 | },
722 | {
723 | "data": {
724 | "text/plain": [
725 | "