├── mediapipe test.py ├── utils ├── __init__.py └── cvfpscalc.py ├── model ├── keypoint_classifier │ ├── keypoint_classifier_label.csv │ ├── keypoint_classifier.hdf5 │ ├── keypoint_classifier.tflite │ └── keypoint_classifier.py ├── point_history_classifier │ ├── point_history_classifier_label.csv │ ├── point_history_classifier.hdf5 │ ├── point_history_classifier.tflite │ └── point_history_classifier.py └── __init__.py ├── README.md ├── app.py ├── keypoint_classification.ipynb └── keypoint_classification_EN.ipynb /mediapipe test.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- 1 | from utils.cvfpscalc import CvFpsCalc -------------------------------------------------------------------------------- /model/keypoint_classifier/keypoint_classifier_label.csv: -------------------------------------------------------------------------------- 1 | Open 2 | Close 3 | Pointer 4 | OK 5 | -------------------------------------------------------------------------------- /model/point_history_classifier/point_history_classifier_label.csv: -------------------------------------------------------------------------------- 1 | Stop 2 | Clockwise 3 | Counter Clockwise 4 | Move 5 | -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | from model.keypoint_classifier.keypoint_classifier import KeyPointClassifier 2 | from model.point_history_classifier.point_history_classifier import PointHistoryClassifier -------------------------------------------------------------------------------- /model/keypoint_classifier/keypoint_classifier.hdf5: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/keypoint_classifier/keypoint_classifier.hdf5 -------------------------------------------------------------------------------- /model/keypoint_classifier/keypoint_classifier.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/keypoint_classifier/keypoint_classifier.tflite -------------------------------------------------------------------------------- /model/point_history_classifier/point_history_classifier.hdf5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/point_history_classifier/point_history_classifier.hdf5 -------------------------------------------------------------------------------- /model/point_history_classifier/point_history_classifier.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SuryaMoorthyu/handgesture-detection-for-visually-impaired-people/HEAD/model/point_history_classifier/point_history_classifier.tflite -------------------------------------------------------------------------------- /utils/cvfpscalc.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | import cv2 as cv 3 | 4 | 5 | class CvFpsCalc(object): 6 | def __init__(self, buffer_len=1): 7 | self._start_tick = cv.getTickCount() 8 | self._freq = 1000.0 / cv.getTickFrequency() 9 | self._difftimes = deque(maxlen=buffer_len) 10 | 11 | def get(self): 12 | current_tick = cv.getTickCount() 13 | different_time = (current_tick - self._start_tick) * self._freq 14 | self._start_tick = current_tick 15 | 16 | self._difftimes.append(different_time) 17 | 18 | fps = 
1000.0 / (sum(self._difftimes) / len(self._difftimes)) 19 | fps_rounded = round(fps, 2) 20 | 21 | return fps_rounded 22 | -------------------------------------------------------------------------------- /model/keypoint_classifier/keypoint_classifier.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | 7 | class KeyPointClassifier(object): 8 | def __init__( 9 | self, 10 | model_path='model/keypoint_classifier/keypoint_classifier.tflite', 11 | num_threads=1, 12 | ): 13 | self.interpreter = tf.lite.Interpreter(model_path=model_path, 14 | num_threads=num_threads) 15 | 16 | self.interpreter.allocate_tensors() 17 | self.input_details = self.interpreter.get_input_details() 18 | self.output_details = self.interpreter.get_output_details() 19 | 20 | def __call__( 21 | self, 22 | landmark_list, 23 | ): 24 | input_details_tensor_index = self.input_details[0]['index'] 25 | self.interpreter.set_tensor( 26 | input_details_tensor_index, 27 | np.array([landmark_list], dtype=np.float32)) 28 | self.interpreter.invoke() 29 | 30 | output_details_tensor_index = self.output_details[0]['index'] 31 | 32 | result = self.interpreter.get_tensor(output_details_tensor_index) 33 | 34 | result_index = np.argmax(np.squeeze(result)) 35 | 36 | return result_index 37 | -------------------------------------------------------------------------------- /model/point_history_classifier/point_history_classifier.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | 7 | class PointHistoryClassifier(object): 8 | def __init__( 9 | self, 10 | model_path='model/point_history_classifier/point_history_classifier.tflite', 11 | score_th=0.5, 12 | invalid_value=0, 13 | num_threads=1, 14 | ): 15 | self.interpreter = 
tf.lite.Interpreter(model_path=model_path, 16 | num_threads=num_threads) 17 | 18 | self.interpreter.allocate_tensors() 19 | self.input_details = self.interpreter.get_input_details() 20 | self.output_details = self.interpreter.get_output_details() 21 | 22 | self.score_th = score_th 23 | self.invalid_value = invalid_value 24 | 25 | def __call__( 26 | self, 27 | point_history, 28 | ): 29 | input_details_tensor_index = self.input_details[0]['index'] 30 | self.interpreter.set_tensor( 31 | input_details_tensor_index, 32 | np.array([point_history], dtype=np.float32)) 33 | self.interpreter.invoke() 34 | 35 | output_details_tensor_index = self.output_details[0]['index'] 36 | 37 | result = self.interpreter.get_tensor(output_details_tensor_index) 38 | 39 | result_index = np.argmax(np.squeeze(result)) 40 | 41 | if np.squeeze(result)[result_index] < self.score_th: 42 | result_index = self.invalid_value 43 | 44 | return result_index 45 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hand-gesture-recognition-using-mediapipe 2 | Estimate hand pose using MediaPipe (Python version).
This is a sample 3 | program that recognizes hand signs and finger gestures with a simple MLP using the detected key points. 4 |
❗ _️**This is English Translated version of the [original repo](https://github.com/Kazuhito00/hand-gesture-recognition-using-mediapipe). All Content is translated to english along with comments and notebooks**_ ❗ 5 |
6 | ![mqlrf-s6x16](https://user-images.githubusercontent.com/37477845/102222442-c452cd00-3f26-11eb-93ec-c387c98231be.gif) 7 | 8 | This repository contains the following contents. 9 | * Sample program 10 | * Hand sign recognition model(TFLite) 11 | * Finger gesture recognition model(TFLite) 12 | * Learning data for hand sign recognition and notebook for learning 13 | * Learning data for finger gesture recognition and notebook for learning 14 | 15 | # Requirements 16 | * mediapipe 0.8.1 17 | * OpenCV 3.4.2 or Later 18 | * Tensorflow 2.3.0 or Later
tf-nightly 2.5.0.dev or later (Only when creating a TFLite for an LSTM model) 19 | * scikit-learn 0.23.2 or Later (Only if you want to display the confusion matrix) 20 | * matplotlib 3.3.2 or Later (Only if you want to display the confusion matrix) 21 | 22 | # Demo 23 | Here's how to run the demo using your webcam. 24 | ```bash 25 | python app.py 26 | ``` 27 | 28 | The following options can be specified when running the demo. 29 | * --device
Specifying the camera device number (Default:0) 30 | * --width
Width at the time of camera capture (Default:960) 31 | * --height
Height at the time of camera capture (Default:540) 32 | * --use_static_image_mode
Whether to use static_image_mode option for MediaPipe inference (Default:Unspecified) 33 | * --min_detection_confidence
34 | Detection confidence threshold (Default:0.7) 35 | * --min_tracking_confidence<br>
36 | Tracking confidence threshold (Default:0.5) 37 | 38 | # Directory 39 |
 40 | │  app.py
 41 | │  keypoint_classification.ipynb
 42 | │  point_history_classification.ipynb
 43 | │  
 44 | ├─model
 45 | │  ├─keypoint_classifier
 46 | │  │  │  keypoint.csv
 47 | │  │  │  keypoint_classifier.hdf5
 48 | │  │  │  keypoint_classifier.py
 49 | │  │  │  keypoint_classifier.tflite
 50 | │  │  └─ keypoint_classifier_label.csv
 51 | │  │          
 52 | │  └─point_history_classifier
 53 | │      │  point_history.csv
 54 | │      │  point_history_classifier.hdf5
 55 | │      │  point_history_classifier.py
 56 | │      │  point_history_classifier.tflite
 57 | │      └─ point_history_classifier_label.csv
 58 | │          
 59 | └─utils
 60 |     └─cvfpscalc.py
 61 | 
62 | ### app.py 63 | This is a sample program for inference.
64 | In addition, you can collect learning data (key points) for hand sign recognition,<br>
65 | as well as training data (index finger coordinate history) for finger gesture recognition. 66 | 67 | ### keypoint_classification.ipynb 68 | This is a model training script for hand sign recognition. 69 | 70 | ### point_history_classification.ipynb 71 | This is a model training script for finger gesture recognition. 72 | 73 | ### model/keypoint_classifier 74 | This directory stores files related to hand sign recognition.<br>
75 | The following files are stored. 76 | * Training data(keypoint.csv) 77 | * Trained model(keypoint_classifier.tflite) 78 | * Label data(keypoint_classifier_label.csv) 79 | * Inference module(keypoint_classifier.py) 80 | 81 | ### model/point_history_classifier 82 | This directory stores files related to finger gesture recognition.
83 | The following files are stored. 84 | * Training data(point_history.csv) 85 | * Trained model(point_history_classifier.tflite) 86 | * Label data(point_history_classifier_label.csv) 87 | * Inference module(point_history_classifier.py) 88 | 89 | ### utils/cvfpscalc.py 90 | This is a module for FPS measurement. 91 | 92 | # Training 93 | Hand sign recognition and finger gesture recognition can add and change training data and retrain the model. 94 | 95 | ### Hand sign recognition training 96 | #### 1.Learning data collection 97 | Press "k" to enter the mode to save key points(displayed as 「MODE:Logging Key Point」)
98 |

99 | If you press "0" to "9", the key points will be added to "model/keypoint_classifier/keypoint.csv" as shown below.
100 | 1st column: Pressed number (used as class ID), 2nd and subsequent columns: Key point coordinates
101 |

102 | The key point coordinates are the ones that have undergone the following preprocessing up to ④.
103 | 104 |

105 | In the initial state, three types of learning data are included: open hand (class ID: 0), closed hand (class ID: 1), and pointing (class ID: 2).<br>
106 | If necessary, add 3 or later, or delete the existing data of csv to prepare the training data.
107 |    108 | 109 | #### 2.Model training 110 | Open "[keypoint_classification.ipynb](keypoint_classification.ipynb)" in Jupyter Notebook and execute from top to bottom.
111 | To change the number of training data classes, change the value of "NUM_CLASSES = 3"
and modify the label of "model/keypoint_classifier/keypoint_classifier_label.csv" as appropriate.

112 | 113 | #### X.Model structure 114 | The image of the model prepared in "[keypoint_classification.ipynb](keypoint_classification.ipynb)" is as follows. 115 |

116 | 117 | ### Finger gesture recognition training 118 | #### 1.Learning data collection 119 | Press "h" to enter the mode to save the history of fingertip coordinates (displayed as "MODE:Logging Point History").
120 |

121 | If you press "0" to "9", the key points will be added to "model/point_history_classifier/point_history.csv" as shown below.
122 | 1st column: Pressed number (used as class ID), 2nd and subsequent columns: Coordinate history
123 |

124 | The key point coordinates are the ones that have undergone the following preprocessing up to ④.
125 |

126 | In the initial state, 4 types of learning data are included: stationary (class ID: 0), clockwise (class ID: 1), counterclockwise (class ID: 2), and moving (class ID: 3).<br>
127 | If necessary, add 4 or later, or delete the existing data of csv to prepare the training data.<br>
128 |     129 | 130 | #### 2.Model training 131 | Open "[point_history_classification.ipynb](point_history_classification.ipynb)" in Jupyter Notebook and execute from top to bottom.
132 | To change the number of training data classes, change the value of "NUM_CLASSES = 4" and
modify the label of "model/point_history_classifier/point_history_classifier_label.csv" as appropriate.

133 | 134 | #### X.Model structure 135 | The image of the model prepared in "[point_history_classification.ipynb](point_history_classification.ipynb)" is as follows. 136 |
137 | The model using "LSTM" is as follows.
Please change "use_lstm = False" to "True" when using (tf-nightly required (as of 2020/12/16))
138 | 139 | 140 | # Reference 141 | * [MediaPipe](https://mediapipe.dev/) 142 | 143 | # Author 144 | Kazuhito Takahashi(https://twitter.com/KzhtTkhs) 145 | 146 | # Translation and other improvements 147 | Nikita Kiselov(https://github.com/kinivi) 148 | 149 | # License 150 | hand-gesture-recognition-using-mediapipe is under [Apache v2 license](LICENSE). 151 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import csv 4 | import copy 5 | import argparse 6 | import itertools 7 | from collections import Counter 8 | from collections import deque 9 | 10 | import cv2 as cv 11 | import numpy as np 12 | import mediapipe as mp 13 | 14 | from utils import CvFpsCalc 15 | from model import KeyPointClassifier 16 | from model import PointHistoryClassifier 17 | 18 | 19 | def get_args(): 20 | parser = argparse.ArgumentParser() 21 | 22 | parser.add_argument("--device", type=int, default=0) 23 | parser.add_argument("--width", help='cap width', type=int, default=960) 24 | parser.add_argument("--height", help='cap height', type=int, default=540) 25 | 26 | parser.add_argument('--use_static_image_mode', action='store_true') 27 | parser.add_argument("--min_detection_confidence", 28 | help='min_detection_confidence', 29 | type=float, 30 | default=0.7) 31 | parser.add_argument("--min_tracking_confidence", 32 | help='min_tracking_confidence', 33 | type=int, 34 | default=0.5) 35 | 36 | args = parser.parse_args() 37 | 38 | return args 39 | 40 | 41 | def main(): 42 | # Argument parsing ################################################################# 43 | args = get_args() 44 | 45 | cap_device = args.device 46 | cap_width = args.width 47 | cap_height = args.height 48 | 49 | use_static_image_mode = args.use_static_image_mode 50 | min_detection_confidence = args.min_detection_confidence 51 | min_tracking_confidence = 
args.min_tracking_confidence 52 | 53 | use_brect = True 54 | 55 | # Camera preparation ############################################################### 56 | cap = cv.VideoCapture(cap_device) 57 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width) 58 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height) 59 | 60 | # Model load ############################################################# 61 | mp_hands = mp.solutions.hands 62 | hands = mp_hands.Hands( 63 | static_image_mode=use_static_image_mode, 64 | max_num_hands=1, 65 | min_detection_confidence=min_detection_confidence, 66 | min_tracking_confidence=min_tracking_confidence, 67 | ) 68 | 69 | keypoint_classifier = KeyPointClassifier() 70 | 71 | point_history_classifier = PointHistoryClassifier() 72 | 73 | # Read labels ########################################################### 74 | with open('model/keypoint_classifier/keypoint_classifier_label.csv', 75 | encoding='utf-8-sig') as f: 76 | keypoint_classifier_labels = csv.reader(f) 77 | keypoint_classifier_labels = [ 78 | row[0] for row in keypoint_classifier_labels 79 | ] 80 | with open( 81 | 'model/point_history_classifier/point_history_classifier_label.csv', 82 | encoding='utf-8-sig') as f: 83 | point_history_classifier_labels = csv.reader(f) 84 | point_history_classifier_labels = [ 85 | row[0] for row in point_history_classifier_labels 86 | ] 87 | 88 | # FPS Measurement ######################################################## 89 | cvFpsCalc = CvFpsCalc(buffer_len=10) 90 | 91 | # Coordinate history ################################################################# 92 | history_length = 16 93 | point_history = deque(maxlen=history_length) 94 | 95 | # Finger gesture history ################################################ 96 | finger_gesture_history = deque(maxlen=history_length) 97 | 98 | # ######################################################################## 99 | mode = 0 100 | 101 | while True: 102 | fps = cvFpsCalc.get() 103 | 104 | # Process Key (ESC: end) 
################################################# 105 | key = cv.waitKey(10) 106 | if key == 27: # ESC 107 | break 108 | number, mode = select_mode(key, mode) 109 | 110 | # Camera capture ##################################################### 111 | ret, image = cap.read() 112 | if not ret: 113 | break 114 | image = cv.flip(image, 1) # Mirror display 115 | debug_image = copy.deepcopy(image) 116 | 117 | # Detection implementation ############################################################# 118 | image = cv.cvtColor(image, cv.COLOR_BGR2RGB) 119 | 120 | image.flags.writeable = False 121 | results = hands.process(image) 122 | image.flags.writeable = True 123 | 124 | # #################################################################### 125 | if results.multi_hand_landmarks is not None: 126 | for hand_landmarks, handedness in zip(results.multi_hand_landmarks, 127 | results.multi_handedness): 128 | # Bounding box calculation 129 | brect = calc_bounding_rect(debug_image, hand_landmarks) 130 | # Landmark calculation 131 | landmark_list = calc_landmark_list(debug_image, hand_landmarks) 132 | 133 | # Conversion to relative coordinates / normalized coordinates 134 | pre_processed_landmark_list = pre_process_landmark( 135 | landmark_list) 136 | pre_processed_point_history_list = pre_process_point_history( 137 | debug_image, point_history) 138 | # Write to the dataset file 139 | logging_csv(number, mode, pre_processed_landmark_list, 140 | pre_processed_point_history_list) 141 | 142 | # Hand sign classification 143 | hand_sign_id = keypoint_classifier(pre_processed_landmark_list) 144 | if hand_sign_id == 2: # Point gesture 145 | point_history.append(landmark_list[8]) 146 | else: 147 | point_history.append([0, 0]) 148 | 149 | # Finger gesture classification 150 | finger_gesture_id = 0 151 | point_history_len = len(pre_processed_point_history_list) 152 | if point_history_len == (history_length * 2): 153 | finger_gesture_id = point_history_classifier( 154 | 
pre_processed_point_history_list) 155 | 156 | # Calculates the gesture IDs in the latest detection 157 | finger_gesture_history.append(finger_gesture_id) 158 | most_common_fg_id = Counter( 159 | finger_gesture_history).most_common() 160 | 161 | # Drawing part 162 | debug_image = draw_bounding_rect(use_brect, debug_image, brect) 163 | debug_image = draw_landmarks(debug_image, landmark_list) 164 | debug_image = draw_info_text( 165 | debug_image, 166 | brect, 167 | handedness, 168 | keypoint_classifier_labels[hand_sign_id], 169 | point_history_classifier_labels[most_common_fg_id[0][0]], 170 | ) 171 | else: 172 | point_history.append([0, 0]) 173 | 174 | debug_image = draw_point_history(debug_image, point_history) 175 | debug_image = draw_info(debug_image, fps, mode, number) 176 | 177 | # Screen reflection ############################################################# 178 | cv.imshow('Hand Gesture Recognition', debug_image) 179 | 180 | cap.release() 181 | cv.destroyAllWindows() 182 | 183 | 184 | def select_mode(key, mode): 185 | number = -1 186 | if 48 <= key <= 57: # 0 ~ 9 187 | number = key - 48 188 | if key == 110: # n 189 | mode = 0 190 | if key == 107: # k 191 | mode = 1 192 | if key == 104: # h 193 | mode = 2 194 | return number, mode 195 | 196 | 197 | def calc_bounding_rect(image, landmarks): 198 | image_width, image_height = image.shape[1], image.shape[0] 199 | 200 | landmark_array = np.empty((0, 2), int) 201 | 202 | for _, landmark in enumerate(landmarks.landmark): 203 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 204 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 205 | 206 | landmark_point = [np.array((landmark_x, landmark_y))] 207 | 208 | landmark_array = np.append(landmark_array, landmark_point, axis=0) 209 | 210 | x, y, w, h = cv.boundingRect(landmark_array) 211 | 212 | return [x, y, x + w, y + h] 213 | 214 | 215 | def calc_landmark_list(image, landmarks): 216 | image_width, image_height = image.shape[1], 
image.shape[0] 217 | 218 | landmark_point = [] 219 | 220 | # Keypoint 221 | for _, landmark in enumerate(landmarks.landmark): 222 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 223 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 224 | # landmark_z = landmark.z 225 | 226 | landmark_point.append([landmark_x, landmark_y]) 227 | 228 | return landmark_point 229 | 230 | 231 | def pre_process_landmark(landmark_list): 232 | temp_landmark_list = copy.deepcopy(landmark_list) 233 | 234 | # Convert to relative coordinates 235 | base_x, base_y = 0, 0 236 | for index, landmark_point in enumerate(temp_landmark_list): 237 | if index == 0: 238 | base_x, base_y = landmark_point[0], landmark_point[1] 239 | 240 | temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x 241 | temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y 242 | 243 | # Convert to a one-dimensional list 244 | temp_landmark_list = list( 245 | itertools.chain.from_iterable(temp_landmark_list)) 246 | 247 | # Normalization 248 | max_value = max(list(map(abs, temp_landmark_list))) 249 | 250 | def normalize_(n): 251 | return n / max_value 252 | 253 | temp_landmark_list = list(map(normalize_, temp_landmark_list)) 254 | 255 | return temp_landmark_list 256 | 257 | 258 | def pre_process_point_history(image, point_history): 259 | image_width, image_height = image.shape[1], image.shape[0] 260 | 261 | temp_point_history = copy.deepcopy(point_history) 262 | 263 | # Convert to relative coordinates 264 | base_x, base_y = 0, 0 265 | for index, point in enumerate(temp_point_history): 266 | if index == 0: 267 | base_x, base_y = point[0], point[1] 268 | 269 | temp_point_history[index][0] = (temp_point_history[index][0] - 270 | base_x) / image_width 271 | temp_point_history[index][1] = (temp_point_history[index][1] - 272 | base_y) / image_height 273 | 274 | # Convert to a one-dimensional list 275 | temp_point_history = list( 276 | 
itertools.chain.from_iterable(temp_point_history)) 277 | 278 | return temp_point_history 279 | 280 | 281 | def logging_csv(number, mode, landmark_list, point_history_list): 282 | if mode == 0: 283 | pass 284 | if mode == 1 and (0 <= number <= 9): 285 | csv_path = 'model/keypoint_classifier/keypoint.csv' 286 | with open(csv_path, 'a', newline="") as f: 287 | writer = csv.writer(f) 288 | writer.writerow([number, *landmark_list]) 289 | if mode == 2 and (0 <= number <= 9): 290 | csv_path = 'model/point_history_classifier/point_history.csv' 291 | with open(csv_path, 'a', newline="") as f: 292 | writer = csv.writer(f) 293 | writer.writerow([number, *point_history_list]) 294 | return 295 | 296 | 297 | def draw_landmarks(image, landmark_point): 298 | if len(landmark_point) > 0: 299 | # Thumb 300 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]), 301 | (0, 0, 0), 6) 302 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[3]), 303 | (255, 255, 255), 2) 304 | cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]), 305 | (0, 0, 0), 6) 306 | cv.line(image, tuple(landmark_point[3]), tuple(landmark_point[4]), 307 | (255, 255, 255), 2) 308 | 309 | # Index finger 310 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]), 311 | (0, 0, 0), 6) 312 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[6]), 313 | (255, 255, 255), 2) 314 | cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]), 315 | (0, 0, 0), 6) 316 | cv.line(image, tuple(landmark_point[6]), tuple(landmark_point[7]), 317 | (255, 255, 255), 2) 318 | cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]), 319 | (0, 0, 0), 6) 320 | cv.line(image, tuple(landmark_point[7]), tuple(landmark_point[8]), 321 | (255, 255, 255), 2) 322 | 323 | # Middle finger 324 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]), 325 | (0, 0, 0), 6) 326 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[10]), 327 | (255, 255, 
255), 2) 328 | cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]), 329 | (0, 0, 0), 6) 330 | cv.line(image, tuple(landmark_point[10]), tuple(landmark_point[11]), 331 | (255, 255, 255), 2) 332 | cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]), 333 | (0, 0, 0), 6) 334 | cv.line(image, tuple(landmark_point[11]), tuple(landmark_point[12]), 335 | (255, 255, 255), 2) 336 | 337 | # Ring finger 338 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]), 339 | (0, 0, 0), 6) 340 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[14]), 341 | (255, 255, 255), 2) 342 | cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]), 343 | (0, 0, 0), 6) 344 | cv.line(image, tuple(landmark_point[14]), tuple(landmark_point[15]), 345 | (255, 255, 255), 2) 346 | cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]), 347 | (0, 0, 0), 6) 348 | cv.line(image, tuple(landmark_point[15]), tuple(landmark_point[16]), 349 | (255, 255, 255), 2) 350 | 351 | # Little finger 352 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]), 353 | (0, 0, 0), 6) 354 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[18]), 355 | (255, 255, 255), 2) 356 | cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]), 357 | (0, 0, 0), 6) 358 | cv.line(image, tuple(landmark_point[18]), tuple(landmark_point[19]), 359 | (255, 255, 255), 2) 360 | cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]), 361 | (0, 0, 0), 6) 362 | cv.line(image, tuple(landmark_point[19]), tuple(landmark_point[20]), 363 | (255, 255, 255), 2) 364 | 365 | # Palm 366 | cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]), 367 | (0, 0, 0), 6) 368 | cv.line(image, tuple(landmark_point[0]), tuple(landmark_point[1]), 369 | (255, 255, 255), 2) 370 | cv.line(image, tuple(landmark_point[1]), tuple(landmark_point[2]), 371 | (0, 0, 0), 6) 372 | cv.line(image, tuple(landmark_point[1]), 
tuple(landmark_point[2]), 373 | (255, 255, 255), 2) 374 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]), 375 | (0, 0, 0), 6) 376 | cv.line(image, tuple(landmark_point[2]), tuple(landmark_point[5]), 377 | (255, 255, 255), 2) 378 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]), 379 | (0, 0, 0), 6) 380 | cv.line(image, tuple(landmark_point[5]), tuple(landmark_point[9]), 381 | (255, 255, 255), 2) 382 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]), 383 | (0, 0, 0), 6) 384 | cv.line(image, tuple(landmark_point[9]), tuple(landmark_point[13]), 385 | (255, 255, 255), 2) 386 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]), 387 | (0, 0, 0), 6) 388 | cv.line(image, tuple(landmark_point[13]), tuple(landmark_point[17]), 389 | (255, 255, 255), 2) 390 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]), 391 | (0, 0, 0), 6) 392 | cv.line(image, tuple(landmark_point[17]), tuple(landmark_point[0]), 393 | (255, 255, 255), 2) 394 | 395 | # Key Points 396 | for index, landmark in enumerate(landmark_point): 397 | if index == 0: # 手首1 398 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 399 | -1) 400 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 401 | if index == 1: # 手首2 402 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 403 | -1) 404 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 405 | if index == 2: # 親指:付け根 406 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 407 | -1) 408 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 409 | if index == 3: # 親指:第1関節 410 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 411 | -1) 412 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 413 | if index == 4: # 親指:指先 414 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255), 415 | -1) 416 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1) 417 | if 
index == 5: # 人差指:付け根 418 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 419 | -1) 420 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 421 | if index == 6: # 人差指:第2関節 422 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 423 | -1) 424 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 425 | if index == 7: # 人差指:第1関節 426 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 427 | -1) 428 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 429 | if index == 8: # 人差指:指先 430 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255), 431 | -1) 432 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1) 433 | if index == 9: # 中指:付け根 434 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 435 | -1) 436 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 437 | if index == 10: # 中指:第2関節 438 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 439 | -1) 440 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 441 | if index == 11: # 中指:第1関節 442 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 443 | -1) 444 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 445 | if index == 12: # 中指:指先 446 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255), 447 | -1) 448 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1) 449 | if index == 13: # 薬指:付け根 450 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 451 | -1) 452 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 453 | if index == 14: # 薬指:第2関節 454 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 455 | -1) 456 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 457 | if index == 15: # 薬指:第1関節 458 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 459 | -1) 460 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 461 | if index == 16: # 
薬指:指先 462 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255), 463 | -1) 464 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1) 465 | if index == 17: # 小指:付け根 466 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 467 | -1) 468 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 469 | if index == 18: # 小指:第2関節 470 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 471 | -1) 472 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 473 | if index == 19: # 小指:第1関節 474 | cv.circle(image, (landmark[0], landmark[1]), 5, (255, 255, 255), 475 | -1) 476 | cv.circle(image, (landmark[0], landmark[1]), 5, (0, 0, 0), 1) 477 | if index == 20: # 小指:指先 478 | cv.circle(image, (landmark[0], landmark[1]), 8, (255, 255, 255), 479 | -1) 480 | cv.circle(image, (landmark[0], landmark[1]), 8, (0, 0, 0), 1) 481 | 482 | return image 483 | 484 | 485 | def draw_bounding_rect(use_brect, image, brect): 486 | if use_brect: 487 | # Outer rectangle 488 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]), 489 | (0, 0, 0), 1) 490 | 491 | return image 492 | 493 | 494 | def draw_info_text(image, brect, handedness, hand_sign_text, 495 | finger_gesture_text): 496 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22), 497 | (0, 0, 0), -1) 498 | 499 | info_text = handedness.classification[0].label[0:] 500 | if hand_sign_text != "": 501 | info_text = info_text + ':' + hand_sign_text 502 | cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4), 503 | cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv.LINE_AA) 504 | 505 | if finger_gesture_text != "": 506 | cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60), 507 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 4, cv.LINE_AA) 508 | cv.putText(image, "Finger Gesture:" + finger_gesture_text, (10, 60), 509 | cv.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2, 510 | cv.LINE_AA) 511 | 512 | return image 513 | 514 | 515 | def 
def draw_point_history(image, point_history):
    """Draw the fingertip trail as a sequence of circles.

    Older entries are drawn small and newer ones progressively larger,
    giving a comet-like trail. Entries where either coordinate is 0 are
    treated as "no point recorded" and skipped.

    Args:
        image: BGR frame (numpy array) drawn on in place.
        point_history: Ordered sequence of (x, y) pixel coordinates,
            oldest first.

    Returns:
        The annotated image.
    """
    for age, (px, py) in enumerate(point_history):
        if px == 0 or py == 0:
            continue  # placeholder entry: nothing was tracked that frame
        cv.circle(image, (px, py), 1 + age // 2, (152, 251, 152), 2)

    return image


def draw_info(image, fps, mode, number):
    """Overlay FPS, logging mode and selected class number on the frame.

    Args:
        image: BGR frame (numpy array) drawn on in place.
        fps: Frames-per-second value to display.
        mode: 1 = key-point logging, 2 = point-history logging; anything
            else draws no mode text.
        number: Class number (0-9) shown while a logging mode is active.

    Returns:
        The annotated image.
    """
    fps_label = "FPS:" + str(fps)
    # Outline pass (thick black) then fill pass (thin white) for contrast.
    for color, thickness in (((0, 0, 0), 4), ((255, 255, 255), 2)):
        cv.putText(image, fps_label, (10, 30), cv.FONT_HERSHEY_SIMPLEX,
                   1.0, color, thickness, cv.LINE_AA)

    mode_string = ['Logging Key Point', 'Logging Point History']
    if 1 <= mode <= 2:
        cv.putText(image, "MODE:" + mode_string[mode - 1], (10, 90),
                   cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
                   cv.LINE_AA)
        if 0 <= number <= 9:
            cv.putText(image, "NUM:" + str(number), (10, 110),
                       cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
                       cv.LINE_AA)
    return image


if __name__ == '__main__':
    main()
| "source": [ 39 | "# 分類数設定" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 3, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "NUM_CLASSES = 3" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "# 学習データ読み込み" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 4, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (21 * 2) + 1)))" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 5, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 6, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "# モデル構築" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 7, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "model = tf.keras.models.Sequential([\n", 99 | " tf.keras.layers.Input((21 * 2, )),\n", 100 | " tf.keras.layers.Dropout(0.2),\n", 101 | " tf.keras.layers.Dense(20, activation='relu'),\n", 102 | " tf.keras.layers.Dropout(0.4),\n", 103 | " tf.keras.layers.Dense(10, activation='relu'),\n", 104 | " tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n", 105 | "])" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 8, 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "name": "stdout", 115 | "output_type": "stream", 116 | "text": [ 117 | "Model: \"sequential\"\n", 118 | "_________________________________________________________________\n", 119 | "Layer (type) Output Shape Param # \n", 120 | 
"=================================================================\n", 121 | "dropout (Dropout) (None, 42) 0 \n", 122 | "_________________________________________________________________\n", 123 | "dense (Dense) (None, 20) 860 \n", 124 | "_________________________________________________________________\n", 125 | "dropout_1 (Dropout) (None, 20) 0 \n", 126 | "_________________________________________________________________\n", 127 | "dense_1 (Dense) (None, 10) 210 \n", 128 | "_________________________________________________________________\n", 129 | "dense_2 (Dense) (None, 3) 33 \n", 130 | "=================================================================\n", 131 | "Total params: 1,103\n", 132 | "Trainable params: 1,103\n", 133 | "Non-trainable params: 0\n", 134 | "_________________________________________________________________\n" 135 | ] 136 | } 137 | ], 138 | "source": [ 139 | "model.summary() # tf.keras.utils.plot_model(model, show_shapes=True)" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 9, 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "# モデルチェックポイントのコールバック\n", 149 | "cp_callback = tf.keras.callbacks.ModelCheckpoint(\n", 150 | " model_save_path, verbose=1, save_weights_only=False)\n", 151 | "# 早期打ち切り用コールバック\n", 152 | "es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": 10, 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "# モデルコンパイル\n", 162 | "model.compile(\n", 163 | " optimizer='adam',\n", 164 | " loss='sparse_categorical_crossentropy',\n", 165 | " metrics=['accuracy']\n", 166 | ")" 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": {}, 172 | "source": [ 173 | "# モデル訓練" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": 11, 179 | "metadata": { 180 | "scrolled": true 181 | }, 182 | "outputs": [ 183 | { 184 | "name": "stdout", 185 | 
"output_type": "stream", 186 | "text": [ 187 | "Epoch 1/1000\n", 188 | " 1/27 [>.............................] - ETA: 0s - loss: 1.1295 - accuracy: 0.3203\n", 189 | "Epoch 00001: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 190 | "27/27 [==============================] - 0s 11ms/step - loss: 1.1004 - accuracy: 0.3602 - val_loss: 1.0431 - val_accuracy: 0.5220\n", 191 | "Epoch 2/1000\n", 192 | " 1/27 [>.............................] - ETA: 0s - loss: 1.0440 - accuracy: 0.4844\n", 193 | "Epoch 00002: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 194 | "27/27 [==============================] - 0s 3ms/step - loss: 1.0503 - accuracy: 0.4297 - val_loss: 0.9953 - val_accuracy: 0.6397\n", 195 | "Epoch 3/1000\n", 196 | " 1/27 [>.............................] - ETA: 0s - loss: 1.0043 - accuracy: 0.5312\n", 197 | "Epoch 00003: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 198 | "27/27 [==============================] - 0s 4ms/step - loss: 1.0210 - accuracy: 0.4582 - val_loss: 0.9545 - val_accuracy: 0.6523\n", 199 | "Epoch 4/1000\n", 200 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9503 - accuracy: 0.5625\n", 201 | "Epoch 00004: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 202 | "27/27 [==============================] - 0s 4ms/step - loss: 0.9906 - accuracy: 0.5022 - val_loss: 0.9168 - val_accuracy: 0.6721\n", 203 | "Epoch 5/1000\n", 204 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9562 - accuracy: 0.5469\n", 205 | "Epoch 00005: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 206 | "27/27 [==============================] - 0s 3ms/step - loss: 0.9654 - accuracy: 0.5340 - val_loss: 0.8791 - val_accuracy: 0.7017\n", 207 | "Epoch 6/1000\n", 208 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.9184 - accuracy: 0.5938\n", 209 | "Epoch 00006: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 210 | "27/27 [==============================] - 0s 3ms/step - loss: 0.9256 - accuracy: 0.5577 - val_loss: 0.8344 - val_accuracy: 0.7269\n", 211 | "Epoch 7/1000\n", 212 | "27/27 [==============================] - ETA: 0s - loss: 0.9050 - accuracy: 0.5715\n", 213 | "Epoch 00007: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 214 | "27/27 [==============================] - 0s 4ms/step - loss: 0.9050 - accuracy: 0.5715 - val_loss: 0.7887 - val_accuracy: 0.7646\n", 215 | "Epoch 8/1000\n", 216 | " 1/27 [>.............................] - ETA: 0s - loss: 0.9135 - accuracy: 0.5547\n", 217 | "Epoch 00008: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 218 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8642 - accuracy: 0.5993 - val_loss: 0.7414 - val_accuracy: 0.7996\n", 219 | "Epoch 9/1000\n", 220 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8002 - accuracy: 0.6172\n", 221 | "Epoch 00009: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 222 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8258 - accuracy: 0.6263 - val_loss: 0.6881 - val_accuracy: 0.8149\n", 223 | "Epoch 10/1000\n", 224 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8056 - accuracy: 0.6328\n", 225 | "Epoch 00010: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 226 | "27/27 [==============================] - 0s 3ms/step - loss: 0.8008 - accuracy: 0.6341 - val_loss: 0.6461 - val_accuracy: 0.8239\n", 227 | "Epoch 11/1000\n", 228 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.7746 - accuracy: 0.6719\n", 229 | "Epoch 00011: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 230 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7771 - accuracy: 0.6491 - val_loss: 0.6143 - val_accuracy: 0.8266\n", 231 | "Epoch 12/1000\n", 232 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7242 - accuracy: 0.7109\n", 233 | "Epoch 00012: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 234 | "27/27 [==============================] - 0s 4ms/step - loss: 0.7490 - accuracy: 0.6650 - val_loss: 0.5740 - val_accuracy: 0.8320\n", 235 | "Epoch 13/1000\n", 236 | " 1/27 [>.............................] - ETA: 0s - loss: 0.8363 - accuracy: 0.6328\n", 237 | "Epoch 00013: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 238 | "27/27 [==============================] - 0s 4ms/step - loss: 0.7397 - accuracy: 0.6731 - val_loss: 0.5465 - val_accuracy: 0.8446\n", 239 | "Epoch 14/1000\n", 240 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7634 - accuracy: 0.6172\n", 241 | "Epoch 00014: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 242 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7190 - accuracy: 0.6883 - val_loss: 0.5202 - val_accuracy: 0.8589\n", 243 | "Epoch 15/1000\n", 244 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6699 - accuracy: 0.6875\n", 245 | "Epoch 00015: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 246 | "27/27 [==============================] - 0s 3ms/step - loss: 0.7077 - accuracy: 0.6973 - val_loss: 0.4944 - val_accuracy: 0.8652\n", 247 | "Epoch 16/1000\n", 248 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.6447 - accuracy: 0.7500\n", 249 | "Epoch 00016: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 250 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6903 - accuracy: 0.6928 - val_loss: 0.4781 - val_accuracy: 0.8805\n", 251 | "Epoch 17/1000\n", 252 | " 1/27 [>.............................] - ETA: 0s - loss: 0.7165 - accuracy: 0.6875\n", 253 | "Epoch 00017: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 254 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6919 - accuracy: 0.6973 - val_loss: 0.4696 - val_accuracy: 0.8895\n", 255 | "Epoch 18/1000\n", 256 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6268 - accuracy: 0.7422\n", 257 | "Epoch 00018: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 258 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6498 - accuracy: 0.7303 - val_loss: 0.4440 - val_accuracy: 0.8967\n", 259 | "Epoch 19/1000\n", 260 | "27/27 [==============================] - ETA: 0s - loss: 0.6499 - accuracy: 0.7261\n", 261 | "Epoch 00019: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 262 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6499 - accuracy: 0.7261 - val_loss: 0.4254 - val_accuracy: 0.9039\n", 263 | "Epoch 20/1000\n", 264 | "26/27 [===========================>..] - ETA: 0s - loss: 0.6386 - accuracy: 0.7236\n", 265 | "Epoch 00020: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 266 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6415 - accuracy: 0.7228 - val_loss: 0.4082 - val_accuracy: 0.9093\n", 267 | "Epoch 21/1000\n", 268 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5631 - accuracy: 0.7500\n", 269 | "Epoch 00021: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 270 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6484 - accuracy: 0.7180 - val_loss: 0.4114 - val_accuracy: 0.9173\n", 271 | "Epoch 22/1000\n", 272 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5775 - accuracy: 0.7812\n", 273 | "Epoch 00022: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 274 | "27/27 [==============================] - 0s 3ms/step - loss: 0.6154 - accuracy: 0.7480 - val_loss: 0.3907 - val_accuracy: 0.9218\n", 275 | "Epoch 23/1000\n", 276 | "25/27 [==========================>...] - ETA: 0s - loss: 0.5967 - accuracy: 0.7588\n", 277 | "Epoch 00023: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 278 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5971 - accuracy: 0.7582 - val_loss: 0.3763 - val_accuracy: 0.9227\n", 279 | "Epoch 24/1000\n", 280 | "26/27 [===========================>..] - ETA: 0s - loss: 0.6064 - accuracy: 0.7569\n", 281 | "Epoch 00024: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 282 | "27/27 [==============================] - 0s 4ms/step - loss: 0.6066 - accuracy: 0.7567 - val_loss: 0.3714 - val_accuracy: 0.9254\n", 283 | "Epoch 25/1000\n", 284 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6211 - accuracy: 0.7422\n", 285 | "Epoch 00025: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 286 | "27/27 [==============================] - 0s 7ms/step - loss: 0.5954 - accuracy: 0.7579 - val_loss: 0.3611 - val_accuracy: 0.9353\n", 287 | "Epoch 26/1000\n", 288 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5621 - accuracy: 0.7812\n", 289 | "Epoch 00026: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 290 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5818 - accuracy: 0.7737 - val_loss: 0.3498 - val_accuracy: 0.9380\n", 291 | "Epoch 27/1000\n", 292 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6431 - accuracy: 0.7500\n", 293 | "Epoch 00027: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 294 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5882 - accuracy: 0.7648 - val_loss: 0.3355 - val_accuracy: 0.9416\n" 295 | ] 296 | }, 297 | { 298 | "name": "stdout", 299 | "output_type": "stream", 300 | "text": [ 301 | "Epoch 28/1000\n", 302 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5633 - accuracy: 0.8203\n", 303 | "Epoch 00028: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 304 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5746 - accuracy: 0.7702 - val_loss: 0.3273 - val_accuracy: 0.9425\n", 305 | "Epoch 29/1000\n", 306 | "27/27 [==============================] - ETA: 0s - loss: 0.5856 - accuracy: 0.7651\n", 307 | "Epoch 00029: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 308 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5856 - accuracy: 0.7651 - val_loss: 0.3237 - val_accuracy: 0.9434\n", 309 | "Epoch 30/1000\n", 310 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5524 - accuracy: 0.7812\n", 311 | "Epoch 00030: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 312 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5576 - accuracy: 0.7788 - val_loss: 0.3203 - val_accuracy: 0.9452\n", 313 | "Epoch 31/1000\n", 314 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5643 - accuracy: 0.7578\n", 315 | "Epoch 00031: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 316 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5450 - accuracy: 0.7773 - val_loss: 0.3111 - val_accuracy: 0.9443\n", 317 | "Epoch 32/1000\n", 318 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5507 - accuracy: 0.7812\n", 319 | "Epoch 00032: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 320 | "27/27 [==============================] - 0s 6ms/step - loss: 0.5574 - accuracy: 0.7860 - val_loss: 0.3017 - val_accuracy: 0.9434\n", 321 | "Epoch 33/1000\n", 322 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5302 - accuracy: 0.8125\n", 323 | "Epoch 00033: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 324 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5444 - accuracy: 0.7905 - val_loss: 0.2917 - val_accuracy: 0.9479\n", 325 | "Epoch 34/1000\n", 326 | "27/27 [==============================] - ETA: 0s - loss: 0.5421 - accuracy: 0.7848\n", 327 | "Epoch 00034: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 328 | "27/27 [==============================] - 0s 4ms/step - loss: 0.5421 - accuracy: 0.7848 - val_loss: 0.2863 - val_accuracy: 0.9470\n", 329 | "Epoch 35/1000\n", 330 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4633 - accuracy: 0.8125\n", 331 | "Epoch 00035: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 332 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5331 - accuracy: 0.7980 - val_loss: 0.2804 - val_accuracy: 0.9506\n", 333 | "Epoch 36/1000\n", 334 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5972 - accuracy: 0.7812\n", 335 | "Epoch 00036: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 336 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5255 - accuracy: 0.7968 - val_loss: 0.2774 - val_accuracy: 0.9479\n", 337 | "Epoch 37/1000\n", 338 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5851 - accuracy: 0.7578\n", 339 | "Epoch 00037: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 340 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5284 - accuracy: 0.7947 - val_loss: 0.2681 - val_accuracy: 0.9497\n", 341 | "Epoch 38/1000\n", 342 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4775 - accuracy: 0.7812\n", 343 | "Epoch 00038: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 344 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5143 - accuracy: 0.7995 - val_loss: 0.2678 - val_accuracy: 0.9479\n", 345 | "Epoch 39/1000\n", 346 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5495 - accuracy: 0.7812\n", 347 | "Epoch 00039: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 348 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5250 - accuracy: 0.7935 - val_loss: 0.2657 - val_accuracy: 0.9470\n", 349 | "Epoch 40/1000\n", 350 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5244 - accuracy: 0.8203\n", 351 | "Epoch 00040: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 352 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5195 - accuracy: 0.8001 - val_loss: 0.2606 - val_accuracy: 0.9524\n", 353 | "Epoch 41/1000\n", 354 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.6996 - accuracy: 0.6953\n", 355 | "Epoch 00041: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 356 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5304 - accuracy: 0.7956 - val_loss: 0.2572 - val_accuracy: 0.9515\n", 357 | "Epoch 42/1000\n", 358 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4843 - accuracy: 0.8281\n", 359 | "Epoch 00042: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 360 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5115 - accuracy: 0.8091 - val_loss: 0.2513 - val_accuracy: 0.9524\n", 361 | "Epoch 43/1000\n", 362 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3830 - accuracy: 0.8594\n", 363 | "Epoch 00043: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 364 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4939 - accuracy: 0.8133 - val_loss: 0.2423 - val_accuracy: 0.9551\n", 365 | "Epoch 44/1000\n", 366 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4531 - accuracy: 0.7969\n", 367 | "Epoch 00044: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 368 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4969 - accuracy: 0.8094 - val_loss: 0.2437 - val_accuracy: 0.9497\n", 369 | "Epoch 45/1000\n", 370 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5198 - accuracy: 0.7812\n", 371 | "Epoch 00045: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 372 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4961 - accuracy: 0.8106 - val_loss: 0.2441 - val_accuracy: 0.9533\n", 373 | "Epoch 46/1000\n", 374 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5912 - accuracy: 0.7812\n", 375 | "Epoch 00046: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 376 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4816 - accuracy: 0.8271 - val_loss: 0.2482 - val_accuracy: 0.9542\n", 377 | "Epoch 47/1000\n", 378 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5292 - accuracy: 0.8047\n", 379 | "Epoch 00047: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 380 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4789 - accuracy: 0.8127 - val_loss: 0.2313 - val_accuracy: 0.9569\n", 381 | "Epoch 48/1000\n", 382 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4428 - accuracy: 0.8125\n", 383 | "Epoch 00048: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 384 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4901 - accuracy: 0.8079 - val_loss: 0.2319 - val_accuracy: 0.9560\n", 385 | "Epoch 49/1000\n", 386 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4565 - accuracy: 0.8281\n", 387 | "Epoch 00049: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 388 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4893 - accuracy: 0.8154 - val_loss: 0.2300 - val_accuracy: 0.9533\n", 389 | "Epoch 50/1000\n", 390 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5185 - accuracy: 0.7812\n", 391 | "Epoch 00050: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 392 | "27/27 [==============================] - 0s 3ms/step - loss: 0.5079 - accuracy: 0.8112 - val_loss: 0.2386 - val_accuracy: 0.9524\n", 393 | "Epoch 51/1000\n", 394 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4280 - accuracy: 0.8203\n", 395 | "Epoch 00051: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 396 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4692 - accuracy: 0.8205 - val_loss: 0.2332 - val_accuracy: 0.9578\n", 397 | "Epoch 52/1000\n", 398 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5299 - accuracy: 0.8203\n", 399 | "Epoch 00052: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 400 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4907 - accuracy: 0.8145 - val_loss: 0.2336 - val_accuracy: 0.9569\n", 401 | "Epoch 53/1000\n", 402 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5856 - accuracy: 0.7969\n", 403 | "Epoch 00053: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 404 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4882 - accuracy: 0.8181 - val_loss: 0.2306 - val_accuracy: 0.9605\n", 405 | "Epoch 54/1000\n", 406 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4466 - accuracy: 0.8047\n", 407 | "Epoch 00054: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 408 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4641 - accuracy: 0.8235 - val_loss: 0.2218 - val_accuracy: 0.9596\n" 409 | ] 410 | }, 411 | { 412 | "name": "stdout", 413 | "output_type": "stream", 414 | "text": [ 415 | "Epoch 55/1000\n", 416 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3760 - accuracy: 0.8672\n", 417 | "Epoch 00055: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 418 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4630 - accuracy: 0.8241 - val_loss: 0.2242 - val_accuracy: 0.9578\n", 419 | "Epoch 56/1000\n", 420 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4607 - accuracy: 0.7734\n", 421 | "Epoch 00056: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 422 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4546 - accuracy: 0.8277 - val_loss: 0.2168 - val_accuracy: 0.9605\n", 423 | "Epoch 57/1000\n", 424 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4946 - accuracy: 0.7969\n", 425 | "Epoch 00057: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 426 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4675 - accuracy: 0.8214 - val_loss: 0.2211 - val_accuracy: 0.9578\n", 427 | "Epoch 58/1000\n", 428 | "25/27 [==========================>...] - ETA: 0s - loss: 0.4393 - accuracy: 0.8334\n", 429 | "Epoch 00058: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 430 | "27/27 [==============================] - 0s 4ms/step - loss: 0.4418 - accuracy: 0.8325 - val_loss: 0.2115 - val_accuracy: 0.9632\n", 431 | "Epoch 59/1000\n", 432 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4987 - accuracy: 0.7969\n", 433 | "Epoch 00059: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 434 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4646 - accuracy: 0.8217 - val_loss: 0.2116 - val_accuracy: 0.9596\n", 435 | "Epoch 60/1000\n", 436 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4904 - accuracy: 0.7812\n", 437 | "Epoch 00060: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 438 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4576 - accuracy: 0.8232 - val_loss: 0.2108 - val_accuracy: 0.9569\n", 439 | "Epoch 61/1000\n", 440 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5147 - accuracy: 0.8281\n", 441 | "Epoch 00061: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 442 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4646 - accuracy: 0.8253 - val_loss: 0.2174 - val_accuracy: 0.9587\n", 443 | "Epoch 62/1000\n", 444 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3386 - accuracy: 0.8750\n", 445 | "Epoch 00062: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 446 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4433 - accuracy: 0.8310 - val_loss: 0.2145 - val_accuracy: 0.9560\n", 447 | "Epoch 63/1000\n", 448 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4222 - accuracy: 0.8594\n", 449 | "Epoch 00063: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 450 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4624 - accuracy: 0.8283 - val_loss: 0.2099 - val_accuracy: 0.9569\n", 451 | "Epoch 64/1000\n", 452 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4376 - accuracy: 0.8203\n", 453 | "Epoch 00064: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 454 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4601 - accuracy: 0.8235 - val_loss: 0.2075 - val_accuracy: 0.9641\n", 455 | "Epoch 65/1000\n", 456 | "27/27 [==============================] - ETA: 0s - loss: 0.4676 - accuracy: 0.8265\n", 457 | "Epoch 00065: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 458 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4676 - accuracy: 0.8265 - val_loss: 0.2172 - val_accuracy: 0.9551\n", 459 | "Epoch 66/1000\n", 460 | "26/27 [===========================>..] 
- ETA: 0s - loss: 0.4434 - accuracy: 0.8368\n", 461 | "Epoch 00066: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 462 | "27/27 [==============================] - 1s 20ms/step - loss: 0.4429 - accuracy: 0.8370 - val_loss: 0.2154 - val_accuracy: 0.9578\n", 463 | "Epoch 67/1000\n", 464 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4827 - accuracy: 0.8125\n", 465 | "Epoch 00067: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 466 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4434 - accuracy: 0.8358 - val_loss: 0.2090 - val_accuracy: 0.9587\n", 467 | "Epoch 68/1000\n", 468 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5810 - accuracy: 0.7656\n", 469 | "Epoch 00068: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 470 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4892 - accuracy: 0.8049 - val_loss: 0.2160 - val_accuracy: 0.9578\n", 471 | "Epoch 69/1000\n", 472 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4375 - accuracy: 0.7812\n", 473 | "Epoch 00069: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 474 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4597 - accuracy: 0.8250 - val_loss: 0.2100 - val_accuracy: 0.9605\n", 475 | "Epoch 70/1000\n", 476 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3782 - accuracy: 0.8359\n", 477 | "Epoch 00070: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 478 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4408 - accuracy: 0.8349 - val_loss: 0.2087 - val_accuracy: 0.9596\n", 479 | "Epoch 71/1000\n", 480 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4578 - accuracy: 0.8438\n", 481 | "Epoch 00071: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 482 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4425 - accuracy: 0.8355 - val_loss: 0.2075 - val_accuracy: 0.9587\n", 483 | "Epoch 72/1000\n", 484 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4267 - accuracy: 0.8438\n", 485 | "Epoch 00072: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 486 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4581 - accuracy: 0.8292 - val_loss: 0.2059 - val_accuracy: 0.9623\n", 487 | "Epoch 73/1000\n", 488 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4450 - accuracy: 0.8750\n", 489 | "Epoch 00073: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 490 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4508 - accuracy: 0.8403 - val_loss: 0.2083 - val_accuracy: 0.9614\n", 491 | "Epoch 74/1000\n", 492 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3858 - accuracy: 0.8906\n", 493 | "Epoch 00074: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 494 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4472 - accuracy: 0.8361 - val_loss: 0.2043 - val_accuracy: 0.9650\n", 495 | "Epoch 75/1000\n", 496 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4439 - accuracy: 0.8359\n", 497 | "Epoch 00075: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 498 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4515 - accuracy: 0.8325 - val_loss: 0.2138 - val_accuracy: 0.9632\n", 499 | "Epoch 76/1000\n", 500 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3983 - accuracy: 0.8203\n", 501 | "Epoch 00076: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 502 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4462 - accuracy: 0.8334 - val_loss: 0.2065 - val_accuracy: 0.9623\n", 503 | "Epoch 77/1000\n", 504 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5020 - accuracy: 0.8047\n", 505 | "Epoch 00077: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 506 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4325 - accuracy: 0.8388 - val_loss: 0.2061 - val_accuracy: 0.9605\n", 507 | "Epoch 78/1000\n", 508 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3215 - accuracy: 0.8672\n", 509 | "Epoch 00078: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 510 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4394 - accuracy: 0.8391 - val_loss: 0.2054 - val_accuracy: 0.9578\n", 511 | "Epoch 79/1000\n", 512 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4025 - accuracy: 0.8359\n", 513 | "Epoch 00079: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 514 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4370 - accuracy: 0.8310 - val_loss: 0.2031 - val_accuracy: 0.9605\n", 515 | "Epoch 80/1000\n", 516 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4505 - accuracy: 0.8125\n", 517 | "Epoch 00080: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 518 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4419 - accuracy: 0.8340 - val_loss: 0.2010 - val_accuracy: 0.9596\n", 519 | "Epoch 81/1000\n", 520 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5287 - accuracy: 0.7891\n", 521 | "Epoch 00081: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 522 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4369 - accuracy: 0.8304 - val_loss: 0.2081 - val_accuracy: 0.9578\n" 523 | ] 524 | }, 525 | { 526 | "name": "stdout", 527 | "output_type": "stream", 528 | "text": [ 529 | "Epoch 82/1000\n", 530 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5132 - accuracy: 0.8047\n", 531 | "Epoch 00082: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 532 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4360 - accuracy: 0.8460 - val_loss: 0.2045 - val_accuracy: 0.9605\n", 533 | "Epoch 83/1000\n", 534 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4239 - accuracy: 0.8125\n", 535 | "Epoch 00083: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 536 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4511 - accuracy: 0.8313 - val_loss: 0.1984 - val_accuracy: 0.9605\n", 537 | "Epoch 84/1000\n", 538 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4853 - accuracy: 0.8203\n", 539 | "Epoch 00084: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 540 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4357 - accuracy: 0.8304 - val_loss: 0.2024 - val_accuracy: 0.9623\n", 541 | "Epoch 85/1000\n", 542 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4782 - accuracy: 0.8125\n", 543 | "Epoch 00085: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 544 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4320 - accuracy: 0.8424 - val_loss: 0.2015 - val_accuracy: 0.9587\n", 545 | "Epoch 86/1000\n", 546 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3985 - accuracy: 0.8828\n", 547 | "Epoch 00086: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 548 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4422 - accuracy: 0.8349 - val_loss: 0.2087 - val_accuracy: 0.9587\n", 549 | "Epoch 87/1000\n", 550 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4810 - accuracy: 0.8359\n", 551 | "Epoch 00087: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 552 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4393 - accuracy: 0.8316 - val_loss: 0.2105 - val_accuracy: 0.9605\n", 553 | "Epoch 88/1000\n", 554 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4819 - accuracy: 0.8125\n", 555 | "Epoch 00088: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 556 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4381 - accuracy: 0.8400 - val_loss: 0.2070 - val_accuracy: 0.9623\n", 557 | "Epoch 89/1000\n", 558 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5002 - accuracy: 0.8281\n", 559 | "Epoch 00089: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 560 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4428 - accuracy: 0.8343 - val_loss: 0.2044 - val_accuracy: 0.9605\n", 561 | "Epoch 90/1000\n", 562 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3314 - accuracy: 0.9062\n", 563 | "Epoch 00090: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 564 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4156 - accuracy: 0.8406 - val_loss: 0.2026 - val_accuracy: 0.9578\n", 565 | "Epoch 91/1000\n", 566 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3850 - accuracy: 0.8594\n", 567 | "Epoch 00091: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 568 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4125 - accuracy: 0.8439 - val_loss: 0.2058 - val_accuracy: 0.9551\n", 569 | "Epoch 92/1000\n", 570 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4976 - accuracy: 0.7734\n", 571 | "Epoch 00092: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 572 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4217 - accuracy: 0.8415 - val_loss: 0.1999 - val_accuracy: 0.9623\n", 573 | "Epoch 93/1000\n", 574 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4155 - accuracy: 0.8516\n", 575 | "Epoch 00093: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 576 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4166 - accuracy: 0.8412 - val_loss: 0.1947 - val_accuracy: 0.9614\n", 577 | "Epoch 94/1000\n", 578 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3642 - accuracy: 0.8750\n", 579 | "Epoch 00094: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 580 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4204 - accuracy: 0.8418 - val_loss: 0.2008 - val_accuracy: 0.9569\n", 581 | "Epoch 95/1000\n", 582 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3773 - accuracy: 0.8594\n", 583 | "Epoch 00095: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 584 | "27/27 [==============================] - 0s 4ms/step - loss: 0.4171 - accuracy: 0.8421 - val_loss: 0.1945 - val_accuracy: 0.9596\n", 585 | "Epoch 96/1000\n", 586 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4086 - accuracy: 0.8672\n", 587 | "Epoch 00096: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 588 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4124 - accuracy: 0.8520 - val_loss: 0.1930 - val_accuracy: 0.9614\n", 589 | "Epoch 97/1000\n", 590 | " 1/27 [>.............................] - ETA: 0s - loss: 0.2914 - accuracy: 0.8906\n", 591 | "Epoch 00097: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 592 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4303 - accuracy: 0.8367 - val_loss: 0.1958 - val_accuracy: 0.9569\n", 593 | "Epoch 98/1000\n", 594 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4562 - accuracy: 0.8672\n", 595 | "Epoch 00098: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 596 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4191 - accuracy: 0.8400 - val_loss: 0.1950 - val_accuracy: 0.9596\n", 597 | "Epoch 99/1000\n", 598 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3967 - accuracy: 0.8438\n", 599 | "Epoch 00099: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 600 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4260 - accuracy: 0.8418 - val_loss: 0.2044 - val_accuracy: 0.9551\n", 601 | "Epoch 100/1000\n", 602 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4173 - accuracy: 0.8516\n", 603 | "Epoch 00100: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 604 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4200 - accuracy: 0.8442 - val_loss: 0.2066 - val_accuracy: 0.9560\n", 605 | "Epoch 101/1000\n", 606 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3892 - accuracy: 0.8438\n", 607 | "Epoch 00101: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 608 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4245 - accuracy: 0.8418 - val_loss: 0.2058 - val_accuracy: 0.9578\n", 609 | "Epoch 102/1000\n", 610 | " 1/27 [>.............................] - ETA: 0s - loss: 0.2965 - accuracy: 0.8984\n", 611 | "Epoch 00102: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 612 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4100 - accuracy: 0.8445 - val_loss: 0.2093 - val_accuracy: 0.9578\n", 613 | "Epoch 103/1000\n", 614 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4241 - accuracy: 0.8125\n", 615 | "Epoch 00103: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 616 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4189 - accuracy: 0.8403 - val_loss: 0.1928 - val_accuracy: 0.9659\n", 617 | "Epoch 104/1000\n", 618 | " 1/27 [>.............................] - ETA: 0s - loss: 0.6486 - accuracy: 0.7891\n", 619 | "Epoch 00104: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 620 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4223 - accuracy: 0.8424 - val_loss: 0.1964 - val_accuracy: 0.9596\n", 621 | "Epoch 105/1000\n", 622 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4589 - accuracy: 0.8281\n", 623 | "Epoch 00105: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 624 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4199 - accuracy: 0.8418 - val_loss: 0.1971 - val_accuracy: 0.9623\n", 625 | "Epoch 106/1000\n", 626 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4395 - accuracy: 0.8203\n", 627 | "Epoch 00106: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 628 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4028 - accuracy: 0.8511 - val_loss: 0.1912 - val_accuracy: 0.9641\n", 629 | "Epoch 107/1000\n", 630 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4061 - accuracy: 0.8594\n", 631 | "Epoch 00107: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 632 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4093 - accuracy: 0.8556 - val_loss: 0.1854 - val_accuracy: 0.9668\n", 633 | "Epoch 108/1000\n", 634 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4142 - accuracy: 0.8438\n", 635 | "Epoch 00108: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 636 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4249 - accuracy: 0.8367 - val_loss: 0.2008 - val_accuracy: 0.9614\n" 637 | ] 638 | }, 639 | { 640 | "name": "stdout", 641 | "output_type": "stream", 642 | "text": [ 643 | "Epoch 109/1000\n", 644 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5373 - accuracy: 0.7969\n", 645 | "Epoch 00109: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 646 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4198 - accuracy: 0.8379 - val_loss: 0.1955 - val_accuracy: 0.9659\n", 647 | "Epoch 110/1000\n", 648 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3872 - accuracy: 0.8281\n", 649 | "Epoch 00110: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 650 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4077 - accuracy: 0.8505 - val_loss: 0.2020 - val_accuracy: 0.9614\n", 651 | "Epoch 111/1000\n", 652 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3474 - accuracy: 0.8828\n", 653 | "Epoch 00111: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 654 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4132 - accuracy: 0.8433 - val_loss: 0.1984 - val_accuracy: 0.9632\n", 655 | "Epoch 112/1000\n", 656 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4205 - accuracy: 0.8672\n", 657 | "Epoch 00112: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 658 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4182 - accuracy: 0.8397 - val_loss: 0.1973 - val_accuracy: 0.9614\n", 659 | "Epoch 113/1000\n", 660 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4995 - accuracy: 0.8125\n", 661 | "Epoch 00113: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 662 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4042 - accuracy: 0.8478 - val_loss: 0.1922 - val_accuracy: 0.9650\n", 663 | "Epoch 114/1000\n", 664 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3640 - accuracy: 0.8750\n", 665 | "Epoch 00114: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 666 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4072 - accuracy: 0.8430 - val_loss: 0.1868 - val_accuracy: 0.9596\n", 667 | "Epoch 115/1000\n", 668 | " 1/27 [>.............................] - ETA: 0s - loss: 0.5042 - accuracy: 0.8203\n", 669 | "Epoch 00115: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 670 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3999 - accuracy: 0.8535 - val_loss: 0.1966 - val_accuracy: 0.9605\n", 671 | "Epoch 116/1000\n", 672 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.5355 - accuracy: 0.7422\n", 673 | "Epoch 00116: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 674 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4174 - accuracy: 0.8433 - val_loss: 0.1919 - val_accuracy: 0.9659\n", 675 | "Epoch 117/1000\n", 676 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3729 - accuracy: 0.8750\n", 677 | "Epoch 00117: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 678 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4101 - accuracy: 0.8451 - val_loss: 0.1932 - val_accuracy: 0.9578\n", 679 | "Epoch 118/1000\n", 680 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3193 - accuracy: 0.8828\n", 681 | "Epoch 00118: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 682 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4260 - accuracy: 0.8379 - val_loss: 0.1865 - val_accuracy: 0.9641\n", 683 | "Epoch 119/1000\n", 684 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3852 - accuracy: 0.8438\n", 685 | "Epoch 00119: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 686 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3797 - accuracy: 0.8622 - val_loss: 0.1900 - val_accuracy: 0.9677\n", 687 | "Epoch 120/1000\n", 688 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3636 - accuracy: 0.8594\n", 689 | "Epoch 00120: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 690 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4017 - accuracy: 0.8460 - val_loss: 0.1908 - val_accuracy: 0.9659\n", 691 | "Epoch 121/1000\n", 692 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.4521 - accuracy: 0.8359\n", 693 | "Epoch 00121: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 694 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4023 - accuracy: 0.8538 - val_loss: 0.1935 - val_accuracy: 0.9659\n", 695 | "Epoch 122/1000\n", 696 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4849 - accuracy: 0.8203\n", 697 | "Epoch 00122: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 698 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4186 - accuracy: 0.8457 - val_loss: 0.1937 - val_accuracy: 0.9659\n", 699 | "Epoch 123/1000\n", 700 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4021 - accuracy: 0.8516\n", 701 | "Epoch 00123: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 702 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4156 - accuracy: 0.8478 - val_loss: 0.1907 - val_accuracy: 0.9632\n", 703 | "Epoch 124/1000\n", 704 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3601 - accuracy: 0.8906\n", 705 | "Epoch 00124: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 706 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3948 - accuracy: 0.8550 - val_loss: 0.1862 - val_accuracy: 0.9605\n", 707 | "Epoch 125/1000\n", 708 | " 1/27 [>.............................] - ETA: 0s - loss: 0.4446 - accuracy: 0.7891\n", 709 | "Epoch 00125: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 710 | "27/27 [==============================] - 0s 3ms/step - loss: 0.4152 - accuracy: 0.8520 - val_loss: 0.1888 - val_accuracy: 0.9623\n", 711 | "Epoch 126/1000\n", 712 | " 1/27 [>.............................] 
- ETA: 0s - loss: 0.3733 - accuracy: 0.8438\n", 713 | "Epoch 00126: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 714 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3913 - accuracy: 0.8550 - val_loss: 0.1937 - val_accuracy: 0.9632\n", 715 | "Epoch 127/1000\n", 716 | " 1/27 [>.............................] - ETA: 0s - loss: 0.3000 - accuracy: 0.8828\n", 717 | "Epoch 00127: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 718 | "27/27 [==============================] - 0s 3ms/step - loss: 0.3820 - accuracy: 0.8583 - val_loss: 0.1867 - val_accuracy: 0.9632\n", 719 | "Epoch 00127: early stopping\n" 720 | ] 721 | }, 722 | { 723 | "data": { 724 | "text/plain": [ 725 | "" 726 | ] 727 | }, 728 | "execution_count": 11, 729 | "metadata": {}, 730 | "output_type": "execute_result" 731 | } 732 | ], 733 | "source": [ 734 | "model.fit(\n", 735 | " X_train,\n", 736 | " y_train,\n", 737 | " epochs=1000,\n", 738 | " batch_size=128,\n", 739 | " validation_data=(X_test, y_test),\n", 740 | " callbacks=[cp_callback, es_callback]\n", 741 | ")" 742 | ] 743 | }, 744 | { 745 | "cell_type": "code", 746 | "execution_count": 12, 747 | "metadata": {}, 748 | "outputs": [ 749 | { 750 | "name": "stdout", 751 | "output_type": "stream", 752 | "text": [ 753 | "9/9 [==============================] - 0s 1ms/step - loss: 0.1867 - accuracy: 0.9632\n" 754 | ] 755 | } 756 | ], 757 | "source": [ 758 | "# モデル評価\n", 759 | "val_loss, val_acc = model.evaluate(X_test, y_test, batch_size=128)" 760 | ] 761 | }, 762 | { 763 | "cell_type": "code", 764 | "execution_count": 13, 765 | "metadata": {}, 766 | "outputs": [], 767 | "source": [ 768 | "# 保存したモデルのロード\n", 769 | "model = tf.keras.models.load_model(model_save_path)" 770 | ] 771 | }, 772 | { 773 | "cell_type": "code", 774 | "execution_count": 14, 775 | "metadata": {}, 776 | "outputs": [ 777 | { 778 | "name": "stdout", 779 | "output_type": "stream", 780 | "text": [ 781 | "[0.77297777 0.1697358 
0.05728642]\n", 782 | "0\n" 783 | ] 784 | } 785 | ], 786 | "source": [ 787 | "# 推論テスト\n", 788 | "predict_result = model.predict(np.array([X_test[0]]))\n", 789 | "print(np.squeeze(predict_result))\n", 790 | "print(np.argmax(np.squeeze(predict_result)))" 791 | ] 792 | }, 793 | { 794 | "cell_type": "markdown", 795 | "metadata": {}, 796 | "source": [ 797 | "# 混同行列" 798 | ] 799 | }, 800 | { 801 | "cell_type": "code", 802 | "execution_count": 15, 803 | "metadata": {}, 804 | "outputs": [ 805 | { 806 | "data": { 807 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZUAAAFmCAYAAAClXQeMAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAfJklEQVR4nO3de5yWdZn48c81DKiA5llhYMWUPGQbtumalBnkibXQLQlfmSc22s3MyvLQK9d0s7VNM63NbVwPuJsoWaRrHlP8mZkgChKndPIQIIh4BExl5vn+/phbeyJmHmTumeeeez5vX/eL5/k+9+Ea58VcXN/re98TKSUkScpDQ70DkCSVh0lFkpQbk4okKTcmFUlSbkwqkqTcmFQkSbkxqUhSHxMR/SJiTkTckr3fNSJmRkRLRNwQEQOy8c2y9y3Z5yNqnbuxm2Nn3aonvBGmwAY1HVTvENQJ7yMrvnVvLIvczpXDz8v+279zY+I5DVgEbJW9/w5wSUrp+oj4L2AScHn254sppd0jYmK236c6O7GViiQVRaWt61sNETEM+Afgv7P3AYwBbsx2mQIclb0en70n+3xstn+HTCqS1Ld8HzgDqGTvtwNeSim1Zu+XAk3Z6yZgCUD2+cvZ/h0yqUhSUaRKl7eImBwRs6u2yW+ePiKOBFamlB7uri+h23sqkqSNVKnU3qeGlFIz0NzBx6OBj0fEOGBz2nsqlwJbR0RjVo0MA5Zl+y8DhgNLI6IReAfwfGfXt1KRpIJIqdLlrfPzp7NTSsNSSiOAicA9KaVPAzOAT2a7nQDclL2+OXtP9vk9qcbqEZOKJOlM4CsR0UJ7z+TKbPxKYLts/CvAWbVO5PSXJBVFDtNfGyuldC9wb/b6CWD/DezzGnDM2zmvSUWSiqLG9FVvYFKRpKLYiPtMis6kIklFUYJKxUa9JCk3ViqSVBQ92KjvLiYVSSqIWveZ9AYmFUkqCisVSVJuSlCp2KiXJOXGSkWSisL7VCRJuSnB9JdJRZKKogSNensqkqTcWKlIUlE4/SVJyk0Jpr9MKpJUECm5+kuSlJcSTH/ZqJck5cZKRZKKwp6KJCk3JZj+MqlIUlH4mBZJUm5KUKnYqJck5cZKRZKKwka9JCk3JZj+MqlIUlGUoFKxpyJJyo2ViiQVRQkqFZOKJBWED5SUJOXHSkWSlJsSrP6yUS9Jyo1JRZKKolLp+taJiNg8ImZFxKMRsSAizsvGr4mIJyNibraNysYjIi6LiJaImBcR76v1JTj9JUlF0f3TX68DY1JKayKiP3B/RNyWffa1lNKN6+1/BDAy2/4euDz7s0MmFUkqim5u1KeUErAme9s/21Inh4wHrs2OezAito6IISml5R0d4PSXJBVFqnR5i4jJETG7aptcfYmI6BcRc4GVwF0ppZnZRxdkU1yXRMRm2VgTsKTq8KXZWIesVCSpRFJKzUBzJ5+3AaMiYmtgekTsA5
wNrAAGZMeeCZy/Kde3UpGkoujmRn21lNJLwAzg8JTS8tTudeBqYP9st2XA8KrDhmVjHTKpSFJRdP/qrx2yCoWI2AI4BFgcEUOysQCOAuZnh9wMHJ+tAjsAeLmzfgo4/SVJxdH9q7+GAFMioh/tRcW0lNItEXFPROwABDAX+Ods/1uBcUAL8CpwUq0LmFQkqY9IKc0D9t3A+JgO9k/AKW/nGk5/bUBbWxufPPEUPv+1cwFY+swKjv3slzhiwsmcfs6/s27dur/Y/64Z97PP6COYv+ixeoSrKg0NDcyaeTvTp19T71BUZdiwodx150959NEZzJ17D6d+YVK9QyqmHuypdBeTygb8709v4p0j/uat95dcfhWf+dRR3DbtKrbacjA/u+WOtz5bu/ZV/venN/G3e+9Rj1C1nlNPncTixS31DkPraW1t5YwzzuO97/0IH/zgx/jnfzmRvfYaWe+wiieHJcX1ZlJZz4qVz3HfA7P4xMcOAyClxMyHH+XQgz8EwPhxH+We+3771v4/uOJaTj7uGAZsNqAu8erPmpqGcMQRY7nq6uvqHYrWs2LFSubMbe/9rlmzlsWLH2fo0J3rHFUB9YVKJSL2jIgzs+e/XJa93qsngquH71z6Y77y+UlEtP+veenlV9hy8CAaG/sBsNMO27PyuecBWPj7FlasXMWHD9y/w/Op51x80Tc5++wLqFQ6u0FY9bbLLsMY9d59mDVrTr1DKZ6yVyoRcSZwPe0rAmZlWwBTI+Ks7g+vZ937m5lsu83WvHvP2mV5pVLhP37QzNdO/WwPRKZaxo0by8rnVjFnzu/qHYo6MWjQQKbdcAWnf/VcVq9eU/sA9Tq1Vn9NAt6dUvqLznREfA9YAFy4oYOyxwJMBvjRxd/in44/NodQu9+ceQu59/4H+fVvH+L1N9axdu2rXPj9/2L1mrW0trbR2NiPZ59bxY47bMfaV/9EyxNPc9IXzgBg1QsvcuqZ5/GD75zLPnu9q85fSd9z4Af248h/OJTDDxvD5ptvxlZbbck1V1/GiSd9sd6hKdPY2Mi0G65g6tTp/OIXt9U+oC8qwPRVV0X7irEOPoxYDByWUnp6vfFdgDtTSjW70+tWPdEr5yJmPTKPa6b+jB999zy+8o0L+OjBoxn30YM57z9+wB6778rEfzzyL/Y/8Qtn8NVT/qnXJZRBTQfVO4TcHXTQB/jylz/H0UefWO9Quqyzv5+9zdVXXcoLL7zE6V89t96h5GrdG8sir3P9adr5Xf6GbzHhX3OLZ1PU6ql8Cbg7Im6LiOZsux24Gzit26MriC//y8lce/10jphwMi+/8gr/eOSh9Q5J6lVGH7gfxx33ST7ykQOZ/dCdzH7oTg4/fIO3RvRtKXV9q7NOKxWAaO9Y78+fn0y5DHgoeyhZTb21UukryliplEmZKpWyyrVSmXpu1yuVY8+ra6VS8476lFIFeLAHYpEk9XI+pkWSiqIEjXqTiiQVRQHuM+kqk4okFUUJKhUf0yJJyo2ViiQVRQlW+5lUJKkoSjD9ZVKRpKIwqUiSclOC1V826iVJubFSkaSCSCX4XUAmFUkqCnsqkqTclKCnYlKRpKIowfSXjXpJUm6sVCSpKOypSJJyY1KRJOWmBM/+sqciScqNlYokFYXTX5Kk3JRgSbFJRZKKogQ3P9pTkaSiqKSub52IiM0jYlZEPBoRCyLivGx814iYGREtEXFDRAzIxjfL3rdkn4+o9SWYVCSp73gdGJNSei8wCjg8Ig4AvgNcklLaHXgRmJTtPwl4MRu/JNuvUyYVSSqIVKl0eev0/O3WZG/7Z1sCxgA3ZuNTgKOy1+Oz92Sfj42I6OwaJhVJKopunv4CiIh+ETEXWAncBfwBeCml1JrtshRoyl43AUsAss9fBrbr7PwmFUkqilTp8hYRkyNidtU2+S8ukVJbSmkUMAzYH9gzzy/B1V+SVCIppWageSP2eykiZgAfALaOiMasGhkGLMt2WwYMB5ZGRCPwDu
D5zs5rpSJJRdH9q792iIits9dbAIcAi4AZwCez3U4Abspe35y9J/v8npQ6f5aMlYokFUX331E/BJgSEf1oLyqmpZRuiYiFwPUR8S1gDnBltv+VwP9ERAvwAjCx1gVMKpJUFN18R31KaR6w7wbGn6C9v7L++GvAMW/nGiYVSSoK76iXJOnPrFQkqSh8oKQkKS+17ojvDUwqklQUViqSpNyUIKnYqJck5cZKRZKKogRLik0qklQUJZj+MqlIUkGkEiQVeyqSpNxYqUhSUZSgUjGpSFJRePOjJCk3ViqSpNyUIKnYqJck5cZKRZIKosZv6u0VTCqSVBQlmP4yqUhSUZhUahs49EPdfQl1wSu3nVvvENSJkcdcVu8Q1IO8o16SpCpOf0lSUZSgUjGpSFJR9P4b6k0qklQU9lQkSapipSJJRVGCSsWkIklFYU9FkpSXMvRUTCqSVBQlqFRs1EuScmOlIkkFUYbpLysVSSqKSg5bJyJieETMiIiFEbEgIk7Lxr8ZEcsiYm62jas65uyIaImI30fEYbW+BCsVSSqI1P09lVbg9JTSIxGxJfBwRNyVfXZJSumi6p0jYm9gIvBuYCjwq4h4V0qpraMLWKlIUlF0c6WSUlqeUnoke70aWAQ0dXLIeOD6lNLrKaUngRZg/86uYVKRpD4oIkYA+wIzs6EvRMS8iLgqIrbJxpqAJVWHLaXzJGRSkaSiSJWubxExOSJmV22T179ORAwGfgZ8KaX0CnA5sBswClgOXLypX4M9FUkqihx6KimlZqC5o88joj/tCeUnKaWfZ8c8W/X5FcAt2dtlwPCqw4dlYx2yUpGkgsijUulMRARwJbAopfS9qvEhVbsdDczPXt8MTIyIzSJiV2AkMKuza1ipSFLfMRr4DPC7iJibjX0dODYiRgEJeAr4HEBKaUFETAMW0r5y7JTOVn6BSUWSCqO7lxSnlO4HYgMf3drJMRcAF2zsNUwqklQQPXCfSrczqUhSUaQNFRG9i0lFkgqiDJWKq78kSbmxUpGkgkgVp78kSTkpw/SXSUWSCiLZqJck5aUMlYqNeklSbqxUJKkgbNRLknKTev+vqDepSFJRlKFSsaciScqNlYokFUQZKhWTiiQVhD0VSVJurFQkSbkpwx31NuolSbmxUpGkgijDY1pMKpJUEJUSTH+ZVCSpIMrQUzGpSFJBlGH1l416SVJurFQkqSC8+VGSlJsyTH+ZVCSpIMqw+sueiiQpN1YqklQQLimWJOXGRn0fMmzYUK6+6lJ23Gl7Ukpc+d8/4Qc/vLLeYfU5r69r5eTv3cC61jZaKxU+uu9IPn/kaM659nYefnwJg7fYDIDzP3M4ew7f8a3j5j+1ghMuuo4LTz6SQ973rnqF36cMadqZS3/0bbbfcTtSSlw35Uau/PH/ste79+DC753DoEEDWfLHZzj1c2eyZvXaeodbCGXoqZhUNlJraytnnHEec+bOZ/DgQcyceTu/uvs+Fi16vN6h9SkDGvtxxWnHMHDzAaxra+Oki6/ng+/eFYAvH/3hDSaMtkqFS39xHwfsNaKHo+3b2lpbOf+c7zJ/3iIGDR7IbfdM4757H+C7l57Ht/71Ih58YDaf+vTR/POpJ3HRt39Y73ALoQzTXzbqN9KKFSuZM3c+AGvWrGXx4scZOnTnOkfV90QEAzcfAEBrW4XWtgpB538Rp947h7H7jmTbLQf2RIjKrHx2FfPnLQJg7ZpXefyxJ9h5yE68c/ddePCB2QDcd+9vGfexQ+oZpnK2yUklIk7KM5DeZJddhjHqvfswa9aceofSJ7VVKkz49rWMOfNyDthzF96z6xAAfnjz/RzzrSl898YZvLGuFYBnX1rNjLktTPjQqDpGrGHDh7LP3+7FnIfn8djiP3DYuDEAHDn+UP9xViWlrm+diYjhETEjIhZGxIKIOC0b3zYi7oqIx7M/t8nGIyIui4iWiJgXEe+r9TV0pVI5rwvH9lqDBg1k2g1XcPpXz2X16jX1Dq
dP6tfQwLSvH88dF0xm/lMraHlmFV8c/0F+ce5J/OTMT/Py2te4+q6HAPjuT+/ltKM/REND759W6K0GDtqC5imX8M2vf4c1q9dy+qnncPykidx6zw0MHjyIdevW1TvEwqik6PJWQytwekppb+AA4JSI2Bs4C7g7pTQSuDt7D3AEMDLbJgOX17pApz2ViJjX0UfATp0cNzkLgIZ+76ChYVCtOHqFxsZGpt1wBVOnTucXv7it3uH0eVsN3Jz99hjObxY8yQmH7AfAgP6NjP/APlz7q/bplYV/XMGZV/4SgJfW/on75z9Bv4ZgzKiRdYu7L2lsbKR5yveZfuMvue2WXwHwh8ef5NOfmAzArrvtwthDDqpniIXS3T2VlNJyYHn2enVELAKagPHAwdluU4B7gTOz8WtTSgl4MCK2jogh2Xk2qFajfifgMODF9cYDeKCTwJuBZoD+A5pKsEiu3RXNF7N4cQvfv7S53qH0WS+sfpXGfg1sNXBzXntjHQ8uepqTDt2P515eww7vGExKiRmPtrD70O0AuPXfPvvWsedcezsH7fNOE0oPuuiy82l57Amu+NG1b41tt/22PL/qBSKC007/HP9zzbQ6Rlgseaz+qv5HfaY5+5m8/n4jgH2BmcBOVYliBX8uGpqAJVWHLc3GNjmp3AIMTinN3UBA99Y4tlRGH7gfxx33SX73u4XMfuhOAL5xzoXcfvs9dY6sb1n18lrOufY2KpVEJSUO/bs9OOg9u/HZ70/jxTV/IqXEHsN25BvHfrTeofZ5+/39vnxy4sdZtOAx7vh/NwLwnX+7lF1324UTJk0E4LZbfsUNP5lezzBLp/of9R2JiMHAz4AvpZReifhzMksppYjY5GIgUjffbVOmSqWMXrnt3HqHoE6MPOayeoegGpa+MD+3OasHh/5jl39eHvDMzzuNJyL6014w3JFS+l429nvg4JTS8ogYAtybUtojIn6cvZ66/n4dnd8lxZJUEN3dqI/2kuRKYNGbCSVzM3BC9voE4Kaq8eOzVWAHAC93llDAmx8lqTB64ObH0cBngN9FxNxs7OvAhcC0iJgEPA1MyD67FRgHtACvAjVvJTGpSFIfkVK6Hzq8W3jsBvZPwClv5xomFUkqiEq9A8iBSUWSCiLVeORQb2BSkaSCqJRgraxJRZIKolKCSsUlxZKk3FipSFJB2FORJOXG1V+SpNyUoVKxpyJJyo2ViiQVhNNfkqTcmFQkSbkpQ0/FpCJJBVHp/TnFRr0kKT9WKpJUEGV4TItJRZIKogTPkzSpSFJRuPpLkpSbSvT+6S8b9ZKk3FipSFJB2FORJOXGnookKTfe/ChJUhUrFUkqCG9+lCTlxka9JCk3ZeipmFQkqSDKsPrLRr0kKTdWKpJUEPZUJEm5saciScqNPRVJUm4qOWy1RMRVEbEyIuZXjX0zIpZFxNxsG1f12dkR0RIRv4+Iw2qd36QiSX3LNcDhGxi/JKU0KttuBYiIvYGJwLuzY34UEf06O7lJRZIKIkXXt5rXSOk+4IWNDGk8cH1K6fWU0pNAC7B/ZweYVCSpIHpi+qsTX4iIedn02DbZWBOwpGqfpdlYh0wqklQQeSSViJgcEbOrtskbcenLgd2AUcBy4OJN/Rpc/SVJJZJSagaa3+Yxz775OiKuAG7J3i4DhlftOiwb65CViiQVRMph2xQRMaTq7dHAmyvDbgYmRsRmEbErMBKY1dm5rFQkqSB64ubHiJgKHAxsHxFLgXOBgyNiFO156SngcwAppQURMQ1YCLQCp6SU2jo7v0lFkgqiJ25+TCkdu4HhKzvZ/wLggo09v0lFkgrCO+olSapipSJJBeFTiiVJufEpxZKk3JShp2JSkaSCKMP0l416SVJuur1SKUPmLbN3jDu/3iGoEy9d9PF6h6AeVCnBT0ynvySpIOypSJJy0/vrFHsqkqQcWalIUkE4/SVJyo03P0qScuPqL0lSbnp/SrFRL0nKkZWKJBWEjXpJUm7sqUiSct
P7U4pJRZIKowzTXzbqJUm5sVKRpIKwpyJJyk3vTykmFUkqDHsqkiRVsVKRpIJIJZgAM6lIUkGUYfrLpCJJBeHqL0lSbnp/SrFRL0nKkZWKJBVEGaa/rFQkqSAqOWy1RMRVEbEyIuZXjW0bEXdFxOPZn9tk4xERl0VES0TMi4j31Tq/SUWSCiLl8N9GuAY4fL2xs4C7U0ojgbuz9wBHACOzbTJwea2Tm1QkqSB6olJJKd0HvLDe8HhgSvZ6CnBU1fi1qd2DwNYRMaSz85tUJEk7pZSWZ69XADtlr5uAJVX7Lc3GOmRSkaSCyGP6KyImR8Tsqm3y24ohpUQXVje7+kuSCiKPO+pTSs1A89s87NmIGJJSWp5Nb63MxpcBw6v2G5aNdchKRZIKopJSl7dNdDNwQvb6BOCmqvHjs1VgBwAvV02TbZCViiT1IRExFTgY2D4ilgLnAhcC0yJiEvA0MCHb/VZgHNACvAqcVOv8JhVJKoieuPUxpXRsBx+N3cC+CTjl7ZzfpCJJBVGGO+pNKpJUEP4+FUlSbsrw+1Rc/SVJyo2ViiQVhD0VSVJu7KlIknJThp6KSUWSCiJt+h3xhWGjXpKUGysVSSoIG/WSpNzYU5Ek5aYMq7/sqUiScmOlIkkFYU9FkpSbMiwpNqlIUkHYqJck5cZGfR9z2KEHs2D+fSxeeD9nfO1t/TI09ZCGhgZmzbyd6dOvqXcofdLrrW0cN+0hJkydySeue5DLZz4BwPXzlvDx/3mAfX94Ny/+6Y2/Om7Bs6/w/v+8h7tanu3pkJUzK5WN1NDQwGWXXsDh445l6dLlPPjbW/m/W+5k0aLH6x2aqpx66iQWL25hy60G1zuUPmlAvwaaj9qXgQMaWddW4eSfP8zoXbZj1JCtOWjE9vzT9Ef+6pi2SuLSB1o44G+2rUPExVKGRn3NSiUi9oyIsRExeL3xw7svrOLZf799+cMfnuLJJ//IunXrmDbtJj7+scPqHZaqNDUN4YgjxnLV1dfVO5Q+KyIYOKD936qtlURrJRHAnjtsydCtttjgMdfPW8LY3XZg2y0G9GCkxZRS6vJWb50mlYj4InATcCowPyLGV3387e4MrGiGNu3MkqXPvPV+6bLlDB26cx0j0vouvuibnH32BVQq9f+L1Ze1VRKfun4mY6/6NQcM35b37PyODvddueY17nniOY55z7AejLC4KqQub/VWq1L5LPB3KaWjgIOBcyLitOyz6OigiJgcEbMjYnalsjaXQKXOjBs3lpXPrWLOnN/VO5Q+r19DcMPEv+eOE0cz/9mXaXl+TYf7fvfXj3PagbvTEB3+OFEvU6un0pBSWgOQUnoqIg4GboyIXegkqaSUmoFmgMYBTfVPnTl4ZtkKhg8b+tb7YU1DeOaZFXWMSNUO/MB+HPkPh3L4YWPYfPPN2GqrLbnm6ss48aQv1ju0PmvLzfrz/qZteODp59l9uw33uBaufIWz7pgPwEuvreP+p1fR2NDAR965Q0+GWhh9YfXXsxEx6s03WYI5EtgeeE83xlU4D82ey+6778qIEcPp378/EyaM5/9uubPeYSnzjXMu5J277ce79vgAx33mFGbc+xsTSh288Kc3WP36OgBea21j5pIXGLHNoA73/+UJo7k12z66246c/eE9+mxCAaik1OWt3mpVKscDrdUDKaVW4PiI+HG3RVVAbW1tnPalb3DrL6+jX0MD10y5gYULH6t3WFKhrFr7Ov/6q4VUUvsPyEN235GDdt2e6x5dwpRHnub5V99gwtSZfHDE9pw7Zq96h1s49U8JXRfdvVqgLNNfZeVcdrG9dNHH6x2Cahh46o9y+0s0umlMl39e/mbZPXX9S+3Nj5Kk3HjzoyQVRBGWBHeVSUWSCqIINy92lUlFkgrCSkWSlJu+cJ+KJEkbzUpFkgqiJ3oqEfEUsBpoA1pTSu+PiG2BG4ARwFPAhJTSi5tyfisVSSqIHnyg5EdSSqNSSu/P3p8F3J1SGgncnb3fJC
YVSSqIOj76fjwwJXs9BThqU09kUpGkEql+Sny2TV5vlwTcGREPV322U0ppefZ6BbDTpl7fnookFUQeS4qrnxLfgQ+mlJZFxI7AXRGxeL3jU0RsciAmFUkqiJ5YUpxSWpb9uTIipgP70/5E+iEppeURMQRYuannd/pLkgqiux99HxGDImLLN18DhwLzgZuBE7LdTqD9N/5uEisVSSqIHqhUdgKmR/vTyRuB61JKt0fEQ8C0iJgEPA1M2NQLmFQkqY9IKT0BvHcD488DY/O4hklFkgqiCL+5satMKpJUEGV49pdJRZIKwkpFkpSbMlQqLimWJOXGSkWSCsLpL0lSbsow/WVSkaSCSKlS7xC6zJ6KJCk3ViqSVBB5PKW43kwqklQQPfHrhLubSUWSCsJKRZKUmzJUKjbqJUm5sVKRpILw5kdJUm68+VGSlJsy9FRMKpJUEGVY/WWjXpKUGysVSSoIp78kSblx9ZckKTdlqFTsqUiScmOlIkkFUYbVXyYVSSqIMkx/mVQkqSBs1EuSclOGx7TYqJck5cZKRZIKwukvSVJubNRLknJjT0WSlJuUUpe3WiLi8Ij4fUS0RMRZeX8NJhVJ6iMioh/wn8ARwN7AsRGxd57XcPpLkgqiB3oq+wMtKaUnACLiemA8sDCvC1ipSFJBpBy2GpqAJVXvl2Zjuen2SqX1jWXR3dfoSRExOaXUXO84tGF+f4rP71HH8vh5GRGTgclVQ809+f/bSuXtm1x7F9WR35/i83vUjVJKzSml91dt1QllGTC86v2wbCw3JhVJ6jseAkZGxK4RMQCYCNyc5wVs1EtSH5FSao2ILwB3AP2Aq1JKC/K8hknl7XMuuNj8/hSf36M6SindCtzaXeePMjwWQJJUDPZUJEm5Mam8Dd39eANtuoi4KiJWRsT8eseivxYRwyNiRkQsjIgFEXFavWNS93D6ayNljzd4DDiE9huGHgKOTSnldieqNl1EHASsAa5NKe1T73j0lyJiCDAkpfRIRGwJPAwc5d+f8rFS2XhvPd4gpfQG8ObjDVQAKaX7gBfqHYc2LKW0PKX0SPZ6NbCInO/kVjGYVDZetz/eQOoLImIEsC8ws86hqBuYVCT1mIgYDPwM+FJK6ZV6x6P8mVQ2Xrc/3kAqs4joT3tC+UlK6ef1jkfdw6Sy8br98QZSWUVEAFcCi1JK36t3POo+JpWNlFJqBd58vMEiYFrejzfQpouIqcBvgT0iYmlETKp3TPoLo4HPAGMiYm62jat3UMqfS4olSbmxUpEk5cakIknKjUlFkpQbk4okKTcmFUlSbkwqkqTcmFQkSbkxqUiScvP/AVplmVKXNqIBAAAAAElFTkSuQmCC\n", 808 | "text/plain": [ 809 | "
" 810 | ] 811 | }, 812 | "metadata": { 813 | "needs_background": "light" 814 | }, 815 | "output_type": "display_data" 816 | }, 817 | { 818 | "name": "stdout", 819 | "output_type": "stream", 820 | "text": [ 821 | "Classification Report\n", 822 | " precision recall f1-score support\n", 823 | "\n", 824 | " 0 1.00 0.99 0.99 410\n", 825 | " 1 0.98 0.92 0.95 385\n", 826 | " 2 0.91 0.99 0.95 318\n", 827 | "\n", 828 | " accuracy 0.96 1113\n", 829 | " macro avg 0.96 0.96 0.96 1113\n", 830 | "weighted avg 0.96 0.96 0.96 1113\n", 831 | "\n" 832 | ] 833 | } 834 | ], 835 | "source": [ 836 | "import pandas as pd\n", 837 | "import seaborn as sns\n", 838 | "import matplotlib.pyplot as plt\n", 839 | "from sklearn.metrics import confusion_matrix, classification_report\n", 840 | "\n", 841 | "def print_confusion_matrix(y_true, y_pred, report=True):\n", 842 | " labels = sorted(list(set(y_true)))\n", 843 | " cmx_data = confusion_matrix(y_true, y_pred, labels=labels)\n", 844 | " \n", 845 | " df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)\n", 846 | " \n", 847 | " fig, ax = plt.subplots(figsize=(7, 6))\n", 848 | " sns.heatmap(df_cmx, annot=True, fmt='g' ,square=False)\n", 849 | " ax.set_ylim(len(set(y_true)), 0)\n", 850 | " plt.show()\n", 851 | " \n", 852 | " if report:\n", 853 | " print('Classification Report')\n", 854 | " print(classification_report(y_test, y_pred))\n", 855 | "\n", 856 | "Y_pred = model.predict(X_test)\n", 857 | "y_pred = np.argmax(Y_pred, axis=1)\n", 858 | "\n", 859 | "print_confusion_matrix(y_test, y_pred)" 860 | ] 861 | }, 862 | { 863 | "cell_type": "markdown", 864 | "metadata": {}, 865 | "source": [ 866 | "# Tensorflow-Lite用のモデルへ変換" 867 | ] 868 | }, 869 | { 870 | "cell_type": "code", 871 | "execution_count": 16, 872 | "metadata": {}, 873 | "outputs": [], 874 | "source": [ 875 | "# 推論専用のモデルとして保存\n", 876 | "model.save(model_save_path, include_optimizer=False)" 877 | ] 878 | }, 879 | { 880 | "cell_type": "code", 881 | "execution_count": 17, 882 | 
"metadata": {}, 883 | "outputs": [ 884 | { 885 | "name": "stdout", 886 | "output_type": "stream", 887 | "text": [ 888 | "WARNING:tensorflow:From d:\\00.envs\\20201208_mediapipe\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\n", 889 | "Instructions for updating:\n", 890 | "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n", 891 | "WARNING:tensorflow:From d:\\00.envs\\20201208_mediapipe\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n", 892 | "Instructions for updating:\n", 893 | "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n", 894 | "INFO:tensorflow:Assets written to: C:\\Users\\sihit\\AppData\\Local\\Temp\\tmpy2l6ipxu\\assets\n" 895 | ] 896 | }, 897 | { 898 | "data": { 899 | "text/plain": [ 900 | "6224" 901 | ] 902 | }, 903 | "execution_count": 17, 904 | "metadata": {}, 905 | "output_type": "execute_result" 906 | } 907 | ], 908 | "source": [ 909 | "# モデルを変換(量子化)\n", 910 | "tflite_save_path = 'model/keypoint_classifier/keypoint_classifier.tflite'\n", 911 | "\n", 912 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n", 913 | "converter.optimizations = [tf.lite.Optimize.DEFAULT]\n", 914 | "tflite_quantized_model = converter.convert()\n", 915 | "\n", 916 | "open(tflite_save_path, 'wb').write(tflite_quantized_model)" 917 | ] 918 | }, 919 | { 920 | "cell_type": "markdown", 921 | "metadata": {}, 922 | "source": [ 923 | "# 推論テスト" 924 | ] 925 | }, 926 | { 927 | "cell_type": "code", 928 | "execution_count": 18, 929 | "metadata": {}, 930 | "outputs": [], 931 | "source": [ 932 | "interpreter = tf.lite.Interpreter(model_path=tflite_save_path)\n", 933 | 
"interpreter.allocate_tensors()" 934 | ] 935 | }, 936 | { 937 | "cell_type": "code", 938 | "execution_count": 19, 939 | "metadata": {}, 940 | "outputs": [], 941 | "source": [ 942 | "# 入出力テンソルを取得\n", 943 | "input_details = interpreter.get_input_details()\n", 944 | "output_details = interpreter.get_output_details()" 945 | ] 946 | }, 947 | { 948 | "cell_type": "code", 949 | "execution_count": 20, 950 | "metadata": {}, 951 | "outputs": [], 952 | "source": [ 953 | "interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))" 954 | ] 955 | }, 956 | { 957 | "cell_type": "code", 958 | "execution_count": 21, 959 | "metadata": { 960 | "scrolled": true 961 | }, 962 | "outputs": [ 963 | { 964 | "name": "stdout", 965 | "output_type": "stream", 966 | "text": [ 967 | "Wall time: 0 ns\n" 968 | ] 969 | } 970 | ], 971 | "source": [ 972 | "%%time\n", 973 | "# 推論実施\n", 974 | "interpreter.invoke()\n", 975 | "tflite_results = interpreter.get_tensor(output_details[0]['index'])" 976 | ] 977 | }, 978 | { 979 | "cell_type": "code", 980 | "execution_count": 22, 981 | "metadata": {}, 982 | "outputs": [ 983 | { 984 | "name": "stdout", 985 | "output_type": "stream", 986 | "text": [ 987 | "[0.7729778 0.16973573 0.05728643]\n", 988 | "0\n" 989 | ] 990 | } 991 | ], 992 | "source": [ 993 | "print(np.squeeze(tflite_results))\n", 994 | "print(np.argmax(np.squeeze(tflite_results)))" 995 | ] 996 | } 997 | ], 998 | "metadata": { 999 | "kernelspec": { 1000 | "display_name": "Python 3", 1001 | "language": "python", 1002 | "name": "python3" 1003 | }, 1004 | "language_info": { 1005 | "codemirror_mode": { 1006 | "name": "ipython", 1007 | "version": 3 1008 | }, 1009 | "file_extension": ".py", 1010 | "mimetype": "text/x-python", 1011 | "name": "python", 1012 | "nbconvert_exporter": "python", 1013 | "pygments_lexer": "ipython3", 1014 | "version": "3.8.5" 1015 | } 1016 | }, 1017 | "nbformat": 4, 1018 | "nbformat_minor": 4 1019 | } 1020 | 
-------------------------------------------------------------------------------- /keypoint_classification_EN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "kernelspec": { 6 | "display_name": "Python 3", 7 | "language": "python", 8 | "name": "python3" 9 | }, 10 | "language_info": { 11 | "codemirror_mode": { 12 | "name": "ipython", 13 | "version": 3 14 | }, 15 | "file_extension": ".py", 16 | "mimetype": "text/x-python", 17 | "name": "python", 18 | "nbconvert_exporter": "python", 19 | "pygments_lexer": "ipython3", 20 | "version": "3.8.5" 21 | }, 22 | "colab": { 23 | "name": "keypoint_classification_EN.ipynb", 24 | "provenance": [], 25 | "collapsed_sections": [], 26 | "toc_visible": true 27 | }, 28 | "accelerator": "GPU" 29 | }, 30 | "cells": [ 31 | { 32 | "cell_type": "code", 33 | "metadata": { 34 | "id": "igMyGnjE9hEp" 35 | }, 36 | "source": [ 37 | "import csv\n", 38 | "\n", 39 | "import numpy as np\n", 40 | "import tensorflow as tf\n", 41 | "from sklearn.model_selection import train_test_split\n", 42 | "\n", 43 | "RANDOM_SEED = 42" 44 | ], 45 | "execution_count": null, 46 | "outputs": [] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": { 51 | "id": "t2HDvhIu9hEr" 52 | }, 53 | "source": [ 54 | "# Specify each path" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "metadata": { 60 | "id": "9NvZP2Zn9hEy" 61 | }, 62 | "source": [ 63 | "dataset = 'model/keypoint_classifier/keypoint.csv'\n", 64 | "model_save_path = 'model/keypoint_classifier/keypoint_classifier.hdf5'\n", 65 | "tflite_save_path = 'model/keypoint_classifier/keypoint_classifier.tflite'" 66 | ], 67 | "execution_count": null, 68 | "outputs": [] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": { 73 | "id": "s5oMH7x19hEz" 74 | }, 75 | "source": [ 76 | "# Set number of classes" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "metadata": { 82 | "id": "du4kodXL9hEz" 83 
| }, 84 | "source": [ 85 | "NUM_CLASSES = 4" 86 | ], 87 | "execution_count": null, 88 | "outputs": [] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": { 93 | "id": "XjnL0uso9hEz" 94 | }, 95 | "source": [ 96 | "# Dataset reading" 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "metadata": { 102 | "id": "QT5ZqtEz9hE0" 103 | }, 104 | "source": [ 105 | "X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (21 * 2) + 1)))" 106 | ], 107 | "execution_count": null, 108 | "outputs": [] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "metadata": { 113 | "id": "QmoKFsp49hE0" 114 | }, 115 | "source": [ 116 | "y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))" 117 | ], 118 | "execution_count": null, 119 | "outputs": [] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "metadata": { 124 | "id": "xQU7JTZ_9hE0" 125 | }, 126 | "source": [ 127 | "X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)" 128 | ], 129 | "execution_count": null, 130 | "outputs": [] 131 | }, 132 | { 133 | "cell_type": "markdown", 134 | "metadata": { 135 | "id": "mxK_lETT9hE0" 136 | }, 137 | "source": [ 138 | "# Model building" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "metadata": { 144 | "id": "vHBmUf1t9hE1" 145 | }, 146 | "source": [ 147 | "model = tf.keras.models.Sequential([\n", 148 | " tf.keras.layers.Input((21 * 2, )),\n", 149 | " tf.keras.layers.Dropout(0.2),\n", 150 | " tf.keras.layers.Dense(20, activation='relu'),\n", 151 | " tf.keras.layers.Dropout(0.4),\n", 152 | " tf.keras.layers.Dense(10, activation='relu'),\n", 153 | " tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n", 154 | "])" 155 | ], 156 | "execution_count": null, 157 | "outputs": [] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "metadata": { 162 | "id": "ypqky9tc9hE1", 163 | "colab": { 164 | "base_uri": "https://localhost:8080/" 165 | }, 166 | "outputId": 
"5db082bb-30e3-4110-bf63-a1ee777ecd46" 167 | }, 168 | "source": [ 169 | "model.summary() # tf.keras.utils.plot_model(model, show_shapes=True)" 170 | ], 171 | "execution_count": null, 172 | "outputs": [ 173 | { 174 | "output_type": "stream", 175 | "text": [ 176 | "Model: \"sequential\"\n", 177 | "_________________________________________________________________\n", 178 | "Layer (type) Output Shape Param # \n", 179 | "=================================================================\n", 180 | "dropout (Dropout) (None, 42) 0 \n", 181 | "_________________________________________________________________\n", 182 | "dense (Dense) (None, 20) 860 \n", 183 | "_________________________________________________________________\n", 184 | "dropout_1 (Dropout) (None, 20) 0 \n", 185 | "_________________________________________________________________\n", 186 | "dense_1 (Dense) (None, 10) 210 \n", 187 | "_________________________________________________________________\n", 188 | "dense_2 (Dense) (None, 4) 44 \n", 189 | "=================================================================\n", 190 | "Total params: 1,114\n", 191 | "Trainable params: 1,114\n", 192 | "Non-trainable params: 0\n", 193 | "_________________________________________________________________\n" 194 | ], 195 | "name": "stdout" 196 | } 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "metadata": { 202 | "id": "MbMjOflQ9hE1" 203 | }, 204 | "source": [ 205 | "# Model checkpoint callback\n", 206 | "cp_callback = tf.keras.callbacks.ModelCheckpoint(\n", 207 | " model_save_path, verbose=1, save_weights_only=False)\n", 208 | "# Callback for early stopping\n", 209 | "es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)" 210 | ], 211 | "execution_count": null, 212 | "outputs": [] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "metadata": { 217 | "id": "c3Dac0M_9hE2" 218 | }, 219 | "source": [ 220 | "# Model compilation\n", 221 | "model.compile(\n", 222 | " optimizer='adam',\n", 223 | " 
loss='sparse_categorical_crossentropy',\n", 224 | " metrics=['accuracy']\n", 225 | ")" 226 | ], 227 | "execution_count": null, 228 | "outputs": [] 229 | }, 230 | { 231 | "cell_type": "markdown", 232 | "metadata": { 233 | "id": "7XI0j1Iu9hE2" 234 | }, 235 | "source": [ 236 | "# Model training" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "metadata": { 242 | "scrolled": true, 243 | "id": "WirBl-JE9hE3", 244 | "colab": { 245 | "base_uri": "https://localhost:8080/" 246 | }, 247 | "outputId": "71b30ca2-8294-4d9d-8aa2-800d90d399de" 248 | }, 249 | "source": [ 250 | "model.fit(\n", 251 | " X_train,\n", 252 | " y_train,\n", 253 | " epochs=1000,\n", 254 | " batch_size=128,\n", 255 | " validation_data=(X_test, y_test),\n", 256 | " callbacks=[cp_callback, es_callback]\n", 257 | ")" 258 | ], 259 | "execution_count": null, 260 | "outputs": [ 261 | { 262 | "output_type": "stream", 263 | "text": [ 264 | "Epoch 1/1000\n", 265 | "29/29 [==============================] - 2s 15ms/step - loss: 1.3853 - accuracy: 0.3360 - val_loss: 1.2779 - val_accuracy: 0.4244\n", 266 | "\n", 267 | "Epoch 00001: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 268 | "Epoch 2/1000\n", 269 | "29/29 [==============================] - 0s 4ms/step - loss: 1.2943 - accuracy: 0.3780 - val_loss: 1.2151 - val_accuracy: 0.4703\n", 270 | "\n", 271 | "Epoch 00002: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 272 | "Epoch 3/1000\n", 273 | "29/29 [==============================] - 0s 4ms/step - loss: 1.2524 - accuracy: 0.3749 - val_loss: 1.1472 - val_accuracy: 0.5572\n", 274 | "\n", 275 | "Epoch 00003: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 276 | "Epoch 4/1000\n", 277 | "29/29 [==============================] - 0s 4ms/step - loss: 1.1989 - accuracy: 0.4251 - val_loss: 1.0682 - val_accuracy: 0.6374\n", 278 | "\n", 279 | "Epoch 00004: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 280 | "Epoch 
5/1000\n", 281 | "29/29 [==============================] - 0s 4ms/step - loss: 1.1363 - accuracy: 0.4733 - val_loss: 1.0027 - val_accuracy: 0.6608\n", 282 | "\n", 283 | "Epoch 00005: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 284 | "Epoch 6/1000\n", 285 | "29/29 [==============================] - 0s 4ms/step - loss: 1.0938 - accuracy: 0.5107 - val_loss: 0.9416 - val_accuracy: 0.6717\n", 286 | "\n", 287 | "Epoch 00006: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 288 | "Epoch 7/1000\n", 289 | "29/29 [==============================] - 0s 4ms/step - loss: 1.0426 - accuracy: 0.5351 - val_loss: 0.8775 - val_accuracy: 0.7043\n", 290 | "\n", 291 | "Epoch 00007: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 292 | "Epoch 8/1000\n", 293 | "29/29 [==============================] - 0s 4ms/step - loss: 1.0024 - accuracy: 0.5597 - val_loss: 0.8238 - val_accuracy: 0.7243\n", 294 | "\n", 295 | "Epoch 00008: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 296 | "Epoch 9/1000\n", 297 | "29/29 [==============================] - 0s 4ms/step - loss: 0.9845 - accuracy: 0.5475 - val_loss: 0.7726 - val_accuracy: 0.7444\n", 298 | "\n", 299 | "Epoch 00009: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 300 | "Epoch 10/1000\n", 301 | "29/29 [==============================] - 0s 4ms/step - loss: 0.9527 - accuracy: 0.5661 - val_loss: 0.7256 - val_accuracy: 0.7602\n", 302 | "\n", 303 | "Epoch 00010: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 304 | "Epoch 11/1000\n", 305 | "29/29 [==============================] - 0s 4ms/step - loss: 0.9066 - accuracy: 0.5915 - val_loss: 0.6922 - val_accuracy: 0.7886\n", 306 | "\n", 307 | "Epoch 00011: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 308 | "Epoch 12/1000\n", 309 | "29/29 [==============================] - 0s 4ms/step - loss: 0.8825 - accuracy: 0.6094 - val_loss: 0.6512 - 
val_accuracy: 0.8087\n", 310 | "\n", 311 | "Epoch 00012: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 312 | "Epoch 13/1000\n", 313 | "29/29 [==============================] - 0s 3ms/step - loss: 0.8901 - accuracy: 0.6124 - val_loss: 0.6228 - val_accuracy: 0.8246\n", 314 | "\n", 315 | "Epoch 00013: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 316 | "Epoch 14/1000\n", 317 | "29/29 [==============================] - 0s 3ms/step - loss: 0.8616 - accuracy: 0.6238 - val_loss: 0.5969 - val_accuracy: 0.8688\n", 318 | "\n", 319 | "Epoch 00014: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 320 | "Epoch 15/1000\n", 321 | "29/29 [==============================] - 0s 3ms/step - loss: 0.8060 - accuracy: 0.6412 - val_loss: 0.5634 - val_accuracy: 0.8780\n", 322 | "\n", 323 | "Epoch 00015: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 324 | "Epoch 16/1000\n", 325 | "29/29 [==============================] - 0s 3ms/step - loss: 0.8036 - accuracy: 0.6573 - val_loss: 0.5445 - val_accuracy: 0.8897\n", 326 | "\n", 327 | "Epoch 00016: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 328 | "Epoch 17/1000\n", 329 | "29/29 [==============================] - 0s 6ms/step - loss: 0.7658 - accuracy: 0.6752 - val_loss: 0.5247 - val_accuracy: 0.9056\n", 330 | "\n", 331 | "Epoch 00017: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 332 | "Epoch 18/1000\n", 333 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7816 - accuracy: 0.6566 - val_loss: 0.5061 - val_accuracy: 0.9073\n", 334 | "\n", 335 | "Epoch 00018: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 336 | "Epoch 19/1000\n", 337 | "29/29 [==============================] - 0s 4ms/step - loss: 0.7564 - accuracy: 0.6842 - val_loss: 0.4910 - val_accuracy: 0.9039\n", 338 | "\n", 339 | "Epoch 00019: saving model to 
model/keypoint_classifier/keypoint_classifier.hdf5\n", 340 | "Epoch 20/1000\n", 341 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7531 - accuracy: 0.6800 - val_loss: 0.4742 - val_accuracy: 0.9098\n", 342 | "\n", 343 | "Epoch 00020: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 344 | "Epoch 21/1000\n", 345 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7271 - accuracy: 0.6952 - val_loss: 0.4619 - val_accuracy: 0.9190\n", 346 | "\n", 347 | "Epoch 00021: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 348 | "Epoch 22/1000\n", 349 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7392 - accuracy: 0.6969 - val_loss: 0.4552 - val_accuracy: 0.9240\n", 350 | "\n", 351 | "Epoch 00022: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 352 | "Epoch 23/1000\n", 353 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7169 - accuracy: 0.6980 - val_loss: 0.4407 - val_accuracy: 0.9190\n", 354 | "\n", 355 | "Epoch 00023: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 356 | "Epoch 24/1000\n", 357 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7228 - accuracy: 0.6844 - val_loss: 0.4299 - val_accuracy: 0.9348\n", 358 | "\n", 359 | "Epoch 00024: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 360 | "Epoch 25/1000\n", 361 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7076 - accuracy: 0.6995 - val_loss: 0.4208 - val_accuracy: 0.9273\n", 362 | "\n", 363 | "Epoch 00025: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 364 | "Epoch 26/1000\n", 365 | "29/29 [==============================] - 0s 4ms/step - loss: 0.7026 - accuracy: 0.7094 - val_loss: 0.4089 - val_accuracy: 0.9340\n", 366 | "\n", 367 | "Epoch 00026: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 368 | "Epoch 27/1000\n", 369 | "29/29 
[==============================] - 0s 3ms/step - loss: 0.6647 - accuracy: 0.7284 - val_loss: 0.4033 - val_accuracy: 0.9307\n", 370 | "\n", 371 | "Epoch 00027: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 372 | "Epoch 28/1000\n", 373 | "29/29 [==============================] - 0s 3ms/step - loss: 0.7012 - accuracy: 0.7009 - val_loss: 0.4048 - val_accuracy: 0.9282\n", 374 | "\n", 375 | "Epoch 00028: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 376 | "Epoch 29/1000\n", 377 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6833 - accuracy: 0.7196 - val_loss: 0.3877 - val_accuracy: 0.9390\n", 378 | "\n", 379 | "Epoch 00029: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 380 | "Epoch 30/1000\n", 381 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6934 - accuracy: 0.7080 - val_loss: 0.3753 - val_accuracy: 0.9465\n", 382 | "\n", 383 | "Epoch 00030: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 384 | "Epoch 31/1000\n", 385 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6482 - accuracy: 0.7330 - val_loss: 0.3744 - val_accuracy: 0.9407\n", 386 | "\n", 387 | "Epoch 00031: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 388 | "Epoch 32/1000\n", 389 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6765 - accuracy: 0.7150 - val_loss: 0.3752 - val_accuracy: 0.9465\n", 390 | "\n", 391 | "Epoch 00032: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 392 | "Epoch 33/1000\n", 393 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6690 - accuracy: 0.7229 - val_loss: 0.3627 - val_accuracy: 0.9490\n", 394 | "\n", 395 | "Epoch 00033: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 396 | "Epoch 34/1000\n", 397 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6609 - accuracy: 0.7177 - val_loss: 0.3601 - val_accuracy: 
0.9415\n", 398 | "\n", 399 | "Epoch 00034: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 400 | "Epoch 35/1000\n", 401 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6472 - accuracy: 0.7369 - val_loss: 0.3538 - val_accuracy: 0.9357\n", 402 | "\n", 403 | "Epoch 00035: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 404 | "Epoch 36/1000\n", 405 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6448 - accuracy: 0.7398 - val_loss: 0.3439 - val_accuracy: 0.9482\n", 406 | "\n", 407 | "Epoch 00036: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 408 | "Epoch 37/1000\n", 409 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6417 - accuracy: 0.7341 - val_loss: 0.3454 - val_accuracy: 0.9482\n", 410 | "\n", 411 | "Epoch 00037: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 412 | "Epoch 38/1000\n", 413 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6605 - accuracy: 0.7270 - val_loss: 0.3479 - val_accuracy: 0.9507\n", 414 | "\n", 415 | "Epoch 00038: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 416 | "Epoch 39/1000\n", 417 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6306 - accuracy: 0.7349 - val_loss: 0.3439 - val_accuracy: 0.9499\n", 418 | "\n", 419 | "Epoch 00039: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 420 | "Epoch 40/1000\n", 421 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6719 - accuracy: 0.7174 - val_loss: 0.3491 - val_accuracy: 0.9490\n", 422 | "\n", 423 | "Epoch 00040: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 424 | "Epoch 41/1000\n", 425 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6213 - accuracy: 0.7391 - val_loss: 0.3326 - val_accuracy: 0.9465\n", 426 | "\n", 427 | "Epoch 00041: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 428 | 
"Epoch 42/1000\n", 429 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6397 - accuracy: 0.7397 - val_loss: 0.3294 - val_accuracy: 0.9499\n", 430 | "\n", 431 | "Epoch 00042: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 432 | "Epoch 43/1000\n", 433 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6255 - accuracy: 0.7534 - val_loss: 0.3269 - val_accuracy: 0.9515\n", 434 | "\n", 435 | "Epoch 00043: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 436 | "Epoch 44/1000\n", 437 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6403 - accuracy: 0.7363 - val_loss: 0.3300 - val_accuracy: 0.9507\n", 438 | "\n", 439 | "Epoch 00044: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 440 | "Epoch 45/1000\n", 441 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6105 - accuracy: 0.7541 - val_loss: 0.3156 - val_accuracy: 0.9574\n", 442 | "\n", 443 | "Epoch 00045: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 444 | "Epoch 46/1000\n", 445 | "29/29 [==============================] - 0s 3ms/step - loss: 0.6065 - accuracy: 0.7611 - val_loss: 0.3083 - val_accuracy: 0.9582\n", 446 | "\n", 447 | "Epoch 00046: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 448 | "Epoch 47/1000\n", 449 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5970 - accuracy: 0.7595 - val_loss: 0.3147 - val_accuracy: 0.9432\n", 450 | "\n", 451 | "Epoch 00047: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 452 | "Epoch 48/1000\n", 453 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5949 - accuracy: 0.7490 - val_loss: 0.3119 - val_accuracy: 0.9524\n", 454 | "\n", 455 | "Epoch 00048: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 456 | "Epoch 49/1000\n", 457 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6136 - accuracy: 0.7447 - 
val_loss: 0.3049 - val_accuracy: 0.9591\n", 458 | "\n", 459 | "Epoch 00049: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 460 | "Epoch 50/1000\n", 461 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6021 - accuracy: 0.7487 - val_loss: 0.3109 - val_accuracy: 0.9591\n", 462 | "\n", 463 | "Epoch 00050: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 464 | "Epoch 51/1000\n", 465 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5964 - accuracy: 0.7519 - val_loss: 0.3073 - val_accuracy: 0.9607\n", 466 | "\n", 467 | "Epoch 00051: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 468 | "Epoch 52/1000\n", 469 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6201 - accuracy: 0.7530 - val_loss: 0.3005 - val_accuracy: 0.9657\n", 470 | "\n", 471 | "Epoch 00052: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 472 | "Epoch 53/1000\n", 473 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5830 - accuracy: 0.7675 - val_loss: 0.2952 - val_accuracy: 0.9641\n", 474 | "\n", 475 | "Epoch 00053: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 476 | "Epoch 54/1000\n", 477 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5810 - accuracy: 0.7717 - val_loss: 0.2938 - val_accuracy: 0.9624\n", 478 | "\n", 479 | "Epoch 00054: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 480 | "Epoch 55/1000\n", 481 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5772 - accuracy: 0.7629 - val_loss: 0.2909 - val_accuracy: 0.9616\n", 482 | "\n", 483 | "Epoch 00055: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 484 | "Epoch 56/1000\n", 485 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5697 - accuracy: 0.7671 - val_loss: 0.2858 - val_accuracy: 0.9616\n", 486 | "\n", 487 | "Epoch 00056: saving model to 
model/keypoint_classifier/keypoint_classifier.hdf5\n", 488 | "Epoch 57/1000\n", 489 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6045 - accuracy: 0.7662 - val_loss: 0.2897 - val_accuracy: 0.9607\n", 490 | "\n", 491 | "Epoch 00057: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 492 | "Epoch 58/1000\n", 493 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5758 - accuracy: 0.7605 - val_loss: 0.2866 - val_accuracy: 0.9649\n", 494 | "\n", 495 | "Epoch 00058: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 496 | "Epoch 59/1000\n", 497 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5680 - accuracy: 0.7674 - val_loss: 0.2823 - val_accuracy: 0.9666\n", 498 | "\n", 499 | "Epoch 00059: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 500 | "Epoch 60/1000\n", 501 | "29/29 [==============================] - 0s 4ms/step - loss: 0.6012 - accuracy: 0.7609 - val_loss: 0.2797 - val_accuracy: 0.9632\n", 502 | "\n", 503 | "Epoch 00060: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 504 | "Epoch 61/1000\n", 505 | "29/29 [==============================] - 0s 7ms/step - loss: 0.5754 - accuracy: 0.7716 - val_loss: 0.2738 - val_accuracy: 0.9657\n", 506 | "\n", 507 | "Epoch 00061: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 508 | "Epoch 62/1000\n", 509 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5867 - accuracy: 0.7662 - val_loss: 0.2681 - val_accuracy: 0.9641\n", 510 | "\n", 511 | "Epoch 00062: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 512 | "Epoch 63/1000\n", 513 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5659 - accuracy: 0.7775 - val_loss: 0.2679 - val_accuracy: 0.9624\n", 514 | "\n", 515 | "Epoch 00063: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 516 | "Epoch 64/1000\n", 517 | "29/29 
[==============================] - 0s 3ms/step - loss: 0.5809 - accuracy: 0.7579 - val_loss: 0.2676 - val_accuracy: 0.9616\n", 518 | "\n", 519 | "Epoch 00064: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 520 | "Epoch 65/1000\n", 521 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5411 - accuracy: 0.7946 - val_loss: 0.2597 - val_accuracy: 0.9657\n", 522 | "\n", 523 | "Epoch 00065: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 524 | "Epoch 66/1000\n", 525 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5708 - accuracy: 0.7580 - val_loss: 0.2605 - val_accuracy: 0.9683\n", 526 | "\n", 527 | "Epoch 00066: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 528 | "Epoch 67/1000\n", 529 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5758 - accuracy: 0.7743 - val_loss: 0.2610 - val_accuracy: 0.9666\n", 530 | "\n", 531 | "Epoch 00067: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 532 | "Epoch 68/1000\n", 533 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5817 - accuracy: 0.7755 - val_loss: 0.2627 - val_accuracy: 0.9649\n", 534 | "\n", 535 | "Epoch 00068: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 536 | "Epoch 69/1000\n", 537 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5433 - accuracy: 0.7961 - val_loss: 0.2506 - val_accuracy: 0.9691\n", 538 | "\n", 539 | "Epoch 00069: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 540 | "Epoch 70/1000\n", 541 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5277 - accuracy: 0.7969 - val_loss: 0.2515 - val_accuracy: 0.9674\n", 542 | "\n", 543 | "Epoch 00070: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 544 | "Epoch 71/1000\n", 545 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5326 - accuracy: 0.7916 - val_loss: 0.2523 - val_accuracy: 
0.9657\n", 546 | "\n", 547 | "Epoch 00071: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 548 | "Epoch 72/1000\n", 549 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5366 - accuracy: 0.7858 - val_loss: 0.2503 - val_accuracy: 0.9632\n", 550 | "\n", 551 | "Epoch 00072: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 552 | "Epoch 73/1000\n", 553 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5365 - accuracy: 0.7923 - val_loss: 0.2454 - val_accuracy: 0.9674\n", 554 | "\n", 555 | "Epoch 00073: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 556 | "Epoch 74/1000\n", 557 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5308 - accuracy: 0.7907 - val_loss: 0.2477 - val_accuracy: 0.9666\n", 558 | "\n", 559 | "Epoch 00074: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 560 | "Epoch 75/1000\n", 561 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5484 - accuracy: 0.7893 - val_loss: 0.2423 - val_accuracy: 0.9666\n", 562 | "\n", 563 | "Epoch 00075: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 564 | "Epoch 76/1000\n", 565 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5486 - accuracy: 0.7919 - val_loss: 0.2460 - val_accuracy: 0.9657\n", 566 | "\n", 567 | "Epoch 00076: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 568 | "Epoch 77/1000\n", 569 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5641 - accuracy: 0.7737 - val_loss: 0.2504 - val_accuracy: 0.9674\n", 570 | "\n", 571 | "Epoch 00077: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 572 | "Epoch 78/1000\n", 573 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5293 - accuracy: 0.7988 - val_loss: 0.2358 - val_accuracy: 0.9691\n", 574 | "\n", 575 | "Epoch 00078: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 576 | 
"Epoch 79/1000\n", 577 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5279 - accuracy: 0.8004 - val_loss: 0.2314 - val_accuracy: 0.9699\n", 578 | "\n", 579 | "Epoch 00079: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 580 | "Epoch 80/1000\n", 581 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5228 - accuracy: 0.7923 - val_loss: 0.2357 - val_accuracy: 0.9708\n", 582 | "\n", 583 | "Epoch 00080: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 584 | "Epoch 81/1000\n", 585 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5105 - accuracy: 0.8039 - val_loss: 0.2433 - val_accuracy: 0.9699\n", 586 | "\n", 587 | "Epoch 00081: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 588 | "Epoch 82/1000\n", 589 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5105 - accuracy: 0.7994 - val_loss: 0.2415 - val_accuracy: 0.9699\n", 590 | "\n", 591 | "Epoch 00082: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 592 | "Epoch 83/1000\n", 593 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5323 - accuracy: 0.7907 - val_loss: 0.2395 - val_accuracy: 0.9674\n", 594 | "\n", 595 | "Epoch 00083: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 596 | "Epoch 84/1000\n", 597 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5423 - accuracy: 0.7904 - val_loss: 0.2396 - val_accuracy: 0.9691\n", 598 | "\n", 599 | "Epoch 00084: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 600 | "Epoch 85/1000\n", 601 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5419 - accuracy: 0.7913 - val_loss: 0.2335 - val_accuracy: 0.9683\n", 602 | "\n", 603 | "Epoch 00085: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 604 | "Epoch 86/1000\n", 605 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5259 - accuracy: 0.8041 - 
val_loss: 0.2351 - val_accuracy: 0.9666\n", 606 | "\n", 607 | "Epoch 00086: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 608 | "Epoch 87/1000\n", 609 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5282 - accuracy: 0.7899 - val_loss: 0.2285 - val_accuracy: 0.9708\n", 610 | "\n", 611 | "Epoch 00087: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 612 | "Epoch 88/1000\n", 613 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5117 - accuracy: 0.7977 - val_loss: 0.2334 - val_accuracy: 0.9699\n", 614 | "\n", 615 | "Epoch 00088: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 616 | "Epoch 89/1000\n", 617 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5065 - accuracy: 0.8034 - val_loss: 0.2389 - val_accuracy: 0.9657\n", 618 | "\n", 619 | "Epoch 00089: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 620 | "Epoch 90/1000\n", 621 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5209 - accuracy: 0.7974 - val_loss: 0.2391 - val_accuracy: 0.9632\n", 622 | "\n", 623 | "Epoch 00090: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 624 | "Epoch 91/1000\n", 625 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5023 - accuracy: 0.8022 - val_loss: 0.2242 - val_accuracy: 0.9699\n", 626 | "\n", 627 | "Epoch 00091: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 628 | "Epoch 92/1000\n", 629 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5345 - accuracy: 0.7989 - val_loss: 0.2257 - val_accuracy: 0.9699\n", 630 | "\n", 631 | "Epoch 00092: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 632 | "Epoch 93/1000\n", 633 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5404 - accuracy: 0.7961 - val_loss: 0.2314 - val_accuracy: 0.9699\n", 634 | "\n", 635 | "Epoch 00093: saving model to 
model/keypoint_classifier/keypoint_classifier.hdf5\n", 636 | "Epoch 94/1000\n", 637 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5065 - accuracy: 0.8004 - val_loss: 0.2324 - val_accuracy: 0.9691\n", 638 | "\n", 639 | "Epoch 00094: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 640 | "Epoch 95/1000\n", 641 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5292 - accuracy: 0.8023 - val_loss: 0.2263 - val_accuracy: 0.9733\n", 642 | "\n", 643 | "Epoch 00095: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 644 | "Epoch 96/1000\n", 645 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5031 - accuracy: 0.8069 - val_loss: 0.2243 - val_accuracy: 0.9724\n", 646 | "\n", 647 | "Epoch 00096: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 648 | "Epoch 97/1000\n", 649 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4866 - accuracy: 0.8238 - val_loss: 0.2188 - val_accuracy: 0.9691\n", 650 | "\n", 651 | "Epoch 00097: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 652 | "Epoch 98/1000\n", 653 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5235 - accuracy: 0.8082 - val_loss: 0.2216 - val_accuracy: 0.9741\n", 654 | "\n", 655 | "Epoch 00098: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 656 | "Epoch 99/1000\n", 657 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5103 - accuracy: 0.8040 - val_loss: 0.2309 - val_accuracy: 0.9699\n", 658 | "\n", 659 | "Epoch 00099: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 660 | "Epoch 100/1000\n", 661 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4986 - accuracy: 0.8049 - val_loss: 0.2237 - val_accuracy: 0.9733\n", 662 | "\n", 663 | "Epoch 00100: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 664 | "Epoch 101/1000\n", 665 | "29/29 
[==============================] - 0s 4ms/step - loss: 0.4859 - accuracy: 0.8137 - val_loss: 0.2149 - val_accuracy: 0.9724\n", 666 | "\n", 667 | "Epoch 00101: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 668 | "Epoch 102/1000\n", 669 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5011 - accuracy: 0.8068 - val_loss: 0.2196 - val_accuracy: 0.9724\n", 670 | "\n", 671 | "Epoch 00102: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 672 | "Epoch 103/1000\n", 673 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4784 - accuracy: 0.8216 - val_loss: 0.2194 - val_accuracy: 0.9733\n", 674 | "\n", 675 | "Epoch 00103: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 676 | "Epoch 104/1000\n", 677 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4757 - accuracy: 0.8298 - val_loss: 0.2195 - val_accuracy: 0.9749\n", 678 | "\n", 679 | "Epoch 00104: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 680 | "Epoch 105/1000\n", 681 | "29/29 [==============================] - 0s 6ms/step - loss: 0.5101 - accuracy: 0.7992 - val_loss: 0.2342 - val_accuracy: 0.9632\n", 682 | "\n", 683 | "Epoch 00105: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 684 | "Epoch 106/1000\n", 685 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4907 - accuracy: 0.8168 - val_loss: 0.2276 - val_accuracy: 0.9641\n", 686 | "\n", 687 | "Epoch 00106: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 688 | "Epoch 107/1000\n", 689 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5035 - accuracy: 0.8050 - val_loss: 0.2187 - val_accuracy: 0.9691\n", 690 | "\n", 691 | "Epoch 00107: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 692 | "Epoch 108/1000\n", 693 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5085 - accuracy: 0.7979 - val_loss: 0.2091 - val_accuracy: 
0.9724\n", 694 | "\n", 695 | "Epoch 00108: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 696 | "Epoch 109/1000\n", 697 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4948 - accuracy: 0.8134 - val_loss: 0.2169 - val_accuracy: 0.9699\n", 698 | "\n", 699 | "Epoch 00109: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 700 | "Epoch 110/1000\n", 701 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4943 - accuracy: 0.8085 - val_loss: 0.2115 - val_accuracy: 0.9657\n", 702 | "\n", 703 | "Epoch 00110: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 704 | "Epoch 111/1000\n", 705 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5165 - accuracy: 0.8106 - val_loss: 0.2226 - val_accuracy: 0.9657\n", 706 | "\n", 707 | "Epoch 00111: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 708 | "Epoch 112/1000\n", 709 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4962 - accuracy: 0.8136 - val_loss: 0.2235 - val_accuracy: 0.9666\n", 710 | "\n", 711 | "Epoch 00112: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 712 | "Epoch 113/1000\n", 713 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5203 - accuracy: 0.8059 - val_loss: 0.2215 - val_accuracy: 0.9691\n", 714 | "\n", 715 | "Epoch 00113: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 716 | "Epoch 114/1000\n", 717 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5095 - accuracy: 0.8011 - val_loss: 0.2207 - val_accuracy: 0.9724\n", 718 | "\n", 719 | "Epoch 00114: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 720 | "Epoch 115/1000\n", 721 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4947 - accuracy: 0.8178 - val_loss: 0.2101 - val_accuracy: 0.9683\n", 722 | "\n", 723 | "Epoch 00115: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 
724 | "Epoch 116/1000\n", 725 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5079 - accuracy: 0.8116 - val_loss: 0.2172 - val_accuracy: 0.9699\n", 726 | "\n", 727 | "Epoch 00116: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 728 | "Epoch 117/1000\n", 729 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5056 - accuracy: 0.8056 - val_loss: 0.2236 - val_accuracy: 0.9691\n", 730 | "\n", 731 | "Epoch 00117: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 732 | "Epoch 118/1000\n", 733 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5451 - accuracy: 0.7969 - val_loss: 0.2210 - val_accuracy: 0.9649\n", 734 | "\n", 735 | "Epoch 00118: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 736 | "Epoch 119/1000\n", 737 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5005 - accuracy: 0.8192 - val_loss: 0.2220 - val_accuracy: 0.9733\n", 738 | "\n", 739 | "Epoch 00119: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 740 | "Epoch 120/1000\n", 741 | "29/29 [==============================] - 0s 4ms/step - loss: 0.5130 - accuracy: 0.8043 - val_loss: 0.2170 - val_accuracy: 0.9716\n", 742 | "\n", 743 | "Epoch 00120: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 744 | "Epoch 121/1000\n", 745 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4874 - accuracy: 0.8132 - val_loss: 0.2177 - val_accuracy: 0.9724\n", 746 | "\n", 747 | "Epoch 00121: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 748 | "Epoch 122/1000\n", 749 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4863 - accuracy: 0.8106 - val_loss: 0.2247 - val_accuracy: 0.9716\n", 750 | "\n", 751 | "Epoch 00122: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 752 | "Epoch 123/1000\n", 753 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4867 - accuracy: 
0.8101 - val_loss: 0.2138 - val_accuracy: 0.9699\n", 754 | "\n", 755 | "Epoch 00123: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 756 | "Epoch 124/1000\n", 757 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4550 - accuracy: 0.8279 - val_loss: 0.2110 - val_accuracy: 0.9724\n", 758 | "\n", 759 | "Epoch 00124: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 760 | "Epoch 125/1000\n", 761 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4908 - accuracy: 0.8152 - val_loss: 0.2088 - val_accuracy: 0.9716\n", 762 | "\n", 763 | "Epoch 00125: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 764 | "Epoch 126/1000\n", 765 | "29/29 [==============================] - 0s 3ms/step - loss: 0.5095 - accuracy: 0.8085 - val_loss: 0.2052 - val_accuracy: 0.9749\n", 766 | "\n", 767 | "Epoch 00126: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 768 | "Epoch 127/1000\n", 769 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4814 - accuracy: 0.8096 - val_loss: 0.2055 - val_accuracy: 0.9733\n", 770 | "\n", 771 | "Epoch 00127: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 772 | "Epoch 128/1000\n", 773 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4960 - accuracy: 0.8126 - val_loss: 0.2153 - val_accuracy: 0.9699\n", 774 | "\n", 775 | "Epoch 00128: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 776 | "Epoch 129/1000\n", 777 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4922 - accuracy: 0.8236 - val_loss: 0.2169 - val_accuracy: 0.9699\n", 778 | "\n", 779 | "Epoch 00129: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 780 | "Epoch 130/1000\n", 781 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4516 - accuracy: 0.8311 - val_loss: 0.2098 - val_accuracy: 0.9666\n", 782 | "\n", 783 | "Epoch 00130: saving model to 
model/keypoint_classifier/keypoint_classifier.hdf5\n", 784 | "Epoch 131/1000\n", 785 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4596 - accuracy: 0.8147 - val_loss: 0.2010 - val_accuracy: 0.9758\n", 786 | "\n", 787 | "Epoch 00131: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 788 | "Epoch 132/1000\n", 789 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4641 - accuracy: 0.8284 - val_loss: 0.1976 - val_accuracy: 0.9733\n", 790 | "\n", 791 | "Epoch 00132: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 792 | "Epoch 133/1000\n", 793 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4831 - accuracy: 0.8071 - val_loss: 0.2037 - val_accuracy: 0.9766\n", 794 | "\n", 795 | "Epoch 00133: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 796 | "Epoch 134/1000\n", 797 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4643 - accuracy: 0.8149 - val_loss: 0.2157 - val_accuracy: 0.9699\n", 798 | "\n", 799 | "Epoch 00134: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 800 | "Epoch 135/1000\n", 801 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4682 - accuracy: 0.8230 - val_loss: 0.2119 - val_accuracy: 0.9699\n", 802 | "\n", 803 | "Epoch 00135: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 804 | "Epoch 136/1000\n", 805 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4986 - accuracy: 0.8085 - val_loss: 0.2093 - val_accuracy: 0.9724\n", 806 | "\n", 807 | "Epoch 00136: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 808 | "Epoch 137/1000\n", 809 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4729 - accuracy: 0.8226 - val_loss: 0.1992 - val_accuracy: 0.9741\n", 810 | "\n", 811 | "Epoch 00137: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 812 | "Epoch 138/1000\n", 813 | "29/29 
[==============================] - 0s 3ms/step - loss: 0.5082 - accuracy: 0.8100 - val_loss: 0.2034 - val_accuracy: 0.9724\n", 814 | "\n", 815 | "Epoch 00138: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 816 | "Epoch 139/1000\n", 817 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4770 - accuracy: 0.8159 - val_loss: 0.2011 - val_accuracy: 0.9691\n", 818 | "\n", 819 | "Epoch 00139: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 820 | "Epoch 140/1000\n", 821 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4842 - accuracy: 0.8167 - val_loss: 0.2040 - val_accuracy: 0.9733\n", 822 | "\n", 823 | "Epoch 00140: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 824 | "Epoch 141/1000\n", 825 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4895 - accuracy: 0.8208 - val_loss: 0.2113 - val_accuracy: 0.9657\n", 826 | "\n", 827 | "Epoch 00141: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 828 | "Epoch 142/1000\n", 829 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4821 - accuracy: 0.8175 - val_loss: 0.2193 - val_accuracy: 0.9649\n", 830 | "\n", 831 | "Epoch 00142: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 832 | "Epoch 143/1000\n", 833 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4909 - accuracy: 0.8237 - val_loss: 0.2194 - val_accuracy: 0.9607\n", 834 | "\n", 835 | "Epoch 00143: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 836 | "Epoch 144/1000\n", 837 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4814 - accuracy: 0.8163 - val_loss: 0.2222 - val_accuracy: 0.9591\n", 838 | "\n", 839 | "Epoch 00144: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 840 | "Epoch 145/1000\n", 841 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4776 - accuracy: 0.8226 - val_loss: 0.2159 - val_accuracy: 
0.9657\n", 842 | "\n", 843 | "Epoch 00145: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 844 | "Epoch 146/1000\n", 845 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4810 - accuracy: 0.8290 - val_loss: 0.2111 - val_accuracy: 0.9632\n", 846 | "\n", 847 | "Epoch 00146: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 848 | "Epoch 147/1000\n", 849 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4892 - accuracy: 0.8163 - val_loss: 0.2065 - val_accuracy: 0.9666\n", 850 | "\n", 851 | "Epoch 00147: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 852 | "Epoch 148/1000\n", 853 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4642 - accuracy: 0.8298 - val_loss: 0.2058 - val_accuracy: 0.9716\n", 854 | "\n", 855 | "Epoch 00148: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 856 | "Epoch 149/1000\n", 857 | "29/29 [==============================] - 0s 7ms/step - loss: 0.4666 - accuracy: 0.8255 - val_loss: 0.2084 - val_accuracy: 0.9733\n", 858 | "\n", 859 | "Epoch 00149: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 860 | "Epoch 150/1000\n", 861 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4687 - accuracy: 0.8264 - val_loss: 0.1983 - val_accuracy: 0.9749\n", 862 | "\n", 863 | "Epoch 00150: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 864 | "Epoch 151/1000\n", 865 | "29/29 [==============================] - 0s 4ms/step - loss: 0.4801 - accuracy: 0.8201 - val_loss: 0.2018 - val_accuracy: 0.9724\n", 866 | "\n", 867 | "Epoch 00151: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 868 | "Epoch 152/1000\n", 869 | "29/29 [==============================] - 0s 3ms/step - loss: 0.4387 - accuracy: 0.8379 - val_loss: 0.2064 - val_accuracy: 0.9724\n", 870 | "\n", 871 | "Epoch 00152: saving model to model/keypoint_classifier/keypoint_classifier.hdf5\n", 
872 | "Epoch 00152: early stopping\n" 873 | ], 874 | "name": "stdout" 875 | }, 876 | { 877 | "output_type": "execute_result", 878 | "data": { 879 | "text/plain": [ 880 | "" 881 | ] 882 | }, 883 | "metadata": { 884 | "tags": [] 885 | }, 886 | "execution_count": 11 887 | } 888 | ] 889 | }, 890 | { 891 | "cell_type": "code", 892 | "metadata": { 893 | "id": "pxvb2Y299hE3", 894 | "colab": { 895 | "base_uri": "https://localhost:8080/" 896 | }, 897 | "outputId": "59eb3185-2e37-4b9e-bc9d-ab1b8ac29b7f" 898 | }, 899 | "source": [ 900 | "# Model evaluation\n", 901 | "val_loss, val_acc = model.evaluate(X_test, y_test, batch_size=128)" 902 | ], 903 | "execution_count": null, 904 | "outputs": [ 905 | { 906 | "output_type": "stream", 907 | "text": [ 908 | "10/10 [==============================] - 0s 2ms/step - loss: 0.2064 - accuracy: 0.9724\n" 909 | ], 910 | "name": "stdout" 911 | } 912 | ] 913 | }, 914 | { 915 | "cell_type": "code", 916 | "metadata": { 917 | "id": "RBkmDeUW9hE4" 918 | }, 919 | "source": [ 920 | "# Loading the saved model\n", 921 | "model = tf.keras.models.load_model(model_save_path)" 922 | ], 923 | "execution_count": null, 924 | "outputs": [] 925 | }, 926 | { 927 | "cell_type": "code", 928 | "metadata": { 929 | "id": "tFz9Tb0I9hE4", 930 | "colab": { 931 | "base_uri": "https://localhost:8080/" 932 | }, 933 | "outputId": "1c3b3528-54ae-4ee2-ab04-77429211cbef" 934 | }, 935 | "source": [ 936 | "# Inference test\n", 937 | "predict_result = model.predict(np.array([X_test[0]]))\n", 938 | "print(np.squeeze(predict_result))\n", 939 | "print(np.argmax(np.squeeze(predict_result)))" 940 | ], 941 | "execution_count": null, 942 | "outputs": [ 943 | { 944 | "output_type": "stream", 945 | "text": [ 946 | "[9.8105639e-01 1.8674158e-02 2.2328236e-04 4.6191799e-05]\n", 947 | "0\n" 948 | ], 949 | "name": "stdout" 950 | } 951 | ] 952 | }, 953 | { 954 | "cell_type": "markdown", 955 | "metadata": { 956 | "id": "S3U4yNWx9hE4" 957 | }, 958 | "source": [ 959 | "# Confusion matrix" 960 | 
] 961 | }, 962 | { 963 | "cell_type": "code", 964 | "metadata": { 965 | "id": "AP1V6SCk9hE5", 966 | "colab": { 967 | "base_uri": "https://localhost:8080/", 968 | "height": 582 969 | }, 970 | "outputId": "08e41a80-7a4a-4619-8125-ecc371368d19" 971 | }, 972 | "source": [ 973 | "import pandas as pd\n", 974 | "import seaborn as sns\n", 975 | "import matplotlib.pyplot as plt\n", 976 | "from sklearn.metrics import confusion_matrix, classification_report\n", 977 | "\n", 978 | "def print_confusion_matrix(y_true, y_pred, report=True):\n", 979 | " labels = sorted(list(set(y_true)))\n", 980 | " cmx_data = confusion_matrix(y_true, y_pred, labels=labels)\n", 981 | " \n", 982 | " df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)\n", 983 | " \n", 984 | " fig, ax = plt.subplots(figsize=(7, 6))\n", 985 | " sns.heatmap(df_cmx, annot=True, fmt='g' ,square=False)\n", 986 | " ax.set_ylim(len(set(y_true)), 0)\n", 987 | " plt.show()\n", 988 | " \n", 989 | " if report:\n", 990 | " print('Classification Report')\n", 991 | " print(classification_report(y_true, y_pred))\n", 992 | "\n", 993 | "Y_pred = model.predict(X_test)\n", 994 | "y_pred = np.argmax(Y_pred, axis=1)\n", 995 | "\n", 996 | "print_confusion_matrix(y_test, y_pred)" 997 | ], 998 | "execution_count": null, 999 | "outputs": [ 1000 | { 1001 | "output_type": "display_data", 1002 | "data": { 1003 | "image/png":
"iVBORw0KGgoAAAANSUhEUgAAAZUAAAFpCAYAAABUC7VZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3de5xVdb3/8ddnzwwgICKBCAOKCWWoAQlEeckrAongyUDKvPGTU1kHykuWeUxPdqyTKJp6HALFVGDyhiIqhpqSCOIRiZsKIjLDXeSqwszsz++PWdJkzAwOa2Z995r308d6sG57r89ej3F/5vP9rLXG3B0REZE4ZJIOQERE0kNJRUREYqOkIiIisVFSERGR2CipiIhIbJRUREQkNkoqIiKNjJnlmdnrZjY9Wj7CzOaa2XIzm2pmTaL1TaPl5dH2LrW9t5KKiEjjMxpYWmX5t8At7t4V+AAYGa0fCXwQrb8l2q9GSioiIo2ImXUCvgn8MVo24FTgoWiXScDQaH5ItEy0/bRo/2opqYiINC63AlcB2Wj5c8AWdy+PlkuAwmi+EFgNEG3fGu1frfy4o/20sk3v6DkwddC844lJh5CT9MMmDa18d2mNv7l/FnF8XzZpd+S/A6OqrCpy9yIAMzsL2ODur5nZyft7rL2p96QiIiL7KFux328RJZCiajYfD5xtZoOAZkArYBzQ2szyo2qkE1Aa7V8KdAZKzCwfOAh4v6bja/hLRKSRcPefu3snd+8CnAc85+7fBZ4Hzo12uxCYFs0/Hi0TbX/Oa3kKsSoVEZFQeLb2ferHz4ApZvZr4HVgQrR+AvAnM1sObKYyEdVISUVEJBTZhksq7v4C8EI0/w7Qdy/7fAx8+7O8r5KKiEggPLlKJTbqqYiISGxUqYiIhKIBh7/qi5KKiEgoUjD8paQiIhKKGO5TSZqSiohIKFJQqahRLyIisVGlIiISCjXqRUQkLmm4T0VJRUQkFKpUREQkNimoVNSoFxGR2KhSEREJhe5TERGR2KRg+EtJRUQkFClo1KunIiIisVGlIiISCg1/iYhIbFIw/KWkIiISCHdd/SUiInFJwfCXGvUiIhIbVSoiIqFQT0VERGKTguEvJRURkVDoMS0iIhKbFFQqatSLiEhsVKmIiIRCjXoREYlNCoa/lFREREKRgkpFPRUREYmNKhURkVCoUglfRUUF5150GT+88joAStasY8SlYxg47BIuv/a/KSsrA2D37t1cfu1/M3DYJYy4dAyla9cnGXaQOnXqyLMz/8wbbzzPggXP8eMfjUw6pJxxZv+TWbzoRZYtmc1VV16WdDg5o7GdN/eK/Z6Slvqkcv+fp/H5LoftWb7lrol8b/hQniqeSKsDW/Lw9GcAeGT6TFod2JKniiu3j71zYlIhB6u8vJyrrrqeHj1O4YQTBvP9H1zEl77ULemwgpfJZLht3I2cNfh8ju1xCsOHD9V52weN8rxls/s/1cDMmpnZPDN7w8wWm9n10fp7zWylmS2Ipp7RejOz28xsuZktNLOv1PYRak0qZnaUmf0seuPbovkv7eMpStS6DRt58eV5fGvwmQC4O3Nfe4P+J58IwJBBp/Pci3MAeO6lOQwZdDoA/U8+kbmvLcDdkwk8UOvWbeD1BYsA2LFjJ8uWvU3HjocmHFX4+vbpxYoV77Jy5XuUlZVRXDyNs6OfSaleozxvnt3/qWa7gFPdvQfQExhgZv2ibVe6e89oWhCtGwh0i6ZRwF21HaDGpGJmPwOmAAbMiyYDJpvZ1bW9edJ+O+5ufvrDkZhVfswtW7dxYMsW5OfnAdC+XVs2bHwfgA0b3+fQQ9oCkJ+fR8sWzdmydVsygeeAww/vRM8exzBv3utJhxK8joWHsrpkzZ7lktK1Ssb7QOctfl5pR7RYEE01/fY8BLgvet0rQGsz61DTMWqrVEYCfdz9Jne/P5puAvpG2/bKzEaZ2Xwzm//H+ybXcoj68cLf5tL
m4NYcfVTKy+UEtGjRnOKp47n8iuvYvn1H7S8QkX0Tw/BX1e/faBpV9RBmlmdmC4ANwLPuPjfadGM0xHWLmTWN1hUCq6u8vCRaV63arv7KAh2BVZ9a3yHatlfuXgQUAZRteieRMaTXFy7hhdmv8NKcV9m1u4ydOz/kplv/l+07dlJeXkF+fh7rN27ikHafA+CQdp9j3YZNHHpIO8rLK9ix80NaH9QqidCDlp+fT/HU8Uye/CiPPfZU0uHkhDWl6+jcqeOe5U6FHVizZl2CEeWGRnneYrj5ser3bzXbK4CeZtYaeNTMjgF+DqwDmkSv/RlwQ12OX1ulMgaYZWZPmVlRND0NzAJG1+WADeUnP7iYWY/dz8yHJ/E/119N3+N68Ntf/Yy+X/kyM194CYBpM/7CqSd+DYBTTujHtBl/AWDmCy/x1eN6YGaJxR+q8UU3s2zZcm4dV+3PrHzKq/MX0LXrEXTp0pmCggKGDRvCE9NnJh1W8BrleavnRn1V7r4FeB4Y4O5royGuXcA9VI5GAZQCnau8rFO0rlo1JhV3fxr4AnA98Ew0/Qr4YrQt5/zkB5dw35RHGTjsErZu28a/ndUfgH8760y2btvGwGGV28d8/+KEIw3P8V/vw/nnn8spp3yd+a/OZP6rMxkw4NSkwwpeRUUFo8f8khlPPsiihS/w0ENPsGTJW0mHFbxGed7quVFvZu2iCgUzOwA4A1j2SZ/EKn+THgosil7yOHBBdBVYP2Cru6+t8Rj1fYVTUsNfua55xxOTDiEn6YdNGlr57tLYhjQ+euYP+/0jfMCZP6o2HjP7MjAJyKOyqCh29xvM7DmgHZUXYi0Avu/uO6Ik8wdgAPAhcLG7z6/p+LqjXkQkFPV8R727LwR67WX9XoccvLLq+Ex3nSqpiIiEIgWPaVFSEREJRQoefZ/6x7SIiEjDUaUiIhIKDX+JiEhsUjD8paQiIhIKVSoiIhKbFFQqatSLiEhsVKmIiIRCw18iIhIbJRUREYlNCv7arJKKiEgoUlCpqFEvIiKxUaUiIhKKFFQqSioiIqFIwX0qSioiIqFIQaWinoqIiMRGlYqISCh0SbGIiMQmBcNfSioiIqFQUhERkdik4OovNepFRCQ2qlRERALhWTXqRUQkLuqpiIhIbFLQU1FSEREJRQqGv9SoFxGR2KhSEREJhXoqIiISGyUVERGJTQqe/aWeioiIxEaViohIKFIw/KVKRUQkFFnf/6kGZtbMzOaZ2RtmttjMro/WH2Fmc81suZlNNbMm0fqm0fLyaHuX2j6CkoqISCg8u/9TzXYBp7p7D6AnMMDM+gG/BW5x967AB8DIaP+RwAfR+lui/WqkpCIiEop6rlS80o5osSCaHDgVeChaPwkYGs0PiZaJtp9mZlbTMeq9p9K844n1fYhU2vbolUmHkJM6j7gz6RBy0paPdyYdgsTEzEYBo6qsKnL3oirb84DXgK7AHcAKYIu7l0e7lACF0XwhsBrA3cvNbCvwOWBTdcdXo15EJBAeQ6M+SiBFNWyvAHqaWWvgUeCo/T5oFUoqIiKhaMBnf7n7FjN7Hvga0NrM8qNqpRNQGu1WCnQGSswsHzgIeL+m91VPRUQkFPXcqDezdlGFgpkdAJwBLAWeB86NdrsQmBbNPx4tE21/zr3mOzRVqYiINB4dgElRXyUDFLv7dDNbAkwxs18DrwMTov0nAH8ys+XAZuC82g6gpCIiEop6Hv5y94VAr72sfwfou5f1HwPf/izHUFIREQlFCu6oV1IREQlFCv5Il5KKiEgoUvDnhHX1l4iIxEaViohIKDT8JSIicYnjjvqkKamIiIRClYqIiMQmBUlFjXoREYmNKhURkVCk4JJiJRURkVCkYPhLSUVEJBCegqSinoqIiMRGlYqISChSUKkoqYiIhEI3P4qISGxUqYiISGxSkFTUqBcRkdioUhERCYR77lcqSioiIqFIwfCXkoqISCiUVEREJC66o15ERKQKVSoiIqFIQaW
ipCIiEorcv6FeSUVEJBTqqYiIiFShSkVEJBQpqFSUVEREQqGeioiIxCUNPRUlFRGRUKhSyU1Nmzbl+ecepmnTpuTl5/HII09yww03Jx1WMHaVlXPJH6ZRVl5BeTbL6T0+zw8H9N2z/bePzOaxeUuZc9OlAPz55cVMnb2ITMZo3rSAa7/9DY48tE1S4QehY+Gh3Hn372h3SFvcnfvunUrRXffR+uCD+OM9t3LY4YW8t6qUkReNZuuWbUmHG6wz+5/M2LE3kJfJMPGeyfzuf+5IOiSpRaO8+mvXrl2c0X8Yx/U+g969+3Nm/5P5at+vJB1WMJrk5zH+h2dTfOUwpl7xbV5etpqF764DYPHqDWz7aNc/7T/wK9146KrhFF8xjItO6cnN015OIuygVJRX8J/X3MTxfQcx4LRhjLz0u3zhi0cy+iejePGvc+jbqz8v/nUOo38yKulQg5XJZLht3I2cNfh8ju1xCsOHD+VLX+qWdFj1yrO+31NNzKyzmT1vZkvMbLGZjY7W/8rMSs1sQTQNqvKan5vZcjN708zOrO0zNMqkArBz54cAFBTkU1BQkIpHTsfFrLLiACivyFJekcXMqMhmueXxOYwZ3O+f9m/ZrMme+Y92l2PWoOEGaf36jSx8YwkAO3bs5K03V9ChY3sGfvM0pj74KABTH3yUQWednmSYQevbpxcrVrzLypXvUVZWRnHxNM4eXOt3Wm7LxjDVrBy43N27A/2Ay8yse7TtFnfvGU0zAKJt5wFHAwOAO80sr6YD1Hn4y8wudvd76vr6pGUyGebNfZojj+zCXf97L/NefT3pkIJSkc0yYuxDrN60leHHH8Oxh7fngRcX8o1jutCuVYt/2X/K7EXc/9c3KKuooOgHZycQcbg6H1bIsV/uzmvz36Bdu7asX78RqEw87dq1TTi6cHUsPJTVJWv2LJeUrqVvn14JRlT/vJ57Ku6+FlgbzW83s6VAYQ0vGQJMcfddwEozWw70BeZU94L9qVSur26DmY0ys/lmNj+b3bkfh6g/2WyW3n360+WI3vTp3Yujj/5i0iEFJS+TofiKYTxz3QUsem8Dr61Yw7NvrGDECcfudf/zTjiG6dd8l9Hf7Mf4Z19r4GjD1aJFc+790+1cc/Vv2LH9X/9fUIUs/ySGSqXq92807XWM1cy6AL2AudGqH5nZQjObaGYHR+sKgdVVXlZCzUmo5qQSHWBv09+B9tW9zt2L3L23u/fOZP71t9qQbN26jRf++jf69z856VCC1OqApvTpWsiry0tZvWkrg3/zIAP/634+Litn8I0P/Mv+A3p144VF7zZ8oAHKz8/nnvtv56HiJ3jyiZkAbNy4ifbt2wHQvn07Nm16P8kQg7amdB2dO3Xcs9ypsANr1qxLMKLcUPX7N5qKPr2PmbUEHgbGuPs24C7gSKAnlZVMna9cqq1SaQ9cAAzey5Sz/ze0bduGgw5qBUCzZs04/bSTePPNFQlHFY7NOz7a04z/eHc5r7y1mu6d2jHr+ot46trzeera82lWkM8T13wXgFUbt+x57UtLV3FY24MSiTs04+74DW+9uYK77vjHKPHTM55j+HfOAWD4d87hqSdnJRVe8F6dv4CuXY+gS5fOFBQUMGzYEJ6YPjPpsOqVZ/d/qo2ZFVCZUB5w90cA3H29u1e4exYYT+UQF0Ap0LnKyztF66pVW09lOtDS3RfsJbAXag8/TB06tGfihFvJy8tgmQwPPfQEM2b8JemwgrFp24dcO/k5stksWXf69+jKSUd3qXb/KbMXMfetEvLzMrQ6oCk3fOfUhgs2UF/tdxzDRwxl8aJlPD97GgA33jCWcbcUMeHecZx/wbmsfm8NIy8anXCk4aqoqGD0mF8y48kHyctkuHfSVJYseSvpsOpXPfdUzMyACcBSdx9bZX2HqN8CcA6wKJp/HHjQzMYCHYFuwLwaj1HfY7oFTQo1aFwH2x69MukQclLnEXcmHUJO2vJxmL3PXFC+uzS26x03nvG
N/f6+bPfsX6uNx8xOAF4C/s4/UtgvgBFUDn058C7w758kGTO7BriEyivHxrj7UzUdv1He/Cgi0hi5+2xgb0lnRg2vuRG4cV+PoaQiIhKI+r6kuCEoqYiIBEJJRURE4uO5/zgKJRURkUCkoVJptM/+EhGR+KlSEREJhGc1/CUiIjFJw/CXkoqISCBcjXoREYlLGioVNepFRCQ2qlRERAKhRr2IiMQmDX+zTUlFRCQQaahU1FMREZHYqFIREQlEGioVJRURkUCopyIiIrFRpSIiIrFJwx31atSLiEhsVKmIiAQiDY9pUVIREQlENgXDX0oqIiKBSENPRUlFRCQQabj6S416ERGJjSoVEZFA6OZHERGJTRqGv5RUREQCkYarv9RTERGR2KhSEREJhC4pFhGR2KhRLyIisVFPRUREYuNu+z3VxMw6m9nzZrbEzBab2ehofRsze9bM3o7+PThab2Z2m5ktN7OFZvaV2j6DkoqISONRDlzu7t2BfsBlZtYduBqY5e7dgFnRMsBAoFs0jQLuqu0ASioiIoFw3/+p5vf3te7+f9H8dmApUAgMASZFu00ChkbzQ4D7vNIrQGsz61DTMdRTCVTrb92cdAg5acvky5IOIScdOPz2pEMQGranYmZdgF7AXKC9u6+NNq0D2kfzhcDqKi8ridatpRpKKiIigYjjkmIzG0XlUNUnity96FP7tAQeBsa4+zazfxzX3d3M6nwdmpKKiEgg4qhUogRSVN12MyugMqE84O6PRKvXm1kHd18bDW9tiNaXAp2rvLxTtK5a6qmIiDQSVlmSTACWuvvYKpseBy6M5i8EplVZf0F0FVg/YGuVYbK9UqUiIhKIBrj38Xjge8DfzWxBtO4XwE1AsZmNBFYBw6JtM4BBwHLgQ+Di2g6gpCIiEoj6btS7+2yguoOctpf9HfhMV78oqYiIBCINz/5ST0VERGKjSkVEJBDZpAOIgZKKiEggvNp2R+5QUhERCURWj74XEZG4ZFNQqahRLyIisVGlIiISCPVUREQkNrr6S0REYpOGSkU9FRERiY0qFRGRQGj4S0REYqOkIiIisUlDT0VJRUQkENnczylq1IuISHxUqYiIBCINj2lRUhERCUQKnieppCIiEgpd/SUiIrHJWu4Pf6lRLyIisVGlIiISCPVUREQkNuqpiIhIbHTzo4iISBWqVEREAqGbH0VEJDZq1IuISGzS0FNRUhERCUQarv5So15ERGKjSkVEJBDqqeSoTp06cs/EcRzSvi3uzoQ/PsDtf5iQdFg54c03X2bH9p1UVFRQXl7B14//ZtIhBWNXWTmX3P0UZeUVlGed04/twg/P6MWvHprNkpJNOHB421bc8O0Tad60AIBnFq7k7r+8Dhhf6NCGm0Z8I9HPEJoz+5/M2LE3kJfJMPGeyfzuf+5IOqR6pZ5KjiovL+eqq67n9QWLaNmyBXPnPs1fZr3I0qVvJx1aTuh/5jDef/+DpMMITpP8PMZfOoDmTQsoq8hy8f8+yQlfLOSKs/rSslkTAH4/fR5T5izlkpO/zKpNW5n4/ELu/f43adW8KZt3fJTwJwhLJpPhtnE3MmDQCEpK1vLKnBk8MX1mqv8/bYieiplNBM4CNrj7MdG6XwGXAhuj3X7h7jOibT8HRgIVwH+4+zM1vX+tPRUzO8rMTjOzlp9aP+AzfpZgrFu3gdcXLAJgx46dLFv2Nh07HppwVJLrzGxPBVJekaW8IothexKKu7OrrHzPnQiPzHuL4V/7Eq2aNwWgTcsDkgg7WH379GLFindZufI9ysrKKC6extmDz0w6rHqVjWHaB/cCe/v+vsXde0bTJwmlO3AecHT0mjvNLK+mN68xqZjZfwDTgB8Di8xsSJXNv9m3+MN2+OGd6NnjGObNez3pUHKDO09Of4A5Lz/JyJHfSTqa4FRkswwbN41Tfz2Zft06cuxh7QD4zz+/xGk3TmHlxq2c9/XuAKzatI1Vm7Zy4V1P8r07pvO3N0uSDD0
4HQsPZXXJmj3LJaVr9ctfDNz9RWDzPu4+BJji7rvcfSWwHOhb0wtqG/66FDjO3XeYWRfgITPr4u7joPpbP81sFDAKIJN3EJlMi32Mv2G1aNGc4qnjufyK69i+fUfS4eSEU079FmvWrKNdu88x48kHefPNFcyePTfpsIKRl8lQPHoI2z7axU//9BzL131A10MP5oZvn0hFNstNj8/lmYUrGdq7GxXZLO9t2sYfRw1kw9adXHL3DP48ZiitDmia9MeQhHgMPZWq37+RIncv2oeX/sjMLgDmA5e7+wdAIfBKlX1KonXVqm34K+PuOwDc/V3gZGCgmY2lhqTi7kXu3tvde4eaUPLz8ymeOp7Jkx/lsceeSjqcnLFmzToANm58n2mPP02f3j0TjihMrQ5oSp/Pd+Bvb/2j+sjLZBjw5SOYtehdANof1IJvdD+MgrwMhW0O5PC2B/Hepm0JRRyeNaXr6Nyp457lToUd9vz8pVUcw19Vv3+jaV8Syl3AkUBPYC1wc10/Q21JZb2Z7fnWiBLMWUBb4Ni6HjQE44tuZtmy5dw6bl/OtwA0b34ALVu22DN/+mknsXjxmwlHFY7NOz5m20e7APi4rJxXlq+hS5VE4e78del7HNHuIABO6X4Y899ZC8AHOz9m1aatdGpzYDLBB+jV+Qvo2vUIunTpTEFBAcOGDeGJ6TOTDqteNVBP5V+4+3p3r3D3LDCefwxxlQKdq+zaKVpXrdqGvy4Ayj918HLgAjO7+zNFHZDjv96H888/l7//fQnzX638If3ltTfx9NPPJRxZ2Nq3b0fx1PEA5OfnMWXqNGY++0KyQQVk0/YPubb4JbLuZN3pf+wRnHhUZy6+ewY7P96NA1/o0IZrhn4NgK9/oZA5b6/h38Y+QsaMnwzqQ+sWzZL9EAGpqKhg9JhfMuPJB8nLZLh30lSWLHkr6bBSycw6uPvaaPEcYFE0/zjwYDQ61RHoBsyr8b3c6/d2m4ImhWm4n6fBZTJ62EFdbJl8WdIh5KQDh9+edAg5q3x3aWx3l9ze+fz9/r788er7a4zHzCZT2cpoC6wHrouWe1J5/+W7wL9/kmTM7BrgEioLjDHuXmO/oFHepyIiEqKGuPnR3UfsZXW1d3+7+43Ajfv6/koqIiKBSMMDJZVUREQCkYakooF7ERGJjSoVEZFApOGqJiUVEZFA6CnFIiISmzT0VJRUREQCkYbhLzXqRUQkNqpUREQCkU1BraKkIiISCPVUREQkNrlfp6inIiIiMVKlIiISCA1/iYhIbHTzo4iIxEZXf4mISGxyP6WoUS8iIjFSpSIiEgg16kVEJDbqqYiISGxyP6UoqYiIBCMNw19q1IuISGxUqYiIBEI9FRERiU3upxQlFRGRYKinIiIiUoUqFRGRQHgKBsCUVEREApGG4S8lFRGRQOjqLxERiU3upxQ16kVEJEaqVEREApGG4S9VKiIigcjGMNXGzCaa2QYzW1RlXRsze9bM3o7+PThab2Z2m5ktN7OFZvaV2t5fSUVEJBAew3/74F5gwKfWXQ3McvduwKxoGWAg0C2aRgF31fbmSioiIoFoiErF3V8ENn9q9RBgUjQ/CRhaZf19XukVoLWZdajp/eu9p5L7I4TJqMim4Yr1hnfg8NuTDiEnnd3huKRDkJiY2Sgqq4pPFLl7US0va+/ua6P5dUD7aL4QWF1lv5Jo3VqqoUa9iEgg4rijPkogtSWRml7vZlbnQJRUREQCkeD4xHoz6+Dua6PhrQ3R+lKgc5X9OkXrqqWeiohIILLu+z3V0ePAhdH8hcC0KusviK4C6wdsrTJMtleqVEREGhEzmwycDLQ1sxLgOuAmoNjMRgKrgGHR7jOAQcBy4EPg4treX0lFRCQQDXFhk7uPqGbTaXvZ14HLPsv7K6mIiAQiDXfUK6mIiARCf09FRERik4a703T1l4iIxEaViohIINRTERGR2KinIiIisUlDT0VJRUQkEF73O+KDoUa9iIjERpWKiEg
g1KgXEZHYqKciIiKxScPVX+qpiIhIbFSpiIgEQj0VERGJTRouKVZSEREJhBr1IiISGzXqRUREqlClIiISCDXqRUQkNmrUi4hIbNJQqainIiIisVGlIiISiDRc/aWkIiISiKx6KiIiEpfcTylKKiIiwVCjXkREpApVKiIigUhDpaKkIiISCN38KCIisVGlIiIisUnDfSqNslE/vuhm1pS8wYLXZyUdSs45s//JLF70IsuWzOaqKy9LOpycofO2784aeTa3PvsHbp15Oz+57QoKmhbwo9+P5q7Z47l5xq3cPONWunQ/IukwpRqNslK5775i7rzzHu65Z1zSoeSUTCbDbeNuZMCgEZSUrOWVOTN4YvpMli59O+nQgqbztu/atG/DNy8ezOjTLmP3rt1cfsdVnDD4RADu+809zJnxcsIR1q+G6KmY2bvAdqACKHf33mbWBpgKdAHeBYa5+wd1ef9aKxUz62tmfaL57mb2UzMbVJeDheKl2XPZ/MGWpMPIOX379GLFindZufI9ysrKKC6extmDz0w6rODpvH02eXkZmjRrQiYvQ9MDmrJ5/eakQ2owWXy/p310irv3dPfe0fLVwCx37wbMipbrpMakYmbXAbcBd5nZfwN/AFoAV5vZNXU9qOSmjoWHsrpkzZ7lktK1dOx4aIIR5Qadt323ef1mphU9xt1zJjDh1Ul8uH0nb7y0AIDvXHE+Y5++jYuvHUl+k3QOsrj7fk91NASYFM1PAobW9Y1qq1TOBY4HTgIuA4a6+38BZwLDq3uRmY0ys/lmNj+b3VnX2ESkkWnRqgV9+3+VH5xwKf+v70U0PaAZJ51zMg/87j5+fOoPuersn9Ky9YGc8/1vJR1qsKp+/0bTqE/t4sBMM3utyrb27r42ml8HtK/r8WtL9+XuXgF8aGYr3H0bgLt/ZGbZ6l7k7kVAEUB+k8Lcv5xBAFhTuo7OnTruWe5U2IE1a9YlGFFu0Hnbd18+oSfrV69n2+ZtAMx9eg5HHXcULz76AgDlu8t57s9/YciocxKMsv7EcUlx1e/fapzg7qVmdgjwrJkt+9Tr3czqHEhtlcpuM2sezR/3yUozOwioNqlIOr06fwFdux5Bly6dKSgoYF0UiJUAAATlSURBVNiwITwxfWbSYQVP523fbVqzkS/0+iJNmjUB4Njje1CyfDUHH3Lwnn2+2r8f7725KqkQ65XH8F+tx3Avjf7dADwK9AXWm1kHgOjfDXX9DLVVKie5+64ogKpJpAC4sK4HTdr9f7qDb5z0Ndq2bcO778zn+ht+zz33Tkk6rOBVVFQweswvmfHkg+RlMtw7aSpLlryVdFjB03nbd28veIs5M/7G75+8lWxFBe8sfoeZDz7DtZN+Ras2rTAzVi5Zyd2/uDPpUOtFfT/63sxaABl33x7N9wduAB6n8jv9pujfaXU+Rn1fwqbhL5Hwnd3huNp3kr16ZNXjFtd7Hd3+q/v9fbl4/dxq4zGzz1NZnUBlUfGgu99oZp8DioHDgFVUXlJcp8vu0nkJhYiI/At3fwfosZf17wOnxXEMJRURkUDoLz+KiEhs0vDsLyUVEZFAqFIREZHYpKFSaZRPKRYRkfqhSkVEJBAa/hIRkdikYfhLSUVEJBD//OCS3KSeioiIxEaViohIIOJ4SnHSlFRERALREH9OuL4pqYiIBEKVioiIxCYNlYoa9SIiEhtVKiIigdDNjyIiEhvd/CgiIrFJQ09FSUVEJBBpuPpLjXoREYmNKhURkUBo+EtERGKjq79ERCQ2aahU1FMREZHYqFIREQlEGq7+UlIREQlEGoa/lFRERAKhRr2IiMQmDY9pUaNeRERio0pFRCQQGv4SEZHYqFEvIiKxUU9FRERi4+77PdXGzAaY2ZtmttzMro77MyipiIg0EmaWB9wBDAS6AyPMrHucx9Dwl4hIIBqgp9IXWO7u7wCY2RRgCLAkrgOoUhERCYTHMNWiEFhdZbkkWhebeq9UyneXWn0fo67MbJS
7FyUdR67Reas7nbu6aSznLY7vSzMbBYyqsqqoIc9dY69URtW+i+yFzlvd6dzVjc7bPnL3InfvXWWqmlBKgc5VljtF62LT2JOKiEhj8irQzcyOMLMmwHnA43EeQI16EZFGwt3LzexHwDNAHjDR3RfHeYzGnlRSP0ZbT3Te6k7nrm503mLi7jOAGfX1/paGxwKIiEgY1FMREZHYNNqkUt+PKkgjM5toZhvMbFHSseQSM+tsZs+b2RIzW2xmo5OOKReYWTMzm2dmb0Tn7fqkY5LaNcrhr+hRBW8BZ1B588+rwAh3j+2u0jQys5OAHcB97n5M0vHkCjPrAHRw9/8zswOB14Ch+nmrmZkZ0MLdd5hZATAbGO3uryQcmtSgsVYqex5V4O67gU8eVSA1cPcXgc1Jx5Fr3H2tu/9fNL8dWErMdzGnkVfaES0WRFPj+y04xzTWpFLvjyoQ2Rsz6wL0AuYmG0luMLM8M1sAbACedXedt8A11qQi0uDMrCXwMDDG3bclHU8ucPcKd+9J5Z3ffc1Mw66Ba6xJpd4fVSBSVdQTeBh4wN0fSTqeXOPuW4DngQFJxyI1a6xJpd4fVSDyiajhPAFY6u5jk44nV5hZOzNrHc0fQOWFNcuSjUpq0yiTiruXA588qmApUBz3owrSyMwmA3OAL5pZiZmNTDqmHHE88D3gVDNbEE2Dkg4qB3QAnjezhVT+Ivisu09POCapRaO8pFhEROpHo6xURESkfiipiIhIbJRUREQkNkoqIiISGyUVERGJjZKKiIjERklFRERio6QiIiKx+f+qZaCCTOC31wAAAABJRU5ErkJggg==\n", 1004 | "text/plain": [ 1005 | "
" 1006 | ] 1007 | }, 1008 | "metadata": { 1009 | "tags": [], 1010 | "needs_background": "light" 1011 | } 1012 | }, 1013 | { 1014 | "output_type": "stream", 1015 | "text": [ 1016 | "Classification Report\n", 1017 | " precision recall f1-score support\n", 1018 | "\n", 1019 | " 0 0.99 1.00 0.99 402\n", 1020 | " 1 0.98 0.94 0.96 366\n", 1021 | " 2 0.94 0.98 0.96 343\n", 1022 | " 3 1.00 0.99 0.99 86\n", 1023 | "\n", 1024 | " accuracy 0.97 1197\n", 1025 | " macro avg 0.98 0.98 0.98 1197\n", 1026 | "weighted avg 0.97 0.97 0.97 1197\n", 1027 | "\n" 1028 | ], 1029 | "name": "stdout" 1030 | } 1031 | ] 1032 | }, 1033 | { 1034 | "cell_type": "markdown", 1035 | "metadata": { 1036 | "id": "FNP6aqzc9hE5" 1037 | }, 1038 | "source": [ 1039 | "# Convert to model for Tensorflow-Lite" 1040 | ] 1041 | }, 1042 | { 1043 | "cell_type": "code", 1044 | "metadata": { 1045 | "id": "ODjnYyld9hE6" 1046 | }, 1047 | "source": [ 1048 | "# Save as a model dedicated to inference\n", 1049 | "model.save(model_save_path, include_optimizer=False)" 1050 | ], 1051 | "execution_count": null, 1052 | "outputs": [] 1053 | }, 1054 | { 1055 | "cell_type": "code", 1056 | "metadata": { 1057 | "id": "zRfuK8Y59hE6", 1058 | "colab": { 1059 | "base_uri": "https://localhost:8080/" 1060 | }, 1061 | "outputId": "a4ca585c-b5d5-4244-8291-8674063209bb" 1062 | }, 1063 | "source": [ 1064 | "# Transform model (quantization)\n", 1065 | "\n", 1066 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n", 1067 | "converter.optimizations = [tf.lite.Optimize.DEFAULT]\n", 1068 | "tflite_quantized_model = converter.convert()\n", 1069 | "\n", 1070 | "open(tflite_save_path, 'wb').write(tflite_quantized_model)" 1071 | ], 1072 | "execution_count": null, 1073 | "outputs": [ 1074 | { 1075 | "output_type": "stream", 1076 | "text": [ 1077 | "INFO:tensorflow:Assets written to: /tmp/tmpe5yx255p/assets\n" 1078 | ], 1079 | "name": "stdout" 1080 | }, 1081 | { 1082 | "output_type": "execute_result", 1083 | "data": { 1084 | "text/plain": 
[ 1085 | "6352" 1086 | ] 1087 | }, 1088 | "metadata": { 1089 | "tags": [] 1090 | }, 1091 | "execution_count": 17 1092 | } 1093 | ] 1094 | }, 1095 | { 1096 | "cell_type": "markdown", 1097 | "metadata": { 1098 | "id": "CHBPBXdx9hE6" 1099 | }, 1100 | "source": [ 1101 | "# Inference test" 1102 | ] 1103 | }, 1104 | { 1105 | "cell_type": "code", 1106 | "metadata": { 1107 | "id": "mGAzLocO9hE7" 1108 | }, 1109 | "source": [ 1110 | "interpreter = tf.lite.Interpreter(model_path=tflite_save_path)\n", 1111 | "interpreter.allocate_tensors()" 1112 | ], 1113 | "execution_count": null, 1114 | "outputs": [] 1115 | }, 1116 | { 1117 | "cell_type": "code", 1118 | "metadata": { 1119 | "id": "oQuDK8YS9hE7" 1120 | }, 1121 | "source": [ 1122 | "# Get I / O tensor\n", 1123 | "input_details = interpreter.get_input_details()\n", 1124 | "output_details = interpreter.get_output_details()" 1125 | ], 1126 | "execution_count": null, 1127 | "outputs": [] 1128 | }, 1129 | { 1130 | "cell_type": "code", 1131 | "metadata": { 1132 | "id": "2_ixAf_l9hE7" 1133 | }, 1134 | "source": [ 1135 | "interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))" 1136 | ], 1137 | "execution_count": null, 1138 | "outputs": [] 1139 | }, 1140 | { 1141 | "cell_type": "code", 1142 | "metadata": { 1143 | "scrolled": true, 1144 | "id": "s4FoAnuc9hE7", 1145 | "colab": { 1146 | "base_uri": "https://localhost:8080/" 1147 | }, 1148 | "outputId": "91f18257-8d8b-4ef3-c558-e9b5f94fabbf" 1149 | }, 1150 | "source": [ 1151 | "%%time\n", 1152 | "# Inference implementation\n", 1153 | "interpreter.invoke()\n", 1154 | "tflite_results = interpreter.get_tensor(output_details[0]['index'])" 1155 | ], 1156 | "execution_count": null, 1157 | "outputs": [ 1158 | { 1159 | "output_type": "stream", 1160 | "text": [ 1161 | "CPU times: user 131 µs, sys: 17 µs, total: 148 µs\n", 1162 | "Wall time: 679 µs\n" 1163 | ], 1164 | "name": "stdout" 1165 | } 1166 | ] 1167 | }, 1168 | { 1169 | "cell_type": "code", 1170 | "metadata": { 1171 | "id": 
"vONjp19J9hE8", 1172 | "colab": { 1173 | "base_uri": "https://localhost:8080/" 1174 | }, 1175 | "outputId": "77205e24-fd00-42c4-f7b6-e06e527c2cba" 1176 | }, 1177 | "source": [ 1178 | "print(np.squeeze(tflite_results))\n", 1179 | "print(np.argmax(np.squeeze(tflite_results)))" 1180 | ], 1181 | "execution_count": null, 1182 | "outputs": [ 1183 | { 1184 | "output_type": "stream", 1185 | "text": [ 1186 | "[9.8105639e-01 1.8674169e-02 2.2328216e-04 4.6191799e-05]\n", 1187 | "0\n" 1188 | ], 1189 | "name": "stdout" 1190 | } 1191 | ] 1192 | } 1193 | ] 1194 | } --------------------------------------------------------------------------------