├── model
│   ├── __init__.py
│   └── keypoint_classifier
│       ├── keypoint_classifier_label.csv
│       ├── keypoint_classifier.hdf5
│       ├── keypoint_classifier.tflite
│       └── keypoint_classifier.py
├── .gitignore
├── README.md
├── Collect_from_image.py
├── Collect_from_webcam.py
├── main.py
├── LICENSE
└── training.ipynb

/model/__init__.py:
--------------------------------------------------------------------------------
1 | from model.keypoint_classifier.keypoint_classifier import KeyPointClassifier
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier_label.csv:
--------------------------------------------------------------------------------
1 | Angry
2 | Happy
3 | Neutral
4 | Sad
5 | Surprise
6 |
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier.hdf5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/REWTAO/Facial-emotion-recognition-using-mediapipe/HEAD/model/keypoint_classifier/keypoint_classifier.hdf5
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier.tflite:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/REWTAO/Facial-emotion-recognition-using-mediapipe/HEAD/model/keypoint_classifier/keypoint_classifier.tflite
--------------------------------------------------------------------------------
/model/keypoint_classifier/keypoint_classifier.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 |
7 | class KeyPointClassifier(object):
8 |     def __init__(
9 |         self,
10 |         model_path='model/keypoint_classifier/keypoint_classifier.tflite',
11 |         num_threads=1,
12 |     ):
13 |         self.interpreter = tf.lite.Interpreter(model_path=model_path,
14 |                                                num_threads=num_threads)
15 |
16 |         self.interpreter.allocate_tensors()
17 |         self.input_details = self.interpreter.get_input_details()
18 |         self.output_details = self.interpreter.get_output_details()
19 |         # Index of the last confident prediction; starts at 2, which is
20 |         # 'Neutral' in keypoint_classifier_label.csv.
21 |         self.last_index = 2
22 |
23 |     def __call__(
24 |         self,
25 |         landmark_list,
26 |     ):
27 |         input_details_tensor_index = self.input_details[0]['index']
28 |         self.interpreter.set_tensor(
29 |             input_details_tensor_index,
30 |             np.array([landmark_list], dtype=np.float32))
31 |         self.interpreter.invoke()
32 |
33 |         output_details_tensor_index = self.output_details[0]['index']
34 |
35 |         result = self.interpreter.get_tensor(output_details_tensor_index)
36 |         # Accept a new prediction only when the top class probability reaches
37 |         # 0.85; otherwise keep returning the last confident prediction, which
38 |         # reduces label flicker between frames.
39 |         if np.max(result) >= 0.85:
40 |             result_index = np.argmax(np.squeeze(result))
41 |             self.last_index = result_index
42 |             return result_index
43 |         else:
44 |             return self.last_index
45 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # bat
132 | *.bat
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Reference: Kazuhito Takahashi (https://twitter.com/KzhtTkhs)
2 | - [MediaPipe](https://mediapipe.dev/)
3 | - [Kazuhito00/mediapipe-python-sample](https://github.com/Kazuhito00/mediapipe-python-sample)
4 | - [Kazuhito00/hand-gesture-recognition-using-mediapipe](https://github.com/Kazuhito00/hand-gesture-recognition-using-mediapipe)
5 |
6 | # Facial emotion recognition using MediaPipe
7 | - Estimates the face mesh using MediaPipe (Python version). This is a sample program that recognizes facial emotion with a simple multilayer perceptron, using the key points returned by MediaPipe. Although this model is 97% accurate, it does not generalize well because the training data is too small.
8 | - The project is adapted from https://github.com/Kazuhito00/hand-gesture-recognition-using-mediapipe for use in facial emotion recognition.
9 | - keypoint.csv is empty because the full file is too large to upload, so if you want to train the model, please find a new dataset or record data yourself.
10 |
11 | This repository contains the following contents.
12 | - Sample program
13 | - Facial emotion recognition model (TFLite)
14 | - Scripts for collecting data from an image dataset and from a camera
15 |
16 | # Requirements
17 | - mediapipe 0.8.9
18 | - OpenCV 4.5.4 or later
19 | - TensorFlow 2.7.0 or later
20 | - scikit-learn 1.0.1 or later (only if you want to display the confusion matrix)
21 | - matplotlib 3.5.0 or later (only if you want to display the confusion matrix)
22 |
23 | ### main.py
24 | This is a sample program for inference. It uses keypoint_classifier.tflite as the model to predict your emotion (a minimal smoke-test sketch follows the script listing below).
25 |
26 | ### training.ipynb
27 | This is a model training script for facial emotion recognition.
28 |
29 | ### model/keypoint_classifier
30 | This directory stores files related to facial emotion recognition.
31 | The following files are stored.
32 | * Training data (keypoint.csv)
33 | * Trained model (keypoint_classifier.tflite)
34 | * Label data (keypoint_classifier_label.csv)
35 | * Inference module (keypoint_classifier.py)
36 |
37 | ### Collect_from_image.py
38 | This script collects the key points from an image dataset (.jpg). Point it at your own dataset directory; the name of the folder containing each image is used as its label (an example layout follows the script below).
39 |
40 | ### Collect_from_webcam.py
41 | This script collects the key points from your camera. Press 'k' to enter the recording mode (indicated by 'Record keypoints mode'), then press '0'-'9' to choose the label. The key points are appended to "model/keypoint_classifier/keypoint.csv" (a sample row format follows the script below).
42 |
43 | # Author
44 | Rattasart Sakunrat
45 |
46 | # License
47 | facial-emotion-recognition-using-mediapipe is under [Apache v2 license](LICENSE).
--------------------------------------------------------------------------------
/Collect_from_image.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import copy
3 | import itertools
4 | import os
5 | import cv2
6 | import mediapipe as mp
7 |
8 | def encode_label(label_name, category):
9 |     # Map a folder name to its class index; unknown names return None
10 |     if label_name in category:
11 |         return category.index(label_name)
12 |     return None
13 |
14 |
15 | def calc_landmark_list(image, landmarks):
16 |     image_width, image_height = image.shape[1], image.shape[0]
17 |
18 |     landmark_point = []
19 |
20 |     # Keypoint
21 |     for _, landmark in enumerate(landmarks.landmark):
22 |         landmark_x = min(int(landmark.x * image_width), image_width - 1)
23 |         landmark_y = min(int(landmark.y * image_height), image_height - 1)
24 |
25 |         landmark_point.append([landmark_x, landmark_y])
26 |
27 |     return landmark_point
28 |
29 |
30 | def pre_process_landmark(landmark_list):
31 |     temp_landmark_list = copy.deepcopy(landmark_list)
32 |
33 |     # Convert to relative coordinates
34 |     base_x, base_y = 0, 0
35 |     for index, landmark_point in enumerate(temp_landmark_list):
36 |         if index == 0:
37 |             base_x, base_y = landmark_point[0], landmark_point[1]
38 |
39 |         temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
40 |         temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
41 |
42 |     # Convert to a one-dimensional list
43 |     temp_landmark_list = list(
44 |         itertools.chain.from_iterable(temp_landmark_list))
45 |
46 |     # Normalization (see the worked example after logging_csv below)
47 |     max_value = max(list(map(abs, temp_landmark_list)))
48 |
49 |     def normalize_(n):
50 |         return n / max_value
51 |
52 |     temp_landmark_list = list(map(normalize_, temp_landmark_list))
53 |
54 |     return temp_landmark_list
55 |
56 |
57 | def logging_csv(number, landmark_list):
58 |     # Append one sample: the class index followed by the flattened key points
59 |     if number is not None and 0 <= number <= 4:
60 |         csv_path = 'model/keypoint_classifier/keypoint.csv'
61 |         with open(csv_path, 'a', newline="") as f:
62 |             writer = csv.writer(f)
63 |             writer.writerow([number, *landmark_list])
64 |     return
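# Worked example for pre_process_landmark() (illustrative numbers, not from the
# dataset): given three landmarks [[100, 100], [130, 100], [100, 160]], the first
# point becomes the origin, giving [[0, 0], [30, 0], [0, 60]]; flattening yields
# [0, 0, 30, 0, 0, 60]; dividing by the maximum absolute value (60) gives
# [0.0, 0.0, 0.5, 0.0, 0.0, 1.0]. With a real face this produces 478 * 2 = 956
# floats in [-1.0, 1.0].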
65 |
66 |
67 | root = "Your dataset dir"
68 | IMAGE_FILES = []
69 | # Folder names in the dataset must match these categories; the indices follow
70 | # model/keypoint_classifier/keypoint_classifier_label.csv
71 | category = ['Angry', 'Happy', 'Neutral', 'Sad', 'Surprise']
72 | for path, subdirs, files in os.walk(root):
73 |     for name in files:
74 |         IMAGE_FILES.append(os.path.join(path, name))
75 |
76 | mp_face_mesh = mp.solutions.face_mesh
77 | face_mesh = mp_face_mesh.FaceMesh(
78 |     max_num_faces=1,
79 |     refine_landmarks=True,
80 |     min_detection_confidence=0.7,
81 |     static_image_mode=True)
82 |
83 |
84 | for idx, file in enumerate(IMAGE_FILES):
85 |     # Use the name of the folder that contains the image as the label
86 |     label_name = os.path.basename(os.path.dirname(file))
87 |     label = encode_label(label_name, category)
88 |     image = cv2.imread(file)
89 |     image = cv2.flip(image, 1)  # Mirror display
90 |     debug_image = copy.deepcopy(image)
91 |
92 |     # Detection implementation
93 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
94 |     image.flags.writeable = False
95 |     results = face_mesh.process(image)
96 |     image.flags.writeable = True
97 |     if results.multi_face_landmarks is not None:
98 |         for face_landmarks in results.multi_face_landmarks:
99 |
100 |             # Landmark calculation
101 |             landmark_list = calc_landmark_list(debug_image, face_landmarks)
102 |
103 |             # Conversion to relative coordinates / normalized coordinates
104 |             pre_processed_landmark_list = pre_process_landmark(
105 |                 landmark_list)
106 |             # Write to the dataset file
107 |             logging_csv(label, pre_processed_landmark_list)
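# Expected dataset layout for the script above (a sketch; the folder names are
# illustrative but must match the entries in `category`, since each folder name
# becomes the class label of the images inside it):
#
#   Your dataset dir/
#   ├── Angry/      0001.jpg, 0002.jpg, ...
#   ├── Happy/      ...
#   ├── Neutral/    ...
#   ├── Sad/        ...
#   └── Surprise/   ...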
--------------------------------------------------------------------------------
/Collect_from_webcam.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import copy
3 | import itertools
4 | import cv2 as cv
5 | import mediapipe as mp
6 |
7 | def select_mode(key, mode):
8 |     number = -1
9 |     if 48 <= key <= 57:  # 0 ~ 9
10 |         number = key - 48
11 |     if key == 110:  # n: back to normal mode
12 |         mode = 0
13 |     if key == 107:  # k: record keypoints mode
14 |         mode = 1
15 |     return number, mode
16 |
17 |
18 | def calc_landmark_list(image, landmarks):
19 |     image_width, image_height = image.shape[1], image.shape[0]
20 |
21 |     landmark_point = []
22 |
23 |     # Keypoint
24 |     for _, landmark in enumerate(landmarks.landmark):
25 |         landmark_x = min(int(landmark.x * image_width), image_width - 1)
26 |         landmark_y = min(int(landmark.y * image_height), image_height - 1)
27 |
28 |         landmark_point.append([landmark_x, landmark_y])
29 |
30 |     return landmark_point
31 |
32 |
33 | def pre_process_landmark(landmark_list):
34 |     temp_landmark_list = copy.deepcopy(landmark_list)
35 |
36 |     # Convert to relative coordinates
37 |     base_x, base_y = 0, 0
38 |     for index, landmark_point in enumerate(temp_landmark_list):
39 |         if index == 0:
40 |             base_x, base_y = landmark_point[0], landmark_point[1]
41 |
42 |         temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
43 |         temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
44 |
45 |     # Convert to a one-dimensional list
46 |     temp_landmark_list = list(
47 |         itertools.chain.from_iterable(temp_landmark_list))
48 |
49 |     # Normalization
50 |     max_value = max(list(map(abs, temp_landmark_list)))
51 |
52 |     def normalize_(n):
53 |         return n / max_value
54 |
55 |     temp_landmark_list = list(map(normalize_, temp_landmark_list))
56 |
57 |     return temp_landmark_list
58 |
59 |
60 | def logging_csv(number, mode, landmark_list):
61 |     if mode == 0:
62 |         pass
63 |     if mode == 1 and (0 <= number <= 9):
64 |         csv_path = 'model/keypoint_classifier/keypoint.csv'
65 |         with open(csv_path, 'a', newline="") as f:
66 |             writer = csv.writer(f)
67 |             writer.writerow([number, *landmark_list])
68 |     return
69 |
70 |
71 | cap_device = 0
72 | cap_width = 1920
73 | cap_height = 1080
74 |
75 | # Camera preparation
76 | cap = cv.VideoCapture(cap_device)
77 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
78 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
79 |
80 | mp_face_mesh = mp.solutions.face_mesh
81 | face_mesh = mp_face_mesh.FaceMesh(
82 |     max_num_faces=1,
83 |     refine_landmarks=True,
84 |     min_detection_confidence=0.7,
85 |     min_tracking_confidence=0.5)
86 |
87 | mode = 0
88 |
89 | while True:
90 |
91 |     # Process Key (ESC: end)
92 |     key = cv.waitKey(10)
93 |     if key == 27:  # ESC
94 |         break
95 |     number, mode = select_mode(key, mode)
96 |
97 |     # Camera capture
98 |     ret, image = cap.read()
99 |     if not ret:
100 |         break
101 |     image = cv.flip(image, 1)  # Mirror display
102 |     debug_image = copy.deepcopy(image)
103 |
104 |     # Detection implementation
105 |     image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
106 |
107 |     image.flags.writeable = False
108 |     results = face_mesh.process(image)
109 |     image.flags.writeable = True
110 |
111 |     if results.multi_face_landmarks is not None:
112 |         for face_landmarks in results.multi_face_landmarks:
113 |
114 |             # Landmark calculation
115 |             landmark_list = calc_landmark_list(debug_image, face_landmarks)
116 |
117 |             # Conversion to relative coordinates / normalized coordinates
118 |             pre_processed_landmark_list = pre_process_landmark(
119 |                 landmark_list)
120 |             # Write to the dataset file
121 |             logging_csv(number, mode, pre_processed_landmark_list)
122 |
123 |     if mode == 1:
124 |         cv.putText(debug_image, "MODE:" + 'Record keypoints mode', (10, 90),
125 |                    cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1,
126 |                    cv.LINE_AA)
127 |     cv.imshow('Facial Emotion Recognition', debug_image)
128 |
129 | cap.release()
130 | cv.destroyAllWindows()
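# Format of each row appended to keypoint.csv (a sketch; the numbers are
# illustrative): one integer class label followed by 478 * 2 = 956 normalized
# floats, which matches usecols=range(1, 957) in training.ipynb. The first pair
# is always 0.0,0.0 because landmark 0 is the origin of the relative coordinates:
#
#   2,0.0,0.0,0.0132,-0.0457,...,0.1288,0.4191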
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import copy
3 | import itertools
4 |
5 | import cv2 as cv
6 | import numpy as np
7 | import mediapipe as mp
8 | from model import KeyPointClassifier
9 |
10 |
11 | def calc_landmark_list(image, landmarks):
12 |     image_width, image_height = image.shape[1], image.shape[0]
13 |
14 |     landmark_point = []
15 |
16 |     # Keypoint
17 |     for _, landmark in enumerate(landmarks.landmark):
18 |         landmark_x = min(int(landmark.x * image_width), image_width - 1)
19 |         landmark_y = min(int(landmark.y * image_height), image_height - 1)
20 |
21 |         landmark_point.append([landmark_x, landmark_y])
22 |
23 |     return landmark_point
24 |
25 |
26 | def pre_process_landmark(landmark_list):
27 |     temp_landmark_list = copy.deepcopy(landmark_list)
28 |
29 |     # Convert to relative coordinates
30 |     base_x, base_y = 0, 0
31 |     for index, landmark_point in enumerate(temp_landmark_list):
32 |         if index == 0:
33 |             base_x, base_y = landmark_point[0], landmark_point[1]
34 |
35 |         temp_landmark_list[index][0] = temp_landmark_list[index][0] - base_x
36 |         temp_landmark_list[index][1] = temp_landmark_list[index][1] - base_y
37 |
38 |     # Convert to a one-dimensional list
39 |     temp_landmark_list = list(
40 |         itertools.chain.from_iterable(temp_landmark_list))
41 |
42 |     # Normalization
43 |     max_value = max(list(map(abs, temp_landmark_list)))
44 |
45 |     def normalize_(n):
46 |         return n / max_value
47 |
48 |     temp_landmark_list = list(map(normalize_, temp_landmark_list))
49 |
50 |     return temp_landmark_list
51 |
52 |
53 | def draw_bounding_rect(use_brect, image, brect):
54 |     if use_brect:
55 |         # Outer rectangle
56 |         cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]),
57 |                      (0, 0, 0), 1)
58 |
59 |     return image
60 |
61 | def calc_bounding_rect(image, landmarks):
62 |     image_width, image_height = image.shape[1], image.shape[0]
63 |
64 |     landmark_array = np.empty((0, 2), int)
65 |
66 |     for _, landmark in enumerate(landmarks.landmark):
67 |         landmark_x = min(int(landmark.x * image_width), image_width - 1)
68 |         landmark_y = min(int(landmark.y * image_height), image_height - 1)
69 |
70 |         landmark_point = [np.array((landmark_x, landmark_y))]
71 |
72 |         landmark_array = np.append(landmark_array, landmark_point, axis=0)
73 |
74 |     x, y, w, h = cv.boundingRect(landmark_array)
75 |
76 |     return [x, y, x + w, y + h]
77 |
78 | def draw_info_text(image, brect, facial_text):
79 |     cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[1] - 22),
80 |                  (0, 0, 0), -1)
81 |
82 |     if facial_text != "":
83 |         info_text = 'Emotion :' + facial_text
84 |         cv.putText(image, info_text, (brect[0] + 5, brect[1] - 4),
85 |                    cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv.LINE_AA)
86 |
87 |     return image
88 |
89 | cap_device = 0
90 | cap_width = 1920
91 | cap_height = 1080
92 |
93 | use_brect = True
94 |
95 | # Camera preparation
96 | cap = cv.VideoCapture(cap_device)
97 | cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
98 | cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
99 |
100 | # Model load
101 | mp_face_mesh = mp.solutions.face_mesh
102 | face_mesh = mp_face_mesh.FaceMesh(
103 |     max_num_faces=1,
104 |     refine_landmarks=True,
105 |     min_detection_confidence=0.5,
106 |     min_tracking_confidence=0.5)
107 |
108 | keypoint_classifier = KeyPointClassifier()
109 |
110 |
111 | # Read labels
112 | with open('model/keypoint_classifier/keypoint_classifier_label.csv',
113 |           encoding='utf-8-sig') as f:
114 |     keypoint_classifier_labels = csv.reader(f)
115 |     keypoint_classifier_labels = [
116 |         row[0] for row in keypoint_classifier_labels
117 |     ]
118 |
119 | while True:
120 |
121 |     # Process Key (ESC: end)
122 |     key = cv.waitKey(10)
123 |     if key == 27:  # ESC
124 |         break
125 |
126 |     # Camera capture
127 |     ret, image = cap.read()
128 |     if not ret:
129 |         break
130 |     image = cv.flip(image, 1)  # Mirror display
131 |     debug_image = copy.deepcopy(image)
132 |
133 |     # Detection implementation
134 |     image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
135 |
136 |     image.flags.writeable = False
137 |     results = face_mesh.process(image)
138 |     image.flags.writeable = True
139 |
140 |     if results.multi_face_landmarks is not None:
141 |         for face_landmarks in results.multi_face_landmarks:
142 |             # Bounding box calculation
143 |             brect = calc_bounding_rect(debug_image, face_landmarks)
144 |
145 |             # Landmark calculation
146 |             landmark_list = calc_landmark_list(debug_image, face_landmarks)
147 |
148 |             # Conversion to relative coordinates / normalized coordinates
149 |             pre_processed_landmark_list = pre_process_landmark(
150 |                 landmark_list)
151 |
152 |             # Emotion classification
153 |             facial_emotion_id = keypoint_classifier(pre_processed_landmark_list)
154 |             # Drawing part
155 |             debug_image = draw_bounding_rect(use_brect, debug_image, brect)
156 |             debug_image = draw_info_text(
157 |                 debug_image,
158 |                 brect,
159 |                 keypoint_classifier_labels[facial_emotion_id])
160 |
161 |     # Screen display
162 |     cv.imshow('Facial Emotion Recognition', debug_image)
163 |
164 | cap.release()
165 | cv.destroyAllWindows()
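# Minimal smoke test for KeyPointClassifier (a sketch, not part of the original
# program; run it from the repository root so the relative model path resolves).
# The TFLite model expects one face flattened to 478 landmarks * 2 coordinates
# = 956 floats in [-1.0, 1.0], pre-processed exactly as in pre_process_landmark():
#
#   import numpy as np
#   from model import KeyPointClassifier
#
#   classifier = KeyPointClassifier()
#   dummy = np.random.uniform(-1.0, 1.0, 478 * 2).astype(np.float32).tolist()
#   emotion_id = classifier(dummy)  # an index into keypoint_classifier_label.csv
#   print(emotion_id)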
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /training.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 736, 6 | "metadata": { 7 | "id": "igMyGnjE9hEp" 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import csv\n", 12 | "\n", 13 | "import numpy as np\n", 14 | "import tensorflow as tf\n", 15 | "from sklearn.model_selection import train_test_split\n", 16 | "\n", 17 | "RANDOM_SEED = 42" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": { 23 | "id": "t2HDvhIu9hEr" 24 | }, 25 | "source": [ 26 | "# Specify each path" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 737, 32 | "metadata": { 33 | "id": "9NvZP2Zn9hEy" 34 | }, 35 | "outputs": [], 36 | "source": [ 37 | "dataset = 'model/keypoint_classifier/keypoint.csv'\n", 38 | "model_save_path = 'model/keypoint_classifier/keypoint_classifier.hdf5'\n", 39 | "tflite_save_path = 'model/keypoint_classifier/keypoint_classifier.tflite'" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": { 45 | "id": "s5oMH7x19hEz" 46 | }, 47 | "source": [ 48 | "# Set number of classes" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 738, 54 | "metadata": { 55 | "id": "du4kodXL9hEz" 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "NUM_CLASSES = 5" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": { 65 | "id": "XjnL0uso9hEz" 66 | }, 67 | "source": [ 68 | "# Dataset reading" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 739, 74 | "metadata": { 75 | "id": "QT5ZqtEz9hE0" 76 | }, 77 | "outputs": [], 78 | "source": [ 79 | "X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (478 * 2) + 1)))" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 740, 85 | "metadata": { 86 | "id": "QmoKFsp49hE0" 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 741, 96 | "metadata": { 97 | "id": "xQU7JTZ_9hE0" 98 | }, 99 | "outputs": [], 100 | "source": [ 101 | "X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, train_size=0.8, 
random_state=RANDOM_SEED)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "metadata": { 107 | "id": "mxK_lETT9hE0" 108 | }, 109 | "source": [ 110 | "# Model building" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 742, 116 | "metadata": { 117 | "id": "vHBmUf1t9hE1" 118 | }, 119 | "outputs": [], 120 | "source": [ 121 | "# model = tf.keras.models.Sequential([\n", 122 | "# tf.keras.layers.Input((478 * 2, )),\n", 123 | "# tf.keras.layers.Dense(64, activation='selu', kernel_initializer='lecun_normal'),\n", 124 | "# tf.keras.layers.Dense(32, activation='selu', kernel_initializer='lecun_normal'),\n", 125 | "# tf.keras.layers.Dense(16, activation='selu', kernel_initializer='lecun_normal'),\n", 126 | "# tf.keras.layers.Dropout(0.5),\n", 127 | "# tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n", 128 | "# ])\n", 129 | "\n", 130 | "model = tf.keras.models.Sequential([\n", 131 | " tf.keras.layers.Input((478 * 2, )),\n", 132 | " tf.keras.layers.Dense(20, activation='elu'),\n", 133 | " tf.keras.layers.Dropout(0.5),\n", 134 | " tf.keras.layers.Dense(10, activation='elu'),\n", 135 | " tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')\n", 136 | "])" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 743, 142 | "metadata": { 143 | "colab": { 144 | "base_uri": "https://localhost:8080/" 145 | }, 146 | "id": "ypqky9tc9hE1", 147 | "outputId": "5db082bb-30e3-4110-bf63-a1ee777ecd46" 148 | }, 149 | "outputs": [ 150 | { 151 | "name": "stdout", 152 | "output_type": "stream", 153 | "text": [ 154 | "Model: \"sequential_36\"\n", 155 | "_________________________________________________________________\n", 156 | " Layer (type) Output Shape Param # \n", 157 | "=================================================================\n", 158 | " dense_135 (Dense) (None, 20) 19140 \n", 159 | " \n", 160 | " dropout_86 (Dropout) (None, 20) 0 \n", 161 | " \n", 162 | " dense_136 (Dense) (None, 10) 210 \n", 163 | " \n", 164 | " dense_137 (Dense) (None, 5) 55 \n", 165 | " \n", 166 | "=================================================================\n", 167 | "Total params: 19,405\n", 168 | "Trainable params: 19,405\n", 169 | "Non-trainable params: 0\n", 170 | "_________________________________________________________________\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "model.summary() # tf.keras.utils.plot_model(model, show_shapes=True)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 744, 181 | "metadata": { 182 | "id": "MbMjOflQ9hE1" 183 | }, 184 | "outputs": [], 185 | "source": [ 186 | "# Model checkpoint callback\n", 187 | "cp_callback = tf.keras.callbacks.ModelCheckpoint(\n", 188 | " model_save_path, verbose=1, save_weights_only=False)\n", 189 | "# Callback for early stopping\n", 190 | "es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 745, 196 | "metadata": { 197 | "id": "c3Dac0M_9hE2" 198 | }, 199 | "outputs": [], 200 | "source": [ 201 | "# Model compilation\n", 202 | "opt = tf.keras.optimizers.Adam(learning_rate=0.001)\n", 203 | "model.compile(\n", 204 | " optimizer=opt,\n", 205 | " loss='sparse_categorical_crossentropy',\n", 206 | " metrics=['accuracy']\n", 207 | ")" 208 | ] 209 | }, 210 | { 211 | "cell_type": "markdown", 212 | "metadata": { 213 | "id": "7XI0j1Iu9hE2" 214 | }, 215 | "source": [ 216 | "# Model training" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": 746, 222 | 
"metadata": { 223 | "colab": { 224 | "base_uri": "https://localhost:8080/" 225 | }, 226 | "id": "WirBl-JE9hE3", 227 | "outputId": "71b30ca2-8294-4d9d-8aa2-800d90d399de", 228 | "scrolled": true 229 | }, 230 | "outputs": [ 231 | { 232 | "name": "stdout", 233 | "output_type": "stream", 234 | "text": [ 235 | "Epoch 1/1000\n", 236 | "42/75 [===============>..............] - ETA: 0s - loss: 1.5240 - accuracy: 0.3328 \n", 237 | "Epoch 00001: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 238 | "75/75 [==============================] - 0s 3ms/step - loss: 1.4114 - accuracy: 0.4166 - val_loss: 1.0966 - val_accuracy: 0.6219\n", 239 | "Epoch 2/1000\n", 240 | "50/75 [===================>..........] - ETA: 0s - loss: 1.0692 - accuracy: 0.6288\n", 241 | "Epoch 00002: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 242 | "75/75 [==============================] - 0s 2ms/step - loss: 1.0157 - accuracy: 0.6526 - val_loss: 0.7701 - val_accuracy: 0.7747\n", 243 | "Epoch 3/1000\n", 244 | "74/75 [============================>.] - ETA: 0s - loss: 0.7589 - accuracy: 0.7383\n", 245 | "Epoch 00003: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 246 | "75/75 [==============================] - 0s 2ms/step - loss: 0.7578 - accuracy: 0.7391 - val_loss: 0.5567 - val_accuracy: 0.8338\n", 247 | "Epoch 4/1000\n", 248 | "44/75 [================>.............] - ETA: 0s - loss: 0.6174 - accuracy: 0.7889\n", 249 | "Epoch 00004: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 250 | "75/75 [==============================] - 0s 2ms/step - loss: 0.6012 - accuracy: 0.7877 - val_loss: 0.4336 - val_accuracy: 0.8769\n", 251 | "Epoch 5/1000\n", 252 | "50/75 [===================>..........] - ETA: 0s - loss: 0.5293 - accuracy: 0.8108\n", 253 | "Epoch 00005: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 254 | "75/75 [==============================] - 0s 2ms/step - loss: 0.5128 - accuracy: 0.8168 - val_loss: 0.3685 - val_accuracy: 0.8924\n", 255 | "Epoch 6/1000\n", 256 | "50/75 [===================>..........] - ETA: 0s - loss: 0.4679 - accuracy: 0.8305\n", 257 | "Epoch 00006: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 258 | "75/75 [==============================] - 0s 2ms/step - loss: 0.4568 - accuracy: 0.8366 - val_loss: 0.3136 - val_accuracy: 0.9146\n", 259 | "Epoch 7/1000\n", 260 | "72/75 [===========================>..] - ETA: 0s - loss: 0.4145 - accuracy: 0.8541\n", 261 | "Epoch 00007: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 262 | "75/75 [==============================] - 0s 2ms/step - loss: 0.4124 - accuracy: 0.8545 - val_loss: 0.2950 - val_accuracy: 0.9129\n", 263 | "Epoch 8/1000\n", 264 | "50/75 [===================>..........] - ETA: 0s - loss: 0.3918 - accuracy: 0.8528\n", 265 | "Epoch 00008: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 266 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3812 - accuracy: 0.8583 - val_loss: 0.2825 - val_accuracy: 0.8890\n", 267 | "Epoch 9/1000\n", 268 | "49/75 [==================>...........] - ETA: 0s - loss: 0.3631 - accuracy: 0.8736\n", 269 | "Epoch 00009: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 270 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3550 - accuracy: 0.8754 - val_loss: 0.2410 - val_accuracy: 0.9263\n", 271 | "Epoch 10/1000\n", 272 | "51/75 [===================>..........] 
- ETA: 0s - loss: 0.3485 - accuracy: 0.8742\n", 273 | "Epoch 00010: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 274 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3438 - accuracy: 0.8762 - val_loss: 0.2366 - val_accuracy: 0.9246\n", 275 | "Epoch 11/1000\n", 276 | "37/75 [=============>................] - ETA: 0s - loss: 0.3221 - accuracy: 0.8813\n", 277 | "Epoch 00011: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 278 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3140 - accuracy: 0.8895 - val_loss: 0.2279 - val_accuracy: 0.9255\n", 279 | "Epoch 12/1000\n", 280 | "47/75 [=================>............] - ETA: 0s - loss: 0.2933 - accuracy: 0.8981\n", 281 | "Epoch 00012: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 282 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3028 - accuracy: 0.8934 - val_loss: 0.2251 - val_accuracy: 0.9213\n", 283 | "Epoch 13/1000\n", 284 | "48/75 [==================>...........] - ETA: 0s - loss: 0.3021 - accuracy: 0.8911\n", 285 | "Epoch 00013: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 286 | "75/75 [==============================] - 0s 2ms/step - loss: 0.3081 - accuracy: 0.8883 - val_loss: 0.2005 - val_accuracy: 0.9389\n", 287 | "Epoch 14/1000\n", 288 | "51/75 [===================>..........] - ETA: 0s - loss: 0.2847 - accuracy: 0.9029\n", 289 | "Epoch 00014: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 290 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2869 - accuracy: 0.9010 - val_loss: 0.2123 - val_accuracy: 0.9250\n", 291 | "Epoch 15/1000\n", 292 | "42/75 [===============>..............] - ETA: 0s - loss: 0.2842 - accuracy: 0.8984\n", 293 | "Epoch 00015: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 294 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2801 - accuracy: 0.8995 - val_loss: 0.1883 - val_accuracy: 0.9401\n", 295 | "Epoch 16/1000\n", 296 | "51/75 [===================>..........] - ETA: 0s - loss: 0.2800 - accuracy: 0.9009\n", 297 | "Epoch 00016: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 298 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2763 - accuracy: 0.9001 - val_loss: 0.2087 - val_accuracy: 0.9158\n", 299 | "Epoch 17/1000\n", 300 | "52/75 [===================>..........] - ETA: 0s - loss: 0.2662 - accuracy: 0.9076\n", 301 | "Epoch 00017: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 302 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2639 - accuracy: 0.9068 - val_loss: 0.1832 - val_accuracy: 0.9410\n", 303 | "Epoch 18/1000\n", 304 | "47/75 [=================>............] - ETA: 0s - loss: 0.2514 - accuracy: 0.9061\n", 305 | "Epoch 00018: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 306 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2494 - accuracy: 0.9087 - val_loss: 0.1723 - val_accuracy: 0.9460\n", 307 | "Epoch 19/1000\n", 308 | "46/75 [=================>............] - ETA: 0s - loss: 0.2511 - accuracy: 0.9081\n", 309 | "Epoch 00019: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 310 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2546 - accuracy: 0.9092 - val_loss: 0.1705 - val_accuracy: 0.9430\n", 311 | "Epoch 20/1000\n", 312 | "49/75 [==================>...........] 
- ETA: 0s - loss: 0.2378 - accuracy: 0.9165\n", 313 | "Epoch 00020: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 314 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2367 - accuracy: 0.9149 - val_loss: 0.1727 - val_accuracy: 0.9347\n", 315 | "Epoch 21/1000\n", 316 | "51/75 [===================>..........] - ETA: 0s - loss: 0.2502 - accuracy: 0.9056\n", 317 | "Epoch 00021: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 318 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2481 - accuracy: 0.9079 - val_loss: 0.1590 - val_accuracy: 0.9535\n", 319 | "Epoch 22/1000\n", 320 | "50/75 [===================>..........] - ETA: 0s - loss: 0.2407 - accuracy: 0.9123\n", 321 | "Epoch 00022: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 322 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2415 - accuracy: 0.9134 - val_loss: 0.1634 - val_accuracy: 0.9456\n", 323 | "Epoch 23/1000\n", 324 | "69/75 [==========================>...] - ETA: 0s - loss: 0.2373 - accuracy: 0.9134\n", 325 | "Epoch 00023: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 326 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2393 - accuracy: 0.9128 - val_loss: 0.1705 - val_accuracy: 0.9372\n", 327 | "Epoch 24/1000\n", 328 | "50/75 [===================>..........] - ETA: 0s - loss: 0.2498 - accuracy: 0.9109\n", 329 | "Epoch 00024: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 330 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2457 - accuracy: 0.9098 - val_loss: 0.1575 - val_accuracy: 0.9477\n", 331 | "Epoch 25/1000\n", 332 | "50/75 [===================>..........] - ETA: 0s - loss: 0.2428 - accuracy: 0.9100\n", 333 | "Epoch 00025: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 334 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2394 - accuracy: 0.9096 - val_loss: 0.1570 - val_accuracy: 0.9447\n", 335 | "Epoch 26/1000\n", 336 | "49/75 [==================>...........] - ETA: 0s - loss: 0.2169 - accuracy: 0.9251\n", 337 | "Epoch 00026: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 338 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2196 - accuracy: 0.9241 - val_loss: 0.1567 - val_accuracy: 0.9401\n", 339 | "Epoch 27/1000\n", 340 | "51/75 [===================>..........] - ETA: 0s - loss: 0.2334 - accuracy: 0.9144\n", 341 | "Epoch 00027: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 342 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2235 - accuracy: 0.9180 - val_loss: 0.1580 - val_accuracy: 0.9493\n", 343 | "Epoch 28/1000\n", 344 | "51/75 [===================>..........] - ETA: 0s - loss: 0.2239 - accuracy: 0.9182\n", 345 | "Epoch 00028: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 346 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2227 - accuracy: 0.9180 - val_loss: 0.1437 - val_accuracy: 0.9502\n", 347 | "Epoch 29/1000\n", 348 | "50/75 [===================>..........] - ETA: 0s - loss: 0.2260 - accuracy: 0.9217\n", 349 | "Epoch 00029: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 350 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2186 - accuracy: 0.9236 - val_loss: 0.1533 - val_accuracy: 0.9414\n", 351 | "Epoch 30/1000\n", 352 | "50/75 [===================>..........] 
- ETA: 0s - loss: 0.2082 - accuracy: 0.9247\n", 353 | "Epoch 00030: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 354 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2180 - accuracy: 0.9221 - val_loss: 0.1633 - val_accuracy: 0.9447\n", 355 | "Epoch 31/1000\n", 356 | "48/75 [==================>...........] - ETA: 0s - loss: 0.2147 - accuracy: 0.9222\n", 357 | "Epoch 00031: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 358 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2075 - accuracy: 0.9258 - val_loss: 0.1820 - val_accuracy: 0.9284\n", 359 | "Epoch 32/1000\n", 360 | "52/75 [===================>..........] - ETA: 0s - loss: 0.1974 - accuracy: 0.9292\n", 361 | "Epoch 00032: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 362 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2018 - accuracy: 0.9287 - val_loss: 0.1823 - val_accuracy: 0.9317\n", 363 | "Epoch 33/1000\n", 364 | "49/75 [==================>...........] - ETA: 0s - loss: 0.2108 - accuracy: 0.9244\n", 365 | "Epoch 00033: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 366 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2133 - accuracy: 0.9217 - val_loss: 0.1296 - val_accuracy: 0.9598\n", 367 | "Epoch 34/1000\n", 368 | "36/75 [=============>................] - ETA: 0s - loss: 0.1983 - accuracy: 0.9280\n", 369 | "Epoch 00034: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 370 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2041 - accuracy: 0.9253 - val_loss: 0.1382 - val_accuracy: 0.9485\n", 371 | "Epoch 35/1000\n", 372 | "48/75 [==================>...........] - ETA: 0s - loss: 0.2137 - accuracy: 0.9201\n", 373 | "Epoch 00035: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 374 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2102 - accuracy: 0.9215 - val_loss: 0.1300 - val_accuracy: 0.9615\n", 375 | "Epoch 36/1000\n", 376 | "49/75 [==================>...........] - ETA: 0s - loss: 0.1953 - accuracy: 0.9283\n", 377 | "Epoch 00036: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 378 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2092 - accuracy: 0.9232 - val_loss: 0.1340 - val_accuracy: 0.9569\n", 379 | "Epoch 37/1000\n", 380 | "42/75 [===============>..............] - ETA: 0s - loss: 0.2197 - accuracy: 0.9102\n", 381 | "Epoch 00037: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 382 | "75/75 [==============================] - 0s 2ms/step - loss: 0.2120 - accuracy: 0.9190 - val_loss: 0.1293 - val_accuracy: 0.9552\n", 383 | "Epoch 38/1000\n", 384 | "39/75 [==============>...............] - ETA: 0s - loss: 0.1932 - accuracy: 0.9263\n", 385 | "Epoch 00038: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 386 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1935 - accuracy: 0.9272 - val_loss: 0.1208 - val_accuracy: 0.9631\n", 387 | "Epoch 39/1000\n", 388 | "48/75 [==================>...........] - ETA: 0s - loss: 0.1767 - accuracy: 0.9354\n", 389 | "Epoch 00039: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 390 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1865 - accuracy: 0.9309 - val_loss: 0.1171 - val_accuracy: 0.9673\n", 391 | "Epoch 40/1000\n", 392 | "46/75 [=================>............] 
- ETA: 0s - loss: 0.1934 - accuracy: 0.9275\n", 393 | "Epoch 00040: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 394 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1869 - accuracy: 0.9301 - val_loss: 0.1154 - val_accuracy: 0.9673\n", [dump lines 395-790 trimmed: epochs 41-139 repeat the same three-line checkpoint log while training loss drifts down from ~0.19 to ~0.13 and val_accuracy climbs, with fluctuations, from ~0.95 to ~0.98] 791 | "Epoch 140/1000\n", 792 | "43/75 [================>.............] 
- ETA: 0s - loss: 0.1297 - accuracy: 0.9528\n", 793 | "Epoch 00140: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 794 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1168 - accuracy: 0.9573 - val_loss: 0.0635 - val_accuracy: 0.9778\n", 795 | "Epoch 141/1000\n", 796 | "42/75 [===============>..............] - ETA: 0s - loss: 0.1082 - accuracy: 0.9589\n", 797 | "Epoch 00141: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 798 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1201 - accuracy: 0.9556 - val_loss: 0.0982 - val_accuracy: 0.9611\n", 799 | "Epoch 142/1000\n", 800 | "42/75 [===============>..............] - ETA: 0s - loss: 0.1299 - accuracy: 0.9490\n", 801 | "Epoch 00142: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 802 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1251 - accuracy: 0.9519 - val_loss: 0.0624 - val_accuracy: 0.9824\n", 803 | "Epoch 143/1000\n", 804 | "43/75 [================>.............] - ETA: 0s - loss: 0.1231 - accuracy: 0.9531\n", 805 | "Epoch 00143: saving model to model/keypoint_classifier\\keypoint_classifier.hdf5\n", 806 | "75/75 [==============================] - 0s 2ms/step - loss: 0.1150 - accuracy: 0.9564 - val_loss: 0.0659 - val_accuracy: 0.9774\n", 807 | "Epoch 00143: early stopping\n" 808 | ] 809 | }, 810 | { 811 | "data": { 812 | "text/plain": [ 813 | "" 814 | ] 815 | }, 816 | "execution_count": 746, 817 | "metadata": {}, 818 | "output_type": "execute_result" 819 | } 820 | ], 821 | "source": [ 822 | "model.fit(\n", 823 | " X_train,\n", 824 | " y_train,\n", 825 | " epochs=1000,\n", 826 | " batch_size=128,\n", 827 | " validation_data=(X_test, y_test),\n", 828 | " callbacks=[cp_callback, es_callback]\n", 829 | ")" 830 | ] 831 | }, 832 | { 833 | "cell_type": "code", 834 | "execution_count": 747, 835 | "metadata": { 836 | "colab": { 837 | "base_uri": "https://localhost:8080/" 838 | }, 839 | "id": "pxvb2Y299hE3", 840 | "outputId": "59eb3185-2e37-4b9e-bc9d-ab1b8ac29b7f" 841 | }, 842 | "outputs": [ 843 | { 844 | "name": "stdout", 845 | "output_type": "stream", 846 | "text": [ 847 | "19/19 [==============================] - 0s 777us/step - loss: 0.0659 - accuracy: 0.9774\n" 848 | ] 849 | } 850 | ], 851 | "source": [ 852 | "# Model evaluation\n", 853 | "val_loss, val_acc = model.evaluate(X_test, y_test, batch_size=128)" 854 | ] 855 | }, 856 | { 857 | "cell_type": "code", 858 | "execution_count": 748, 859 | "metadata": { 860 | "id": "RBkmDeUW9hE4" 861 | }, 862 | "outputs": [], 863 | "source": [ 864 | "# Loading the saved model\n", 865 | "model = tf.keras.models.load_model(model_save_path)" 866 | ] 867 | }, 868 | { 869 | "cell_type": "code", 870 | "execution_count": 749, 871 | "metadata": { 872 | "colab": { 873 | "base_uri": "https://localhost:8080/" 874 | }, 875 | "id": "tFz9Tb0I9hE4", 876 | "outputId": "1c3b3528-54ae-4ee2-ab04-77429211cbef" 877 | }, 878 | "outputs": [ 879 | { 880 | "name": "stdout", 881 | "output_type": "stream", 882 | "text": [ 883 | "[4.8647752e-10 1.0000000e+00 4.4540444e-15 1.1016403e-18 8.2022105e-09]\n", 884 | "1\n" 885 | ] 886 | } 887 | ], 888 | "source": [ 889 | "# Inference test\n", 890 | "predict_result = model.predict(np.array([X_test[0]]))\n", 891 | "print(np.squeeze(predict_result))\n", 892 | "print(np.argmax(np.squeeze(predict_result)))" 893 | ] 894 | }, 895 | { 896 | "cell_type": "markdown", 897 | "metadata": { 898 | "id": "S3U4yNWx9hE4" 899 | }, 900 | "source": [ 901 | "# Confusion matrix" 902 | ] 
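903 | }, { "cell_type": "markdown", "metadata": {}, "source": [ "The matrix and report below index the classes as 0-4. As an optional, hedged sketch (not part of the original run, and assuming the one-label-per-row CSV shipped with this repo), the next cell maps those indices to emotion names so the axes are easier to read." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional sketch: map class indices 0-4 to the emotion names.\n", "# Assumes the label CSV shipped with this repo (one label per row).\n", "import pandas as pd\n", "\n", "emotion_labels = pd.read_csv(\n", "    'model/keypoint_classifier/keypoint_classifier_label.csv',\n", "    header=None)[0].tolist()\n", "print(dict(enumerate(emotion_labels)))" ] },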
904 | { 905 | "cell_type": "code", 906 | "execution_count": 750, 907 | "metadata": { 908 | "colab": { 909 | "base_uri": "https://localhost:8080/", 910 | "height": 582 911 | }, 912 | "id": "AP1V6SCk9hE5", 913 | "outputId": "08e41a80-7a4a-4619-8125-ecc371368d19" 914 | }, 915 | "outputs": [ 916 | { 917 | "data": { 918 | "image/png": "[base64 PNG omitted: seaborn heatmap of the 5x5 confusion matrix, true labels on the rows, predicted labels on the columns]", 919 | "text/plain": [ 920 | "
" 921 | ] 922 | }, 923 | "metadata": { 924 | "needs_background": "light" 925 | }, 926 | "output_type": "display_data" 927 | }, 928 | { 929 | "name": "stdout", 930 | "output_type": "stream", 931 | "text": [ 932 | "Classification Report\n", 933 | " precision recall f1-score support\n", 934 | "\n", 935 | " 0 0.99 0.91 0.95 515\n", 936 | " 1 1.00 1.00 1.00 602\n", 937 | " 2 0.98 1.00 0.99 432\n", 938 | " 3 0.99 1.00 1.00 450\n", 939 | " 4 0.92 0.98 0.95 389\n", 940 | "\n", 941 | " accuracy 0.98 2388\n", 942 | " macro avg 0.98 0.98 0.98 2388\n", 943 | "weighted avg 0.98 0.98 0.98 2388\n", 944 | "\n" 945 | ] 946 | } 947 | ], 948 | "source": [ 949 | "import pandas as pd\n", 950 | "import seaborn as sns\n", 951 | "import matplotlib.pyplot as plt\n", 952 | "from sklearn.metrics import confusion_matrix, classification_report\n", 953 | "\n", 954 | "def print_confusion_matrix(y_true, y_pred, report=True):\n", 955 | " labels = sorted(list(set(y_true)))\n", 956 | " cmx_data = confusion_matrix(y_true, y_pred, labels=labels)\n", 957 | " \n", 958 | " df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)\n", 959 | " \n", 960 | " fig, ax = plt.subplots(figsize=(7, 6))\n", 961 | " sns.heatmap(df_cmx, annot=True, fmt='g' ,square=False)\n", 962 | " ax.set_ylim(len(set(y_true)), 0)\n", 963 | " plt.show()\n", 964 | " \n", 965 | " if report:\n", 966 | " print('Classification Report')\n", 967 | " print(classification_report(y_test, y_pred))\n", 968 | "\n", 969 | "Y_pred = model.predict(X_test)\n", 970 | "y_pred = np.argmax(Y_pred, axis=1)\n", 971 | "\n", 972 | "print_confusion_matrix(y_test, y_pred)" 973 | ] 974 | }, 975 | { 976 | "cell_type": "markdown", 977 | "metadata": { 978 | "id": "FNP6aqzc9hE5" 979 | }, 980 | "source": [ 981 | "# Convert to model for Tensorflow-Lite" 982 | ] 983 | }, 984 | { 985 | "cell_type": "code", 986 | "execution_count": 751, 987 | "metadata": { 988 | "id": "ODjnYyld9hE6" 989 | }, 990 | "outputs": [], 991 | "source": [ 992 | "# Save as a model dedicated to inference\n", 993 | "model.save(model_save_path, include_optimizer=False)" 994 | ] 995 | }, 996 | { 997 | "cell_type": "code", 998 | "execution_count": 752, 999 | "metadata": { 1000 | "colab": { 1001 | "base_uri": "https://localhost:8080/" 1002 | }, 1003 | "id": "zRfuK8Y59hE6", 1004 | "outputId": "a4ca585c-b5d5-4244-8291-8674063209bb" 1005 | }, 1006 | "outputs": [ 1007 | { 1008 | "name": "stdout", 1009 | "output_type": "stream", 1010 | "text": [ 1011 | "INFO:tensorflow:Assets written to: C:\\Users\\Ratta\\AppData\\Local\\Temp\\tmpe31ea_65\\assets\n" 1012 | ] 1013 | }, 1014 | { 1015 | "name": "stderr", 1016 | "output_type": "stream", 1017 | "text": [ 1018 | "INFO:tensorflow:Assets written to: C:\\Users\\Ratta\\AppData\\Local\\Temp\\tmpe31ea_65\\assets\n" 1019 | ] 1020 | }, 1021 | { 1022 | "data": { 1023 | "text/plain": [ 1024 | "22432" 1025 | ] 1026 | }, 1027 | "execution_count": 752, 1028 | "metadata": {}, 1029 | "output_type": "execute_result" 1030 | } 1031 | ], 1032 | "source": [ 1033 | "# Transform model (quantization)\n", 1034 | "\n", 1035 | "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n", 1036 | "converter.optimizations = [tf.lite.Optimize.DEFAULT]\n", 1037 | "tflite_quantized_model = converter.convert()\n", 1038 | "\n", 1039 | "open(tflite_save_path, 'wb').write(tflite_quantized_model)" 1040 | ] 1041 | }, 1042 | { 1043 | "cell_type": "markdown", 1044 | "metadata": { 1045 | "id": "CHBPBXdx9hE6" 1046 | }, 1047 | "source": [ 1048 | "# Inference test" 1049 | ] 1050 | }, 1051 | { 1052 | "cell_type": "code", 
1053 | "execution_count": 753, 1054 | "metadata": { 1055 | "id": "mGAzLocO9hE7" 1056 | }, 1057 | "outputs": [], 1058 | "source": [ 1059 | "interpreter = tf.lite.Interpreter(model_path=tflite_save_path)\n", 1060 | "interpreter.allocate_tensors()" 1061 | ] 1062 | }, 1063 | { 1064 | "cell_type": "code", 1065 | "execution_count": 754, 1066 | "metadata": { 1067 | "id": "oQuDK8YS9hE7" 1068 | }, 1069 | "outputs": [], 1070 | "source": [ 1071 | "# Get I / O tensor\n", 1072 | "input_details = interpreter.get_input_details()\n", 1073 | "output_details = interpreter.get_output_details()" 1074 | ] 1075 | }, 1076 | { 1077 | "cell_type": "code", 1078 | "execution_count": 755, 1079 | "metadata": { 1080 | "id": "2_ixAf_l9hE7" 1081 | }, 1082 | "outputs": [], 1083 | "source": [ 1084 | "interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))" 1085 | ] 1086 | }, 1087 | { 1088 | "cell_type": "markdown", 1089 | "metadata": {}, 1090 | "source": [] 1091 | }, 1092 | { 1093 | "cell_type": "code", 1094 | "execution_count": 756, 1095 | "metadata": { 1096 | "colab": { 1097 | "base_uri": "https://localhost:8080/" 1098 | }, 1099 | "id": "s4FoAnuc9hE7", 1100 | "outputId": "91f18257-8d8b-4ef3-c558-e9b5f94fabbf", 1101 | "scrolled": true 1102 | }, 1103 | "outputs": [ 1104 | { 1105 | "name": "stdout", 1106 | "output_type": "stream", 1107 | "text": [ 1108 | "Wall time: 0 ns\n" 1109 | ] 1110 | } 1111 | ], 1112 | "source": [ 1113 | "%%time\n", 1114 | "# Inference implementation\n", 1115 | "interpreter.invoke()\n", 1116 | "tflite_results = interpreter.get_tensor(output_details[0]['index'])" 1117 | ] 1118 | }, 1119 | { 1120 | "cell_type": "code", 1121 | "execution_count": 757, 1122 | "metadata": { 1123 | "colab": { 1124 | "base_uri": "https://localhost:8080/" 1125 | }, 1126 | "id": "vONjp19J9hE8", 1127 | "outputId": "77205e24-fd00-42c4-f7b6-e06e527c2cba" 1128 | }, 1129 | "outputs": [ 1130 | { 1131 | "name": "stdout", 1132 | "output_type": "stream", 1133 | "text": [ 1134 | "[4.2188655e-10 1.0000000e+00 3.5894669e-15 7.3437989e-19 7.6229787e-09]\n", 1135 | "1\n" 1136 | ] 1137 | } 1138 | ], 1139 | "source": [ 1140 | "print(np.squeeze(tflite_results))\n", 1141 | "print(np.argmax(np.squeeze(tflite_results)))" 1142 | ] 1143 | }, 1144 | { 1145 | "cell_type": "code", 1146 | "execution_count": null, 1147 | "metadata": {}, 1148 | "outputs": [], 1149 | "source": [] 1150 | } 1151 | ], 1152 | "metadata": { 1153 | "accelerator": "GPU", 1154 | "colab": { 1155 | "collapsed_sections": [], 1156 | "name": "keypoint_classification_EN.ipynb", 1157 | "provenance": [], 1158 | "toc_visible": true 1159 | }, 1160 | "kernelspec": { 1161 | "display_name": "Python 3", 1162 | "language": "python", 1163 | "name": "python3" 1164 | }, 1165 | "language_info": { 1166 | "codemirror_mode": { 1167 | "name": "ipython", 1168 | "version": 3 1169 | }, 1170 | "file_extension": ".py", 1171 | "mimetype": "text/x-python", 1172 | "name": "python", 1173 | "nbconvert_exporter": "python", 1174 | "pygments_lexer": "ipython3", 1175 | "version": "3.9.9" 1176 | } 1177 | }, 1178 | "nbformat": 4, 1179 | "nbformat_minor": 0 1180 | } 1181 | --------------------------------------------------------------------------------