├── README.md
├── __pycache__
│   ├── hand.cpython-37.pyc
│   ├── hand.cpython-38.pyc
│   ├── predict.cpython-37.pyc
│   ├── predict.cpython-38.pyc
│   ├── window.cpython-37.pyc
│   └── window.cpython-38.pyc
├── get_hands.py
├── hand.py
├── main.py
├── models
│   ├── inference_model
│   │   ├── __model__
│   │   ├── __params__
│   │   └── model.yml
│   └── inference_model1
│       ├── __model__
│       ├── __params__
│       └── model.yml
├── predict.py
├── src
│   ├── MAYA_default.png
│   ├── bu.gif
│   ├── bu.mp4
│   ├── bu.png
│   ├── buhaoyisi.mp4
│   ├── default.png
│   ├── hao.gif
│   ├── hao.mp4
│   ├── hao.png
│   ├── result_default.png
│   ├── si.gif
│   ├── si.mp4
│   ├── si.png
│   ├── yi.gif
│   ├── yi.mp4
│   └── yi.png
├── test
│   ├── 4930.png
│   ├── 516223230.png
│   ├── test_bu.png
│   ├── test_hao.png
│   ├── test_si.png
│   └── test_yi.png
├── videos
│   ├── 052
│   │   ├── 01_052_001_02.avi
│   │   ├── 01_052_001_03.avi
│   │   ├── 01_052_001_04.avi
│   │   └── 02_052_001_01.avi
│   └── 053
│       ├── 01_053_001_01.avi
│       ├── 01_053_001_02.avi
│       ├── 01_053_001_03.avi
│       └── 01_053_001_04.avi
├── window.py
└── window.ui

/README.md:
--------------------------------------------------------------------------------
1 | # Gesture-recognition demo based on OpenCV and PaddleX
2 | 
3 | ### File/directory descriptions:
4 | 
5 | 1. get_hands.py
6 | Crops hand images from the videos in the `videos` directory and saves them in the `hands` directory.
7 | 
8 | 2. hand.py
9 | Wraps the hand-detection logic; uses the open-source [mediapipe](https://google.github.io/mediapipe/).
10 | 
11 | 3. window.ui
12 | Window UI file created with [PyQt5](https://pypi.org/project/PyQt5/).
13 | 
14 | 4. window.py
15 | Python module generated from the UI file with PyQt5.
16 | 
17 | 5. main.py
18 | Main entry point of the program; contains the logic behind the interface.
19 | 
20 | 6. models
21 | Models trained with PaddleX.
22 | 
23 | 7. test
24 | Images for testing the project and its models.
25 | 
26 | ### Video files
27 | 
28 | **This version ships with only two test video directories, and the bundled model recognizes the gestures 不 (bu), 好 (hao), 意 (yi) and 思 (si) — together 不好意思, "excuse me".**
29 | 
30 | Videos for the other gestures: https://pan.baidu.com/s/17sCE1QkJjNP6y0LVfClBWg?pwd=gtsr (extraction code: gtsr)
31 | 
32 | Each gesture has several video files grouped in a single directory; the directories are named 001-053, and the number in the name is the gesture's label.
33 | 
34 | |Label|Gesture|Label|Gesture|Label|Gesture|Label|Gesture|Label|Gesture|
35 | |----|----|----|----|----|----|----|----|----|----|
36 | | 1 | 你 (you) | 2 | 好 (good) | 3 | 谢谢 (thanks) | 4 | 再见 (goodbye) | 5 | 叫 (to be called) |
37 | | 6 | 什么 (what) | 7 | 的 (particle "de") | 8 | 我 (I) | 9 | 是 (to be) | 10 | 学生 (student) |
38 | | 11 | 不 (not) | 12 | 意思 (meaning) | 13 | 对不起 (sorry) | 14 | 反对 (oppose) | 15 | 打 (to make) |
39 | | 16 | 电话 (phone call) | 17 | 通知 (notify) | 18 | 同意 (agree) | 19 | 想法 (idea) | 20 | 很 (very) |
40 | | 21 | 高兴 (glad) | 22 | 认识 (to know) | 23 | 没 (not have) | 24 | 关系 (relation) | 25 | 可以 (can) |
41 | | 26 | 帮 (help) | 27 | 教师 (teacher) | 28 | 职业 (occupation) | 29 | 什么 (what) | 30 | 去 (go) |
42 | | 31 | 坐 (sit/take) | 32 | 飞机 (airplane) | 33 | 开汽车 (drive a car) | 34 | 家 (home) | 35 | 在 (at) |
43 | | 36 | 天津 (Tianjin) | 37 | 多、几 (how many) | 38 | 大 (big) | 39 | 时候 (time) | 40 | 喜欢 (like) |
44 | | 41 | 游泳 (swim) | 42 | 朋友 (friend) | 43 | 现在、今天 (now/today) | 44 | 星期 (week) | 45 | 星期一 (Monday) |
45 | | 46 | 星期二 (Tuesday) | 47 | 星期三 (Wednesday) | 48 | 星期四 (Thursday) | 49 | 星期五 (Friday) | 50 | 星期六 (Saturday) |
46 | | 51 | 星期日 (Sunday) | 52 | 名字 (name) | 53 | 联系 (contact) | | | | |
47 | 
--------------------------------------------------------------------------------
/__pycache__/hand.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/hand.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/hand.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/hand.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/predict.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/predict.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/predict.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/predict.cpython-38.pyc
--------------------------------------------------------------------------------
/__pycache__/window.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/window.cpython-37.pyc
--------------------------------------------------------------------------------
/__pycache__/window.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/__pycache__/window.cpython-38.pyc
--------------------------------------------------------------------------------
/get_hands.py:
--------------------------------------------------------------------------------
1 | """
2 | Crop hand pictures from the videos in `videos/` and save them under `hands/`.
3 | """
4 | import os
5 | import cv2
6 | from hand import HandDetector
7 | 
8 | 
9 | def get_hand(videodirname, imagedirname, imagename, area=None):
10 |     videos = []
11 |     if not os.path.exists(imagedirname):
12 |         os.makedirs(imagedirname)
13 |     if area is None:
14 |         # By default only accept hands found in the upper 3/4 of the frame.
15 |         area = [(0, 0), (1, 0.75)]
16 |     detector = HandDetector()
17 |     for i in os.listdir(videodirname):
18 |         videos.append(os.path.join(videodirname, i))
19 |     count = 0
20 |     for video in videos:
21 |         cap = cv2.VideoCapture(video)
22 |         while True:
23 |             ret, cv2_image = cap.read()
24 |             if not ret:
25 |                 break
26 |             hand, rectangle, success = detector.rectangle(cv2_image, area=area)
27 |             if success:
28 |                 # Save every detected hand as a 256x256 crop.
29 |                 hand = cv2.resize(hand, (256, 256))
30 |                 count = count + 1
31 |                 print(f'{imagedirname}/{imagename}_{count}.png')
32 |                 cv2.imwrite(f'{imagedirname}/{imagename}_{count}.png', hand)
33 |         cap.release()
34 | 
35 | 
36 | if __name__ == '__main__':
37 |     for i in os.listdir('videos'):
38 |         if i != '.DS_Store':
39 |             print(i)
40 |             get_hand(videodirname=f'videos/{i}', imagedirname=f'hands/{i}', imagename=i)
--------------------------------------------------------------------------------
/hand.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from mediapipe.python import solutions
3 | 
4 | 
5 | class HandDetector:
6 |     def __init__(self, static_image_mode=True, max_num_hands=1,
7 |                  min_detection_confidence=0.5, min_tracking_confidence=0.5):
8 |         self.static_image_mode = static_image_mode
9 |         self.max_num_hands = max_num_hands
10 |         self.min_detection_confidence = min_detection_confidence
11 |         self.min_tracking_confidence = min_tracking_confidence
12 |         # Keyword arguments guard against positional-signature changes
13 |         # between mediapipe releases.
14 |         self.hands = solutions.hands.Hands(
15 |             static_image_mode=self.static_image_mode,
16 |             max_num_hands=self.max_num_hands,
17 |             min_detection_confidence=self.min_detection_confidence,
18 |             min_tracking_confidence=self.min_tracking_confidence)
19 |         self.draw = solutions.drawing_utils
20 |         self.solutions = solutions
21 | 
22 |     def handMarks(self, image):
23 |         """Draw mediapipe hand landmarks on the image (in place) and return it."""
24 |         imgRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
25 |         results = self.hands.process(image=imgRGB)
26 |         hand_landmarks = results.multi_hand_landmarks
27 |         if hand_landmarks:
28 |             for lms in hand_landmarks:
29 |                 self.draw.draw_landmarks(image, lms, self.solutions.hands.HAND_CONNECTIONS)
30 |         return image
31 | 
32 |     def rectangle(self, image, image_size=256, hand_size=0.5, area=None):
33 |         """Return (hand crop, annotated image, success flag) for the first detected hand."""
34 |         if area is None:
35 |             area = [(0, 0), (1, 1)]
36 |         hand = image
37 |         rectangle = image
38 |         success = False
39 |         imgRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
40 |         results = self.hands.process(image=imgRGB)
41 |         hand_landmarks = results.multi_hand_landmarks
42 |         h, w, c = image.shape
43 |         if hand_landmarks:
44 |             # Bounding box over all landmark points.
45 |             x_max = 0
46 |             y_max = 0
47 |             x_min = w
48 |             y_min = h
49 |             for lms in hand_landmarks:
50 |                 for lm in lms.landmark:
51 |                     x, y = int(lm.x * w), int(lm.y * h)
52 |                     if x > x_max:
53 |                         x_max = x
54 |                     if x < x_min:
55 |                         x_min = x
56 |                     if y > y_max:
57 |                         y_max = y
58 |                     if y < y_min:
59 |                         y_min = y
60 |             x_mean = (x_min + x_max) / 2
61 |             y_mean = (y_min + y_max) / 2
62 |             # Half of the average box side; dividing by hand_size pads the square crop.
63 |             width = (x_max - x_min + y_max - y_min) / 4
64 |             x_min = int(x_mean - width / hand_size)
65 |             y_min = int(y_mean - width / hand_size)
66 |             x_max = int(x_mean + width / hand_size)
67 |             y_max = int(y_mean + width / hand_size)
68 |             # Accept the hand only if the padded box lies completely inside `area`.
69 |             if x_min > area[0][0] * w and y_min > area[0][1] * h and x_max < area[1][0] * w and y_max < area[1][1] * h:
70 |                 success = True
71 |                 hand = image[y_min:y_max, x_min:x_max]
72 |                 hand = cv2.resize(hand, (image_size, image_size))
73 |                 rectangle = cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
74 | 
75 |         return hand, rectangle, success
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | from PyQt5.QtGui import QPixmap
2 | from window import Ui_MainWindow
3 | from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QInputDialog
4 | from PyQt5 import QtCore, QtGui, QtWidgets
5 | import sys
6 | import cv2
7 | from hand import HandDetector
8 | from predict import predict
9 | import time
10 | 
11 | 
12 | def label_show(label, image):
13 |     # BGRA byte order matches QImage.Format_RGB32 on little-endian machines.
14 |     qt_img_buf = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
15 |     qt_img = QtGui.QImage(qt_img_buf.data, qt_img_buf.shape[1], qt_img_buf.shape[0], QtGui.QImage.Format_RGB32)
16 |     image = QPixmap.fromImage(qt_img).scaled(label.width(), label.height())
17 |     label.setPixmap(image)
18 | 
19 | 
20 | class MainWindow(QMainWindow, Ui_MainWindow):
21 |     def __init__(self, parent=None):
22 |         super(MainWindow, self).__init__(parent)
23 |         self.image_file = None
24 |         self.setupUi(self)
25 |         self.initUI()
26 |         self.hand_size = 0.6
27 |         self.detector = HandDetector()
28 |         self.videoFPS = 24
29 |         self.image = False
30 |         self.video = False
31 |         self.camera = False
32 |         self.camera_selected = 0
33 |         self.detect = False
34 |         self.model = predict(model='models/inference_model1')
35 |         self.cap = None
36 | 
37 |     def maya(self, word=None, speed=200, action=True):
38 |         # Keep a reference on self so the movie is not garbage-collected.
39 |         self.gif = QtGui.QMovie(f'src/{word}.gif')
40 |         self.labelMAYA.setMovie(self.gif)
41 |         self.gif.setSpeed(speed)
42 |         if action:
43 |             self.gif.start()
44 |         else:
45 |             self.gif.stop()
46 | 
47 |     def print_log(self, log_words):
48 |         current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
49 |         self.textLog.append(current_time + ':' + log_words)  # show the message in the log area
50 |         cursor = self.textLog.textCursor()
51 |         self.textLog.moveCursor(cursor.End)  # move the cursor to the end so the newest entry stays visible
52 |         QtWidgets.QApplication.processEvents()
53 | 
54 |     def initUI(self):
55 |         self.setWindowTitle('手语识别可视化界面')
56 | 
57 |         self.actionLoadImage.triggered.connect(self.load_image)
58 |         self.actionStart.triggered.connect(self.start_detect)
59 |         self.actionLoadVideo.triggered.connect(self.load_video)
60 |         self.actionVideoFPS.triggered.connect(self.change_VideoFPS)
61 |         self.actionOpenCamera.triggered.connect(self.open_camera)
62 |         self.actionCloseCamera.triggered.connect(self.close_camera)
63 |         self.actionPredict.triggered.connect(self.predict)
64 |         self.actionSelectedCamera.triggered.connect(self.select_camera)
65 |         self.actionHandSize.triggered.connect(self.changeHandSize)
66 |         self.actionModelFile.triggered.connect(self.changeModelFile)
67 |         self.actionMinDetectionConfidence.triggered.connect(self.changeMinDetectionConfidence)
68 |         self.actionMinTrackingConfidence.triggered.connect(self.changeMinTrackingConfidence)
69 |         self.actionExit.triggered.connect(self.exit)
70 | 
71 |     def exit(self):
72 |         # Quit the application.
73 |         app = QApplication.instance()
74 |         app.quit()
75 | 
76 |     def load_image(self):
77 |         image_name, image_type = QFileDialog.getOpenFileName(self, '打开图片', r'./', '图片 (*.png *.jpg *.jpeg)')
78 |         if not image_name:  # dialog was cancelled
79 |             return
80 |         cv2_image = cv2.imread(image_name)
81 |         label_show(label=self.labelImageOrVideo, image=cv2_image)
82 |         self.image = True
83 |         self.print_log(log_words=f'load image:{image_name}')
84 |         self.image_file = cv2_image
85 | 
86 |     def load_video(self):
87 |         self.video_name, video_type = QFileDialog.getOpenFileName(self, '打开视频', r'./', '视频 (*.avi *.mp4)')
88 |         if not self.video_name:  # dialog was cancelled
89 |             return
90 |         self.cap = cv2.VideoCapture(self.video_name)
91 |         self.video = True
92 |         self.print_log(log_words=f'load video:{self.video_name}')
93 |         while True:
94 |             ret, cv2_image = self.cap.read()
95 |             if not ret:
96 |                 break
97 |             label_show(label=self.labelImageOrVideo, image=cv2_image)
98 |             cv2.waitKey(int(1000 / self.videoFPS))
99 | 
100 |     def start_detect(self):
101 |         maya = cv2.imread('src/MAYA_default.png')
102 |         self.detect = True
103 |         if self.image:
104 |             hand, rectangle, success = self.detector.rectangle(self.image_file, hand_size=self.hand_size)
105 |             if success:
106 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
107 |             else:
108 |                 self.print_log(log_words='cannot find hand')
109 |         if self.video:
110 |             cap = cv2.VideoCapture(self.video_name)
111 |             while True:
112 |                 ret, image = cap.read()
113 |                 if not ret:
114 |                     break
115 |                 hand, rectangle, success = self.detector.rectangle(image, hand_size=self.hand_size)
116 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
117 |                 if success:
118 |                     label_show(label=self.labelMAYA, image=hand)
119 |                 else:
120 |                     label_show(label=self.labelMAYA, image=maya)
121 |                 cv2.waitKey(int(1000 / self.videoFPS))
122 |         if self.camera:
123 |             self.print_log(log_words='using camera detecting')
124 |             while True:
125 |                 ret, image = self.cap.read()
126 |                 if not ret:
127 |                     break
128 |                 hand, rectangle, success = self.detector.rectangle(image, hand_size=self.hand_size)
129 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
130 |                 if success:
131 |                     label_show(label=self.labelMAYA, image=hand)
132 |                 else:
133 |                     label_show(label=self.labelMAYA, image=maya)
134 |                 cv2.waitKey(int(1000 / self.videoFPS))
135 | 
136 |     def predict(self):
137 |         if self.image:
138 |             hand, rectangle, success = self.detector.rectangle(self.image_file, hand_size=self.hand_size)
139 |             if success:
140 |                 result = self.model.result(hand)  # run inference once and reuse the result
141 |                 word = result['category']
142 |                 score = result['score']
143 |                 result_image = QPixmap(f'src/{word}.png')
144 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
145 |                 self.labelResult.setPixmap(result_image)
146 |                 self.print_log(log_words='result:' + word + ' score: ' + str(score))
147 |                 self.maya(word=word, speed=100)
148 |             else:
149 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
150 |                 self.print_log(log_words='cannot find hand')
151 | 
152 |         if self.camera:
153 |             flag = 0
154 |             # Simple vote over 8 consecutive frames before the animation may change.
155 |             dic_count = {'bu': 0, 'hao': 0, 'yi': 0, 'si': 0}
156 |             while True:
157 |                 ret, cv2_image = self.cap.read()
158 |                 if not ret:
159 |                     break
160 |                 hand, rectangle, success = self.detector.rectangle(cv2_image, hand_size=self.hand_size)
161 |                 label_show(label=self.labelImageOrVideo, image=rectangle)
162 |                 if success:
163 |                     hand = cv2.resize(hand, (128, 128))
164 |                     result = self.model.result(hand)
165 |                     word = result['category']
166 |                     score = result['score']
167 |                     self.print_log(log_words='result:' + word + ' score: ' + str(score))
168 |                     image = QPixmap(f'src/{word}.png')
169 |                     self.labelResult.setPixmap(image)
170 |                     dic_count[word] += 1
171 |                     if sum(dic_count.values()) < 8:
172 |                         word_show = max(dic_count, key=dic_count.get)
173 |                         if flag == 0:
174 |                             self.maya(word=word_show)
175 |                             flag = 1
176 |                     else:
177 |                         dic_count = {'bu': 0, 'hao': 0, 'yi': 0, 'si': 0}
178 |                         flag = 0
179 |                 else:
180 |                     label_show(label=self.labelMAYA, image=cv2.imread('src/MAYA_default.png'))
181 |                     label_show(label=self.labelResult, image=cv2.imread('src/result_default.png'))
182 |                 cv2.waitKey(int(1000 / self.videoFPS))
183 | 
184 |     def open_camera(self):
185 |         camera = cv2.VideoCapture(self.camera_selected)
186 |         self.cap = camera
187 |         self.camera = True
188 |         self.print_log(log_words='opening camera')
189 |         while True:
190 |             ret, cv2_image = camera.read()
191 |             if not ret:
192 |                 break
193 |             label_show(label=self.labelImageOrVideo, image=cv2_image)
194 |             cv2.waitKey(int(1000 / self.videoFPS))
195 | 
196 |     def change_VideoFPS(self):
197 |         number, ok = QInputDialog.getInt(self, "input video fps", "(10-60)")
198 |         if ok:
199 |             self.videoFPS = number
200 |             self.print_log(log_words=f'set video fps: {str(number)}')
201 | 
202 |     def close_camera(self):
203 |         self.camera = False
204 |         self.cap.release()
205 |         self.print_log(log_words='camera has been closed')
206 |         self.maya(action=False)
207 |         label_show(label=self.labelResult, image=cv2.imread('src/result_default.png'))
208 |         label_show(label=self.labelMAYA, image=cv2.imread('src/MAYA_default.png'))
209 |         label_show(label=self.labelImageOrVideo, image=cv2.imread('src/default.png'))
210 | 
211 |     def select_camera(self):
212 |         if self.camera_selected == 0:
213 |             self.camera_selected = 1
214 |             self.actionSelectedCamera.setText('SelectedCamera 1')
215 |             self.print_log(log_words='camera 1 has been selected')
216 |         else:
217 |             self.camera_selected = 0
218 |             self.actionSelectedCamera.setText('SelectedCamera 0')
219 |             self.print_log(log_words='camera 0 has been selected')
220 | 
221 |     def changeHandSize(self):
222 |         number, ok = QInputDialog.getDouble(self, "input hand_size", "(0-1)")
223 |         if ok:
224 |             self.hand_size = number
225 |             self.print_log(log_words=f'set hand size: {str(number)}')
226 | 
227 |     def changeModelFile(self):
228 |         path = QFileDialog.getExistingDirectory(self, "choose model filepath", '/')
229 |         if not path:
230 |             return
231 |         self.model = predict(model=path)
232 |         self.print_log(log_words=f'set model: {path}')
233 | 
234 |     def changeMinDetectionConfidence(self):
235 |         number, ok = QInputDialog.getDouble(self, "MinDetectionConfidence", "(0-1)")
236 |         if ok:
237 |             # Rebuild the detector so the new threshold actually takes effect.
238 |             self.detector = HandDetector(min_detection_confidence=number,
239 |                                          min_tracking_confidence=self.detector.min_tracking_confidence)
240 |             self.print_log(log_words=f'MinDetectionConfidence: {str(number)}')
241 | 
242 |     def changeMinTrackingConfidence(self):
243 |         number, ok = QInputDialog.getDouble(self, "MinTrackingConfidence", "(0-1)")
244 |         if ok:
245 |             self.detector = HandDetector(min_detection_confidence=self.detector.min_detection_confidence,
246 |                                          min_tracking_confidence=number)
247 |             self.print_log(log_words=f'MinTrackingConfidence: {str(number)}')
248 | 
249 | 
250 | if __name__ == "__main__":
251 |     app = QApplication(sys.argv)
252 |     window = MainWindow()
253 |     window.show()
254 |     sys.exit(app.exec_())
--------------------------------------------------------------------------------
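The detector and classifier above can also be chained outside the GUI. The following is a minimal sketch (not part of the repository) that runs detection and classification on one of the bundled test images; it assumes the repository root as the working directory and, like predict.py, a paddlex installation with GPU support:

```python
# Minimal sketch: detect a hand, crop it, classify the crop.
import cv2

from hand import HandDetector
from predict import predict

detector = HandDetector()
model = predict(model='models/inference_model1')

image = cv2.imread('test/test_hao.png')                     # bundled test image
hand, boxed, ok = detector.rectangle(image, hand_size=0.6)  # square crop around the hand
if ok:
    result = model.result(hand)                             # dict with 'category' and 'score'
    print(result['category'], result['score'])
else:
    print('no hand found')
```

/models/inference_model/__model__: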
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/models/inference_model/__model__ -------------------------------------------------------------------------------- /models/inference_model/__params__: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/models/inference_model/__params__ -------------------------------------------------------------------------------- /models/inference_model/model.yml: -------------------------------------------------------------------------------- 1 | Model: MobileNetV3_large_ssld 2 | Transforms: 3 | - ResizeByShort: 4 | max_size: -1 5 | short_size: 329 6 | - CenterCrop: 7 | crop_size: 288 8 | - Normalize: 9 | mean: 10 | - 0.485 11 | - 0.456 12 | - 0.406 13 | std: 14 | - 0.229 15 | - 0.224 16 | - 0.225 17 | TransformsMode: RGB 18 | _Attributes: 19 | eval_metrics: 20 | acc1: 0.9911190053285968 21 | fixed_input_shape: null 22 | labels: 23 | - bu 24 | - hao 25 | - si 26 | - yi 27 | model_type: classifier 28 | num_classes: 4 29 | _ModelInputsOutputs: 30 | test_inputs: 31 | - - image 32 | - image 33 | test_outputs: 34 | - - predict 35 | - softmax_0.tmp_0 36 | _init_params: 37 | num_classes: 4 38 | completed_epochs: 0 39 | status: Infer 40 | version: 1.2.8 41 | -------------------------------------------------------------------------------- /models/inference_model1/__model__: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/models/inference_model1/__model__ -------------------------------------------------------------------------------- /models/inference_model1/__params__: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/models/inference_model1/__params__ -------------------------------------------------------------------------------- /models/inference_model1/model.yml: -------------------------------------------------------------------------------- 1 | Model: MobileNetV3_large_ssld 2 | Transforms: 3 | - ResizeByShort: 4 | max_size: -1 5 | short_size: 329 6 | - CenterCrop: 7 | crop_size: 288 8 | - Normalize: 9 | mean: 10 | - 0.485 11 | - 0.456 12 | - 0.406 13 | std: 14 | - 0.229 15 | - 0.224 16 | - 0.225 17 | TransformsMode: RGB 18 | _Attributes: 19 | eval_metrics: 20 | acc1: 1.0 21 | fixed_input_shape: null 22 | labels: 23 | - bu 24 | - hao 25 | - si 26 | - yi 27 | model_type: classifier 28 | num_classes: 4 29 | _ModelInputsOutputs: 30 | test_inputs: 31 | - - image 32 | - image 33 | test_outputs: 34 | - - predict 35 | - softmax_0.tmp_0 36 | _init_params: 37 | num_classes: 4 38 | completed_epochs: 0 39 | status: Infer 40 | version: 1.2.8 41 | -------------------------------------------------------------------------------- /predict.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | import paddlex.deploy 4 | 5 | 6 | class predict: 7 | def __init__(self, model): 8 | self.model = paddlex.deploy.Predictor(model, use_gpu=True) 9 | self.model_type = self.model.model_type 10 | 11 | def result(self, image): 12 | image = image.astype('float32') 13 | image = cv2.resize(image, (288, 288)) 14 | result = 
self.model.predict(image)
15 |         return result[0]  # top-1 prediction: a dict with 'category' and 'score'
16 | 
17 | 
18 | if __name__ == "__main__":
19 |     model = predict(model=r'D:\Codes\Python\gesture\models\inference_model1')
20 |     image = cv2.imread('hands/bu/bu61.png')
21 |     print(model.result(image))
22 | 
--------------------------------------------------------------------------------
/src/MAYA_default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/MAYA_default.png
--------------------------------------------------------------------------------
/src/bu.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/bu.gif
--------------------------------------------------------------------------------
/src/bu.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/bu.mp4
--------------------------------------------------------------------------------
/src/bu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/bu.png
--------------------------------------------------------------------------------
/src/buhaoyisi.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/buhaoyisi.mp4
--------------------------------------------------------------------------------
/src/default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/default.png
--------------------------------------------------------------------------------
/src/hao.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/hao.gif
--------------------------------------------------------------------------------
/src/hao.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/hao.mp4
--------------------------------------------------------------------------------
/src/hao.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/hao.png
--------------------------------------------------------------------------------
/src/result_default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/result_default.png
--------------------------------------------------------------------------------
/src/si.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/si.gif
--------------------------------------------------------------------------------
/src/si.mp4:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/si.mp4 -------------------------------------------------------------------------------- /src/si.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/si.png -------------------------------------------------------------------------------- /src/yi.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/yi.gif -------------------------------------------------------------------------------- /src/yi.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/yi.mp4 -------------------------------------------------------------------------------- /src/yi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/src/yi.png -------------------------------------------------------------------------------- /test/4930.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/4930.png -------------------------------------------------------------------------------- /test/516223230.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/516223230.png -------------------------------------------------------------------------------- /test/test_bu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/test_bu.png -------------------------------------------------------------------------------- /test/test_hao.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/test_hao.png -------------------------------------------------------------------------------- /test/test_si.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/test_si.png -------------------------------------------------------------------------------- /test/test_yi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/test/test_yi.png -------------------------------------------------------------------------------- /videos/052/01_052_001_02.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/052/01_052_001_02.avi -------------------------------------------------------------------------------- /videos/052/01_052_001_03.avi: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/052/01_052_001_03.avi -------------------------------------------------------------------------------- /videos/052/01_052_001_04.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/052/01_052_001_04.avi -------------------------------------------------------------------------------- /videos/052/02_052_001_01.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/052/02_052_001_01.avi -------------------------------------------------------------------------------- /videos/053/01_053_001_01.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/053/01_053_001_01.avi -------------------------------------------------------------------------------- /videos/053/01_053_001_02.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/053/01_053_001_02.avi -------------------------------------------------------------------------------- /videos/053/01_053_001_03.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/053/01_053_001_03.avi -------------------------------------------------------------------------------- /videos/053/01_053_001_04.avi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangze2017/gesture/510a8f9cf8fd08371db0c34d80495156e7b5acdc/videos/053/01_053_001_04.avi -------------------------------------------------------------------------------- /window.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'window.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.4 6 | # 7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 
9 | 
10 | 
11 | from PyQt5 import QtCore, QtGui, QtWidgets
12 | 
13 | 
14 | class Ui_MainWindow(object):
15 |     def setupUi(self, MainWindow):
16 |         MainWindow.setObjectName("MainWindow")
17 |         MainWindow.resize(800, 600)
18 |         sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
19 |         sizePolicy.setHorizontalStretch(0)
20 |         sizePolicy.setVerticalStretch(0)
21 |         sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
22 |         MainWindow.setSizePolicy(sizePolicy)
23 |         MainWindow.setMinimumSize(QtCore.QSize(800, 600))
24 |         MainWindow.setMaximumSize(QtCore.QSize(800, 600))
25 |         self.centralwidget = QtWidgets.QWidget(MainWindow)
26 |         self.centralwidget.setObjectName("centralwidget")
27 |         self.groupBoxImageOrVideo = QtWidgets.QGroupBox(self.centralwidget)
28 |         self.groupBoxImageOrVideo.setGeometry(QtCore.QRect(10, 10, 512, 384))
29 |         self.groupBoxImageOrVideo.setAutoFillBackground(True)
30 |         self.groupBoxImageOrVideo.setObjectName("groupBoxImageOrVideo")
31 |         self.labelImageOrVideo = QtWidgets.QLabel(self.groupBoxImageOrVideo)
32 |         self.labelImageOrVideo.setGeometry(QtCore.QRect(10, 20, 491, 351))
33 |         self.labelImageOrVideo.setAutoFillBackground(True)
34 |         self.labelImageOrVideo.setText("")
35 |         self.labelImageOrVideo.setPixmap(QtGui.QPixmap("src/default.png"))
36 |         self.labelImageOrVideo.setScaledContents(True)
37 |         self.labelImageOrVideo.setAlignment(QtCore.Qt.AlignCenter)
38 |         self.labelImageOrVideo.setObjectName("labelImageOrVideo")
39 |         self.groupBoxMAYA = QtWidgets.QGroupBox(self.centralwidget)
40 |         self.groupBoxMAYA.setGeometry(QtCore.QRect(530, 10, 261, 384))
41 |         self.groupBoxMAYA.setAutoFillBackground(True)
42 |         self.groupBoxMAYA.setObjectName("groupBoxMAYA")
43 |         self.labelMAYA = QtWidgets.QLabel(self.groupBoxMAYA)
44 |         self.labelMAYA.setGeometry(QtCore.QRect(10, 20, 241, 351))
45 |         self.labelMAYA.setText("")
46 |         self.labelMAYA.setPixmap(QtGui.QPixmap("src/MAYA_default.png"))
47 |         self.labelMAYA.setScaledContents(True)
48 |         self.labelMAYA.setObjectName("labelMAYA")
49 |         self.groupBoxLog = QtWidgets.QGroupBox(self.centralwidget)
50 |         self.groupBoxLog.setGeometry(QtCore.QRect(10, 400, 511, 141))
51 |         self.groupBoxLog.setAutoFillBackground(True)
52 |         self.groupBoxLog.setObjectName("groupBoxLog")
53 |         self.textLog = QtWidgets.QTextBrowser(self.groupBoxLog)
54 |         self.textLog.setGeometry(QtCore.QRect(10, 20, 491, 111))
55 |         self.textLog.setObjectName("textLog")
56 |         self.groupBoxResult = QtWidgets.QGroupBox(self.centralwidget)
57 |         self.groupBoxResult.setGeometry(QtCore.QRect(530, 400, 261, 141))
58 |         self.groupBoxResult.setAutoFillBackground(True)
59 |         self.groupBoxResult.setObjectName("groupBoxResult")
60 |         self.labelResult = QtWidgets.QLabel(self.groupBoxResult)
61 |         self.labelResult.setGeometry(QtCore.QRect(10, 20, 241, 111))
62 |         self.labelResult.setAutoFillBackground(True)
63 |         self.labelResult.setText("")
64 |         self.labelResult.setPixmap(QtGui.QPixmap("src/result_default.png"))
65 |         self.labelResult.setScaledContents(True)
66 |         self.labelResult.setAlignment(QtCore.Qt.AlignCenter)
67 |         self.labelResult.setObjectName("labelResult")
68 |         MainWindow.setCentralWidget(self.centralwidget)
69 |         self.menubar = QtWidgets.QMenuBar(MainWindow)
70 |         self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
71 |         self.menubar.setObjectName("menubar")
72 |         self.menuFile = QtWidgets.QMenu(self.menubar)
73 |         self.menuFile.setObjectName("menuFile")
74 |         self.menuLoadLocalFile = QtWidgets.QMenu(self.menuFile)
75 | 
self.menuLoadLocalFile.setObjectName("menuLoadLocalFile") 76 | self.menuCamera = QtWidgets.QMenu(self.menubar) 77 | self.menuCamera.setObjectName("menuCamera") 78 | self.menuView = QtWidgets.QMenu(self.menubar) 79 | self.menuView.setObjectName("menuView") 80 | self.menuActions = QtWidgets.QMenu(self.menubar) 81 | self.menuActions.setObjectName("menuActions") 82 | self.menuSettings = QtWidgets.QMenu(self.menubar) 83 | self.menuSettings.setObjectName("menuSettings") 84 | MainWindow.setMenuBar(self.menubar) 85 | self.statusbar = QtWidgets.QStatusBar(MainWindow) 86 | self.statusbar.setObjectName("statusbar") 87 | MainWindow.setStatusBar(self.statusbar) 88 | self.actionExit = QtWidgets.QAction(MainWindow) 89 | self.actionExit.setObjectName("actionExit") 90 | self.actionCloseCamera = QtWidgets.QAction(MainWindow) 91 | self.actionCloseCamera.setObjectName("actionCloseCamera") 92 | self.actionLoadImage = QtWidgets.QAction(MainWindow) 93 | self.actionLoadImage.setObjectName("actionLoadImage") 94 | self.actionLoadVideo = QtWidgets.QAction(MainWindow) 95 | self.actionLoadVideo.setObjectName("actionLoadVideo") 96 | self.actionStart = QtWidgets.QAction(MainWindow) 97 | self.actionStart.setObjectName("actionStart") 98 | self.actionStop = QtWidgets.QAction(MainWindow) 99 | self.actionStop.setObjectName("actionStop") 100 | self.actionLoop = QtWidgets.QAction(MainWindow) 101 | self.actionLoop.setObjectName("actionLoop") 102 | self.actionHandSize = QtWidgets.QAction(MainWindow) 103 | self.actionHandSize.setObjectName("actionHandSize") 104 | self.actionMaxNumHands = QtWidgets.QAction(MainWindow) 105 | self.actionMaxNumHands.setObjectName("actionMaxNumHands") 106 | self.actionMinDetectionConfidence = QtWidgets.QAction(MainWindow) 107 | self.actionMinDetectionConfidence.setObjectName("actionMinDetectionConfidence") 108 | self.actionMinTrackingConfidence = QtWidgets.QAction(MainWindow) 109 | self.actionMinTrackingConfidence.setObjectName("actionMinTrackingConfidence") 110 | self.actionModelFile = QtWidgets.QAction(MainWindow) 111 | self.actionModelFile.setObjectName("actionModelFile") 112 | self.actionPredict = QtWidgets.QAction(MainWindow) 113 | self.actionPredict.setObjectName("actionPredict") 114 | self.actionCamera_0 = QtWidgets.QAction(MainWindow) 115 | self.actionCamera_0.setObjectName("actionCamera_0") 116 | self.actionCamera_1 = QtWidgets.QAction(MainWindow) 117 | self.actionCamera_1.setObjectName("actionCamera_1") 118 | self.actionOpenCamera_2 = QtWidgets.QAction(MainWindow) 119 | self.actionOpenCamera_2.setObjectName("actionOpenCamera_2") 120 | self.actionOpenCamera = QtWidgets.QAction(MainWindow) 121 | self.actionOpenCamera.setObjectName("actionOpenCamera") 122 | self.actionSelectedCamera = QtWidgets.QAction(MainWindow) 123 | self.actionSelectedCamera.setObjectName("actionSelectedCamera") 124 | self.actionVideoFPS = QtWidgets.QAction(MainWindow) 125 | self.actionVideoFPS.setObjectName("actionVideoFPS") 126 | self.menuLoadLocalFile.addAction(self.actionLoadImage) 127 | self.menuLoadLocalFile.addAction(self.actionLoadVideo) 128 | self.menuFile.addAction(self.menuLoadLocalFile.menuAction()) 129 | self.menuFile.addSeparator() 130 | self.menuFile.addAction(self.actionExit) 131 | self.menuCamera.addAction(self.actionOpenCamera) 132 | self.menuCamera.addAction(self.actionCloseCamera) 133 | self.menuView.addAction(self.actionHandSize) 134 | self.menuView.addAction(self.actionMinDetectionConfidence) 135 | self.menuView.addAction(self.actionMinTrackingConfidence) 136 | 
self.menuView.addAction(self.actionModelFile)
137 |         self.menuActions.addAction(self.actionStart)
138 |         self.menuActions.addAction(self.actionPredict)
139 |         self.menuSettings.addAction(self.actionSelectedCamera)
140 |         self.menuSettings.addAction(self.actionVideoFPS)
141 |         self.menubar.addAction(self.menuFile.menuAction())
142 |         self.menubar.addAction(self.menuCamera.menuAction())
143 |         self.menubar.addAction(self.menuView.menuAction())
144 |         self.menubar.addAction(self.menuActions.menuAction())
145 |         self.menubar.addAction(self.menuSettings.menuAction())
146 | 
147 |         self.retranslateUi(MainWindow)
148 |         QtCore.QMetaObject.connectSlotsByName(MainWindow)
149 | 
150 |     def retranslateUi(self, MainWindow):
151 |         _translate = QtCore.QCoreApplication.translate
152 |         MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
153 |         self.groupBoxImageOrVideo.setTitle(_translate("MainWindow", "Image/Video"))
154 |         self.groupBoxMAYA.setTitle(_translate("MainWindow", "MAYA"))
155 |         self.groupBoxLog.setTitle(_translate("MainWindow", "Log"))
156 |         self.groupBoxResult.setTitle(_translate("MainWindow", "Result"))
157 |         self.menuFile.setTitle(_translate("MainWindow", "File"))
158 |         self.menuLoadLocalFile.setTitle(_translate("MainWindow", "LoadLocalFile"))
159 |         self.menuCamera.setTitle(_translate("MainWindow", "Camera"))
160 |         self.menuView.setTitle(_translate("MainWindow", "Parameters"))
161 |         self.menuActions.setTitle(_translate("MainWindow", "Actions"))
162 |         self.menuSettings.setTitle(_translate("MainWindow", "Settings"))
163 |         self.actionExit.setText(_translate("MainWindow", "Exit"))
164 |         self.actionCloseCamera.setText(_translate("MainWindow", "CloseCamera"))
165 |         self.actionLoadImage.setText(_translate("MainWindow", "LoadImage"))
166 |         self.actionLoadVideo.setText(_translate("MainWindow", "LoadVideo"))
167 |         self.actionStart.setText(_translate("MainWindow", "StartDetect"))
168 |         self.actionStop.setText(_translate("MainWindow", "StopDetect"))
169 |         self.actionLoop.setText(_translate("MainWindow", "Loop(ON)"))
170 |         self.actionHandSize.setText(_translate("MainWindow", "HandSize"))
171 |         self.actionMaxNumHands.setText(_translate("MainWindow", "MaxNumHands"))
172 |         self.actionMinDetectionConfidence.setText(_translate("MainWindow", "MinDetectionConfidence"))
173 |         self.actionMinTrackingConfidence.setText(_translate("MainWindow", "MinTrackingConfidence"))
174 |         self.actionModelFile.setText(_translate("MainWindow", "ModelFile"))
175 |         self.actionPredict.setText(_translate("MainWindow", "Predict"))
176 |         self.actionCamera_0.setText(_translate("MainWindow", "Camera 0"))
177 |         self.actionCamera_1.setText(_translate("MainWindow", "Camera 1"))
178 |         self.actionOpenCamera_2.setText(_translate("MainWindow", "OpenCamera"))
179 |         self.actionOpenCamera.setText(_translate("MainWindow", "OpenCamera"))
180 |         self.actionSelectedCamera.setText(_translate("MainWindow", "SelectedCamera0"))
181 |         self.actionVideoFPS.setText(_translate("MainWindow", "VideoFPS"))
--------------------------------------------------------------------------------
/window.ui:
--------------------------------------------------------------------------------
(Qt Designer XML; its markup did not survive extraction, only text nodes remain. The file defines the fixed 800x600 main window — the Image/Video, MAYA, Log and Result group boxes and the File, Camera, Parameters, Actions and Settings menus — for which window.py above is the pyuic5-generated equivalent.)
--------------------------------------------------------------------------------
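window.py was generated from window.ui with pyuic5 (its header records PyQt5 UI code generator 5.15.4). After editing the form in Qt Designer, the Python module can be regenerated with:

```
pyuic5 window.ui -o window.py
```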