├── research
│   ├── yuanban.mp4
│   ├── saved_model
│   │   ├── ikun_classifier45.index
│   │   └── ikun_classifier45.data-00000-of-00001
│   ├── patch_video.py
│   ├── Kalman2D.py
│   ├── animate.py
│   ├── trajectory_plot.py
│   └── write_frame_landmarks.py
├── resources
│   ├── animation.gif
│   ├── raw_frame.gif
│   ├── trajplot.gif
│   ├── composition.gif
│   └── segmentation.gif
├── PoseDefinition.py
├── debugtellopy.py
├── README.md
├── SpeechAgent.py
├── DataPipeline.py
├── ModelInference.py
├── PID.py
├── ControlCentre.py
└── ControlCentreUI.py

/research/yuanban.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/research/yuanban.mp4
--------------------------------------------------------------------------------
/resources/animation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/resources/animation.gif
--------------------------------------------------------------------------------
/resources/raw_frame.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/resources/raw_frame.gif
--------------------------------------------------------------------------------
/resources/trajplot.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/resources/trajplot.gif
--------------------------------------------------------------------------------
/resources/composition.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/resources/composition.gif
--------------------------------------------------------------------------------
/resources/segmentation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/resources/segmentation.gif
--------------------------------------------------------------------------------
/research/saved_model/ikun_classifier45.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/research/saved_model/ikun_classifier45.index
--------------------------------------------------------------------------------
/research/saved_model/ikun_classifier45.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ViolinLee/TelloDroneDetectionPython/HEAD/research/saved_model/ikun_classifier45.data-00000-of-00001
--------------------------------------------------------------------------------
/PoseDefinition.py:
--------------------------------------------------------------------------------
1 | class PoseDefinition:
2 |     part_id_dict = {
3 |         # 'nose_id': [0],
4 |         'left_eye_id': [1, 2, 3],
5 |         'right_eye_id': [4, 5, 6],
6 |         'mouse_id_id': [9, 10],
7 |         'shoulder_id': [11, 12],
8 |         'left_wrist_id': [12, 14, 16],
9 |         'right_wrist_id': [11, 13, 15],
10 |         'left_hand_id': [16, 18, 20, 22, 16],
11 |         'right_hand_id': [15, 17, 19, 21, 15],
12 |         'body_id': [11, 12, 24, 23, 11],
13 |         'left_leg_id': [23, 25, 27],
14 |         'right_leg_id': [24, 26, 28],
15 |         'left_foot_id': [27, 29, 31, 27],
16 |         'right_foot_id': [28, 30, 32, 28]
17 |     }
18 | 
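    # Descriptive note: the class attributes below flatten part_id_dict.
    # ALL_ID_LINKs keeps one landmark-index chain per body part, and EDGES
    # collects every pair of consecutive indices within each chain. For example,
    # the 'body_id' chain [11, 12, 24, 23, 11] contributes the edges
    # [11, 12], [12, 24], [24, 23] and [23, 11].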
19 |     ALL_ID_LINKs = list(part_id_dict.values())
20 | 
21 |     EDGES = []
22 |     for i in [[[e, ID_LINK[i + 1]] for i, e in enumerate(ID_LINK) if i < len(ID_LINK)-1] for ID_LINK in ALL_ID_LINKs]:
23 |         for j in i:
24 |             EDGES.append(j)
25 | 
--------------------------------------------------------------------------------
/debugtellopy.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | from functools import wraps
3 | from djitellopy import Tello
4 | 
5 | 
6 | def dump_args(func):
7 |     """
8 |     Decorator to print function call details.
9 |     This includes parameter names and effective values.
10 |     """
11 | 
12 |     @wraps(func)
13 |     def wrapper(*args, **kwargs):
14 |         func_args = inspect.signature(func).bind(*args, **kwargs).arguments
15 |         func_args_str = ", ".join(map("{0[0]} = {0[1]!r}".format, func_args.items()))
16 |         print(f"{func.__module__}.{func.__qualname__} ( {func_args_str} )")
17 |         return func(*args, **kwargs)
18 | 
19 |     return wrapper
20 | 
21 | 
22 | class DebugTello(Tello):
23 |     def __init__(self):
24 |         super().__init__()
25 | 
26 |     @dump_args
27 |     def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
28 |                         yaw_velocity: int):
29 |         pass
30 | 
31 |     @dump_args
32 |     def takeoff(self):
33 |         self.is_flying = True
34 | 
35 |     @dump_args
36 |     def land(self):
37 |         self.is_flying = False
38 | 
39 |     @dump_args
40 |     def move_up(self, x: int):
41 |         pass
42 | 
43 | 
44 | if __name__ == '__main__':
45 |     @dump_args
46 |     def test(a, b=4, c="blah-blah", *args, **kwargs):
47 |         pass
48 | 
49 |     test(1, 2, 3, 4, 5, d=6, g=12.9)
50 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TelloDroneDetectionPython
2 | Small but complete: this project takes the Tello drone's video stream, runs MediaPipe pose estimation on every frame, feeds the resulting human keypoint sequences into an RNN model for action recognition, and uses the prediction to tell a real IKUN from a fake one. The project mainly covers the following topics:
3 | 
4 | 
5 | - PyQt multithreading
6 | - MediaPipe human keypoint detection and person segmentation
7 | - TensorFlow human action/behaviour recognition (RNN)
8 | - OpenCV image processing, video reading/writing and frame composition
9 | - Matplotlib visualization (keypoints, motion trajectories)
10 | 
11 | 
12 | # Environment Setup
13 | * mediapipe
14 | * tensorflow (CPU or GPU build)
15 | * opencv-python
16 | * matplotlib
17 | * pyqt5
18 | 
19 | # Demos
20 | 1. research/trajectory_plot.py: draws the human keypoints of a video with both matplotlib and OpenCV. The first run caches the keypoints detected in every frame to `static_plot_data.pkl`; when switching to a new video, delete this file first so that a fresh `static_plot_data.pkl` is generated and cached on the next run.
21 | ![image](resources/trajplot.gif)
22 | ![image](resources/raw_frame.gif)
23 | 
24 | 2. research/animate.py: uses a very simple RNN model to predict whether the person in the video is a real IKUN. The RNN consists of a GRU layer and a fully connected layer (a sketch of this model is given after this list).
25 | ![image](resources/animation.gif)
26 | 
27 | 3. research/write_frame_landmarks.py: extracts every frame of the video into `analysis_frames/raw_frames`, runs keypoint detection on each frame and saves the results into `analysis_frames/landmark`. It also produces a video clip with the person segmented from the background.
28 | ![image](resources/segmentation.gif)
29 | 
30 | 4. research/patch_video.py: composes the videos under a given directory (`video_root` in the code) into a single 1080p video. The total frame count is the minimum frame count M over all input videos, and only the first M frames of each video are kept.
31 | ![image](resources/composition.gif)
32 | 
33 | 5. research/training.ipynb: data preprocessing and training of the IKUN recognition model (RNN). This experiment used 55 positive samples, expanded to 152 by data augmentation. Despite the small dataset the model still reaches decent prediction results; among several runs with different input sequence lengths, a sequence length of 45 worked best.
34 | 
35 | 6. ControlCentre.py: the PyQt5 main application (the flight-control features are still being updated).
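The RNN mentioned in demos 2 and 5 is small enough to sketch directly. The snippet below mirrors the classifier that `ModelInference.py` and `research/animate.py` build (a single GRU layer plus a softmax head over 45-frame sequences of 33 flattened (x, y) landmarks); the random arrays and the `fit`/`predict` calls are illustrative placeholders only, not the project's actual training pipeline.

```python
import numpy as np
from tensorflow.keras import layers, optimizers, Sequential

SEQ_LENGTH = 45          # frames per sample (45 worked best, see demo 5)
FEATURE_DIM = 33 * 2     # 33 MediaPipe pose landmarks, (x, y) per landmark
CLASS_NAMES = ['xiaoheizi', 'ikun']

# Same architecture as IkunRecognizer in ModelInference.py:
# one GRU layer followed by a softmax classifier.
model = Sequential([
    layers.Input(shape=(SEQ_LENGTH, FEATURE_DIM)),
    layers.GRU(32, return_sequences=False),
    layers.Dense(len(CLASS_NAMES), activation="softmax"),
])
model.compile(optimizer=optimizers.Adam(0.001),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Illustrative stand-in data with the expected shapes.
x = np.random.random_sample((8, SEQ_LENGTH, FEATURE_DIM)).astype(np.float32)
y = np.random.randint(0, len(CLASS_NAMES), size=(8,))
model.fit(x, y, batch_size=4, epochs=1, verbose=0)

scores = model.predict(x[:1], verbose=0)    # shape (1, 2)
print(CLASS_NAMES[int(np.argmax(scores))])
```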
36 | 
37 | 
38 | 
39 | # References
40 | - [2D Kalman filter](https://stackoverflow.com/questions/13901997/kalman-2d-filter-in-python)
41 | - [MediaPipe samples](https://github.com/Kazuhito00/mediapipe-python-sample)
42 | - [Small computer-vision projects: CVprojects](https://github.com/enpeizhao/CVprojects)
43 | 
44 | 
45 | 
--------------------------------------------------------------------------------
/SpeechAgent.py:
--------------------------------------------------------------------------------
1 | import os
2 | import wave
3 | import pyaudio
4 | import soundfile
5 | from paddlespeech.cli.tts.infer import TTSExecutor
6 | from paddlespeech.cli.asr.infer import ASRExecutor
7 | 
8 | 
9 | class SpeechAgent:
10 |     def __init__(self):
11 |         self.tts = TTSExecutor()
12 |         self.asr = ASRExecutor()
13 |         self.p = pyaudio.PyAudio()
14 |         self.chunk = 1024
15 | 
16 |     def speech_recognize(self, wav_file: os.PathLike) -> str:
17 |         result = self.asr(audio_file=wav_file)
18 |         return result
19 | 
20 |     def asr_listen(self):
21 |         """Listening may block (it can be wrapped in a threading run(), but note whether the tool instances can be called across threads)."""
22 |         out_path = 'listening.wav'
23 |         # TODO: implement recording and writing the audio to out_path
24 |         text = self.speech_recognize(out_path)
25 |         return text
26 | 
27 |     def speech_generate(self, text: str) -> os.PathLike:
28 |         assert isinstance(text, str) and len(text) > 0, 'Input Chinese text...'
29 |         wav_file = self.tts(text=text)
30 |         return wav_file
31 | 
32 |     def tts_speak(self, text):
33 |         wav_file = self.speech_generate(text)
34 |         wf = wave.open(wav_file, 'rb')
35 | 
36 |         # open stream based on the wave object which has been input.
37 |         stream = self.p.open(format=self.p.get_format_from_width(wf.getsampwidth()),
38 |                              channels=wf.getnchannels(),
39 |                              rate=wf.getframerate(),
40 |                              output=True)
41 | 
42 |         data = wf.readframes(self.chunk)
43 |         while data != b'':
44 |             stream.write(data)
45 |             data = wf.readframes(self.chunk)
46 | 
47 |         # cleanup stuff.
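        # Note: PyAudio playback examples typically call stream.stop_stream()
        # before stream.close(); doing the same here would be a safe addition.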
48 | wf.close() 49 | stream.close() 50 | 51 | def __del__(self): 52 | self.p.terminate() 53 | -------------------------------------------------------------------------------- /research/patch_video.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import os 4 | import pickle 5 | from tqdm import tqdm 6 | 7 | big_image_shape = (1920, 1080) 8 | sub_image_shape = (480, 270) 9 | videos_num_width = int(big_image_shape[0] / sub_image_shape[0]) 10 | videos_num_height = int(big_image_shape[1] / sub_image_shape[1]) 11 | videos_num = videos_num_height * videos_num_width 12 | print(f"need videos samples number: {videos_num}") 13 | videos_num = int(videos_num) 14 | 15 | 16 | if __name__ == '__main__': 17 | video_root = 'E:\\Video\\TelloDroneDetection\\原始视频素材\\negative' 18 | 19 | cache_path = 'all_video_frames.pkl' 20 | if not os.path.exists(cache_path): 21 | with open(cache_path, 'wb') as f: 22 | all_video_frames = [] 23 | min_frames_num = 1024 ** 2 24 | for filename in os.listdir(video_root): 25 | vid_path = os.path.join(video_root, filename) 26 | 27 | cap = cv2.VideoCapture(vid_path) 28 | if not cap.isOpened(): 29 | print("Error Opening video File") 30 | raise IOError 31 | 32 | frames = [] 33 | while cap.isOpened(): 34 | ret, frame = cap.read() 35 | if ret: 36 | frames.append(frame) 37 | else: 38 | break 39 | all_video_frames.append(frames) 40 | min_frames_num = min(min_frames_num, len(frames)) 41 | 42 | cap.release() 43 | cv2.destroyAllWindows() 44 | print(f'min_frames_num: {min_frames_num}') 45 | 46 | all_video_frames = np.array([frames[:min_frames_num] for frames in all_video_frames]) 47 | pickle.dump(all_video_frames, f) 48 | print("successfuly cached.") 49 | else: 50 | with open(cache_path, 'rb') as f: 51 | all_video_frames = pickle.load(f) 52 | print("successfuly loaded.") 53 | 54 | out = cv2.VideoWriter('negative_patch.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 25, big_image_shape) 55 | for frame_cnt in tqdm(range(all_video_frames.shape[1])): 56 | frame = np.zeros((big_image_shape[1], big_image_shape[0], 3)).astype(np.uint8) 57 | i_w, j_h = 0, 0 58 | for i in range(videos_num_width): 59 | for j in range(videos_num_height): 60 | frame[j*sub_image_shape[1]: ((j+1)*sub_image_shape[1]), i*(sub_image_shape[0]): ((i+1)*sub_image_shape[0])] = \ 61 | cv2.resize(all_video_frames[i * videos_num_width + j][frame_cnt], sub_image_shape) 62 | 63 | #cv2.imshow('negative', cv2.resize(frame, (1400, 788))) 64 | cv2.waitKey(30) 65 | 66 | out.write(frame) 67 | out.release() 68 | 69 | 70 | -------------------------------------------------------------------------------- /research/Kalman2D.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | 5 | def kalman_xy(x, P, measurement, R, 6 | motion = np.matrix('0. 0. 0. 0.').T, 7 | Q = np.matrix(np.eye(4))): 8 | """ 9 | Parameters: 10 | x: initial state 4-tuple of location and velocity: (x0, x1, x0_dot, x1_dot) 11 | P: initial uncertainty convariance matrix 12 | measurement: observed position 13 | R: measurement noise 14 | motion: external motion added to state vector x 15 | Q: motion noise (same shape as P) 16 | """ 17 | return kalman(x, P, measurement, R, motion, Q, 18 | F = np.matrix(''' 19 | 1. 0. 1. 0.; 20 | 0. 1. 0. 1.; 21 | 0. 0. 1. 0.; 22 | 0. 0. 0. 1. 23 | '''), 24 | H = np.matrix(''' 25 | 1. 0. 0. 0.; 26 | 0. 1. 0. 
0.''')) 27 | 28 | def kalman(x, P, measurement, R, motion, Q, F, H): 29 | ''' 30 | Parameters: 31 | x: initial state 32 | P: initial uncertainty convariance matrix 33 | measurement: observed position (same shape as H*x) 34 | R: measurement noise (same shape as H) 35 | motion: external motion added to state vector x 36 | Q: motion noise (same shape as P) 37 | F: next state function: x_prime = F*x 38 | H: measurement function: position = H*x 39 | 40 | Return: the updated and predicted new values for (x, P) 41 | 42 | See also http://en.wikipedia.org/wiki/Kalman_filter 43 | 44 | This version of kalman can be applied to many different situations by 45 | appropriately defining F and H 46 | ''' 47 | # UPDATE x, P based on measurement m 48 | # distance between measured and current position-belief 49 | y = np.matrix(measurement).T - H * x 50 | S = H * P * H.T + R # residual convariance 51 | K = P * H.T * S.I # Kalman gain 52 | x = x + K*y 53 | I = np.matrix(np.eye(F.shape[0])) # identity matrix 54 | P = (I - K*H)*P 55 | 56 | # PREDICT x, P based on motion 57 | x = F*x + motion 58 | P = F*P*F.T + Q 59 | 60 | return x, P 61 | 62 | def demo_kalman_xy(): 63 | x = np.matrix('0. 0. 0. 0.').T 64 | P = np.matrix(np.eye(4))*1000 # initial uncertainty 65 | 66 | N = 20 67 | true_x = np.linspace(0.0, 10.0, N) 68 | true_y = true_x**2 69 | observed_x = true_x + 0.05*np.random.random(N)*true_x 70 | observed_y = true_y + 0.05*np.random.random(N)*true_y 71 | plt.plot(observed_x, observed_y, 'ro') 72 | result = [] 73 | R = 0.01**2 74 | for meas in zip(observed_x, observed_y): 75 | x, P = kalman_xy(x, P, meas, R) 76 | result.append((x[:2]).tolist()) 77 | kalman_x, kalman_y = zip(*result) 78 | plt.plot(kalman_x, kalman_y, 'g-') 79 | plt.show() 80 | 81 | if __name__ == "__main__": 82 | demo_kalman_xy() 83 | -------------------------------------------------------------------------------- /DataPipeline.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from PoseDefinition import PoseDefinition 4 | from PIL import Image, ImageDraw, ImageFont 5 | 6 | 7 | class DataPipeline: 8 | def __init__(self): 9 | pass 10 | 11 | @staticmethod 12 | def put_chinese_text(img, text, org, color=(0, 255, 0), size=30): 13 | if isinstance(img, np.ndarray): 14 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 15 | 16 | draw = ImageDraw.Draw(img) 17 | font_style = ImageFont.truetype("simsun.ttc", size, encoding="utf-8") 18 | draw.text(org, text, color, font=font_style) 19 | 20 | res = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) # in-replace 21 | return res 22 | 23 | @staticmethod 24 | def draw_info(img, info: dict): 25 | """ 26 | draw information at right-corner of the input image 27 | :param img: 28 | :param info: {'fps': 10, 'battery': 85, 'result': ''} 29 | :return: 30 | """ 31 | corner_org = [img.shape[1] - 200, 50] 32 | y_step = 30 33 | 34 | for key, value in info.items(): 35 | cv2.putText(img, f'{key}: {value}', corner_org, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) 36 | corner_org[1] = corner_org[1] + y_step 37 | 38 | return img 39 | 40 | @staticmethod 41 | def pipe_pre(frame): 42 | # frame = cv2.flip(frame, 1) 43 | #frame = cv2.resize(frame, (640, 480)) # 720p(960,720)->480p(640,480) 44 | frame = cv2.resize(frame, (720, 540)) 45 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 46 | 47 | return frame 48 | 49 | @staticmethod 50 | def pipe_post(frame: np.array, infer: dict, tello_status: dict): 51 | """ 52 | :param frame: 53 | :param infer: 
{'landmark': [], 'is_ikun': boolean} 54 | :param tello_status: {'temperature': float, 'battery': float, 'flight_time': float} 55 | :return: processed-frame and skeleton_img 56 | """ 57 | 58 | frame_height, frame_width = frame.shape[:2] 59 | 60 | # step1: 61 | frame = DataPipeline.draw_info(frame, tello_status) 62 | if infer['is_ikun'] is True: 63 | frame = DataPipeline.put_chinese_text(frame, "真IKUN", (100, 100)) 64 | 65 | # step2: 66 | skeleton_img = np.zeros((frame_height, frame_width, 3), dtype=np.uint8) # malloc mem 67 | 68 | if infer['landmark']: 69 | frame_landmarks = infer['landmark'].landmark # mediapipe's original processed output 70 | points_list = [[int(landmark.x * frame_width), int(landmark.y * frame_height)] for landmark in frame_landmarks] 71 | 72 | for edge in PoseDefinition.EDGES: 73 | cv2.line(skeleton_img, tuple(points_list[edge[0]]), tuple(points_list[edge[1]]), (0, 255, 0), 2) 74 | 75 | # center crop 76 | crop_width = frame_height * 360 / 540 77 | crop_start = int((frame_width - crop_width) / 2) 78 | skeleton_img_crop = skeleton_img[:, crop_start: frame_width - crop_start] 79 | skeleton_img_resize = cv2.resize(skeleton_img_crop, (360, 540)) 80 | 81 | return frame, skeleton_img_resize 82 | -------------------------------------------------------------------------------- /ModelInference.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras import layers 3 | from tensorflow.keras import optimizers 4 | from tensorflow.keras import Sequential 5 | import mediapipe as mp 6 | import numpy as np 7 | import cv2 8 | from itertools import chain 9 | 10 | CLASS_NAMES = ['xiaoheizi', 'ikun'] 11 | SEQ_LENGTH = 45 12 | COMPLEX_MODE = True 13 | CLASS_NUM = 2 14 | BATCH_SIZE = 4 15 | INPUT_SHAPE = (SEQ_LENGTH, 2) if not COMPLEX_MODE else (SEQ_LENGTH, 33*2) 16 | 17 | 18 | class PoseDetector: 19 | def __init__(self, model_complexity, enable_segmentation, min_detection_confidence, min_tracking_confidence): 20 | self.pose = mp.solutions.pose.Pose(model_complexity=model_complexity, 21 | enable_segmentation=enable_segmentation, 22 | min_detection_confidence=min_detection_confidence, 23 | min_tracking_confidence=min_tracking_confidence) 24 | 25 | def inference(self, frame_rgb): 26 | # image = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB) 27 | ret = self.pose.process(frame_rgb) 28 | return ret.pose_landmarks 29 | 30 | 31 | class IkunRecognizer: 32 | SEQUENCE_NUM = SEQ_LENGTH 33 | 34 | def __init__(self, model_path): 35 | self.model = self.load_model(model_path) # tf.keras.models.load_model(saved_model_path) 36 | self.make_first_inference() 37 | 38 | def load_model(self, model_path, saved_model=False): 39 | if saved_model: 40 | return tf.keras.models.load_model(model_path) 41 | else: 42 | # step1: build graph 43 | model = Sequential([ 44 | layers.Input(shape=INPUT_SHAPE), 45 | layers.GRU(32, return_sequences=False), 46 | layers.Dense(len(CLASS_NAMES), activation="softmax") 47 | ]) 48 | model.compile( 49 | optimizer=optimizers.Adam(0.001), 50 | loss="sparse_categorical_crossentropy", 51 | metrics=["accuracy"] 52 | ) 53 | 54 | # step2: restore parameters 55 | model.load_weights(model_path) 56 | return model 57 | 58 | def make_first_inference(self): 59 | random_input = np.random.random_sample((1, self.SEQUENCE_NUM, 33*2)) 60 | self.model.predict(random_input, verbose=0) 61 | 62 | def inference(self, mpose_output_list): 63 | landmarks = [[[landmark.x, landmark.y] for landmark in item.landmark] for item in 
mpose_output_list] 64 | model_input = np.expand_dims([list(chain.from_iterable(item)) for item in landmarks], 0) 65 | ret = self.model.predict(model_input, verbose=0) # 0=silent, 1=progress-bar, 2=one-line-per-epoch 66 | 67 | return ret 68 | 69 | 70 | class FaceDetector: 71 | def __init__(self, yunet_model_path, set_shape=(320, 320)): 72 | self.model = cv2.FaceDetectorYN.create( 73 | model=yunet_model_path, 74 | config='', 75 | input_size=(320, 320), 76 | score_threshold=0.9, 77 | nms_threshold=0.3, 78 | top_k=5000, 79 | backend_id=cv2.dnn.DNN_BACKEND_DEFAULT, 80 | target_id=cv2.dnn.DNN_TARGET_CPU 81 | ) 82 | 83 | self.set_shape = set_shape 84 | self.model.setInputSize(set_shape) 85 | 86 | def inference(self, img): 87 | if img.shape[:2] != self.set_shape: 88 | self.model.setInputSize(img.shape[:2][::-1]) # set size should be (width, height) order 89 | self.set_shape = img.shape[:2] 90 | _, faces = self.model.detect(img) 91 | 92 | return faces 93 | -------------------------------------------------------------------------------- /research/animate.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | import cv2 3 | import sys 4 | import numpy as np 5 | import tensorflow as tf 6 | from itertools import chain 7 | from tensorflow.keras import layers 8 | from tensorflow.keras import optimizers 9 | from tensorflow.keras import Sequential 10 | from PIL import Image, ImageDraw, ImageFont 11 | 12 | 13 | CLASS_NAMES = ['xiaoheizi', 'ikun'] 14 | SEQ_LENGTH = 45 15 | COMPLEX_MODE = True 16 | CLASS_NUM = 2 17 | BATCH_SIZE = 4 18 | INPUT_SHAPE = (SEQ_LENGTH, 2) if not COMPLEX_MODE else (SEQ_LENGTH, 33*2) 19 | # Mediapipe model config 20 | MODEL_COMPLEXITY = 2 21 | ENABLE_SEGMENTATION = False 22 | MIN_DETECTION_CONFIDENCE = 0.5 23 | MIN_TRACKING_CONFIDENCE = 0.5 24 | # image-show window size configuration 25 | MAX_WINDOW_WIDTH = 1920/3 26 | MAX_WINDOW_HEIGHT = 1080/3 27 | MAX_SIZE = np.array([MAX_WINDOW_WIDTH, MAX_WINDOW_HEIGHT]).astype(int) 28 | SMALL_SIZE = (MAX_SIZE / 2).astype(int) 29 | 30 | 31 | class Inference: 32 | def __init__(self, pose, cpt_path): 33 | self.pose = pose 34 | self.predictor = self.load_model(cpt_path) 35 | self.make_first_inference() 36 | 37 | def make_first_inference(self): 38 | random_input = np.random.random_sample((1, SEQ_LENGTH, 33*2)) 39 | self.predictor.predict(random_input, verbose=1) # mode. 
0 = silent, 1 = progress bar, 2 = one line per epoch 40 | 41 | def load_model(self, model_path, saved_model=False): 42 | if saved_model: 43 | return tf.keras.models.load_model(model_path) 44 | else: 45 | # step1: build graph 46 | model = Sequential([ 47 | layers.Input(shape=INPUT_SHAPE), 48 | layers.GRU(32, return_sequences=False), 49 | layers.Dense(len(CLASS_NAMES), activation="softmax") 50 | ]) 51 | model.compile( 52 | optimizer=optimizers.Adam(0.001), 53 | loss="sparse_categorical_crossentropy", 54 | metrics=["accuracy"] 55 | ) 56 | 57 | # step2: restore parameters 58 | model.load_weights(model_path) 59 | return model 60 | 61 | def detect_landmark_fp(self, frame): 62 | image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 63 | result = self.pose.process(image) 64 | return result 65 | 66 | def extract_entire_feature(self, frames): 67 | landmarks = [self.detect_landmark_fp(frame).pose_landmarks for frame in frames] 68 | valid_landmarks = [[[landmark.x, landmark.y] for landmark in item.landmark] for item in landmarks if 69 | item is not None] # (N, 33, 2) 70 | valid_samples = [list(chain.from_iterable(item)) for item in valid_landmarks][:SEQ_LENGTH] 71 | return valid_samples 72 | 73 | def pipeline(self, frames): 74 | assert len(frames) >= SEQ_LENGTH, f'length of frames sequence is smaller than {SEQ_LENGTH}.' 75 | 76 | features_seq = self.extract_entire_feature(frames) 77 | model_input = np.array(features_seq) 78 | 79 | if model_input.shape[0] == SEQ_LENGTH: 80 | 81 | ret = self.predictor.predict(model_input) 82 | else: 83 | ret = None 84 | return ret 85 | 86 | def pipeline_features(self, landmark_features): 87 | landmarks = [[[landmark.x, landmark.y] for landmark in item.landmark] for item in landmark_features] 88 | model_input = np.expand_dims([list(chain.from_iterable(item)) for item in landmarks], 0) 89 | ret = self.predictor.predict(model_input, verbose=0) 90 | return ret 91 | 92 | 93 | def put_chinese_text(img, text, org, color=(255, 100, 100), size=35): 94 | if isinstance(img, np.ndarray): 95 | img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) 96 | 97 | draw = ImageDraw.Draw(img) 98 | font_style = ImageFont.truetype("simsun.ttc", size, encoding="utf-8") 99 | draw.text(org, text, color, font=font_style) 100 | 101 | res = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) # in-replace 102 | return res 103 | 104 | 105 | def animate(vid_path, cpt_path, mpose): 106 | # step0: create model instance 107 | engine = Inference(mpose, cpt_path) 108 | 109 | # step1: extract original frames and video info (shape, fps) 110 | cap = cv2.VideoCapture(vid_path) 111 | if not cap.isOpened(): 112 | print("Error Opening video File") 113 | raise IOError 114 | 115 | # get fps 116 | (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.') 117 | if int(major_ver) < 3: 118 | fps = cap.get(cv2.cv.CV_CAP_PROP_FPS) 119 | else: 120 | fps = cap.get(cv2.CAP_PROP_FPS) 121 | cycle_ms = 1 # int(1000/fps) 122 | 123 | # get frame shape 124 | frame_size = np.array([cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)]) 125 | resize_size = (frame_size * (MAX_SIZE/frame_size).min()).astype(int) 126 | resize_size_small = (resize_size / 2).astype(int) 127 | # print(resize_size) 128 | 129 | # step2: read and inference 130 | frames_features = [] 131 | while cap.isOpened(): 132 | ret, frame = cap.read() 133 | if ret: 134 | frame = cv2.resize(frame, resize_size) 135 | 136 | frame_landmarks = engine.detect_landmark_fp(cv2.resize(frame, resize_size_small)).pose_landmarks 137 | 
frames_features.append(frame_landmarks) if frame_landmarks is not None else None 138 | frames_features.pop(0) if len(frames_features) > SEQ_LENGTH else None 139 | 140 | # ret = engine.pipeline(frames) if len(frames) == SEQ_LENGTH else None 141 | ret = engine.pipeline_features(frames_features) if len(frames_features) == SEQ_LENGTH else None 142 | print(ret[0]) if ret is not None else None 143 | if ret is not None and CLASS_NAMES[np.argmax(ret)] == 'ikun': 144 | print('The True IKun!') 145 | frame = put_chinese_text(frame, "鉴定为:真IKun", (30, 30), color=(255, 50, 50), size=35) 146 | else: 147 | pass 148 | 149 | cv2.imshow("Frame", cv2.resize(frame, (int(frame.shape[1]*2.2), int(frame.shape[0]*2.2)))) 150 | if cv2.waitKey(cycle_ms) & 0xFF == ord("q"): 151 | break 152 | else: 153 | break 154 | cap.release() 155 | 156 | 157 | if __name__ == '__main__': 158 | # configuration 159 | video_path = 'yuanban_short.mp4' 160 | checkpoint_path = 'saved_model/ikun_classifier45' 161 | 162 | pose = mp.solutions.pose.Pose(model_complexity=MODEL_COMPLEXITY, 163 | enable_segmentation=ENABLE_SEGMENTATION, 164 | min_detection_confidence=MIN_DETECTION_CONFIDENCE, 165 | min_tracking_confidence=MIN_TRACKING_CONFIDENCE) 166 | 167 | # running 168 | animate(video_path, checkpoint_path, pose) 169 | -------------------------------------------------------------------------------- /research/trajectory_plot.py: -------------------------------------------------------------------------------- 1 | """https://stackoverflow.com/questions/13901997/kalman-2d-filter-in-python""" 2 | import traceback 3 | 4 | import matplotlib.pyplot as plt 5 | import matplotlib.animation as animation 6 | import moviepy.editor as mpy 7 | import mediapipe as mp 8 | import numpy as np 9 | import mimetypes 10 | import os 11 | import cv2 12 | import pickle 13 | from Kalman2D import kalman_xy 14 | from PoseDefinition import PoseDefinition 15 | 16 | 17 | class TrajectoryPlot: 18 | def __init__(self, model_complexity, enable_segmentation, min_detection_confidence, min_tracking_confidence): 19 | self.pose = mp.solutions.pose.Pose( 20 | model_complexity=model_complexity, 21 | enable_segmentation=enable_segmentation, 22 | min_detection_confidence=min_detection_confidence, 23 | min_tracking_confidence=min_tracking_confidence, 24 | ) 25 | self.it_id = 0 26 | 27 | @staticmethod 28 | def extract_video_frames(vid_path, sample_stride=0): 29 | """ 30 | @param sample_stride: 'stride=0' will reserve all frames 31 | """ 32 | cap = cv2.VideoCapture(vid_path) 33 | if not cap.isOpened(): 34 | print("Error Opening video File") 35 | raise IOError 36 | 37 | sample_frames = [] 38 | frames_cnt = 0 39 | while cap.isOpened(): 40 | ret, frame = cap.read() 41 | if ret: 42 | if not frames_cnt % sample_stride: 43 | sample_frames.append(frame) 44 | else: 45 | break 46 | 47 | frames_cnt += 1 48 | 49 | cap.release() 50 | cv2.destroyAllWindows() 51 | 52 | return sample_frames 53 | 54 | def plot_pose(self, ax, pose_landmarks): 55 | # plot key-points 56 | for index, point in enumerate(pose_landmarks): 57 | ax.plot(point[0], point[1], marker="o", markersize=4, markeredgecolor="red", markerfacecolor="green") 58 | 59 | # plot edges 60 | for id_link in PoseDefinition.ALL_ID_LINKs: 61 | # ax.plot(pose_landmarks[id_link, 0], pose_landmarks[id_link, 1], color='#900302', marker='+', linestyle='-') 62 | ax.plot(pose_landmarks[id_link, 0], pose_landmarks[id_link, 1], color='#900302') 63 | 64 | def cv2_draw_pose(self, img, pts): 65 | for edge in PoseDefinition.EDGES: 66 | cv2.line(img, 
tuple(pts[edge[0]]), tuple(pts[edge[1]]), (0, 255, 0), 5) 67 | 68 | def frame_inference(self, img): 69 | image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 70 | results = self.pose.process(image) 71 | return results.pose_landmarks 72 | 73 | def detect_key_point(self, frames): 74 | landmarks = [] # shape of (frames-num, key-points-num, 2) 75 | image_h, image_w = frames[0].shape[:2] 76 | for frame in frames: 77 | pose_landmarks = self.frame_inference(frame) 78 | if pose_landmarks is not None: 79 | frame_landmarks = pose_landmarks.landmark 80 | points_list = [[int(landmark.x * image_w), int(landmark.y * image_h)] for landmark in frame_landmarks] 81 | landmarks.append(points_list) 82 | else: 83 | landmarks.append(None) 84 | 85 | landmarks_valid = [landmark for landmark in landmarks if landmark is not None] 86 | landmarks_valid = np.array(landmarks_valid) 87 | landmarks = np.array(landmarks) 88 | 89 | return landmarks, landmarks_valid 90 | 91 | def filter_2d(self, meas_points): 92 | filtered_points = [] 93 | s = np.matrix('0. 0. 0. 0.').T 94 | P = np.matrix(np.eye(4)) * 1000 # initial uncertainty 95 | R = 0.01 ** 2 96 | for meas in meas_points: 97 | s, P = kalman_xy(s, P, meas, R) 98 | filtered_points.append((s[:2]).tolist()) 99 | 100 | filtered_points = np.array(filtered_points).squeeze() 101 | 102 | return filtered_points 103 | 104 | def plot_traj(self, vid_path, key_point_ids: list, mode=0, stride=0): 105 | """绘制所有轨迹点""" 106 | # step1: extract frames 107 | frames = self.extract_video_frames(vid_path, sample_stride=stride) 108 | print(f"Extracted frames numbers: {len(frames)}") 109 | 110 | frame_h, frame_w = frames[0].shape[:2] 111 | 112 | # step2: detect key-point from each frame (save only one person's landmarks) 113 | cache_path = 'static_plot_data.pkl' 114 | if not os.path.exists(cache_path): 115 | landmarks, landmarks_valid = self.detect_key_point(frames) 116 | with open(cache_path, 'wb') as f: 117 | pickle.dump((landmarks, landmarks_valid), f) 118 | else: 119 | with open(cache_path, 'rb') as f: 120 | landmarks, landmarks_valid = pickle.load(f) 121 | 122 | # step3: kalman-filter on single landmark 123 | meas_points = landmarks_valid[:, key_point_ids] 124 | meas_points = meas_points.mean(axis=1) 125 | filtered_points = self.filter_2d(meas_points) 126 | 127 | # step4: animate curve, full-pose and select-key-point using matplotlib 128 | # coordinate transformation (invert vertical direction) 129 | #landmarks_valid[..., 1] = frame_h - landmarks_valid[..., 1] 130 | landmarks_valid_vert = np.concatenate((landmarks_valid[..., [0]], frame_h - landmarks_valid[..., [1]]), axis=2) 131 | 132 | print(np.shape(filtered_points)) 133 | filtered_points_vert = np.concatenate((filtered_points[:, [0]], frame_h - filtered_points[:, [1]]), axis=1) 134 | 135 | # set up the figure, the axis, and the plot element we want to animate 136 | fig = plt.figure(figsize=(8, 8)) 137 | ax = plt.axes(xlim=(0, frame_w), ylim=(0, frame_h)) # -300)) 138 | 139 | def setup(): 140 | ax.clear() 141 | 142 | ax.set_aspect("equal") 143 | ax.set_xlim(0, frame_w) 144 | ax.set_ylim(0, frame_h) # -300) 145 | 146 | ax.set_xlabel('x (px)') 147 | ax.set_ylabel('y (px)') 148 | 149 | points_iterator = iter(filtered_points_vert) 150 | 151 | # animation function. 
This is called sequentially 152 | def animate_pose(i): 153 | setup() 154 | 155 | filtered_point = next(points_iterator) 156 | x = filtered_point[0] 157 | y = filtered_point[1] 158 | # print(x, y) 159 | ax.plot(x, y, 'bo', markersize=8) 160 | 161 | # print(landmarks_valid[self.it_id]) 162 | self.plot_pose(ax, landmarks_valid_vert[self.it_id]) 163 | 164 | self.it_id += 1 165 | if self.it_id == len(filtered_points_vert) - 1: 166 | anim.event_source.stop() 167 | 168 | def animate_fixed_nums_traj(i): 169 | setup() 170 | try: 171 | plotting_points.append(next(points_iterator)) 172 | if len(plotting_points) > points_num: 173 | plotting_points.pop(0) 174 | 175 | ax.plot([ele[0] for ele in plotting_points], [ele[1] for ele in plotting_points], 'bo', markersize=3) 176 | except IndexError as e: 177 | print("Index out of bounds") 178 | anim.event_source.stop() 179 | 180 | self.plot_pose(ax, landmarks_valid_vert[self.it_id]) 181 | 182 | self.it_id += 1 183 | if self.it_id == len(filtered_points_vert) - 1: 184 | anim.event_source.stop() 185 | 186 | if mode == 0: 187 | animate = animate_pose 188 | else: 189 | plotting_points = [] 190 | points_num = 30 191 | animate = animate_fixed_nums_traj 192 | 193 | # call the animator. blit=True means only re-draw the parts that have changed. 194 | anim = animation.FuncAnimation(fig, animate, frames=len(filtered_points_vert), interval=1) 195 | # anim = animation.FuncAnimation(fig, animate, interval=20) 196 | # anim.save('animation.mp4', fps=30, extra_args=['-vcodec', 'libx264']) 197 | plt.show() 198 | 199 | # step5: plot curve on origin image 200 | image_h, image_w = frames[0].shape[:2] 201 | res_h, res_w = (int(image_w/2), int(image_h/2)) 202 | out = cv2.VideoWriter('static_plot.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (image_w, image_h)) 203 | # Create solid alpha layer, same height and width as "img", filled with 128s 204 | alpha = np.full_like(frames[0][..., 0], 128) 205 | for index, frame in enumerate(frames): 206 | if landmarks[index] is not None: 207 | # Merge new alpha layer onto image with OpenCV "merge()" 208 | # semi_trans_frame = cv2.merge((frame, alpha)) 209 | semi_trans_frame = frame 210 | 211 | # Calculate 212 | circle_center = tuple(filtered_points[index].astype(np.int)) 213 | 214 | # plot lines 215 | pts = landmarks_valid[index] 216 | # cv2.polylines(semi_trans_frame, [pts], True, (0, 255, 255)) 217 | self.cv2_draw_pose(semi_trans_frame, pts) 218 | 219 | # plot key-points 220 | cv2.circle(semi_trans_frame, circle_center, 10, (255, 120, 50), thickness=-1) 221 | 222 | out.write(semi_trans_frame) 223 | cv2.imshow('frame', cv2.resize(semi_trans_frame, (int(res_h*1.8), int(res_w*1.8)))) 224 | 225 | # Press Q on keyboard to stop recording 226 | if cv2.waitKey(25) & 0xFF == ord('q'): 227 | break 228 | out.release() 229 | cv2.destroyAllWindows() 230 | 231 | 232 | if __name__ == '__main__': 233 | video_path = 'yuanban.mp4' 234 | 235 | traj_plotter = TrajectoryPlot(model_complexity=2, 236 | enable_segmentation=False, 237 | min_detection_confidence=0.5, 238 | min_tracking_confidence=0.5) 239 | 240 | traj_plotter.plot_traj(vid_path=video_path, key_point_ids=[11, 12], mode=0, stride=1) 241 | -------------------------------------------------------------------------------- /PID.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def _clamp(value, limits): 5 | lower, upper = limits 6 | if value is None: 7 | return None 8 | elif (upper is not None) and (value > upper): 9 | return upper 10 | 
elif (lower is not None) and (value < lower): 11 | return lower 12 | return value 13 | 14 | 15 | class PID(object): 16 | """A simple PID controller.""" 17 | 18 | def __init__( 19 | self, 20 | Kp=1.0, 21 | Ki=0.0, 22 | Kd=0.0, 23 | setpoint=0, 24 | sample_time=0.01, 25 | output_limits=(None, None), 26 | auto_mode=True, 27 | proportional_on_measurement=False, 28 | differetial_on_measurement=True, 29 | error_map=None, 30 | ): 31 | """ 32 | Initialize a new PID controller. 33 | 34 | :param Kp: The value for the proportional gain Kp 35 | :param Ki: The value for the integral gain Ki 36 | :param Kd: The value for the derivative gain Kd 37 | :param setpoint: The initial setpoint that the PID will try to achieve 38 | :param sample_time: The time in seconds which the controller should wait before generating 39 | a new output value. The PID works best when it is constantly called (eg. during a 40 | loop), but with a sample time set so that the time difference between each update is 41 | (close to) constant. If set to None, the PID will compute a new output value every time 42 | it is called. 43 | :param output_limits: The initial output limits to use, given as an iterable with 2 44 | elements, for example: (lower, upper). The output will never go below the lower limit 45 | or above the upper limit. Either of the limits can also be set to None to have no limit 46 | in that direction. Setting output limits also avoids integral windup, since the 47 | integral term will never be allowed to grow outside of the limits. 48 | :param auto_mode: Whether the controller should be enabled (auto mode) or not (manual mode) 49 | :param proportional_on_measurement: Whether the proportional term should be calculated on 50 | the input directly rather than on the error (which is the traditional way). Using 51 | proportional-on-measurement avoids overshoot for some types of systems. 52 | :param differetial_on_measurement: Whether the differential term should be calculated on 53 | the input directly rather than on the error (which is the traditional way). 54 | :param error_map: Function to transform the error value in another constrained value. 55 | """ 56 | self.Kp, self.Ki, self.Kd = Kp, Ki, Kd 57 | self.setpoint = setpoint 58 | self.sample_time = sample_time 59 | 60 | self._min_output, self._max_output = None, None 61 | self._auto_mode = auto_mode 62 | self.proportional_on_measurement = proportional_on_measurement 63 | self.differetial_on_measurement = differetial_on_measurement 64 | self.error_map = error_map 65 | 66 | self._proportional = 0 67 | self._integral = 0 68 | self._derivative = 0 69 | 70 | self._last_time = None 71 | self._last_output = None 72 | self._last_error = None 73 | self._last_input = None 74 | 75 | try: 76 | # Get monotonic time to ensure that time deltas are always positive 77 | self.time_fn = time.monotonic 78 | except AttributeError: 79 | # time.monotonic() not available (using python < 3.3), fallback to time.time() 80 | self.time_fn = time.time 81 | 82 | self.output_limits = output_limits 83 | self.reset() 84 | 85 | def __call__(self, input_, dt=None): 86 | """ 87 | Update the PID controller. 88 | 89 | Call the PID controller with *input_* and calculate and return a control output if 90 | sample_time seconds has passed since the last update. If no new output is calculated, 91 | return the previous output instead (or None if no value has been calculated yet). 92 | 93 | :param dt: If set, uses this value for timestep instead of real time. 
This can be used in 94 | simulations when simulation time is different from real time. 95 | """ 96 | if not self.auto_mode: 97 | return self._last_output 98 | 99 | now = self.time_fn() 100 | if dt is None: 101 | dt = now - self._last_time if (now - self._last_time) else 1e-16 102 | elif dt <= 0: 103 | raise ValueError('dt has negative value {}, must be positive'.format(dt)) 104 | 105 | if self.sample_time is not None and dt < self.sample_time and self._last_output is not None: 106 | # Only update every sample_time seconds 107 | return self._last_output 108 | 109 | # Compute error terms 110 | error = self.setpoint - input_ 111 | d_input = input_ - (self._last_input if (self._last_input is not None) else input_) 112 | d_error = error - (self._last_error if (self._last_error is not None) else error) 113 | 114 | # Check if must map the error 115 | if self.error_map is not None: 116 | error = self.error_map(error) 117 | 118 | # Compute the proportional term 119 | if not self.proportional_on_measurement: 120 | # Regular proportional-on-error, simply set the proportional term 121 | self._proportional = self.Kp * error 122 | else: 123 | # Add the proportional error on measurement to error_sum 124 | self._proportional -= self.Kp * d_input 125 | 126 | # Compute integral and derivative terms 127 | self._integral += self.Ki * error * dt 128 | self._integral = _clamp(self._integral, self.output_limits) # Avoid integral windup 129 | 130 | if self.differetial_on_measurement: 131 | self._derivative = -self.Kd * d_input / dt 132 | else: 133 | self._derivative = self.Kd * d_error / dt 134 | 135 | # Compute final output 136 | output = self._proportional + self._integral + self._derivative 137 | output = _clamp(output, self.output_limits) 138 | 139 | # Keep track of state 140 | self._last_output = output 141 | self._last_input = input_ 142 | self._last_error = error 143 | self._last_time = now 144 | 145 | return output 146 | 147 | def __repr__(self): 148 | return ( 149 | '{self.__class__.__name__}(' 150 | 'Kp={self.Kp!r}, Ki={self.Ki!r}, Kd={self.Kd!r}, ' 151 | 'setpoint={self.setpoint!r}, sample_time={self.sample_time!r}, ' 152 | 'output_limits={self.output_limits!r}, auto_mode={self.auto_mode!r}, ' 153 | 'proportional_on_measurement={self.proportional_on_measurement!r}, ' 154 | 'differetial_on_measurement={self.differetial_on_measurement!r}, ' 155 | 'error_map={self.error_map!r}' 156 | ')' 157 | ).format(self=self) 158 | 159 | @property 160 | def components(self): 161 | """ 162 | The P-, I- and D-terms from the last computation as separate components as a tuple. Useful 163 | for visualizing what the controller is doing or when tuning hard-to-tune systems. 164 | """ 165 | return self._proportional, self._integral, self._derivative 166 | 167 | @property 168 | def tunings(self): 169 | """The tunings used by the controller as a tuple: (Kp, Ki, Kd).""" 170 | return self.Kp, self.Ki, self.Kd 171 | 172 | @tunings.setter 173 | def tunings(self, tunings): 174 | """Set the PID tunings.""" 175 | self.Kp, self.Ki, self.Kd = tunings 176 | 177 | @property 178 | def auto_mode(self): 179 | """Whether the controller is currently enabled (in auto mode) or not.""" 180 | return self._auto_mode 181 | 182 | @auto_mode.setter 183 | def auto_mode(self, enabled): 184 | """Enable or disable the PID controller.""" 185 | self.set_auto_mode(enabled) 186 | 187 | def set_auto_mode(self, enabled, last_output=None): 188 | """ 189 | Enable or disable the PID controller, optionally setting the last output value. 
190 | 191 | This is useful if some system has been manually controlled and if the PID should take over. 192 | In that case, disable the PID by setting auto mode to False and later when the PID should 193 | be turned back on, pass the last output variable (the control variable) and it will be set 194 | as the starting I-term when the PID is set to auto mode. 195 | 196 | :param enabled: Whether auto mode should be enabled, True or False 197 | :param last_output: The last output, or the control variable, that the PID should start 198 | from when going from manual mode to auto mode. Has no effect if the PID is already in 199 | auto mode. 200 | """ 201 | if enabled and not self._auto_mode: 202 | # Switching from manual mode to auto, reset 203 | self.reset() 204 | 205 | self._integral = last_output if (last_output is not None) else 0 206 | self._integral = _clamp(self._integral, self.output_limits) 207 | 208 | self._auto_mode = enabled 209 | 210 | @property 211 | def output_limits(self): 212 | """ 213 | The current output limits as a 2-tuple: (lower, upper). 214 | 215 | See also the *output_limits* parameter in :meth:`PID.__init__`. 216 | """ 217 | return self._min_output, self._max_output 218 | 219 | @output_limits.setter 220 | def output_limits(self, limits): 221 | """Set the output limits.""" 222 | if limits is None: 223 | self._min_output, self._max_output = None, None 224 | return 225 | 226 | min_output, max_output = limits 227 | 228 | if (None not in limits) and (max_output < min_output): 229 | raise ValueError('lower limit must be less than upper limit') 230 | 231 | self._min_output = min_output 232 | self._max_output = max_output 233 | 234 | self._integral = _clamp(self._integral, self.output_limits) 235 | self._last_output = _clamp(self._last_output, self.output_limits) 236 | 237 | def reset(self): 238 | """ 239 | Reset the PID controller internals. 240 | 241 | This sets each term to 0 as well as clearing the integral, the last output and the last 242 | input (derivative calculation). 
243 | """ 244 | self._proportional = 0 245 | self._integral = 0 246 | self._derivative = 0 247 | 248 | self._integral = _clamp(self._integral, self.output_limits) 249 | 250 | self._last_time = self.time_fn() 251 | self._last_output = None 252 | self._last_input = None -------------------------------------------------------------------------------- /research/write_frame_landmarks.py: -------------------------------------------------------------------------------- 1 | """https://github.com/Kazuhito00/mediapipe-python-sample""" 2 | 3 | import copy 4 | 5 | import cv2 as cv 6 | import os 7 | 8 | import mediapipe as mp 9 | import numpy as np 10 | 11 | 12 | def draw_landmarks(image, landmarks, visibility_th=0.5): 13 | image_width, image_height = image.shape[1], image.shape[0] 14 | 15 | landmark_point = [] 16 | 17 | for index, landmark in enumerate(landmarks.landmark): 18 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 19 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 20 | landmark_z = landmark.z 21 | landmark_point.append([landmark.visibility, (landmark_x, landmark_y)]) 22 | 23 | if landmark.visibility < visibility_th: 24 | continue 25 | 26 | if index == 0: # 鼻 27 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 28 | if index == 1: # 右目:目頭 29 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 30 | if index == 2: # 右目:瞳 31 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 32 | if index == 3: # 右目:目尻 33 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 34 | if index == 4: # 左目:目頭 35 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 36 | if index == 5: # 左目:瞳 37 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 38 | if index == 6: # 左目:目尻 39 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 40 | if index == 7: # 右耳 41 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 42 | if index == 8: # 左耳 43 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 44 | if index == 9: # 口:左端 45 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 46 | if index == 10: # 口:左端 47 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 48 | if index == 11: # 右肩 49 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 50 | if index == 12: # 左肩 51 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 52 | if index == 13: # 右肘 53 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 54 | if index == 14: # 左肘 55 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 56 | if index == 15: # 右手首 57 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 58 | if index == 16: # 左手首 59 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 60 | if index == 17: # 右手1(外側端) 61 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 62 | if index == 18: # 左手1(外側端) 63 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 64 | if index == 19: # 右手2(先端) 65 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 66 | if index == 20: # 左手2(先端) 67 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 68 | if index == 21: # 右手3(内側端) 69 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 70 | if index == 22: # 左手3(内側端) 71 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 72 | if index == 23: # 腰(右側) 73 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 74 | if index == 24: # 腰(左側) 75 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 
255, 0), 2) 76 | if index == 25: # 右ひざ 77 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 78 | if index == 26: # 左ひざ 79 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 80 | if index == 27: # 右足首 81 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 82 | if index == 28: # 左足首 83 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 84 | if index == 29: # 右かかと 85 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 86 | if index == 30: # 左かかと 87 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 88 | if index == 31: # 右つま先 89 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 90 | if index == 32: # 左つま先 91 | cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2) 92 | 93 | # if not upper_body_only: 94 | if True: 95 | cv.putText(image, "z:" + str(round(landmark_z, 3)), 96 | (landmark_x - 10, landmark_y - 10), 97 | cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, 98 | cv.LINE_AA) 99 | 100 | if len(landmark_point) > 0: 101 | # 右目 102 | if landmark_point[1][0] > visibility_th and landmark_point[2][0] > visibility_th: 103 | cv.line(image, landmark_point[1][1], landmark_point[2][1], (0, 255, 0), 2) 104 | if landmark_point[2][0] > visibility_th and landmark_point[3][0] > visibility_th: 105 | cv.line(image, landmark_point[2][1], landmark_point[3][1], 106 | (0, 255, 0), 2) 107 | 108 | # 左目 109 | if landmark_point[4][0] > visibility_th and landmark_point[5][0] > visibility_th: 110 | cv.line(image, landmark_point[4][1], landmark_point[5][1], 111 | (0, 255, 0), 2) 112 | if landmark_point[5][0] > visibility_th and landmark_point[6][0] > visibility_th: 113 | cv.line(image, landmark_point[5][1], landmark_point[6][1], 114 | (0, 255, 0), 2) 115 | 116 | # 口 117 | if landmark_point[9][0] > visibility_th and landmark_point[10][0] > visibility_th: 118 | cv.line(image, landmark_point[9][1], landmark_point[10][1], 119 | (0, 255, 0), 2) 120 | 121 | # 肩 122 | if landmark_point[11][0] > visibility_th and landmark_point[12][0] > visibility_th: 123 | cv.line(image, landmark_point[11][1], landmark_point[12][1], 124 | (0, 255, 0), 2) 125 | 126 | # 右腕 127 | if landmark_point[11][0] > visibility_th and landmark_point[13][0] > visibility_th: 128 | cv.line(image, landmark_point[11][1], landmark_point[13][1], 129 | (0, 255, 0), 2) 130 | if landmark_point[13][0] > visibility_th and landmark_point[15][0] > visibility_th: 131 | cv.line(image, landmark_point[13][1], landmark_point[15][1], 132 | (0, 255, 0), 2) 133 | 134 | # 左腕 135 | if landmark_point[12][0] > visibility_th and landmark_point[14][0] > visibility_th: 136 | cv.line(image, landmark_point[12][1], landmark_point[14][1], 137 | (0, 255, 0), 2) 138 | if landmark_point[14][0] > visibility_th and landmark_point[16][0] > visibility_th: 139 | cv.line(image, landmark_point[14][1], landmark_point[16][1], 140 | (0, 255, 0), 2) 141 | 142 | # 右手 143 | if landmark_point[15][0] > visibility_th and landmark_point[17][0] > visibility_th: 144 | cv.line(image, landmark_point[15][1], landmark_point[17][1], 145 | (0, 255, 0), 2) 146 | if landmark_point[17][0] > visibility_th and landmark_point[19][0] > visibility_th: 147 | cv.line(image, landmark_point[17][1], landmark_point[19][1], 148 | (0, 255, 0), 2) 149 | if landmark_point[19][0] > visibility_th and landmark_point[21][0] > visibility_th: 150 | cv.line(image, landmark_point[19][1], landmark_point[21][1], 151 | (0, 255, 0), 2) 152 | if landmark_point[21][0] > visibility_th and landmark_point[15][0] > visibility_th: 153 | cv.line(image, 
landmark_point[21][1], landmark_point[15][1], 154 | (0, 255, 0), 2) 155 | 156 | # 左手 157 | if landmark_point[16][0] > visibility_th and landmark_point[18][0] > visibility_th: 158 | cv.line(image, landmark_point[16][1], landmark_point[18][1], 159 | (0, 255, 0), 2) 160 | if landmark_point[18][0] > visibility_th and landmark_point[20][0] > visibility_th: 161 | cv.line(image, landmark_point[18][1], landmark_point[20][1], 162 | (0, 255, 0), 2) 163 | if landmark_point[20][0] > visibility_th and landmark_point[22][0] > visibility_th: 164 | cv.line(image, landmark_point[20][1], landmark_point[22][1], 165 | (0, 255, 0), 2) 166 | if landmark_point[22][0] > visibility_th and landmark_point[16][0] > visibility_th: 167 | cv.line(image, landmark_point[22][1], landmark_point[16][1], 168 | (0, 255, 0), 2) 169 | 170 | # 胴体 171 | if landmark_point[11][0] > visibility_th and landmark_point[23][0] > visibility_th: 172 | cv.line(image, landmark_point[11][1], landmark_point[23][1], 173 | (0, 255, 0), 2) 174 | if landmark_point[12][0] > visibility_th and landmark_point[24][0] > visibility_th: 175 | cv.line(image, landmark_point[12][1], landmark_point[24][1], 176 | (0, 255, 0), 2) 177 | if landmark_point[23][0] > visibility_th and landmark_point[24][0] > visibility_th: 178 | cv.line(image, landmark_point[23][1], landmark_point[24][1], 179 | (0, 255, 0), 2) 180 | 181 | if len(landmark_point) > 25: 182 | # 右足 183 | if landmark_point[23][0] > visibility_th and landmark_point[25][0] > visibility_th: 184 | cv.line(image, landmark_point[23][1], landmark_point[25][1], 185 | (0, 255, 0), 2) 186 | if landmark_point[25][0] > visibility_th and landmark_point[27][0] > visibility_th: 187 | cv.line(image, landmark_point[25][1], landmark_point[27][1], 188 | (0, 255, 0), 2) 189 | if landmark_point[27][0] > visibility_th and landmark_point[29][0] > visibility_th: 190 | cv.line(image, landmark_point[27][1], landmark_point[29][1], 191 | (0, 255, 0), 2) 192 | if landmark_point[29][0] > visibility_th and landmark_point[31][0] > visibility_th: 193 | cv.line(image, landmark_point[29][1], landmark_point[31][1], 194 | (0, 255, 0), 2) 195 | 196 | # 左足 197 | if landmark_point[24][0] > visibility_th and landmark_point[26][0] > visibility_th: 198 | cv.line(image, landmark_point[24][1], landmark_point[26][1], 199 | (0, 255, 0), 2) 200 | if landmark_point[26][0] > visibility_th and landmark_point[28][0] > visibility_th: 201 | cv.line(image, landmark_point[26][1], landmark_point[28][1], 202 | (0, 255, 0), 2) 203 | if landmark_point[28][0] > visibility_th and landmark_point[30][0] > visibility_th: 204 | cv.line(image, landmark_point[28][1], landmark_point[30][1], 205 | (0, 255, 0), 2) 206 | if landmark_point[30][0] > visibility_th and landmark_point[32][0] > visibility_th: 207 | cv.line(image, landmark_point[30][1], landmark_point[32][1], 208 | (0, 255, 0), 2) 209 | return image 210 | 211 | 212 | def calc_bounding_rect(image, landmarks): 213 | image_width, image_height = image.shape[1], image.shape[0] 214 | 215 | landmark_array = np.empty((0, 2), int) 216 | 217 | for _, landmark in enumerate(landmarks.landmark): 218 | landmark_x = min(int(landmark.x * image_width), image_width - 1) 219 | landmark_y = min(int(landmark.y * image_height), image_height - 1) 220 | 221 | landmark_point = [np.array((landmark_x, landmark_y))] 222 | 223 | landmark_array = np.append(landmark_array, landmark_point, axis=0) 224 | 225 | x, y, w, h = cv.boundingRect(landmark_array) 226 | 227 | return [x, y, x + w, y + h] 228 | 229 | 230 | def draw_bounding_rect(use_brect, 
image, brect): 231 | if use_brect: 232 | # 外接矩形 233 | cv.rectangle(image, (brect[0], brect[1]), (brect[2], brect[3]), 234 | (0, 255, 0), 2) 235 | 236 | return image 237 | 238 | 239 | if __name__ == "__main__": 240 | enable_segmentation = True 241 | 242 | vid_path = 'kunkun.mp4' 243 | save_root = 'analysis_frames' 244 | raw_sub = os.path.join(save_root, 'raw_frames') 245 | pose_sub = os.path.join(save_root, 'landmark') 246 | 247 | cap = cv.VideoCapture(vid_path) 248 | if not cap.isOpened(): 249 | print("Error Opening video File") 250 | raise IOError 251 | 252 | frames = [] 253 | cnt = 0 254 | while cap.isOpened(): 255 | ret, frame = cap.read() 256 | if ret: 257 | frames.append(frame) 258 | 259 | filename = os.path.join(os.path.join(raw_sub, f'{cnt}.jpg')) 260 | cv.imwrite(filename, frame) 261 | 262 | cnt += 1 263 | else: 264 | break 265 | 266 | cap.release() 267 | 268 | out = cv.VideoWriter('CPose.avi', cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 25, 269 | (frames[0].shape[1], frames[0].shape[0])) 270 | filenames = os.listdir(raw_sub) 271 | filenames.sort(key=lambda x: int(x.split('.')[0])) 272 | for filename in filenames: 273 | img_path = os.path.join(raw_sub, filename) 274 | out_path = os.path.join(pose_sub, filename.replace('.jpg', '_landmark.jpg')) 275 | 276 | mp_pose = mp.solutions.pose 277 | pose = mp_pose.Pose( 278 | # upper_body_only=upper_body_only, 279 | model_complexity=2, 280 | enable_segmentation=True, 281 | min_detection_confidence=0.5, 282 | min_tracking_confidence=0.5 283 | ) 284 | 285 | img = cv.imread(img_path) 286 | results = pose.process(img) 287 | debug_image = copy.deepcopy(img) 288 | 289 | # 描画 ################################################################ 290 | if enable_segmentation and results.segmentation_mask is not None: 291 | mask = np.stack((results.segmentation_mask,) * 3, 292 | axis=-1) > 0.5 293 | bg_resize_image = np.zeros(img.shape, dtype=np.uint8) 294 | bg_resize_image[:] = (0, 0, 0) 295 | debug_image = np.where(mask, debug_image, bg_resize_image) 296 | if results.pose_landmarks is not None: 297 | brect = calc_bounding_rect(debug_image, results.pose_landmarks) 298 | debug_image = draw_landmarks( 299 | debug_image, 300 | results.pose_landmarks, 301 | # upper_body_only, 302 | ) 303 | # debug_image = draw_bounding_rect(True, debug_image, brect) 304 | 305 | out.write(debug_image) 306 | cv.imwrite(out_path, debug_image) 307 | 308 | out.release() 309 | -------------------------------------------------------------------------------- /ControlCentre.py: -------------------------------------------------------------------------------- 1 | """UI 2 | remote-control: tello-sdk 3 | voice-interaction: speak 4 | compute-engine 5 | image-processing (pre&post) 6 | model-inference 7 | visualization: 8 | inference-result 9 | tello-status 10 | """ 11 | 12 | import os 13 | import time 14 | import traceback 15 | 16 | import cv2 17 | import sys 18 | import logging 19 | import threading 20 | import numpy as np 21 | import pyqtgraph as pg 22 | from collections import deque 23 | from DataPipeline import DataPipeline 24 | from ModelInference import PoseDetector, IkunRecognizer, FaceDetector 25 | # from SpeechAgent import SpeechAgent 26 | # from djitellopy import Tello 27 | from debugtellopy import DebugTello as Tello 28 | from PID import PID 29 | from ControlCentreUI import Ui_MainWindow 30 | from PyQt5 import QtCore, QtGui, QtWidgets 31 | from PyQt5.QtCore import QThread, QDir, QTimer, QSize, Qt 32 | from PyQt5.QtGui import QPixmap, QImage, QTextCursor 33 | from PyQt5.QtWidgets 
import QMainWindow, QFileDialog, QMessageBox, QDialog, QLineEdit, QDialogButtonBox, QFormLayout 34 | from PyQt5.QtGui import QIntValidator, QDoubleValidator 35 | 36 | logging.basicConfig(format='%(process)s-%(thread)d-%(levelname)s-%(message)s', level=logging.DEBUG) 37 | 38 | # order: yaw->zpos->xpos 39 | ORDERS = ['yaw', 'zpos', 'xpos'] 40 | SAMPLES_NUMBER = 100 41 | RECOGNIZER_THR = 0.55 42 | 43 | 44 | class ProcessStreamThread(QThread): 45 | stream_signal = QtCore.pyqtSignal(tuple) 46 | fps = 20 47 | loop_sec = 1 / fps 48 | 49 | def __init__(self, tello_agent, detector_config): 50 | super().__init__() 51 | self.tello_agent = tello_agent # for frame-getting 52 | self.detector = PoseDetector(detector_config['model_complexity'], 53 | detector_config['enable_segmentation'], 54 | detector_config['min_detection_confidence'], 55 | detector_config['min_tracking_confidence']) 56 | self.stream_ret_queue = deque(maxlen=2000) 57 | self.isStreaming = False 58 | self.frame_h, self.frame_w = (None, None) 59 | 60 | def get_frame_shape(self): 61 | return self.frame_h, self.frame_w 62 | 63 | def run(self) -> None: 64 | 65 | while True: 66 | start_time = time.time() 67 | 68 | frame = self.tello_agent.get_frame_read().frame 69 | try: 70 | frame = DataPipeline.pipe_pre(frame) # flip and convert-color 71 | except ValueError as e: 72 | traceback.print_exc() 73 | continue 74 | 75 | if not self.isStreaming: 76 | self.isStreaming = True 77 | self.frame_h, self.frame_w = frame.shape[:2] 78 | 79 | detect_ret = self.detector.inference(frame) # mediapipe's output 80 | 81 | time_consume = time.time() - start_time 82 | # print(f"consume time: {time_consume * 1000}ms") 83 | 84 | self.stream_ret_queue.append((frame, detect_ret, time_consume)) 85 | self.stream_signal.emit((frame, detect_ret)) 86 | 87 | if time_consume <= self.loop_sec: 88 | time.sleep(self.loop_sec - time_consume) 89 | else: 90 | # logging.info("Lower FPS than set value!") 91 | pass 92 | 93 | 94 | class VisualizationThread(QThread): 95 | visualization_signal = QtCore.pyqtSignal(dict) 96 | 97 | def __init__(self, recognizer_path, label_imageTransmission, label_skeleton, tello_status_dict, stream_ret_queue): 98 | super().__init__() 99 | self.label_imageTransmission = label_imageTransmission 100 | self.label_skeleton = label_skeleton 101 | self.stream_ret_queue = stream_ret_queue # should be a shallow copy! 102 | self.tello_status_dict = tello_status_dict # should be a shallow copy! 
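# stream_ret_queue and tello_status_dict above are shared with the producer threads rather than copied;
# collections.deque supports thread-safe append()/popleft(), so this thread can drain the queue while
# ProcessStreamThread keeps filling it.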
103 | 104 | # store pose-landmarks detection result 105 | self.detect_ret_list = [] 106 | 107 | self.recognizer = IkunRecognizer(recognizer_path) 108 | 109 | def update_frame(self, frame): 110 | """Update UI-frame""" 111 | showImage = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888) 112 | pixmap = QPixmap.fromImage(showImage) 113 | self.label_imageTransmission.setPixmap(pixmap) 114 | 115 | def update_graphic(self, skeleton_image): 116 | showImage = QImage(skeleton_image.data, skeleton_image.shape[1], skeleton_image.shape[0], QImage.Format_RGB888) 117 | pixmap = QPixmap.fromImage(showImage) 118 | self.label_skeleton.setPixmap(pixmap) 119 | 120 | # graphic-1: update flying curve 121 | # available_length = len(self.controller_input_queue) 122 | # self.signal_arrays[self.SAMPLES_NUMBER - available_length:] = self.controller_input_queue 123 | # 124 | # tab_index = self.tabWidget.currentIndex() 125 | # self.signal_plots[tab_index].setData(self.time_array, self.signal_arrays[tab_index]) 126 | # self.signal_plots[tab_index].updateItems() 127 | # self.signal_plots[tab_index].sigPlotChanged.emit(self.signal_plots[tab_index]) # ? 128 | 129 | def run(self) -> None: 130 | while True: 131 | if self.stream_ret_queue: 132 | # print(len(self.stream_ret_queue)) 133 | start_time = time.time() 134 | 135 | # image view 136 | frame, detect_ret, time_consume = self.stream_ret_queue.popleft() 137 | if detect_ret is not None: 138 | # append the latest pose-landmarks detection result 139 | self.detect_ret_list.append(detect_ret) 140 | self.detect_ret_list.pop(0) if len(self.detect_ret_list) > self.recognizer.SEQUENCE_NUM else None 141 | 142 | if len(self.detect_ret_list) == IkunRecognizer.SEQUENCE_NUM: 143 | scores = self.recognizer.inference(self.detect_ret_list) 144 | argmax = np.argmax(scores) 145 | is_ikun = True if (argmax == 1 and (scores[0][argmax] > RECOGNIZER_THR)) else False 146 | else: 147 | is_ikun = False 148 | 149 | infer = {'landmark': detect_ret, 'is_ikun': is_ikun} 150 | 151 | # print(self.tello_status_dict) 152 | self.tello_status_dict['FPS'] = round(1/time_consume, 1) 153 | frame, skeleton = DataPipeline.pipe_post(frame, infer, self.tello_status_dict) 154 | self.update_frame(frame) 155 | self.update_graphic(skeleton) 156 | 157 | # if skeleton is not None: 158 | # cv2.imshow('demo', skeleton) 159 | # if cv2.waitKey(1) & 0xFF == ord('q'): 160 | # break 161 | 162 | self.visualization_signal.emit(infer) 163 | 164 | time_consume = time.time() - start_time 165 | # print(f"visualize consume: {time_consume * 1000}ms") 166 | # if 'delay' is not used, there will be a very significant display delay even of '0.01ms'! 
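# sleep away whatever is left of the stream thread's frame budget so this loop runs at
# roughly the same rate as ProcessStreamThread and the shared deque does not back up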
167 | time.sleep(max(0., ProcessStreamThread.loop_sec - time_consume)) 168 | 169 | 170 | class ControlThread(QThread): 171 | output_signal = QtCore.pyqtSignal(str) 172 | 173 | def __init__(self, tello_agent, pid_input_queue: deque): 174 | super().__init__() 175 | self.tello_agent = tello_agent # for remote-control 176 | self.pid_input_queue = pid_input_queue # for communication with main-thread 177 | self._auto_mode = False 178 | 179 | self.yaw_pid = PID(1, 0.01, 0.1) # yaw control 180 | self.zpos_pid = PID(1, 0.01, 0.1) # up-down control 181 | self.xpos_pid = PID(1, 0.01, 0.1) # forward-backward control 182 | self.yaw_pid.output_limits = (-50, 50) 183 | self.zpos_pid.output_limits = (-50, 50) 184 | self.xpos_pid.output_limits = (-50, 50) 185 | 186 | @property 187 | def auto_mode(self): 188 | return self._auto_mode 189 | 190 | @auto_mode.setter 191 | def auto_mode(self, enabled): 192 | self.set_auto_mode(enabled) 193 | 194 | def set_auto_mode(self, enabled: bool): 195 | self._auto_mode = enabled 196 | 197 | # remember to set each pid-controller's auto_mode properties 198 | self.yaw_pid.auto_mode = enabled 199 | self.zpos_pid.auto_mode = enabled 200 | self.xpos_pid.auto_mode = enabled 201 | 202 | def set_controller_setpoints(self, setpoints): 203 | self.yaw_pid.setpoint = setpoints['yaw'] 204 | self.zpos_pid.setpoint = setpoints['zpos'] 205 | self.xpos_pid.setpoint = setpoints['xpos'] 206 | 207 | def tunings(self, ctrl_type: str, values: tuple): 208 | if ctrl_type == 'yaw': 209 | self.yaw_pid.tunings(values) 210 | elif ctrl_type == 'zpos': 211 | self.zpos_pid.tunings(values) 212 | elif ctrl_type == 'xpos': 213 | self.xpos_pid.tunings(values) 214 | else: 215 | raise ValueError 216 | 217 | def run(self) -> None: 218 | while True: 219 | if self.tello_agent.is_flying and self._auto_mode: 220 | try: 221 | inputs = self.pid_input_queue[-1] # QA: pop 222 | except: 223 | logging.warning('Not enough measurements!') 224 | continue 225 | 226 | yaw_output = self.yaw_pid(inputs[0]) if inputs[0] is not None else 0 227 | zpos_output = self.zpos_pid(inputs[1]) if inputs[1] is not None else 0 228 | xpos_output = self.xpos_pid(inputs[2]) if inputs[2] is not None else 0 229 | 230 | """ 231 | left_right_vel = 0 232 | forward_backward_vel = min(100, max(xpos_output, -100)) 233 | up_down_vel = min(100, max(zpos_output, -100)) 234 | yaw_vel = min(100, max(yaw_output, -100)) # or max(-100, min(yaw_output, 100)) 235 | self.tello_agent.send_rc_control(left_right_vel, forward_backward_vel, up_down_vel, yaw_vel) 236 | """ 237 | if not np.any((yaw_output, zpos_output, xpos_output)): 238 | self.tello_agent.send_rc_control(0, xpos_output, zpos_output, yaw_output) 239 | else: 240 | # logging.warning("") 241 | pass 242 | else: 243 | pass 244 | 245 | 246 | class TelloStatusThread(QThread): 247 | status_signal = QtCore.pyqtSignal(str) 248 | 249 | def __init__(self, tello_agent: Tello): 250 | super().__init__() 251 | self.tello_agent = tello_agent 252 | self.status = {} 253 | 254 | def run(self) -> None: 255 | while True: 256 | try: 257 | # can't reassign 'self.status' using this way: self.status = {...} 258 | # keep only those states or attributes that will not change dramatically 259 | # dimension: °C, %, cm, s 260 | self.status['temperature'] = self.tello_agent.get_temperature(), 261 | # 'height': self.tello_agent.get_height(), 262 | # 'height_barometer': self.tello_agent.get_barometer(), 263 | # 'distance_tof': self.tello_agent.get_distance_tof(), 264 | self.status['battery'] = self.tello_agent.get_battery() 265 | 
self.status['flight_time'] = self.tello_agent.get_flight_time() 266 | 267 | if self.status['battery'] < 20: 268 | self.status_signal.emit(f"Low Battery: {self.status['battery']}%") 269 | except ConnectionAbortedError: 270 | print(traceback.format_exc()) 271 | 272 | # update tello status every 5 seconds 273 | QThread.sleep(5) 274 | 275 | 276 | class MainWindow(QMainWindow, Ui_MainWindow): 277 | machine_signal = QtCore.pyqtSignal(object) 278 | 279 | def __init__(self, recognizer_path, yunet_path, mpose_config): 280 | super().__init__() 281 | self.setupUi(self) 282 | self.setupUiMore() 283 | 284 | # graphic-view initialization 285 | pen = pg.mkPen(color=(255, 92, 92), width=1.5) 286 | self.time_array = np.arange(SAMPLES_NUMBER, 0, -1) # 100 -> 1, same length as each signal array 287 | self.graphicsViews = [self.graphicsView_yaw_plotting, self.graphicsView_zpos_plotting, self.graphicsView_xpos_plotting] 288 | self.signal_arrays = [] 289 | self.signal_plots = [] 290 | for i in range(3): 291 | self.graphicsViews.append(eval('self.graphicsView_' + ORDERS[i] + '_plotting')) 292 | self.signal_arrays.append(np.zeros(SAMPLES_NUMBER)) 293 | self.signal_plots.append(pg.PlotDataItem(self.time_array, self.signal_arrays[i], pen=pen, name=f'{ORDERS[i]} [pixel]')) 294 | # pyqtgraph addItem 295 | self.graphicsViews[i].addItem(self.signal_plots[i]) 296 | 297 | # queue (used in different thread) 298 | self.controller_input_queue = deque(maxlen=SAMPLES_NUMBER) # [(yaw_input, zpos_input, xpos_input), ...] 299 | self.speech_msg_queue = deque(maxlen=10) 300 | 301 | # constructor 302 | # self.agent = SpeechAgent() 303 | self.tello = Tello() 304 | 305 | self.face_detector = FaceDetector(yunet_path) 306 | 307 | # initialize 308 | self.tello.connect() 309 | 310 | # self.tello.set_video_resolution(Tello.RESOLUTION_480P) 311 | # self.tello.set_video_bitrate(Tello.BITRATE_1MBPS) 312 | # self.tello.set_video_fps(Tello.FPS_30) 313 | self.tello.streamon() 314 | 315 | # initialize tello status updating thread (to avoid the impact of instant wifi-communication on FPS) 316 | self.tello_status_tracker = TelloStatusThread(self.tello) 317 | # initialize processing thread 318 | self.stream_processor = ProcessStreamThread(self.tello, mpose_config) 319 | # ... 320 | self.visualizer = VisualizationThread(recognizer_path, 321 | self.label_imageTransmission, 322 | self.label_skeleton, 323 | self.tello_status_tracker.status, 324 | self.stream_processor.stream_ret_queue) 325 | # initialize position control thread 326 | self.position_controller = ControlThread(self.tello, self.controller_input_queue) 327 | 328 | # some tricks 329 | self.is_people = False 330 | self.is_ikun = False 331 | 332 | # register callback 333 | self.register_callback() 334 | 335 | def setupUiMore(self): 336 | # lineEdit validator 337 | lineEdit_names = ['lineEdit_YawProportional', 'lineEdit_YawIntegral', 'lineEdit_YawDerivative', 338 | 'lineEdit_ZPosProportional', 'lineEdit_ZPosIntegral', 'lineEdit_ZPosDerivative', 339 | 'lineEdit_XPosProportional', 'lineEdit_XPosIntegral', 'lineEdit_XPosDerivative'] 340 | validator = QDoubleValidator(self) 341 | validator.setRange(0, 10) 342 | validator.setNotation(QDoubleValidator.StandardNotation) 343 | validator.setDecimals(2) 344 | # set validator in one line 345 | # [eval('self.' + name + '.setValidator(validator)') for name in lineEdit_names] # error 346 | symbols = {"self": self, 'validator': validator} 347 | [eval('self' + '.'
+ name + '.setValidator(validator)', symbols) for name in lineEdit_names] 348 | 349 | def get_tello_status(self): 350 | return self.tello_status_tracker.status 351 | 352 | def register_callback(self): 353 | # self.machine_signal.connect(self.machine_cb) 354 | self.stream_processor.stream_signal.connect(self.update_processed_info_cb) 355 | self.visualizer.visualization_signal.connect(self.speech_cb) 356 | # self.position_controller.output_signal.connect() 357 | # self.tello_status_tracker.status_signal.connect(self.handle_status_cb) 358 | 359 | # pushButton group 360 | # running 361 | self.pushButton_start.clicked.connect(self.start_cb) 362 | self.pushButton_land.clicked.connect(self.land_cb) 363 | # PID parameters setting 364 | self.pushButton_setYawParameters.clicked.connect(self.setPID_cb) 365 | self.pushButton_setZPosParameters.clicked.connect(self.setPID_cb) 366 | self.pushButton_setXPosParameters.clicked.connect(self.setPID_cb) 367 | 368 | # radioButton group 369 | self.radioButton_manual.toggled.connect(self.ratio_manual_cb) 370 | self.radioButton_auto.toggled.connect(self.ratio_auto_cb) 371 | 372 | def ratio_manual_cb(self): 373 | self.pushButton_up.setEnabled(True) 374 | self.pushButton_down.setEnabled(True) 375 | self.pushButton_turnLeft.setEnabled(True) 376 | self.pushButton_turnRight.setEnabled(True) 377 | self.pushButton_forward.setEnabled(True) 378 | self.pushButton_backward.setEnabled(True) 379 | 380 | self.position_controller.auto_mode = False 381 | 382 | def ratio_auto_cb(self): 383 | self.pushButton_up.setDisabled(True) 384 | self.pushButton_down.setDisabled(True) 385 | self.pushButton_turnLeft.setDisabled(True) 386 | self.pushButton_turnRight.setDisabled(True) 387 | self.pushButton_forward.setDisabled(True) 388 | self.pushButton_backward.setDisabled(True) 389 | 390 | self.position_controller.auto_mode = True 391 | 392 | def setPID_cb(self): 393 | pid_type = ORDERS[self.tabWidget.currentIndex()] 394 | # list elements order: P -> I -> D 395 | widget_prefix = {'yaw': 'Yaw', 'zpos': 'ZPos', 'xpos': 'XPos'}[pid_type] # match the lineEdit object names 396 | line_edits_group = [getattr(self, f'lineEdit_{widget_prefix}{term}') 397 | for term in ('Proportional', 'Integral', 'Derivative')] 398 | set_vals = tuple(float(le.text()) for le in line_edits_group) 399 | self.position_controller.tunings(pid_type, values=set_vals) 400 | logging.info(f"PID setting on {pid_type} controller: {set_vals}") 401 | 402 | def handle_status_cb(self, msg): 403 | # if low-battery, warn and land 404 | if self.tello_status_tracker.status['battery'] <= 10: 405 | msg = "Low Battery Warning. Force Landing!"
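# land immediately; the warning text replaces the incoming status message shown on the status bar below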
406 | self.tello.land() 407 | else: 408 | pass 409 | 410 | self.statusbar.setStyleSheet("color: red") 411 | self.statusbar.showMessage(msg) 412 | 413 | def compute_measurement(self, frame, detect_ret) -> list: 414 | """ 415 | :param frame: 416 | :param infer_ret: 417 | :return: [nose's pixel-val on width-dir, nose's pixel-val on height-dir, head's width pixel-val] yaw->zpos->xpox 418 | """ 419 | image_h, image_w = frame.shape[:2] 420 | frame_landmarks = detect_ret.landmark 421 | # points_list = [[int(landmark.x * image_w), int(landmark.y * image_h)] for landmark in frame_landmarks] # 33点 422 | 423 | # for item that not to control, return measurement of 'None' 424 | yaw_meas = frame_landmarks[0].x * image_w 425 | zpos_meas = frame_landmarks[0].y * image_h 426 | 427 | # ret_faces = self.face_detector.inference(frame) 428 | # xpos_meas = ret_faces[0][2] if ret_faces is not None else None 429 | xpos_meas = None 430 | 431 | measurement = [yaw_meas, zpos_meas, xpos_meas] # orders: yaw->zpos->xpos 432 | 433 | return measurement 434 | 435 | def update_processed_info_cb(self, stream_signal): 436 | """Postprocessing of ProcessStreamThread's each iteration""" 437 | frame, detect_ret = stream_signal 438 | 439 | if self.position_controller.auto_mode: 440 | measurement = self.compute_measurement(frame, detect_ret) 441 | self.controller_input_queue.append(measurement) 442 | 443 | def speech_cb(self, infer_signal): 444 | # update tricks 445 | if not self.is_people: 446 | if infer_signal['landmark'] is not None: 447 | self.is_people = True 448 | msg = "检测到有行人出现" 449 | logging.info(msg) 450 | self.speech_msg_queue.append(msg) 451 | elif infer_signal['landmark'] is None: 452 | self.is_people = False 453 | self.is_ikun = False 454 | 455 | if not self.is_ikun: 456 | if infer_signal['is_ikun']: 457 | self.is_ikun = True 458 | msg = "发现真IKUN" 459 | logging.info(msg) 460 | self.speech_msg_queue.append(msg) 461 | 462 | def start_cb(self): 463 | logging.info("start-button pushed") 464 | # start a stream-processing thread 465 | self.stream_processor.start() 466 | self.visualizer.start() 467 | self.tello_status_tracker.start() 468 | 469 | while not self.stream_processor.isStreaming: 470 | logging.info("Stream not available") 471 | time.sleep(1) 472 | logging.info("Streaming on!") 473 | 474 | frame_h, frame_w = self.stream_processor.get_frame_shape() 475 | 476 | # config and start a position-control thread. 477 | # eyes locate at 1/2-width, 3/4-height of the image. 478 | # The width of the head needs to occupy 1/10-width of the image. 479 | # pid_setpoints = {'yaw': int(frame_w / 2), 'xpos': int(frame_w / 10), 'zpos': int(frame_h * 3 / 4)} 480 | # self.position_controller.set_controller_setpoints(pid_setpoints) 481 | # self.position_controller.start() 482 | 483 | # take-off then leave this func 484 | tello_battery = self.tello.get_battery() 485 | if tello_battery > 30: 486 | logging.info(f"Sufficient battery power: {tello_battery}%. TelloDrone takeoff!") 487 | self.tello.takeoff() 488 | self.tello.move_up(50) 489 | else: 490 | logging.error(f"TelloDrone low battery: {tello_battery}%. 
Please charge first!") 491 | 492 | def land_cb(self): 493 | logging.info("stop-button pushed") 494 | self.tello.land() 495 | 496 | 497 | if __name__ == '__main__': 498 | RECOGNIZER_PATH = './research/saved_model/ikun_classifier45' 499 | YUNET_PATH = './research/yunet_model/face_detection_yunet_2022mar.onnx' 500 | MPOSE_CONFIG = {'model_complexity': 2, 501 | 'enable_segmentation': False, 502 | 'min_detection_confidence': 0.5, 503 | 'min_tracking_confidence': 0.5} 504 | 505 | app = QtWidgets.QApplication(sys.argv) 506 | window = MainWindow(RECOGNIZER_PATH, YUNET_PATH, MPOSE_CONFIG) 507 | window.show() 508 | sys.exit(app.exec_()) 509 | -------------------------------------------------------------------------------- /ControlCentreUI.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Form implementation generated from reading ui file 'ControlCentre.ui' 4 | # 5 | # Created by: PyQt5 UI code generator 5.15.9 6 | # 7 | # WARNING: Any manual changes made to this file will be lost when pyuic5 is 8 | # run again. Do not edit this file unless you know what you are doing. 9 | 10 | 11 | from PyQt5 import QtCore, QtGui, QtWidgets 12 | 13 | 14 | class Ui_MainWindow(object): 15 | def setupUi(self, MainWindow): 16 | MainWindow.setObjectName("MainWindow") 17 | MainWindow.resize(1120, 870) 18 | self.centralwidget = QtWidgets.QWidget(MainWindow) 19 | self.centralwidget.setObjectName("centralwidget") 20 | self.label_imageTransmission = QtWidgets.QLabel(self.centralwidget) 21 | self.label_imageTransmission.setGeometry(QtCore.QRect(10, 300, 720, 540)) 22 | font = QtGui.QFont() 23 | font.setPointSize(12) 24 | font.setBold(True) 25 | font.setWeight(75) 26 | self.label_imageTransmission.setFont(font) 27 | self.label_imageTransmission.setAutoFillBackground(True) 28 | self.label_imageTransmission.setAlignment(QtCore.Qt.AlignCenter) 29 | self.label_imageTransmission.setObjectName("label_imageTransmission") 30 | self.groupBox_telloController = QtWidgets.QGroupBox(self.centralwidget) 31 | self.groupBox_telloController.setGeometry(QtCore.QRect(10, 10, 1101, 281)) 32 | font = QtGui.QFont() 33 | font.setFamily("SimSun-ExtB") 34 | font.setPointSize(10) 35 | font.setBold(False) 36 | font.setWeight(50) 37 | self.groupBox_telloController.setFont(font) 38 | self.groupBox_telloController.setObjectName("groupBox_telloController") 39 | self.groupBox_PIDTuning = QtWidgets.QGroupBox(self.groupBox_telloController) 40 | self.groupBox_PIDTuning.setGeometry(QtCore.QRect(510, 20, 581, 251)) 41 | self.groupBox_PIDTuning.setObjectName("groupBox_PIDTuning") 42 | self.tabWidget = QtWidgets.QTabWidget(self.groupBox_PIDTuning) 43 | self.tabWidget.setGeometry(QtCore.QRect(6, 20, 571, 221)) 44 | self.tabWidget.setObjectName("tabWidget") 45 | self.tab_Yaw = QtWidgets.QWidget() 46 | self.tab_Yaw.setObjectName("tab_Yaw") 47 | self.groupBox_YawSettings = QtWidgets.QGroupBox(self.tab_Yaw) 48 | self.groupBox_YawSettings.setGeometry(QtCore.QRect(10, 10, 171, 181)) 49 | self.groupBox_YawSettings.setObjectName("groupBox_YawSettings") 50 | self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_YawSettings) 51 | self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(10, 29, 81, 101)) 52 | self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2") 53 | self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2) 54 | self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) 55 | self.verticalLayout_2.setObjectName("verticalLayout_2") 56 | self.label_YawProportional 
= QtWidgets.QLabel(self.verticalLayoutWidget_2) 57 | font = QtGui.QFont() 58 | font.setPointSize(9) 59 | font.setUnderline(False) 60 | self.label_YawProportional.setFont(font) 61 | self.label_YawProportional.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 62 | self.label_YawProportional.setObjectName("label_YawProportional") 63 | self.verticalLayout_2.addWidget(self.label_YawProportional) 64 | self.label_YawIntegral = QtWidgets.QLabel(self.verticalLayoutWidget_2) 65 | font = QtGui.QFont() 66 | font.setPointSize(9) 67 | self.label_YawIntegral.setFont(font) 68 | self.label_YawIntegral.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 69 | self.label_YawIntegral.setObjectName("label_YawIntegral") 70 | self.verticalLayout_2.addWidget(self.label_YawIntegral) 71 | self.label_YawDerivative = QtWidgets.QLabel(self.verticalLayoutWidget_2) 72 | font = QtGui.QFont() 73 | font.setPointSize(9) 74 | self.label_YawDerivative.setFont(font) 75 | self.label_YawDerivative.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 76 | self.label_YawDerivative.setObjectName("label_YawDerivative") 77 | self.verticalLayout_2.addWidget(self.label_YawDerivative) 78 | self.verticalLayoutWidget_6 = QtWidgets.QWidget(self.groupBox_YawSettings) 79 | self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(100, 19, 61, 121)) 80 | self.verticalLayoutWidget_6.setObjectName("verticalLayoutWidget_6") 81 | self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_6) 82 | self.verticalLayout_6.setContentsMargins(0, 0, 0, 0) 83 | self.verticalLayout_6.setObjectName("verticalLayout_6") 84 | self.lineEdit_YawProportional = QtWidgets.QLineEdit(self.verticalLayoutWidget_6) 85 | self.lineEdit_YawProportional.setAlignment(QtCore.Qt.AlignCenter) 86 | self.lineEdit_YawProportional.setObjectName("lineEdit_YawProportional") 87 | self.verticalLayout_6.addWidget(self.lineEdit_YawProportional) 88 | self.lineEdit_YawIntegral = QtWidgets.QLineEdit(self.verticalLayoutWidget_6) 89 | self.lineEdit_YawIntegral.setAlignment(QtCore.Qt.AlignCenter) 90 | self.lineEdit_YawIntegral.setObjectName("lineEdit_YawIntegral") 91 | self.verticalLayout_6.addWidget(self.lineEdit_YawIntegral) 92 | self.lineEdit_YawDerivative = QtWidgets.QLineEdit(self.verticalLayoutWidget_6) 93 | self.lineEdit_YawDerivative.setAlignment(QtCore.Qt.AlignCenter) 94 | self.lineEdit_YawDerivative.setObjectName("lineEdit_YawDerivative") 95 | self.verticalLayout_6.addWidget(self.lineEdit_YawDerivative) 96 | self.pushButton_setYawParameters = QtWidgets.QPushButton(self.groupBox_YawSettings) 97 | self.pushButton_setYawParameters.setGeometry(QtCore.QRect(10, 140, 151, 31)) 98 | font = QtGui.QFont() 99 | font.setBold(True) 100 | font.setWeight(75) 101 | self.pushButton_setYawParameters.setFont(font) 102 | icon = QtGui.QIcon() 103 | icon.addPixmap(QtGui.QPixmap("research/resources/pidconfig.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) 104 | self.pushButton_setYawParameters.setIcon(icon) 105 | self.pushButton_setYawParameters.setObjectName("pushButton_setYawParameters") 106 | self.horizontalLayoutWidget = QtWidgets.QWidget(self.tab_Yaw) 107 | self.horizontalLayoutWidget.setGeometry(QtCore.QRect(190, 10, 371, 181)) 108 | self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget") 109 | self.horizontalLayout_yaw_plotting = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget) 110 | self.horizontalLayout_yaw_plotting.setContentsMargins(0, 0, 0, 0) 111 | 
self.horizontalLayout_yaw_plotting.setObjectName("horizontalLayout_yaw_plotting") 112 | self.graphicsView_yaw_plotting = GraphicsView(self.horizontalLayoutWidget) 113 | self.graphicsView_yaw_plotting.setObjectName("graphicsView_yaw_plotting") 114 | self.horizontalLayout_yaw_plotting.addWidget(self.graphicsView_yaw_plotting) 115 | self.tabWidget.addTab(self.tab_Yaw, "") 116 | self.tab_ZPos = QtWidgets.QWidget() 117 | self.tab_ZPos.setObjectName("tab_ZPos") 118 | self.groupBox_ZPosSettings = QtWidgets.QGroupBox(self.tab_ZPos) 119 | self.groupBox_ZPosSettings.setGeometry(QtCore.QRect(10, 10, 171, 181)) 120 | self.groupBox_ZPosSettings.setObjectName("groupBox_ZPosSettings") 121 | self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.groupBox_ZPosSettings) 122 | self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(10, 29, 81, 101)) 123 | self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3") 124 | self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3) 125 | self.verticalLayout_3.setContentsMargins(0, 0, 0, 0) 126 | self.verticalLayout_3.setObjectName("verticalLayout_3") 127 | self.label_ZPosProportional = QtWidgets.QLabel(self.verticalLayoutWidget_3) 128 | font = QtGui.QFont() 129 | font.setPointSize(9) 130 | font.setUnderline(False) 131 | self.label_ZPosProportional.setFont(font) 132 | self.label_ZPosProportional.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 133 | self.label_ZPosProportional.setObjectName("label_ZPosProportional") 134 | self.verticalLayout_3.addWidget(self.label_ZPosProportional) 135 | self.label_ZPosIntegral = QtWidgets.QLabel(self.verticalLayoutWidget_3) 136 | font = QtGui.QFont() 137 | font.setPointSize(9) 138 | self.label_ZPosIntegral.setFont(font) 139 | self.label_ZPosIntegral.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 140 | self.label_ZPosIntegral.setObjectName("label_ZPosIntegral") 141 | self.verticalLayout_3.addWidget(self.label_ZPosIntegral) 142 | self.label_ZPosDerivative = QtWidgets.QLabel(self.verticalLayoutWidget_3) 143 | font = QtGui.QFont() 144 | font.setPointSize(9) 145 | self.label_ZPosDerivative.setFont(font) 146 | self.label_ZPosDerivative.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 147 | self.label_ZPosDerivative.setObjectName("label_ZPosDerivative") 148 | self.verticalLayout_3.addWidget(self.label_ZPosDerivative) 149 | self.verticalLayoutWidget_7 = QtWidgets.QWidget(self.groupBox_ZPosSettings) 150 | self.verticalLayoutWidget_7.setGeometry(QtCore.QRect(100, 19, 61, 121)) 151 | self.verticalLayoutWidget_7.setObjectName("verticalLayoutWidget_7") 152 | self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_7) 153 | self.verticalLayout_7.setContentsMargins(0, 0, 0, 0) 154 | self.verticalLayout_7.setObjectName("verticalLayout_7") 155 | self.lineEdit_ZPosProportional = QtWidgets.QLineEdit(self.verticalLayoutWidget_7) 156 | self.lineEdit_ZPosProportional.setAlignment(QtCore.Qt.AlignCenter) 157 | self.lineEdit_ZPosProportional.setObjectName("lineEdit_ZPosProportional") 158 | self.verticalLayout_7.addWidget(self.lineEdit_ZPosProportional) 159 | self.lineEdit_ZPosIntegral = QtWidgets.QLineEdit(self.verticalLayoutWidget_7) 160 | self.lineEdit_ZPosIntegral.setAlignment(QtCore.Qt.AlignCenter) 161 | self.lineEdit_ZPosIntegral.setObjectName("lineEdit_ZPosIntegral") 162 | self.verticalLayout_7.addWidget(self.lineEdit_ZPosIntegral) 163 | self.lineEdit_ZPosDerivative = QtWidgets.QLineEdit(self.verticalLayoutWidget_7) 164 
| self.lineEdit_ZPosDerivative.setAlignment(QtCore.Qt.AlignCenter) 165 | self.lineEdit_ZPosDerivative.setObjectName("lineEdit_ZPosDerivative") 166 | self.verticalLayout_7.addWidget(self.lineEdit_ZPosDerivative) 167 | self.pushButton_setZPosParameters = QtWidgets.QPushButton(self.groupBox_ZPosSettings) 168 | self.pushButton_setZPosParameters.setGeometry(QtCore.QRect(10, 140, 151, 31)) 169 | font = QtGui.QFont() 170 | font.setBold(True) 171 | font.setWeight(75) 172 | self.pushButton_setZPosParameters.setFont(font) 173 | self.pushButton_setZPosParameters.setIcon(icon) 174 | self.pushButton_setZPosParameters.setObjectName("pushButton_setZPosParameters") 175 | self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.tab_ZPos) 176 | self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(190, 10, 371, 181)) 177 | self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3") 178 | self.horizontalLayout_zpos_plotting = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3) 179 | self.horizontalLayout_zpos_plotting.setContentsMargins(0, 0, 0, 0) 180 | self.horizontalLayout_zpos_plotting.setObjectName("horizontalLayout_zpos_plotting") 181 | self.graphicsView_zpos_plotting = GraphicsView(self.horizontalLayoutWidget_3) 182 | self.graphicsView_zpos_plotting.setObjectName("graphicsView_zpos_plotting") 183 | self.horizontalLayout_zpos_plotting.addWidget(self.graphicsView_zpos_plotting) 184 | self.tabWidget.addTab(self.tab_ZPos, "") 185 | self.tab_XPos = QtWidgets.QWidget() 186 | self.tab_XPos.setObjectName("tab_XPos") 187 | self.groupBox_XPosSettings = QtWidgets.QGroupBox(self.tab_XPos) 188 | self.groupBox_XPosSettings.setGeometry(QtCore.QRect(10, 10, 171, 181)) 189 | self.groupBox_XPosSettings.setObjectName("groupBox_XPosSettings") 190 | self.verticalLayoutWidget_4 = QtWidgets.QWidget(self.groupBox_XPosSettings) 191 | self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(10, 29, 81, 101)) 192 | self.verticalLayoutWidget_4.setObjectName("verticalLayoutWidget_4") 193 | self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_4) 194 | self.verticalLayout_4.setContentsMargins(0, 0, 0, 0) 195 | self.verticalLayout_4.setObjectName("verticalLayout_4") 196 | self.label_XPosProportional = QtWidgets.QLabel(self.verticalLayoutWidget_4) 197 | font = QtGui.QFont() 198 | font.setPointSize(9) 199 | font.setUnderline(False) 200 | self.label_XPosProportional.setFont(font) 201 | self.label_XPosProportional.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 202 | self.label_XPosProportional.setObjectName("label_XPosProportional") 203 | self.verticalLayout_4.addWidget(self.label_XPosProportional) 204 | self.label_XPosIntegral = QtWidgets.QLabel(self.verticalLayoutWidget_4) 205 | font = QtGui.QFont() 206 | font.setPointSize(9) 207 | self.label_XPosIntegral.setFont(font) 208 | self.label_XPosIntegral.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 209 | self.label_XPosIntegral.setObjectName("label_XPosIntegral") 210 | self.verticalLayout_4.addWidget(self.label_XPosIntegral) 211 | self.label_XPosDerivative = QtWidgets.QLabel(self.verticalLayoutWidget_4) 212 | font = QtGui.QFont() 213 | font.setPointSize(9) 214 | self.label_XPosDerivative.setFont(font) 215 | self.label_XPosDerivative.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) 216 | self.label_XPosDerivative.setObjectName("label_XPosDerivative") 217 | self.verticalLayout_4.addWidget(self.label_XPosDerivative) 218 | self.verticalLayoutWidget_8 = 
QtWidgets.QWidget(self.groupBox_XPosSettings) 219 | self.verticalLayoutWidget_8.setGeometry(QtCore.QRect(100, 19, 61, 121)) 220 | self.verticalLayoutWidget_8.setObjectName("verticalLayoutWidget_8") 221 | self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_8) 222 | self.verticalLayout_8.setContentsMargins(0, 0, 0, 0) 223 | self.verticalLayout_8.setObjectName("verticalLayout_8") 224 | self.lineEdit_XPosProportional = QtWidgets.QLineEdit(self.verticalLayoutWidget_8) 225 | self.lineEdit_XPosProportional.setAlignment(QtCore.Qt.AlignCenter) 226 | self.lineEdit_XPosProportional.setObjectName("lineEdit_XPosProportional") 227 | self.verticalLayout_8.addWidget(self.lineEdit_XPosProportional) 228 | self.lineEdit_XPosIntegral = QtWidgets.QLineEdit(self.verticalLayoutWidget_8) 229 | self.lineEdit_XPosIntegral.setAlignment(QtCore.Qt.AlignCenter) 230 | self.lineEdit_XPosIntegral.setObjectName("lineEdit_XPosIntegral") 231 | self.verticalLayout_8.addWidget(self.lineEdit_XPosIntegral) 232 | self.lineEdit_XPosDerivative = QtWidgets.QLineEdit(self.verticalLayoutWidget_8) 233 | self.lineEdit_XPosDerivative.setAlignment(QtCore.Qt.AlignCenter) 234 | self.lineEdit_XPosDerivative.setObjectName("lineEdit_XPosDerivative") 235 | self.verticalLayout_8.addWidget(self.lineEdit_XPosDerivative) 236 | self.pushButton_setXPosParameters = QtWidgets.QPushButton(self.groupBox_XPosSettings) 237 | self.pushButton_setXPosParameters.setGeometry(QtCore.QRect(10, 140, 151, 31)) 238 | font = QtGui.QFont() 239 | font.setBold(True) 240 | font.setWeight(75) 241 | self.pushButton_setXPosParameters.setFont(font) 242 | self.pushButton_setXPosParameters.setIcon(icon) 243 | self.pushButton_setXPosParameters.setObjectName("pushButton_setXPosParameters") 244 | self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.tab_XPos) 245 | self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(190, 10, 371, 181)) 246 | self.horizontalLayoutWidget_4.setObjectName("horizontalLayoutWidget_4") 247 | self.horizontalLayout_xpos_plotting = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_4) 248 | self.horizontalLayout_xpos_plotting.setContentsMargins(0, 0, 0, 0) 249 | self.horizontalLayout_xpos_plotting.setObjectName("horizontalLayout_xpos_plotting") 250 | self.graphicsView_xpos_plotting = GraphicsView(self.horizontalLayoutWidget_4) 251 | self.graphicsView_xpos_plotting.setObjectName("graphicsView_xpos_plotting") 252 | self.horizontalLayout_xpos_plotting.addWidget(self.graphicsView_xpos_plotting) 253 | self.tabWidget.addTab(self.tab_XPos, "") 254 | self.groupBox_RCController = QtWidgets.QGroupBox(self.groupBox_telloController) 255 | self.groupBox_RCController.setGeometry(QtCore.QRect(10, 20, 481, 251)) 256 | self.groupBox_RCController.setObjectName("groupBox_RCController") 257 | self.gridLayoutWidget_3 = QtWidgets.QWidget(self.groupBox_RCController) 258 | self.gridLayoutWidget_3.setGeometry(QtCore.QRect(140, 30, 239, 211)) 259 | self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3") 260 | self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_3) 261 | self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize) 262 | self.gridLayout_3.setContentsMargins(0, 0, 0, 0) 263 | self.gridLayout_3.setObjectName("gridLayout_3") 264 | self.pushButton_down = QtWidgets.QPushButton(self.gridLayoutWidget_3) 265 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) 266 | sizePolicy.setHorizontalStretch(0) 267 | sizePolicy.setVerticalStretch(0) 268 | 
sizePolicy.setHeightForWidth(self.pushButton_down.sizePolicy().hasHeightForWidth()) 269 | self.pushButton_down.setSizePolicy(sizePolicy) 270 | self.pushButton_down.setObjectName("pushButton_down") 271 | self.gridLayout_3.addWidget(self.pushButton_down, 2, 1, 1, 1) 272 | self.pushButton_turnLeft = QtWidgets.QPushButton(self.gridLayoutWidget_3) 273 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) 274 | sizePolicy.setHorizontalStretch(0) 275 | sizePolicy.setVerticalStretch(0) 276 | sizePolicy.setHeightForWidth(self.pushButton_turnLeft.sizePolicy().hasHeightForWidth()) 277 | self.pushButton_turnLeft.setSizePolicy(sizePolicy) 278 | self.pushButton_turnLeft.setObjectName("pushButton_turnLeft") 279 | self.gridLayout_3.addWidget(self.pushButton_turnLeft, 1, 0, 1, 1) 280 | self.pushButton_up = QtWidgets.QPushButton(self.gridLayoutWidget_3) 281 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) 282 | sizePolicy.setHorizontalStretch(0) 283 | sizePolicy.setVerticalStretch(0) 284 | sizePolicy.setHeightForWidth(self.pushButton_up.sizePolicy().hasHeightForWidth()) 285 | self.pushButton_up.setSizePolicy(sizePolicy) 286 | self.pushButton_up.setObjectName("pushButton_up") 287 | self.gridLayout_3.addWidget(self.pushButton_up, 0, 1, 1, 1) 288 | self.pushButton_turnRight = QtWidgets.QPushButton(self.gridLayoutWidget_3) 289 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) 290 | sizePolicy.setHorizontalStretch(0) 291 | sizePolicy.setVerticalStretch(0) 292 | sizePolicy.setHeightForWidth(self.pushButton_turnRight.sizePolicy().hasHeightForWidth()) 293 | self.pushButton_turnRight.setSizePolicy(sizePolicy) 294 | self.pushButton_turnRight.setObjectName("pushButton_turnRight") 295 | self.gridLayout_3.addWidget(self.pushButton_turnRight, 1, 2, 1, 1) 296 | self.groupBox_Run = QtWidgets.QGroupBox(self.groupBox_RCController) 297 | self.groupBox_Run.setGeometry(QtCore.QRect(10, 20, 111, 221)) 298 | self.groupBox_Run.setObjectName("groupBox_Run") 299 | self.verticalLayoutWidget_12 = QtWidgets.QWidget(self.groupBox_Run) 300 | self.verticalLayoutWidget_12.setGeometry(QtCore.QRect(10, 19, 91, 191)) 301 | self.verticalLayoutWidget_12.setObjectName("verticalLayoutWidget_12") 302 | self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_12) 303 | self.verticalLayout_14.setContentsMargins(0, 0, 0, 0) 304 | self.verticalLayout_14.setSpacing(20) 305 | self.verticalLayout_14.setObjectName("verticalLayout_14") 306 | self.verticalLayout_15 = QtWidgets.QVBoxLayout() 307 | self.verticalLayout_15.setSpacing(10) 308 | self.verticalLayout_15.setObjectName("verticalLayout_15") 309 | self.radioButton_manual = QtWidgets.QRadioButton(self.verticalLayoutWidget_12) 310 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed) 311 | sizePolicy.setHorizontalStretch(0) 312 | sizePolicy.setVerticalStretch(0) 313 | sizePolicy.setHeightForWidth(self.radioButton_manual.sizePolicy().hasHeightForWidth()) 314 | self.radioButton_manual.setSizePolicy(sizePolicy) 315 | self.radioButton_manual.setChecked(True) 316 | self.radioButton_manual.setObjectName("radioButton_manual") 317 | self.verticalLayout_15.addWidget(self.radioButton_manual) 318 | self.radioButton_auto = QtWidgets.QRadioButton(self.verticalLayoutWidget_12) 319 | self.radioButton_auto.setObjectName("radioButton_auto") 320 | self.verticalLayout_15.addWidget(self.radioButton_auto) 321 | 
self.verticalLayout_14.addLayout(self.verticalLayout_15) 322 | self.verticalLayout_16 = QtWidgets.QVBoxLayout() 323 | self.verticalLayout_16.setObjectName("verticalLayout_16") 324 | self.pushButton_start = QtWidgets.QPushButton(self.verticalLayoutWidget_12) 325 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) 326 | sizePolicy.setHorizontalStretch(0) 327 | sizePolicy.setVerticalStretch(0) 328 | sizePolicy.setHeightForWidth(self.pushButton_start.sizePolicy().hasHeightForWidth()) 329 | self.pushButton_start.setSizePolicy(sizePolicy) 330 | icon1 = QtGui.QIcon() 331 | icon1.addPixmap(QtGui.QPixmap("research/resources/start.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) 332 | self.pushButton_start.setIcon(icon1) 333 | self.pushButton_start.setObjectName("pushButton_start") 334 | self.verticalLayout_16.addWidget(self.pushButton_start) 335 | self.pushButton_land = QtWidgets.QPushButton(self.verticalLayoutWidget_12) 336 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) 337 | sizePolicy.setHorizontalStretch(0) 338 | sizePolicy.setVerticalStretch(0) 339 | sizePolicy.setHeightForWidth(self.pushButton_land.sizePolicy().hasHeightForWidth()) 340 | self.pushButton_land.setSizePolicy(sizePolicy) 341 | icon2 = QtGui.QIcon() 342 | icon2.addPixmap(QtGui.QPixmap("research/resources/stop.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) 343 | self.pushButton_land.setIcon(icon2) 344 | self.pushButton_land.setObjectName("pushButton_land") 345 | self.verticalLayout_16.addWidget(self.pushButton_land) 346 | self.verticalLayout_14.addLayout(self.verticalLayout_16) 347 | self.verticalLayoutWidget_13 = QtWidgets.QWidget(self.groupBox_RCController) 348 | self.verticalLayoutWidget_13.setGeometry(QtCore.QRect(390, 30, 77, 211)) 349 | self.verticalLayoutWidget_13.setObjectName("verticalLayoutWidget_13") 350 | self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_13) 351 | self.verticalLayout_17.setContentsMargins(0, 0, 0, 0) 352 | self.verticalLayout_17.setObjectName("verticalLayout_17") 353 | self.pushButton_forward = QtWidgets.QPushButton(self.verticalLayoutWidget_13) 354 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) 355 | sizePolicy.setHorizontalStretch(0) 356 | sizePolicy.setVerticalStretch(0) 357 | sizePolicy.setHeightForWidth(self.pushButton_forward.sizePolicy().hasHeightForWidth()) 358 | self.pushButton_forward.setSizePolicy(sizePolicy) 359 | self.pushButton_forward.setObjectName("pushButton_forward") 360 | self.verticalLayout_17.addWidget(self.pushButton_forward) 361 | spacerItem = QtWidgets.QSpacerItem(40, 60, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) 362 | self.verticalLayout_17.addItem(spacerItem) 363 | self.pushButton_backward = QtWidgets.QPushButton(self.verticalLayoutWidget_13) 364 | sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred) 365 | sizePolicy.setHorizontalStretch(0) 366 | sizePolicy.setVerticalStretch(0) 367 | sizePolicy.setHeightForWidth(self.pushButton_backward.sizePolicy().hasHeightForWidth()) 368 | self.pushButton_backward.setSizePolicy(sizePolicy) 369 | self.pushButton_backward.setObjectName("pushButton_backward") 370 | self.verticalLayout_17.addWidget(self.pushButton_backward) 371 | self.label_skeleton = QtWidgets.QLabel(self.centralwidget) 372 | self.label_skeleton.setGeometry(QtCore.QRect(743, 300, 360, 540)) 373 | font = QtGui.QFont() 374 | font.setPointSize(12) 
375 | font.setBold(True) 376 | font.setWeight(75) 377 | self.label_skeleton.setFont(font) 378 | self.label_skeleton.setAutoFillBackground(True) 379 | self.label_skeleton.setAlignment(QtCore.Qt.AlignCenter) 380 | self.label_skeleton.setObjectName("label_skeleton") 381 | MainWindow.setCentralWidget(self.centralwidget) 382 | self.statusbar = QtWidgets.QStatusBar(MainWindow) 383 | self.statusbar.setObjectName("statusbar") 384 | MainWindow.setStatusBar(self.statusbar) 385 | 386 | self.retranslateUi(MainWindow) 387 | self.tabWidget.setCurrentIndex(0) 388 | QtCore.QMetaObject.connectSlotsByName(MainWindow) 389 | 390 | def retranslateUi(self, MainWindow): 391 | _translate = QtCore.QCoreApplication.translate 392 | MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) 393 | self.label_imageTransmission.setText(_translate("MainWindow", "Image Transmission")) 394 | self.groupBox_telloController.setTitle(_translate("MainWindow", "Tello Controller")) 395 | self.groupBox_PIDTuning.setTitle(_translate("MainWindow", "PID Tuning")) 396 | self.groupBox_YawSettings.setTitle(_translate("MainWindow", "Settings")) 397 | self.label_YawProportional.setText(_translate("MainWindow", "Proportional")) 398 | self.label_YawIntegral.setText(_translate("MainWindow", "Integral")) 399 | self.label_YawDerivative.setText(_translate("MainWindow", "Derivative")) 400 | self.lineEdit_YawProportional.setText(_translate("MainWindow", "0.00")) 401 | self.lineEdit_YawIntegral.setText(_translate("MainWindow", "0.00")) 402 | self.lineEdit_YawDerivative.setText(_translate("MainWindow", "0.00")) 403 | self.pushButton_setYawParameters.setText(_translate("MainWindow", "Set Parameters")) 404 | self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_Yaw), _translate("MainWindow", "Yaw")) 405 | self.groupBox_ZPosSettings.setTitle(_translate("MainWindow", "Settings")) 406 | self.label_ZPosProportional.setText(_translate("MainWindow", "Proportional")) 407 | self.label_ZPosIntegral.setText(_translate("MainWindow", "Integral")) 408 | self.label_ZPosDerivative.setText(_translate("MainWindow", "Derivative")) 409 | self.lineEdit_ZPosProportional.setText(_translate("MainWindow", "0.00")) 410 | self.lineEdit_ZPosIntegral.setText(_translate("MainWindow", "0.00")) 411 | self.lineEdit_ZPosDerivative.setText(_translate("MainWindow", "0.00")) 412 | self.pushButton_setZPosParameters.setText(_translate("MainWindow", "Set Parameters")) 413 | self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_ZPos), _translate("MainWindow", "ZPos")) 414 | self.groupBox_XPosSettings.setTitle(_translate("MainWindow", "Settings")) 415 | self.label_XPosProportional.setText(_translate("MainWindow", "Proportional")) 416 | self.label_XPosIntegral.setText(_translate("MainWindow", "Integral")) 417 | self.label_XPosDerivative.setText(_translate("MainWindow", "Derivative")) 418 | self.lineEdit_XPosProportional.setText(_translate("MainWindow", "0.00")) 419 | self.lineEdit_XPosIntegral.setText(_translate("MainWindow", "0.00")) 420 | self.lineEdit_XPosDerivative.setText(_translate("MainWindow", "0.00")) 421 | self.pushButton_setXPosParameters.setText(_translate("MainWindow", "Set Parameters")) 422 | self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_XPos), _translate("MainWindow", "XPos")) 423 | self.groupBox_RCController.setTitle(_translate("MainWindow", "RC Control")) 424 | self.pushButton_down.setText(_translate("MainWindow", "Down")) 425 | self.pushButton_turnLeft.setText(_translate("MainWindow", "Turn-L")) 426 | 
self.pushButton_up.setText(_translate("MainWindow", "Up")) 427 | self.pushButton_turnRight.setText(_translate("MainWindow", "Turn-R")) 428 | self.groupBox_Run.setTitle(_translate("MainWindow", "Run")) 429 | self.radioButton_manual.setText(_translate("MainWindow", "Manual")) 430 | self.radioButton_auto.setText(_translate("MainWindow", "Auto")) 431 | self.pushButton_start.setText(_translate("MainWindow", "Start")) 432 | self.pushButton_land.setText(_translate("MainWindow", "Land")) 433 | self.pushButton_forward.setText(_translate("MainWindow", "Forward")) 434 | self.pushButton_backward.setText(_translate("MainWindow", "Backward")) 435 | self.label_skeleton.setText(_translate("MainWindow", "Skeleton View")) 436 | from pyqtgraph import GraphicsView 437 | 438 | 439 | if __name__ == "__main__": 440 | import sys 441 | app = QtWidgets.QApplication(sys.argv) 442 | MainWindow = QtWidgets.QMainWindow() 443 | ui = Ui_MainWindow() 444 | ui.setupUi(MainWindow) 445 | MainWindow.show() 446 | sys.exit(app.exec_()) 447 | --------------------------------------------------------------------------------
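Note on the auto mode in ControlCentre.py: compute_measurement() turns the nose landmark into pixel measurements, ControlThread's PID controllers are meant to compare them against the setpoints sketched (but currently commented out) in start_cb, which place the face at 1/2 of the frame width and 3/4 of the frame height, and the result is sent to the drone as rc velocities. A minimal, self-contained sketch of that measurement-to-command mapping follows. It is illustrative only: the frame size, gains, setpoints and the plain proportional control are assumptions and do not use the project's PID.py or the djitellopy API.

# Illustrative only: proportional version of the measurement -> rc-command mapping.
# FRAME_W/FRAME_H, the gains and the setpoints are assumed values, not project constants.
FRAME_W, FRAME_H = 960, 720
SETPOINT_X = FRAME_W // 2            # keep the nose horizontally centred (yaw)
SETPOINT_Y = int(FRAME_H * 3 / 4)    # keep the nose at 3/4 of the image height (zpos)
KP_YAW, KP_Z = 0.25, 0.3             # assumed proportional gains


def clamp(value, low=-50, high=50):
    """Limit a velocity to the same range as ControlThread's output_limits."""
    return max(low, min(high, value))


def rc_from_measurement(nose_x, nose_y):
    """Map a nose measurement in pixels to (left_right, forward_backward, up_down, yaw) velocities."""
    yaw_vel = clamp(KP_YAW * (nose_x - SETPOINT_X))    # nose right of centre -> yaw right
    up_down_vel = clamp(KP_Z * (SETPOINT_Y - nose_y))  # nose above the setpoint -> climb
    return 0, 0, int(up_down_vel), int(yaw_vel)


if __name__ == "__main__":
    # nose detected slightly right of centre and above the setpoint
    print(rc_from_measurement(nose_x=600, nose_y=300))  # -> (0, 0, 50, 30)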