├── images
│   ├── img1.jpeg
│   ├── img10.jpeg
│   ├── img11.jpeg
│   ├── img2.jpeg
│   ├── img3.jpeg
│   ├── img4.jpeg
│   ├── img5.jpeg
│   ├── img6.jpeg
│   ├── img7.jpeg
│   ├── img8.jpeg
│   └── img9.jpeg
├── calibration_matrix.yaml
├── camera.py
├── LICENSE
├── Calib.py
├── MAIN.py
├── TRACKING.py
├── POSEESTIMATION.py
├── GUI.py
├── README.md
└── drone_tracking_pid.py

--------------------------------------------------------------------------------
/images/img1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img1.jpeg

--------------------------------------------------------------------------------
/images/img10.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img10.jpeg

--------------------------------------------------------------------------------
/images/img11.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img11.jpeg

--------------------------------------------------------------------------------
/images/img2.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img2.jpeg

--------------------------------------------------------------------------------
/images/img3.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img3.jpeg

--------------------------------------------------------------------------------
/images/img4.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img4.jpeg

--------------------------------------------------------------------------------
/images/img5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img5.jpeg

--------------------------------------------------------------------------------
/images/img6.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img6.jpeg

--------------------------------------------------------------------------------
/images/img7.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img7.jpeg

--------------------------------------------------------------------------------
/images/img8.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img8.jpeg

--------------------------------------------------------------------------------
/images/img9.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello/HEAD/images/img9.jpeg

--------------------------------------------------------------------------------
/calibration_matrix.yaml:
--------------------------------------------------------------------------------
camera_matrix:
- - 918.1560329069506
  - 0.0
  - 504.2571672050115
- - 0.0
  - 920.2049666835973
  - 414.439454838821
- - 0.0
  - 0.0
  - 1.0
dist_coeff:
- - 0.19168665830761594
  - -1.6823772785122213
  - 0.016221945283270898
  - 0.009885946420555035
  - 4.253359456070316

--------------------------------------------------------------------------------
/camera.py:
--------------------------------------------------------------------------------
from djitellopy import Tello
import cv2

drone = Tello()

drone.connect()

# Switch the Tello to its downward-facing camera before starting the stream.
drone.send_command_with_return("downvision 1")
drone.streamon()
index = 1
#img = cv2.VideoCapture(0)
# drone.set_video_direction(Tello.CAMERA_DOWNWARD)
while True:

    # IsTrue, frame = img.read()
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # img = cv2.resize(img, (360, 240))
    myFrame = drone.get_frame_read()

    frame = myFrame.frame
    dframe = cv2.flip(frame, 0)

    cv2.imshow("Image", dframe)

    cond = cv2.waitKey(10) & 0xFF

    # 'd' saves the current frame for calibration, 'e' exits.
    if cond == ord('d'):
        path = "C:\\Users\\Nabeel Ahmad\\Documents\\Telllo\\18_05_2024\\images\\img" + str(index) + ".jpeg"  # adjust to your machine
        index += 1
        cv2.imwrite(path, frame)

    elif cond == ord('e'):
        break

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 Kashif Khan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/Calib.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import glob
import yaml
#import pathlib

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points for the 7x7 inner-corner grid, like (0,0,0), (1,0,0), (2,0,0) ...., (6,6,0)
objp = np.zeros((7*7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:7].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane

images = glob.glob(r'C:\Users\Nabeel Ahmad\Documents\Telllo\18_05_2024\images\*.jpeg')  # adjust to your image folder

# path = 'results'
# pathlib.Path(path).mkdir(parents=True, exist_ok=True)

found = 0
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (7, 7), None)
    # If found, add object points and image points (after refining them)
    if ret == True:
        objpoints.append(objp)  # objp is identical for every view; the board geometry never changes
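        # Refine the detected corners to sub-pixel accuracy, using the
        # termination criteria defined at the top of the script.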
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (7, 7), corners2, ret)
        found += 1
        cv2.imshow('img', img)
        cv2.waitKey(1000)
        # to save images with detected corners, uncomment the two lines below
        # together with the pathlib import and the 'results' path lines above
        # image_name = path + '/calibresult' + str(found) + '.png'
        # cv2.imwrite(image_name, img)

print("Number of images used for calibration: ", found)

cv2.destroyAllWindows()

# calibration
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

# transform the matrix and distortion coefficients to writable lists
data = {'camera_matrix': np.asarray(mtx).tolist(),
        'dist_coeff': np.asarray(dist).tolist()}

# and save it to a file
with open("calibration_matrix.yaml", "w") as f:
    yaml.dump(data, f)

--------------------------------------------------------------------------------
/MAIN.py:
--------------------------------------------------------------------------------
import cv2
from djitellopy import tello

import POSEESTIMATION
import TRACKING
import GUI

import matplotlib.pyplot as plt

pose = POSEESTIMATION.pose_estimation()
track = TRACKING.track(pose)
window = GUI.gui("1000x700", (650, 450), track)

def initializeTello():
    myDrone = tello.Tello()
    myDrone.connect()
    myDrone.for_back_velocity = 0
    myDrone.left_right_velocity = 0
    myDrone.up_down_velocity = 0
    myDrone.yaw_velocity = 0
    myDrone.speed = 0
    print(myDrone.get_battery())
    #myDrone.set_video_resolution(myDrone.RESOLUTION_480P)
    #myDrone.set_video_fps()
    myDrone.streamoff()
    myDrone.streamon()
    return myDrone

def send_command():
    left_right, far_back, up_down, yaw = track.new_state
    if myDrone.send_rc_control:
        myDrone.send_rc_control(left_right, far_back, up_down, yaw)
        return 1
    else:
        return 0

def update_parameters():
    window.battery_label.configure(text="Battery Level: " + str(myDrone.get_battery()) + "%")
    window.temperature_label.configure(text="Temperature: " + str(myDrone.get_temperature()) + " °C")
    window.altitude_label.configure(text="Altitude: " + str(myDrone.get_height()) + " cm")

def plot_data():
    plt.plot(track.x_list)
    plt.plot(track.x_ref_list)
    plt.show()


myDrone = initializeTello()

# myDrone.takeoff()
left_right, far_back, up_down, yaw = 0, 0, 0, 0
state = (left_right, far_back, up_down, yaw)
new_state = (left_right, far_back, up_down, yaw)
myFrame = myDrone.get_frame_read()

#cap = cv2.VideoCapture(0)

def main_func():
    if track.land_state == 1:
        print("LANDING")
        track.land_state = -1
        myDrone.land()
        myDrone.end()
        #plot_data()
    if track.send_command_state == 1:
        if send_command():
            track.change_state()
            track.send_command_state = 0
    #ret, frame = cap.read()
    img = myFrame.frame
    img = cv2.flip(img, 0)
    frame = track.update_inputs(img)
    text = str(track.new_state)
    cv2.putText(frame, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    window.update_video(frame)
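
    # Refresh telemetry only while no landing is in progress, then re-arm this
    # callback on the Tkinter event loop so the GUI never blocks.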
    if track.land_state == 0:
        update_parameters()
    window.after(1, main_func)


window.after(0, main_func)
window.mainloop()

--------------------------------------------------------------------------------
/TRACKING.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

class track():

    def __init__(self, pose):
        self.pose = pose
        self.left_right = 0
        self.far_back = 0
        self.up_down = 0
        self.yaw = 0
        self.button_check = 0
        self.land_state = 0
        self.send_command_state = 0
        self.trackandland = 0
        self.x_list, self.x_ref_list, self.y_list, self.y_ref_list = [], [], [], []

        self.error_x, self.error_y, self.error_z = 0, 0, 0
        self.prev_error_x, self.prev_error_y, self.prev_error_z = self.error_x, self.error_y, self.error_z

        self.state = (self.left_right, self.far_back, self.up_down, self.yaw)
        self.new_state = self.state

    def change_state(self):
        self.state = (self.left_right, self.far_back, self.up_down, self.yaw)

    def change_new_state(self):
        self.new_state = (self.left_right, self.far_back, self.up_down, self.yaw)

    def update_velocities(self, x, y, z, x_ref, y_ref, z_ref):

        """self.x_list.append(x)
        self.x_ref_list.append(x_ref)
        self.y_list.append(y)
        self.y_ref_list.append(y_ref)"""

        self.left_right, self.far_back, self.up_down, self.yaw = 0, 0, 0, 0

        speed = 100
        self.error_x, self.error_y, self.error_z = x - x_ref, y_ref - y, z_ref - z

        p, d = 0.1, 0.65  # proportional and derivative gains

        self.left_right = int(np.clip(p*self.error_x + d*(self.error_x - self.prev_error_x), -speed, speed))
        self.far_back = int(np.clip(p*self.error_y + d*(self.error_y - self.prev_error_y), -speed, speed))

        self.prev_error_x, self.prev_error_y = self.error_x, self.error_y

        """if abs(self.left_right) < 10:
            self.left_right = 0
        if abs(self.far_back) < 10:
            self.far_back = 0"""

        # Descend only once the marker is roughly centred; land when close enough.
        if abs(self.left_right) < 25 and abs(self.far_back) < 25 and self.trackandland == 1:
            if z < z_ref + 10:
                self.land_state = 1
            else:
                #self.up_down = int(np.clip(0.4*self.error_z + 0.4*(self.error_z - self.prev_error_z), -60, 60))
                #self.prev_error_z = self.error_z
                self.up_down = -40
        else:
            self.up_down = 0

    def update_inputs(self, frame):
        if self.land_state == 0:
            if not self.button_check:
                coordinates, frame = self.pose.get_pose_estimate(frame)
                h, w, _ = frame.shape
                x_ref, y_ref, z_ref = w//2, h//2, 80
                p = 0.30
                x_l, x_u = int(p*w), int((1-p)*w)
                y_l, y_u = int(p*h), int((1-p)*h)

                cv2.line(frame, (x_l, 0), (x_l, h), (255, 0, 0), 4)
                cv2.line(frame, (x_u, 0), (x_u, h), (255, 0, 0), 4)
                cv2.line(frame, (0, y_l), (w, y_l), (255, 0, 0), 4)
                cv2.line(frame, (0, y_u), (w, y_u), (255, 0, 0), 4)
                if len(coordinates) > 0:
                    x, y, z = coordinates
                    print(z)
                    cv2.circle(frame, (x, y), 4, (0, 255, 0), -1)
                    self.update_velocities(x, y, z, x_ref, y_ref, z_ref)
                    self.change_new_state()

                else:
                    #print("no marker detected")
                    self.left_right, self.far_back, self.up_down, self.yaw = 0, 0, 0, 0
                    self.change_new_state()

            if (self.new_state != self.state):
                self.send_command_state = 1

        return frame

--------------------------------------------------------------------------------
/POSEESTIMATION.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import yaml


class pose_estimation():

    def __init__(self):

        self.ARUCO_DICT = {
            "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
            "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
            "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
            "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
            "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
            "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
            "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
            "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
            "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
            "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
            "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
            "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
            "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
            "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
            "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
            "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
            "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
            "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
            "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
            "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
            "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
        }

        self.aruco_type = "DICT_5X5_100"
        self.arucoDict = cv2.aruco.getPredefinedDictionary(self.ARUCO_DICT[self.aruco_type])
        self.arucoParams = cv2.aruco.DetectorParameters()
        self.detector = cv2.aruco.ArucoDetector(self.arucoDict, self.arucoParams)

        self.camera_info = self.get_camera_info(path='calibration_matrix.yaml')
        self.matrix_coefficients = np.array(self.camera_info['camera_matrix'])
        self.distortion_coefficients = np.array(self.camera_info['dist_coeff'][0])

    def get_camera_info(self, path):
        with open(path, 'r') as file:
            data = yaml.safe_load(file)
        return data

    def estimatePoseSingleMarkers(self, corners, marker_size):
        marker_points = np.array([[-marker_size / 2, marker_size / 2, 0],
                                  [marker_size / 2, marker_size / 2, 0],
                                  [marker_size / 2, -marker_size / 2, 0],
                                  [-marker_size / 2, -marker_size / 2, 0]], dtype=np.float32)
        trash = []
        rvecs = []
        tvecs = []

        for c in corners:
            ret, R, t = cv2.solvePnP(marker_points, c, self.matrix_coefficients, self.distortion_coefficients, flags=cv2.SOLVEPNP_IPPE_SQUARE)
            rvecs.append(R)
            tvecs.append(t)
            trash.append(ret)
        return rvecs, tvecs, trash

    def get_pose_estimate(self, frame):

        # Undistort with the calibration data before running marker detection.
        frame = cv2.undistort(frame, self.matrix_coefficients, self.distortion_coefficients)
        corners, ids, rejected_img_points = self.detector.detectMarkers(frame)

        if len(corners) > 0:
            rvecs, tvecs, _ = self.estimatePoseSingleMarkers(corners, 100)
            # cv2.aruco.drawDetectedMarkers(frame, corners)
            for i in range(0, ids.size):
                cv2.drawFrameAxes(frame, self.matrix_coefficients, self.distortion_coefficients, np.array(rvecs)[i, :, :], np.array(tvecs)[i, :, :], 50, 2)
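            # tvec is in the same units as the marker size passed above
            # (100 mm here), so dividing by 10 converts it to centimetres.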
            x, y, z = round(tvecs[0][0][0] / 10), round(tvecs[0][1][0] / 10), round(tvecs[0][2][0] / 10)

            # Pixel centre of the first detected marker.
            c = corners[0][0]
            x_ = int((c[0][0] + c[1][0] + c[2][0] + c[3][0]) / 4)
            y_ = int((c[0][1] + c[1][1] + c[2][1] + c[3][1]) / 4)

            return [x_, y_, z], frame
        else:
            return [], frame

--------------------------------------------------------------------------------
/GUI.py:
--------------------------------------------------------------------------------
import customtkinter
import cv2
from PIL import Image

class gui(customtkinter.CTk):
    def __init__(self, window_size="800x600", video_size=(600, 400), track_obj=None):
        super().__init__()
        self.track_obj = track_obj
        self.title("Mission Control")
        self.geometry(window_size)
        self.video_size = video_size

        customtkinter.set_appearance_mode("dark")
        customtkinter.set_default_color_theme("blue")

        self.x, self.y, self.dx, self.dy = 0.1, 0.75, 0.2, 0.2

        self.left_button = self.create_button("LEFT", self.x + 0.05, self.y + self.dy/2, self.button_pressed, self.button_released)
        self.right_button = self.create_button("RIGHT", self.x + 2*self.dx - 0.05, self.y + self.dy/2, self.button_pressed, self.button_released)
        self.far_button = self.create_button("FORWARD", self.x + self.dx, self.y, self.button_pressed, self.button_released)
        self.back_button = self.create_button("BACKWARD", self.x + self.dx, self.y + self.dy, self.button_pressed, self.button_released)
        self.up_button = self.create_button("UP", self.x + 2.8*self.dx, self.y, self.button_pressed, self.button_released)
        self.down_button = self.create_button("DOWN", self.x + 2.8*self.dx, self.y + self.dy, self.button_pressed, self.button_released)

        self.trackland_button = self.create_button("TRACKANDLAND", self.x + 3.8*self.dx, self.y, self.button_pressed, self.button_released)
        self.land_button = self.create_button("LAND", self.x + 3.8*self.dx, self.y + self.dy, self.button_pressed, self.button_released)

        self.video_frame = customtkinter.CTkFrame(self, width=self.video_size[0], height=self.video_size[1],
                                                  border_width=6, border_color="white")
        self.video_frame.pack(side="left", padx=50, pady=20, anchor="n")

        self.video_label = customtkinter.CTkLabel(self.video_frame, width=self.video_size[0], height=self.video_size[1], text="")
        self.video_label.pack(padx=4, pady=4)
        self.video_label.pack(fill=customtkinter.BOTH, expand=True)

        self.info_frame = customtkinter.CTkFrame(self, width=400, height=100, border_width=0, fg_color="transparent")
        self.info_frame.pack(side="left", padx=10, pady=130, anchor="n")

        # Create labels for real-time information
        self.battery_label = customtkinter.CTkLabel(self.info_frame, width=250, text="Battery: NA %",
                                                    text_color="yellow", font=("Arial", 20), anchor="w")
        self.battery_label.pack(side="top", padx=20, pady=30)
        self.temperature_label = customtkinter.CTkLabel(self.info_frame, width=250, text="Temperature: °C",
                                                        text_color="yellow", font=("Arial", 20), anchor="w")
        self.temperature_label.pack(side="top", padx=20, pady=30)
        self.altitude_label = customtkinter.CTkLabel(self.info_frame, width=250, text="Altitude: cm",
                                                     text_color="yellow", font=("Arial", 20), anchor="w")
        self.altitude_label.pack(side="top", padx=20, pady=30)
        #self.speed_label = customtkinter.CTkLabel(self.info_frame, width=200, text="Speed: m/s",
        #                                          text_color="yellow", font=("Arial", 20), anchor="w")
        #self.speed_label.pack(side="top", padx=20, pady=20)
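
    # Builds a momentary control button: pressing starts a motion command and
    # releasing stops it (see button_pressed / button_released below).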
    def create_button(self, text, rel_x, rel_y, func1, func2):
        button = customtkinter.CTkButton(self, text=text,
                                         fg_color=("blue", "black"),
                                         width=100, height=50, border_width=3,
                                         corner_radius=20)
        button.place(relx=rel_x, rely=rel_y, anchor=customtkinter.CENTER)
        button.bind("<ButtonPress-1>", lambda event, button_text=text: func1(event, button_text))
        button.bind("<ButtonRelease-1>", lambda event, button_text=text: func2(event, button_text))
        return button


    def button_pressed(self, event, button_text):
        self.track_obj.button_check = 1
        self.track_obj.left_right, self.track_obj.far_back = 0, 0
        self.track_obj.up_down, self.track_obj.yaw = 0, 0
        speed = 40
        if button_text == "LEFT":
            self.track_obj.left_right = -speed
            self.track_obj.change_new_state()
        elif button_text == "RIGHT":
            self.track_obj.left_right = speed
            self.track_obj.change_new_state()
        elif button_text == "FORWARD":
            self.track_obj.far_back = speed
            self.track_obj.change_new_state()
        elif button_text == "BACKWARD":
            self.track_obj.far_back = -speed
            self.track_obj.change_new_state()
        elif button_text == "UP":
            self.track_obj.up_down = speed
            self.track_obj.change_new_state()
        elif button_text == "DOWN":
            self.track_obj.up_down = -speed
            self.track_obj.change_new_state()
        elif button_text == "LAND":
            if self.track_obj.land_state == 0:
                self.track_obj.land_state = 1
        elif button_text == "TRACKANDLAND":
            self.track_obj.trackandland = 1

    def button_released(self, event, button_text):
        if button_text == "LAND":
            pass
        else:
            self.track_obj.left_right, self.track_obj.far_back = 0, 0
            self.track_obj.up_down, self.track_obj.yaw = 0, 0
            self.track_obj.change_new_state()
            self.track_obj.button_check = 0

    def update_video(self, img):
        # Convert the image from BGR color (which OpenCV uses) to RGB color
        #rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Convert the image to PIL format
        pil_image = Image.fromarray(img)
        # Convert the PIL image to CTkImage
        tk_image = customtkinter.CTkImage(dark_image=pil_image, size=(self.video_size[0], self.video_size[1]))
        # Update the label with the new image
        self.video_label.configure(image=tk_image)
        self.video_label.image = tk_image

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Autonomous Landing of UAV on Moving Platform Using Computer Vision

## Introduction
This project develops a UAV system that lands autonomously on a moving platform using computer vision and feedback control. Enabling drones to land on moving platforms with precision has applications in logistics, military operations, and rescue missions.

![image](https://github.com/user-attachments/assets/db815c15-0122-4b36-9e1c-5a8487fd339b)

## Table of Contents
- [Introduction](#introduction)
- [Screenshots and Videos](#screenshots-and-videos)
- [Background and Motivation](#background-and-motivation)
- [Objectives](#objectives)
- [Key Components](#key-components)
- [Camera Calibration](#camera-calibration)
- [Pose Estimation](#pose-estimation)
- [ArUco Markers](#aruco-markers)
- [PID Control System](#pid-control-system)
- [Workflow](#workflow)
- [Results](#results)
- [Limitations](#limitations)
- [Future Work](#future-work)
- [Installation](#installation)
- [Usage](#usage)
- [Contributing](#contributing)
- [License](#license)
- [Contributors](#contributors)

## Background and Motivation
Traditional UAV landing methods assume a static landing site and are not suitable for dynamic environments. This project targets precise landings on moving platforms, with applications in logistics, military operations, and rescue missions.

## Screenshots and Videos
https://github.com/user-attachments/assets/efe6aa5e-7c5d-413b-8058-ab73d69cfaa0

https://github.com/user-attachments/assets/c310fbe9-0985-4bed-b44e-b981540e041b

## Objectives
1. Develop a vision-based landing system
2. Implement PID control
3. Ensure real-time data processing
4. Integrate the vision, control, and GUI subsystems
5. Conduct field testing

## Key Components
### Hardware Setup
- **DJI Tello Drone:** Chosen for its lightweight design, programmability, and onboard camera and sensors.

![image](https://github.com/user-attachments/assets/e8c3edd6-4240-4231-8001-6070c6beff81)

### Software Framework
- **Programming Language:** Python
- **IDE:** Visual Studio Code (VSCode)
- **Functions:** Real-time image processing and control algorithm implementation

### Control Interface Module
- **GUI Features:** Live camera feed, manual control buttons, autonomous landing buttons, and feedback on battery level, temperature, and altitude.

![image](https://github.com/user-attachments/assets/24907fe4-583b-403c-9346-e89dc817098a)

## Camera Calibration
Camera calibration ensures accurate distance and angle measurements. This is crucial for determining the drone's position and orientation relative to the landing platform.

## Pose Estimation
Pose estimation determines the position and orientation of the drone relative to the landing platform. It uses the camera matrix and distortion coefficients obtained from camera calibration.

## ArUco Markers
ArUco markers are used for accurate pose estimation and object tracking. They are square fiducial patterns, each encoding a unique identifier, that are easy to generate and reliably detectable.

![image](https://github.com/user-attachments/assets/c56e2989-6e93-4d30-8c61-3a642d3d46ac)

### Advantages of Using ArUco Markers
- **Ease of Use:** Easy to print and deploy.
- **Open-Source Libraries:** Available libraries like OpenCV's ArUco module simplify development.
- **Accuracy:** Provide reliable pose estimation essential for precise UAV landings.

### Limitations
- **Lighting Conditions:** Detection accuracy can be affected by extreme lighting.
- **Distance and Size:** Detection range is limited by camera resolution and marker size.
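
The sketch below ties these pieces together: detect a marker, estimate its pose from the calibration data, and turn the pixel error into the PD-style velocity commands described in the next section. It is a minimal illustration, not the project's exact code: it assumes the `calibration_matrix.yaml` produced by `Calib.py` and a 100 mm marker, the helper name `pd_step` is used here only for illustration, and the gains mirror those in `TRACKING.py` without being tuned values.

```python
import cv2
import numpy as np
import yaml

# Load the intrinsics produced by Calib.py.
with open("calibration_matrix.yaml") as f:
    calib = yaml.safe_load(f)
K = np.array(calib["camera_matrix"])
dist = np.array(calib["dist_coeff"][0])

detector = cv2.aruco.ArucoDetector(
    cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100),
    cv2.aruco.DetectorParameters())

MARKER_MM = 100  # printed edge length of the marker (assumed)
obj_pts = np.array([[-MARKER_MM / 2,  MARKER_MM / 2, 0],
                    [ MARKER_MM / 2,  MARKER_MM / 2, 0],
                    [ MARKER_MM / 2, -MARKER_MM / 2, 0],
                    [-MARKER_MM / 2, -MARKER_MM / 2, 0]], dtype=np.float32)

def pd_step(frame, prev_err, kp=0.1, kd=0.65, limit=100):
    """One control step: returns ((left_right, forward_back), error, z_cm)."""
    corners, ids, _ = detector.detectMarkers(frame)
    if ids is None:
        return (0, 0), prev_err, None
    # Pose of the first marker via solvePnP, as in POSEESTIMATION.py.
    _, rvec, tvec = cv2.solvePnP(obj_pts, corners[0], K, dist,
                                 flags=cv2.SOLVEPNP_IPPE_SQUARE)
    z_cm = float(tvec[2][0]) / 10            # tvec is in mm (marker units)
    cx, cy = corners[0][0].mean(axis=0)      # marker centre in pixels
    h, w = frame.shape[:2]
    err = np.array([cx - w / 2, h / 2 - cy])  # pixel error from image centre
    out = np.clip(kp * err + kd * (err - prev_err), -limit, limit)  # PD law
    return (int(out[0]), int(out[1])), err, z_cm
```

The two velocity components map directly onto `Tello.send_rc_control(left_right, forward_back, up_down, yaw)`, which is how `MAIN.py` forwards commands to the drone; `prev_err` starts as `np.zeros(2)` and carries the previous error between frames for the derivative term.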

## PID Control System
The PID control system is a feedback control mechanism with Proportional, Integral, and Derivative terms. It is essential for real-time adjustments and precise landings; the implementation in `TRACKING.py` uses the proportional and derivative terms.

### Role in UAV Control
- **Maintain Stability:** Continuously adjusts the UAV's flight parameters.
- **Track the Moving Platform:** Adjusts the flight path based on pose information from the vision system.
- **Achieve Precision Landings:** Fine-tuning of the PID gains yields precise and smooth landings.

## Workflow
1. **Initialization**
2. **Data Reception and Processing**
3. **Decision Execution**
4. **Feedback Analysis**

## Results
The system was tested in an indoor environment using the DJI Tello UAV and tracked and landed on the moving platform accurately and repeatably.

## Limitations
- **Computational Power:** Ground-station processing power limits real-time performance.
- **Data Transmission:** Latency between the UAV and the ground station.

## Future Work
- **Algorithm Optimization:** Improve computational efficiency.
- **Alternative Sensors:** Explore LIDAR or infrared sensors.
- **AI Integration:** Incorporate machine learning and deep learning for increased autonomy and flexibility.

## Installation
1. Clone the repository:
   ```bash
   git clone https://github.com/KashifKhaan/UAV-MovingPlatformLanding-DjiTello
   ```
2. Navigate to the project directory:
   ```bash
   cd UAV-MovingPlatformLanding-DjiTello
   ```
3. Install the required Python packages (the repository does not ship a requirements file, so install the dependencies the scripts import):
   ```bash
   pip install djitellopy opencv-contrib-python customtkinter Pillow numpy matplotlib PyYAML
   ```

## Usage
1. Power on the DJI Tello Drone and connect your computer to the drone's Wi-Fi network.
2. Run the main script to start the autonomous landing system:
   ```bash
   python MAIN.py
   ```
3. Use the GUI to monitor the live feed and control the UAV.

## Contributing
Contributions are welcome! Please fork the repository and submit a pull request with your changes. For major changes, please open an issue first to discuss what you would like to change.

## License
This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.

## Contributors
- Mohammad Kashif
- Nabeel Ahmad
- Muneer Ahmad

## 🔗 Contact
If you have any questions, suggestions, or feedback, please feel free to reach out to us:

[![linkedin](https://img.shields.io/badge/linkedin-0A66C2?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/mr-kashif-442146214/)

Email: mrkashi2020@gmail.com

`We appreciate your support and hope you enjoy using Our UAV Project!`

--------------------------------------------------------------------------------
/drone_tracking_pid.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import yaml

from djitellopy import tello


ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
    "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
    "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
    "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
    "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
    "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
    "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
    "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
    "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
    "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
    "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}

def my_estimatePoseSingleMarkers(corners, marker_size, mtx, distortion):
    '''
    This will estimate the rvec and tvec for each of the marker corners detected by:
       corners, ids, rejectedImgPoints = detector.detectMarkers(image)
    corners - is an array of detected corners for each detected marker in the image
    marker_size - is the size of the detected markers
    mtx - is the camera matrix
    distortion - is the camera distortion matrix
    RETURN list of rvecs, tvecs, and trash (so that it corresponds to the old estimatePoseSingleMarkers())
    '''
    marker_points = np.array([[-marker_size / 2, marker_size / 2, 0],
                              [marker_size / 2, marker_size / 2, 0],
                              [marker_size / 2, -marker_size / 2, 0],
                              [-marker_size / 2, -marker_size / 2, 0]], dtype=np.float32)
    trash = []
    rvecs = []
    tvecs = []

    for c in corners:
        ret, R, t = cv2.solvePnP(marker_points, c, mtx, distortion, flags=cv2.SOLVEPNP_IPPE_SQUARE)
        rvecs.append(R)
        tvecs.append(t)
        trash.append(ret)
    return rvecs, tvecs, trash


def pose_estimation(frame, aruco_dict_type, matrix_coefficients, distortion_coefficients):

    aruco_dict = cv2.aruco.getPredefinedDictionary(aruco_dict_type)
    parameters = cv2.aruco.DetectorParameters()
    detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)

    # Undistort with the calibration data before running marker detection.
    frame = cv2.undistort(frame, matrix_coefficients, distortion_coefficients)

    corners, ids, rejected_img_points = detector.detectMarkers(frame)

    if len(corners) > 0:
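        # Pose is estimated once for all detected markers; the first marker
        # drives the control values returned below.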
        rvec, tvec, markerPoints = my_estimatePoseSingleMarkers(corners, 100, matrix_coefficients, distortion_coefficients)
        # cv2.aruco.drawDetectedMarkers(frame, corners)
        for i in range(0, ids.size):
            cv2.drawFrameAxes(frame, matrix_coefficients, distortion_coefficients, np.array(rvec)[i, :, :], np.array(tvec)[i, :, :], 50, 2)
        # tvec is in marker units (mm); /10 converts to cm.
        x, y, z = round(tvec[0][0][0] / 10), round(tvec[0][1][0] / 10), round(tvec[0][2][0] / 10)

        c = corners[0][0]
        x_ = int((c[0][0] + c[1][0] + c[2][0] + c[3][0]) / 4)
        y_ = int((c[0][1] + c[1][1] + c[2][1] + c[3][1]) / 4)

        return [x_, y_, z], frame
    else:
        return [], frame


def get_pid_values(coordinates):
    x, y, z = coordinates
    x_ref = 0
    error_x = x_ref - x
    x_out = 0.1 * error_x
    return x_out

def get_velocities(x, y, w, h):

    left_right, far_back, up_down, yaw = 0, 0, 0, 0

    x_center, y_center = w//2, h//2

    kp = 0.5

    left_right = int(np.clip(0.2*(x - x_center), -40, 40))

    up_down = int(np.clip(kp*(y_center - y), -40, 40))

    # Dead band: ignore small errors so the drone does not jitter.
    if (abs(left_right) < 25):
        left_right = 0
    if (abs(up_down) < 25):
        up_down = 0

    return left_right, far_back, up_down, yaw

def send_command(left_right, far_back, up_down, yaw):
    if myDrone.send_rc_control:
        myDrone.send_rc_control(left_right, far_back, up_down, yaw)
        return 1
    else:
        return 0


def initializeTello():
    myDrone = tello.Tello()
    myDrone.connect()
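    # Zero all cached velocity components so no stale commands are applied
    # before the first send_rc_control call.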
    myDrone.for_back_velocity = 0
    myDrone.left_right_velocity = 0
    myDrone.up_down_velocity = 0
    myDrone.yaw_velocity = 0
    myDrone.speed = 0
    print(myDrone.get_battery())
    myDrone.streamoff()
    myDrone.streamon()
    return myDrone

aruco_type = "DICT_5X5_100"

arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[aruco_type])

arucoParams = cv2.aruco.DetectorParameters()
detector = cv2.aruco.ArucoDetector(arucoDict, arucoParams)


# intrinsic_camera = np.array(((933.15867, 0, 657.59),(0,933.1586, 400.36993),(0,0,1)))
# distortion = np.array((-0.43948,0.18514,0,0))


with open('calibration_matrix.yaml', 'r') as file:
    data = yaml.safe_load(file)

intrinsic_camera = np.array(data['camera_matrix'])
distortion = np.array(data['dist_coeff'][0])

cap = cv2.VideoCapture(0)

# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)


myDrone = initializeTello()
#myDrone.set_video_resolution(myDrone.RESOLUTION_480P)


myDrone.takeoff()
left_right, far_back, up_down, yaw = 0, 0, 0, 0
state = (left_right, far_back, up_down, yaw)
new_state = (left_right, far_back, up_down, yaw)
myFrame = myDrone.get_frame_read()

while True:

    img = myFrame.frame
    coordinates, frame = pose_estimation(img, ARUCO_DICT[aruco_type], intrinsic_camera, distortion)

    h, w, _ = frame.shape

    p = 0.4

    x_l, x_u = int(p*w), int((1-p)*w)
    y_l, y_u = int(p*h), int((1-p)*h)

    cv2.line(frame, (x_l, 0), (x_l, h), (0, 0, 255), 4)
    cv2.line(frame, (x_u, 0), (x_u, h), (0, 0, 255), 4)
    cv2.line(frame, (0, y_l), (w, y_l), (0, 0, 255), 4)
    cv2.line(frame, (0, y_u), (w, y_u), (0, 0, 255), 4)
    if len(coordinates) > 0:
        x, y, z = coordinates
        cv2.circle(frame, (x, y), 4, (0, 255, 0), -1)
        left_right, far_back, up_down, yaw = get_velocities(x, y, w, h)
        new_state = (left_right, far_back, up_down, yaw)
        if (new_state != state):
            if send_command(left_right, far_back, up_down, yaw):
                state = new_state

    else:
        print("no marker detected")
        left_right, far_back, up_down, yaw = 0, 0, 0, 0
        new_state = (left_right, far_back, up_down, yaw)
        if (new_state != state):
            if send_command(left_right, far_back, up_down, yaw):
                state = new_state

    text = str(state)
    cv2.putText(frame, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("output", frame)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        myDrone.land()
        myDrone.end()
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------