├── target.jpg
├── Tools
│   ├── target.jpg
│   ├── sc_replay.py
│   ├── make_config.py
│   ├── sc_CameraCalibrate.py
│   ├── pl_targetGen.py
│   ├── sc_config.py
│   └── target.eps
├── .gitignore
├── OpticalFlow.py
├── RedBalloon.py
├── README.md
├── Smart_Camera.cnf
├── pl_gui.py
├── pl_util.py
├── SmartCamera.py
├── pid.py
├── position_vector.py
├── pl_sim.py
├── sc_config.py
├── sc_logger.py
├── sc_dispatcher.py
├── vehicle_control.py
├── sc_video.py
├── CircleDetector.py
└── PrecisionLand.py

/target.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djnugent/SmartCamera/HEAD/target.jpg
--------------------------------------------------------------------------------
/Tools/target.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djnugent/SmartCamera/HEAD/Tools/target.jpg
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | /logs
3 | /vids
4 | /enroute
5 | .cproject
6 | .project
7 | .settings/*
--------------------------------------------------------------------------------
/OpticalFlow.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sc_config
3 | from sc_logger import sc_logger
4 |
5 |
6 | class OpticalFlow(object):
7 |
8 |     def __init__(self,args):
9 |         #load config file
10 |         sc_config.config.get_file(self.name())
11 |
12 |         #get camera index
13 |         self.camera_index = int(args.camera)
14 |
15 |     def name(self):
16 |         return "Optical_Flow"
17 |
18 |     def run(self):
19 |         sc_logger.text(sc_logger.GENERAL, 'running {0}'.format(self.name()))
20 |
21 |
--------------------------------------------------------------------------------
/RedBalloon.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import sc_config
3 | from sc_logger import sc_logger
4 |
5 |
6 | class RedBalloon(object):
7 |
8 |     def __init__(self,args):
9 |         #load config file
10 |         sc_config.config.get_file(self.name())
11 |
12 |         #get camera index
13 |         self.camera_index = int(args.camera)
14 |
15 |     def name(self):
16 |         return "Red_Balloon_Finder"
17 |
18 |     def run(self):
19 |         sc_logger.text(sc_logger.GENERAL, 'running {0}'.format(self.name()))
20 |
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | #SmartCamera#
2 |
3 | *This project has been moved to my VisNav repo; please check that out for my most recent progress*
4 | https://www.github.com/djnugent/visnav
5 |
6 | Use image processing to perform visual navigation for ArduPilot
7 |
8 | Currently the program only supports precision landing, but it was designed to be expanded to other vision-guided tasks
9 |
10 | ###Run the code###
11 | 1. Start SITL
12 | 2. Load the DroneAPI module
13 | 3. Run
14 | ```
15 | api start /home/[your username]/SmartCamera/PrecisionLand.py
16 | ```
17 | 4. Take off
18 | 5. 
Enter GUIDED mode to start visual landing 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /Smart_Camera.cnf: -------------------------------------------------------------------------------- 1 | [algorithm] 2 | eccentricity = 0.6 3 | distance_threshold = 15 4 | min_circles = 5 5 | radius_tolerance = 2 6 | ratio_tolerance = 0.015 7 | target_code = 0.8 0.91 0.76 0.84 0.7 0.66 0.49 8 | 9 | outer_ring = 0.08255 10 | 11 | [processing] 12 | desired_cores = 4 13 | background_capture = True 14 | 15 | [logging] 16 | location = ~/SmartCamera/ 17 | print_level = debug, general 18 | log_level = aircraft , algorithm, general 19 | display_level = raw, gui 20 | record_level = raw 21 | 22 | [camera] 23 | index = 0 24 | width = 640 25 | height = 480 26 | horizontal-fov = 70.42 27 | vertical-fov = 43.3 28 | matrix = 614.01269552 0.0 315.00073982 29 | 0.0 614.43556296 237.14926858 30 | 0.0 0.0 1.0 31 | distortion = 0.12269303 -0.26618881 0.00129035 0.00081791 0.17005303 32 | 33 | [gui] 34 | show_target = False 35 | show_mode = False 36 | show_vel_vector = False 37 | show_artifical_horizon = False 38 | show_distance = False 39 | show_fps = False 40 | show_location = False 41 | 42 | [general] 43 | simulate = False 44 | 45 | [control] 46 | search_yaw_speed = 5.0 47 | search_target_delay = 2.0 48 | vel_xy_p = 1.0 49 | vel_xy_i = 0.0 50 | vel_xy_d = 0.0 51 | vel_xy_imax = 10.0 52 | vel_z_p = 2.5 53 | vel_z_i = 0.0 54 | vel_z_d = 0.0 55 | vel_z_imax = 10.0 56 | vel_speed_min = 1.0 57 | vel_speed_max = 4.0 58 | vel_accel = 0.5 59 | vel_dist_ratio = 0.5 60 | vel_pitch_target = -5.0 61 | vel_update_rate_sec = 0.2 62 | 63 | -------------------------------------------------------------------------------- /pl_gui.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import cv2 3 | import numpy as np 4 | from copy import copy 5 | 6 | 7 | class PrecisionLandGUI(object): 8 | 9 | 10 | @classmethod 11 | #render_image - completes all gui operation according to config file 12 | def render_image(self, img, fps=None, target=None, distance=None, mode=None, vel=None, craftAttitude=None, locationCraft=None, locationTarget=None): 13 | pass 14 | 15 | @classmethod 16 | #add_predicted_location- place a marker on image where the target is suppossed to be based on GPS 17 | def add_predicted_location(self, img, locationCraft, locationTarget): 18 | pass 19 | 20 | @classmethod 21 | #add_artifical_horizon- place a bubble on image to indicate a line perpendicular to level 22 | def add_artifical_horizon(self, img, craftAttitude): 23 | pass 24 | 25 | @classmethod 26 | #add_mode_border- put a colored border around the image to signify the control the computer has 27 | def add_mode_border(self, mode): 28 | pass 29 | 30 | @classmethod 31 | #add_velocity_vectors- add vectors to show how the commands sent to the autopilot 32 | def add_velocity_vectors(self, vel): 33 | pass 34 | 35 | @classmethod 36 | #add_target_highlight- highlight the detected target, 1.the target center 2.each ring 37 | def add_target_highlights(self, image, target): 38 | #create a shallow copy of image 39 | img = copy(image) 40 | 41 | if target is not None: 42 | for i in range(0,len(target)): 43 | cv2.ellipse(img,target[i],(0,255,0),2) 44 | return img 45 | 46 | @classmethod 47 | #add_distance - adds a distance value to image 48 | def add_distance(self,img, distance): 49 | pass 50 | 51 | @classmethod 52 | #add_fps- display actaul runtime 53 | def add_fps(self,img,fps): 54 
| pass -------------------------------------------------------------------------------- /pl_util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import time 3 | import math 4 | 5 | '''' 6 | Class with various helper methods 7 | ''' 8 | 9 | 10 | # current_milli_time - current time in milliseconds 11 | def current_milli_time(): 12 | return int(time.time() * 1000) 13 | 14 | # pixels_to_angle - converts a number of pixels into an angle in radians 15 | def pixels_to_angle(num_pixels,fov,img_size): 16 | return num_pixels * math.radians(fov) / img_size 17 | 18 | #shift_to_origin - make the origin of the image (imgwidth/2,imgheight/2) 19 | def shift_to_origin(pt,width,height): 20 | return ((pt[0] - width/2.0),(-1*pt[1] + height/2.0)) 21 | 22 | 23 | #shift_to_image - make the origin of the image upper left corner 24 | def shift_to_image(pt,width,height): 25 | return ((pt[0] + width/2),(-1*pt[1] + height/2.0)) 26 | 27 | 28 | # wrap_PI - wraps value between -2*PI ~ +2*PI (i.e. -360 ~ +360 degrees) down to -PI ~ PI (i.e. -180 ~ +180 degrees) 29 | #angle should be in radians 30 | def wrap_PI(angle): 31 | if (angle > math.pi): 32 | return (angle - (math.pi * 2.0)) 33 | if (angle < -math.pi): 34 | return (angle + (math.pi * 2.0)) 35 | return angle 36 | 37 | 38 | # get_distance_from_pixels - returns distance to balloon in meters given number of pixels in image and expected 0.5m radius 39 | # size_in_pizels : diameter or radius of the object on the image (in pixels) 40 | # actual_size : diameter or radius of the object in meters 41 | def get_distance_from_pixels(size_in_pixels, actual_size,fov,img_size): 42 | # avoid divide by zero by returning 9999.9 meters for zero sized object 43 | if (size_in_pixels == 0): 44 | return 9999.9 45 | # convert num_pixels to angular size 46 | return actual_size / pixels_to_angle(size_in_pixels,fov,img_size) -------------------------------------------------------------------------------- /SmartCamera.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import logging 3 | import time 4 | import argparse 5 | import os 6 | from pymavlink import mavutil 7 | from droneapi.lib import VehicleMode, Location 8 | import sc_config 9 | from sc_logger import sc_logger 10 | #startegies 11 | from PrecisionLand import PrecisionLand 12 | from RedBalloon import RedBalloon 13 | from OpticalFlow import OpticalFlow 14 | 15 | 16 | 17 | 18 | #THIS FILE IS INACTIVE. 
NEEDS TO BE BETTER INTEGRATED WITH CONFIG FILE 19 | 20 | 21 | ''' 22 | 23 | #parse arguments 24 | parser = argparse.ArgumentParser(description="Use image processing to peform visual navigation") 25 | #optional arguments 26 | parser.add_argument('-S', '--Strategy', action="store", default='land', type=str, 27 | choices=['land','redballoon','opticalflow'], 28 | help='Land: vision assisted landing \n redballoon: redballoon finder for AVC 2014 \n opticalflow: Optical flow using webcam') 29 | parser.add_argument('-c', '--camera', default=0, help="Select the camera index for opencv") 30 | parser.add_argument('-i', '--input', default=False, help='use a video filename as an input instead of a webcam') 31 | parser.add_argument('-f', '--file', default='Smart_Camera.cnf', help='load a config file other than the default') 32 | 33 | args, unknown = parser.parse_known_args() 34 | ''' 35 | 36 | 37 | 38 | 39 | #run a strategy 40 | Strategy = None 41 | if args.Strategy == 'land': 42 | Strategy = PrecisionLand(args) 43 | if args.Strategy == 'redballoon': 44 | Strategy = RedBalloon(args) 45 | if args.Strategy == 'opticalflow': 46 | Strategy = OpticalFlow(args) 47 | 48 | 49 | #configure a logger 50 | sc_logger.set_name(Strategy.name()) 51 | 52 | 53 | sc_logger.text(sc_logger.GENERAL, 'Starting {0}'.format(Strategy.name())) 54 | 55 | Strategy.run() 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /Tools/sc_replay.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy 3 | import os 4 | import sys 5 | import argparse 6 | 7 | 8 | 9 | class SmartCameraReplay(): 10 | 11 | 12 | def __init__(self): 13 | pass 14 | 15 | def start(self): 16 | 17 | #parse arguments 18 | parser = argparse.ArgumentParser(description="Replay video and text logs from a flight") 19 | #required arguments 20 | parser.add_argument('log_file', action="store", type=str, 21 | help='Enter absolute location of log file. Ex: /home/odroid/Smart_Camera/logs/Smart_Camera-2015-01-22-20-35.log') 22 | parser.add_argument('raw_vid', action="store", type=str, 23 | help='Enter absolute location of RAW video file. Ex: /home/odroid/Smart_Camera/logs/Smart_Camera-raw-2015-01-22-20-35.avi') 24 | #optional arguments 25 | parser.add_argument('-g' ,'--gui_vid' , action="store", type=str, 26 | help='Enter absolute location of GUI video file. 
Ex: /home/odroid/Smart_Camera/logs/Smart_Camera-gui-2015-01-22-20-35.avi') 27 | 28 | args, unknown = parser.parse_known_args() 29 | 30 | 31 | #check and open files 32 | #log 33 | if(not os.path.exists(args.log_file)): 34 | print 'Unable to find log file' 35 | sys.exit(0) 36 | self.log = open(args.log_file) 37 | #raw video 38 | if(not os.path.exists(args.raw_vid)): 39 | print 'Unable to find raw video file' 40 | sys.exit(0) 41 | self.raw = cv2.VideoCapture(args.raw_vid) 42 | #gui video 43 | if(args.gui_vid is not None): 44 | if(not os.exists(args.log_file)): 45 | print 'Unable to find gui file' 46 | sys.exit(0) 47 | self.gui = cv2.VideoCapture(args.gui_vid) 48 | 49 | 50 | def run(self): 51 | log_text = self.log.readlines() 52 | for line in log_text: 53 | 54 | 55 | 56 | 57 | 58 | class line(): 59 | def __init__(self, time, level, msg): 60 | pass 61 | 62 | 63 | 64 | 65 | 66 | if __name__ == '__main__': 67 | rep = SmartCameraReplay() 68 | rep.start() 69 | rep.run() -------------------------------------------------------------------------------- /pid.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | pid class : implements a pid controller 4 | 5 | """ 6 | 7 | import math 8 | import time 9 | 10 | class pid(object): 11 | 12 | def __init__(self, initial_p=0, initial_i=0, initial_d=0, initial_imax=0): 13 | # default config file 14 | self.p_gain = initial_p 15 | self.i_gain = initial_i 16 | self.d_gain = initial_d 17 | self.imax = abs(initial_imax) 18 | self.integrator = 0 19 | self.last_error = None 20 | self.last_update = time.time() 21 | 22 | # __str__ - print position vector as string 23 | def __str__(self): 24 | return "P:%s,I:%s,D:%s,IMAX:%s,Integrator:%s" % (self.p_gain, self.i_gain, self.d_gain, self.imax, self.integrator) 25 | 26 | # get_dt - returns time difference since last get_dt call 27 | def get_dt(self, max_dt): 28 | now = time.time() 29 | time_diff = now - self.last_update 30 | self.last_update = now 31 | if time_diff > max_dt: 32 | return 0.0 33 | else: 34 | return time_diff 35 | 36 | # get_p - return p term 37 | def get_p(self, error): 38 | return self.p_gain * error 39 | 40 | # get_i - return i term 41 | def get_i(self, error, dt): 42 | self.integrator = self.integrator + error * self.i_gain * dt 43 | self.integrator = min(self.integrator, self.imax) 44 | self.integrator = max(self.integrator, -self.imax) 45 | return self.integrator 46 | 47 | # get_d - return d term 48 | def get_d(self, error, dt): 49 | if self.last_error is None: 50 | self.last_error = error 51 | ret = (error - self.last_error) * self.d_gain * dt 52 | self.last_error = error 53 | return ret 54 | 55 | # get pi - return p and i terms 56 | def get_pi(self, error, dt): 57 | return self.get_p(error) + self.get_i(error,dt) 58 | 59 | # get pid - return p, i and d terms 60 | def get_pid(self, error, dt): 61 | return self.get_p(error) + self.get_i(error,dt) + self.get_d(error, dt) 62 | 63 | # get_integrator - return built up i term 64 | def get_integrator(self): 65 | return self.integrator 66 | 67 | # reset_I - clears I term 68 | def reset_I(self): 69 | self.integrator = 0 70 | 71 | # main - used to test the class 72 | def main(self): 73 | 74 | # print object 75 | print "Test PID: %s" % test_pid 76 | 77 | # run it through a test 78 | for i in range (0, 100): 79 | result_p = test_pid.get_p(i) 80 | result_i = test_pid.get_i(i, 0.1) 81 | result_d = test_pid.get_d(i, 0.1) 82 | result = result_p + result_i + result_d 83 | print "Err %s, Result: %f (P:%f, 
I:%f, D:%f, Int:%f)" % (i, result, result_p, result_i, result_d, self.get_integrator()) 84 | 85 | # run the main routine if this is file is called from the command line 86 | if __name__ == "__main__": 87 | # create pid object P, I, D, IMAX 88 | test_pid = pid(1.0, 0.5, 0.01, 50) 89 | test_pid.main() -------------------------------------------------------------------------------- /Tools/make_config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import numpy as np 3 | import sc_config 4 | 5 | 6 | 7 | #####ALGORITHM####### 8 | 9 | #how round a circle needs to be. Perfect circle = 1 10 | sc_config.config.set_float('algorithm', 'eccentricity', 0.6) 11 | #acceptable distance(pixels) between cocentric circle centers 12 | sc_config.config.set_integer('algorithm','distance_threshold', 15) 13 | # number of circles needed for a valid target(times 2); 2 circles are often overlayed 14 | sc_config.config.set_integer('algorithm','min_circles',5) 15 | #pixels: used to identify repeat circles(stacked circles). Problem caused by findContours() 16 | sc_config.config.set_integer('algorithm', 'radius_tolerance', 2) 17 | #Tolerance used in comparing actaul ratios and preceived ratios 18 | sc_config.config.set_float('algorithm', 'ratio_tolerance', 0.015) 19 | 20 | 21 | #target specific data 22 | #target_code is the unique ratio between rings 23 | target_code_def = np.array([0.8,0.91,0.76,0.84,0.7,0.66,0.49]) 24 | sc_config.config.set_array('algorithm', 'target_code',target_code_def) 25 | #the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters 26 | sc_config.config.set_float('algorithm', 'outer_ring', 0.08255) 27 | 28 | 29 | 30 | 31 | ####PROCESSING####### 32 | 33 | sc_config.config.set_integer('processing', 'desired_cores', 4) 34 | #check if a core is already in use for background image capture 35 | sc_config.config.set_boolean('processing','background_capture', True) 36 | 37 | 38 | 39 | 40 | #####LOGGING###### 41 | sc_config.config.set_string('logging','location','~/SmartCamera/') 42 | 43 | #levels = 'debug' , 'general' , 'aircraft' , 'algorithm' , ' performance' 44 | #multiple message levels can be selected by concatination strings i.e. 'debug, aircraft' 45 | #what type of messages we print to the terminal 46 | sc_config.config.set_string('logging','print_level','debug, general') 47 | #what type of messages we log to a file 48 | sc_config.config.set_string('logging','log_level','aircraft , algorithm, general') 49 | 50 | sc_config.config.set_string('logging', 'display_level', 'raw, gui') 51 | #what type of images we record 52 | sc_config.config.set_string('logging', 'record_level', 'raw') 53 | 54 | 55 | 56 | 57 | #######CAMERA###### 58 | 59 | # get which camera we will use 60 | sc_config.config.set_integer('camera','index',0) 61 | # get image resolution 62 | sc_config.config.set_integer('camera','width',640) 63 | sc_config.config.set_integer('camera','height',480) 64 | 65 | 66 | # define field of view 67 | sc_config.config.set_float('camera','horizontal-fov',70.42) 68 | sc_config.config.set_float('camera','vertical-fov',43.3) 69 | 70 | 71 | 72 | #get camera distortion matrix and intrinsics. 
Defaults: logitech c920 73 | mtx = np.array([[ 614.01269552,0,315.00073982], 74 | [0,614.43556296,237.14926858], 75 | [0,0,1.0]]) 76 | dist = np.array([0.12269303, -0.26618881,0.00129035, 0.00081791,0.17005303]) 77 | 78 | sc_config.config.set_array('camera','matrix', mtx) 79 | sc_config.config.set_array('camera', 'distortion', dist) 80 | 81 | 82 | 83 | sc_config.config.save() -------------------------------------------------------------------------------- /Tools/sc_CameraCalibrate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | import cv2 5 | import sc_config 6 | import argparse 7 | ''' 8 | This program is used to calibrate a camera for opencv 9 | Daniel Nugent 11/6/2014 10 | 11 | 12 | 13 | Image can be found at: 14 | http://wiki.ros.org/camera_calibration/Tutorials/StereoCalibration?action=AttachFile&do=get&target=check-108.pdf 15 | Otherwise any 9x7 chessboard grid will work 16 | 17 | Instructions: 18 | Line up camera with image 19 | Press 'c' key to capture image you want to process 20 | Change camera viewing angle and repeat until complete 21 | -It takes 14 GOOD images 22 | Calibration coefficients are printed in terminal 23 | 24 | Tips for different veiwing angle: 25 | -Put target in image center 26 | -Put target in corners 27 | -Put target along edges 28 | -Do extreme viewing angle on X then Y axis then X and Y axis 29 | -Have image target take up whole image 30 | -Rotate camera 45 degrees and 90 degrees 31 | -Try to get unique viewing angles for every image 32 | 33 | Tips for a GOOD Image: 34 | ***The entire chessboard must be in frame inorder to process 35 | ***Dont be jerking the camera when you capture(prevent motion blur) 36 | ***Make sure camera is focused 37 | 38 | 39 | 40 | Example coefficients for Logitech c920 webcam: 41 | 42 | RMS: 0.144252280465 43 | camera matrix: 44 | [[ 614.01269552 0. 315.00073982] 45 | [ 0. 614.43556296 237.14926858] 46 | [ 0. 0. 1. ]] 47 | distortion coefficients: [ 0.12269303 -0.26618881 0.00129035 0.00081791 0.17005303] 48 | ''' 49 | 50 | if __name__ == '__main__': 51 | 52 | 53 | 54 | #parse arguments 55 | parser = argparse.ArgumentParser(description="Calibrate a camera for use with openCV") 56 | parser.add_argument('-c','--camera', default=0, action="store", type=int, 57 | help='Select an index value for the camera 0-255') 58 | args, unknown = parser.parse_known_args() 59 | 60 | 61 | 62 | #open video capture 63 | #increment 0 to 1 or 2 if the wrong camera is used 64 | cap = cv2.VideoCapture(args.camera) 65 | 66 | 67 | #number of good images before processing calibration 68 | goodImages = 14 69 | 70 | # number of INTERNAL corners (8x6) for a (9x7) grid 71 | pattern_size = (8, 6) 72 | 73 | square_size = 1.0 74 | 75 | #sizing arrays 76 | pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 ) 77 | pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2) 78 | pattern_points *= square_size 79 | 80 | obj_points = [] 81 | img_points = [] 82 | h, w = 0, 0 83 | 84 | 85 | x = 0 86 | while x < goodImages: 87 | #get image 88 | ret, img = cap.read(); 89 | 90 | #make it black and white 91 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 92 | 93 | if img is None: 94 | print "Failed to read", x 95 | continue 96 | 97 | cv2.imshow('raw',img) 98 | k = cv2.waitKey(1) & 0xFF 99 | 100 | #process frame when user press 'c' key 101 | if k == ord('c'): 102 | 103 | print 'processing %d...' 
% x, 104 | 105 | h, w = img.shape[:2] 106 | found, corners = cv2.findChessboardCorners(img, pattern_size) 107 | if found: 108 | 109 | #refine corners 110 | term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 ) 111 | cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term) 112 | 113 | #display processed image 114 | vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) 115 | cv2.drawChessboardCorners(vis, pattern_size, corners, found) 116 | cv2.imshow('vis',vis) 117 | 118 | #increment valid image count 119 | x+=1 120 | 121 | if not found: 122 | print 'chessboard not found' 123 | continue 124 | 125 | img_points.append(corners.reshape(-1, 2)) 126 | obj_points.append(pattern_points) 127 | 128 | print 'ok' 129 | 130 | #analyze images to calculte distortion 131 | rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None) 132 | 133 | 134 | 135 | #save to configuration file 136 | sc_config.config.set_array('camera', 'matrix' , camera_matrix) 137 | sc_config.config.set_array('camera', 'distortion', dist_coefs) 138 | sc_config.config.save() 139 | 140 | 141 | print "camera matrix:\n", camera_matrix 142 | print "distortion coefficients: ", dist_coefs 143 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /Tools/pl_targetGen.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import cv2 3 | import numpy as np 4 | import os 5 | import sc_config 6 | 7 | 8 | 9 | 10 | class PrecisionLandTargetGenerator(): 11 | 12 | 13 | def __init__(self): 14 | self.ring_color = (0,0,0) 15 | self.background_color = (255,255,255) 16 | self.img_size = 0 17 | self.ratio = np.zeros(10,np.float) 18 | self.length = 0 19 | 20 | 21 | 22 | 23 | def draw_ring(self, outer_radius, inner_radius): 24 | for radius in range(inner_radius,outer_radius): 25 | if(radius != 0): 26 | cv2.circle(self.img, (self.img_size/2,self.img_size/2), radius, self.ring_color, thickness=2, lineType=cv2.CV_AA, shift=0) 27 | 28 | 29 | def export_to_config_file(self): 30 | #trim off excess data 31 | cnt = 0 32 | for r in self.ratio: 33 | if r != 0: 34 | cnt += 1 35 | trim_ratio = np.resize(self.ratio,cnt) 36 | 37 | sc_config.config.set_array('algorithm','target_code',trim_ratio) 38 | sc_config.config.save() 39 | 40 | def import_from_config_file(self): 41 | target_code_def = np.array([0.8,0.91,0.76,0.84,0.7,0.66,0.49]) 42 | raw_ratio = sc_config.config.get_array('algorithm', 'target_code',target_code_def) 43 | 44 | for i in range(0,len(self.ratio)): 45 | #update buffer 46 | if(i < len(raw_ratio)): 47 | self.ratio[i]= raw_ratio[i] 48 | else: 49 | self.ratio[i] = 0 50 | 51 | #update trackbars 52 | field = 'Ratio ' + str(i) 53 | cv2.setTrackbarPos(field,'parameters',int(self.ratio[i] * 100)) 54 | 55 | 56 | def save_image(self): 57 | path = 'target.jpg' 58 | #get file valid filename 59 | i = 0 60 | while os.path.exists(path): 61 | path = 'target[{0}].jpg'.format(i) 62 | i+=1 63 | 64 | cv2.imwrite(path,self.img) 65 | 66 | def main(self): 67 | 68 | 69 | def updateParams(x): 70 | pass 71 | 72 | cv2.namedWindow('parameters') 73 | 74 | # create trackbars for parameters 75 | cv2.createTrackbar('Image size','parameters',800,6000,updateParams) 76 | cv2.createTrackbar('Border','parameters',5,200,updateParams) 77 | cv2.createTrackbar('Ratio 0','parameters',80,100,updateParams) 78 | cv2.createTrackbar('Ratio 1','parameters',91,100,updateParams) 79 | cv2.createTrackbar('Ratio 2','parameters',76,100,updateParams) 
80 | cv2.createTrackbar('Ratio 3','parameters',84,100,updateParams) 81 | cv2.createTrackbar('Ratio 4','parameters',70,100,updateParams) 82 | cv2.createTrackbar('Ratio 5','parameters',66,100,updateParams) 83 | cv2.createTrackbar('Ratio 6','parameters',49,100,updateParams) 84 | cv2.createTrackbar('Ratio 7','parameters',0,100,updateParams) 85 | cv2.createTrackbar('Ratio 8','parameters',0,100,updateParams) 86 | cv2.createTrackbar('Ratio 9','parameters',0,100,updateParams) 87 | 88 | 89 | 90 | while True: 91 | #create image 92 | self.img_size = cv2.getTrackbarPos('Image size','parameters') 93 | self.img = np.zeros((self.img_size,self.img_size,3), np.uint8) 94 | self.img[:] = self.background_color 95 | 96 | border = cv2.getTrackbarPos('Border','parameters') 97 | 98 | 99 | #grab ring data 100 | self.ratio[0] = cv2.getTrackbarPos('Ratio 0','parameters') /100.0 101 | self.ratio[1] = cv2.getTrackbarPos('Ratio 1','parameters') /100.0 102 | self.ratio[2] = cv2.getTrackbarPos('Ratio 2','parameters') /100.0 103 | self.ratio[3] = cv2.getTrackbarPos('Ratio 3','parameters') /100.0 104 | self.ratio[4] = cv2.getTrackbarPos('Ratio 4','parameters') /100.0 105 | self.ratio[5] = cv2.getTrackbarPos('Ratio 5','parameters') /100.0 106 | self.ratio[6] = cv2.getTrackbarPos('Ratio 6','parameters') /100.0 107 | self.ratio[7] = cv2.getTrackbarPos('Ratio 7','parameters') /100.0 108 | self.ratio[8] = cv2.getTrackbarPos('Ratio 8','parameters') /100.0 109 | self.ratio[9] = cv2.getTrackbarPos('Ratio 9','parameters') /100.0 110 | 111 | outer_radius = 0 112 | inner_radius = 0 113 | i = 0 114 | 115 | #draw rings 116 | while(i < len(self.ratio)): 117 | #outer most radius 118 | if(i == 0): 119 | outer_radius = (self.img_size-border)/2 120 | else: 121 | outer_radius = int(inner_radius * (self.ratio[i])) 122 | i+=1 123 | 124 | #inner most radius 125 | if(i >= len(self.ratio)): 126 | inner_radius = 0 127 | else: 128 | inner_radius = int(outer_radius * (self.ratio[i])) 129 | 130 | self.draw_ring(outer_radius,inner_radius) 131 | i +=1 132 | 133 | 134 | cv2.imshow('Target', self.img) 135 | 136 | #user input 137 | k = cv2.waitKey(33) & 0xFF 138 | if k == ord('e'): 139 | self.export_to_config_file() 140 | print "Exported target data" 141 | if k == ord('i'): 142 | self.import_from_config_file() 143 | print "Imported target data" 144 | if k == ord('s'): 145 | self.save_image() 146 | self.export_to_config_file() 147 | print "Saved image and exported target data" 148 | 149 | 150 | 151 | if __name__ == '__main__': 152 | targetGen = PrecisionLandTargetGenerator() 153 | targetGen.main() -------------------------------------------------------------------------------- /position_vector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | PositionVector class : holds a 3 axis position offset from home in meters in NEU format 4 | X = North-South with +ve = North 5 | Y = West-East with +ve = West 6 | Z = Altitude with +ve = Up 7 | 8 | """ 9 | 10 | import math 11 | from droneapi.lib import Location 12 | 13 | # global variables used by position vector class 14 | posvec_home_location = Location(0,0,0) 15 | posvec_lon_scale = 1.0 # scaling used to account for shrinking distance between longitude lines as we move away from equator 16 | posvec_latlon_to_m = 111319.5 # converts lat/lon to meters 17 | 18 | class PositionVector(object): 19 | 20 | # define public members 21 | x = 0 # north-south direction. 
+ve = north of home 22 | y = 0 # west-east direction, +ve = east of home 23 | z = 0 # vertical direction, +ve = above home 24 | 25 | def __init__(self,initial_x=0,initial_y=0,initial_z=0): 26 | # default config file 27 | self.x = initial_x 28 | self.y = initial_y 29 | self.z = initial_z 30 | 31 | # get_from_location - returns a position vector created from a location 32 | @classmethod 33 | def get_from_location(cls, location): 34 | ret = PositionVector(0,0,0) 35 | ret.set_from_location(location) 36 | return ret 37 | 38 | # __str__ - print position vector as string 39 | def __str__(self): 40 | return "Pos:X=%s,Y=%s,Z=%s" % (self.x, self.y, self.z) 41 | 42 | # __add__ - addition operator overload 43 | def __add__(self, other): 44 | return PositionVector(self.x + other.x, self.y + other.y, self.z + other.z) 45 | 46 | # __sub__ - subtraction operator overload 47 | def __sub__(self, other): 48 | return PositionVector(self.x - other.x, self.y - other.y, self.z - other.z) 49 | 50 | # __mul__ - multiply operator overload 51 | def __mul__(self, scalar): 52 | return PositionVector(self.x * scalar, self.y * scalar, self.z * scalar) 53 | 54 | # get_location - return the location (i.e. lat, lon, alt) of the position vector 55 | def get_location(self): 56 | dlat = self.x / posvec_latlon_to_m 57 | dlon = self.y / (posvec_latlon_to_m * posvec_lon_scale) 58 | return Location(posvec_home_location.lat + dlat,posvec_home_location.lon + dlon,posvec_home_location.alt + self.z) 59 | 60 | # set_from_location - sets x,y,z offsets given a location object (i.e. lat, lon and alt) 61 | def set_from_location(self, location): 62 | # convert lat, lon to meters from home 63 | self.x = (location.lat - posvec_home_location.lat) * posvec_latlon_to_m 64 | self.y = (location.lon - posvec_home_location.lon) * posvec_latlon_to_m * posvec_lon_scale 65 | self.z = location.alt 66 | 67 | # set_home_location - sets home location used by all position vector instances 68 | @classmethod 69 | def set_home_location(cls, home_location): 70 | global posvec_home_location 71 | posvec_home_location = home_location 72 | PositionVector.update_lon_scale(posvec_home_location.lat) 73 | 74 | # get_home_location - returns home location used by all position vector instances 75 | @classmethod 76 | def get_home_location(cls): 77 | global posvec_home_location 78 | return posvec_home_location 79 | 80 | # get_distance_xy - returns horizontal distance in meters from one position to another 81 | @classmethod 82 | def get_distance_xy(cls, pos1, pos2): 83 | dx = pos2.x - pos1.x 84 | dy = pos2.y - pos1.y 85 | return math.sqrt(dx**2+dy**2) 86 | 87 | # get_distance_xyz - returns distance in meters from one position to another 88 | @classmethod 89 | def get_distance_xyz(cls, pos1, pos2): 90 | dx = pos2.x - pos1.x 91 | dy = pos2.y - pos1.y 92 | dz = pos2.z - pos1.z 93 | return math.sqrt(dx**2+dy**2+dz**2) 94 | 95 | # get_bearing - returns bearing from origin to destination in radians 96 | @classmethod 97 | def get_bearing(cls, origin, destination): 98 | # avoid error when origin and destination are exactly on top of each other 99 | if destination.x == origin.x and destination.y == origin.y: 100 | return 0 101 | bearing = math.radians(90) + math.atan2(-(destination.x-origin.x), destination.y-origin.y); 102 | if bearing < 0: 103 | bearing += math.radians(360); 104 | return bearing 105 | 106 | # get_elevation - returns an elevation in radians from origin to destination 107 | @classmethod 108 | def get_elevation(cls, origin, destination): 109 | # avoid error when 
origin and destination are exactly on top of each other 110 | if destination.x == origin.x and destination.y == origin.y and destination.z == origin.z: 111 | return 0 112 | 113 | # calculate distance to destination 114 | dist_xy = PositionVector.get_distance_xy(origin, destination) 115 | 116 | # calculate elevation 117 | elevation = -math.atan2(destination.z-origin.z, dist_xy) 118 | return elevation 119 | 120 | # get_lon_scale - return lon scaling factor to account for curvature of earth 121 | @classmethod 122 | def update_lon_scale(cls, lat): 123 | global posvec_lon_scale 124 | if lat <> 0: 125 | posvec_lon_scale = math.cos(math.radians(lat)) 126 | 127 | # main - used to test the class 128 | def main(self): 129 | # set home position - to tridge's home field (this is just for testing anyway) 130 | PositionVector.set_home_location(Location(-35.362938,149.165085,0)) 131 | print "Home %s" % PositionVector.get_home_location() 132 | home_pos = PositionVector(0,0,0) 133 | print "Home %s" % home_pos 134 | 135 | # other position 136 | other_pos = PositionVector.get_from_location(PositionVector.get_home_location()) 137 | print "Other %s" % other_pos 138 | 139 | # set vehicle to be 10m north, 10m east and 10m above home 140 | veh_pos = PositionVector(10,10,10) 141 | print "Vehicle %s" % veh_pos.get_location() 142 | print "Vehicle %s" % veh_pos 143 | print "Distance from home: %f" % PositionVector.get_distance_xyz(home_pos,veh_pos) 144 | 145 | 146 | # run the main routine if this is file is called from the command line 147 | if __name__ == "__main__": 148 | dummy = PositionVector(0,0,0) 149 | dummy.main() -------------------------------------------------------------------------------- /Tools/sc_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | SmartCamConfig class : handles config for the balloon_finder project 3 | 4 | balloon_finder.cnf file is created in the local directory 5 | 6 | other classes or files wishing to use this class should add 7 | import balloon_config 8 | 9 | """ 10 | 11 | from os.path import expanduser 12 | from StringIO import StringIO 13 | import numpy as np 14 | import ConfigParser 15 | 16 | class SmartCamConfig(object): 17 | 18 | def __init__(self): 19 | # create the global parser object 20 | self.parser = ConfigParser.SafeConfigParser() 21 | 22 | #load default config file 23 | self.get_file() 24 | 25 | 26 | #get_file - open a config file 27 | def get_file(self,name='Smart_Camera'): 28 | # default config file 29 | self.config_file = expanduser("~/"+ name + ".cnf") 30 | 31 | # read the config file into memory 32 | self.read() 33 | 34 | # read - reads the contents of the file into the dictionary in RAM 35 | def read(self): 36 | try: 37 | self.parser.read(self.config_file) 38 | except IOError as e: 39 | print 'Error {0} reading config file: {1}: '.format(e.errno, e.strerror) 40 | return 41 | 42 | # save - saves the config to disk 43 | def save(self): 44 | try: 45 | with open(self.config_file, 'wb') as configfile: 46 | self.parser.write(configfile) 47 | except IOError as e: 48 | print 'Error {0} writing config file: {1}: '.format(e.errno, e.strerror) 49 | return 50 | 51 | # check_section - ensures the section exists, creates it if not 52 | def check_section(self, section): 53 | if not self.parser.has_section(section): 54 | self.parser.add_section(section) 55 | return 56 | 57 | # get_balloon - returns the boolean found in the specified section/option or the default if not found 58 | def get_boolean(self, section, option, default): 
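        # a missing section/option raises ConfigParser.Error, so the caller's default is returned instead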
59 | try: 60 | return self.parser.getboolean(section, option) 61 | except ConfigParser.Error: 62 | return default 63 | 64 | # set_boolean - sets the boolean to the specified section/option 65 | def set_boolean(self, section, option, new_value): 66 | self.check_section(section) 67 | self.parser.set(section, option, str(bool(new_value))) 68 | return 69 | 70 | # get_integer - returns the integer found in the specified section/option or the default if not found 71 | def get_integer(self, section, option, default): 72 | try: 73 | return self.parser.getint(section, option) 74 | except ConfigParser.Error: 75 | return default 76 | 77 | # set_integer - sets the integer to the specified section/option 78 | def set_integer(self, section, option, new_value): 79 | self.check_section(section) 80 | self.parser.set(section, option, str(int(new_value))) 81 | return 82 | 83 | # get_float - returns the float found in the specified section/option or the default if not found 84 | def get_float(self, section, option, default): 85 | try: 86 | return self.parser.getfloat(section, option) 87 | except ConfigParser.Error: 88 | return default 89 | 90 | # set_float - sets the float to the specified section/option 91 | def set_float(self, section, option, new_value): 92 | self.check_section(section) 93 | self.parser.set(section, option, str(float(new_value))) 94 | return 95 | 96 | # get_string - returns the string found in the specified section/option or the default if not found 97 | def get_string(self, section, option, default): 98 | try: 99 | return self.parser.get(section, option) 100 | except ConfigParser.Error: 101 | return default 102 | 103 | # set_string - sets the string to the specified section/option 104 | def set_string(self, section, option, new_value): 105 | self.check_section(section) 106 | self.parser.set(section, option, str(new_value)) 107 | return 108 | 109 | # get_array - returns the array found in the specified section/option or the default if not found 110 | def get_array(self, section, option, default): 111 | try: 112 | raw = self.parser.get(section, option) 113 | return np.loadtxt(StringIO(raw)) 114 | except (ValueError, ConfigParser.Error): 115 | return default 116 | 117 | #set_array - sets the array to the specific section/option. 
Can only handle balanced 1D/2D primative type nparrays 118 | def set_array(self, section, option, new_value): 119 | self.check_section(section) 120 | try: 121 | #create iterator 122 | itr = np.nditer(new_value) 123 | colSize = new_value.shape[0] 124 | #deliminate columns by whitespace and rows by newline 125 | arr = '' 126 | for x in range(0,itr.itersize): 127 | arr += str(itr.next()) 128 | if x%colSize == colSize -1: 129 | arr += '\n' 130 | else: 131 | arr += ' ' 132 | 133 | self.parser.set(section, option, arr) 134 | 135 | except TypeError: 136 | self.parser.set(section, option, 'None') 137 | return 138 | 139 | # main - tests BalloonConfig class 140 | def main(self): 141 | #open file 142 | self.get_file("test_config") 143 | 144 | # print welcome message 145 | print "SmartCamConfig v0.1 test" 146 | print "config file: %s" % self.config_file 147 | 148 | # write and read a boolean 149 | section = 'Test_Section1' 150 | option = 'Test_boolean' 151 | print "Writing %s/%s = True" % (section,option) 152 | self.set_boolean(section,option,True) 153 | print "Read %s/%s : %s" % (section, option, self.get_boolean(section, option, False)) 154 | 155 | # write and read an integer 156 | section = 'Test_Section1' 157 | option = 'Test_integer' 158 | print "Writing %s/%s = 11" % (section,option) 159 | self.set_integer(section,option,11) 160 | print "Read %s/%s : %s" % (section, option, self.get_integer(section, option, 99)) 161 | 162 | # write and read a float 163 | section = 'Test_Section1' 164 | option = 'Test_float' 165 | print "Writing %s/%s = 12.345" % (section,option) 166 | self.set_float(section,option,12.345) 167 | print "Read %s/%s : %s" % (section, option, self.get_float(section, option, 0.01)) 168 | 169 | #write and read an array 170 | section = 'Test_Section1' 171 | option = 'Test_array' 172 | arr = np.array([[2.3,345,56],[2,-3,3.14],[45,1,5]]) 173 | print "writing %s/%s = [[2.3,345,56]\n[2,-3,3.14]\n[45,1,5]]" % (section,option) 174 | self.set_array(section,option,arr) 175 | print "Read %s/%s : %s" % (section, option, self.get_array(section, option, np.array([0,2]))) 176 | 177 | 178 | # read an undefined number to get back the default 179 | section = 'Test_Section2' 180 | option = 'test_default' 181 | print "Read %s/%s : %s" % (section, option, self.get_float(section, option, 21.21)) 182 | 183 | 184 | 185 | # save the config file 186 | self.save() 187 | 188 | return 189 | 190 | # declare global smartcam config object 191 | config = SmartCamConfig() 192 | 193 | # run the main routine if this is file is called from the command line 194 | if __name__ == "__main__": 195 | config.main() -------------------------------------------------------------------------------- /pl_sim.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import cv2 3 | import numpy as np 4 | import math 5 | import time 6 | from position_vector import PositionVector 7 | from vehicle_control import veh_control 8 | from droneapi.lib import VehicleMode, Location, Attitude 9 | import sc_config 10 | from pl_util import shift_to_image 11 | 12 | current_milli_time = lambda: int(round(time.time() * 1000)) 13 | 14 | ''' 15 | TODO 16 | Intergrate config file 17 | 18 | Long term enhancements: 19 | Add in textured/tiled background 20 | dynamic exposure levels / frame rate 21 | Add google earth as background 22 | ''' 23 | 24 | 25 | 26 | class PrecisionLandSimulator(): 27 | 28 | 29 | def __init__(self): 30 | self.targetLocation = PositionVector() 31 | self.vehicleLocation = 
PositionVector() 32 | 33 | self.backgroundColor = (74,88,109) 34 | 35 | 36 | #load target 37 | filename = sc_config.config.get_string('simulator', 'target_location', '/home/dannuge/SmartCamera/target.jpg') 38 | target_size = sc_config.config.get_float('algorithm', 'outer_ring', 1.0) 39 | self.load_target(filename,target_size) 40 | 41 | 42 | #define camera 43 | self.camera_width = sc_config.config.get_integer('camera', 'width', 640) 44 | self.camera_height = sc_config.config.get_integer('camera', 'height', 640) 45 | self.camera_vfov = sc_config.config.get_float('camera', 'vertical-fov',72.42 ) 46 | self.camera_hfov = sc_config.config.get_float('camera', 'horizontal-fov', 72.42) 47 | self.camera_fov = math.sqrt(self.camera_vfov**2 + self.camera_hfov**2) 48 | self.camera_frameRate = 30 49 | 50 | #load_target- load an image to simulate the target. Enter the actaul target size in meters(assuming the target is square) 51 | def load_target(self,filename, actualSize): 52 | self.target = cv2.imread(filename) 53 | self.target_width = self.target.shape[1] 54 | self.target_height = self.target.shape[0] 55 | 56 | 57 | self.actualSize = actualSize 58 | #scaling factor for real dimensions to simultor pixels 59 | self.pixels_per_meter = (self.target_height + self.target_width) / (2.0 * actualSize) 60 | 61 | 62 | #set_target_location- give the target a gps location 63 | def set_target_location(self, location): 64 | self.targetLocation.set_from_location(location) 65 | 66 | 67 | #refresh_simulator - update vehicle position info necessary to simulate an image 68 | def refresh_simulator(self, vehicleLocation, vehicleAttitude): 69 | #get gps location of vehicle 70 | self.vehicleLocation.set_from_location(vehicleLocation) 71 | 72 | self.vehicleAttitude = vehicleAttitude 73 | 74 | 75 | #main - code used to test the simulator. Must be run from sitl. Control vehicle in guided mode using arrow keys,r,t,q,e 76 | def main(self): 77 | veh_control.connect(local_connect()) 78 | 79 | self.set_target_location(veh_control.get_location()) 80 | 81 | while(veh_control.is_connected()): 82 | location = veh_control.get_location() 83 | attitude = veh_control.get_attitude() 84 | 85 | self.refresh_simulator(location,attitude) 86 | frame = self.get_frame() 87 | cv2.imshow('frame',frame) 88 | 89 | key = cv2.waitKey(1) 90 | print key 91 | 92 | if key ==1113938: 93 | veh_control.set_velocity(2,0,0) #forward 94 | elif key == 1113940: 95 | veh_control.set_velocity(-2,0,0) #backward 96 | elif key == 1113937: 97 | veh_control.set_velocity(0,-2,0) #left 98 | elif key ==1113939: 99 | veh_control.set_velocity(0,2,0) #right 100 | elif(key == 1048690): 101 | yaw = math.degrees(attitude.yaw) #yaw left 102 | veh_control.set_yaw(yaw - 5) 103 | elif(key == 1048692): 104 | yaw = math.degrees(attitude.yaw) #yaw right 105 | veh_control.set_yaw(yaw + 5) 106 | elif(key == 1048677): 107 | veh_control.set_velocity(0,0,-2) #down 108 | elif(key == 1048689): 109 | veh_control.set_velocity(0,0,2) #up 110 | else: 111 | veh_control.set_velocity(0,0,0) #still 112 | 113 | 114 | #project_3D_to_2D - project a 3d point onto a 2d plane. 
Covert from world perspective to camera perspective 115 | def project_3D_to_2D(self,thetaX,thetaY,thetaZ, aX, aY,aZ, cX, cY, cZ, height, width, fov): 116 | dX = math.cos(-thetaY) * (math.sin(-thetaZ)*(cY-aY) + math.cos(-thetaZ)*(cX-aX)) - math.sin(-thetaY)*(aZ-cZ) 117 | dY = math.sin(-thetaX) * (math.cos(-thetaY)*(aZ-cZ) + math.sin(-thetaY)*(math.sin(-thetaZ)*(cY-aY) + math.cos(-thetaZ)*(cX-aX))) + math.cos(-thetaX)*(math.cos(-thetaZ)*(cY-aY) - math.sin(-thetaZ) * (cX-aX)) 118 | dZ = math.cos(-thetaX) * (math.cos(-thetaY)*(aZ-cZ) + math.sin(-thetaY)*(math.sin(-thetaZ)*(cY-aY) + math.cos(-thetaZ)*(cX-aX))) - math.sin(-thetaX)*(math.cos(-thetaZ)*(cY-aY) - math.sin(-thetaZ) * (cX-aX)) 119 | 120 | #veiwer position 121 | eX = 0 122 | eY = 0 123 | eZ = 1.0/math.tan(math.radians(fov)/2.0) 124 | 125 | #2D point 126 | bX = (dX - eX)*(eZ/dZ) 127 | bY = (dY - eY)*(eZ/dZ) 128 | 129 | #scaled to resolution 130 | sX = bX * width 131 | sY = bY * height 132 | 133 | return (sX,sY) 134 | 135 | 136 | #simulate_target - simulate an image given the target position[aX,aY,aZ](pixels)and camera position[cX,cY,cZ](pixels) and camera orientation 137 | def simulate_target(self,thetaX,thetaY,thetaZ, aX, aY, aZ, cX, cY, cZ, camera_height, camera_width, fov): 138 | img_width = self.target_width 139 | img_height = self.target_height 140 | 141 | #point maps 142 | corners = np.float32([[-img_width/2,img_height/2],[img_width/2 ,img_height/2],[-img_width/2,-img_height/2],[img_width/2, -img_height/2]]) 143 | newCorners = np.float32([[0,0],[0,0],[0,0],[0,0]]) 144 | 145 | 146 | #calculate projection for four corners of image 147 | for i in range(0,len(corners)): 148 | 149 | #shift to world 150 | x = corners[i][0] + cX - img_width/2.0 151 | y = corners[i][1] + cY - img_height/2.0 152 | 153 | 154 | #calculate perspective and position 155 | x , y = self.project_3D_to_2D(thetaX,thetaY,thetaZ, aY, aX, aZ, y, x, cZ,camera_height,camera_width,fov) 156 | 157 | #shift to camera 158 | x , y = shift_to_image((x,y),camera_width,camera_height) 159 | newCorners[i] = x,y 160 | 161 | 162 | #project image 163 | M = cv2.getPerspectiveTransform(corners,newCorners) 164 | sim = cv2.warpPerspective(self.target,M,(self.camera_width,self.camera_height),borderValue=self.backgroundColor) 165 | 166 | return sim 167 | 168 | 169 | 170 | #get_frame - retreive a simulated camera image 171 | def get_frame(self): 172 | start = current_milli_time() 173 | 174 | #distance bewteen camera and target in meters 175 | aX,aY,aZ = self.targetLocation.x, self.targetLocation.y, self.targetLocation.z 176 | cX,cY,cZ = self.vehicleLocation.x, self.vehicleLocation.y, self.vehicleLocation.z 177 | 178 | #camera angle 179 | thetaX = self.vehicleAttitude.pitch 180 | thetaY = self.vehicleAttitude.roll 181 | thetaZ = self.vehicleAttitude.yaw 182 | 183 | #convert distance bewtween camera and target in pixels 184 | aX = aX * self.pixels_per_meter 185 | aY = aY * self.pixels_per_meter 186 | aZ = aZ * self.pixels_per_meter 187 | cX = cX * self.pixels_per_meter 188 | cY = cY * self.pixels_per_meter 189 | cZ = cZ * self.pixels_per_meter 190 | 191 | 192 | #render image 193 | sim = self.simulate_target(thetaX,thetaY,thetaZ, aX, aY, aZ, cX, cY, cZ, self.camera_height, self.camera_width, self.camera_fov) 194 | 195 | #simulate framerate 196 | while(1000/self.camera_frameRate > current_milli_time() - start): 197 | pass 198 | return sim 199 | 200 | 201 | sim = PrecisionLandSimulator() 202 | 203 | 204 | if __name__ == "__builtin__": 205 | sim.main() 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 
| -------------------------------------------------------------------------------- /sc_config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | SmartCamConfig class : handles config for the balloon_finder project 4 | 5 | balloon_finder.cnf file is created in the local directory 6 | 7 | other classes or files wishing to use this class should add 8 | import balloon_config 9 | 10 | """ 11 | 12 | from os.path import expanduser 13 | from StringIO import StringIO 14 | import numpy as np 15 | import ConfigParser 16 | 17 | class SmartCamConfig(object): 18 | 19 | def __init__(self): 20 | # create the global parser object 21 | self.parser = ConfigParser.SafeConfigParser() 22 | 23 | #load default config file 24 | self.get_file() 25 | 26 | 27 | #get_file - open a config file 28 | def get_file(self,name='Smart_Camera'): 29 | # default config file 30 | self.config_file = expanduser("~/"+ name + ".cnf") 31 | 32 | # read the config file into memory 33 | self.read() 34 | 35 | # read - reads the contents of the file into the dictionary in RAM 36 | def read(self): 37 | try: 38 | self.parser.read(self.config_file) 39 | except IOError as e: 40 | print 'Error {0} reading config file: {1}: '.format(e.errno, e.strerror) 41 | return 42 | 43 | # save - saves the config to disk 44 | def save(self): 45 | try: 46 | with open(self.config_file, 'wb') as configfile: 47 | self.parser.write(configfile) 48 | except IOError as e: 49 | print 'Error {0} writing config file: {1}: '.format(e.errno, e.strerror) 50 | return 51 | 52 | # check_section - ensures the section exists, creates it if not 53 | def check_section(self, section): 54 | if not self.parser.has_section(section): 55 | self.parser.add_section(section) 56 | return 57 | 58 | # get_balloon - returns the boolean found in the specified section/option or the default if not found 59 | def get_boolean(self, section, option, default): 60 | try: 61 | return self.parser.getboolean(section, option) 62 | except ConfigParser.Error: 63 | return default 64 | 65 | # set_boolean - sets the boolean to the specified section/option 66 | def set_boolean(self, section, option, new_value): 67 | self.check_section(section) 68 | self.parser.set(section, option, str(bool(new_value))) 69 | return 70 | 71 | # get_integer - returns the integer found in the specified section/option or the default if not found 72 | def get_integer(self, section, option, default): 73 | try: 74 | return self.parser.getint(section, option) 75 | except ConfigParser.Error: 76 | return default 77 | 78 | # set_integer - sets the integer to the specified section/option 79 | def set_integer(self, section, option, new_value): 80 | self.check_section(section) 81 | self.parser.set(section, option, str(int(new_value))) 82 | return 83 | 84 | # get_float - returns the float found in the specified section/option or the default if not found 85 | def get_float(self, section, option, default): 86 | try: 87 | return self.parser.getfloat(section, option) 88 | except ConfigParser.Error: 89 | return default 90 | 91 | # set_float - sets the float to the specified section/option 92 | def set_float(self, section, option, new_value): 93 | self.check_section(section) 94 | self.parser.set(section, option, str(float(new_value))) 95 | return 96 | 97 | # get_string - returns the string found in the specified section/option or the default if not found 98 | def get_string(self, section, option, default): 99 | try: 100 | return self.parser.get(section, option) 101 | except 
ConfigParser.Error: 102 | return default 103 | 104 | # set_string - sets the string to the specified section/option 105 | def set_string(self, section, option, new_value): 106 | self.check_section(section) 107 | self.parser.set(section, option, str(new_value)) 108 | return 109 | 110 | # get_array - returns the array found in the specified section/option or the default if not found 111 | def get_array(self, section, option, default): 112 | try: 113 | raw = self.parser.get(section, option) 114 | return np.loadtxt(StringIO(raw)) 115 | except (ValueError, ConfigParser.Error): 116 | return default 117 | 118 | #set_array - sets the array to the specific section/option. Can only handle balanced 1D/2D primative type nparrays 119 | def set_array(self, section, option, new_value): 120 | self.check_section(section) 121 | try: 122 | #create iterator 123 | itr = np.nditer(new_value) 124 | colSize = new_value.shape[0] 125 | #deliminate columns by whitespace and rows by newline 126 | arr = '' 127 | for x in range(0,itr.itersize): 128 | arr += str(itr.next()) 129 | if x%colSize == colSize -1: 130 | arr += '\n' 131 | else: 132 | arr += ' ' 133 | 134 | self.parser.set(section, option, arr) 135 | 136 | except TypeError: 137 | self.parser.set(section, option, 'None') 138 | return 139 | 140 | # main - tests BalloonConfig class 141 | def main(self): 142 | #open file 143 | self.get_file("test_config") 144 | 145 | # print welcome message 146 | print "SmartCamConfig v0.1 test" 147 | print "config file: %s" % self.config_file 148 | 149 | # write and read a boolean 150 | section = 'Test_Section1' 151 | option = 'Test_boolean' 152 | print "Writing %s/%s = True" % (section,option) 153 | self.set_boolean(section,option,True) 154 | print "Read %s/%s : %s" % (section, option, self.get_boolean(section, option, False)) 155 | 156 | # write and read an integer 157 | section = 'Test_Section1' 158 | option = 'Test_integer' 159 | print "Writing %s/%s = 11" % (section,option) 160 | self.set_integer(section,option,11) 161 | print "Read %s/%s : %s" % (section, option, self.get_integer(section, option, 99)) 162 | 163 | # write and read a float 164 | section = 'Test_Section1' 165 | option = 'Test_float' 166 | print "Writing %s/%s = 12.345" % (section,option) 167 | self.set_float(section,option,12.345) 168 | print "Read %s/%s : %s" % (section, option, self.get_float(section, option, 0.01)) 169 | 170 | #write and read an array 171 | section = 'Test_Section1' 172 | option = 'Test_array' 173 | arr = np.array([[2.3,345,56],[2,-3,3.14],[45,1,5]]) 174 | print "writing %s/%s = [[2.3,345,56]\n[2,-3,3.14]\n[45,1,5]]" % (section,option) 175 | self.set_array(section,option,arr) 176 | print "Read %s/%s : %s" % (section, option, self.get_array(section, option, np.array([0,2]))) 177 | 178 | 179 | # read an undefined number to get back the default 180 | section = 'Test_Section2' 181 | option = 'test_default' 182 | print "Read %s/%s : %s" % (section, option, self.get_float(section, option, 21.21)) 183 | 184 | 185 | 186 | # save the config file 187 | self.save() 188 | 189 | return 190 | 191 | # declare global smartcam config object 192 | config = SmartCamConfig() 193 | 194 | # run the main routine if this is file is called from the command line 195 | if __name__ == "__main__": 196 | config.main() -------------------------------------------------------------------------------- /sc_logger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import logging 3 | import sys 4 | import os 5 | import 
cv2 6 | import time 7 | import sc_config 8 | 9 | #TODO allow images to be displayed over webserver and possibly print statements 10 | 11 | 12 | 13 | 14 | class SmartCameraLogger(object): 15 | 16 | #level enumerations: text 17 | #general - logs/prints inforamtion about program activities(starting processes, camera failure, mode change, errors) 18 | GENERAL = 'general' 19 | #aircraft - logs/prints information coming from autopilot 20 | AIRCRAFT = 'aircraft' 21 | #algorithm - logs/prints information returned by image processing algorithm 22 | ALGORITHM = 'algorithm' 23 | #performance - logs/prints performance of the program 24 | PERFORMANCE = 'performance' 25 | #debug - logs/prints temporary messages to be used while developing 26 | DEBUG = 'debug' 27 | 28 | #level enumeration: image 29 | #raw - record/display raw video from camera 30 | RAW = 'raw' 31 | #gui - record/display raw video overlayed with a gui 32 | GUI = 'gui' 33 | 34 | ####Other enumerations made be added as necessary. These are just the defaults 35 | 36 | 37 | def __init__(self): 38 | #create sub driectory for logs and videos 39 | self.location = sc_config.config.get_string('logging','location','./') 40 | 41 | path = self.location + 'logs' 42 | if not os.path.exists(path): 43 | os.makedirs(path) 44 | path = self.location + 'vids' 45 | if not os.path.exists(path): 46 | os.makedirs(path) 47 | 48 | #set default startegy name 49 | self.strat_name = 'Smart_Camera' 50 | 51 | # get image resolution 52 | self.img_width = sc_config.config.get_integer('camera','width',640) 53 | self.img_height = sc_config.config.get_integer('camera','height',480) 54 | 55 | #####TEXT LOGGING###### 56 | #levels = 'debug' , 'general' , 'aircraft' , 'algorithm' , ' performance' 57 | #multiple message levels can be selected by concatination strings i.e. 'debug, aircraft' 58 | #what type of messages we print to the terminal 59 | self.print_level = sc_config.config.get_string('logging','print_level','debug, general') 60 | #what type of messages we log to a file 61 | self.log_level = sc_config.config.get_string('logging','log_level','aircraft , algorithm, general') 62 | 63 | 64 | #####VIDEO RECORDING###### 65 | #levels = 'frame' , 'gui' 66 | #multiple message levels can be selected at once by concatination strings i.e. 'frame, gui' 67 | #what type of images we display on the screen 68 | self.display_level = sc_config.config.get_string('logging', 'display_level', 'raw, gui') 69 | #what type of images we record 70 | self.record_level = sc_config.config.get_string('logging', 'record_level', 'raw') 71 | 72 | #Note about useful logging practices: 73 | #Inorder to replay a flight through the program log_level must include 'aircraft' and 'algorithm' 74 | #record level must include 'frame' 75 | #all other logging levels are for user diagnostics 76 | 77 | #a list of video writers and their tag 78 | self.video_writers = [] 79 | 80 | #text logger 81 | self.logger = None 82 | 83 | #set_name - give the logger a name. 
Helps when creating and understanding file names 84 | def set_name(self, strat_name): 85 | #define the name of the current strategy 86 | self.strat_name = strat_name 87 | 88 | #image - records/displays an image with the provided level 89 | def image(self, level, img): 90 | #display frame 91 | if(self.display_level.find(level) != -1): 92 | #show image 93 | cv2.imshow(level, img) 94 | #end program with esc key 95 | k = cv2.waitKey(1) & 0xFF 96 | if k == 27: 97 | cv2.destroyAllWindows() 98 | self.text(self.GENERAL, "User terminated Program") 99 | sys.exit(0) 100 | 101 | #record frame 102 | if(self.record_level.find(level) != -1): 103 | writer = self.get_video_writer(level) 104 | writer.write(img) 105 | 106 | #text - logs/prints a message with the provided level 107 | def text(self, level, msg): 108 | #print text 109 | if(self.print_level.find(level) != -1): 110 | print msg 111 | 112 | #log text 113 | if(self.log_level.find(level) != -1): 114 | 115 | #open a logger if necessary 116 | if(self.logger is None): 117 | self.open_text_logger() 118 | 119 | 120 | #log text 121 | self.logger.info(msg , extra={'type': level}) 122 | 123 | 124 | 125 | #open_video_writer- opens a video writer with a filename that starts the tag. The rest of the file name is date-time 126 | def open_video_writer(self, tag): 127 | #define filename 128 | path = self.location + 'vids/{0}-{1}-%Y-%m-%d-%H-%M.avi' 129 | filename = time.strftime(path.format(self.strat_name, tag)) 130 | 131 | 132 | # Define the codec and create VideoWriter object 133 | # Note: setting ex to -1 will display pop-up requesting user choose the encoder 134 | # Frame rate is hard coded in despite actaul recording rate: 15 fps 135 | ex = int(cv2.cv.CV_FOURCC('M','J','P','G')) 136 | video_writer = cv2.VideoWriter(filename, ex, 15, (self.img_width,self.img_height)) 137 | 138 | #add the video writer to a list of video writers 139 | self.video_writers.append((tag,video_writer)) 140 | 141 | #return the writer 142 | return video_writer 143 | 144 | #get_video_writer- retreives a videowriter assocaited with a certain tag 145 | def get_video_writer(self,tag): 146 | #look for video writer 147 | for writer in self.video_writers: 148 | if(writer[0] == tag): 149 | return writer[1] 150 | 151 | #if it doesn't exist, open one 152 | return self.open_video_writer(tag) 153 | 154 | 155 | #open_text_logger - create an instance of 'logging' and 156 | def open_text_logger(self): 157 | 158 | ''' 159 | 160 | #configure logger 161 | #format of log line: time - log level - message 162 | FORMAT = '%(asctime)s - %(type)s - %(message)s' 163 | 164 | 165 | #define filename 166 | path = self.location + 'logs/{0}-%Y-%m-%d-%H-%M.log' 167 | filename = time.strftime(path.format(self.strat_name)) 168 | 169 | logging.basicConfig(format=FORMAT, level=logging.INFO, filename= filename) 170 | #create logger 171 | self.logger = logging.getLogger() 172 | 173 | ''' 174 | #create logger 175 | self.logger = logging.getLogger('sc_logger') 176 | self.logger.setLevel(logging.INFO) 177 | 178 | # add a file handler 179 | #define filename 180 | path = self.location + 'logs/{0}-%Y-%m-%d-%H-%M.log' 181 | filename = time.strftime(path.format(self.strat_name)) 182 | fh = logging.FileHandler(filename) 183 | fh.setLevel(logging.INFO) 184 | # create a formatter and set the formatter for the handler. 
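The '%(type)s' placeholder in the format string on the next line is not a built-in LogRecord attribute; it only resolves because every call in text() passes extra={'type': level}. A minimal stand-alone sketch of that contract, using nothing beyond the stdlib logging module (the 'demo' names are illustrative, not part of this class):

    import logging
    demo = logging.getLogger('sc_logger_demo')
    demo.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s - %(type)s - %(message)s'))
    demo.addHandler(handler)
    # the 'type' key becomes an attribute of the LogRecord and fills %(type)s;
    # omitting extra would leave the formatter unable to render this record
    demo.info('camera opened', extra={'type': 'general'})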
185 | frmt = logging.Formatter('%(asctime)s - %(type)s - %(message)s') 186 | fh.setFormatter(frmt) 187 | # add the Handler to the logger 188 | self.logger.addHandler(fh) 189 | self.logger.propogate = False 190 | 191 | 192 | #create global logger 193 | sc_logger = SmartCameraLogger() 194 | 195 | if __name__ == '__main__': 196 | sc_logger.set_name('test') 197 | 198 | #test text logger 199 | sc_logger.text(sc_logger.DEBUG,"This is should appear in the terminal") 200 | sc_logger.text(sc_logger.AIRCRAFT, "This should appear in the file") 201 | sc_logger.text(sc_logger.GENERAL, "This should appear in both") 202 | sc_logger.text(sc_logger.PERFORMANCE, "This should not appear at all") 203 | 204 | #test video logger 205 | #create images 206 | import numpy as np 207 | green_image = np.zeros((sc_logger.img_height,sc_logger.img_width,3), np.uint8) 208 | green_image[:] = (0,255,0) 209 | blue_image = np.zeros((sc_logger.img_height,sc_logger.img_width,3), np.uint8) 210 | blue_image[:] = (255,0,0) 211 | 212 | 213 | for x in range(0, 45): 214 | time.sleep(66.6/1000) 215 | #Display a green and blue image for 3 seconds 216 | #Save a 3 second video of the blue image 217 | sc_logger.image(sc_logger.RAW, blue_image) 218 | sc_logger.image(sc_logger.GUI, green_image) -------------------------------------------------------------------------------- /sc_dispatcher.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import multiprocessing 3 | import numpy as np 4 | import time 5 | import sc_config 6 | from sc_logger import sc_logger 7 | 8 | current_milli_time = lambda: int(time.time() * 1000) 9 | 10 | ''' 11 | SmartCameraDispatcher 12 | This class helps process images across multicores 13 | It can be used on (n) number of cores: including a single core. 14 | The disatcher sends images out for processing as fast as the camera can feed the images or as fast as the CPU can process them 15 | In addition to compensating to the number of core available for use, the program compensates for varying operating conditions 16 | i.e. slower image capture in low light settings; longer processing time with 'busy' images 17 | It will take at least 4 cycles for the program to stabilize its condition 18 | The dispatcher may not use all the cores it has access to. This is common for functions that already run 'fast' 19 | 20 | Usage: 21 | Upon intializing the dispatcher, the function/process to dispatched must be passed as an arguement to the constructor 22 | The first parameter of the function/process must be a child pipe connection 23 | This pipe connection gets passed in AUTOMATICALLY by the dispatcher. 24 | Meainng it is never used outside the function definition and the dispatcher class 25 | All other arguements depend on the operation of the function/process 26 | These will be passed into the dispatch() method 27 | Exculding the pipe connection. The dispatcher passes that in on its own 28 | The function/process will have to return all results through a pipe. 
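For example, a minimal worker obeying the rules spelled out below could look like this sketch (dummy_detector and its frame argument are hypothetical, shown only to illustrate the calling convention):

    def dummy_detector(conn, frame):
        start = int(time.time() * 1000)
        result = frame.mean()                       # stand-in for real image processing
        runtime = int(time.time() * 1000) - start
        conn.send((runtime, result))                # internal runtime in millis goes first

    # dispatched from the parent as:
    #   sc_dispatcher.dispatch(target=dummy_detector, args=(frame,))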
29 | Results must be returned as a tuple 30 | The first result of the tuple MUST BE an interal run time(in millis) of the function/process 31 | ''' 32 | 33 | 34 | 35 | 36 | class SmartCameraDispatcher(object): 37 | 38 | def __init__(self): 39 | 40 | #The number of core to be used while processing image data 41 | #This number may be less than the actaul number of cores on the CPU depending on the users specifications 42 | #cores = min(desiredCores, multiprocessing.cpu_count()) //dont allow more cores than the CPU has available and don;t run more than the user wants 43 | desired_cores = sc_config.config.get_integer('processing', 'desired_cores', 4) 44 | available_cores = min(desired_cores, multiprocessing.cpu_count()) 45 | #check if a core is already in use for background image capture 46 | cores_image_capture = int(sc_config.config.get_boolean('processing','background_capture', True)) 47 | self.cores_processing = available_cores - cores_image_capture 48 | 49 | 50 | #The time(in millis) is takes to capture an Image 51 | #Frame rate = 1000/captureTime 52 | #****On some cameras frame rate is dependent on camera exposure level 53 | #****i.e. dark = slower fps and light = fast frame rate 54 | #****Capture time is dependent on available CPU 55 | #This time will be dynamically calculated using an rolling average 56 | self.captureTime = 0 57 | self.captureTimeSet = np.zeros(4) 58 | 59 | #The time(in millis) it takes to process an image 60 | #****Process time is dependent on available CPU and the image 61 | #This time will be dynamically calculated using an rolling average 62 | self.processTime = 0 63 | self.processTimeSet = np.zeros(4) 64 | 65 | #How often a image is dispatched to be processed(in millis) 66 | #Determined by splitting up processTime among available number of cores 67 | #*****This will not be smaller than captureTime because we won't allow it to process frames more often than capturing frames; it will cause sync issues with Pipe() 68 | #*****If the CPU is slow and has limited cores, we may process everyother frame or every nth frame!!! 
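As a worked example of the schedule formula on the next line (the numbers are hypothetical, not taken from the config): a roughly 30 fps camera feeding a 200 ms detector spread across 3 worker cores ends up CPU-limited and dispatches about every other frame.

    capture_time_ms = 33.3                      # ~30 fps camera
    process_time_ms = 200.0                     # per-frame cost of the image processor
    cores_processing = 3
    run_time_ms = max(process_time_ms / float(cores_processing), capture_time_ms)
    # max(66.7, 33.3) -> one dispatch every ~66.7 ms (~15 Hz), i.e. every other frame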
69 | #runTime = max(processTime/processing_cores,captureTime) 70 | self.runTime = 0 71 | 72 | #set up a pipe to pass info between background processes and dispatcher 73 | self.parent_conn, self.child_conn = multiprocessing.Pipe() 74 | 75 | #last time we started to process an image 76 | self.lastDispatch = 0 77 | 78 | #last time an image process completed 79 | self.lastRetreival = 0 80 | 81 | 82 | 83 | #calculate_dispatch_schedule - calculate the schedule of the image processing 84 | def calculate_dispatch_schedule(self): 85 | self.runTime = max(self.processTime/(self.cores_processing * 1.0), self.captureTime) 86 | 87 | #update_capture_time - updates the time it takes the camera to capture an image 88 | def update_capture_time(self,cap_time): 89 | #update captureTime 90 | self.captureTime, self.captureTimeSet = self.rolling_average(self.captureTimeSet, cap_time) 91 | 92 | #is_ready - checks whether it is time to dispatch a new process 93 | def is_ready(self): 94 | return (current_milli_time() - self.lastDispatch >= self.runTime) 95 | 96 | #dispatch - dispatch a new process to a core 97 | def dispatch(self, target, args): 98 | 99 | if self.is_ready() == False: 100 | return 101 | 102 | #mark our last dispatch time 103 | self.lastDispatch = current_milli_time() 104 | 105 | 106 | #splice together arguements 107 | #args = (args[0],) + (self.child_conn,) + args[1:] 108 | args = (self.child_conn,) + args 109 | 110 | #create a process to run in background 111 | p = multiprocessing.Process(target=target, args = args) 112 | p.daemon=True 113 | p.start() 114 | 115 | #is_available - checks if a process has completed and results are ready 116 | def is_available(self): 117 | #check to see if a process has finished and sent data 118 | return self.parent_conn.poll() 119 | 120 | #retreive - returns the results of the most recent completed process 121 | def retreive(self): 122 | if self.is_available(): 123 | 124 | #grab results 125 | results = self.parent_conn.recv() 126 | 127 | #update processTime. All processes must return a runtime through the pipe 128 | self.processTime, self.processTimeSet = self.rolling_average(self.processTimeSet, results[0]) 129 | 130 | #Calculate real runtime. Diagnostic purposes only 131 | #In an ideal system the dispatch rate would equal the retreival rate. 
That is not the case here 132 | actualRunTime = current_milli_time() - self.lastRetreival 133 | self.lastRetreival = current_milli_time() 134 | 135 | sc_logger.text(sc_logger.PERFORMANCE, "DispatchHz: {0} runHz: {1} ProcessHz: {2} CaptureHz: {3} Processes: {4} ".format(round(1000.0/(self.runTime)), round(1000.0/actualRunTime), round((1000.0/results[0])),round(1000.0/self.captureTime), len(multiprocessing.active_children()))) 136 | return results 137 | 138 | return None 139 | 140 | 141 | 142 | #rolling_average - returns a rolling average of a data set and inputs a new value 143 | def rolling_average(self, dataSet, newValue): 144 | total = newValue 145 | for i in range(0, len(dataSet)-1): 146 | dataSet[i] = dataSet[i+1] 147 | total += dataSet[i+1] 148 | dataSet[len(dataSet) - 1] = newValue 149 | return total * 1.0/len(dataSet) , dataSet 150 | 151 | #test for SmartCameraDispatcher 152 | def main(self): 153 | 154 | #Simulated load 155 | def dummyLoad(conn,load): 156 | time.sleep(load/1000.0) 157 | result = (load,) 158 | conn.send(result) 159 | 160 | 161 | while True: 162 | 163 | #update how often we dispatch a command 164 | self.calculate_dispatch_schedule() 165 | 166 | # simulate Retreiving an image 167 | capStart = current_milli_time() 168 | time.sleep(33.3/1000) #img = smartCam.get_image() 169 | capStop = current_milli_time() 170 | 171 | #update capture time 172 | self.update_capture_time(capStop-capStart) 173 | 174 | 175 | #Process image 176 | #We schedule the process as opposed to waiting for an available core 177 | #This brings consistancy and prevents overwriting a dead process before 178 | #information has been grabbed from the Pipe 179 | if self.is_ready(): 180 | self.dispatch(target=dummyLoad, args=(83.3)) 181 | 182 | #retreive results 183 | if self.is_available(): 184 | results = self.retreive() 185 | 186 | 187 | 188 | 189 | 190 | 191 | # create a single global object 192 | sc_dispatcher = SmartCameraDispatcher() 193 | 194 | if __name__ == "__main__": 195 | sc_dispatcher.main() -------------------------------------------------------------------------------- /vehicle_control.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import time 3 | import math 4 | from pymavlink import mavutil 5 | from droneapi.lib import VehicleMode, Location 6 | import sc_config 7 | from sc_video import sc_video 8 | from sc_logger import sc_logger 9 | 10 | """ 11 | This class encapsulates the vehicle and some commonly used controls via the DroneAPI 12 | """ 13 | 14 | ''' 15 | TODO: 16 | At rally point support to get get_landing() 17 | ''' 18 | 19 | 20 | 21 | class VehicleControl(object): 22 | 23 | def __init__(self): 24 | # add variable initialisation here 25 | self.api = None 26 | self.vehicle = None 27 | 28 | self.last_mode_call = 0 29 | self.last_mode_state = 'STABILIZE' 30 | self.mode_update_rate = sc_config.config.get_float('vehicle control', 'mode_update_rate', 0.75) 31 | 32 | self.last_home_call = 0 33 | self.last_home = None 34 | self.home_update_rate = sc_config.config.get_float('vehicle control', 'home_update_rate', 10) 35 | 36 | 37 | self.last_set_velocity = 0 38 | self.vel_update_rate = sc_config.config.get_float('vehicle control', 'vel_update_rate', 0.1) 39 | 40 | 41 | # connect - connects to droneAPI. 
42 | # because of scope issues the local_connect() must be called from the top level class 43 | def connect(self, api): 44 | # First get an instance of the API endpoint (the connect via web case will be similar) 45 | self.api = api 46 | 47 | # if we succesfully connect 48 | if not self.api is None: 49 | # get our vehicle (we assume the user is trying to control the virst vehicle attached to the GCS) 50 | self.vehicle = self.api.get_vehicles()[0] 51 | return 52 | 53 | #is_connected - are we connected to a DroneApi 54 | def is_connected(self): 55 | if (self.api is None) or (self.vehicle is None): 56 | return False 57 | return (not self.api.exit) 58 | 59 | #get_vehicle - returns the connected vehicle 60 | def get_vehicle(self): 61 | return self.vehicle 62 | 63 | # controlling_vehicle - return true if we have control of the vehicle 64 | def controlling_vehicle(self): 65 | if self.api is None: 66 | return False 67 | 68 | # we are active in guided mode 69 | if self.get_mode() == "GUIDED": 70 | return True 71 | 72 | else: 73 | return False 74 | # is_armed - returns arm status of vehicle 75 | def is_armed(self): 76 | return self.vehicle.armed 77 | 78 | # set_yaw - send condition_yaw mavlink command to vehicle so it points at specified heading (in degrees) 79 | def set_yaw(self, heading): 80 | # create the CONDITION_YAW command 81 | msg = self.vehicle.message_factory.mission_item_encode(0, 0, # target system, target component 82 | 0, # sequence 83 | mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT, # frame 84 | mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command 85 | 2, # current - set to 2 to make it a guided command 86 | 0, # auto continue 87 | heading, 0, 0, 0, 0, 0, 0) # param 1 ~ 7 88 | # send command to vehicle 89 | self.vehicle.send_mavlink(msg) 90 | self.vehicle.flush() 91 | 92 | # set_velocity - send nav_velocity command to vehicle to request it fly in specified direction 93 | def set_velocity(self, velocity_x, velocity_y, velocity_z): 94 | #only let commands through at 10hz 95 | if(time.time() - self.last_set_velocity) > self.vel_update_rate: 96 | self.last_set_velocity = time.time() 97 | # create the SET_POSITION_TARGET_LOCAL_NED command 98 | msg = self.vehicle.message_factory.set_position_target_local_ned_encode( 99 | 0, # time_boot_ms (not used) 100 | 0, 0, # target system, target component 101 | mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame 102 | 0x01C7, # type_mask (ignore pos | ignore acc) 103 | 0, 0, 0, # x, y, z positions (not used) 104 | velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s 105 | 0, 0, 0, # x, y, z acceleration (not used) 106 | 0, 0) # yaw, yaw_rate (not used) 107 | # send command to vehicle 108 | self.vehicle.send_mavlink(msg) 109 | self.vehicle.flush() 110 | 111 | sc_logger.text(sc_logger.AIRCRAFT, 'Sent Vx: {0}, Vy: {1}, Vz: {2}'.format(velocity_x,velocity_y,velocity_z)) 112 | 113 | #get_location - returns the lat, lon, alt of vehicle 114 | def get_location(self): 115 | return self.vehicle.location 116 | 117 | #get_attitude - returns pitch, roll, and yaw of vehicle 118 | def get_attitude(self): 119 | return self.vehicle.attitude 120 | 121 | #get_landing - get the landing location. Only supports home location 122 | def get_landing(self): 123 | return self.get_home(True) 124 | 125 | 126 | #get_home - get the home location for this mission 127 | def get_home(self, wait_for_arm = False): 128 | 129 | #wait unitl we are armed to grab the INITIAL home position. This will lock up the program. 
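The 0x01C7 type_mask used in set_velocity() above is easier to read when composed bit by bit: the low three bits ignore the position fields and bits 6 to 8 ignore the accelerations, which leaves the velocity fields active (bit numbering follows MAVLink's POSITION_TARGET_TYPEMASK convention). A sketch of that composition:

    IGNORE_PX, IGNORE_PY, IGNORE_PZ = 1 << 0, 1 << 1, 1 << 2   # position x/y/z
    IGNORE_AX, IGNORE_AY, IGNORE_AZ = 1 << 6, 1 << 7, 1 << 8   # acceleration x/y/z
    type_mask = (IGNORE_PX | IGNORE_PY | IGNORE_PZ |
                 IGNORE_AX | IGNORE_AY | IGNORE_AZ)
    assert type_mask == 0x01C7                                  # velocity bits 3-5 stay active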
130 | if(wait_for_arm and self.last_home is None): 131 | sc_logger.text(sc_logger.GENERAL, 'Waiting for intial home lock: Requires armed status....') 132 | while(self.vehicle.armed == False): 133 | time.sleep(0.3) 134 | sc_logger.text(sc_logger.GENERAL, 'Got valid intial home location') 135 | 136 | 137 | if(time.time() - self.last_home_call > self.home_update_rate): 138 | self.last_home_call = time.time() 139 | 140 | # download the vehicle waypoints 141 | mission_cmds = self.vehicle.commands 142 | mission_cmds.download() 143 | mission_cmds.wait_valid() 144 | # get the home lat and lon 145 | home_lat = mission_cmds[0].x 146 | home_lon = mission_cmds[0].y 147 | 148 | self.last_home = Location(home_lat,home_lon,0) 149 | 150 | return self.last_home 151 | 152 | #get_mode - get current mode of vehicle 153 | def get_mode(self): 154 | #limit how often we request the current mode 155 | if (time.time() - self.last_mode_call) > self.mode_update_rate: 156 | self.last_mode_call = time.time() 157 | self.last_mode = self.vehicle.mode.name 158 | 159 | return self.last_mode 160 | 161 | #set_mode - set the mode of the vehicle as long as we are in control 162 | def set_mode(self, mode): 163 | if self.controlling_vehicle(): 164 | self.vehicle.mode = VehicleMode(mode) 165 | self.vehicle.flush() 166 | return True 167 | 168 | return False 169 | 170 | # run - should be called repeatedly from parent 171 | def run(self): 172 | # return immediately if not connected 173 | if self.api is None: 174 | return 175 | 176 | # we are connected so iterate 177 | if self.controlling_vehicle(): 178 | # request vehicle to turn due east 179 | self.set_yaw(90) 180 | return 181 | 182 | # test - should be called to test this class from the simulator 183 | def test(self): 184 | # return immediately if not connected 185 | if self.api is None: 186 | return 187 | 188 | while not self.api.exit: 189 | # we are connected so iterate 190 | 191 | # control vehicle 192 | if self.controlling_vehicle(): 193 | # request vehicle to turn due east 194 | self.set_yaw(90) 195 | #self.set_velocity(200,0,0) 196 | print self.get_attitude() 197 | print self.get_location() 198 | 199 | # sleep so we don't consume too much CPU 200 | time.sleep(1.0) 201 | 202 | # create global object 203 | veh_control = VehicleControl() 204 | 205 | # if this is the parent class connect and run test 206 | if __name__ == "__builtin__": 207 | veh_control.connect(local_connect()) 208 | veh_control.test() 209 | -------------------------------------------------------------------------------- /sc_video.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | sc_video.py 4 | This file includes functions to: 5 | Initialise the camera 6 | Initialise the output video 7 | Image size is held in the smart_camera.cnf 8 | """ 9 | 10 | import sys 11 | from os.path import expanduser 12 | import time 13 | import math 14 | import multiprocessing 15 | import cv2 16 | import numpy as np 17 | import sc_config 18 | from sc_logger import sc_logger 19 | 20 | class SmartCameraVideo: 21 | 22 | def __init__(self): 23 | 24 | # get which camera we will use 25 | self.camera_index = sc_config.config.get_integer('camera','index',0) 26 | 27 | # get image resolution 28 | self.img_width = sc_config.config.get_integer('camera','width',640) 29 | self.img_height = sc_config.config.get_integer('camera','height',480) 30 | 31 | 32 | # get image center 33 | self.img_center_x = self.img_width / 2 34 | self.img_center_y = self.img_height / 2 35 | 36 | 37 | # 
define field of view 38 | self.cam_hfov = sc_config.config.get_float('camera','horizontal-fov',70.42) 39 | self.cam_vfov = sc_config.config.get_float('camera','vertical-fov',43.3) 40 | 41 | 42 | 43 | #get camera distortion matrix and intrinsics. Defaults: logitech c920 44 | mtx = np.array([[ 614.01269552,0,315.00073982], 45 | [0,614.43556296,237.14926858], 46 | [0,0,1.0]]) 47 | dist = np.array([0.12269303, -0.26618881,0.00129035, 0.00081791,0.17005303]) 48 | 49 | self.matrix = sc_config.config.get_array('camera','matrix', mtx) 50 | self.distortion = sc_config.config.get_array('camera', 'distortion', dist) 51 | 52 | self.newcameramtx, self.roi=cv2.getOptimalNewCameraMatrix(self.matrix,self.distortion,(self.img_width,self.img_height),1,(self.img_width,self.img_height)) 53 | 54 | 55 | #create a camera object 56 | self.camera = None 57 | 58 | 59 | #number of cores available for use 60 | desiredCores = sc_config.config.get_integer('processing', 'desired_cores', 4) 61 | self.cores_available = min(desiredCores, multiprocessing.cpu_count()) 62 | 63 | 64 | #does the user want to capture images in the background 65 | self.background_capture = sc_config.config.get_boolean('processing','background_capture', True) 66 | 67 | 68 | # background image processing variables 69 | self.proc = None # background process object 70 | self.parent_conn = None # parent end of communicatoin pipe 71 | self.img_counter = 0 # num images requested so far 72 | self.is_backgroundCap = False #state variable for background capture 73 | 74 | 75 | # __str__ - print position vector as string 76 | def __str__(self): 77 | return "SmartCameraVideo Object W:%d H:%d" % (self.img_width, self.img_height) 78 | 79 | # get_camera - initialises camera and returns VideoCapture object 80 | def get_camera(self,index): 81 | sc_logger.text(sc_logger.GENERAL, 'Starting Camera....') 82 | 83 | 84 | # setup video capture 85 | self.camera = cv2.VideoCapture(index) 86 | self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,self.img_width) 87 | self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,self.img_height) 88 | 89 | # check we can connect to camera 90 | if not self.camera.isOpened(): 91 | sc_logger.text(sc_logger.GENERAL,"failed to open camera, exiting!") 92 | sys.exit(0) 93 | 94 | sc_logger.text(sc_logger.GENERAL, 'Camera Open!') 95 | 96 | return self.camera 97 | 98 | 99 | # 100 | # background image processing routines 101 | # 102 | 103 | # image_capture_background - captures all images from the camera in the background and returning the latest image via the pipe when the parent requests it 104 | def image_capture_background(self, imgcap_connection): 105 | # exit immediately if imgcap_connection is invalid 106 | if imgcap_connection is None: 107 | sc_logger.text(sc_logger.GENERAL, "image_capture failed because pipe is uninitialised") 108 | return 109 | 110 | # clear latest image 111 | latest_image = None 112 | 113 | while True: 114 | # constantly get the image from the webcam 115 | success_flag, image=self.camera.read() 116 | 117 | # if successful overwrite our latest image 118 | if success_flag: 119 | latest_image = image 120 | 121 | # check if the parent wants the image 122 | if imgcap_connection.poll(): 123 | recv_obj = imgcap_connection.recv() 124 | # if -1 is received we exit 125 | if recv_obj == -1: 126 | break 127 | 128 | # otherwise we return the latest image 129 | imgcap_connection.send(latest_image) 130 | 131 | # release camera when exiting 132 | self.camera.release() 133 | 134 | 135 | def stop_capture(self): 136 | #Clean up when exitting 
background capture 137 | if(self.is_backgroundCap): 138 | # send exit command to image capture process 139 | self.parent_conn.send(-1) 140 | 141 | # join process 142 | self.proc.join() 143 | #no clean up required with regular capture 144 | 145 | def start_capture(self,index = 0): 146 | #make sure a camera is intialized 147 | if self.camera is None: 148 | self.get_camera(index) 149 | 150 | #background capture is desired 151 | if self.background_capture: 152 | #if we have more than one core available, then start background capture 153 | if(self.cores_available > 1): 154 | 155 | # create pipe 156 | self.parent_conn, imgcap_conn = multiprocessing.Pipe() 157 | 158 | # create and start the sub process and pass it it's end of the pipe 159 | self.proc = multiprocessing.Process(target=self.image_capture_background, args=(imgcap_conn,)) 160 | self.proc.daemon = True 161 | self.proc.start() 162 | 163 | #Mark that we are in background capture mode 164 | self.is_backgroundCap = True 165 | else: 166 | #Not enough cores for background capture or just doing regular capture 167 | self.is_backgroundCap = False 168 | 169 | 170 | # get_image - returns latest image from the camera captured from the background process 171 | def get_image(self): 172 | #grab image from pipe of background capture 173 | if(self.is_backgroundCap): 174 | # return immediately if pipe is not initialised 175 | if self.parent_conn == None: 176 | return None 177 | 178 | # send request to image capture for image 179 | self.parent_conn.send(self.img_counter) 180 | 181 | # increment counter for next interation 182 | self.img_counter = self.img_counter + 1 183 | 184 | # wait endlessly until image is returned 185 | img = self.parent_conn.recv() 186 | 187 | #use standard image cap 188 | else: 189 | #Grab an image 190 | success_flag, img= self.camera.read() 191 | 192 | # return image to caller 193 | return img 194 | 195 | #undisort_image- removes any distortion caused by the camera lense 196 | def undisort_image(self,frame): 197 | #undistort 198 | dst = cv2.undistort(frame, self.matrix, self.distortion, None, self.newcameramtx) 199 | 200 | # crop the image 201 | x,y,w,h = self.roi 202 | dst = dst[y:y+h, x:x+w] 203 | 204 | return dst 205 | 206 | # main - tests SmartCameraVideo class 207 | def main(self): 208 | #open a camera 209 | #self.get_camera(0) 210 | 211 | # start background process 212 | self.start_capture(self.camera_index) 213 | 214 | #did we start background capture 215 | print 'Background capture {0}'.format(self.is_backgroundCap) 216 | 217 | while True: 218 | # send request to image capture for image 219 | img = self.get_image() 220 | 221 | #undistort image 222 | img = self.undisort_image(img) 223 | 224 | # check image is valid 225 | if not img is None: 226 | # display image 227 | cv2.imshow ('image_display', img) 228 | else: 229 | print "no image" 230 | 231 | # check for ESC key being pressed 232 | k = cv2.waitKey(5) & 0xFF 233 | if k == 27: 234 | break 235 | 236 | # take a rest for a bit 237 | time.sleep(0.1) 238 | 239 | # send exit command to image capture process 240 | self.stop_capture() 241 | 242 | print "a2p 10 = %f" % self.angle_to_pixels_x(10) 243 | print "p2a 10 = %f" % self.pixels_to_angle_x(10) 244 | 245 | # create a single global object 246 | sc_video = SmartCameraVideo() 247 | 248 | if __name__ == "__main__": 249 | sc_video.main() -------------------------------------------------------------------------------- /CircleDetector.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/python 2 | import cv2 3 | import numpy as np 4 | import math 5 | import time 6 | import sc_config 7 | from pl_util import current_milli_time, get_distance_from_pixels 8 | 9 | 10 | ''' 11 | TODO: 12 | make ECCENTRICITY dependent on craftAttitude 13 | Clean up code 14 | Improve calcDistToTarget method 15 | ''' 16 | 17 | 18 | class CircleDetector(object): 19 | 20 | 21 | 22 | def __init__(self): 23 | #load algorithm constants 24 | #how round a circle needs to be. Perfect circle = 1 25 | self.eccentricity = sc_config.config.get_float('algorithm', 'eccentricity', 0.6) 26 | #acceptable distance(pixels) between cocentric circle centers 27 | self.distance_threshold = sc_config.config.get_integer('algorithm','distance_threshold', 15) 28 | # number of circles needed for a valid target(times 2); 2 circles are often overlayed 29 | self.min_circles = sc_config.config.get_integer('algorithm','min_circles',5) 30 | #pixels: used to identify repeat circles(stacked circles). Problem caused by findContours() 31 | self.radius_tolerance = sc_config.config.get_integer('algorithm', 'radius_tolerance', 2) 32 | #Tolerance used in comparing actaul ratios and preceived ratios 33 | self.ratio_tolerance = sc_config.config.get_float('algorithm', 'ratio_tolerance', 0.015) 34 | 35 | 36 | #target specific data 37 | #target_code is the unique ratio between rings 38 | target_code_def = np.array([0.8,0.91,0.76,0.84,0.7,0.66,0.49]) 39 | self.target_code = sc_config.config.get_array('algorithm', 'target_code',target_code_def) 40 | #the outer_ring is a scaling factor for targets of various sizes; radius of outer ring in meters 41 | self.outer_ring = sc_config.config.get_float('algorithm', 'outer_ring', 0.08255) 42 | 43 | #define field of view 44 | self.cam_hfov = sc_config.config.get_float('camera', 'horizontal-fov', 70.42) 45 | self.cam_vfov = sc_config.config.get_float('camera', 'vertical-fov', 43.3)\ 46 | 47 | #define camera size 48 | self.cam_width = sc_config.config.get_integer('camera', 'width', 640) 49 | self.cam_height = sc_config.config.get_integer('camera', 'height', 480) 50 | 51 | 52 | #analyze_frame - process an frame and look for a bullseye 53 | #params -child_conn: child pipe connection 54 | # -img: raw image to be processed 55 | # -craftAttitude: roll and pitch of aircraft 56 | #return -runtime: time in millis to process an image 57 | # -center: tuple(x,y) of the objects position on; 'None' when no target 58 | # -distance: distance in meters to the target; -1 when unable to calculate 59 | # -targetEllipses: ellipses that compose the detected target 'None' when no target 60 | def analyze_frame(self, child_conn, img, craftAttitude): 61 | #start timer 62 | start = current_milli_time() 63 | 64 | 65 | #blur image and grayscale 66 | #img = cv2.medianBlur(img,5) 67 | 68 | #grayscale image 69 | cimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) 70 | 71 | 72 | #canny edge detector 73 | edges = cv2.Canny(cimg,100,200,3) 74 | 75 | if edges is not None: 76 | 77 | #locate contours 78 | contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) 79 | 80 | #turn contours into ellipses 81 | circles = np.empty((len(contours)),object) 82 | circlesCnt = 0 83 | for i in range(0,len(contours)): 84 | contour = contours[i] 85 | #make sure contour contains enough point for an ellipse 86 | if(len(contour) > 4): 87 | #detect an ellipse 88 | ellipse = cv2.fitEllipse(contour) 89 | #only take ellipses which are round 90 | if self.checkEccentricity(ellipse,self.eccentricity): 91 | circles[circlesCnt] = ellipse 92 | 
circlesCnt += 1 93 | 94 | 95 | #if circles were found then we look for nested circles 96 | if circlesCnt > 0: 97 | 98 | #get rid of null elements 99 | circles = np.resize(circles,circlesCnt) 100 | #look for nested ellipses 101 | nestedCircles = self.detectNested(circles) 102 | 103 | #if at least min_circles circles are nested look for target 104 | #Times min_circles by two because we haven't removed repeat/stacked circles yet 105 | if len(nestedCircles) > (self.min_circles * 2): 106 | 107 | #look for circles with a common center 108 | self.finalTarget, center = self.findCommonCenter(nestedCircles) 109 | 110 | #we found the target position on xy-plane 111 | if self.finalTarget is not None: 112 | 113 | #decode the target rings for a list of ring ratios 114 | ratios = self.tagAspectRatio(self.finalTarget) 115 | 116 | 117 | #try to calculate distance to target 118 | if ratios is not None: 119 | distance = self.calcDistToTarget(self.finalTarget,ratios) 120 | 121 | stop = current_milli_time() 122 | child_conn.send((stop-start,center, distance, self.finalTarget)) 123 | return 124 | 125 | #unable to calculate distance due to invalid data 126 | else: 127 | stop = current_milli_time() 128 | child_conn.send(( stop-start, center, -1, self.finalTarget)) 129 | return 130 | 131 | 132 | #unable to locate target 133 | stop = current_milli_time() 134 | child_conn.send((stop-start,None,-1,None)) 135 | return 136 | 137 | #distCenters - distance between two ellipses 138 | def distCenters(self,ellipse1,ellipse2): 139 | #distance between centers 140 | distance = math.sqrt(math.pow((ellipse1[0][0]-ellipse2[0][0]),2) + math.pow((ellipse1[0][1] - ellipse2[0][1]),2)) 141 | return distance 142 | 143 | #detectNested- return circles which are nested within other circles 144 | def detectNested(self,rawCircles): 145 | size = len(rawCircles) 146 | nestedCircles = np.empty(size, object) 147 | nestedCnt = 0 148 | for i in range(0,size): 149 | nested = False 150 | for j in range(i, size): 151 | if i != j: 152 | circle1 = rawCircles[i] 153 | circle2 = rawCircles[j] 154 | #average major and minor axises 155 | radius1 = (circle1[1][0] + circle1[1][1]) /2.0 156 | radius2 = (circle2[1][0] + circle2[1][1]) /2.0 157 | 158 | distance = self.distCenters(circle1,circle2) 159 | 160 | #check if a circle is nested within another circle 161 | if(distance < math.fabs(radius1 - radius2)): 162 | nested = True 163 | #add the base circle if it is nested 164 | if nested: 165 | nestedCircles[nestedCnt] = rawCircles[i] 166 | nestedCnt += 1 167 | #remove null objects 168 | nestedCircles = np.resize(nestedCircles,nestedCnt) 169 | 170 | return nestedCircles 171 | 172 | 173 | #checkEccentricity - checks if an ellipse is 'round' enough 174 | def checkEccentricity(self,ellipse, threshold): 175 | #threshold = 1 for perfect circles 176 | if ellipse[1][0] * 1.0/ ellipse[1][1] > threshold: 177 | return True 178 | return False 179 | ''' 180 | def findCommonCenter(self,ellipses): 181 | 182 | size = len(ellipses) 183 | minDistance = 640 #image width 184 | center = 0,0 185 | for i in range(0,size): 186 | nested = False 187 | for j in range(i, size): 188 | ellipse1 = ellipses[i] 189 | ellipse2 = ellipses[j] 190 | distance = math.sqrt(math.pow((ellipse1[0][0]-ellipse2[0][0]),2) + math.pow((ellipse1[0][1] - ellipse2[0][1]),2)) 191 | if distance <= minDistance and i != j: 192 | minDistance = distance 193 | center = ellipse1[0][0], ellipse1[0][1] 194 | 195 | return center 196 | 197 | def ellipsesAroundCenter(ellipses, center, threshold): 198 | size = len(ellipses) 
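As a numeric illustration of the nesting test in detectNested() above, with two made-up ellipses in OpenCV's ((cx, cy), (axis1, axis2), angle) layout, a circle counts as nested when its center lies closer to the other center than the difference of the averaged axes:

    import math
    outer = ((320.0, 240.0), (200.0, 190.0), 0.0)   # hypothetical outer ring
    inner = ((322.0, 243.0), (80.0, 78.0), 0.0)     # hypothetical inner ring
    r_outer = (outer[1][0] + outer[1][1]) / 2.0      # 195.0
    r_inner = (inner[1][0] + inner[1][1]) / 2.0      # 79.0
    dist = math.sqrt((outer[0][0] - inner[0][0])**2 + (outer[0][1] - inner[0][1])**2)  # ~3.6
    print dist < math.fabs(r_outer - r_inner)        # True -> inner is nested inside outer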
199 | centeredEllipses = np.empty(size,object) 200 | centeredCnt = 0 201 | for i in range(0,size): 202 | distance = math.sqrt(math.pow((ellipses[i][0][0]-center[0]),2) + math.pow((ellipses[i][0][1] - center[1]),2)) 203 | if distance <= threshold: 204 | centeredEllipses[centeredCnt] = ellipses[i] 205 | centeredCnt += 1 206 | centeredEllipses = np.resize(centeredEllipses,centeredCnt) 207 | return centeredEllipses 208 | ''' 209 | #findCommonCenter - locates a group of circles which share a the most common center. Returns the group and the center point 210 | def findCommonCenter(self,nestedCircles): 211 | 212 | size = len(nestedCircles) 213 | 214 | #sort by radius 215 | for i in range(0,size): 216 | baseCircle = nestedCircles[i] 217 | smallestRadius = (baseCircle[1][0] + baseCircle[1][1]) /2.0 218 | smallest = i 219 | 220 | for j in range(i,size): 221 | circle = nestedCircles[j] 222 | radius = (circle[1][0] + circle[1][1]) /2.0 223 | if(radius < smallestRadius): 224 | smallestRadius = radius 225 | smallest = j 226 | 227 | nestedCircles[i] = nestedCircles[smallest] 228 | nestedCircles[smallest] = baseCircle 229 | 230 | #look at all circles 231 | #add all circles that are within a certain threshold distance 232 | #compare circle pairs and see which one has the most circles 233 | concentricCombos = np.empty([size,size],object) 234 | 235 | 236 | #start with the largest circle and scan all smaller circles and see if it is concentric with the large circle 237 | maxConcentricCnt = 1 238 | maxConcentricIndex = 0 239 | 240 | #stores circle centers 241 | xSum = np.zeros(size) 242 | ySum = np.zeros(size) 243 | 244 | for i in range(size-1,0,-1): 245 | outer = nestedCircles[i] 246 | concentricCombos[i][0] = outer 247 | cnt = 1 248 | 249 | 250 | for j in range(i, 0, -1): 251 | inner = nestedCircles[j] 252 | #outer circle and inner circle have same center, are different 253 | if (self.distCenters(outer,inner) < self.distance_threshold) and (i != j): 254 | #check that the circle isn't a repeat(a problem with findContours) 255 | previous = concentricCombos[i][cnt -1] 256 | radPrev = (previous[1][0] + previous[1][1]) /2.0 257 | radCurr = (inner[1][0] + inner[1][1]) /2.0 258 | #if the circle is cocentric and unique, add it 259 | if(radPrev - radCurr) > self.radius_tolerance: 260 | concentricCombos[i][cnt] = inner 261 | 262 | xSum[i] += inner[0][0] 263 | ySum[i] += inner[0][1] 264 | 265 | cnt += 1 266 | 267 | if(cnt > maxConcentricCnt): 268 | maxConcentricCnt = cnt 269 | maxConcentricIndex = i 270 | 271 | #no concentric circles 272 | if(maxConcentricCnt < self.min_circles): 273 | return None,None 274 | 275 | #choose the circle set with the most concentric circles 276 | mostConcentric = concentricCombos[maxConcentricIndex] 277 | mostConcentric = np.resize(mostConcentric, maxConcentricCnt) 278 | 279 | #calculate meanCenter 280 | meanCenter = xSum[maxConcentricIndex] / (maxConcentricCnt - 1), ySum[maxConcentricIndex]/(maxConcentricCnt - 1) 281 | 282 | return mostConcentric, meanCenter 283 | 284 | #tagAspectRatio- processes the final target and calculates the ratio between rings. 
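For intuition about the ring ratios (using the default target_code and outer_ring values from the config, and the same recurrence calculateRingSize() applies further below): each ring's physical radius is the previous radius scaled by the next ratio in the code.

    target_code = [0.8, 0.91, 0.76, 0.84, 0.7, 0.66, 0.49]
    outer_ring = 0.08255                        # metres, radius of the outermost ring
    radius = outer_ring
    ring_radii = [radius]
    for ratio in target_code:
        radius *= ratio
        ring_radii.append(radius)
    # ring_radii -> [0.0826, 0.0660, 0.0601, 0.0457, 0.0384, 0.0269, 0.0177, 0.0087] (approx.)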
returns an array of ratios 285 | def tagAspectRatio(self,target): 286 | size = len(target) 287 | #ratios = np.empty((size-1)*size/2.0, float) 288 | ratios = np.empty(size-1,float) 289 | cnt = 0 290 | 291 | for i in range(0,size-1): 292 | circle1 = target[i] 293 | circle2 = target[i+1] 294 | radius1 = (circle1[1][0] + circle1[1][1]) /2.0 295 | radius2 = (circle2[1][0] + circle2[1][1]) /2.0 296 | 297 | 298 | ratio = radius2 / radius1 299 | ratios[cnt] = round(ratio,3) 300 | cnt += 1 301 | return ratios 302 | 303 | 304 | #calculateRingSize - based on ring ID number and target size, calculate the size of a specific ring 305 | def calculateRingSize(self,ringNumber): 306 | radius = self.outer_ring #in meters 307 | 308 | #actualRadius Outer ring size * ratio[n] * ratios[n + 1] ... 309 | for i in range(0,ringNumber): 310 | radius = radius * self.target_code[i] 311 | 312 | return radius #in meters 313 | 314 | 315 | #calcDistToTarget - processes a target and calculates distance to the target 316 | def calcDistToTarget(self,target, ratios): 317 | distance = 0 318 | readings = 0 319 | for i in range(0,len(ratios)): 320 | ratio = ratios[i] 321 | for j in range(0,len(self.target_code)): 322 | 323 | 324 | if(math.fabs(self.target_code[j] - ratio) <= self.ratio_tolerance): 325 | circle1 = target[i] #outer ring 326 | circle2 = target[i+1] #inner ring 327 | radius1 = (circle1[1][0] + circle1[1][1]) /2.0 328 | radius2 = (circle2[1][0] + circle2[1][1]) /2.0 329 | 330 | fov = math.sqrt(self.cam_vfov**2 + self.cam_hfov**2) 331 | img_size = math.sqrt(self.cam_width**2 + self.cam_height**2) 332 | 333 | 334 | dist1 = get_distance_from_pixels(radius1, self.calculateRingSize(j),fov,img_size) 335 | dist2 = get_distance_from_pixels(radius2, self.calculateRingSize(j+1),fov,img_size) 336 | distance += (dist1 + dist2 )/2.0 337 | 338 | 339 | readings += 1 340 | 341 | #can not decode target 342 | if(readings == 0): 343 | return -1 344 | #average all distance readings 345 | return distance/(readings * 1.0) 346 | 347 | -------------------------------------------------------------------------------- /Tools/target.eps: -------------------------------------------------------------------------------- 1 | %!PS-Adobe-3.0 EPSF-3.0 2 | %%CreationDate: Sat Jan 10 19:05:49 2015 3 | %%LanguageLevel: 3 4 | %%BoundingBox: 0 0 800 800 5 | %%EndComments 6 | newpath 7 | 0.00 800.00 moveto 8 | 800.00 800.00 lineto 9 | 800.00 0.00 lineto 10 | 0.00 0.00 lineto 11 | 0.00 800.00 lineto 12 | closepath 13 | 0.984 0.984 0.984 setrgbcolor 14 | fill 15 | newpath 16 | 375.96 796.15 moveto 17 | 383.98 796.47 391.99 796.81 400.01 797.18 curveto 18 | 411.72 796.77 423.44 796.14 435.16 795.73 curveto 19 | 446.75 794.07 458.39 792.74 469.98 791.01 curveto 20 | 481.44 788.56 492.89 785.96 504.35 783.45 curveto 21 | 517.97 778.59 532.21 775.43 545.44 769.49 curveto 22 | 553.22 766.26 561.03 763.09 568.79 759.79 curveto 23 | 578.86 754.56 588.91 749.29 598.99 744.07 curveto 24 | 608.89 737.88 618.62 731.41 628.60 725.35 curveto 25 | 637.70 718.12 646.95 711.07 656.24 704.09 curveto 26 | 664.58 696.16 673.15 688.46 681.65 680.70 curveto 27 | 689.49 672.11 697.28 663.47 705.28 655.03 curveto 28 | 712.16 645.75 719.26 636.62 726.35 627.50 curveto 29 | 732.54 617.61 738.84 607.79 745.13 597.96 curveto 30 | 750.35 587.76 755.62 577.58 761.06 567.48 curveto 31 | 764.64 558.37 768.48 549.36 772.21 540.30 curveto 32 | 775.37 533.83 776.70 526.69 779.11 519.93 curveto 33 | 781.38 511.96 784.69 504.28 786.05 496.08 curveto 34 | 788.11 486.88 790.09 477.66 792.23 468.47 
curveto 35 | 793.60 457.34 795.03 446.22 796.64 435.12 curveto 36 | 797.35 423.22 797.36 411.26 798.40 399.39 curveto 37 | 797.36 387.68 797.32 375.88 796.68 364.15 curveto 38 | 795.04 352.95 793.62 341.72 792.22 330.50 curveto 39 | 789.50 318.90 787.05 307.22 784.44 295.60 curveto 40 | 779.53 282.02 776.52 267.77 770.51 254.60 curveto 41 | 767.40 246.92 764.06 239.33 761.10 231.59 curveto 42 | 755.63 221.49 750.37 211.26 745.14 201.03 curveto 43 | 738.86 191.21 732.54 181.42 726.36 171.53 curveto 44 | 719.27 162.39 712.17 153.25 705.27 143.97 curveto 45 | 697.28 135.53 689.49 126.89 681.66 118.31 curveto 46 | 673.19 110.59 664.67 102.92 656.37 95.02 curveto 47 | 647.05 87.99 637.73 80.93 628.60 73.65 curveto 48 | 618.61 67.58 608.87 61.11 598.96 54.91 curveto 49 | 588.48 49.55 578.12 43.92 567.58 38.67 curveto 50 | 553.77 33.26 540.42 26.61 526.13 22.52 curveto 51 | 518.65 20.15 511.14 17.85 503.69 15.36 curveto 52 | 495.93 13.72 488.20 11.98 480.46 10.27 curveto 53 | 465.59 6.52 450.22 5.64 435.11 3.26 curveto 54 | 420.04 3.03 404.98 1.16 389.92 2.27 curveto 55 | 381.92 2.56 373.93 3.04 365.93 3.26 curveto 56 | 354.48 4.88 343.00 6.26 331.55 7.88 curveto 57 | 320.14 10.25 308.80 12.98 297.39 15.33 curveto 58 | 286.31 19.05 275.10 22.34 264.01 26.00 curveto 59 | 253.43 30.42 242.79 34.73 232.24 39.20 curveto 60 | 225.00 42.96 217.76 46.71 210.54 50.51 curveto 61 | 197.14 57.02 185.25 66.12 172.43 73.64 curveto 62 | 163.25 80.94 153.89 88.01 144.56 95.09 curveto 63 | 136.29 102.95 127.81 110.60 119.37 118.28 curveto 64 | 111.53 126.90 103.70 135.54 95.70 144.02 curveto 65 | 90.81 150.65 85.73 157.14 80.73 163.69 curveto 66 | 71.56 174.87 64.84 187.77 56.69 199.68 curveto 67 | 51.04 210.11 45.83 220.79 40.11 231.18 curveto 68 | 35.72 241.99 31.34 252.80 26.69 263.50 curveto 69 | 23.39 274.46 19.96 285.38 16.36 296.24 curveto 70 | 13.94 307.69 11.40 319.13 8.77 330.53 curveto 71 | 7.38 341.99 5.84 353.44 4.25 364.87 curveto 72 | 3.82 376.27 3.39 387.67 2.76 399.05 curveto 73 | 3.32 410.75 3.83 422.45 4.25 434.15 curveto 74 | 5.88 445.59 7.36 457.06 8.79 468.53 curveto 75 | 11.39 479.91 13.95 491.29 16.34 502.71 curveto 76 | 19.95 513.59 23.39 524.53 26.70 535.51 curveto 77 | 31.25 546.09 35.66 556.73 39.92 567.42 curveto 78 | 45.37 577.57 50.67 587.80 55.91 598.05 curveto 79 | 62.21 607.84 68.47 617.65 74.66 627.51 curveto 80 | 81.73 636.63 88.83 645.73 95.71 655.00 curveto 81 | 103.70 663.45 111.50 672.09 119.33 680.69 curveto 82 | 126.84 687.49 134.32 694.32 141.73 701.23 curveto 83 | 144.64 704.40 148.34 706.62 151.67 709.29 curveto 84 | 158.46 714.56 165.38 719.66 172.04 725.09 curveto 85 | 181.89 731.17 191.59 737.48 201.36 743.70 curveto 86 | 211.63 749.11 221.95 754.44 232.25 759.81 curveto 87 | 242.83 764.27 253.46 768.61 264.06 773.01 curveto 88 | 275.09 776.66 286.25 779.95 297.27 783.63 curveto 89 | 309.02 786.07 320.68 788.92 332.45 791.27 curveto 90 | 346.95 792.91 361.34 795.84 375.96 796.15 curveto 91 | closepath 92 | 0.012 0.012 0.012 setrgbcolor 93 | fill 94 | newpath 95 | 386.54 714.22 moveto 96 | 410.43 715.58 434.40 713.41 457.96 709.43 curveto 97 | 466.64 707.37 475.44 705.77 484.06 703.45 curveto 98 | 495.35 699.75 506.92 696.78 517.78 691.91 curveto 99 | 585.50 665.13 643.13 613.80 677.53 549.62 curveto 100 | 682.29 540.54 687.17 531.49 690.79 521.88 curveto 101 | 696.87 508.20 701.24 493.87 705.39 479.52 curveto 102 | 707.92 467.76 711.10 456.10 712.36 444.11 curveto 103 | 717.45 409.92 716.39 374.93 710.18 340.96 curveto 104 | 707.71 330.41 705.89 319.68 702.40 
309.40 curveto 105 | 686.99 257.24 657.68 209.26 618.31 171.74 curveto 106 | 585.83 140.54 546.44 116.68 503.95 101.81 curveto 107 | 495.96 99.41 488.10 96.58 480.02 94.48 curveto 108 | 468.33 92.05 456.79 88.76 444.87 87.62 curveto 109 | 410.91 82.53 376.18 83.66 342.43 89.70 curveto 110 | 330.70 92.45 318.78 94.49 307.41 98.56 curveto 111 | 293.00 102.61 279.25 108.58 265.58 114.62 curveto 112 | 255.15 120.04 244.48 125.12 234.64 131.60 curveto 113 | 185.34 161.85 144.84 206.19 119.02 257.93 curveto 114 | 114.06 266.89 110.55 276.52 106.56 285.91 curveto 115 | 87.27 335.92 80.84 390.92 88.63 443.99 curveto 116 | 89.94 456.25 93.14 468.20 95.80 480.22 curveto 117 | 99.42 492.29 102.88 504.46 107.80 516.08 curveto 118 | 132.33 578.36 177.46 632.34 234.54 667.33 curveto 119 | 243.32 673.18 252.84 677.76 262.16 682.66 curveto 120 | 267.53 685.52 273.21 687.74 278.82 690.07 curveto 121 | 285.57 692.84 292.22 695.87 299.25 697.88 curveto 122 | 308.09 700.52 316.76 703.83 325.84 705.61 curveto 123 | 334.53 707.39 343.10 709.83 351.92 710.84 curveto 124 | 363.42 712.37 374.92 714.06 386.54 714.22 curveto 125 | closepath 126 | 0.984 0.984 0.984 setrgbcolor 127 | fill 128 | newpath 129 | 375.92 687.10 moveto 130 | 384.33 687.49 392.74 687.90 401.15 688.18 curveto 131 | 409.44 687.75 417.75 687.54 426.03 687.02 curveto 132 | 434.54 685.95 443.05 684.85 451.54 683.68 curveto 133 | 461.99 680.92 472.84 679.65 483.01 675.88 curveto 134 | 488.53 674.10 494.10 672.47 499.61 670.67 curveto 135 | 504.99 668.39 510.38 666.15 515.78 663.92 curveto 136 | 525.85 660.08 534.98 654.22 544.71 649.65 curveto 137 | 551.73 645.04 558.84 640.54 565.94 636.06 curveto 138 | 572.59 630.96 579.18 625.75 585.93 620.78 curveto 139 | 592.05 614.98 598.32 609.31 604.56 603.63 curveto 140 | 610.33 597.38 615.93 590.96 621.90 584.90 curveto 141 | 626.75 578.14 631.89 571.58 637.06 565.06 curveto 142 | 641.46 557.85 646.04 550.74 650.68 543.68 curveto 143 | 654.38 536.20 658.26 528.80 662.23 521.46 curveto 144 | 665.29 513.83 668.40 506.22 671.67 498.67 curveto 145 | 674.21 490.61 676.61 482.49 679.39 474.49 curveto 146 | 681.11 466.09 683.00 457.72 684.92 449.36 curveto 147 | 685.86 441.19 687.00 433.04 688.10 424.89 curveto 148 | 688.65 416.40 688.49 407.86 689.41 399.39 curveto 149 | 688.45 390.92 688.67 382.37 688.08 373.88 curveto 150 | 686.91 365.64 685.87 357.38 684.80 349.13 curveto 151 | 682.90 340.92 681.06 332.69 679.38 324.43 curveto 152 | 676.59 316.47 674.23 308.36 671.66 300.33 curveto 153 | 668.43 292.76 665.26 285.17 662.23 277.53 curveto 154 | 658.25 270.16 654.34 262.75 650.64 255.24 curveto 155 | 645.98 248.23 641.47 241.12 637.06 233.94 curveto 156 | 631.87 227.39 626.69 220.81 621.83 214.00 curveto 157 | 616.08 208.17 610.63 202.02 605.13 195.94 curveto 158 | 598.96 190.06 592.35 184.62 586.43 178.49 curveto 159 | 579.42 173.57 572.79 168.13 565.96 162.96 curveto 160 | 558.88 158.47 551.78 154.01 544.78 149.40 curveto 161 | 537.38 145.75 530.13 141.81 522.80 138.03 curveto 162 | 515.08 134.74 507.28 131.62 499.57 128.32 curveto 163 | 491.57 125.75 483.52 123.36 475.56 120.68 curveto 164 | 463.35 118.23 451.34 114.67 438.89 113.64 curveto 165 | 426.02 111.44 412.96 111.38 399.96 110.81 curveto 166 | 391.71 111.25 383.45 111.46 375.21 111.96 curveto 167 | 366.64 113.05 358.05 114.10 349.50 115.32 curveto 168 | 339.28 117.99 328.71 119.28 318.76 122.88 curveto 169 | 313.00 124.73 307.19 126.44 301.44 128.31 curveto 170 | 295.60 130.77 289.76 133.24 283.88 135.63 curveto 171 | 276.53 138.40 269.85 142.60 
262.83 146.06 curveto 172 | 253.16 150.59 244.72 157.23 235.54 162.62 curveto 173 | 228.50 167.85 221.74 173.47 214.56 178.50 curveto 174 | 210.42 183.06 205.49 186.80 201.14 191.15 curveto 175 | 193.13 197.85 187.11 206.58 179.37 213.55 curveto 176 | 174.53 220.83 168.85 227.52 163.58 234.48 curveto 177 | 159.34 241.51 154.86 248.40 150.35 255.25 curveto 178 | 146.66 262.76 142.75 270.15 138.78 277.51 curveto 179 | 135.74 285.15 132.58 292.75 129.34 300.30 curveto 180 | 126.78 308.35 124.41 316.47 121.62 324.45 curveto 181 | 119.90 332.96 117.93 341.42 116.05 349.89 curveto 182 | 115.13 358.59 113.65 367.23 112.78 375.92 curveto 183 | 112.72 386.31 111.22 396.69 112.16 407.06 curveto 184 | 112.63 414.98 112.37 422.96 113.71 430.81 curveto 185 | 114.60 437.13 115.35 443.48 116.18 449.81 curveto 186 | 118.08 458.04 119.93 466.28 121.62 474.55 curveto 187 | 124.54 482.76 126.85 491.17 129.64 499.42 curveto 188 | 134.02 508.81 136.97 518.85 142.19 527.83 curveto 189 | 144.93 533.13 147.76 538.37 150.35 543.74 curveto 190 | 155.01 550.77 159.54 557.90 163.96 565.08 curveto 191 | 168.53 570.85 173.07 576.65 177.43 582.58 curveto 192 | 179.56 586.02 182.92 588.41 185.40 591.56 curveto 193 | 190.63 597.01 195.31 603.00 201.13 607.85 curveto 194 | 205.48 612.19 210.40 615.94 214.55 620.48 curveto 195 | 221.56 625.41 228.19 630.86 235.03 636.03 curveto 196 | 242.11 640.52 249.22 644.98 256.21 649.61 curveto 197 | 263.59 653.23 270.82 657.17 278.13 660.93 curveto 198 | 285.85 664.27 293.69 667.35 301.42 670.68 curveto 199 | 309.42 673.26 317.50 675.63 325.46 678.33 curveto 200 | 333.71 680.08 341.93 681.94 350.15 683.81 curveto 201 | 358.74 684.87 367.31 686.14 375.92 687.10 curveto 202 | closepath 203 | 0.012 0.012 0.012 setrgbcolor 204 | fill 205 | newpath 206 | 384.89 615.16 moveto 207 | 419.65 617.57 454.99 611.61 486.93 597.62 curveto 208 | 519.02 583.71 547.64 561.84 569.42 534.46 curveto 209 | 594.46 503.19 610.45 464.80 615.12 425.02 curveto 210 | 620.22 383.21 612.55 339.99 593.63 302.39 curveto 211 | 576.03 267.14 548.52 236.89 515.01 216.15 curveto 212 | 484.76 197.18 449.68 186.02 414.07 183.77 curveto 213 | 373.28 181.12 331.84 190.49 296.06 210.21 curveto 214 | 260.26 229.86 230.28 259.92 210.81 295.82 curveto 215 | 194.25 326.14 185.09 360.48 184.39 395.02 curveto 216 | 183.43 438.38 195.94 481.93 219.79 518.15 curveto 217 | 243.69 554.87 279.19 583.85 319.89 600.05 curveto 218 | 340.58 608.44 362.64 613.37 384.89 615.16 curveto 219 | closepath 220 | 0.984 0.984 0.984 setrgbcolor 221 | fill 222 | newpath 223 | 389.00 582.72 moveto 224 | 431.36 585.57 474.52 572.88 508.72 547.76 curveto 225 | 511.92 545.17 515.24 542.74 518.54 540.28 curveto 226 | 522.41 536.56 526.32 532.84 530.49 529.44 curveto 227 | 534.18 524.84 538.44 520.73 542.24 516.22 curveto 228 | 545.17 512.19 548.33 508.33 551.36 504.38 curveto 229 | 554.92 498.01 559.60 492.31 562.54 485.60 curveto 230 | 569.51 473.12 574.33 459.60 578.25 445.89 curveto 231 | 579.20 441.17 580.24 436.47 581.44 431.80 curveto 232 | 582.16 426.39 582.55 420.92 583.67 415.56 curveto 233 | 583.66 410.23 583.76 404.88 584.40 399.58 curveto 234 | 583.66 394.02 583.87 388.40 583.50 382.83 curveto 235 | 582.14 375.54 582.10 368.05 579.91 360.93 curveto 236 | 578.57 352.71 575.61 344.93 573.26 336.97 curveto 237 | 569.93 329.27 567.15 321.31 562.88 314.05 curveto 238 | 559.71 306.90 554.83 300.74 550.95 293.98 curveto 239 | 547.78 290.15 544.77 286.19 541.78 282.22 curveto 240 | 538.05 277.96 534.03 273.95 530.48 269.53 curveto 241 | 526.07 266.07 
522.15 262.04 518.01 258.29 curveto 242 | 512.62 254.64 507.89 250.08 502.27 246.79 curveto 243 | 475.63 228.88 444.08 218.22 412.04 216.29 curveto 244 | 377.86 214.03 343.11 221.71 313.04 238.15 curveto 245 | 308.11 240.22 303.98 243.65 299.42 246.35 curveto 246 | 293.34 249.88 288.21 254.74 282.45 258.72 curveto 247 | 276.82 264.36 270.38 269.17 265.38 275.42 curveto 248 | 259.65 281.15 255.10 287.84 249.98 294.09 curveto 249 | 247.26 298.61 244.44 303.08 241.48 307.45 curveto 250 | 239.02 312.31 236.51 317.16 233.95 321.97 curveto 251 | 231.98 326.67 230.18 331.45 228.05 336.08 curveto 252 | 226.68 339.98 225.70 344.00 224.35 347.89 curveto 253 | 222.13 353.89 221.53 360.32 219.73 366.44 curveto 254 | 218.78 372.08 218.55 377.84 217.32 383.44 curveto 255 | 217.62 390.62 216.26 397.79 217.06 404.96 curveto 256 | 217.46 409.61 216.97 414.33 217.91 418.92 curveto 257 | 218.98 424.74 218.94 430.75 220.70 436.43 curveto 258 | 224.60 456.30 232.24 475.29 242.31 492.81 curveto 259 | 245.04 496.77 247.51 500.91 250.04 505.00 curveto 260 | 253.20 508.86 256.23 512.82 259.23 516.81 curveto 261 | 262.96 521.04 266.95 525.04 270.49 529.44 curveto 262 | 274.68 532.82 278.56 536.56 282.45 540.28 curveto 263 | 285.96 542.88 289.49 545.46 292.88 548.22 curveto 264 | 320.81 568.43 354.57 580.70 389.00 582.72 curveto 265 | closepath 266 | 0.012 0.012 0.012 setrgbcolor 267 | fill 268 | newpath 269 | 380.57 524.16 moveto 270 | 419.36 530.46 460.91 517.56 488.74 489.70 curveto 271 | 515.55 463.96 529.57 425.73 526.21 388.75 curveto 272 | 523.74 358.70 510.09 329.65 488.28 308.82 curveto 273 | 462.17 283.11 424.25 270.07 387.84 273.97 curveto 274 | 352.01 277.33 318.06 297.00 297.50 326.56 curveto 275 | 283.88 345.59 275.97 368.57 274.54 391.90 curveto 276 | 273.01 416.46 278.94 441.42 291.24 462.74 curveto 277 | 302.60 482.36 319.27 498.89 339.16 509.82 curveto 278 | 351.95 517.03 366.11 521.69 380.57 524.16 curveto 279 | closepath 280 | 0.984 0.984 0.984 setrgbcolor 281 | fill 282 | newpath 283 | 353.93 470.16 moveto 284 | 368.58 479.94 386.39 484.86 403.99 484.14 curveto 285 | 419.25 483.48 434.34 478.65 447.06 470.17 curveto 286 | 449.97 468.46 452.38 466.02 455.32 464.34 curveto 287 | 456.85 462.53 458.56 460.85 460.48 459.45 curveto 288 | 461.91 457.56 463.53 455.81 465.38 454.31 curveto 289 | 470.35 447.45 475.37 440.49 478.47 432.51 curveto 290 | 489.50 407.24 486.82 376.54 471.61 353.54 curveto 291 | 469.87 350.35 467.13 347.86 465.36 344.69 curveto 292 | 463.52 343.19 461.91 341.45 460.49 339.56 curveto 293 | 458.56 338.14 456.84 336.48 455.30 334.65 curveto 294 | 452.36 332.98 449.96 330.54 447.05 328.83 curveto 295 | 434.31 320.34 419.20 315.50 403.91 314.86 curveto 296 | 386.12 314.13 368.13 319.22 353.38 329.20 curveto 297 | 350.68 330.84 348.44 333.10 345.70 334.65 curveto 298 | 344.15 336.47 342.45 338.14 340.52 339.54 curveto 299 | 339.09 341.43 337.47 343.17 335.65 344.68 curveto 300 | 330.62 351.51 325.67 358.50 322.54 366.46 curveto 301 | 318.87 374.52 317.10 383.25 316.01 391.98 curveto 302 | 314.84 408.00 317.78 424.43 325.42 438.63 curveto 303 | 328.07 444.34 332.13 449.18 335.66 454.34 curveto 304 | 337.48 455.83 339.08 457.57 340.51 459.43 curveto 305 | 342.44 460.85 344.15 462.53 345.70 464.35 curveto 306 | 348.63 466.03 351.02 468.46 353.93 470.16 curveto 307 | closepath 308 | 0.012 0.012 0.012 setrgbcolor 309 | fill 310 | newpath 311 | 394.65 438.24 moveto 312 | 405.75 440.04 417.52 436.55 426.02 429.22 curveto 313 | 439.15 418.28 443.45 398.33 435.98 382.94 curveto 314 | 431.26 
372.67 421.87 364.62 410.90 361.78 curveto 315 | 398.79 358.17 385.08 361.32 375.44 369.39 curveto 316 | 361.73 380.44 357.30 401.21 365.42 416.86 curveto 317 | 370.88 428.23 382.19 436.43 394.65 438.24 curveto 318 | closepath 319 | 0.984 0.984 0.984 setrgbcolor 320 | fill 321 | %%EOF 322 | -------------------------------------------------------------------------------- /PrecisionLand.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import math 3 | import time 4 | import cv2 5 | import Queue 6 | import sc_config 7 | from sc_video import sc_video 8 | from sc_dispatcher import sc_dispatcher 9 | from sc_logger import sc_logger 10 | from pl_gui import PrecisionLandGUI as gui 11 | from pl_sim import sim 12 | from pl_util import shift_to_origin, current_milli_time 13 | from CircleDetector import CircleDetector 14 | from vehicle_control import veh_control 15 | from droneapi.lib import VehicleMode, Location, Attitude 16 | from position_vector import PositionVector 17 | 18 | 19 | 20 | 21 | ''' 22 | Logic: 23 | TODO 24 | 25 | ''' 26 | 27 | 28 | 29 | 30 | ''' 31 | Temporary Changes: 32 | -added kill_camera(commented out) 33 | ''' 34 | 35 | 36 | ''' 37 | TODO: 38 | Future: 39 | -have program takeover during landing modes(not guided) 40 | -send warning message to GCS when releasing control 41 | -implement a landing detector and when to release control 42 | 43 | Bugs: 44 | -add logic for when the vehicle enters from the side of the landing cylinder and underneath the abort point 45 | -will cause the vehicle to climb the second it enters the area is the target is not in sight 46 | -will fix this when the vehicle accepts commands in landing modes 47 | -will add an intial_approach() method 48 | -make logic more accepting of land and RTL 49 | -add positive and negative check on parameters 50 | -inverted on Z axis 51 | 52 | Improvements: 53 | -add varaible descent_rate based on distance to target center and altitude 54 | -add better target_detected logic(multiple frames required for a lock) 55 | -add update rate to sc_logger 56 | -fix project file structure 57 | -fix Logging printing to console 58 | -handle droneapi start up better(location being null at start up-> issue using see inside_landing_area() RIGHT at startup) 59 | -bring back inside_landing_area() as a condition for enterting the main loop 60 | ''' 61 | 62 | 63 | class PrecisionLand(object): 64 | 65 | def __init__(self): 66 | 67 | #load config file 68 | sc_config.config.get_file('Smart_Camera') 69 | 70 | #get camera specs 71 | self.camera_index = sc_config.config.get_integer('camera','camera_index',0) 72 | self.camera_width = sc_config.config.get_integer('camera', 'camera_width', 640) 73 | self.camera_height = sc_config.config.get_integer('camera', 'camera_height', 480) 74 | self.camera_hfov = sc_config.config.get_float('camera', 'horizontal-fov', 72.42) 75 | self.camera_vfov = sc_config.config.get_float('camera', 'vertical-fov', 43.3) 76 | 77 | #use simulator 78 | self.simulator = sc_config.config.get_boolean('simulator','use_simulator',True) 79 | 80 | #how many times to attempt a land before giving up 81 | self.search_attempts = sc_config.config.get_integer('general','search_attempts', 5) 82 | 83 | #The timeout between losing the target and starting a climb/scan 84 | self.settle_time = sc_config.config.get_integer('general','settle_time', 1.5) 85 | 86 | #how high to climb in meters to complete a scan routine 87 | self.climb_altitude = 
88 |
89 |         #the max horizontal speed sent to autopilot
90 |         self.vel_speed_max = sc_config.config.get_float('general', 'vel_speed_max', 5)
91 |
92 |         #P term of the horizontal distance to velocity controller
93 |         self.dist_to_vel = sc_config.config.get_float('general', 'dist_to_vel', 0.15)
94 |
95 |         #Descent velocity
96 |         self.descent_rate = sc_config.config.get_float('general','descent_rate', 0.5)
97 |
98 |         #roll/pitch value that is considered stable
99 |         self.stable_attitude = sc_config.config.get_float('general', 'stable_attitude', 0.18)
100 |
101 |         #Climb rate when executing a search
102 |         self.climb_rate = sc_config.config.get_float('general','climb_rate', -2.0)
103 |
104 |         #The height at which a climb is started if no target is detected
105 |         self.abort_height = sc_config.config.get_integer('general', 'abort_height', 10)
106 |
107 |         #when we have a lock on the target, only descend if within this radius
108 |         self.descent_radius = sc_config.config.get_float('general', 'descent_radius', 1.0)
109 |
110 |         #The height at which we lock the position on the xy axis
111 |         self.landing_area_min_alt = sc_config.config.get_integer('general', 'landing_area_min_alt', 1)
112 |
113 |         #The radius of the cylinder surrounding the landing pad
114 |         self.landing_area_radius = sc_config.config.get_integer('general', 'landing_area_radius', 20)
115 |
116 |         #Whether the landing program can be reset after it is disabled
117 |         self.allow_reset = sc_config.config.get_boolean('general', 'allow_reset', True)
118 |
119 |         #Run the program no matter what mode or location; useful for debug purposes
120 |         self.always_run = sc_config.config.get_boolean('general', 'always_run', True)
121 |
122 |         #whether the companion computer has control of the autopilot or not
123 |         self.in_control = False
124 |
125 |         #how many frames have been captured
126 |         self.frame_count = 0
127 |
128 |         #Reset state machine
129 |         self.initialize_landing()
130 |
131 |         #debugging:
132 |         self.kill_camera = False
133 |
134 |     def name(self):
135 |         return "Precision_Land"
136 |
137 |     def connect(self):
138 |         while(veh_control.is_connected() == False):
139 |             # connect to droneapi
140 |             veh_control.connect(local_connect())
141 |         self.vehicle = veh_control.get_vehicle()
142 |
143 |     def run(self):
144 |         sc_logger.text(sc_logger.GENERAL, 'running {0}'.format(self.name()))
145 |
146 |         #start a video capture
147 |         if(self.simulator):
148 |             sc_logger.text(sc_logger.GENERAL, 'Using simulator')
149 |             sim.set_target_location(veh_control.get_home())
150 |             #sim.set_target_location(Location(0,0,0))
151 |
152 |         else:
153 |             sc_video.start_capture(self.camera_index)
154 |
155 |
156 |         #create an image processor
157 |         detector = CircleDetector()
158 |
159 |         #create a queue for images
160 |         imageQueue = Queue.Queue()
161 |
162 |         #create a queue for vehicle info
163 |         vehicleQueue = Queue.Queue()
164 |
165 |         while veh_control.is_connected():
166 |
167 |             '''
168 |             #kill camera for testing
169 |             if(cv2.waitKey(2) == 1113938):
170 |                 self.kill_camera = not self.kill_camera
171 |             '''
172 |
173 |             #Reinitialize the landing program when entering a landing mode
174 |             if veh_control.controlling_vehicle():
175 |                 if not self.in_control:
176 |                     if(self.allow_reset):
177 |                         sc_logger.text(sc_logger.GENERAL, 'Program initialized to start state')
178 |                         self.initialize_landing()
179 |
180 |                 self.in_control = True
181 |
182 |             else:
183 |                 self.in_control = False
184 |
185 |
186 |
187 |             #we are in the landing zone or in a landing mode and we are still running the landing program
188 |             #just because the program is running does not mean it controls the vehicle
189 |             #i.e. in the landing area but not in a landing mode
190 |             #FIXME add inside_landing_area() back to conditional
191 |             if (self.in_control or self.always_run) and self.pl_enabled:
192 |
193 |
194 |
195 |                 #update how often we dispatch a command
196 |                 sc_dispatcher.calculate_dispatch_schedule()
197 |
198 |                 #get info from autopilot
199 |                 location = veh_control.get_location()
200 |                 attitude = veh_control.get_attitude()
201 |
202 |                 '''
203 |                 #get info from autopilot
204 |                 location = Location(0.000009,0,location.alt)
205 |                 attitude = Attitude(0,0,0)
206 |                 '''
207 |
208 |                 #update simulator
209 |                 if(self.simulator):
210 |                     sim.refresh_simulator(location,attitude)
211 |
212 |                 # grab an image
213 |                 capStart = current_milli_time()
214 |                 frame = self.get_frame()
215 |                 capStop = current_milli_time()
216 |
217 |                 '''
218 |                 if(self.kill_camera):
219 |                     frame[:] = (0,255,0)
220 |                 '''
221 |
222 |                 #update capture time
223 |                 sc_dispatcher.update_capture_time(capStop-capStart)
224 |
225 |
226 |                 #Process image
227 |                 #We schedule the process as opposed to waiting for an available core
228 |                 #This brings consistency and prevents overwriting a dead process before
229 |                 #information has been grabbed from the Pipe
230 |                 if sc_dispatcher.is_ready():
231 |                     #queue the image for later use: displaying image, overlays, recording
232 |                     imageQueue.put(frame)
233 |                     #queue vehicle info for later use: position processing
234 |                     vehicleQueue.put((location,attitude))
235 |
236 |                     #the function must be run directly from the class
237 |                     sc_dispatcher.dispatch(target=detector.analyze_frame, args=(frame,attitude,))
238 |
239 |
240 |
241 |                 #retrieve results
242 |                 if sc_dispatcher.is_available():
243 |
244 |                     sc_logger.text(sc_logger.GENERAL, 'Frame {0}'.format(self.frame_count))
245 |                     self.frame_count += 1
246 |
247 |
248 |                     #results of image processor
249 |                     results = sc_dispatcher.retreive()
250 |                     # get image that was passed with the image processor
251 |                     img = imageQueue.get()
252 |                     #get vehicle position that was passed with the image processor
253 |                     location, attitude = vehicleQueue.get()
254 |
255 |
256 |                     #overlay gui
257 |                     rend_Image = gui.add_target_highlights(img, results[3])
258 |
259 |
260 |                     #show/record images
261 |                     sc_logger.image(sc_logger.RAW, img)
262 |                     sc_logger.image(sc_logger.GUI, rend_Image)
263 |
264 |                     #display/log data
265 |                     sc_logger.text(sc_logger.ALGORITHM,'RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.format(results[0],results[1],results[2],results[3]))
266 |                     sc_logger.text(sc_logger.AIRCRAFT,attitude)
267 |                     sc_logger.text(sc_logger.AIRCRAFT,location)
268 |
269 |                     #send commands to autopilot
270 |                     self.control(results,attitude,location)
271 |
272 |             else:
273 |                 if(self.pl_enabled == False):
274 |                     sc_logger.text(sc_logger.GENERAL, 'Landing disabled')
275 |                 else:
276 |                     sc_logger.text(sc_logger.GENERAL, 'Not in landing mode or Landing Area')
277 |
278 |
279 |
280 |
281 |
282 |         #terminate program
283 |         sc_logger.text(sc_logger.GENERAL, 'Vehicle disconnected, Program Terminated')
284 |         if(self.simulator == False):
285 |             sc_video.stop_capture()
286 |
287 |
288 |     #initialize_landing - reset the state machine which controls the flow of the landing routine
289 |     def initialize_landing(self):
290 |
291 |         #how many times we have attempted landing
292 |         self.attempts = 0
293 |
294 |         #Last time (time.time() seconds) we had a valid target
295 |         self.last_valid_target = 0
296 |
297 |         #State variable climbing to scan for the target
298 |         self.climbing = False
299 |
300 |         #State variable which determines if this program will continue to run
301 |         self.pl_enabled = True
302 |
303 |         #State variable which represents if we are in the initial (autopilot-controlled) descent
304 |         self.initial_descent = True
305 |
306 |         #State variable which represents a known target in the landing area
307 |         self.target_detected = False
308 |
309 |
310 |
311 |     #control - how to respond to information captured from camera
312 |     def control(self,target_info,attitude,location):
313 |         #we have control from autopilot
314 |         if self.in_control:
315 |
316 |             valid_target = False
317 |
318 |             now = time.time()
319 |
320 |             #detected a target
321 |             if target_info[1] is not None:
322 |                 self.target_detected = True
323 |                 valid_target = True
324 |                 self.initial_descent = False
325 |                 self.last_valid_target = now
326 |
327 |
328 |             #attempt to use precision landing
329 |             if(self.inside_landing_area() == 1):
330 |                 #we have detected a target in landing area
331 |                 if(self.target_detected):
332 |                     self.climbing = False
333 |                     self.initial_descent = False
334 |
335 |                     #we currently see target
336 |                     if(valid_target):
337 |                         sc_logger.text(sc_logger.GENERAL, 'Target detected. Moving to target')
338 |
339 |                         #move to target
340 |                         self.move_to_target(target_info,attitude,location)
341 |
342 |                     #lost target
343 |                     else:
344 |                         #we have lost the target for more than settle_time
345 |                         if(now - self.last_valid_target > self.settle_time):
346 |                             self.target_detected = False
347 |
348 |                         #temporarily lost target,
349 |                         #top section of cylinder
350 |                         if(veh_control.get_location().alt > self.abort_height):
351 |                             sc_logger.text(sc_logger.GENERAL, 'Lost Target: Straight Descent')
352 |
353 |                             #continue descent
354 |                             self.straight_descent()
355 |                         else:
356 |                             sc_logger.text(sc_logger.GENERAL, 'Lost Target: Holding')
357 |
358 |                             #neutralize velocity
359 |                             veh_control.set_velocity(0,0,0)
360 |
361 |
362 |
363 |                 #there is no known target in landing area
364 |                 else:
365 |
366 |                     #currently searching
367 |                     if(self.climbing):
368 |                         self.climb()
369 |
370 |                     #not searching, decide next move
371 |                     else:
372 |                         #top section of cylinder
373 |                         if(veh_control.get_location().alt > self.abort_height):
374 |                             #initial descent entering cylinder
375 |                             if(self.initial_descent):
376 |                                 sc_logger.text(sc_logger.GENERAL, 'No Target: Initial Descent')
377 |
378 |                                 #give autopilot control
379 |                                 self.autopilot_land()
380 |
381 |                             #all other attempts prior to initial target detection
382 |                             else:
383 |                                 sc_logger.text(sc_logger.GENERAL, 'No target: Straight descent')
384 |
385 |                                 #straight descent
386 |                                 self.straight_descent()
387 |
388 |                         #lower section of cylinder
389 |                         else:
390 |                             #we can attempt another land
391 |                             if(self.attempts < self.search_attempts):
392 |                                 self.attempts += 1
393 |                                 sc_logger.text(sc_logger.GENERAL, 'Climbing to attempt {0}'.format(self.attempts))
394 |
395 |                                 #start climbing
396 |                                 self.climb()
397 |
398 |                             #give up and let the autopilot land
399 |                             else:
400 |                                 sc_logger.text(sc_logger.GENERAL, 'Out of attempts: Giving up')
401 |
402 |                                 #give autopilot control
403 |                                 self.autopilot_land()
404 |
405 |
406 |             #final descent
407 |             elif(self.inside_landing_area() == -1):
408 |                 sc_logger.text(sc_logger.GENERAL, 'In final descent')
409 |
410 |                 #straight descent
411 |                 self.straight_descent()
412 |                 self.target_detected = False
413 |
414 |
415 |             #outside cylinder
416 |             else:
417 |                 sc_logger.text(sc_logger.GENERAL, 'Outside landing zone')
418 |
419 |                 #give autopilot control
420 |                 self.autopilot_land()
421 |
422 |                 self.target_detected = False
423 |                 self.initial_descent = True
424 |
425 |         #the program is running but the autopilot is in an invalid mode
426 |         else:
427 |             sc_logger.text(sc_logger.GENERAL, 'Not in control of vehicle')
428 |
429 |
430 |     #release_control - give the autopilot full control and leave it in a stable state
431 |     def release_control(self):
432 |         sc_logger.text(sc_logger.GENERAL, 'Releasing control')
433 |
434 |         #put vehicle in stable state
435 |         veh_control.set_velocity(0,0,0)
436 |         #autopilot_land()
437 |
438 |         #don't let us take control ever again
439 |         self.pl_enabled = False
440 |
441 |         '''
442 |         # if in GUIDED mode switch back to LOITER
443 |         if self.vehicle.mode.name == "GUIDED":
444 |             self.vehicle.mode = VehicleMode("LOITER")
445 |             self.vehicle.flush()
446 |         '''
447 |
448 |
449 |     #move_to_target - fly aircraft to landing pad
450 |     def move_to_target(self,target_info,attitude,location):
451 |         x,y = target_info[1]
452 |
453 |
454 |         #shift origin to center of image
455 |         x,y = shift_to_origin((x,y),self.camera_width,self.camera_height)
456 |
457 |         #this is necessary because the simulator is 100% accurate
458 |         if(self.simulator):
459 |             hfov = 48.7
460 |             vfov = 49.7
461 |         else:
462 |             hfov = self.camera_hfov
463 |             vfov = self.camera_vfov
464 |
465 |
466 |         #stabilize image with vehicle attitude
467 |         x -= (self.camera_width / hfov) * math.degrees(attitude.roll)
468 |         y += (self.camera_height / vfov) * math.degrees(attitude.pitch)
469 |
470 |
471 |         #convert to distance
472 |         X, Y = self.pixel_point_to_position_xy((x,y),location.alt)
473 |
474 |         #convert to world coordinates
475 |         target_heading = math.atan2(Y,X) % (2*math.pi)
476 |         target_heading = (attitude.yaw - target_heading)
477 |         target_distance = math.sqrt(X**2 + Y**2)
478 |
479 |
480 |         sc_logger.text(sc_logger.GENERAL, "Distance to target: {0}".format(round(target_distance,2)))
481 |
482 |
483 |         #calculate speed toward target
484 |         speed = target_distance * self.dist_to_vel
485 |         #apply max speed limit
486 |         speed = min(speed,self.vel_speed_max)
487 |
488 |         #calculate cartesian speed
489 |         vx = speed * math.sin(target_heading) * -1.0
490 |         vy = speed * math.cos(target_heading)
491 |
492 |         #only descend when on top of target
493 |         if(target_distance > self.descent_radius):
494 |             vz = 0
495 |         else:
496 |             vz = self.descent_rate
497 |
498 |
499 |         #send velocity commands toward target heading
500 |         veh_control.set_velocity(vx,vy,vz)
501 |
502 |
503 |
504 |     #autopilot_land - Let the autopilot execute its normal landing procedure
505 |     def autopilot_land(self):
506 |         #descend velocity
507 |         veh_control.set_velocity(0,0,self.descent_rate)
508 |         #veh_control.set_velocity(9999,9999,9999)
509 |
510 |
511 |     #straight_descent - send the vehicle straight down
512 |     def straight_descent(self):
513 |         veh_control.set_velocity(0,0,self.descent_rate)
514 |
515 |
516 |     #climb - climb to a certain altitude then stop.
517 |     def climb(self):
518 |
519 |         if(veh_control.get_location().alt < self.climb_altitude):
520 |             sc_logger.text(sc_logger.GENERAL, 'climbing')
521 |             veh_control.set_velocity(0,0,self.climb_rate)
522 |             self.climbing = True
523 |         else:
524 |             sc_logger.text(sc_logger.GENERAL, 'Reached top of search zone')
525 |             veh_control.set_velocity(0,0,0)
526 |             self.climbing = False
527 |
528 |
529 |     #inside_landing_area - determine if we are in the landing zone: 0 = False, 1 = True, -1 = below the zone
530 |     def inside_landing_area(self):
531 |
532 |         vehPos = PositionVector.get_from_location(veh_control.get_location())
533 |         landPos = PositionVector.get_from_location(veh_control.get_landing())
534 |         '''
535 |         vehPos = PositionVector.get_from_location(Location(0,0,10))
536 |         landPos = PositionVector.get_from_location(Location(0,0,0))
537 |         '''
538 |         if(PositionVector.get_distance_xy(vehPos,landPos) < self.landing_area_radius):
539 |             #below area
540 |             if(vehPos.z < self.landing_area_min_alt):
541 |                 return -1
542 |             #in area
543 |             else:
544 |                 return 1
545 |         #outside area
546 |         else:
547 |             return 0
548 |
549 |
550 |
551 |     #get_frame - pull an image from camera or simulator
552 |     def get_frame(self):
553 |         if(self.simulator):
554 |             return sim.get_frame()
555 |         else:
556 |             return sc_video.get_image()
557 |
558 |
559 |     #pixel_point_to_position_xy - convert position in pixels to position in meters
560 |     #pixel_position - distance in pixels from the CENTER of the image
561 |     #distance - distance from the camera to the object in meters
562 |     def pixel_point_to_position_xy(self,pixel_position,distance):
563 |         thetaX = pixel_position[0] * self.camera_hfov / self.camera_width
564 |         thetaY = pixel_position[1] * self.camera_vfov / self.camera_height
565 |         x = distance * math.tan(math.radians(thetaX))
566 |         y = distance * math.tan(math.radians(thetaY))
567 |
568 |         return (x,y)
569 |
570 |
571 |
572 |
573 |
574 | # if starting from mavproxy
575 | if __name__ == "__builtin__":
576 |     # start precision landing
577 |     strat = PrecisionLand()
578 |
579 |     # connect to droneapi
580 |     sc_logger.text(sc_logger.GENERAL, 'Connecting to vehicle...')
581 |     strat.connect()
582 |     sc_logger.text(sc_logger.GENERAL, 'Vehicle connected!')
583 |
584 |     # run strategy
585 |     strat.run()
586 |
587 |
588 |
589 |
590 |
591 |
592 |
593 |
594 |
--------------------------------------------------------------------------------
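The pixel-to-meters conversion used by `move_to_target()` and `pixel_point_to_position_xy()` above can be sanity-checked on its own. The following is a minimal, illustrative sketch (not a file in this repository): `pixel_offset_to_meters` is a hypothetical helper name, and the camera constants simply mirror the default values assumed in `PrecisionLand.__init__`.

```
#!/usr/bin/python
# Illustrative sketch only: convert a pixel offset measured from the image
# center into a ground-plane offset in meters, using the same model as
# PrecisionLand.pixel_point_to_position_xy() (angle per pixel, projected onto
# the ground at the current altitude).
import math

CAMERA_WIDTH, CAMERA_HEIGHT = 640, 480    # pixels (assumed defaults)
CAMERA_HFOV, CAMERA_VFOV = 72.42, 43.3    # degrees (assumed defaults)

def pixel_offset_to_meters(px, py, altitude_m):
    # angle subtended by the pixel offset from the image center
    theta_x = px * CAMERA_HFOV / CAMERA_WIDTH
    theta_y = py * CAMERA_VFOV / CAMERA_HEIGHT
    # project that angle onto the ground plane at the given altitude
    return (altitude_m * math.tan(math.radians(theta_x)),
            altitude_m * math.tan(math.radians(theta_y)))

# e.g. a target 100 px right of center, seen from 10 m altitude:
print(pixel_offset_to_meters(100, 0, 10.0))   # ~ (2.0, 0.0) meters
```

For small offsets the result scales roughly linearly with the pixel offset; the tan() projection only becomes significant toward the edges of the frame.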