├── LICENSE
├── Companion_Computer
│   ├── presets
│   │   ├── d4xx-high-confidence.json
│   │   ├── d4xx-default.json
│   │   └── d4xx-high-accuracy.json
│   └── d4xx_to_mavlink_3D.py
├── README.md
└── Simulation
    └── air_sim_to_mavlink.py
/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, Rishabh Singh 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 | -------------------------------------------------------------------------------- /Companion_Computer/presets/d4xx-high-confidence.json: -------------------------------------------------------------------------------- 1 | { 2 | "aux-param-autoexposure-setpoint": "2000", 3 | "aux-param-colorcorrection1": "0.298828", 4 | "aux-param-colorcorrection10": "0", 5 | "aux-param-colorcorrection11": "0", 6 | "aux-param-colorcorrection12": "0", 7 | "aux-param-colorcorrection2": "0.293945", 8 | "aux-param-colorcorrection3": "0.293945", 9 | "aux-param-colorcorrection4": "0.114258", 10 | "aux-param-colorcorrection5": "0", 11 | "aux-param-colorcorrection6": "0", 12 | "aux-param-colorcorrection7": "0", 13 | "aux-param-colorcorrection8": "0", 14 | "aux-param-colorcorrection9": "0", 15 | "aux-param-depthclampmax": "65536", 16 | "aux-param-depthclampmin": "0", 17 | "aux-param-disparityshift": "0", 18 | "controls-autoexposure-auto": "True", 19 | "controls-autoexposure-manual": "8500", 20 | "controls-depth-gain": "16", 21 | "controls-laserpower": "0", 22 | "controls-laserstate": "on", 23 | "ignoreSAD": "0", 24 | "param-autoexposure-setpoint": "2000", 25 | "param-censusenablereg-udiameter": "9", 26 | "param-censusenablereg-vdiameter": "9", 27 | "param-censususize": "9", 28 | "param-censusvsize": "9", 29 | "param-depthclampmax": "65536", 30 | "param-depthclampmin": "0", 31 | "param-depthunits": "1000", 32 | "param-disableraucolor": "0", 33 | "param-disablesadcolor": "0", 34 | "param-disablesadnormalize": "0", 35 | "param-disablesloleftcolor": "0", 36 | "param-disableslorightcolor": "1", 37 | "param-disparitymode": "0", 38 | "param-disparityshift": "0", 39 | "param-lambdaad": "751", 40 | "param-lambdacensus": "6", 41 | "param-leftrightthreshold": "10", 42 | "param-maxscorethreshb": "1423", 43 | "param-medianthreshold": "625", 44 | "param-minscorethresha": "4", 45 | "param-neighborthresh": "108", 46 | "param-raumine": "6", 47 | "param-rauminn": "3", 48 | "param-rauminnssum": "7", 49 | "param-raumins": "2", 50 | "param-rauminw": "2", 51 | "param-rauminwesum": "12", 52 | "param-regioncolorthresholdb": "0.784736", 53 | "param-regioncolorthresholdg": "0.565558", 54 | "param-regioncolorthresholdr": "0.985323", 55 | "param-regionshrinku": "3", 56 | "param-regionshrinkv": "0", 57 | "param-robbinsmonrodecrement": "5", 58 | "param-robbinsmonroincrement": "5", 59 | "param-rsmdiffthreshold": "1.65625", 60 | "param-rsmrauslodiffthreshold": "0.71875", 61 | "param-rsmremovethreshold": "0.809524", 62 | "param-scanlineedgetaub": "13", 63 | "param-scanlineedgetaug": "15", 64 | "param-scanlineedgetaur": "30", 65 | "param-scanlinep1": "30", 66 | "param-scanlinep1onediscon": "76", 67 | "param-scanlinep1twodiscon": "86", 68 | "param-scanlinep2": "98", 69 | "param-scanlinep2onediscon": "105", 70 | "param-scanlinep2twodiscon": "33", 71 | "param-secondpeakdelta": "775", 72 | "param-texturecountthresh": "4", 73 | "param-texturedifferencethresh": "50", 74 | "param-usersm": "1", 75 | "param-zunits": "1000" 76 | } -------------------------------------------------------------------------------- /Companion_Computer/presets/d4xx-default.json: -------------------------------------------------------------------------------- 1 | { 2 | "aux-param-autoexposure-setpoint": "1536", 3 | "aux-param-colorcorrection1": "0.298828", 4 | "aux-param-colorcorrection10": "-0", 5 | "aux-param-colorcorrection11": "-0", 6 | "aux-param-colorcorrection12": "-0", 7 | "aux-param-colorcorrection2": "0.293945", 8 | "aux-param-colorcorrection3": "0.293945", 9 | 
"aux-param-colorcorrection4": "0.114258", 10 | "aux-param-colorcorrection5": "-0", 11 | "aux-param-colorcorrection6": "-0", 12 | "aux-param-colorcorrection7": "-0", 13 | "aux-param-colorcorrection8": "-0", 14 | "aux-param-colorcorrection9": "-0", 15 | "aux-param-depthclampmax": "65536", 16 | "aux-param-depthclampmin": "0", 17 | "aux-param-disparityshift": "0", 18 | "controls-autoexposure-auto": "True", 19 | "controls-autoexposure-manual": "8500", 20 | "controls-color-autoexposure-auto": "True", 21 | "controls-color-autoexposure-manual": "166", 22 | "controls-color-backlight-compensation": "0", 23 | "controls-color-brightness": "0", 24 | "controls-color-contrast": "50", 25 | "controls-color-gain": "64", 26 | "controls-color-gamma": "300", 27 | "controls-color-hue": "0", 28 | "controls-color-power-line-frequency": "3", 29 | "controls-color-saturation": "64", 30 | "controls-color-sharpness": "50", 31 | "controls-color-white-balance-auto": "True", 32 | "controls-color-white-balance-manual": "4600", 33 | "controls-depth-gain": "16", 34 | "controls-laserpower": "150", 35 | "controls-laserstate": "on", 36 | "ignoreSAD": "0", 37 | "param-amplitude-factor": "0", 38 | "param-autoexposure-setpoint": "1536", 39 | "param-censusenablereg-udiameter": "9", 40 | "param-censusenablereg-vdiameter": "9", 41 | "param-censususize": "9", 42 | "param-censusvsize": "9", 43 | "param-depthclampmax": "65536", 44 | "param-depthclampmin": "0", 45 | "param-depthunits": "1000", 46 | "param-disableraucolor": "0", 47 | "param-disablesadcolor": "0", 48 | "param-disablesadnormalize": "0", 49 | "param-disablesloleftcolor": "0", 50 | "param-disableslorightcolor": "0", 51 | "param-disparitymode": "0", 52 | "param-disparityshift": "0", 53 | "param-lambdaad": "800", 54 | "param-lambdacensus": "26", 55 | "param-leftrightthreshold": "24", 56 | "param-maxscorethreshb": "2047", 57 | "param-medianthreshold": "500", 58 | "param-minscorethresha": "1", 59 | "param-neighborthresh": "7", 60 | "param-raumine": "1", 61 | "param-rauminn": "1", 62 | "param-rauminnssum": "3", 63 | "param-raumins": "1", 64 | "param-rauminw": "1", 65 | "param-rauminwesum": "3", 66 | "param-regioncolorthresholdb": "0.0499022", 67 | "param-regioncolorthresholdg": "0.0499022", 68 | "param-regioncolorthresholdr": "0.0499022", 69 | "param-regionshrinku": "3", 70 | "param-regionshrinkv": "1", 71 | "param-robbinsmonrodecrement": "10", 72 | "param-robbinsmonroincrement": "10", 73 | "param-rsmdiffthreshold": "4", 74 | "param-rsmrauslodiffthreshold": "1", 75 | "param-rsmremovethreshold": "0.375", 76 | "param-scanlineedgetaub": "72", 77 | "param-scanlineedgetaug": "72", 78 | "param-scanlineedgetaur": "72", 79 | "param-scanlinep1": "60", 80 | "param-scanlinep1onediscon": "105", 81 | "param-scanlinep1twodiscon": "70", 82 | "param-scanlinep2": "342", 83 | "param-scanlinep2onediscon": "190", 84 | "param-scanlinep2twodiscon": "130", 85 | "param-secondpeakdelta": "325", 86 | "param-texturecountthresh": "0", 87 | "param-texturedifferencethresh": "0", 88 | "param-usersm": "1", 89 | "param-zunits": "1000", 90 | "stream-depth-format": "Z16", 91 | "stream-fps": "30", 92 | "stream-height": "480", 93 | "stream-width": "848" 94 | } 95 | -------------------------------------------------------------------------------- /Companion_Computer/presets/d4xx-high-accuracy.json: -------------------------------------------------------------------------------- 1 | { 2 | "aux-param-autoexposure-setpoint": "1536", 3 | "aux-param-colorcorrection1": "0.298828", 4 | "aux-param-colorcorrection10": "-0", 5 
| "aux-param-colorcorrection11": "-0", 6 | "aux-param-colorcorrection12": "-0", 7 | "aux-param-colorcorrection2": "0.293945", 8 | "aux-param-colorcorrection3": "0.293945", 9 | "aux-param-colorcorrection4": "0.114258", 10 | "aux-param-colorcorrection5": "-0", 11 | "aux-param-colorcorrection6": "-0", 12 | "aux-param-colorcorrection7": "-0", 13 | "aux-param-colorcorrection8": "-0", 14 | "aux-param-colorcorrection9": "-0", 15 | "aux-param-depthclampmax": "65536", 16 | "aux-param-depthclampmin": "0", 17 | "aux-param-disparityshift": "0", 18 | "controls-autoexposure-auto": "True", 19 | "controls-autoexposure-manual": "8500", 20 | "controls-color-autoexposure-auto": "True", 21 | "controls-color-autoexposure-manual": "166", 22 | "controls-color-backlight-compensation": "0", 23 | "controls-color-brightness": "0", 24 | "controls-color-contrast": "50", 25 | "controls-color-gain": "64", 26 | "controls-color-gamma": "300", 27 | "controls-color-hue": "0", 28 | "controls-color-power-line-frequency": "3", 29 | "controls-color-saturation": "64", 30 | "controls-color-sharpness": "50", 31 | "controls-color-white-balance-auto": "True", 32 | "controls-color-white-balance-manual": "4600", 33 | "controls-depth-gain": "16", 34 | "controls-laserpower": "150", 35 | "controls-laserstate": "on", 36 | "ignoreSAD": "0", 37 | "param-amplitude-factor": "0", 38 | "param-autoexposure-setpoint": "1536", 39 | "param-censusenablereg-udiameter": "9", 40 | "param-censusenablereg-vdiameter": "9", 41 | "param-censususize": "9", 42 | "param-censusvsize": "9", 43 | "param-depthclampmax": "65536", 44 | "param-depthclampmin": "0", 45 | "param-depthunits": "1000", 46 | "param-disableraucolor": "0", 47 | "param-disablesadcolor": "0", 48 | "param-disablesadnormalize": "0", 49 | "param-disablesloleftcolor": "0", 50 | "param-disableslorightcolor": "1", 51 | "param-disparitymode": "0", 52 | "param-disparityshift": "0", 53 | "param-lambdaad": "751", 54 | "param-lambdacensus": "6", 55 | "param-leftrightthreshold": "10", 56 | "param-maxscorethreshb": "2893", 57 | "param-medianthreshold": "796", 58 | "param-minscorethresha": "4", 59 | "param-neighborthresh": "108", 60 | "param-raumine": "6", 61 | "param-rauminn": "3", 62 | "param-rauminnssum": "7", 63 | "param-raumins": "2", 64 | "param-rauminw": "2", 65 | "param-rauminwesum": "12", 66 | "param-regioncolorthresholdb": "0.785714", 67 | "param-regioncolorthresholdg": "0.565558", 68 | "param-regioncolorthresholdr": "0.985323", 69 | "param-regionshrinku": "3", 70 | "param-regionshrinkv": "0", 71 | "param-robbinsmonrodecrement": "25", 72 | "param-robbinsmonroincrement": "2", 73 | "param-rsmdiffthreshold": "1.65625", 74 | "param-rsmrauslodiffthreshold": "0.71875", 75 | "param-rsmremovethreshold": "0.809524", 76 | "param-scanlineedgetaub": "13", 77 | "param-scanlineedgetaug": "15", 78 | "param-scanlineedgetaur": "30", 79 | "param-scanlinep1": "155", 80 | "param-scanlinep1onediscon": "160", 81 | "param-scanlinep1twodiscon": "59", 82 | "param-scanlinep2": "190", 83 | "param-scanlinep2onediscon": "507", 84 | "param-scanlinep2twodiscon": "493", 85 | "param-secondpeakdelta": "647", 86 | "param-texturecountthresh": "0", 87 | "param-texturedifferencethresh": "1722", 88 | "param-usersm": "1", 89 | "param-zunits": "1000", 90 | "stream-depth-format": "Z16", 91 | "stream-fps": "30", 92 | "stream-height": "480", 93 | "stream-width": "848" 94 | } -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Vision-Obstacle-Avoidance 2 | 3 | This repository provides scripts to be used by onboard computers with Intel RealSense cameras to provide 3-D obstacle avoidance with ArduPilot. It also provides an easy-to-use script that grabs depth-camera data from Microsoft AirSim and sends MAVLink messages to ArduPilot SITL for simulation. 4 | 5 | ## Objective: 6 | 7 | The depth array retrieved from the camera is divided into a 3x3 grid. The smallest depth in each grid cell is stored and converted into a body-frame NEU (North-East-Up) XYZ vector. Each vector is packed into a MAVLink OBSTACLE_DISTANCE_3D message (supported locally by ArduPilot master ONLY) and sent to the flight controller. 8 | 9 | Currently the avoidance system in ArduPilot is entirely 2-D. It is being upgraded to 3-D in the following PR: https://github.com/ArduPilot/ardupilot/pull/15553 10 | 11 | ![D455](https://i.ibb.co/XCSj9zZ/d455.png) 12 | 13 | 14 | Demonstration of a flight test with this script and an Intel RealSense D455: https://youtu.be/-6PWB52J3ho 15 | 16 | Demonstration of AirSim: 17 | https://youtu.be/_G5LD6bPeJs 18 | 19 | 20 | ## Prerequisites for the entire project: 21 | 22 | 1. Clone the branch from the above-mentioned PR. 23 | 2. Update pymavlink from the ArduPilot repo (so the OBSTACLE_DISTANCE_3D message definition is available). 24 | 3. ArduPilot parameters (reboot after setting): 25 | - PRX_TYPE = 2 (the proximity library will look for MAVLink messages) 26 | - AVOID_ENABLE = 3 (enable fence + proximity avoidance) 27 | 28 | 29 | ## Companion Computer: 30 | 31 | ### Requirements: 32 | 33 | - x86-based companion computer (for compatibility with Intel) 34 | - Ubuntu 18.04 (otherwise, the following installation instructions might not work) 35 | - Python3 (default with Ubuntu 18.04) 36 | 37 | ### Install required packages: 38 | 39 | - pip3 install pyrealsense2 40 | - pip3 install numpy 41 | - pip3 install pymavlink 42 | - pip3 install apscheduler 43 | - pip3 install pyserial 44 | - pip3 install numba # Only necessary if you want to optimize performance. Requires pip3 version >= 19 and llvmlite: pip3 install llvmlite==0.34.0 45 | - pip3 install opencv-python 46 | - sudo apt -y install python3-gst-1.0 gir1.2-gst-rtsp-server-1.0 gstreamer1.0-plugins-base gstreamer1.0-plugins-ugly libx264-dev 47 | 48 | - sudo apt install python3-opencv # Only necessary if you installed the minimal version of Ubuntu 49 | 50 | 51 | ### Steps to Launch: 52 | 53 | 1. Copy the Companion_Computer directory onto the companion computer. Connect the camera. 54 | 2. Connect the computer to a flight controller running ArduPilot master + the above-mentioned PR. 55 | 3. Run the script: python3 d4xx_to_mavlink_3D.py --connect "Vehicle connection target string" 56 | 57 | ### Available arguments: 58 | 59 | - --connect: port/IP to connect to 60 | - --baudrate: set the baudrate of the connection 61 | - --obstacle_distance_msg_hz: rate of sending the MAVLink message, defaults to 5 Hz for this script 62 | - --debug_enable: displays the RGB and depth view along with the depth inside each grid cell (only enable if a valid monitor is connected) 63 | - --camera_name: camera name to connect to. If not specified, any valid camera will be connected to randomly. E.g. type 'D435I' to look for an Intel RealSense D435I. 64 | 65 | Once the camera is connected, check with Mission Planner that obstacles are being received. 66 | 67 | 68 | ## AirSim: 69 | 70 | The AirSim script shares some of its code with the d4xx_to_mavlink_3D script, and the idea remains the same: the depth image is grabbed from AirSim and divided into a 3x3 grid, as sketched below. 71 | This is a good way to experiment with depth cameras before moving on to a real vehicle. 72 | 
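The following minimal sketch illustrates the core idea described in the Objective above: take the closest point in each cell of a 3x3 grid and deproject it into a body-frame NEU vector with a pinhole camera model. It is not part of the repository scripts (the function name is made up for this example), and the actual scripts subsample each cell on a coarse pixel grid for speed instead of scanning every pixel.

```
import numpy as np

def grid_min_depths(depth, fov_deg=90.0):
    """For each cell of a 3x3 grid over a depth image (meters), return the
    body-frame NEU vector of the closest point found in that cell."""
    h, w = depth.shape
    f = w / (2 * np.tan(np.radians(fov_deg) / 2))  # pinhole focal length, pixels
    vectors = []
    for gy in range(3):
        for gx in range(3):
            cell = depth[gy * h // 3:(gy + 1) * h // 3,
                         gx * w // 3:(gx + 1) * w // 3]
            v, u = np.unravel_index(np.argmin(cell), cell.shape)
            u, v = u + gx * w // 3, v + gy * h // 3  # back to full-image pixels
            d = depth[v, u]
            x = d                      # forward (North)
            y = (u - w / 2) * d / f    # right (East)
            z = -(v - h / 2) * d / f   # up
            vectors.append((x, y, z))
    return vectors  # 9 NEU vectors, one per grid cell
```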
73 | ### Requirements: 74 | 75 | 1. Setup SITL: https://ardupilot.org/dev/docs/SITL-setup-landingpage.html 76 | 2. Setup SITL to work with AirSim: https://ardupilot.org/dev/docs/sitl-with-airsim.html 77 | 78 | ### Install required packages: 79 | 80 | - pip3 install pymavlink 81 | - pip3 install apscheduler 82 | - pip3 install airsim 83 | - pip3 install numpy 84 | - sudo apt-get install python3-pil 85 | - sudo apt install python3-opencv 86 | 87 | Add the following section to the settings.json file, which can be found in Documents/AirSim: 88 | 89 | ``` 90 | "CameraDefaults": { 91 | "CaptureSettings": [ 92 | { 93 | "ImageType": 1, 94 | "Width": 640, 95 | "Height": 480, 96 | "FOV_Degrees": 90, 97 | "AutoExposureSpeed": 100, 98 | "MotionBlurAmount": 0 99 | } 100 | ] 101 | }, 102 | ``` 103 | 104 | ### Steps to Launch: 105 | 106 | 1. Launch SITL: sim_vehicle.py -v ArduCopter -f airsim-copter --console --map 107 | 2. Launch AirSim: ./Blocks.sh -ResX=640 -ResY=480 -windowed 108 | 3. Launch the script: python3 air_sim_to_mavlink.py 109 | 110 | ### Available arguments: 111 | 112 | - --connect: port/IP to connect to 113 | - --baudrate: set the baudrate of the connection 114 | - --obstacle_distance_msg_hz: rate of sending the MAVLink message, defaults to 15 Hz -------------------------------------------------------------------------------- /Simulation/air_sim_to_mavlink.py: -------------------------------------------------------------------------------- 1 | # Install required packages: 2 | # 3 | # pip3 install pymavlink 4 | # pip3 install apscheduler 5 | # pip3 install opencv-python 6 | # pip3 install airsim 7 | # pip3 install numpy 8 | # sudo apt-get install python3-pil 9 | 10 | import math 11 | import sys 12 | import os 13 | import time 14 | import threading 15 | import argparse 16 | 17 | # Set MAVLink protocol to 2. This must happen before pymavlink is imported. 18 | os.environ["MAVLINK20"] = "1" 19 | 20 | sys.path.append("/usr/local/lib/") 21 | 22 | import airsim 23 | import cv2 24 | import numpy as np 25 | from PIL import Image 26 | from time import sleep 27 | from apscheduler.schedulers.background import BackgroundScheduler 28 | from pymavlink import mavutil 29 | 30 | ###################################################### 31 | ## Parsing user inputs ## 32 | ###################################################### 33 | 34 | parser = argparse.ArgumentParser(description='Sends OBSTACLE_DISTANCE_3D messages from AirSim depth images to ArduPilot SITL') 35 | parser.add_argument('--connect', 36 | help="Vehicle connection target string. If not specified, a default string will be used.") 37 | parser.add_argument('--baudrate', type=float, 38 | help="Vehicle connection baudrate. If not specified, a default value will be used.") 39 | parser.add_argument('--obstacle_distance_msg_hz', type=float, 40 | help="Update frequency for OBSTACLE_DISTANCE message. If not specified, a default value will be used.") 41 | 42 | args = parser.parse_args() 43 | 44 | # Default configurations for connection to the FCU 45 | if not args.connect: 46 | connection_string = 'localhost:14551' 47 | else: 48 | connection_string = args.connect 49 | 50 | if not args.baudrate: 51 | connection_baudrate = 921600 52 | else: 53 | connection_baudrate = args.baudrate 54 | 55 | if not args.obstacle_distance_msg_hz: 56 | obstacle_distance_msg_hz = 15 57 | else: 58 | obstacle_distance_msg_hz = args.obstacle_distance_msg_hz 59 | 
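# Example usage (assumed workflow, mirroring the README's "Steps to Launch"):
#   1. Launch SITL:   sim_vehicle.py -v ArduCopter -f airsim-copter --console --map
#   2. Launch AirSim: ./Blocks.sh -ResX=640 -ResY=480 -windowed
#   3. Run:           python3 air_sim_to_mavlink.py --connect localhost:14551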
60 | # AirSim API 61 | client = airsim.MultirotorClient() 62 | client.confirmConnection() 63 | client.enableApiControl(True) 64 | 65 | DEPTH_RANGE_M = [0.3, 12] # depth range in meters, to be changed as per requirements 66 | MAX_DEPTH = 9999 # arbitrary large number 67 | 68 | # numpy array to share obstacle coordinates between the main thread and the MAVLink thread 69 | mavlink_obstacle_coordinates = np.ones((9,3), dtype = np.float) * (MAX_DEPTH) 70 | 71 | # get time in correct format 72 | start_time = int(round(time.time() * 1000)) 73 | current_milli_time = lambda: int(round(time.time() * 1000) - start_time) 74 | 75 | # get depth from the airsim backend 76 | def get_depth(client): 77 | requests = [] 78 | requests.append(airsim.ImageRequest( 79 | 'front_center', airsim.ImageType.DepthPlanner, pixels_as_float=True, compress=False)) 80 | 81 | responses = client.simGetImages(requests) 82 | depth = airsim.list_to_2d_float_array( 83 | responses[0].image_data_float, responses[0].width, responses[0].height) 84 | depth = np.expand_dims(depth, axis=2) 85 | depth = depth.squeeze() 86 | return depth, responses[0].width, responses[0].height 87 | 88 | # converts an (x, y) pixel in the depth matrix to an NEU 3-D vector in the body frame 89 | def convert_depth_3D_vec(x_depth, y_depth, depth, fov): 90 | # https://stackoverflow.com/questions/62046666/find-3d-coordinate-with-respect-to-the-camera-using-2d-image-coordinates 91 | h, w = depth.shape 92 | center_x = w // 2 93 | center_y = h // 2 94 | focal_len = w / (2 * np.tan(fov / 2)) 95 | x = depth[y_depth, x_depth] 96 | y = (x_depth - center_x) * x / focal_len 97 | z = -1 * (y_depth - center_y) * x / focal_len 98 | return x,y,z 99 | 
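# Illustrative sanity check (added for documentation; it is not called anywhere
# in this script): with a 640x480 image and a 90-degree horizontal FOV the
# focal length is 640 / (2 * tan(45 deg)) = 320 px, so the centre pixel of a
# flat wall 5 m ahead maps to the body-frame vector (5, 0, 0).
def _example_convert_depth_3D_vec():
    depth = np.full((480, 640), 5.0)  # synthetic depth image: flat wall at 5 m
    x, y, z = convert_depth_3D_vec(320, 240, depth, math.radians(90))
    assert (x, y, z) == (5.0, 0.0, 0.0)  # straight ahead, no sideways or vertical offset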
100 | # divide the depth data into a 3x3 grid and pick out the smallest distance in each cell 101 | # store the (x, y) depth-matrix coordinates and the 9 depths, and convert them into body-frame x, y, z 102 | def distances_from_depth_image(depth_mat, min_depth_m, max_depth_m, depth, depth_coordinates, obstacle_coordinates, valid_depth): 103 | # Parameters for depth image 104 | depth_img_width = depth_mat.shape[1] 105 | depth_img_height = depth_mat.shape[0] 106 | 107 | # Sampling step between probed pixels 108 | step_x = depth_img_width / 20 109 | step_y = depth_img_height/ 20 110 | 111 | 112 | sampling_width = int(1/3 * depth_img_width) 113 | sampling_height = int(1/3* depth_img_height) 114 | # divide the frame into a 3x3 grid to find the minimum depth value in "9 boxes" 115 | for i in range(9): 116 | if i%3 == 0 and i != 0: 117 | sampling_width = int(1/3* depth_img_width) 118 | sampling_height = sampling_height + int(1/3 * depth_img_height) 119 | 120 | x,y = 0,0 121 | x = sampling_width - int(1/3 * depth_img_width) 122 | 123 | while x < sampling_width: 124 | x = x + step_x 125 | y = sampling_height - int(1/3* depth_img_height) 126 | while y < sampling_height: 127 | y = y + step_y 128 | # make sure coordinates stay within matrix limits 129 | x_pixel = 0 if x < 0 else depth_img_width-1 if x > depth_img_width -1 else int(x) 130 | y_pixel = 0 if y < 0 else depth_img_height-1 if y > depth_img_height -1 else int(y) 131 | 132 | # convert depth to body-frame x,y,z 133 | x_obj,y_obj,z_obj = convert_depth_3D_vec(x_pixel, y_pixel, depth_mat, math.radians(90)) 134 | 135 | # actual euclidean distance to obstacle 136 | point_depth = (x_obj*x_obj + y_obj*y_obj + z_obj*z_obj)**0.5 137 | 138 | # if within valid range, mark this depth as valid and store all the info 139 | if point_depth <= depth[i] and point_depth > min_depth_m and point_depth < max_depth_m: 140 | depth[i] = point_depth 141 | depth_coordinates[i] = [x_pixel,y_pixel] 142 | obstacle_coordinates[i] = [x_obj, y_obj, z_obj] 143 | valid_depth[i] = True 144 | 145 | sampling_width = sampling_width + int(1/3* depth_img_width) 146 | 
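# Illustrative usage (added for documentation; not called by the script). The
# buffers mirror the ones allocated in the main loop below: with a uniform 5 m
# depth image, every grid cell should report a valid obstacle at >= 5 m.
def _example_distances_from_depth_image():
    depth_mat = np.full((480, 640), 5.0)
    depth = np.ones((9,)) * (DEPTH_RANGE_M[1] + 1)
    depth_coordinates = np.zeros((9, 2), dtype=np.uint16)
    obstacle_coordinates = np.ones((9, 3)) * MAX_DEPTH
    valid_depth = np.zeros((9,), dtype=bool)
    distances_from_depth_image(depth_mat, DEPTH_RANGE_M[0], DEPTH_RANGE_M[1],
                               depth, depth_coordinates, obstacle_coordinates, valid_depth)
    assert valid_depth.all() and (depth >= 5.0).all()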
147 | # display the depth image from AirSim. The data received from AirSim needs to be processed for a good view 148 | # also divides the view into a 3x3 grid and prints the smallest depth value in each cell 149 | # puts a circle on the smallest found depth value 150 | def getScreenDepthVis(client, depth_coordinates, depth_list): 151 | # get image from airsim 152 | responses = client.simGetImages([airsim.ImageRequest(0, airsim.ImageType.DepthPlanner, True, False)]) 153 | # condition the data 154 | img1d = np.array(responses[0].image_data_float, dtype=np.float) 155 | img1d = 255/np.maximum(np.ones(img1d.size), img1d) 156 | img2d = np.reshape(img1d, (responses[0].height, responses[0].width)) 157 | image = np.invert(np.array(Image.fromarray(img2d.astype(np.uint8), mode='L'))) 158 | 159 | factor = 10 160 | maxIntensity = 255.0 # depends on dtype of image data 161 | 162 | # Decrease intensity such that dark pixels become much darker, bright pixels become slightly dark 163 | newImage1 = (maxIntensity)*(image/maxIntensity)**factor 164 | newImage1 = newImage1.astype(np.uint8) 165 | 166 | color_img = cv2.applyColorMap(newImage1, cv2.COLORMAP_JET) 167 | # divide the view into a 3x3 grid 168 | pxstep = int(newImage1.shape[1]/3) 169 | pystep = int(newImage1.shape[0]/3) 170 | gx = pxstep 171 | gy = pystep 172 | while gx < newImage1.shape[1]: 173 | cv2.line(color_img, (gx, 0), (gx, newImage1.shape[0]), color=(0, 0, 0), thickness=1) 174 | gx += pxstep 175 | while gy < newImage1.shape[0]: 176 | cv2.line(color_img, (0, gy), (newImage1.shape[1], gy), color=(0, 0, 0),thickness=1) 177 | gy += pystep 178 | 179 | # print the circle and depth values on the screen 180 | for i in range(len(depth_list)): 181 | if depth_list[i] <= DEPTH_RANGE_M[1]: 182 | color_img = cv2.circle(color_img, (int(depth_coordinates[i][0]),int(depth_coordinates[i][1])), 5, (0, 0, 0), 5) 183 | color_img = cv2.putText(color_img, str(round(depth_list[i],2)), (int(pxstep*(1/4 + i%3)),int(pystep*(1/3 + math.floor(i/3)))), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,255), 2) 184 | 185 | cv2.imshow("Depth Vis", color_img) 186 | cv2.waitKey(1) 187 | 188 | 189 | def mavlink_loop(conn, callbacks): 190 | '''a main routine for a thread; reads data from a mavlink connection, 191 | calling callbacks based on message type received. 192 | ''' 193 | interesting_messages = list(callbacks.keys()) 194 | while True: 195 | # send a heartbeat msg 196 | conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER, 197 | mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 198 | 0, 199 | 0, 200 | 0) 201 | m = conn.recv_match(type=interesting_messages, timeout=1, blocking=True) 202 | if m is None: 203 | continue 204 | callbacks[m.get_type()](m) 205 | 206 | # send the mavlink message to SITL 207 | def send_obstacle_distance_3D_message(): 208 | global conn 209 | time_ms = current_milli_time() 210 | # print(mavlink_obstacle_coordinates) 211 | for i in range(9): 212 | conn.mav.obstacle_distance_3d_send( 213 | time_ms, # ms timestamp (time since system boot) 214 | 0, # sensor_type: not implemented in ArduPilot 215 | 0, # frame: not implemented in ArduPilot 216 | 65535, # unknown ID of the object; we are not tracking individual obstacles 217 | float(mavlink_obstacle_coordinates[i][0]), # X in NEU body frame 218 | float(mavlink_obstacle_coordinates[i][1]), # Y in NEU body frame 219 | float(mavlink_obstacle_coordinates[i][2]), # Z in NEU body frame 220 | float(DEPTH_RANGE_M[0]), # min range of sensor 221 | float(DEPTH_RANGE_M[1]) # max range of sensor 222 | ) 223 | 
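# Note (assumption, based on the README prerequisites): OBSTACLE_DISTANCE_3D is
# defined in ArduPilot's MAVLink dialect only, so obstacle_distance_3d_send()
# exists only when pymavlink has been updated from the ArduPilot repository. If
# the call above raises AttributeError, update pymavlink as described in the README.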
224 | conn = mavutil.mavlink_connection( 225 | device = str(connection_string), 226 | autoreconnect = True, 227 | source_system = 1, 228 | source_component = 93, 229 | baud=connection_baudrate, 230 | force_connected=True, 231 | ) 232 | 233 | mavlink_callbacks = { 234 | } 235 | mavlink_thread = threading.Thread(target=mavlink_loop, args=(conn, mavlink_callbacks)) 236 | mavlink_thread.start() 237 | 238 | # Send MAVLink messages in the background at pre-determined frequencies 239 | sched = BackgroundScheduler() 240 | sched.add_job(send_obstacle_distance_3D_message, 'interval', seconds = 1/obstacle_distance_msg_hz) 241 | sched.start() 242 | 243 | # main loop 244 | while True: 245 | # depth image from airsim 246 | depth_mat,width,height = get_depth(client) 247 | # Will be populated with the smallest depth in each grid cell 248 | depth_list = np.ones((9,), dtype=np.float) * (DEPTH_RANGE_M[1] + 1) 249 | # Valid depth in each grid cell will be marked True 250 | valid_depth = np.zeros((9,), dtype=np.bool) 251 | # Matrix coordinates of the smallest depth in each grid cell 252 | depth_coordinates = np.ones((9,2), dtype=np.uint16) * (MAX_DEPTH) 253 | # Body-frame NEU XYZ coordinates of obstacles to be sent to the vehicle 254 | obstacle_coordinates = np.ones((9,3),dtype=np.float) * (MAX_DEPTH) 255 | 256 | # get the obstacles 257 | distances_from_depth_image(depth_mat, DEPTH_RANGE_M[0], DEPTH_RANGE_M[1], depth_list, depth_coordinates, obstacle_coordinates, valid_depth) 258 | # if valid, populate the mavlink array 259 | for i in range(9): 260 | if valid_depth[i]: 261 | mavlink_obstacle_coordinates[i] = obstacle_coordinates[i] 262 | else: 263 | mavlink_obstacle_coordinates[i] = MAX_DEPTH 264 | 265 | # visualize the data 266 | getScreenDepthVis(client, depth_coordinates, depth_list) 267 | -------------------------------------------------------------------------------- /Companion_Computer/d4xx_to_mavlink_3D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | ###################################################### 4 | ## librealsense D4xx to MAVLink ## 5 | ###################################################### 6 | # Requirements: 7 | # x86 based Companion Computer (for compatibility with Intel), 8 | # Ubuntu 18.04 (otherwise, the following installation instructions might not work), 9 | # Python3 (default with Ubuntu 18.04) 10 | # Install required packages: 11 | # pip3 install pyrealsense2 12 | # pip3 install numpy 13 | # pip3 install pymavlink 14 | # pip3 install apscheduler 15 | # pip3 install pyserial 16 | # pip3 install numba # Only necessary if you want to optimize the performance. 
Requires pip3 version >= 19 and llvmlite: pip3 install llvmlite==0.34.0 17 | # pip3 install opencv-python 18 | # sudo apt -y install python3-gst-1.0 gir1.2-gst-rtsp-server-1.0 gstreamer1.0-plugins-base gstreamer1.0-plugins-ugly libx264-dev 19 | # Only necessary if you installed the minimal version of Ubuntu: 20 | # sudo apt install python3-opencv 21 | 22 | # Set the path for pyrealsense2.[].so 23 | # Otherwise, place the pyrealsense2.[].so file under the same directory as this script or modify PYTHONPATH 24 | import sys 25 | sys.path.append("/usr/local/lib/") 26 | 27 | # Set MAVLink protocol to 2. 28 | import os 29 | os.environ["MAVLINK20"] = "1" 30 | 31 | # Import the libraries 32 | import pyrealsense2 as rs 33 | import numpy as np 34 | import math as m 35 | import signal 36 | import sys 37 | import time 38 | import argparse 39 | import threading 40 | import json 41 | from time import sleep 42 | from apscheduler.schedulers.background import BackgroundScheduler 43 | from pymavlink import mavutil 44 | from numba import njit 45 | 46 | # In order to import cv2 under python3 when you also have ROS Kinetic installed 47 | if os.path.exists("/opt/ros/kinetic/lib/python2.7/dist-packages"): 48 | sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') 49 | if os.path.exists(os.path.expanduser("~/anaconda3/lib/python3.7/site-packages")): 50 | sys.path.append(os.path.expanduser('~/anaconda3/lib/python3.7/site-packages')) 51 | import cv2 52 | 53 | # To setup video streaming 54 | import gi 55 | gi.require_version('Gst', '1.0') 56 | gi.require_version('GstRtspServer', '1.0') 57 | from gi.repository import Gst, GstRtspServer, GLib 58 | 59 | # To obtain ip address 60 | import socket 61 | 62 | ###################################################### 63 | ## Depth parameters - reconfigurable ## 64 | ###################################################### 65 | 66 | # Sensor-specific parameters, for D435: https://www.intelrealsense.com/depth-camera-d435/ 67 | STREAM_TYPE = [rs.stream.depth, rs.stream.color] # rs2_stream identifies the type of data provided by the RealSense device 68 | FORMAT = [rs.format.z16, rs.format.bgr8] # rs2_format identifies how binary data is encoded within a frame 69 | DEPTH_WIDTH = 640 # Defines the number of columns for each frame or zero for auto resolve 70 | DEPTH_HEIGHT = 480 # Defines the number of lines for each frame or zero for auto resolve 71 | COLOR_WIDTH = 640 72 | COLOR_HEIGHT = 480 73 | FPS = 30 74 | DEPTH_RANGE_M = [0.1, 10] # Replace with your sensor's specifics, in meters 75 | 76 | obstacle_line_height_ratio = 0.18 # [0-1]: 0-Top, 1-Bottom. The height of the horizontal line to find distance to obstacle. 77 | obstacle_line_thickness_pixel = 10 # [1-DEPTH_HEIGHT]: Number of pixel rows to use to generate the obstacle distance message. For each column, the scan will return the minimum value for those pixels centered vertically in the image. 78 | 79 | USE_PRESET_FILE = True 80 | PRESET_FILE = "presets/d4xx-high-accuracy.json" 81 | 82 | RTSP_STREAMING_ENABLE = False 83 | RTSP_PORT = "8554" 84 | RTSP_MOUNT_POINT = "/d4xx" 85 | 86 | # List of filters to be applied, in this order. 87 | # https://github.com/IntelRealSense/librealsense/blob/master/doc/post-processing-filters.md 88 | 89 | filters = [ 90 | [True, "Decimation Filter", rs.decimation_filter(2)], 91 | [True, "Threshold Filter", rs.threshold_filter()], 92 | [True, "Depth to Disparity", rs.disparity_transform(True)], 93 | [True, "Spatial Filter", rs.spatial_filter()], 94 | [True, "Temporal Filter", rs.temporal_filter()], 95 | [False, "Hole Filling Filter", rs.hole_filling_filter()], 96 | [True, "Disparity to Depth", rs.disparity_transform(False)] 97 | ] 98 | 
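# Example (assumption: standard librealsense option names from the rs.option
# enum) of tuning one of the filters above, in the same spirit as the
# commented-out decimation example below:
# filters[3][2].set_option(rs.option.filter_smooth_alpha, 0.5)   # Spatial Filter
# filters[3][2].set_option(rs.option.filter_smooth_delta, 20)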
99 | # 100 | # The filters can be tuned with the opencv_depth_filtering.py script; save the tuned default values here 101 | # Individual filters have different options, so one has to apply the values accordingly 102 | # 103 | 104 | # decimation_magnitude = 8 105 | # filters[0][2].set_option(rs.option.filter_magnitude, decimation_magnitude) 106 | 107 | threshold_min_m = DEPTH_RANGE_M[0] 108 | threshold_max_m = DEPTH_RANGE_M[1] 109 | if filters[1][0] is True: 110 | filters[1][2].set_option(rs.option.min_distance, threshold_min_m) 111 | filters[1][2].set_option(rs.option.max_distance, threshold_max_m) 112 | 113 | ###################################################### 114 | ## ArduPilot-related parameters - reconfigurable ## 115 | ###################################################### 116 | 117 | # Default configurations for connection to the FCU 118 | connection_string_default = 'localhost:14551' 119 | connection_baudrate_default = 921600 120 | 121 | # Use this to rotate all processed data 122 | camera_facing_angle_degree = 0 123 | 124 | # Store device serial numbers of connected camera 125 | device_id = None 126 | 127 | # Enable/disable each message/function individually 128 | enable_msg_obstacle_distance = True 129 | enable_msg_distance_sensor = False 130 | obstacle_distance_msg_hz_default = 5.0 131 | default_large_dist = 9999 132 | 133 | # lock for thread synchronization 134 | lock = threading.Lock() 135 | 136 | # indices for the lower part of the depth frame in a 3x3 grid 137 | lower_frame = [6,7,8] 138 | 139 | mavlink_thread_should_exit = False 140 | 141 | debug_enable_default = 0 142 | 143 | # default exit code is failure - a graceful termination with a 144 | # terminate signal is possible. 145 | exit_code = 1 146 | 147 | ###################################################### 148 | ## Global variables ## 149 | ###################################################### 150 | 151 | # FCU connection variables 152 | vehicle_pitch_rad = None 153 | 154 | # Camera-related variables 155 | pipe = None 156 | depth_scale = 0 157 | colorizer = rs.colorizer() 158 | depth_hfov_deg = None 159 | depth_vfov_deg = None 160 | depth_intrinsics = None 161 | 162 | # The name of the display window 163 | display_name = 'Input/output depth' 164 | rtsp_streaming_img = None 165 | 166 | # Data variables 167 | data = None 168 | current_time_us = 0 169 | 170 | start_time = int(round(time.time() * 1000)) 171 | current_milli_time = lambda: int(round(time.time() * 1000) - start_time) 172 | 173 | current_time_ms = current_milli_time() 174 | last_obstacle_distance_sent_ms = 0 # value of current_time_ms when obstacle_distance was last sent 175 | 176 | # Obstacle distances in front of the sensor, starting from the left in increment degrees to the right 177 | # See here: https://mavlink.io/en/messages/common.html#OBSTACLE_DISTANCE 178 | min_depth_cm = int(DEPTH_RANGE_M[0] * 100) # In cm 179 | max_depth_cm = int(DEPTH_RANGE_M[1] * 100) # In cm, should be a little conservative 180 | distances_array_length = 72 181 | angle_offset = None 182 | increment_f = None 183 | distances = np.ones((distances_array_length,), dtype=np.uint16) * (max_depth_cm + 1) 184 | 185 | mavlink_obstacle_coordinates = np.ones((9,3), dtype = np.float) * (9999) 186 | ###################################################### 187 | ## Parsing user inputs ## 188 | ###################################################### 189 | 190 | parser = argparse.ArgumentParser(description='Sends OBSTACLE_DISTANCE_3D messages from an Intel RealSense D4xx camera to ArduPilot') 191 | parser.add_argument('--connect', 192 | help="Vehicle connection target string. If not specified, a default string will be used.") 193 | parser.add_argument('--baudrate', type=float, 194 | help="Vehicle connection baudrate. If not specified, a default value will be used.") 195 | parser.add_argument('--obstacle_distance_msg_hz', type=float, 196 | help="Update frequency for OBSTACLE_DISTANCE message. If not specified, a default value will be used.") 197 | parser.add_argument('--debug_enable',type=float, 198 | help="Enable debugging information") 199 | parser.add_argument('--camera_name', type=str, 200 | help="Camera name to be connected to. If not specified, any valid camera will be connected to randomly. E.g. type 'D435I' to look for Intel RealSense D435I.") 201 | args = parser.parse_args() 202 | 203 | connection_string = args.connect 204 | connection_baudrate = args.baudrate 205 | obstacle_distance_msg_hz = args.obstacle_distance_msg_hz 206 | debug_enable = args.debug_enable 207 | camera_name = args.camera_name 208 | 
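# Example invocation (assumed values; see the README "Steps to Launch"):
#   python3 d4xx_to_mavlink_3D.py --connect localhost:14551 --obstacle_distance_msg_hz 15 --camera_name D435I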
209 | def progress(string): 210 | print(string, file=sys.stdout) 211 | sys.stdout.flush() 212 | 213 | # Using default values if no inputs are specified 214 | if not connection_string: 215 | connection_string = connection_string_default 216 | progress("INFO: Using default connection_string %s" % connection_string) 217 | else: 218 | progress("INFO: Using connection_string %s" % connection_string) 219 | 220 | if not connection_baudrate: 221 | connection_baudrate = connection_baudrate_default 222 | progress("INFO: Using default connection_baudrate %s" % connection_baudrate) 223 | else: 224 | progress("INFO: Using connection_baudrate %s" % connection_baudrate) 225 | 226 | if not obstacle_distance_msg_hz: 227 | obstacle_distance_msg_hz = obstacle_distance_msg_hz_default 228 | progress("INFO: Using default obstacle_distance_msg_hz %s" % obstacle_distance_msg_hz) 229 | else: 230 | progress("INFO: Using obstacle_distance_msg_hz %s" % obstacle_distance_msg_hz) 231 | 232 | # The list of filters to be applied on the depth image 233 | for i in range(len(filters)): 234 | if filters[i][0] is True: 235 | progress("INFO: Applying: %s" % filters[i][1]) 236 | else: 237 | progress("INFO: NOT applying: %s" % filters[i][1]) 238 | 239 | if not debug_enable: 240 | debug_enable = debug_enable_default 241 | 242 | if debug_enable == 1: 243 | progress("INFO: Debugging option enabled") 244 | cv2.namedWindow(display_name, cv2.WINDOW_AUTOSIZE) 245 | else: 246 | progress("INFO: Debugging option DISABLED") 247 | 248 | ###################################################### 249 | ## Functions - MAVLink ## 250 | ###################################################### 251 | 252 | def mavlink_loop(conn, callbacks): 253 | '''a main routine for a thread; reads data from a mavlink connection, 254 | calling callbacks based on message type received. 255 | ''' 256 | interesting_messages = list(callbacks.keys()) 257 | while not mavlink_thread_should_exit: 258 | # send a heartbeat msg 259 | conn.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER, 260 | mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 261 | 0, 262 | 0, 263 | 0) 264 | m = conn.recv_match(type=interesting_messages, timeout=1, blocking=True) 265 | if m is None: 266 | continue 267 | callbacks[m.get_type()](m) 268 | 
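# Note: mavlink_loop() only sends heartbeats and dispatches received messages;
# the OBSTACLE_DISTANCE_3D stream itself is driven by the BackgroundScheduler
# set up in the main code below. recv_match(timeout=1) paces the heartbeat to
# roughly 1 Hz while no subscribed message types arrive.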
269 | def send_obstacle_distance_3D_message(): 270 | global current_time_ms, mavlink_obstacle_coordinates 271 | global last_obstacle_distance_sent_ms 272 | 273 | if current_time_ms == last_obstacle_distance_sent_ms: 274 | # no new frame 275 | return 276 | last_obstacle_distance_sent_ms = current_time_ms 277 | for i in range(9): 278 | conn.mav.obstacle_distance_3d_send( 279 | current_time_ms, # ms timestamp (time since system boot) 280 | 0, # sensor_type: not implemented in ArduPilot 281 | 0, # frame: not implemented in ArduPilot 282 | 65535, # unknown ID of the object; we are not tracking individual obstacles 283 | float(mavlink_obstacle_coordinates[i][0]), # X in NEU body frame 284 | float(mavlink_obstacle_coordinates[i][1]), # Y in NEU body frame 285 | float(mavlink_obstacle_coordinates[i][2]), # Z in NEU body frame 286 | float(DEPTH_RANGE_M[0]), # min range of sensor 287 | float(DEPTH_RANGE_M[1]) # max range of sensor 288 | ) 289 | 290 | def send_msg_to_gcs(text_to_be_sent): 291 | # MAV_SEVERITY: 0=EMERGENCY 1=ALERT 2=CRITICAL 3=ERROR, 4=WARNING, 5=NOTICE, 6=INFO, 7=DEBUG, 8=ENUM_END 292 | text_msg = 'D4xx: ' + text_to_be_sent 293 | conn.mav.statustext_send(mavutil.mavlink.MAV_SEVERITY_INFO, text_msg.encode()) 294 | progress("INFO: %s" % text_to_be_sent) 295 | 296 | # Request a timesync update from the flight controller, for future work. 297 | # TODO: Inspect the usage of timesync_update 298 | def update_timesync(ts=0, tc=0): 299 | if ts == 0: 300 | ts = int(round(time.time() * 1000)) 301 | conn.mav.timesync_send(tc, ts) 302 | 303 | 304 | ###################################################### 305 | ## Functions - D4xx cameras ## 306 | ###################################################### 307 | 308 | DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07","0B3A", "0B5C"] 309 | 310 | def find_device_that_supports_advanced_mode(): 311 | global device_id 312 | ctx = rs.context() 313 | ds5_dev = rs.device() 314 | devices = ctx.query_devices() 315 | for dev in devices: 316 | if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids: 317 | name = rs.camera_info.name 318 | if dev.supports(name): 319 | if not camera_name or (camera_name.lower() == dev.get_info(name).split()[2].lower()): 320 | progress("INFO: Found device that supports advanced mode: %s" % dev.get_info(name)) 321 | device_id = dev.get_info(rs.camera_info.serial_number) 322 | return dev 323 | raise Exception("No device that supports advanced mode was found") 324 | 325 | # Loop until we successfully enable advanced mode 326 | def realsense_enable_advanced_mode(advnc_mode): 327 | while not advnc_mode.is_enabled(): 328 | progress("INFO: Trying to enable advanced mode...") 329 | advnc_mode.toggle_advanced_mode(True) 330 | # At this point the device will disconnect and re-connect. 331 | progress("INFO: Sleeping for 5 seconds...") 332 | time.sleep(5) 333 | # The 'dev' object will become invalid and we need to initialize it again 334 | dev = find_device_that_supports_advanced_mode() 335 | advnc_mode = rs.rs400_advanced_mode(dev) 336 | progress("INFO: Advanced mode is %s" % ("enabled" if advnc_mode.is_enabled() else "disabled")) 337 | 
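# The files under presets/ (d4xx-default.json, d4xx-high-accuracy.json,
# d4xx-high-confidence.json) are advanced-mode parameter dumps in the JSON
# format consumed by load_json() below; presets in this format can be exported
# and edited with Intel's realsense-viewer tool (assumption: standard
# librealsense JSON preset format).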
331 | progress("INFO: Sleeping for 5 seconds...") 332 | time.sleep(5) 333 | # The 'dev' object will become invalid and we need to initialize it again 334 | dev = find_device_that_supports_advanced_mode() 335 | advnc_mode = rs.rs400_advanced_mode(dev) 336 | progress("INFO: Advanced mode is %s" "enabled" if advnc_mode.is_enabled() else "disabled") 337 | 338 | # Load the settings stored in the JSON file 339 | def realsense_load_settings_file(advnc_mode, setting_file): 340 | # Sanity checks 341 | if os.path.isfile(setting_file): 342 | progress("INFO: Setting file found %s" % setting_file) 343 | else: 344 | progress("INFO: Cannot find setting file %s" % setting_file) 345 | exit() 346 | 347 | if advnc_mode.is_enabled(): 348 | progress("INFO: Advanced mode is enabled") 349 | else: 350 | progress("INFO: Device does not support advanced mode") 351 | exit() 352 | 353 | # Input for load_json() is the content of the json file, not the file path 354 | with open(setting_file, 'r') as file: 355 | json_text = file.read().strip() 356 | 357 | advnc_mode.load_json(json_text) 358 | 359 | # Establish connection to the Realsense camera 360 | def realsense_connect(): 361 | global pipe, depth_scale, depth_intrinsics 362 | # Declare RealSense pipe, encapsulating the actual device and sensors 363 | pipe = rs.pipeline() 364 | 365 | # Configure image stream(s) 366 | config = rs.config() 367 | if device_id: 368 | # connect to a specific device ID 369 | config.enable_device(device_id) 370 | config.enable_stream(STREAM_TYPE[0], DEPTH_WIDTH, DEPTH_HEIGHT, FORMAT[0], FPS) 371 | config.enable_stream(STREAM_TYPE[1], COLOR_WIDTH, COLOR_HEIGHT, FORMAT[1], FPS) 372 | 373 | # Start streaming with requested config 374 | profile = pipe.start(config) 375 | 376 | # Getting the depth sensor's depth scale (see rs-align example for explanation) 377 | depth_sensor = profile.get_device().first_depth_sensor() 378 | depth_scale = depth_sensor.get_depth_scale() 379 | depth_intrinsics = profile.get_stream(STREAM_TYPE[0]).as_video_stream_profile().intrinsics 380 | 381 | progress("INFO: Depth scale is: %s" % depth_scale) 382 | 383 | def realsense_configure_setting(setting_file): 384 | device = find_device_that_supports_advanced_mode() 385 | advnc_mode = rs.rs400_advanced_mode(device) 386 | realsense_enable_advanced_mode(advnc_mode) 387 | realsense_load_settings_file(advnc_mode, setting_file) 388 | 389 | 390 | def convert_depth_to_phys_coord_using_realsense(depth_coordinates, depth): 391 | result = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [depth_coordinates[0], depth_coordinates[1]], depth) 392 | 393 | center_pixel = [depth_intrinsics.ppy/2,depth_intrinsics.ppx/2] 394 | result_center = rs.rs2_deproject_pixel_to_point(depth_intrinsics, center_pixel, depth) 395 | 396 | return result[2], (result[1] - result_center[1]), -(result[0]- result_center[0]) 397 | 398 | @njit 399 | def distances_from_depth_image(depth_mat, distances, min_depth_m, max_depth_m, depth, depth_coordinates): 400 | # Parameters for depth image 401 | depth_img_width = depth_mat.shape[1] 402 | depth_img_height = depth_mat.shape[0] 403 | 404 | # Parameters for obstacle distance message 405 | step_x = depth_img_width / 40 406 | step_y = depth_img_height/ 40 407 | 408 | 409 | sampling_width = int(1/3 * depth_img_width) 410 | sampling_height = int(1/3* depth_img_height) 411 | for i in range(9): 412 | if i%3 == 0 and i != 0: 413 | sampling_width = int(1/3* depth_img_width) 414 | sampling_height = sampling_height + int(1/3 * depth_img_height) 415 | 416 | x,y = 0,0 417 | x = 
sampling_width - int(1/3 * depth_img_width) 418 | # print(i, sampling_width, sampling_height) 419 | while x < sampling_width: 420 | x = x + step_x 421 | y = sampling_height - int(1/3* depth_img_height) 422 | while y < sampling_height: 423 | y = y + step_y 424 | x_pixel = 0 if x < 0 else depth_img_width-1 if x > depth_img_width -1 else int(x) 425 | y_pixel = 0 if y < 0 else depth_img_height-1 if y > depth_img_height -1 else int(y) 426 | point_depth = depth_mat[y_pixel,x_pixel] * depth_scale 427 | if point_depth < depth[i] and point_depth > min_depth_m and point_depth < max_depth_m: 428 | depth[i] = point_depth 429 | # print(point_depth) 430 | depth_coordinates[i] = [y_pixel,x_pixel] 431 | 432 | sampling_width = sampling_width + int(1/3* depth_img_width) 433 | 434 | # print (depth[1]) 435 | return depth_coordinates, depth 436 | 437 | ###################################################### 438 | ## Functions - RTSP Streaming ## 439 | ## Adapted from https://github.com/VimDrones/realsense-helper/blob/master/fisheye_stream_to_rtsp.py, credit to: @Huibean (GitHub) 440 | ###################################################### 441 | 442 | class SensorFactory(GstRtspServer.RTSPMediaFactory): 443 | def __init__(self, **properties): 444 | super(SensorFactory, self).__init__(**properties) 445 | self.number_frames = 0 446 | self.fps = FPS 447 | self.duration = 1 / self.fps * Gst.SECOND 448 | self.launch_string = 'appsrc name=source is-live=true block=true format=GST_FORMAT_TIME ' \ 449 | 'caps=video/x-raw,format=BGR,width={},height={},framerate={}/1 ' \ 450 | '! videoconvert ! video/x-raw,format=I420 ' \ 451 | '! x264enc speed-preset=ultrafast tune=zerolatency ' \ 452 | '! rtph264pay config-interval=1 name=pay0 pt=96'.format(COLOR_WIDTH, COLOR_HEIGHT, self.fps) 453 | 454 | def on_need_data(self, src, length): 455 | global rtsp_streaming_img 456 | frame = rtsp_streaming_img 457 | if frame is not None: 458 | data = frame.tobytes() 459 | buf = Gst.Buffer.new_allocate(None, len(data), None) 460 | buf.fill(0, data) 461 | buf.duration = self.duration 462 | timestamp = self.number_frames * self.duration 463 | buf.pts = buf.dts = int(timestamp) 464 | buf.offset = timestamp 465 | self.number_frames += 1 466 | retval = src.emit('push-buffer', buf) 467 | if retval != Gst.FlowReturn.OK: 468 | progress(retval) 469 | 470 | def do_create_element(self, url): 471 | return Gst.parse_launch(self.launch_string) 472 | 473 | def do_configure(self, rtsp_media): 474 | self.number_frames = 0 475 | appsrc = rtsp_media.get_element().get_child_by_name('source') 476 | appsrc.connect('need-data', self.on_need_data) 477 | 478 | 479 | class GstServer(GstRtspServer.RTSPServer): 480 | def __init__(self, **properties): 481 | super(GstServer, self).__init__(**properties) 482 | factory = SensorFactory() 483 | factory.set_shared(True) 484 | self.get_mount_points().add_factory(RTSP_MOUNT_POINT, factory) 485 | self.attach(None) 486 | 487 | def get_local_ip(): 488 | local_ip_address = "127.0.0.1" 489 | try: 490 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 491 | s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets 492 | local_ip_address = s.getsockname()[0] 493 | except: 494 | local_ip_address = socket.gethostbyname(socket.gethostname()) 495 | return local_ip_address 496 | 497 | 498 | ###################################################### 499 | ## Main code starts here ## 500 | ###################################################### 501 | 502 | try: 503 | # Note: 'version' attribute is supported from pyrealsense2 2.31 
onwards and might require building from source 504 | progress("INFO: pyrealsense2 version: %s" % str(rs.__version__)) 505 | except Exception: 506 | # fail silently 507 | pass 508 | 509 | progress("INFO: Starting Vehicle communications") 510 | print(connection_string) 511 | conn = mavutil.mavlink_connection( 512 | device = str(connection_string), 513 | autoreconnect = True, 514 | source_system = 1, 515 | source_component = 93, 516 | baud=connection_baudrate, 517 | force_connected=True, 518 | ) 519 | mavlink_callbacks = { 520 | } 521 | mavlink_thread = threading.Thread(target=mavlink_loop, args=(conn, mavlink_callbacks)) 522 | mavlink_thread.start() 523 | # connecting and configuring the camera is a little hit-and-miss. 524 | # Start a timer and rely on a restart of the script to get it working. 525 | # Configuring the camera appears to block all threads, so we can't do 526 | # this internally. 527 | 528 | # send_msg_to_gcs('Setting timer...') 529 | signal.setitimer(signal.ITIMER_REAL, 5) # seconds... 530 | 531 | send_msg_to_gcs('Connecting to camera...') 532 | if USE_PRESET_FILE: 533 | realsense_configure_setting(PRESET_FILE) 534 | realsense_connect() 535 | send_msg_to_gcs('Camera connected.') 536 | 537 | signal.setitimer(signal.ITIMER_REAL, 0) # cancel alarm 538 | 539 | 540 | # Send MAVLink messages in the background at pre-determined frequencies 541 | sched = BackgroundScheduler() 542 | 543 | if enable_msg_obstacle_distance: 544 | sched.add_job(send_obstacle_distance_3D_message, 'interval', seconds = 1/obstacle_distance_msg_hz) 545 | send_msg_to_gcs('Sending obstacle distance messages to FCU') 546 | else: 547 | send_msg_to_gcs('Nothing to do. Check params to enable something') 548 | pipe.stop() 549 | conn.close() 550 | progress("INFO: Realsense pipe and vehicle object closed.") 551 | sys.exit() 552 | 553 | glib_loop = None 554 | if RTSP_STREAMING_ENABLE is True: 555 | send_msg_to_gcs('RTSP at rtsp://' + get_local_ip() + ':' + RTSP_PORT + RTSP_MOUNT_POINT) 556 | Gst.init(None) 557 | server = GstServer() 558 | glib_loop = GLib.MainLoop() 559 | glib_thread = threading.Thread(target=glib_loop.run, args=()) 560 | glib_thread.start() 561 | else: 562 | send_msg_to_gcs('RTSP not streaming') 563 | 564 | sched.start() 565 | 566 | # gracefully terminate the script if an interrupt signal (e.g. ctrl-c) 567 | # is received. This is considered to be abnormal termination. 568 | main_loop_should_quit = False 569 | def sigint_handler(sig, frame): 570 | global main_loop_should_quit 571 | main_loop_should_quit = True 572 | signal.signal(signal.SIGINT, sigint_handler) 573 | 574 | # gracefully terminate the script if a terminate signal is received 575 | # (e.g. kill -TERM). 576 | def sigterm_handler(sig, frame): 577 | global main_loop_should_quit 578 | main_loop_should_quit = True 579 | global exit_code 580 | exit_code = 0 581 | 582 | signal.signal(signal.SIGTERM, sigterm_handler) 583 | 584 | # Beginning of the main loop 585 | last_time = time.time() 586 | try: 587 | while not main_loop_should_quit: 588 | # This call waits until a new coherent set of frames is available on a device 589 | # Calls to get_frame_data(...) and get_frame_timestamp(...) on a device will return stable values until wait_for_frames(...) 
is called 590 | frames = pipe.wait_for_frames() 591 | align = rs.align(rs.stream.color) 592 | frames = align.process(frames) 593 | depth_frame = frames.get_depth_frame() 594 | color_frame = frames.get_color_frame() 595 | 596 | if not depth_frame or not color_frame: 597 | continue 598 | 599 | # Store the timestamp for MAVLink messages 600 | current_time_ms = current_milli_time() 601 | 602 | # Apply the filters 603 | filtered_frame = depth_frame 604 | for i in range(len(filters)): 605 | if filters[i][0] is True: 606 | filtered_frame = filters[i][2].process(filtered_frame) 607 | 608 | # Extract depth in matrix form 609 | depth_data = filtered_frame.as_frame().get_data() 610 | depth_mat = np.asanyarray(depth_data) 611 | 612 | # Create obstacle distance data from depth image 613 | depth = np.ones((9,), dtype=np.float) * (DEPTH_RANGE_M[1] + 1) 614 | depth_coordinates = np.ones((9,2), dtype=np.uint16) * (default_large_dist) 615 | obstacle_list, depth_list = distances_from_depth_image(depth_mat, distances, DEPTH_RANGE_M[0], DEPTH_RANGE_M[1], depth, depth_coordinates) 616 | 617 | coordinate_list = np.ones((9,3), dtype = np.float) * (default_large_dist) 618 | for i in range(len(depth_list)): 619 | if (depth_list[i] < DEPTH_RANGE_M[1]): 620 | coordinates = convert_depth_to_phys_coord_using_realsense(obstacle_list[i],depth_list[i]) 621 | coordinate_list[i] = coordinates 622 | 623 | mavlink_obstacle_coordinates = coordinate_list 624 | 625 | if RTSP_STREAMING_ENABLE is True: 626 | color_image = np.asanyarray(color_frame.get_data()) 627 | rtsp_streaming_img = color_image 628 | 629 | if debug_enable == 1: 630 | # Prepare the data 631 | input_image = np.asanyarray(colorizer.colorize(depth_frame).get_data()) 632 | output_image = np.asanyarray(colorizer.colorize(filtered_frame).get_data()) 633 | color_image = np.asanyarray(color_frame.get_data()) 634 | 635 | # divide view into 3x3 matrix 636 | pxstep = int(depth_mat.shape[1]/3) 637 | pystep = int(depth_mat.shape[0]/3) 638 | gx = pxstep 639 | gy = pystep 640 | while gx < depth_mat.shape[1]: 641 | cv2.line(output_image, (gx, 0), (gx, depth_mat.shape[0]), color=(0, 0, 0), thickness=1) 642 | gx += pxstep 643 | while gy < depth_mat.shape[0]: 644 | cv2.line(output_image, (0, gy), (depth_mat.shape[1], gy), color=(0, 0, 0),thickness=1) 645 | gy += pystep 646 | 647 | for i in range(len(depth_list)): 648 | # output_image = cv2.putText(output_image, str(round(depth_list[i],2)), (int(pxstep*(1/4 + i%3)),int(pystep*(1/3 + m.floor(i/3)))), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,255), 2) 649 | if (depth_list[i] < DEPTH_RANGE_M[1]): 650 | output_image = cv2.putText(output_image, str(round(coordinate_list[i][0],2)), (int(pxstep*(1/4 + i%3)),int(pystep*(1/3 + m.floor(i/3)))), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,255), 2) 651 | 652 | display_image = np.hstack((input_image, cv2.resize(output_image, (DEPTH_WIDTH, DEPTH_HEIGHT)))) 653 | cv2.namedWindow('Coloured Frames', cv2.WINDOW_AUTOSIZE) 654 | cv2.imshow('Coloured Frames', cv2.resize(color_image, (int(DEPTH_WIDTH), int(DEPTH_HEIGHT)))) 655 | 656 | # Put the fps in the corner of the image 657 | processing_speed = 1 / (time.time() - last_time) 658 | text = ("%0.2f" % (processing_speed,)) + ' fps' 659 | textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)[0] 660 | cv2.putText(display_image, 661 | text, 662 | org = (int((display_image.shape[1] - textsize[0]/2)), int((textsize[1])/2)), 663 | fontFace = cv2.FONT_HERSHEY_SIMPLEX, 664 | fontScale = 0.5, 665 | thickness = 1, 666 | color = (255, 255, 255)) 667 | 668 | 669 | 
670 | # Show the images 671 | cv2.imshow(display_name, display_image) 672 | cv2.waitKey(1) 673 | 674 | last_time = time.time() 675 | 676 | except Exception as e: 677 | # report the error to the console before cleaning up 678 | progress("ERROR: %s" % e) 679 | 680 | except: 681 | send_msg_to_gcs('ERROR: Depth camera disconnected') 682 | 683 | finally: 684 | progress('Closing the script...') 685 | # start a timer in case stopping everything nicely doesn't work. 686 | signal.setitimer(signal.ITIMER_REAL, 5) # seconds... 687 | if glib_loop is not None: 688 | glib_loop.quit() 689 | glib_thread.join() 690 | pipe.stop() 691 | mavlink_thread_should_exit = True 692 | conn.close() 693 | progress("INFO: Realsense pipe and vehicle object closed.") 694 | sys.exit(exit_code) 695 | --------------------------------------------------------------------------------