├── .dockerignore ├── cloud_dora_run.sh ├── dora_run.sh ├── graphs ├── tutorials │ ├── webcam.yaml │ ├── webcam_yolov5.yaml │ ├── webcam_depth_frame.yaml │ ├── webcam_midas_frame.yaml │ └── webcam_full.yaml └── oasis │ ├── oasis_agent_yolov5.yaml │ ├── oasis_agent_obstacle_location.yaml │ ├── oasis_agent_planning.yaml │ ├── oasis_agent.yaml │ └── oasis_full.yaml ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── NOTICE.md ├── install_requirements.txt ├── ros ├── mavlink_node.py ├── point_cloud.py ├── mavros_node.py ├── ndt_pose.py ├── mavros_node_tracing.py └── ndt_pose_tracing.py ├── upgrade_dora.sh ├── requirements.txt ├── operators ├── webcam_op.py ├── traffic_sign_op.py ├── dora_tracing.py ├── strong_sort_op.py ├── yolov5_op.py ├── midas_op.py ├── imfnet_op.py ├── hybrid_astar_op.py ├── dora_utils.py ├── pid_control_op.py ├── obstacle_location_op.py ├── yolop_op.py ├── fot_op.py └── plot.py ├── README.md ├── carla ├── dora_tracing.py ├── carla_gps_op.py ├── carla_source_node.py ├── _dora_utils.py ├── oasis_agent.py └── _generate_world.py ├── .gitignore ├── docs └── src │ └── logo.svg └── LICENSE /.dockerignore: -------------------------------------------------------------------------------- 1 | nodes/__pycache__ 2 | docs 3 | **/*.avi 4 | **/*.pt -------------------------------------------------------------------------------- /cloud_dora_run.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | # export DESTINATION=`cat $DEST` 3 | # export DORA_JAEGER_TRACING=172.17.0.1:6831 4 | 5 | /home/dora/workspace/simulate/team_code/dora-drives/upgrade_dora.sh 6 | 7 | dora up 8 | dora start $YAML --attach --hot-reload -------------------------------------------------------------------------------- /dora_run.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | export DESTINATION=`cat $DEST` 3 | # export DORA_JAEGER_TRACING=172.17.0.1:6831 4 | 5 | /home/dora/workspace/simulate/team_code/dora-drives/upgrade_dora.sh 6 | 7 | dora up 8 | dora start $YAML --attach --hot-reload 9 | 10 | -------------------------------------------------------------------------------- /graphs/tutorials/webcam.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: webcam 3 | operator: 4 | python: ../../operators/webcam_op.py 5 | inputs: 6 | tick: dora/timer/millis/50 7 | outputs: 8 | - image 9 | env: 10 | DEVICE_INDEX: 0 11 | 12 | - id: plot 13 | operator: 14 | python: ../../operators/plot.py 15 | inputs: 16 | image: webcam/image -------------------------------------------------------------------------------- /graphs/tutorials/webcam_yolov5.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: webcam 3 | operator: 4 | python: ../../operators/webcam_op.py 5 | inputs: 6 | tick: dora/timer/millis/100 7 | outputs: 8 | - image 9 | env: 10 | DEVICE_INDEX: 0 11 | 12 | - id: yolov5 13 | operator: 14 | outputs: 15 | - bbox 16 | inputs: 17 | image: webcam/image 18 | python: ../../operators/yolov5_op.py 19 | 20 | - id: plot 21 | operator: 22 | python: ../../operators/plot.py 23 | inputs: 24 | image: webcam/image 25 | obstacles_bbox: yolov5/bbox 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /graphs/tutorials/webcam_depth_frame.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: webcam 3 | operator: 4 | python: ../../operators/webcam_op.py 5 | inputs: 6 | tick: dora/timer/millis/100 7 | outputs: 8 | - image 9 | env: 10 | DEVICE_INDEX: 0 11 | 12 | # - id: midas 13 | # operator: 14 | # outputs: 15 | # - depth_frame 16 | # inputs: 17 | # image: webcam/image 18 | # python: ../../operators/midas_op.py 19 | # env: 20 | # MIDAS_PATH: $HOME/Documents/CONTRIB/MiDaS 21 | # PYTORCH_DEVICE: "cuda" 22 | # MIDAS_WEIGHT_PATH: $HOME/Documents/CONTRIB/dpt_beit_base_384.pt.1 23 | 24 | - id: plot 25 | operator: 26 | python: ../../operators/plot.py 27 | inputs: 28 | # depth_frame: midas/depth_frame 29 | image: webcam/image -------------------------------------------------------------------------------- /graphs/tutorials/webcam_midas_frame.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: webcam 3 | operator: 4 | python: ../../operators/webcam_op.py 5 | inputs: 6 | tick: dora/timer/millis/100 7 | outputs: 8 | - image 9 | env: 10 | DEVICE_INDEX: 0 11 | 12 | - id: midas_op 13 | operator: 14 | outputs: 15 | - depth_frame 16 | inputs: 17 | image: webcam/image 18 | python: ../../operators/midas_op.py 19 | env: 20 | PYTORCH_DEVICE: "cuda" 21 | MIDAS_PATH: $DORA_DEP_HOME/dependencies/MiDaS/ 22 | MIDAS_WEIGHT_PATH: $DORA_DEP_HOME/dependencies/MiDaS/weights/midas_v21_small_256.pt 23 | MODEL_TYPE: "MiDaS_small" 24 | MODEL_NAME: "MiDaS_small" 25 | 26 | - id: plot 27 | operator: 28 | python: ../../operators/plot.py 29 | inputs: 30 | image: midas_op/depth_frame -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Dora start daemon: `dora up` 16 | 2. Start a new dataflow: `dora start dataflow.yaml` 17 | 3. Stop dataflow: `dora stop` 18 | 4. Destroy dataflow: `dora destroy` 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots or Video** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Environments (please complete the following information):** 27 | - System info: [use `uname --all` on LInux] 28 | - Dora version [use `dora --version` and `pip show dora-rs`] 29 | 30 | **Additional context** 31 | Add any other context about the problem here. 32 | -------------------------------------------------------------------------------- /NOTICE.md: -------------------------------------------------------------------------------- 1 | ## Copyright 2 | 3 | All content is the property of the respective authors or their employers. For more information regarding authorship of content, please consult the listed source code repository logs. 4 | 5 | ## Attribution 6 | 7 | This crate was largely inspired by **[`pylot`](https://github.com/erdos-project/pylot)** project developed in the [RISE Lab](https://rise.cs.berkeley.edu) at [UC Berkeley](https://berkeley.edu). 
We recommend to check out their paper [Pylot: A Modular Platform for Exploring Latency-Accuracy Tradeoffs in Autonomous Vehicles](https://ieeexplore.ieee.org/document/9561747/) for more information. 8 | 9 | ## License 10 | 11 | This project is licensed under the Apache License, Version 2.0 ([LICENSE](LICENSE) or ). Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be licensed as above, without any additional terms or conditions. -------------------------------------------------------------------------------- /install_requirements.txt: -------------------------------------------------------------------------------- 1 | influxdb-client 2 | opentelemetry-api 3 | opentelemetry-sdk 4 | opentelemetry-exporter-jaeger-thrift 5 | scikit-learn==1.0.2 6 | pytest 7 | absl-py 8 | gdown 9 | pyarrow 10 | # Old dependencies to double check 11 | ultralytics 12 | cvxpy 13 | erdos>=0.3.1 14 | lapsolver 15 | motmetrics 16 | numpy==1.21.6 # Ignore warning, as 1.22 requires python 3.8 17 | open3d 18 | opencv-python>=4.1.1 19 | opencv-contrib-python>=4.1.1 20 | pillow>=6.2.2 21 | pycocotools 22 | pytest 23 | scikit-image==0.18.3 24 | scipy==1.11.1 25 | shapely==1.6.4 26 | # tensorflow-gpu>=2.7.1 27 | ##### Tracking dependencies ##### 28 | Cython 29 | filterpy==1.4.1 30 | imgaug==0.2.8 31 | matplotlib==3.4.3 32 | nonechucks==0.3.1 33 | nuscenes-devkit 34 | progress 35 | pyquaternion 36 | ##### CARLA dependencies ##### 37 | networkx==2.2 38 | ninja 39 | dora-rs==0.2.5 40 | tensorboard 41 | 42 | ##### MiDaS #### 43 | timm==0.6.12 44 | imutils==0.5.4 45 | einops==0.6.0 46 | carla==0.9.13 47 | -------------------------------------------------------------------------------- /ros/mavlink_node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import asyncio 3 | from mavsdk import System 4 | from mavsdk.offboard import OffboardError, VelocityBodyYawspeed 5 | from dora import Node 6 | import numpy as np 7 | 8 | node = Node() 9 | 10 | 11 | async def run(): 12 | drone = System() 13 | print("Waiting for drone...") 14 | await drone.connect(system_address="serial:///dev/ttyACM0:57600") 15 | print("Waiting for drone...") 16 | async for state in drone.core.connection_state(): 17 | if state.is_connected: 18 | print(f"Drone discovered with UUID: {state}") 19 | break 20 | 21 | for input_id, value, metadata in node: 22 | 23 | [vx, vy, vz, yaw] = np.frombuffer(value) 24 | print(f"vel: {vx, vy, vz, yaw}") 25 | await drone.offboard.set_velocity_body( 26 | VelocityBodyYawspeed(0.1, 0.1, 0, 45) 27 | ) 28 | print("awaited!") 29 | # await drone.offboard.set_velocity_body(VelocityBodyYawspeed(0, 0, 0.0, 0.0)) 30 | # await asyncio.sleep(0.5) 31 | 32 | print("-- Landing") 33 | 34 | 35 | if __name__ == "__main__": 36 | loop = asyncio.get_event_loop() 37 | loop.run_until_complete(run()) 38 | -------------------------------------------------------------------------------- /ros/point_cloud.py: -------------------------------------------------------------------------------- 1 | #!/home/nvidia/Documents/dora-rover/venv/bin/python3 2 | import sensor_msgs.point_cloud2 as pc2 3 | import rospy 4 | from sensor_msgs.msg import PointCloud2 5 | import numpy as np 6 | from dora import Node 7 | import time 8 | 9 | node = Node() 10 | 11 | 12 | start = time.time() 13 | 14 | 15 | def callback(data): 16 | global start 17 | gen = pc2.read_points(data, skip_nans=True, field_names=("x", "y", 
"z")) 18 | data = np.array(list(gen)) 19 | point_cloud = data[:, :3] 20 | 21 | # To camera coordinate 22 | # The latest coordinate space is the velodyne space. 23 | point_cloud = np.dot( 24 | point_cloud, 25 | np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]), 26 | ) 27 | point_cloud = point_cloud[np.where(point_cloud[:, 2] > 0.1)] 28 | # inds0 = np.random.choice(point_cloud.shape[0], min(1500, point_cloud.shape[0]), replace=False) 29 | # point_cloud = point_cloud[inds0] 30 | 31 | if (time.time() - start > 1) and len(point_cloud) > 0: 32 | node.send_output("lidar_pc", point_cloud.tobytes()) 33 | start = time.time() 34 | 35 | 36 | rospy.init_node("listener", anonymous=True) 37 | rospy.Subscriber("velodyne_points", PointCloud2, callback) 38 | rospy.spin() 39 | -------------------------------------------------------------------------------- /graphs/oasis/oasis_agent_yolov5.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: oasis_agent 3 | custom: 4 | inputs: 5 | tick: dora/timer/millis/100 6 | outputs: 7 | - position 8 | - speed 9 | - image 10 | - objective_waypoints 11 | - lidar_pc 12 | - opendrive 13 | source: shell 14 | # With Carla_source_node 15 | args: python3 ../../carla/carla_source_node.py 16 | # 17 | # Or with the OASIS AGENT 18 | # 19 | # args: > 20 | # python3 $SIMULATE --output 21 | # --oasJson --criteriaConfig $CRITERIA 22 | # --openscenario $XOSC 23 | # --agent $TEAM_AGENT 24 | # --agentConfig $TEAM_AGENT_CONF 25 | # --destination $DESTINATION 26 | 27 | - id: yolov5 28 | operator: 29 | outputs: 30 | - bbox 31 | inputs: 32 | image: oasis_agent/image 33 | python: ../../operators/yolov5_op.py 34 | env: 35 | # CUDA_VISIBLE_DEVICES: "" 36 | # PYTORCH_DEVICE: cuda # Uncomment if you want to use CUDA 37 | # YOLOV5_PATH: # /PATH/TO/YOLOv5 Ex: $DORA_DEP_HOME/dependencies/yolov5 38 | # YOLOV5_WEIGHT_PATH: : # /PATH/TO/YOLOv5 Ex: $ORA_DEP_HOME/dependencies/yolov5/yolov5n.pt 39 | 40 | - id: plot 41 | operator: 42 | python: ../../operators/plot.py 43 | inputs: 44 | image: oasis_agent/image 45 | obstacles_bbox: yolov5/bbox 46 | position: oasis_agent/position 47 | -------------------------------------------------------------------------------- /upgrade_dora.sh: -------------------------------------------------------------------------------- 1 | 2 | # Define the desired version of pip 3 | desired_version="0.2.6" 4 | 5 | # Check if pip is installed and get its version 6 | if command -v dora >/dev/null 2>&1; then 7 | installed_version=$(dora --version | awk '{print $2}') 8 | if [ "$installed_version" = "$desired_version" ]; then 9 | echo "dora version $desired_version is installed" 10 | else 11 | echo "dora version $installed_version is installed (expected $desired_version)" 12 | DORA_VERSION=v0.2.6 # Check for the latest release 13 | ARCHITECTURE=$(uname -m) 14 | wget https://github.com/dora-rs/dora/releases/download/${DORA_VERSION}/dora-${DORA_VERSION}-${ARCHITECTURE}-Linux.zip 15 | sudo unzip -o -q dora-${DORA_VERSION}-${ARCHITECTURE}-Linux.zip -d /bin/ 16 | pip install dora-rs==${DORA_VERSION} ## For Python API 17 | 18 | ## Update frenet 19 | pip install git+https://github.com/haixuanTao/frenet_optimal_trajectory_planner.git 20 | 21 | fi 22 | else 23 | echo "dora is not installed" 24 | DORA_VERSION=v0.2.6 # Check for the latest release 25 | ARCHITECTURE=$(uname -m) 26 | sudo wget https://github.com/dora-rs/dora/releases/download/${DORA_VERSION}/dora-${DORA_VERSION}-${ARCHITECTURE}-Linux.zip 27 | sudo unzip -o -q 
dora-${DORA_VERSION}-${ARCHITECTURE}-Linux.zip -d /bin/ 28 | pip install dora-rs==${DORA_VERSION} ## For Python API 29 | 30 | ## Update frenet 31 | pip install git+https://github.com/haixuanTao/frenet_optimal_trajectory_planner.git 32 | fi 33 | -------------------------------------------------------------------------------- /graphs/tutorials/webcam_full.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: webcam 3 | operator: 4 | python: ../../operators/webcam_op.py 5 | inputs: 6 | tick: dora/timer/millis/100 7 | outputs: 8 | - image 9 | env: 10 | DEVICE_INDEX: 0 11 | 12 | - id: yolov5 13 | operator: 14 | outputs: 15 | - bbox 16 | inputs: 17 | image: webcam/image 18 | python: ../../operators/yolov5_op.py 19 | 20 | # - id: yolop 21 | # operator: 22 | # outputs: 23 | # - lanes 24 | # - drivable_area 25 | # inputs: 26 | # image: webcam/image 27 | # python: ../../operators/yolop_op.py 28 | 29 | ## Commented out as it takes a lot of GPU memory. 30 | #- id: traffic_sign 31 | #operator: 32 | #outputs: 33 | #- bbox 34 | #inputs: 35 | #image: webcam/image 36 | #python: operators/traffic_sign_op.py 37 | 38 | - id: strong_sort 39 | operator: 40 | outputs: 41 | - obstacles_id 42 | inputs: 43 | image: webcam/image 44 | obstacles_bbox: yolov5/bbox 45 | python: ../../operators/strong_sort_op.py 46 | 47 | - id: plot 48 | operator: 49 | python: ../../operators/plot.py 50 | inputs: 51 | image: webcam/image 52 | obstacles_bbox: yolov5/bbox 53 | # traffic_sign_bbox: traffic_sign/bbox 54 | # lanes: yolop/lanes 55 | # drivable_area: yolop/drivable_area 56 | obstacles_id: strong_sort/obstacles_id 57 | -------------------------------------------------------------------------------- /graphs/oasis/oasis_agent_obstacle_location.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: oasis_agent 3 | custom: 4 | inputs: 5 | tick: dora/timer/millis/100 6 | outputs: 7 | - position 8 | - speed 9 | - image 10 | - objective_waypoints 11 | - lidar_pc 12 | - opendrive 13 | source: shell 14 | # With Carla_source_node 15 | args: python3 ../../carla/carla_source_node.py 16 | # 17 | # Or with the OASIS AGENT 18 | # 19 | # args: > 20 | # python3 $SIMULATE --output 21 | # --oasJson --criteriaConfig $CRITERIA 22 | # --openscenario $XOSC 23 | # --agent $TEAM_AGENT 24 | # --agentConfig $TEAM_AGENT_CONF 25 | # --destination $DESTINATION 26 | 27 | - id: yolov5 28 | operator: 29 | outputs: 30 | - bbox 31 | inputs: 32 | image: oasis_agent/image 33 | python: ../../operators/yolov5_op.py 34 | 35 | - id: obstacle_location_op 36 | operator: 37 | outputs: 38 | - obstacles 39 | inputs: 40 | lidar_pc: oasis_agent/lidar_pc 41 | obstacles_bbox: yolov5/bbox 42 | position: oasis_agent/position 43 | python: ../../operators/obstacle_location_op.py 44 | 45 | - id: plot 46 | operator: 47 | python: ../../operators/plot.py 48 | inputs: 49 | image: oasis_agent/image 50 | obstacles_bbox: yolov5/bbox 51 | position: oasis_agent/position 52 | obstacles: obstacle_location_op/obstacles 53 | -------------------------------------------------------------------------------- /ros/mavros_node.py: -------------------------------------------------------------------------------- 1 | #!/home/nvidia/Documents/dora-rover/venv/bin/python3 2 | # license removed for brevity 3 | import rospy 4 | from mavros_msgs.msg import PositionTarget, OverrideRCIn 5 | from geometry_msgs.msg import TwistStamped 6 | from dora import Node 7 | import numpy as np 8 | import time 9 | 10 | 
node = Node() 11 | 12 | TARGET_SPEED = 1100 13 | 14 | 15 | def talker(): 16 | pub = rospy.Publisher("mavros/rc/override", OverrideRCIn, queue_size=1) 17 | rospy.init_node("talker", anonymous=True) 18 | for input_id, value, metadata in node: 19 | [angle] = np.frombuffer(value) 20 | target = OverrideRCIn() 21 | if angle < np.pi / 2 and angle > -np.pi / 2: 22 | target_rotation = int((angle + np.pi / 2) / (np.pi) * 1000) + 1000 23 | target.channels[0] = target_rotation 24 | target.channels[2] = TARGET_SPEED 25 | elif angle < -np.pi / 2: 26 | target_rotation = 1000 27 | target.channels[0] = target_rotation 28 | target.channels[1] = TARGET_SPEED 29 | else: 30 | target.channels[0] = 2000 31 | target.channels[1] = TARGET_SPEED 32 | 33 | # target.channels[2] = 100 34 | # target = PositionTarget() 35 | # target.coordinate_frame = 9 36 | # target.header.stamp = rospy.get_rostime() 37 | # target.type_mask = int("110111111100",2) 38 | # target.position.x = 0.9 39 | # target.position.y = -0.9 40 | # target.velocity.x = 0.1 41 | # target.velocity.y = -0.1 42 | # target.yaw = yaw 43 | pub.publish(target) 44 | print("stopping") 45 | target = OverrideRCIn() 46 | target.channels[0] = 2000 47 | target.channels[1] = 120 48 | 49 | pub.publish(target) 50 | print("stopped") 51 | 52 | # rate.sleep() 53 | 54 | 55 | if __name__ == "__main__": 56 | try: 57 | talker() 58 | except rospy.ROSInterruptException: 59 | pass 60 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # YOLOv5 requirements 2 | # Usage: pip install -r requirements.txt 3 | 4 | # Base ---------------------------------------- 5 | matplotlib>=3.2.2 6 | Pillow>=7.1.2 7 | PyYAML>=5.3.1 8 | requests>=2.23.0 9 | scipy>=1.4.1 10 | tqdm>=4.64.0 11 | protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 12 | 13 | # wandb 14 | # clearml 15 | 16 | # Plotting ------------------------------------ 17 | pandas>=1.1.4 18 | seaborn>=0.11.0 19 | 20 | # Export -------------------------------------- 21 | # coremltools>=5.2 # CoreML export 22 | # onnx>=1.9.0 # ONNX export 23 | # onnx-simplifier>=0.4.1 # ONNX simplifier 24 | # nvidia-pyindex # TensorRT export 25 | # nvidia-tensorrt # TensorRT export 26 | # scikit-learn==0.19.2 # CoreML quantization 27 | # tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) 28 | # tensorflowjs>=3.9.0 # TF.js export 29 | # openvino-dev # OpenVINO export 30 | 31 | # Extras -------------------------------------- 32 | ipython # interactive notebook 33 | psutil # system utilization 34 | thop>=0.1.1 # FLOPs computation 35 | # albumentations>=1.0.3 36 | # pycocotools>=2.0 # COCO mAP 37 | # roboflow 38 | 39 | # YOLOP 40 | yacs 41 | Cython 42 | PyYAML>=5.3 43 | prefetch_generator 44 | imageio 45 | 46 | # strong_sort ----------------------------------- 47 | 48 | Cython 49 | h5py 50 | six 51 | tb-nightly 52 | future 53 | yacs 54 | gdown 55 | flake8 56 | yapf 57 | isort==4.3.21 58 | imageio 59 | torchreid @ https://github.com/KaiyangZhou/deep-person-reid/archive/master.zip 60 | git+https://github.com/haixuanTao/yolov5_strongsort_package.git 61 | 62 | # MinkowskiEngine --- 63 | 64 | ninja 65 | 66 | # IMFnet 67 | 68 | easydict==1.9 69 | einops==0.3.2 70 | joblib==1.2.0 71 | nibabel==3.2.1 72 | open3d 73 | pyflann==1.6.14 74 | pyflann_py3==0.1.0 75 | tensorboardX==2.5.1 76 | # git+https://github.com/haixuanTao/IMFNet 77 | 78 | # Traffic sign 79 | 80 | # 
git+https://github.com/haixuanTao/yolov7.git 81 | 82 | git+https://github.com/haixuanTao/frenet_optimal_trajectory_planner.git -------------------------------------------------------------------------------- /operators/webcam_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Webcam operator 3 | 4 | ## Inputs 5 | 6 | - tick: Dora tick. 7 | 8 | ## Outputs 9 | 10 | - image: HEIGHTxWIDTHxBGRA array. 11 | 12 | ## Configuration 13 | 14 | Uses the cv2 package to capture frames from the webcam. The webcam device number can be configured using `DEVICE_INDEX`. 15 | 16 | ## Graph Description 17 | 18 | ```yaml 19 | - id: webcam 20 | operator: 21 | python: ../../operators/webcam_op.py 22 | inputs: 23 | tick: dora/timer/millis/50 24 | outputs: 25 | - image 26 | ``` 27 | """ 28 | 29 | from typing import Callable 30 | from dora import DoraStatus 31 | import os 32 | import cv2 33 | import numpy as np 34 | import pyarrow as pa 35 | 36 | pa.array([]) 37 | 38 | OUTPUT_WIDTH = 1920 39 | OUTPUT_HEIGHT = 1080 40 | DEVICE_INDEX = os.environ.get("DEVICE_INDEX", "0") 41 | 42 | 43 | class Operator: 44 | """ 45 | Capture frames from the webcam and send them as the `image` output. 46 | """ 47 | 48 | def __init__(self): 49 | self.video_capture = cv2.VideoCapture(int(DEVICE_INDEX)) 50 | self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, OUTPUT_WIDTH) 51 | self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, OUTPUT_HEIGHT) 52 | 53 | def on_event( 54 | self, 55 | dora_event: dict, 56 | send_output: Callable[[str, bytes], None], 57 | ) -> DoraStatus: 58 | if dora_event["type"] == "INPUT": 59 | return self.on_input(dora_event, send_output) 60 | return DoraStatus.CONTINUE 61 | 62 | def on_input( 63 | self, 64 | dora_input: dict, 65 | send_output: Callable[[str, bytes], None], 66 | ): 67 | ret, frame = self.video_capture.read() 68 | if ret: 69 | frame = cv2.resize(frame, (OUTPUT_WIDTH, OUTPUT_HEIGHT)) 70 | frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA) 71 | send_output( 72 | "image", 73 | pa.array(frame.ravel()), 74 | dora_input["metadata"], 75 | ) 76 | else: 77 | print("Could not read frame from webcam.") 78 | return DoraStatus.CONTINUE 79 | 80 | def drop_operator(self): 81 | self.video_capture.release() 82 | -------------------------------------------------------------------------------- /ros/ndt_pose.py: -------------------------------------------------------------------------------- 1 | #!/home/nvidia/Documents/dora-rover/venv/bin/python3 2 | 3 | import sensor_msgs.point_cloud2 as pc2 4 | import rospy 5 | from sensor_msgs.msg import PointCloud2, Imu 6 | from geometry_msgs.msg import PoseStamped 7 | import numpy as np 8 | from dora import Node 9 | import time 10 | from scipy.spatial.transform import Rotation as R 11 | 12 | node = Node() 13 | initial_orientation = None 14 | orientation = None 15 | 16 | GOAL_LOCATION = np.array([[0, 0, 0], [1, 0, 0], [1, 8, 0]]) 17 | 18 | 19 | def imu_callback(data): 20 | 21 | global initial_orientation 22 | global orientation 23 | 24 | if initial_orientation is None: 25 | initial_orientation = R.from_quat( 26 | [ 27 | data.orientation.x, 28 | data.orientation.y, 29 | data.orientation.z, 30 | data.orientation.w, 31 | ] 32 | ) 33 | print( 34 | f"INITIAL ORIENTATION: {initial_orientation.as_euler('xyz', degrees=True)}" 35 | ) 36 | 37 | abs_goal_location = initial_orientation.apply(GOAL_LOCATION) 38 | abs_goal_location = abs_goal_location[:, :2] 39 | node.send_output("gps_waypoints", abs_goal_location.tobytes()) 40 | 41 | orientation = [ 42 | data.orientation.x, 43 |
data.orientation.y, 44 | data.orientation.z, 45 | data.orientation.w, 46 | ] 47 | 48 | 49 | start = time.time() 50 | 51 | 52 | def pose_callback(data): 53 | global start 54 | global initial_orientation 55 | global orientation 56 | position = np.array( 57 | [ 58 | data.pose.position.x, 59 | data.pose.position.y, 60 | data.pose.position.z, 61 | ] 62 | ) 63 | if initial_orientation is None: 64 | return None 65 | 66 | position = initial_orientation.apply(position) 67 | position = np.concatenate([position, orientation]) 68 | 69 | if time.time() - start > 1: 70 | node.send_output("position", position.tobytes()) 71 | start = time.time() 72 | 73 | 74 | rospy.init_node("listener", anonymous=True) 75 | rospy.Subscriber("mavros/imu/data", Imu, imu_callback) 76 | rospy.Subscriber("current_pose", PoseStamped, pose_callback) 77 | 78 | rospy.spin() 79 | -------------------------------------------------------------------------------- /graphs/oasis/oasis_agent_planning.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: oasis_agent 3 | custom: 4 | inputs: 5 | tick: dora/timer/millis/100 6 | outputs: 7 | - position 8 | - speed 9 | - image 10 | - objective_waypoints 11 | - lidar_pc 12 | - opendrive 13 | source: shell 14 | # With Carla_source_node 15 | args: python3 ../../carla/carla_source_node.py 16 | # 17 | # Or with the OASIS AGENT 18 | # 19 | # args: > 20 | # python3 $SIMULATE --output 21 | # --oasJson --criteriaConfig $CRITERIA 22 | # --openscenario $XOSC 23 | # --agent $TEAM_AGENT 24 | # --agentConfig $TEAM_AGENT_CONF 25 | # --destination $DESTINATION 26 | 27 | - id: carla_gps_op 28 | operator: 29 | python: ../../carla/carla_gps_op.py 30 | outputs: 31 | - gps_waypoints 32 | inputs: 33 | opendrive: oasis_agent/opendrive 34 | objective_waypoints: oasis_agent/objective_waypoints 35 | position: oasis_agent/position 36 | 37 | - id: fot_op 38 | operator: 39 | python: ../../operators/fot_op.py 40 | outputs: 41 | - waypoints 42 | inputs: 43 | position: oasis_agent/position 44 | speed: oasis_agent/speed 45 | obstacles: obstacle_location_op/obstacles 46 | gps_waypoints: carla_gps_op/gps_waypoints 47 | 48 | - id: yolov5 49 | operator: 50 | outputs: 51 | - bbox 52 | inputs: 53 | image: oasis_agent/image 54 | python: ../../operators/yolov5_op.py 55 | 56 | - id: obstacle_location_op 57 | operator: 58 | outputs: 59 | - obstacles 60 | - global_lanes 61 | inputs: 62 | lidar_pc: oasis_agent/lidar_pc 63 | obstacles_bbox: yolov5/bbox 64 | position: oasis_agent/position 65 | python: ../../operators/obstacle_location_op.py 66 | 67 | - id: plot 68 | operator: 69 | python: ../../operators/plot.py 70 | inputs: 71 | image: oasis_agent/image 72 | obstacles_bbox: yolov5/bbox 73 | position: oasis_agent/position 74 | obstacles: obstacle_location_op/obstacles 75 | gps_waypoints: carla_gps_op/gps_waypoints 76 | waypoints: fot_op/waypoints -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 5 | --- 6 | 7 | `dora-drives` is a step-by-step tutorial that allows beginners to write their own autonomous vehicle program from scratch using a simple starter kit. 8 | 9 | ## Why dora-drives? 10 | 11 | We believe that programming autonomous driving vehicles is the perfect starting point for learning robotic applications, as: 12 | - Autonomous driving is foundational for many robotic applications. 13 | - Autonomous driving is simple to explain. 14 | - There are a lot of datasets, models and documentation available online. 15 | 16 | ## Installation 17 | 18 | ```bash 19 | git clone git@github.com:dora-rs/dora-drives.git 20 | cd dora-drives 21 | 22 | ## Installing dependencies 23 | conda create -n dora3.7 python=3.7 -y 24 | conda activate dora3.7 25 | conda install pytorch=1.11.0 torchvision=0.12.0 cudatoolkit=11.3 -c pytorch -y 26 | pip install --upgrade pip 27 | pip install -r install_requirements.txt 28 | pip install -r requirements.txt 29 | 30 | ## Installing dora if it's not already installed 31 | sudo wget https://github.com/dora-rs/dora/releases/download/v0.2.5/dora-v0.2.5-x86_64-Linux.zip && sudo unzip dora-v0.2.5-x86_64-Linux.zip -d /usr/local/bin 32 | ``` 33 | 34 | For more info, see: https://dora-rs.ai/docs/guides/dora-drives/installation 35 | 36 | ## Getting started 37 | 38 | You can run a full autonomous driving loop with just the following commands: 39 | 40 | ```bash 41 | docker pull carlasim/carla:0.9.13 42 | 43 | # Serve the carla simulator 44 | docker run --privileged --gpus all --net=host -e DISPLAY=$DISPLAY carlasim/carla:0.9.13 /bin/bash ./CarlaUE4.sh -carla-server -world-port=2000 -RenderOffScreen 45 | 46 | # Spawn dora daemon and coordinator 47 | dora up 48 | 49 | # Spawn dora dataflow 50 | dora start graphs/oasis/oasis_full.yaml --attach 51 | ``` 52 | 53 | To get a step-by-step tutorial, see: https://dora-rs.ai/docs/guides/dora-drives/carla 54 | 55 | ## Documentation 56 | 57 | The documentation can be found here: https://dora-rs.ai/docs/guides/dora-drives 58 | 59 | ## Discussion 60 | 61 | Our main communication channel is our GitHub Project Discussion page: https://github.com/orgs/dora-rs/discussions 62 | 63 | Feel free to reach out with any topics, issues, or ideas. 64 | 65 | ## ⚖️ LICENSE 66 | 67 | This project is licensed under Apache-2.0. Check out [NOTICE.md](NOTICE.md) for more information.
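## Quick check without the simulator

If you only want to verify that `dora` itself is working before setting up CARLA, the webcam tutorial graph is a minimal smoke test; it assumes a webcam is available at device index 0, as configured in `graphs/tutorials/webcam.yaml`:

```bash
# Spawn dora daemon and coordinator
dora up

# Plot the raw webcam stream, no simulator required
dora start graphs/tutorials/webcam.yaml --attach
```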
68 | -------------------------------------------------------------------------------- /operators/traffic_sign_op.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import cv2 4 | import numpy as np 5 | import torch 6 | from dora import DoraStatus 7 | from yolov7_tt100k import WEIGHTS, letterbox, non_max_suppression, scale_coords 8 | 9 | 10 | class Operator: 11 | """ 12 | Inferring traffic signs from images 13 | """ 14 | 15 | def __init__(self): 16 | model = torch.load(WEIGHTS) # load 17 | self.model = ( 18 | model["ema" if model.get("ema") else "model"].float().fuse().eval() 19 | ) 20 | self.model.to(torch.device("cuda")) 21 | 22 | def on_event( 23 | self, 24 | dora_event: dict, 25 | send_output: Callable[[str, bytes], None], 26 | ) -> DoraStatus: 27 | if dora_event["type"] == "INPUT": 28 | return self.on_input(dora_event, send_output) 29 | return DoraStatus.CONTINUE 30 | 31 | def on_input( 32 | self, 33 | dora_input: dict, 34 | send_output: Callable[[str, bytes], None], 35 | ) -> DoraStatus: 36 | """Handle image 37 | Args: 38 | dora_input["id"](str): Id of the input declared in the yaml configuration 39 | dora_input["data"] (bytes): Bytes message of the input 40 | send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 41 | """ 42 | 43 | frame = cv2.imdecode( 44 | np.frombuffer( 45 | dora_input["data"], 46 | np.uint8, 47 | ), 48 | -1, 49 | ) 50 | img = frame[:, :, :3] 51 | shape = img.shape 52 | img, ratio, (dw, dh) = letterbox(img) 53 | img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 54 | img = np.ascontiguousarray(img) 55 | img = torch.from_numpy(img).to(torch.device("cuda")) 56 | half = False 57 | img = img.half() if half else img.float() # uint8 to fp16/32 58 | img /= 255.0 # 0 - 255 to 0.0 - 1.0 59 | if img.ndimension() == 3: 60 | img = img.unsqueeze(0) 61 | 62 | with torch.no_grad(): 63 | pred = self.model(img)[0] 64 | pred = non_max_suppression(pred, 0.15, 0.15, None, False)[0] 65 | pred[:, :4] = scale_coords( 66 | img.shape[2:], pred[:, :4], shape 67 | ).round() 68 | results = pred.detach().cpu().numpy() 69 | 70 | arrays = np.array(results)[:, [0, 2, 1, 3, 4, 5]] # xyxy -> xxyy 71 | arrays[:, 4] *= 100 72 | arrays = arrays.astype(np.int32) 73 | arrays = arrays.tobytes() 74 | send_output("bbox", arrays, dora_input["metadata"]) 75 | return DoraStatus.CONTINUE 76 | -------------------------------------------------------------------------------- /graphs/oasis/oasis_agent.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: oasis_agent 3 | custom: 4 | inputs: 5 | control: pid_control_op/control 6 | tick: dora/timer/millis/400 7 | outputs: 8 | - position 9 | - speed 10 | - image 11 | - objective_waypoints 12 | - lidar_pc 13 | - opendrive 14 | source: shell 15 | args: > 16 | python3 $SIMULATE --output 17 | --oasJson --criteriaConfig $CRITERIA 18 | --openscenario $XOSC 19 | --agent $TEAM_AGENT 20 | --agentConfig $TEAM_AGENT_CONF 21 | --destination $DESTINATION 22 | # 23 | # or for Carla Standalone: 24 | # 25 | # args: python3 ../../carla/carla_source_node.py 26 | 27 | - id: carla_gps_op 28 | operator: 29 | python: ../../carla/carla_gps_op.py 30 | outputs: 31 | - gps_waypoints 32 | inputs: 33 | opendrive: oasis_agent/opendrive 34 | objective_waypoints: oasis_agent/objective_waypoints 35 | position: oasis_agent/position 36 | 37 | - id: yolov5 38 | operator: 39 | outputs: 40 | - bbox 41 | inputs: 42 | image: 
oasis_agent/image 43 | python: ../../operators/yolov5_op.py 44 | env: 45 | # CUDA_VISIBLE_DEVICES: "" 46 | PYTORCH_DEVICE: cuda 47 | YOLOV5_PATH: /home/dora/workspace/simulate/team_code/dependencies/yolov5 48 | YOLOV5_WEIGHT_PATH: /home/dora/workspace/simulate/team_code/dependencies/yolov5/yolov5n.pt 49 | 50 | - id: obstacle_location_op 51 | operator: 52 | outputs: 53 | - obstacles 54 | inputs: 55 | lidar_pc: oasis_agent/lidar_pc 56 | obstacles_bbox: yolov5/bbox 57 | position: oasis_agent/position 58 | python: ../../operators/obstacle_location_op.py 59 | 60 | - id: fot_op 61 | operator: 62 | python: ../../operators/fot_op.py 63 | outputs: 64 | - waypoints 65 | inputs: 66 | position: oasis_agent/position 67 | speed: oasis_agent/speed 68 | obstacles: obstacle_location_op/obstacles 69 | gps_waypoints: carla_gps_op/gps_waypoints 70 | 71 | - id: pid_control_op 72 | operator: 73 | python: ../../operators/pid_control_op.py 74 | outputs: 75 | - control 76 | inputs: 77 | position: oasis_agent/position 78 | speed: oasis_agent/speed 79 | waypoints: fot_op/waypoints 80 | 81 | - id: plot 82 | operator: 83 | python: ../../operators/plot.py 84 | inputs: 85 | image: oasis_agent/image 86 | obstacles_bbox: yolov5/bbox 87 | obstacles: obstacle_location_op/obstacles 88 | gps_waypoints: carla_gps_op/gps_waypoints 89 | position: oasis_agent/position 90 | waypoints: fot_op/waypoints 91 | control: pid_control_op/control 92 | -------------------------------------------------------------------------------- /graphs/oasis/oasis_full.yaml: -------------------------------------------------------------------------------- 1 | nodes: 2 | - id: oasis_agent 3 | custom: 4 | inputs: 5 | control: pid_control_op/control 6 | tick: dora/timer/millis/100 7 | outputs: 8 | - position 9 | - speed 10 | - image 11 | - objective_waypoints 12 | - lidar_pc 13 | - opendrive 14 | source: shell 15 | # args: > 16 | # python3 $SIMULATE --output 17 | # --oasJson --criteriaConfig $CRITERIA 18 | # --openscenario $XOSC 19 | # --agent $TEAM_AGENT 20 | # --agentConfig $TEAM_AGENT_CONF 21 | # --destination $DESTINATION 22 | # 23 | # or for Carla Standalone: 24 | # 25 | args: python3 ../../carla/carla_source_node.py 26 | 27 | - id: carla_gps_op 28 | operator: 29 | python: ../../carla/carla_gps_op.py 30 | outputs: 31 | - gps_waypoints 32 | inputs: 33 | opendrive: oasis_agent/opendrive 34 | objective_waypoints: oasis_agent/objective_waypoints 35 | position: oasis_agent/position 36 | 37 | - id: yolov5 38 | operator: 39 | outputs: 40 | - bbox 41 | inputs: 42 | image: oasis_agent/image 43 | python: ../../operators/yolov5_op.py 44 | env: 45 | # CUDA_VISIBLE_DEVICES: "" 46 | PYTORCH_DEVICE: cuda 47 | # YOLOV5_PATH: /home/dora/workspace/simulate/team_code/dependencies/yolov5 48 | # YOLOV5_WEIGHT_PATH: /home/dora/workspace/simulate/team_code/dependencies/yolov5/yolov5n.pt 49 | 50 | 51 | - id: obstacle_location_op 52 | operator: 53 | outputs: 54 | - obstacles 55 | inputs: 56 | lidar_pc: oasis_agent/lidar_pc 57 | obstacles_bbox: yolov5/bbox 58 | position: oasis_agent/position 59 | python: ../../operators/obstacle_location_op.py 60 | 61 | - id: fot_op 62 | operator: 63 | python: ../../operators/fot_op.py 64 | outputs: 65 | - waypoints 66 | inputs: 67 | position: oasis_agent/position 68 | speed: oasis_agent/speed 69 | obstacles: obstacle_location_op/obstacles 70 | gps_waypoints: carla_gps_op/gps_waypoints 71 | 72 | - id: pid_control_op 73 | operator: 74 | python: ../../operators/pid_control_op.py 75 | outputs: 76 | - control 77 | inputs: 78 | position: 
oasis_agent/position 79 | speed: oasis_agent/speed 80 | waypoints: fot_op/waypoints 81 | 82 | - id: plot 83 | operator: 84 | python: ../../operators/plot.py 85 | inputs: 86 | image: oasis_agent/image 87 | obstacles_bbox: yolov5/bbox 88 | obstacles: obstacle_location_op/obstacles 89 | gps_waypoints: carla_gps_op/gps_waypoints 90 | position: oasis_agent/position 91 | waypoints: fot_op/waypoints 92 | control: pid_control_op/control 93 | -------------------------------------------------------------------------------- /carla/dora_tracing.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import typing 3 | 4 | from opentelemetry import trace 5 | from opentelemetry.exporter.jaeger.thrift import JaegerExporter 6 | from opentelemetry.sdk.resources import SERVICE_NAME, Resource 7 | from opentelemetry.sdk.trace import TracerProvider 8 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 9 | from opentelemetry.trace.propagation.tracecontext import ( 10 | TraceContextTextMapPropagator, 11 | ) 12 | 13 | CarrierT = typing.TypeVar("CarrierT") 14 | propagator = TraceContextTextMapPropagator() 15 | 16 | trace.set_tracer_provider( 17 | TracerProvider(resource=Resource.create({SERVICE_NAME: "python-client"})) 18 | ) 19 | tracer = trace.get_tracer(__name__) 20 | jaeger_exporter = JaegerExporter( 21 | agent_host_name="172.17.0.1", 22 | agent_port=6831, 23 | ) 24 | span_processor = BatchSpanProcessor(jaeger_exporter) 25 | trace.get_tracer_provider().add_span_processor(span_processor) 26 | 27 | 28 | class Getter(abc.ABC): 29 | """This class implements a Getter that enables extracting propagated 30 | fields from a carrier. 31 | """ 32 | 33 | def get( 34 | self, carrier: CarrierT, key: str 35 | ) -> typing.Optional[typing.List[str]]: 36 | """Function that can retrieve zero 37 | or more values from the carrier. In the case that 38 | the value does not exist, returns None. 39 | Args: 40 | carrier: An object which contains values that are used to 41 | construct a Context. 42 | key: key of a field in carrier. 43 | Returns: first value of the propagation key or None if the key doesn't 44 | exist. 45 | """ 46 | if key in carrier.keys(): 47 | return [carrier[key]] 48 | 49 | def keys(self, carrier: CarrierT) -> typing.List[str]: 50 | """Function that can retrieve all the keys in a carrier object. 51 | Args: 52 | carrier: An object which contains values that are 53 | used to construct a Context. 54 | Returns: 55 | list of keys from the carrier. 
56 | """ 57 | return list(carrier.keys()) 58 | 59 | 60 | getter = Getter() 61 | 62 | 63 | def parse_context(string_context: str) -> CarrierT: 64 | 65 | result = {} 66 | 67 | for elements in string_context.split(";"): 68 | splits = elements.split(":") 69 | if len(splits) == 2: 70 | result[splits[0]] = splits[1] 71 | elif len(splits) == 1: 72 | result[splits[0]] = "" 73 | 74 | return result 75 | 76 | 77 | def extract_context(dora_input): 78 | string_context = dora_input["open_telemetry_context"] 79 | 80 | carrier = parse_context(string_context) 81 | 82 | context = propagator.extract(carrier=carrier, getter=getter) 83 | 84 | return context 85 | 86 | def serialize_context(context: dict) -> str: 87 | output = "" 88 | for key, value in context.items(): 89 | output += f"{key}:{value};" 90 | return output -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | bin/ 5 | wheels 6 | hub 7 | # These are backup files generated by rustfmt 8 | **/*.rs.bk 9 | 10 | # Removing images. 11 | *.jpg 12 | # *.png 13 | *.csv 14 | 15 | *.pt 16 | *.avi 17 | 18 | # Byte-compiled / optimized / DLL files 19 | __pycache__/ 20 | *.py[cod] 21 | *$py.class 22 | 23 | # C extensions 24 | *.so 25 | 26 | # Distribution / packaging 27 | .Python 28 | build/ 29 | develop-eggs/ 30 | dist/ 31 | downloads/ 32 | eggs/ 33 | .eggs/ 34 | lib/ 35 | lib64/ 36 | parts/ 37 | sdist/ 38 | var/ 39 | wheels/ 40 | share/python-wheels/ 41 | *.egg-info/ 42 | .installed.cfg 43 | *.egg 44 | MANIFEST 45 | 46 | # PyInstaller 47 | # Usually these files are written by a python script from a template 48 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 49 | *.manifest 50 | *.spec 51 | 52 | # Installer logs 53 | pip-log.txt 54 | pip-delete-this-directory.txt 55 | 56 | # Unit test / coverage reports 57 | htmlcov/ 58 | .tox/ 59 | .nox/ 60 | .coverage 61 | .coverage.* 62 | .cache 63 | nosetests.xml 64 | coverage.xml 65 | *.cover 66 | *.py,cover 67 | .hypothesis/ 68 | .pytest_cache/ 69 | cover/ 70 | 71 | # Translations 72 | *.mo 73 | *.pot 74 | 75 | # Django stuff: 76 | *.log 77 | local_settings.py 78 | db.sqlite3 79 | db.sqlite3-journal 80 | 81 | # Flask stuff: 82 | instance/ 83 | .webassets-cache 84 | 85 | # Scrapy stuff: 86 | .scrapy 87 | 88 | # Sphinx documentation 89 | docs/_build/ 90 | 91 | # PyBuilder 92 | .pybuilder/ 93 | target/ 94 | 95 | # Jupyter Notebook 96 | .ipynb_checkpoints 97 | 98 | # IPython 99 | profile_default/ 100 | ipython_config.py 101 | 102 | # pyenv 103 | # For a library or package, you might want to ignore these files since the code is 104 | # intended to run in multiple environments; otherwise, check them in: 105 | # .python-version 106 | 107 | # pipenv 108 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 109 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 110 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 111 | # install all needed dependencies. 112 | #Pipfile.lock 113 | 114 | # poetry 115 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 116 | # This is especially recommended for binary packages to ensure reproducibility, and is more 117 | # commonly ignored for libraries. 
118 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 119 | #poetry.lock 120 | 121 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 122 | __pypackages__/ 123 | 124 | # Celery stuff 125 | celerybeat-schedule 126 | celerybeat.pid 127 | 128 | # SageMath parsed files 129 | *.sage.py 130 | 131 | # Environments 132 | .env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # pytype static type analyzer 159 | .pytype/ 160 | 161 | # Cython debug symbols 162 | cython_debug/ 163 | -------------------------------------------------------------------------------- /operators/dora_tracing.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import os 3 | import typing 4 | 5 | from opentelemetry import trace 6 | from opentelemetry.exporter.jaeger.thrift import JaegerExporter 7 | from opentelemetry.sdk.resources import SERVICE_NAME, Resource 8 | from opentelemetry.sdk.trace import TracerProvider 9 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 10 | from opentelemetry.trace.propagation.tracecontext import ( 11 | TraceContextTextMapPropagator, 12 | ) 13 | 14 | CarrierT = typing.TypeVar("CarrierT") 15 | 16 | 17 | class Getter(abc.ABC): 18 | """This class implements a Getter that enables extracting propagated 19 | fields from a carrier. 20 | """ 21 | 22 | def get( 23 | self, carrier: CarrierT, key: str 24 | ) -> typing.Optional[typing.List[str]]: 25 | """Function that can retrieve zero 26 | or more values from the carrier. In the case that 27 | the value does not exist, returns None. 28 | Args: 29 | carrier: An object which contains values that are used to 30 | construct a Context. 31 | key: key of a field in carrier. 32 | Returns: first value of the propagation key or None if the key doesn't 33 | exist. 34 | """ 35 | if key in carrier.keys(): 36 | return [carrier[key]] 37 | 38 | def keys(self, carrier: CarrierT) -> typing.List[str]: 39 | """Function that can retrieve all the keys in a carrier object. 40 | Args: 41 | carrier: An object which contains values that are 42 | used to construct a Context. 43 | Returns: 44 | list of keys from the carrier. 
45 | """ 46 | return list(carrier.keys()) 47 | 48 | 49 | getter = Getter() 50 | 51 | 52 | def parse_context(string_context: str) -> CarrierT: 53 | 54 | result = {} 55 | 56 | for elements in string_context.split(";"): 57 | splits = elements.split(":") 58 | if len(splits) == 2: 59 | result[splits[0]] = splits[1] 60 | elif len(splits) == 1: 61 | result[splits[0]] = "" 62 | 63 | return result 64 | 65 | 66 | def extract_context(dora_input): 67 | string_context = dora_input["open_telemetry_context"] 68 | 69 | carrier = parse_context(string_context) 70 | 71 | propagator = TraceContextTextMapPropagator() 72 | context = propagator.extract(carrier=carrier, getter=getter) 73 | 74 | return context 75 | 76 | 77 | def a_decorator_passing_arbitrary_arguments(function_to_decorate): 78 | propagator = TraceContextTextMapPropagator() 79 | 80 | trace.set_tracer_provider( 81 | TracerProvider( 82 | resource=Resource.create({SERVICE_NAME: os.getenv("DORA_NODE_ID")}) 83 | ) 84 | ) 85 | tracer = trace.get_tracer(__name__) 86 | jaeger_exporter = JaegerExporter( 87 | agent_host_name="172.17.0.1", 88 | agent_port=6831, 89 | ) 90 | span_processor = BatchSpanProcessor(jaeger_exporter) 91 | trace.get_tracer_provider().add_span_processor(span_processor) 92 | 93 | def a_wrapper_accepting_arbitrary_arguments(*args, **kwargs): 94 | 95 | carrier = parse_context(kwargs["metadata"]["open_telemetry_context"]) 96 | context = propagator.extract(carrier=carrier) 97 | with tracer.start_as_current_span( 98 | name=kwargs["id"], context=context 99 | ) as child_span: 100 | function_to_decorate(*args, **kwargs) 101 | 102 | return a_wrapper_accepting_arbitrary_arguments 103 | -------------------------------------------------------------------------------- /ros/mavros_node_tracing.py: -------------------------------------------------------------------------------- 1 | #!/home/nvidia/Documents/dora-rover/venv/bin/python3 2 | # license removed for brevity 3 | import rospy 4 | from mavros_msgs.msg import PositionTarget, OverrideRCIn 5 | from geometry_msgs.msg import TwistStamped 6 | from dora import Node 7 | import numpy as np 8 | import time 9 | 10 | import typing 11 | from opentelemetry import trace 12 | from opentelemetry.exporter.jaeger.thrift import JaegerExporter 13 | from opentelemetry.sdk.resources import SERVICE_NAME, Resource 14 | from opentelemetry.sdk.trace import TracerProvider 15 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 16 | from opentelemetry.trace.propagation.tracecontext import ( 17 | TraceContextTextMapPropagator, 18 | ) 19 | 20 | 21 | def parse_context(string_context: str): 22 | result = {} 23 | for elements in string_context.split(";"): 24 | splits = elements.split(":") 25 | if len(splits) == 2: 26 | result[splits[0]] = splits[1] 27 | elif len(splits) == 1: 28 | result[splits[0]] = "" 29 | return result 30 | 31 | 32 | CarrierT = typing.TypeVar("CarrierT") 33 | propagator = TraceContextTextMapPropagator() 34 | 35 | trace.set_tracer_provider( 36 | TracerProvider(resource=Resource.create({SERVICE_NAME: "mavros"})) 37 | ) 38 | tracer = trace.get_tracer(__name__) 39 | jaeger_exporter = JaegerExporter( 40 | agent_host_name="172.17.0.1", 41 | agent_port=6831, 42 | ) 43 | span_processor = BatchSpanProcessor(jaeger_exporter) 44 | trace.get_tracer_provider().add_span_processor(span_processor) 45 | 46 | 47 | node = Node() 48 | 49 | TARGET_SPEED = 1600 50 | 51 | 52 | def talker(): 53 | pub = rospy.Publisher("mavros/rc/override", OverrideRCIn, queue_size=1) 54 | rospy.init_node("talker", anonymous=True) 55 | for 
input_id, value, metadata in node: 56 | carrier = parse_context(metadata["open_telemetry_context"]) 57 | context = propagator.extract(carrier=carrier) 58 | with tracer.start_as_current_span( 59 | name="control", context=context 60 | ) as child_span: 61 | 62 | [angle] = np.frombuffer(value) 63 | target = OverrideRCIn() 64 | if angle < np.pi / 2 and angle > -np.pi / 2: 65 | target_rotation = ( 66 | int((angle + np.pi / 2) / (np.pi) * 1000) + 1000 67 | ) 68 | target.channels[0] = target_rotation 69 | target.channels[1] = TARGET_SPEED 70 | elif angle < -np.pi / 2: 71 | target_rotation = 1000 72 | target.channels[0] = target_rotation 73 | target.channels[1] = TARGET_SPEED 74 | else: 75 | target.channels[0] = 2000 76 | target.channels[1] = TARGET_SPEED 77 | 78 | # target.channels[2] = 100 79 | # target = PositionTarget() 80 | # target.coordinate_frame = 9 81 | # target.header.stamp = rospy.get_rostime() 82 | # target.type_mask = int("110111111100",2) 83 | # target.position.x = 0.9 84 | # target.position.y = -0.9 85 | # target.velocity.x = 0.1 86 | # target.velocity.y = -0.1 87 | # target.yaw = yaw 88 | pub.publish(target) 89 | print("stopping") 90 | target = OverrideRCIn() 91 | target.channels[0] = 2000 92 | target.channels[1] = 1550 93 | 94 | pub.publish(target) 95 | print("stopped") 96 | 97 | # rate.sleep() 98 | 99 | 100 | if __name__ == "__main__": 101 | try: 102 | talker() 103 | except rospy.ROSInterruptException: 104 | pass 105 | -------------------------------------------------------------------------------- /ros/ndt_pose_tracing.py: -------------------------------------------------------------------------------- 1 | #!/home/nvidia/Documents/dora-rover/venv/bin/python3 2 | 3 | import sensor_msgs.point_cloud2 as pc2 4 | import rospy 5 | from sensor_msgs.msg import PointCloud2, Imu 6 | from geometry_msgs.msg import PoseStamped 7 | import numpy as np 8 | from dora import Node 9 | import time 10 | from scipy.spatial.transform import Rotation as R 11 | 12 | 13 | import typing 14 | from opentelemetry import trace 15 | from opentelemetry.exporter.jaeger.thrift import JaegerExporter 16 | from opentelemetry.sdk.resources import SERVICE_NAME, Resource 17 | from opentelemetry.sdk.trace import TracerProvider 18 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 19 | from opentelemetry.trace.propagation.tracecontext import ( 20 | TraceContextTextMapPropagator, 21 | ) 22 | 23 | 24 | def serialize_context(context: dict) -> str: 25 | output = "" 26 | for key, value in context.items(): 27 | output += f"{key}:{value};" 28 | return output 29 | 30 | 31 | CarrierT = typing.TypeVar("CarrierT") 32 | propagator = TraceContextTextMapPropagator() 33 | 34 | trace.set_tracer_provider( 35 | TracerProvider(resource=Resource.create({SERVICE_NAME: "ndt_pose"})) 36 | ) 37 | tracer = trace.get_tracer(__name__) 38 | jaeger_exporter = JaegerExporter( 39 | agent_host_name="172.17.0.1", 40 | agent_port=6831, 41 | ) 42 | span_processor = BatchSpanProcessor(jaeger_exporter) 43 | trace.get_tracer_provider().add_span_processor(span_processor) 44 | 45 | 46 | node = Node() 47 | initial_orientation = None 48 | orientation = None 49 | 50 | GOAL_LOCATION = np.array([[0, 0, 0], [3, 0, 0], [3, -8, 0]]) 51 | 52 | 53 | def imu_callback(data): 54 | 55 | global initial_orientation 56 | global orientation 57 | 58 | if initial_orientation is None: 59 | initial_orientation = R.from_quat( 60 | [ 61 | data.orientation.x, 62 | data.orientation.y, 63 | data.orientation.z, 64 | data.orientation.w, 65 | ] 66 | ) 67 | print( 68 | 
f"INITIAL ORIENTATION: {initial_orientation.as_euler('xyz', degrees=True)}" 69 | ) 70 | 71 | abs_goal_location = initial_orientation.apply(GOAL_LOCATION) 72 | abs_goal_location = abs_goal_location[:, :2] 73 | node.send_output("gps_waypoints", abs_goal_location.tobytes()) 74 | 75 | orientation = [ 76 | data.orientation.x, 77 | data.orientation.y, 78 | data.orientation.z, 79 | data.orientation.w, 80 | ] 81 | 82 | # node.send_output("imu", orientation.tobytes()) 83 | 84 | 85 | start = time.time() 86 | 87 | 88 | def pose_callback(data): 89 | global start 90 | global initial_orientation 91 | global orientation 92 | position = np.array( 93 | [ 94 | data.pose.position.x, 95 | data.pose.position.y, 96 | data.pose.position.z, 97 | ] 98 | ) 99 | if initial_orientation is None: 100 | return None 101 | 102 | position = initial_orientation.apply(position) 103 | position = np.concatenate([position, orientation]) 104 | 105 | if time.time() - start > 1: 106 | with tracer.start_as_current_span("start") as _span: 107 | output = {} 108 | propagator.inject(output) 109 | metadata = {"open_telemetry_context": serialize_context(output)} 110 | node.send_output("position", position.tobytes(), metadata) 111 | start = time.time() 112 | 113 | 114 | rospy.init_node("listener", anonymous=True) 115 | rospy.Subscriber("mavros/imu/data", Imu, imu_callback) 116 | rospy.Subscriber("current_pose", PoseStamped, pose_callback) 117 | 118 | rospy.spin() 119 | -------------------------------------------------------------------------------- /operators/strong_sort_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Strong Sort operator 3 | 4 | `Strong sort` uses deep learning to uniquely identify bounding boxes in order to track them trough an image stream. 5 | 6 | ## Inputs 7 | 8 | - image: HEIGHTxWIDTHxBGR array. 
9 | - bbox: N_BBOX, X_MIN, X_MAX, Y_MIN, Y_MAX, CONDIDENCE, CLASS, array 10 | 11 | ## Outputs 12 | 13 | - obstacles_id: x1, x2, y1, y2 track_id, class_id, conf 14 | 15 | ## Example plot (Tracking correspond to the blue # id ) 16 | 17 | ![img](https://i.imgur.com/ozO1y7l.gif) 18 | 19 | ## Graph Description 20 | 21 | ```yaml 22 | - id: yolov5 23 | operator: 24 | outputs: 25 | - obstacles_id 26 | inputs: 27 | image: webcam/image 28 | bbox: yolov5/bbox 29 | python: ../../operators/strong_sort_op.py 30 | ``` 31 | 32 | ## Graph Visualisation 33 | 34 | ```mermaid 35 | flowchart TB 36 | oasis_agent -- image --> strong_sort/op 37 | yolov5/op -- bbox as obstacles_bbox --> strong_sort/op 38 | ``` 39 | """ 40 | from typing import Callable 41 | 42 | import numpy as np 43 | import pyarrow as pa 44 | import torch 45 | from dora import DoraStatus 46 | from strong_sort import StrongSORT 47 | 48 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 49 | 50 | IMAGE_WIDTH = 1920 51 | IMAGE_HEIGHT = 1080 52 | 53 | 54 | def xxyy2xywh(x): 55 | # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right 56 | y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) 57 | y[:, 0] = (x[:, 0] + x[:, 1]) / 2 # x center 58 | y[:, 1] = (x[:, 2] + x[:, 3]) / 2 # y center 59 | y[:, 2] = x[:, 1] - x[:, 0] # width 60 | y[:, 3] = x[:, 3] - x[:, 2] # height 61 | return y 62 | 63 | 64 | class Operator: 65 | """ 66 | Infering object from images 67 | """ 68 | 69 | def __init__(self): 70 | model = StrongSORT( 71 | "osnet_x0_25_msmt17.pt", 72 | torch.device("cuda"), 73 | False, 74 | ) 75 | model.model.warmup() 76 | self.model = model 77 | self.frame = [] 78 | 79 | def on_event( 80 | self, 81 | dora_event: dict, 82 | send_output: Callable[[str, bytes], None], 83 | ) -> DoraStatus: 84 | if dora_event["type"] == "INPUT": 85 | return self.on_input(dora_event, send_output) 86 | return DoraStatus.CONTINUE 87 | 88 | def on_input( 89 | self, 90 | dora_input: dict, 91 | send_output: Callable[[str, bytes], None], 92 | ) -> DoraStatus: 93 | if dora_input["id"] == "image": 94 | frame = np.array( 95 | dora_input["value"], 96 | np.uint8, 97 | ).reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 98 | 99 | self.frame = frame[:, :, :3] 100 | 101 | elif dora_input["id"] == "obstacles_bbox" and len(self.frame) != 0: 102 | obstacles = np.array(dora_input["value"]).reshape((-1, 6)) 103 | if obstacles.shape[0] == 0: 104 | # self.model.increment_ages() 105 | send_output( 106 | "obstacles_id", 107 | pa.array(np.array([]).ravel()), 108 | dora_input["metadata"], 109 | ) 110 | return DoraStatus.CONTINUE 111 | 112 | # Post Processing yolov5 113 | xywhs = xxyy2xywh(obstacles[:, 0:4]) 114 | confs = obstacles[:, 4] 115 | clss = obstacles[:, 5] 116 | with torch.no_grad(): 117 | outputs = np.array( 118 | self.model.update(xywhs, confs, clss, self.frame) 119 | ).astype("int32") 120 | if len(outputs) != 0: 121 | outputs = outputs[ 122 | :, [0, 2, 1, 3, 4, 5, 6] 123 | ] # xyxy -> x1, x2, y1, y2 track_id, class_id, conf 124 | 125 | send_output( 126 | "obstacles_id", 127 | pa.array(outputs.ravel()), 128 | dora_input["metadata"], 129 | ) 130 | 131 | return DoraStatus.CONTINUE 132 | -------------------------------------------------------------------------------- /operators/yolov5_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Yolov5 operator 3 | 4 | `Yolov5` object detection operator generates bounding boxes on images where it detects object. 
5 | 6 | More info here: [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5) 7 | 8 | `Yolov5` has not been finetuned on the simulation and is directly importing weight from Pytorch Hub. 9 | 10 | In case you want to run `yolov5` without internet you can clone [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5) and download the weights you want to use from [the release page](https://github.com/ultralytics/yolov5/releases/tag/v7.0) and then specify within the yaml graph the two environments variables: 11 | - `YOLOV5_PATH: YOUR/PATH` 12 | - `YOLOV5_WEIGHT_PATH: YOUR/WEIGHT/PATH` 13 | 14 | You can also choose to allocate the model in GPU using the environment variable: 15 | - `PYTORCH_DEVICE: cuda # or cpu` 16 | 17 | ## Inputs 18 | 19 | - image: HEIGHT x WIDTH x BGR array. 20 | 21 | ## Outputs 22 | 23 | - bbox: N_BBOX, X_MIN, X_MAX, Y_MIN, Y_MAX, CONDIDENCE, CLASS, array 24 | 25 | ## Example Image 26 | 27 | ![yolov5 example](https://i.imgur.com/hPrazyl.jpg) 28 | 29 | ## Graph Description 30 | 31 | ```yaml 32 | - id: yolov5 33 | operator: 34 | outputs: 35 | - bbox 36 | inputs: 37 | image: webcam/image 38 | python: ../../operators/yolov5_op.py 39 | ``` 40 | 41 | ## Graph Visualisation 42 | 43 | ```mermaid 44 | flowchart TB 45 | oasis_agent 46 | subgraph yolov5 47 | yolov5/op[op] 48 | end 49 | subgraph obstacle_location_op 50 | obstacle_location_op/op[op] 51 | end 52 | oasis_agent -- image --> yolov5/op 53 | yolov5/op -- bbox as obstacles_bbox --> obstacle_location_op/op 54 | ``` 55 | """ 56 | 57 | 58 | import os 59 | from typing import Callable 60 | 61 | import numpy as np 62 | import pyarrow as pa 63 | import torch 64 | from dora import DoraStatus 65 | 66 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 67 | IMAGE_WIDTH = 1920 68 | IMAGE_HEIGHT = 1080 69 | DEVICE = os.environ.get("PYTORCH_DEVICE") or "cpu" 70 | YOLOV5_PATH = os.environ.get("YOLOV5_PATH") 71 | YOLOV5_WEIGHT_PATH = os.environ.get("YOLOV5_WEIGHT_PATH") 72 | 73 | 74 | class Operator: 75 | """ 76 | Send `bbox` found by YOLOv5 on given `image` 77 | """ 78 | 79 | def __init__(self): 80 | if YOLOV5_PATH is None: 81 | # With internet 82 | self.model = torch.hub.load( 83 | "ultralytics/yolov5", 84 | "yolov5n", 85 | ) 86 | else: 87 | # Without internet 88 | # 89 | # To install: 90 | # cd $DORA_HOME_DEP/dependecies # Optional 91 | # git clone https://github.com/ultralytics/yolov5.git 92 | # rm yolov5/.git -rf 93 | # Add YOLOV5_PATH and YOLOV5_WEIGHT_PATH in your YAML graph 94 | 95 | self.model = torch.hub.load( 96 | YOLOV5_PATH, 97 | "custom", 98 | path=YOLOV5_WEIGHT_PATH, 99 | source="local", 100 | ) 101 | 102 | self.model.to(torch.device(DEVICE)) 103 | self.model.eval() 104 | 105 | def on_event( 106 | self, 107 | dora_event: dict, 108 | send_output: Callable[[str, bytes], None], 109 | ) -> DoraStatus: 110 | if dora_event["type"] == "INPUT": 111 | return self.on_input(dora_event, send_output) 112 | return DoraStatus.CONTINUE 113 | 114 | def on_input( 115 | self, 116 | dora_input: dict, 117 | send_output: Callable[[str, bytes], None], 118 | ) -> DoraStatus: 119 | """ 120 | Handle image 121 | Args: 122 | dora_input["id"] (str): Id of the input declared in the yaml configuration 123 | dora_input["value"] (arrow.array (UInt8)): Bytes message of the input 124 | send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 
125 | """ 126 | if dora_input["id"] == "image": 127 | frame = ( 128 | dora_input["value"].to_numpy().reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 129 | ) 130 | frame = frame[:, :, :3] 131 | 132 | results = self.model(frame) # includes NMS 133 | arrays = np.array(results.xyxy[0].cpu())[ 134 | :, [0, 2, 1, 3, 4, 5] 135 | ] # xyxy -> xxyy 136 | arrays[:, 4] *= 100 137 | arrays = arrays.astype(np.int32) 138 | arrays = pa.array(arrays.ravel()) 139 | send_output("bbox", arrays, dora_input["metadata"]) 140 | return DoraStatus.CONTINUE 141 | -------------------------------------------------------------------------------- /carla/carla_gps_op.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import numpy as np 4 | import pyarrow as pa 5 | from _dora_utils import closest_vertex 6 | from _hd_map import HDMap 7 | from dora import DoraStatus 8 | from scipy.spatial.transform import Rotation as R 9 | 10 | pa.array([]) 11 | from carla import Map 12 | 13 | # Planning general 14 | NUM_WAYPOINTS_AHEAD = 120 15 | GOAL_LOCATION = [234, 59, 39] 16 | 17 | 18 | def filter_consecutive_duplicate(x): 19 | return np.array([elem for i, elem in enumerate(x) if (elem - x[i - 1]).any()]) 20 | 21 | 22 | class Operator: 23 | """ 24 | Compute GPS waypoints, given a `position`, an `objective waypoints` and an `opendrive` map. 25 | """ 26 | 27 | def __init__(self): 28 | self._goal_location = GOAL_LOCATION 29 | self.hd_map = None 30 | self.position = [] 31 | self.waypoints = np.array([]) 32 | self.target_speeds = np.array([]) 33 | self.objective_waypoints = [] 34 | self.waypoints_array = np.array([]) 35 | 36 | def on_event( 37 | self, 38 | dora_event: dict, 39 | send_output: Callable[[str, bytes], None], 40 | ) -> DoraStatus: 41 | if dora_event["type"] == "INPUT": 42 | return self.on_input(dora_event, send_output) 43 | return DoraStatus.CONTINUE 44 | 45 | def on_input( 46 | self, 47 | dora_input: dict, 48 | send_output: Callable[[str, bytes], None], 49 | ): 50 | if "position" == dora_input["id"]: 51 | self.position = np.array(dora_input["value"]) 52 | return DoraStatus.CONTINUE 53 | 54 | elif "opendrive" == dora_input["id"]: 55 | # Registering the map 56 | opendrive = dora_input["value"][0].as_py() 57 | self.hd_map = HDMap(Map("map", opendrive)) 58 | return DoraStatus.CONTINUE 59 | 60 | elif "objective_waypoints" == dora_input["id"]: 61 | self.objective_waypoints = np.array(dora_input["value"]).reshape((-1, 3)) 62 | 63 | if self.hd_map is None or len(self.position) == 0: 64 | print("No map within the gps or position") 65 | return DoraStatus.CONTINUE 66 | 67 | (index, _closest_objective) = closest_vertex( 68 | self.objective_waypoints[:, :2], 69 | np.array([self.position[:2]]), 70 | ) 71 | 72 | self.objective_waypoints = self.objective_waypoints[ 73 | index : index + NUM_WAYPOINTS_AHEAD 74 | ] 75 | self._goal_location = self.objective_waypoints[0] 76 | 77 | # Used cached waypoints but start at closest point. 
78 | if len(self.waypoints) != 0: 79 | (index, _) = closest_vertex( 80 | self.waypoints, 81 | np.array([self.position[:2]]), 82 | ) 83 | 84 | self.waypoints = self.waypoints[index : index + NUM_WAYPOINTS_AHEAD] 85 | self.target_speeds = self.target_speeds[ 86 | index : index + NUM_WAYPOINTS_AHEAD 87 | ] 88 | 89 | elif len(self.waypoints) == 0: 90 | # Deconstructing the position 91 | [x, y, z, rx, ry, rz, rw] = self.position 92 | [pitch, roll, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler( 93 | "xyz", degrees=False 94 | ) 95 | 96 | # Compute the waypoints 97 | waypoints = self.hd_map.compute_waypoints( 98 | [ 99 | x, 100 | y, 101 | self._goal_location[2], 102 | ], 103 | self._goal_location, 104 | )[:NUM_WAYPOINTS_AHEAD] 105 | 106 | ## Verify that computed waypoints are not inverted 107 | target_vector = waypoints[0] - self.position[:2] 108 | angle = np.arctan2(target_vector[1], target_vector[0]) 109 | diff_angle = np.arctan2(np.sin(angle - yaw), np.cos(angle - yaw)) 110 | # if np.abs(diff_angle) > np.pi / 2: 111 | # print("Error in computation of waypoints.") 112 | # print( 113 | # "The next target waypoint requires to make a 180 degrees turn." 114 | # ) 115 | # print(f"target waypoint: {waypoints[0]}") 116 | # print(f"position: {[x, y, z]}") 117 | # print(f"goal location: {self._goal_location}") 118 | # else: 119 | self.waypoints = waypoints 120 | self.target_speeds = np.array([5.0] * len(waypoints)) 121 | 122 | if len(self.waypoints) == 0: 123 | send_output( 124 | "gps_waypoints", 125 | pa.array(self.waypoints.ravel()), 126 | dora_input["metadata"], 127 | ) # World coordinate 128 | return DoraStatus.CONTINUE 129 | 130 | self.waypoints_array = np.concatenate( 131 | [ 132 | self.waypoints.T, 133 | self.target_speeds.reshape(1, -1), 134 | ] 135 | ).T.astype(np.float32) 136 | 137 | send_output( 138 | "gps_waypoints", 139 | pa.array(filter_consecutive_duplicate(self.waypoints_array).ravel()), 140 | dora_input["metadata"], 141 | ) # World coordinate 142 | 143 | return DoraStatus.CONTINUE 144 | -------------------------------------------------------------------------------- /operators/midas_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # MiDaS 3 | 4 | MiDaS models for computing relative depth from a single image. 5 | 6 | > MiDaS computes relative inverse depth from a single image. The repository provides multiple models that cover different use cases ranging from a small, high-speed model to a very large model that provide the highest accuracy. The models have been trained on 10 distinct datasets using multi-objective optimization to ensure high quality on a wide range of inputs. 7 | 8 | ### Installation: 9 | 10 | To install midas offline: 11 | 12 | ```bash 13 | cd $DORA_DEP_HOME/dependencies/ 14 | git clone git@github.com:isl-org/MiDaS.git 15 | cd MiDaS/weights 16 | # If you don't want to add manual download, the program will also automatically download the model file 17 | wget https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt 18 | cp midas_v21_small_256.pt $HOME/.cache/torch/hub/checkpoints/ 19 | ``` 20 | 21 | ## Inputs 22 | 23 | - image: HEIGHT x WIDTH x BGR array. 24 | 25 | ## Outputs 26 | 27 | - bbox: HEIGHT x WIDTH x Relative depth array. 
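The operator body further down normalizes the raw relative-depth prediction and applies a colormap before sending `depth_frame`. A condensed sketch of that post-processing step (`depth_to_colormap` is a hypothetical helper name, and the epsilon guard is an addition not present in the operator):

```python
import cv2
import numpy as np

def depth_to_colormap(depth: np.ndarray) -> np.ndarray:
    # `depth` is the HEIGHT x WIDTH float map predicted by MiDaS (relative
    # inverse depth). Scale it to 0-255 and colorize it for plotting.
    d_min, d_max = depth.min(), depth.max()
    scaled = 255 * (depth - d_min) / (d_max - d_min + 1e-6)  # epsilon avoids /0 on flat frames
    return cv2.applyColorMap(scaled.astype(np.uint8), cv2.COLORMAP_INFERNO)
```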
28 | 29 | ## Example output 30 | 31 | ![Imgur](https://i.imgur.com/UrF9iPN.png) 32 | 33 | Add the following dataflow configuration 34 | ```yaml 35 | - id: midas_op 36 | operator: 37 | outputs: 38 | - depth_frame 39 | inputs: 40 | image: webcam/image 41 | python: ../../operators/midas_op.py 42 | env: 43 | PYTORCH_DEVICE: "cuda" 44 | MIDAS_PATH: $DORA_DEP_HOME/dependencies/MiDaS/ 45 | MIDAS_WEIGHT_PATH: $DORA_DEP_HOME/dependencies/MiDaS/weights/midas_v21_small_256.pt 46 | MODEL_TYPE: "MiDaS_small" 47 | MODEL_NAME: "MiDaS_small" 48 | ``` 49 | > - model_type = "DPT_Large" # MiDaS v3 - Large (highest accuracy, slowest inference speed) 50 | > - model_type = "DPT_Hybrid" # MiDaS v3 - Hybrid (medium accuracy, medium inference speed) 51 | > - model_type = "MiDaS_small" # MiDaS v2.1 - Small (lowest accuracy, highest inference speed) 52 | 53 | """ 54 | import os 55 | from typing import Callable 56 | 57 | import cv2 58 | import numpy as np 59 | import torch 60 | from dora import DoraStatus 61 | 62 | IMAGE_WIDTH = 1920 63 | IMAGE_HEIGHT = 1080 64 | DEVICE = os.environ.get("PYTORCH_DEVICE") or "cpu" 65 | MIDAS_PATH = os.environ.get("MIDAS_PATH") 66 | MIDAS_WEIGHT_PATH = os.environ.get("MIDAS_WEIGHT_PATH") 67 | MODEL_TYPE = os.environ.get("MODEL_TYPE") 68 | MODEL_NAME = os.environ.get("MODEL_NAME") 69 | 70 | 71 | # example source: https://pytorch.org/hub/intelisl_midas_v2/ 72 | class Operator: 73 | def __init__(self): 74 | if MIDAS_PATH is None: 75 | # With internet 76 | self.model = torch.hub.load( 77 | "intel-isl/MiDaS", 78 | MODEL_TYPE, 79 | ) 80 | midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") 81 | else: 82 | # Without internet 83 | self.model = torch.hub.load( 84 | repo_or_dir=MIDAS_PATH, 85 | model=MODEL_NAME, 86 | weights=MIDAS_WEIGHT_PATH, 87 | source="local", 88 | ) 89 | midas_transforms = torch.hub.load( 90 | repo_or_dir=MIDAS_PATH, model="transforms", source="local" 91 | ) 92 | if MODEL_TYPE == "DPT_Large" or MODEL_TYPE == "DPT_Hybrid": 93 | self.transform = midas_transforms.dpt_transform 94 | else: 95 | self.transform = midas_transforms.small_transform 96 | self.model.to(torch.device(DEVICE)) 97 | self.model.eval() 98 | 99 | def on_event( 100 | self, 101 | dora_event: dict, 102 | send_output: Callable[[str, bytes], None], 103 | ) -> DoraStatus: 104 | if dora_event["type"] == "INPUT": 105 | return self.on_input(dora_event, send_output) 106 | return DoraStatus.CONTINUE 107 | 108 | def on_input( 109 | self, 110 | dora_input: dict, 111 | send_output: Callable[[str, bytes], None], 112 | ) -> DoraStatus: 113 | """Handle image 114 | Args: 115 | dora_input["id"] (str): Id of the input declared in the yaml configuration 116 | dora_input["data"] (bytes): Bytes message of the input 117 | send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 
118 | """ 119 | if dora_input["id"] == "image": 120 | # Convert bytes to numpy array 121 | frame = np.frombuffer( 122 | dora_input["data"], 123 | np.uint8, 124 | ).reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 125 | 126 | with torch.no_grad(): 127 | image = frame[:, :, :3] 128 | img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 129 | input_batch = self.transform(img).to(DEVICE) 130 | prediction = self.model(input_batch) 131 | prediction = torch.nn.functional.interpolate( 132 | prediction.unsqueeze(1), 133 | size=img.shape[:2], 134 | mode="bicubic", 135 | align_corners=False, 136 | ).squeeze() 137 | depth_output = prediction.cpu().numpy() 138 | depth_min = depth_output.min() 139 | depth_max = depth_output.max() 140 | normalized_depth = ( 141 | 255 * (depth_output - depth_min) / (depth_max - depth_min) 142 | ) 143 | normalized_depth *= 3 144 | depth_frame = ( 145 | np.repeat(np.expand_dims(normalized_depth, 2), 3, axis=2) / 3 146 | ) 147 | depth_frame = cv2.applyColorMap( 148 | np.uint8(depth_frame), cv2.COLORMAP_INFERNO 149 | ) 150 | height, width = depth_frame.shape[:2] 151 | depth_frame_4 = np.dstack( 152 | [depth_frame, np.ones((height, width), dtype="uint8") * 255] 153 | ) 154 | 155 | send_output( 156 | "depth_frame", 157 | depth_frame_4.tobytes(), 158 | dora_input["metadata"], 159 | ) 160 | return DoraStatus.CONTINUE 161 | -------------------------------------------------------------------------------- /carla/carla_source_node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import logging 4 | import typing 5 | 6 | import numpy as np 7 | import pyarrow as pa 8 | 9 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 10 | from _generate_world import add_camera, add_lidar, spawn_actors, spawn_driving_vehicle 11 | from dora import Node 12 | from numpy import linalg as LA 13 | from opentelemetry import trace 14 | from opentelemetry.exporter.jaeger.thrift import JaegerExporter 15 | from opentelemetry.sdk.resources import SERVICE_NAME, Resource 16 | from opentelemetry.sdk.trace import TracerProvider 17 | from opentelemetry.sdk.trace.export import BatchSpanProcessor 18 | from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator 19 | from scipy.spatial.transform import Rotation as R 20 | 21 | from carla import Client, Location, Rotation, Transform, VehicleControl, command 22 | 23 | 24 | def radians_to_steer(rad: float, steer_gain: float): 25 | """Converts radians to steer input. 26 | 27 | Returns: 28 | :obj:`float`: Between [-1.0, 1.0]. 
29 | """ 30 | steer = steer_gain * rad 31 | if steer > 0: 32 | steer = min(steer, 1) 33 | else: 34 | steer = max(steer, -1) 35 | return steer 36 | 37 | 38 | def euler_to_quaternion(yaw, pitch, roll): 39 | qx = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos( 40 | roll / 2 41 | ) * np.sin(pitch / 2) * np.sin(yaw / 2) 42 | qy = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin( 43 | roll / 2 44 | ) * np.cos(pitch / 2) * np.sin(yaw / 2) 45 | qz = np.cos(roll / 2) * np.cos(pitch / 2) * np.sin(yaw / 2) - np.sin( 46 | roll / 2 47 | ) * np.sin(pitch / 2) * np.cos(yaw / 2) 48 | qw = np.cos(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) + np.sin( 49 | roll / 2 50 | ) * np.sin(pitch / 2) * np.sin(yaw / 2) 51 | return [qx, qy, qz, qw] 52 | 53 | 54 | def serialize_context(context: dict) -> str: 55 | output = "" 56 | for key, value in context.items(): 57 | output += f"{key}:{value};" 58 | return output 59 | 60 | 61 | logger = logging.Logger("") 62 | CarrierT = typing.TypeVar("CarrierT") 63 | propagator = TraceContextTextMapPropagator() 64 | 65 | trace.set_tracer_provider( 66 | TracerProvider(resource=Resource.create({SERVICE_NAME: "carla_source_node"})) 67 | ) 68 | tracer = trace.get_tracer(__name__) 69 | jaeger_exporter = JaegerExporter( 70 | agent_host_name="172.17.0.1", 71 | agent_port=6831, 72 | ) 73 | span_processor = BatchSpanProcessor(jaeger_exporter) 74 | trace.get_tracer_provider().add_span_processor(span_processor) 75 | 76 | 77 | CARLA_SIMULATOR_HOST = "localhost" 78 | CARLA_SIMULATOR_PORT = "2000" 79 | LABELS = "image" 80 | IMAGE_WIDTH = 1920 81 | IMAGE_HEIGHT = 1080 82 | OBJECTIVE_WAYPOINTS = np.array([[234, 59, 39]], np.float32).ravel() 83 | STEER_GAIN = 2 84 | 85 | lidar_pc = None 86 | depth_frame = None 87 | camera_frame = None 88 | segmented_frame = None 89 | last_position = np.array([0.0, 0.0]) 90 | 91 | sensor_transform = Transform(Location(3, 0, 1), Rotation(pitch=0, yaw=0, roll=0)) 92 | 93 | 94 | def on_lidar_msg(frame): 95 | global lidar_pc 96 | frame = np.frombuffer(frame.raw_data, np.float32) 97 | point_cloud = np.reshape(frame, (-1, 4)) 98 | point_cloud = point_cloud[:, :3] 99 | 100 | lidar_pc = pa.array(np.ascontiguousarray(point_cloud).ravel()) 101 | 102 | 103 | def on_camera_msg(frame): 104 | # frame = np.frombuffer(frame.raw_data, np.uint8) 105 | # frame = np.reshape(frame, (IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 106 | 107 | global camera_frame 108 | camera_frame = pa.array(np.frombuffer(frame.raw_data, np.uint8)) 109 | 110 | 111 | client = Client(CARLA_SIMULATOR_HOST, int(CARLA_SIMULATOR_PORT)) 112 | client.set_timeout(30.0) # seconds 113 | world = client.get_world() 114 | 115 | (_, _, _) = spawn_actors( 116 | client, 117 | world, 118 | 8000, 119 | "0.9.13", 120 | -1, 121 | True, 122 | 0, 123 | 0, 124 | logger, 125 | ) 126 | 127 | 128 | ego_vehicle, vehicle_id = spawn_driving_vehicle(client, world) 129 | lidar = add_lidar(world, sensor_transform, on_lidar_msg, ego_vehicle) 130 | camera = add_camera(world, sensor_transform, on_camera_msg, ego_vehicle) 131 | 132 | node = Node() 133 | 134 | node.send_output("opendrive", pa.array([world.get_map().to_opendrive()])) 135 | 136 | 137 | def main(): 138 | global last_position 139 | if camera_frame is None: 140 | return {} 141 | 142 | vec_transform = ego_vehicle.get_transform() 143 | x = vec_transform.location.x 144 | y = vec_transform.location.y 145 | z = vec_transform.location.z 146 | yaw = vec_transform.rotation.yaw 147 | pitch = vec_transform.rotation.pitch 148 | roll = vec_transform.rotation.roll 149 | 150 
| [[qx, qy, qz, qw]] = R.from_euler( 151 | "xyz", [[roll, pitch, yaw]], degrees=True 152 | ).as_quat() 153 | 154 | position = np.array([x, y, z, qx, qy, qz, qw], np.float32) 155 | # with tracer.start_as_current_span("source") as _span: 156 | output = {} 157 | propagator.inject(output) 158 | metadata = {"open_telemetry_context": serialize_context(output)} 159 | node.send_output("position", pa.array(position), metadata) 160 | node.send_output( 161 | "speed", 162 | pa.array(np.array([LA.norm(position[:2] - last_position[:2])], np.float32)), 163 | metadata, 164 | ) 165 | node.send_output("image", camera_frame, metadata) 166 | node.send_output( 167 | "objective_waypoints", 168 | pa.array(OBJECTIVE_WAYPOINTS), 169 | metadata, 170 | ) 171 | # node.send_output("depth_frame", depth_frame, metadata) 172 | # node.send_output("segmented_frame", segmented_frame, metadata) 173 | node.send_output("lidar_pc", lidar_pc, metadata) 174 | last_position = position 175 | 176 | 177 | for event in node: 178 | if event["type"] == "INPUT": 179 | if event["id"] == "control": 180 | [throttle, target_angle, brake] = np.array(event["value"]) 181 | 182 | steer = radians_to_steer(target_angle, STEER_GAIN) 183 | vec_control = VehicleControl( 184 | steer=float(steer), 185 | throttle=float(throttle), 186 | brake=float(brake), 187 | hand_brake=False, 188 | ) 189 | 190 | client.apply_batch([command.ApplyVehicleControl(vehicle_id, vec_control)]) 191 | else: 192 | main() 193 | -------------------------------------------------------------------------------- /operators/imfnet_op.py: -------------------------------------------------------------------------------- 1 | import math 2 | import time 3 | import zlib 4 | from typing import Callable 5 | 6 | import cv2 7 | import numpy as np 8 | import open3d as o3d 9 | import torch 10 | from dora import DoraStatus 11 | from imfnet import ( 12 | extract_features, 13 | get_model, 14 | make_open3d_feature_from_numpy, 15 | make_open3d_point_cloud, 16 | process_image, 17 | ) 18 | 19 | VOXEL_SIZE = 2.5 20 | IMAGE_WIDTH = 1920 21 | IMAGE_HEIGHT = 1080 22 | 23 | 24 | def run_ransac(xyz0, xyz1, feat0, feat1, voxel_size, ransac_n=4): 25 | distance_threshold = voxel_size * 1.5 26 | result_ransac = o3d.pipelines.registration.registration_ransac_based_on_feature_matching( 27 | source=xyz0, 28 | target=xyz1, 29 | source_feature=feat0, 30 | target_feature=feat1, 31 | max_correspondence_distance=distance_threshold, 32 | estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPoint( 33 | False 34 | ), 35 | ransac_n=ransac_n, 36 | checkers=[ 37 | o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength( 38 | 0.9 39 | ), 40 | o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance( 41 | distance_threshold 42 | ), 43 | ], 44 | criteria=o3d.pipelines.registration.RANSACConvergenceCriteria( 45 | 1_000_000, 0.999 46 | ), 47 | mutual_filter=False, 48 | ) 49 | return result_ransac 50 | 51 | 52 | class Operator: 53 | """ 54 | Infering object from images 55 | """ 56 | 57 | def __init__(self): 58 | self.model, self.config = get_model() 59 | self.frame = [] 60 | self.point_cloud = [] 61 | self.previous_pc_down = [] 62 | self.previous_features = [] 63 | 64 | def on_event( 65 | self, 66 | dora_event: dict, 67 | send_output: Callable[[str, bytes], None], 68 | ) -> DoraStatus: 69 | if dora_event["type"] == "INPUT": 70 | return self.on_input(dora_event, send_output) 71 | return DoraStatus.CONTINUE 72 | 73 | def on_input( 74 | self, 75 | dora_input: dict, 76 | send_output: 
Callable[[str, bytes], None], 77 | ) -> DoraStatus: 78 | """Handle image 79 | Args: 80 | dora_input["id"](str): Id of the input declared in the yaml configuration 81 | dora_input["data"] (bytes): Bytes message of the input 82 | send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 83 | """ 84 | 85 | if dora_input["id"] == "lidar_pc": 86 | point_cloud = np.frombuffer( 87 | zlib.decompress(dora_input["data"]), np.dtype("f4") 88 | ) 89 | point_cloud = np.reshape( 90 | point_cloud, (int(point_cloud.shape[0] / 4), 4) 91 | ) 92 | 93 | # Default Unreal coordinate is: 94 | # +x is forward, +y to the right, and +z up 95 | # To velodyne coordinate first 96 | # +x into the screen, +y to the left, and +z up. 97 | # The latest coordinate space is the unreal space. 98 | 99 | point_cloud = np.dot( 100 | point_cloud, 101 | np.array( 102 | [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] 103 | ), 104 | ) 105 | 106 | point_cloud = point_cloud[:, :3] 107 | self.point_cloud = point_cloud 108 | 109 | if dora_input["id"] == "image": 110 | frame = np.frombuffer( 111 | dora_input["data"], 112 | np.uint8, 113 | ).reshape((IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 114 | # if len(self.frame) != 0: 115 | # cv2.imwrite("previous_image.jpg", self.frame) 116 | # cv2.imwrite("current_image.jpg", frame) 117 | 118 | frame = frame[:, :, :3] 119 | 120 | if ( 121 | frame.shape[0] != self.config.image_H 122 | or frame.shape[1] != self.config.image_W 123 | ): 124 | image = process_image( 125 | image=frame, 126 | aim_H=self.config.image_H, 127 | aim_W=self.config.image_W, 128 | ) 129 | 130 | image = np.transpose(image, axes=(2, 0, 1)) 131 | self.frame = np.expand_dims(image, axis=0) 132 | return DoraStatus.CONTINUE 133 | 134 | if len(self.frame) == 0: 135 | return DoraStatus.CONTINUE 136 | # Extract features 137 | current_pc_down, current_features = extract_features( 138 | self.model, 139 | xyz=self.point_cloud, 140 | rgb=None, 141 | normal=None, 142 | voxel_size=VOXEL_SIZE, 143 | device=torch.device("cuda"), 144 | skip_check=True, 145 | image=self.frame, 146 | ) 147 | 148 | current_pc_down = make_open3d_point_cloud(current_pc_down) 149 | current_features = current_features.cpu().detach().numpy() 150 | current_features = make_open3d_feature_from_numpy(current_features) 151 | 152 | if isinstance(self.previous_features, list): 153 | self.previous_pc_down = current_pc_down 154 | self.previous_features = current_features 155 | return DoraStatus.CONTINUE 156 | 157 | timestamp = time.time() 158 | print("running ransac") 159 | result = run_ransac( 160 | self.previous_pc_down, 161 | current_pc_down, 162 | self.previous_features, 163 | current_features, 164 | VOXEL_SIZE, 165 | ) 166 | T = result.transformation 167 | 168 | if result.fitness < 0.6: 169 | print(f"fitness: {result.fitness}") 170 | print("could not fit IMFnet") 171 | self.previous_pc_down = current_pc_down 172 | self.previous_features = current_features 173 | return DoraStatus.CONTINUE 174 | 175 | if result.fitness == 1.0: 176 | print("car did not move") 177 | self.previous_pc_down = current_pc_down 178 | self.previous_features = current_features 179 | return DoraStatus.CONTINUE 180 | 181 | print(f"elapsed: {time.time() - timestamp}") 182 | 183 | self.previous_pc_down.paint_uniform_color([1, 0.706, 0]) 184 | current_pc_down.paint_uniform_color([0, 0.651, 0.929]) 185 | o3d.visualization.draw_geometries( 186 | [self.previous_pc_down, current_pc_down] 187 | ) 188 | # o3d.io.write_point_cloud("previous_pc.pcd", self.previous_pc_down) 189 | # 
o3d.io.write_point_cloud("current_pc.pcd", current_pc_down) 190 | self.previous_pc_down.transform(T) 191 | o3d.visualization.draw_geometries( 192 | [self.previous_pc_down, current_pc_down] 193 | ) 194 | 195 | # Velodyne coordinate is 196 | # +x into the screen, +y to the left, and +z up. 197 | # Camera coordinate is 198 | # +x to right, +y to down, +z into the screen. 199 | pitch_r = math.atan2(T[2, 1], T[2, 2]) 200 | yaw_r = math.atan2(T[1, 0], T[0, 0]) 201 | roll_r = -math.atan2(-T[2, 0], math.sqrt(T[2, 1] ** 2 + T[2, 2] ** 2)) 202 | x, y, z = -T[0, 3], T[1, 3], T[2, 3] 203 | 204 | relative_position = np.array( 205 | [ 206 | x, 207 | y, 208 | z, 209 | math.degrees(pitch_r), 210 | math.degrees(yaw_r), 211 | math.degrees(roll_r), 212 | ], 213 | np.float32, 214 | ) 215 | print(f"Infered: {relative_position}") 216 | 217 | print(f"fitness: {result.fitness}") 218 | # send_output("relative_position", position.tobytes()) 219 | 220 | self.previous_pc_down = current_pc_down 221 | self.previous_features = current_features 222 | 223 | return DoraStatus.CONTINUE 224 | -------------------------------------------------------------------------------- /carla/_dora_utils.py: -------------------------------------------------------------------------------- 1 | import math 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | from sklearn.metrics import pairwise_distances 6 | 7 | 8 | def distance_vertex(left_vertix: np.array, right_vertix: np.array) -> np.array: 9 | return np.linalg.norm(left_vertix[:3] - right_vertix[:3]) 10 | 11 | 12 | def distance_points(left_point: np.array, right_point: np.array) -> np.array: 13 | return np.linalg.norm(left_point - right_point) 14 | 15 | 16 | def closest_vertex(vertices: np.array, point: np.array) -> Tuple[int, np.array]: 17 | assert ( 18 | vertices.shape[1] == point.shape[1] 19 | ), "vertice has more coordinate than point" 20 | argmin_vertice = pairwise_distances(vertices, point).argmin() 21 | 22 | min_vertice = vertices[argmin_vertice] 23 | 24 | return (argmin_vertice, min_vertice) 25 | 26 | 27 | def get_projection_matrix(position: np.array): 28 | """Creates a transformation matrix to convert points in the 3D world 29 | coordinate space with respect to the object. 30 | Use the transform_points function to transpose a given set of points 31 | with respect to the object. 32 | Args: 33 | position (np.array): [x, y, z, stir, yaw, roll] 34 | Returns: 35 | A 4x4 numpy matrix which represents the transformation matrix. 36 | """ 37 | matrix = np.identity(4) 38 | [x, y, z, pitch, yaw, roll] = position 39 | cy = np.cos(np.radians(yaw)) 40 | sy = np.sin(np.radians(yaw)) 41 | cr = np.cos(np.radians(roll)) 42 | sr = np.sin(np.radians(roll)) 43 | cp = np.cos(np.radians(pitch)) 44 | sp = np.sin(np.radians(pitch)) 45 | matrix[:3, 3] = [x, y, z] 46 | matrix[0, 0] = cp * cy 47 | matrix[0, 1] = cy * sp * sr - sy * cr 48 | matrix[0, 2] = -1 * (cy * sp * cr + sy * sr) 49 | matrix[1, 0] = sy * cp 50 | matrix[1, 1] = sy * sp * sr + cy * cr 51 | matrix[1, 2] = cy * sr - sy * sp * cr 52 | matrix[2, 0] = sp 53 | matrix[2, 1] = -1 * (cp * sr) 54 | matrix[2, 2] = cp * cr 55 | return matrix 56 | 57 | 58 | def to_world_coordinate(points: np.array, matrix: np.array) -> np.array: 59 | """Internal function to transform the points according to the 60 | given matrix. 
This function either converts the points from 61 | coordinate space relative to the transform to the world coordinate 62 | space (using self.matrix), or from world coordinate space to the 63 | space relative to the transform (using inv(self.matrix)) 64 | 65 | Args: 66 | points: An n by 3 numpy array, where each row is the 67 | (x, y, z) coordinates of a point. 68 | matrix: The matrix of the transformation to apply. 69 | 70 | Returns: 71 | An n by 3 numpy array of transformed points. 72 | """ 73 | # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. 74 | # So let's transpose the point matrix. 75 | points = points.transpose() 76 | 77 | # Add 1s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]] 78 | points = np.append(points, np.ones((1, points.shape[1])), axis=0) 79 | 80 | # Point transformation (depends on the given matrix) 81 | points = np.dot(matrix, points) 82 | 83 | # Get all but the last row in array form. 84 | points = np.asarray(points[0:3].transpose()).astype(np.float32) 85 | 86 | return points 87 | 88 | 89 | def get_extrinsic_matrix(transform): 90 | """Converts a Transform from the camera coordinate space to the 91 | Unreal coordinate space. 92 | The camera space is defined as: 93 | +x to right, +y to down, +z into the screen. 94 | The unreal coordinate space is defined as: 95 | +x into the screen, +y to right, +z to up. 96 | Args: 97 | transform (:py:class:`~pylot.utils.Transform`): The transform to 98 | convert to Unreal coordinate space. 99 | Returns: 100 | :py:class:`~pylot.utils.Transform`: The given transform after 101 | transforming to the Unreal coordinate space. 102 | """ 103 | 104 | to_unreal_transform = np.array( 105 | [[0, 0, 1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]] 106 | ) 107 | return np.dot(transform, to_unreal_transform) 108 | 109 | 110 | def get_intrinsic_matrix(width: int, height: int, fov: float): 111 | """Creates the intrinsic matrix for a camera with the given 112 | parameters. 113 | Args: 114 | width (int): The width of the image returned by the camera. 115 | height (int): The height of the image returned by the camera. 116 | fov (float): The field-of-view of the camera. 117 | Returns: 118 | :py:class:`numpy.ndarray`: A 3x3 intrinsic matrix of the camera. 119 | """ 120 | 121 | k = np.identity(3) 122 | # We use width - 1 and height - 1 to find the center column and row 123 | # of the image, because the images are indexed from 0. 124 | 125 | # Center column of the image. 126 | k[0, 2] = (width - 1) / 2.0 127 | # Center row of the image. 128 | k[1, 2] = (height - 1) / 2.0 129 | # Focal length. 130 | k[0, 0] = k[1, 1] = (width - 1) / (2.0 * np.tan(fov * np.pi / 360.0)) 131 | return k 132 | 133 | 134 | def get_angle(left, right) -> float: 135 | """Computes the angle between the vector and another vector 136 | in radians.""" 137 | [left_x, left_y] = left 138 | [right_x, right_y] = right 139 | 140 | angle = math.atan2(left_y, left_x) - math.atan2(right_y, right_x) 141 | if angle > math.pi: 142 | angle -= 2 * math.pi 143 | elif angle < -math.pi: 144 | angle += 2 * math.pi 145 | return angle 146 | 147 | 148 | def location_to_camera_view( 149 | location: np.array, intrinsic_matrix, extrinsic_matrix 150 | ): 151 | """Converts the given 3D vector to the view of the camera using 152 | the extrinsic and the intrinsic matrix. 153 | Args: 154 | location = [[x, y, z]] 155 | extrinsic_matrix: The extrinsic matrix of the camera. 156 | Returns: 157 | :py:class:`.Vector3D`: An instance with the coordinates converted 158 | to the camera view. 
159 | """ 160 | position_vector = location.T 161 | position_vector = np.append(position_vector, [[1.0]]) 162 | 163 | # Transform the points to the camera in 3D. 164 | transformed_3D_pos = np.dot( 165 | np.linalg.inv(extrinsic_matrix), position_vector 166 | ) 167 | 168 | # Transform the points to 2D. 169 | position_2D = np.dot(intrinsic_matrix, transformed_3D_pos[:3]) 170 | 171 | # Normalize the 2D points. 172 | location_2D = np.array( 173 | [ 174 | float(position_2D[0] / position_2D[2]), 175 | float(position_2D[1] / position_2D[2]), 176 | float(position_2D[2]), 177 | ] 178 | ) 179 | return location_2D 180 | 181 | 182 | LABELS = [ 183 | "person", 184 | "bicycle", 185 | "car", 186 | "motorcycle", 187 | "airplane", 188 | "bus", 189 | "train", 190 | "truck", 191 | "boat", 192 | "traffic light", 193 | "fire hydrant", 194 | "stop sign", 195 | "parking meter", 196 | "bench", 197 | "bird", 198 | "cat", 199 | "dog", 200 | "horse", 201 | "sheep", 202 | "cow", 203 | "elephant", 204 | "bear", 205 | "zebra", 206 | "giraffe", 207 | "backpack", 208 | "umbrella", 209 | "handbag", 210 | "tie", 211 | "suitcase", 212 | "frisbee", 213 | "skis", 214 | "snowboard", 215 | "sports ball", 216 | "kite", 217 | "baseball bat", 218 | "baseball glove", 219 | "skateboard", 220 | "surfboard", 221 | "tennis racket", 222 | "bottle", 223 | "wine glass", 224 | "cup", 225 | "fork", 226 | "knife", 227 | "spoon", 228 | "bowl", 229 | "banana", 230 | "apple", 231 | "sandwich", 232 | "orange", 233 | "broccoli", 234 | "carrot", 235 | "hot dog", 236 | "pizza", 237 | "donut", 238 | "cake", 239 | "chair", 240 | "couch", 241 | "potted plant", 242 | "bed", 243 | "dining table", 244 | "toilet", 245 | "tv", 246 | "laptop", 247 | "mouse", 248 | "remote", 249 | "keyboard", 250 | "cell phone", 251 | "microwave", 252 | "oven", 253 | "toaster", 254 | "sink", 255 | "refrigerator", 256 | "book", 257 | "clock", 258 | "vase", 259 | "scissors", 260 | "teddy bear", 261 | "hair drier", 262 | "toothbrush", 263 | ] 264 | -------------------------------------------------------------------------------- /operators/hybrid_astar_op.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Callable 3 | 4 | import numpy as np 5 | from dora import DoraStatus 6 | from dora_utils import closest_vertex, pairwise_distances 7 | from hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper import ( 8 | apply_hybrid_astar, 9 | ) 10 | 11 | # Hybrid ASTAR 12 | STEP_SIZE_HYBRID_ASTAR = 3.0 13 | MAX_ITERATIONS_HYBRID_ASTAR = 2000 14 | COMPLETION_THRESHOLD = 1.0 15 | ANGLE_COMPLETION_THRESHOLD = 100 16 | RAD_STEP = 4.0 17 | RAD_UPPER_RANGE = 4.0 18 | RAD_LOWER_RANGE = 4.0 19 | OBSTACLE_CLEARANCE_HYBRID_ASTAR = 0.5 20 | LANE_WIDTH_HYBRID_ASTAR = 6.0 21 | RADIUS = 6.0 22 | CAR_LENGTH = 4.0 23 | CAR_WIDTH = 1.8 24 | 25 | OBSTACLE_DISTANCE_WAYPOINTS_THRESHOLD = 10 26 | OBSTACLE_RADIUS = 1.0 27 | 28 | # Planning general 29 | TARGET_SPEED = 10.0 30 | NUM_WAYPOINTS_AHEAD = 30 31 | GOAL_LOCATION = [234, 59, 39] 32 | 33 | 34 | def get_obstacle_list(obstacle_predictions, waypoints): 35 | if len(obstacle_predictions) == 0 or len(waypoints) == 0: 36 | return np.empty((0, 4)) 37 | obstacle_list = [] 38 | 39 | distances = pairwise_distances(waypoints, obstacle_predictions[:, :2]).min( 40 | 0 41 | ) 42 | for distance, prediction in zip(distances, obstacle_predictions): 43 | # Use all prediction times as potential obstacles. 
44 | if distance < OBSTACLE_DISTANCE_WAYPOINTS_THRESHOLD: 45 | [x, y, _, _confidence, _label] = prediction 46 | obstacle_size = np.array( 47 | [ 48 | x - OBSTACLE_RADIUS, 49 | y - OBSTACLE_RADIUS, 50 | x + OBSTACLE_RADIUS, 51 | y + OBSTACLE_RADIUS, 52 | ] 53 | ) 54 | 55 | # Remove traffic light. TODO: Take into account traffic light. 56 | if _label != 9: 57 | obstacle_list.append(obstacle_size) 58 | 59 | if len(obstacle_list) == 0: 60 | return np.empty((0, 4)) 61 | return np.array(obstacle_list) 62 | 63 | 64 | class Operator: 65 | """ 66 | Compute a `control` based on the position and the waypoints of the car. 67 | """ 68 | 69 | def __init__(self): 70 | self.obstacles = [] 71 | self.position = [] 72 | self.gps_waypoints = [] 73 | self.waypoints = [] 74 | self.obstacle_metadata = {} 75 | self.gps_metadata = {} 76 | self.metadata = {} 77 | self._hyperparameters = { 78 | "step_size": STEP_SIZE_HYBRID_ASTAR, 79 | "max_iterations": MAX_ITERATIONS_HYBRID_ASTAR, 80 | "completion_threshold": COMPLETION_THRESHOLD, 81 | "angle_completion_threshold": ANGLE_COMPLETION_THRESHOLD, 82 | "rad_step": RAD_STEP, 83 | "rad_upper_range": RAD_UPPER_RANGE, 84 | "rad_lower_range": RAD_LOWER_RANGE, 85 | "obstacle_clearance": OBSTACLE_CLEARANCE_HYBRID_ASTAR, 86 | "lane_width": LANE_WIDTH_HYBRID_ASTAR, 87 | "radius": RADIUS, 88 | "car_length": CAR_LENGTH, 89 | "car_width": CAR_WIDTH, 90 | } 91 | 92 | def on_event( 93 | self, 94 | dora_event: dict, 95 | send_output: Callable[[str, bytes], None], 96 | ) -> DoraStatus: 97 | if dora_event["type"] == "INPUT": 98 | return self.on_input(dora_event, send_output) 99 | return DoraStatus.CONTINUE 100 | 101 | def on_input( 102 | self, 103 | dora_input: dict, 104 | send_output: Callable[[str, bytes], None], 105 | ): 106 | 107 | if dora_input["id"] == "gps_waypoints": 108 | waypoints = np.frombuffer(dora_input["data"]) 109 | waypoints = waypoints.reshape((-1, 3)) 110 | self.gps_waypoints = waypoints[0:2].T 111 | self.gps_metadata = dora_input["metadata"] 112 | if len(self.waypoints) == 0: 113 | self.waypoints = self.gps_waypoints 114 | 115 | elif dora_input["id"] == "position": 116 | self.position = np.frombuffer(dora_input["data"], np.float32) 117 | 118 | elif dora_input["id"] == "obstacles": 119 | obstacles = np.frombuffer(dora_input["data"], np.float32).reshape( 120 | (-1, 5) 121 | ) 122 | self.obstacles = obstacles 123 | self.obstacle_metadata = dora_input["metadata"] 124 | 125 | if len(self.gps_waypoints) != 0: 126 | (waypoints, target_speeds) = self.run( 127 | time.time() 128 | ) # , open_drive) 129 | self.waypoints = waypoints 130 | 131 | waypoints_array = np.concatenate( 132 | [waypoints.T, target_speeds.reshape(1, -1)] 133 | ).T 134 | 135 | send_output( 136 | "waypoints", waypoints_array.tobytes(), self.metadata 137 | ) 138 | return DoraStatus.CONTINUE 139 | 140 | def run(self, _ttd=None): 141 | """Runs the planner. 142 | 143 | Note: 144 | The planner assumes that the world is up-to-date. 145 | 146 | Returns: 147 | :py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the 148 | planned trajectory. 
149 | """ 150 | 151 | # Remove already past waypoints for gps 152 | (index, _) = closest_vertex( 153 | self.gps_waypoints, 154 | np.array([self.position[:2]]), 155 | ) 156 | 157 | self.gps_waypoints = self.gps_waypoints[ 158 | index : index + NUM_WAYPOINTS_AHEAD 159 | ] 160 | 161 | # Check if the obstacles are still on gps waypoint trajectory 162 | obstacle_list = get_obstacle_list(self.obstacles, self.gps_waypoints) 163 | 164 | if len(obstacle_list) == 0: 165 | # Do not use Hybrid A* if there are no obstacles. 166 | speeds = np.array([TARGET_SPEED] * len(self.gps_waypoints)) 167 | self.metadata = self.obstacle_metadata 168 | return self.gps_waypoints, speeds 169 | 170 | # Remove already past waypoints 171 | (index, _) = closest_vertex( 172 | self.waypoints, 173 | np.array([self.position[:2]]), 174 | ) 175 | 176 | self.waypoints = self.waypoints[index : index + NUM_WAYPOINTS_AHEAD] 177 | 178 | # Check if obstacles are on solved waypoints trajectory 179 | obstacle_list = get_obstacle_list(self.obstacles, self.waypoints) 180 | self.metadata = self.obstacle_metadata 181 | 182 | if len(obstacle_list) == 0: 183 | # Do not use Hybrid A* if there are no obstacles. 184 | speeds = np.array([TARGET_SPEED] * len(self.waypoints)) 185 | return self.waypoints, speeds 186 | # Hybrid a* does not take into account the driveable region. 187 | # It constructs search space as a top down, minimum bounding 188 | # rectangle with padding in each dimension. 189 | 190 | initial_conditions = self._compute_initial_conditions(obstacle_list) 191 | 192 | path_x, path_y, _, success = apply_hybrid_astar( 193 | initial_conditions, self._hyperparameters 194 | ) 195 | 196 | if not success: 197 | print("could not find waypoints") 198 | speeds = np.array([0] * len(self.waypoints)) 199 | return self.waypoints, speeds 200 | else: 201 | output_wps = np.array([path_x, path_y]).T 202 | speeds = np.array([TARGET_SPEED] * len(path_x)) 203 | 204 | return output_wps, speeds 205 | 206 | def _compute_initial_conditions(self, obstacle_list): 207 | [x, y, _, _, yaw, _, _] = self.position 208 | start = np.array( 209 | [ 210 | x, 211 | y, 212 | np.deg2rad(yaw), 213 | ] 214 | ) 215 | end_index = min( 216 | NUM_WAYPOINTS_AHEAD, 217 | len(self.waypoints) - 1, 218 | ) 219 | 220 | if end_index < 0: 221 | # If no more waypoints left. Then our location is our end wp. 
222 | end = np.array( 223 | [ 224 | x, 225 | y, 226 | np.deg2rad(yaw), 227 | ] 228 | ) 229 | 230 | else: 231 | [end_x, end_y] = self.waypoints[end_index] 232 | end = np.array( 233 | [ 234 | end_x, 235 | end_y, 236 | np.deg2rad(yaw), 237 | ] 238 | ) 239 | 240 | initial_conditions = { 241 | "start": start, 242 | "end": end, 243 | "obs": obstacle_list, 244 | } 245 | return initial_conditions 246 | -------------------------------------------------------------------------------- /operators/dora_utils.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | from scipy.spatial.transform import Rotation as R 5 | from sklearn.metrics import pairwise_distances 6 | 7 | 8 | def distance_vertex(left_vertix: np.array, right_vertix: np.array) -> np.array: 9 | return np.linalg.norm(left_vertix[:2] - right_vertix[:2]) 10 | 11 | 12 | def distance_points(left_point: np.array, right_point: np.array) -> np.array: 13 | return np.linalg.norm(left_point - right_point) 14 | 15 | 16 | def closest_vertex(vertices: np.array, point: np.array) -> Tuple[int, np.array]: 17 | assert ( 18 | vertices.shape[1] == point.shape[1] 19 | ), "vertice has more coordinate than point" 20 | argmin_vertice = pairwise_distances(vertices, point).argmin() 21 | 22 | min_vertice = vertices[argmin_vertice] 23 | 24 | return (argmin_vertice, min_vertice) 25 | 26 | 27 | def get_projection_matrix(position: np.array): 28 | """Creates a transformation matrix to convert points in the 3D world 29 | coordinate space with respect to the object. 30 | Use the transform_points function to transpose a given set of points 31 | with respect to the object. 32 | Args: 33 | position (np.array): [x, y, z, stir, yaw, roll] 34 | Returns: 35 | A 4x4 numpy matrix which represents the transformation matrix. 36 | """ 37 | matrix = np.identity(4) 38 | [x, y, z, rx, ry, rz, rw] = position 39 | [roll, pitch, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler( 40 | "xyz", degrees=False 41 | ) 42 | 43 | cy = np.cos((yaw)) 44 | sy = np.sin((yaw)) 45 | cr = np.cos((roll)) 46 | sr = np.sin((roll)) 47 | cp = np.cos((pitch)) 48 | sp = np.sin((pitch)) 49 | matrix[:3, 3] = [x, y, z] 50 | matrix[0, 0] = cp * cy 51 | matrix[0, 1] = cy * sp * sr - sy * cr 52 | matrix[0, 2] = -1 * (cy * sp * cr + sy * sr) 53 | matrix[1, 0] = sy * cp 54 | matrix[1, 1] = sy * sp * sr + cy * cr 55 | matrix[1, 2] = cy * sr - sy * sp * cr 56 | matrix[2, 0] = sp 57 | matrix[2, 1] = -1 * (cp * sr) 58 | matrix[2, 2] = cp * cr 59 | return matrix 60 | 61 | 62 | def to_world_coordinate(points: np.array, matrix: np.array) -> np.array: 63 | """Internal function to transform the points according to the 64 | given matrix. This function either converts the points from 65 | coordinate space relative to the transform to the world coordinate 66 | space (using self.matrix), or from world coordinate space to the 67 | space relative to the transform (using inv(self.matrix)) 68 | Args: 69 | points: An n by 3 numpy array, where each row is the 70 | (x, y, z) coordinates of a point. 71 | matrix: The matrix of the transformation to apply. 72 | Returns: 73 | An n by 3 numpy array of transformed points. 74 | """ 75 | # Needed format: [[X0,..Xn],[Y0,..Yn],[Z0,..Zn]]. 76 | # So let's transpose the point matrix. 
77 | points = points.T 78 | 79 | # Add 1s row: [[X0..,Xn],[Y0..,Yn],[Z0..,Zn],[1,..1]] 80 | points = np.append(points, np.ones((1, points.shape[1])), axis=0) 81 | 82 | # Point transformation (depends on the given matrix) 83 | points = np.dot(matrix, points) 84 | 85 | # Get all but the last row in array form. 86 | points = np.asarray(points[0:3].T).astype(np.float32) 87 | 88 | return points 89 | 90 | 91 | def get_extrinsic_matrix(transform): 92 | """Converts a Transform from the camera coordinate space to the 93 | Unreal coordinate space. 94 | The camera space is defined as: 95 | +x to right, +y to down, +z into the screen. 96 | The unreal coordinate space is defined as: 97 | +x into the screen, +y to right, +z to up. 98 | Args: 99 | transform (:py:class:`~pylot.utils.Transform`): The transform to 100 | convert to Unreal coordinate space. 101 | Returns: 102 | :py:class:`~pylot.utils.Transform`: The given transform after 103 | transforming to the Unreal coordinate space. 104 | """ 105 | 106 | to_unreal_transform = np.array( 107 | # [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]] 108 | [[0, 0, 1, 0], [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]] 109 | ) 110 | return np.dot(transform, to_unreal_transform) 111 | 112 | 113 | def get_intrinsic_matrix(width: int, height: int, fov: float): 114 | """Creates the intrinsic matrix for a camera with the given 115 | parameters. 116 | Args: 117 | width (int): The width of the image returned by the camera. 118 | height (int): The height of the image returned by the camera. 119 | fov (float): The field-of-view of the camera. 120 | Returns: 121 | :py:class:`numpy.ndarray`: A 3x3 intrinsic matrix of the camera. 122 | """ 123 | 124 | k = np.identity(3) 125 | # We use width - 1 and height - 1 to find the center column and row 126 | # of the image, because the images are indexed from 0. 127 | 128 | # Center column of the image. 129 | k[0, 2] = (width - 1) / 2.0 130 | # Center row of the image. 131 | k[1, 2] = (height - 1) / 2.0 132 | # Focal length. 133 | k[0, 0] = k[1, 1] = (width - 1) / (2.0 * np.tan(fov * np.pi / 360.0)) 134 | return k 135 | 136 | 137 | def location_to_camera_view( 138 | location: np.array, intrinsic_matrix, inv_extrinsic_matrix 139 | ): 140 | """Converts the given 3D vector to the view of the camera using 141 | the extrinsic and the intrinsic matrix. 142 | Args: 143 | location = [[x, y, z]] 144 | extrinsic_matrix: The extrinsic matrix of the camera. 145 | Returns: 146 | :py:class:`.Vector3D`: An instance with the coordinates converted 147 | to the camera view. 148 | """ 149 | if len(location) == 0: 150 | return np.array([]) 151 | position_vector = np.hstack((location, np.ones((location.shape[0], 1)))) 152 | position_vector = position_vector.T 153 | 154 | # Transform the points to the camera in 3D. 155 | transformed_3D_pos = np.dot(inv_extrinsic_matrix, position_vector) 156 | 157 | # Transform the points to 2D. 158 | position_2D = np.dot(intrinsic_matrix, transformed_3D_pos[:3]) 159 | 160 | # Normalize the 2D points. 161 | if not position_2D[2].all(): 162 | print("could not inverse to camera image") 163 | return np.array([]) 164 | 165 | location_2D = np.array( 166 | [ 167 | (position_2D[0] / position_2D[2]), 168 | (position_2D[1] / position_2D[2]), 169 | (position_2D[2]), 170 | ] 171 | ) 172 | return location_2D 173 | 174 | 175 | def local_points_to_camera_view(location: np.array, intrinsic_matrix): 176 | """Converts the given 3D vector to the view of the camera using 177 | the extrinsic and the intrinsic matrix. 
178 | Args: 179 | location = [[x, y, z]] 180 | extrinsic_matrix: The extrinsic matrix of the camera. 181 | Returns: 182 | :py:class:`.Vector3D`: An instance with the coordinates converted 183 | to the camera view. 184 | """ 185 | if len(location) == 0: 186 | return np.array([]) 187 | 188 | # Transform the points to 2D. 189 | position_2D = np.dot(intrinsic_matrix, location.T) 190 | 191 | # Normalize the 2D points. 192 | if not position_2D[2].all(): 193 | print("could not inverse to camera image") 194 | return np.array([]) 195 | 196 | location_2D = np.array( 197 | [ 198 | (position_2D[0] / position_2D[2]), 199 | (position_2D[1] / position_2D[2]), 200 | (position_2D[2]), 201 | ] 202 | ) 203 | return location_2D 204 | 205 | 206 | LABELS = [ 207 | "person", 208 | "bicycle", 209 | "car", 210 | "motorcycle", 211 | "airplane", 212 | "bus", 213 | "train", 214 | "truck", 215 | "boat", 216 | "traffic light", 217 | "fire hydrant", 218 | "stop sign", 219 | "parking meter", 220 | "bench", 221 | "bird", 222 | "cat", 223 | "dog", 224 | "horse", 225 | "sheep", 226 | "cow", 227 | "elephant", 228 | "bear", 229 | "zebra", 230 | "giraffe", 231 | "backpack", 232 | "umbrella", 233 | "handbag", 234 | "tie", 235 | "suitcase", 236 | "frisbee", 237 | "skis", 238 | "snowboard", 239 | "sports ball", 240 | "kite", 241 | "baseball bat", 242 | "baseball glove", 243 | "skateboard", 244 | "surfboard", 245 | "tennis racket", 246 | "bottle", 247 | "wine glass", 248 | "cup", 249 | "fork", 250 | "knife", 251 | "spoon", 252 | "bowl", 253 | "banana", 254 | "apple", 255 | "sandwich", 256 | "orange", 257 | "broccoli", 258 | "carrot", 259 | "hot dog", 260 | "pizza", 261 | "donut", 262 | "cake", 263 | "chair", 264 | "couch", 265 | "potted plant", 266 | "bed", 267 | "dining table", 268 | "toilet", 269 | "tv", 270 | "laptop", 271 | "mouse", 272 | "remote", 273 | "keyboard", 274 | "cell phone", 275 | "microwave", 276 | "oven", 277 | "toaster", 278 | "sink", 279 | "refrigerator", 280 | "book", 281 | "clock", 282 | "vase", 283 | "scissors", 284 | "teddy bear", 285 | "hair drier", 286 | "toothbrush", 287 | ] 288 | -------------------------------------------------------------------------------- /operators/pid_control_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # PID Control operator 3 | 4 | `pid` control operator computes the command that needs to be executed to follow the given waypoints. 5 | It reacts to the car current speed and position in a way that accelerates or brake according to previous inputs. 6 | 7 | ## Inputs 8 | 9 | - waypoints coordinates to follow. 10 | 11 | ## Outputs 12 | 13 | - throttle, steering (rad) and braking. 
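The command is produced by a discrete PID step on the speed error (converted to km/h) and clipped to [-1, 1]; positive values are mapped to throttle and negative values to brake. A minimal, self-contained sketch of that update (`pid_step` is an illustrative name, and the default gains only mirror the module constants defined further down):

```python
import numpy as np

def pid_step(errors, kp=0.4, kd=0.0, ki=0.05, dt=1.0 / 20) -> float:
    # `errors` is the recent history of speed errors in km/h, most recent last.
    error = errors[-1]
    de = (errors[-1] - errors[-2]) / dt if len(errors) >= 2 else 0.0
    ie = sum(errors) * dt
    # Positive output -> throttle (capped later), negative output -> brake.
    return float(np.clip(kp * error + kd * de + ki * ie, -1.0, 1.0))

# Example: target 5 m/s while driving 3 m/s -> error = (5 - 3) * 3.6 = 7.2 km/h
acceleration = pid_step([7.2])  # 0.4 * 7.2 + 0.05 * 7.2 * 0.05 = 2.898 -> clipped to 1.0
```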
14 | 15 | ## Graph Description 16 | 17 | ```yaml 18 | - id: pid_control_op 19 | operator: 20 | python: ../../operators/pid_control_op.py 21 | outputs: 22 | - control 23 | inputs: 24 | position: oasis_agent/position 25 | speed: oasis_agent/speed 26 | waypoints: fot_op/waypoints 27 | ``` 28 | 29 | ## Graph Viz 30 | 31 | ```mermaid 32 | flowchart TB 33 | oasis_agent 34 | subgraph fot_op 35 | fot_op/op[op] 36 | end 37 | subgraph pid_control_op 38 | pid_control_op/op[op] 39 | end 40 | oasis_agent -- position --> pid_control_op/op 41 | oasis_agent -- speed --> pid_control_op/op 42 | fot_op/op -- waypoints --> pid_control_op/op 43 | pid_control_op/op -- control --> oasis_agent 44 | ``` 45 | 46 | ## Hyperparameters consider changing 47 | 48 | See: https://en.wikipedia.org/wiki/PID_controller 49 | 50 | ``` 51 | pid_p = 0.1 52 | pid_d = 0.0 53 | pid_i = 0.05 54 | dt = 1.0 / 20 55 | ``` 56 | """ 57 | import math 58 | import time 59 | from collections import deque 60 | from typing import Callable 61 | 62 | import numpy as np 63 | import pyarrow as pa 64 | from dora import DoraStatus 65 | from numpy import linalg as LA 66 | from scipy.spatial.transform import Rotation as R 67 | from sklearn.metrics import pairwise_distances 68 | 69 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 70 | 71 | MIN_PID_WAYPOINT_DISTANCE = 1 72 | pid_p = 0.4 73 | pid_d = 0.0 74 | pid_i = 0.05 75 | dt = 1.0 / 20 76 | pid_use_real_time = False 77 | 78 | BRAKE_MAX = 1.0 79 | THROTTLE_MAX = 0.5 80 | 81 | 82 | def get_angle(left, right) -> float: 83 | """Computes the angle between the vector and another vector 84 | in radians.""" 85 | 86 | angle = left - right 87 | if angle > math.pi: 88 | angle -= 2 * math.pi 89 | elif angle < -math.pi: 90 | angle += 2 * math.pi 91 | return angle 92 | 93 | 94 | def compute_throttle_and_brake(pid, current_speed: float, target_speed: float): 95 | """Computes the throttle/brake required to reach the target speed. 96 | 97 | It uses the longitudinal controller to derive the required information. 98 | 99 | Args: 100 | pid: The pid controller. 101 | current_speed (:obj:`float`): The current speed of the ego vehicle 102 | (in m/s). 103 | target_speed (:obj:`float`): The target speed to reach (in m/s). 104 | 105 | Returns: 106 | Throttle and brake. 107 | """ 108 | if current_speed < 0: 109 | print("Current speed is negative: {}".format(current_speed)) 110 | non_negative_speed = 0 111 | else: 112 | non_negative_speed = current_speed 113 | acceleration = pid.run_step(target_speed, non_negative_speed) 114 | if acceleration >= 0.0: 115 | throttle = min(acceleration, THROTTLE_MAX) 116 | brake = 0 117 | else: 118 | throttle = 0.0 119 | brake = min(abs(acceleration), BRAKE_MAX) 120 | # Keep the brake pressed when stopped or when sliding back on a hill. 121 | if (current_speed < 1 and target_speed == 0) or current_speed < -0.3: 122 | brake = 1.0 123 | return throttle, brake 124 | 125 | 126 | class PIDLongitudinalController(object): 127 | """Implements longitudinal control using a PID. 128 | 129 | Args: 130 | K_P (:obj:`float`): Proportional term. 131 | K_D (:obj:`float`): Differential term. 132 | K_I (:obj:`float`): Integral term. 133 | dt (:obj:`float`): time differential in seconds. 
134 | """ 135 | 136 | def __init__( 137 | self, 138 | K_P: float = 1.0, 139 | K_D: float = 0.0, 140 | K_I: float = 0.0, 141 | dt: float = 0.03, 142 | use_real_time: bool = False, 143 | ): 144 | self._k_p = K_P 145 | self._k_d = K_D 146 | self._k_i = K_I 147 | self._dt = dt 148 | self._use_real_time = use_real_time 149 | self._last_time = time.time() 150 | self._error_buffer = deque(maxlen=10) 151 | 152 | def run_step(self, target_speed: float, current_speed: float): 153 | """Computes the throttle/brake based on the PID equations. 154 | 155 | Args: 156 | target_speed (:obj:`float`): Target speed in m/s. 157 | current_speed (:obj:`float`): Current speed in m/s. 158 | 159 | Returns: 160 | Throttle, brake and steering. 161 | """ 162 | # Transform to km/h 163 | error = (target_speed - current_speed) * 3.6 164 | self._error_buffer.append(error) 165 | 166 | if self._use_real_time: 167 | time_now = time.time() 168 | dt = time_now - self._last_time 169 | self._last_time = time_now 170 | else: 171 | dt = self._dt 172 | if len(self._error_buffer) >= 2: 173 | _de = (self._error_buffer[-1] - self._error_buffer[-2]) / dt 174 | _ie = sum(self._error_buffer) * dt 175 | else: 176 | _de = 0.0 177 | _ie = 0.0 178 | 179 | return np.clip( 180 | (self._k_p * error) + (self._k_d * _de) + (self._k_i * _ie), 181 | -1.0, 182 | 1.0, 183 | ) 184 | 185 | 186 | pid = PIDLongitudinalController(pid_p, pid_d, pid_i, dt, pid_use_real_time) 187 | 188 | 189 | class Operator: 190 | """ 191 | Compute the throttle, target angle and brake given a `position`, a `speed` and a `waypoints`. 192 | """ 193 | 194 | def __init__(self): 195 | self.waypoints = [] 196 | self.target_speeds = [] 197 | self.metadata = {} 198 | self.position = [] 199 | self.speed = [] 200 | self.previous_position = [] 201 | self.current_speed = [] 202 | self.previous_time = time.time() 203 | 204 | def on_event( 205 | self, 206 | dora_event: dict, 207 | send_output: Callable[[str, bytes], None], 208 | ) -> DoraStatus: 209 | if dora_event["type"] == "INPUT": 210 | return self.on_input(dora_event, send_output) 211 | return DoraStatus.CONTINUE 212 | 213 | def on_input( 214 | self, 215 | dora_input: dict, 216 | send_output: Callable[[str, bytes], None], 217 | ): 218 | """Handle input. 219 | Args: 220 | dora_input["id"] (str): Id of the input declared in the yaml configuration 221 | dora_input["value"] (arrow.array(UInt8)): Bytes message of the input 222 | send_output (Callable[[str, bytes]]): Function enabling sending output back to dora. 
223 | """ 224 | 225 | if "position" == dora_input["id"]: 226 | self.position = dora_input["value"].to_numpy() 227 | return DoraStatus.CONTINUE 228 | 229 | elif dora_input["id"] == "speed": 230 | self.speed = np.array(dora_input["value"]) 231 | return DoraStatus.CONTINUE 232 | 233 | elif "waypoints" == dora_input["id"]: 234 | waypoints = dora_input["value"].to_numpy() 235 | waypoints = waypoints.reshape((-1, 3)) 236 | 237 | self.target_speeds = waypoints[:, 2] 238 | self.waypoints = waypoints[:, :2] 239 | self.metadata = dora_input["metadata"] 240 | 241 | if len(self.position) == 0 or len(self.speed) == 0: 242 | return DoraStatus.CONTINUE 243 | 244 | if len(self.waypoints) == 0: 245 | send_output( 246 | "control", 247 | pa.array(np.array([0, 0, 1], np.float16).ravel()), 248 | self.metadata, 249 | ) 250 | return DoraStatus.CONTINUE 251 | 252 | [x, y, _, rx, ry, rz, rw] = self.position 253 | [_, _, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler("xyz", degrees=False) 254 | distances = pairwise_distances(self.waypoints, np.array([[x, y]])).T[0] 255 | 256 | index = distances > MIN_PID_WAYPOINT_DISTANCE 257 | self.waypoints = self.waypoints[index] 258 | self.target_speeds = self.target_speeds[index] 259 | distances = distances[index] 260 | 261 | if len(self.waypoints) == 0: 262 | target_angle = 0 263 | target_speed = 0 264 | else: 265 | argmin_distance = np.argmin(distances) 266 | 267 | ## Retrieve the closest point to the steer distance 268 | target_location = self.waypoints[argmin_distance] 269 | 270 | target_speed = self.target_speeds[argmin_distance] 271 | 272 | ## Compute the angle of steering 273 | target_vector = target_location - [x, y] 274 | 275 | target_angle = get_angle( 276 | math.atan2(target_vector[1], target_vector[0]), yaw 277 | ) 278 | 279 | throttle, brake = compute_throttle_and_brake( 280 | pid, LA.norm(self.speed), target_speed 281 | ) 282 | 283 | send_output( 284 | "control", 285 | pa.array(np.array([throttle, target_angle, brake], np.float16)), 286 | self.metadata, 287 | ) 288 | return DoraStatus.CONTINUE 289 | -------------------------------------------------------------------------------- /operators/obstacle_location_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Obstacle location operator 3 | 4 | The obstacle location operator match bounding box with depth frame to find an approximative position of obstacles. 5 | 6 | There is two logic within it: 7 | - One is for the ground dot for lane detection. 8 | - One is for bounding box obstacle localisation. 9 | 10 | Both logic are based on he computation of the projection in 2D space of the lidar 3D point and then reusing the index to get the 3D position. 11 | 12 | - In the case of ground dot detection, the approximation is based on a knnr, as we might not have enough data on the floor. 13 | - In the case of bounding box, we use first quantile closest point within the bounding box to estimate the distance. We use the first quantile closest point to remove the noise. 14 | 15 | The mecanism to project the lidar point cloud into a 2D is also used in the `plot.py` operator. You can use the input `lidar_pc` within it to help you debug. 16 | 17 | ## Inputs 18 | 19 | - 2D Obstacles bounding box. 20 | 21 | ## Outputs 22 | 23 | - 3D position of obstacles as dot. 
24 | 25 | ## Example plot (green dor in the middle of the bounding box) 26 | 27 | ![Imgur](https://i.imgur.com/Aq33qy5.png) 28 | 29 | ## Graph Description 30 | 31 | ```yaml 32 | - id: obstacle_location_op 33 | operator: 34 | outputs: 35 | - obstacles 36 | inputs: 37 | lidar_pc: oasis_agent/lidar_pc 38 | obstacles_bbox: yolov5/bbox 39 | position: oasis_agent/position 40 | python: ../../operators/obstacle_location_op.py 41 | ``` 42 | 43 | ## Graph Viz 44 | 45 | ```mermaid 46 | flowchart TB 47 | oasis_agent 48 | subgraph yolov5 49 | yolov5/op[op] 50 | end 51 | subgraph fot_op 52 | fot_op/op[op] 53 | end 54 | subgraph obstacle_location_op 55 | obstacle_location_op/op[op] 56 | end 57 | oasis_agent -- lidar_pc --> obstacle_location_op/op 58 | yolov5/op -- bbox as obstacles_bbox --> obstacle_location_op/op 59 | oasis_agent -- position --> obstacle_location_op/op 60 | obstacle_location_op/op -- obstacles --> fot_op/op 61 | ``` 62 | """ 63 | from typing import Callable 64 | 65 | import numpy as np 66 | import pyarrow as pa 67 | from dora import DoraStatus 68 | from dora_utils import ( 69 | get_extrinsic_matrix, 70 | get_intrinsic_matrix, 71 | get_projection_matrix, 72 | local_points_to_camera_view, 73 | ) 74 | from sklearn.neighbors import KNeighborsRegressor 75 | 76 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 77 | 78 | DEPTH_IMAGE_WIDTH = 1920 79 | DEPTH_IMAGE_HEIGHT = 1080 80 | DEPTH_FOV = 90 81 | SENSOR_POSITION = np.array([3, 0, 1, 0, 0, 0]) 82 | INTRINSIC_MATRIX = get_intrinsic_matrix( 83 | DEPTH_IMAGE_WIDTH, DEPTH_IMAGE_HEIGHT, DEPTH_FOV 84 | ) 85 | 86 | INV_INTRINSIC_MATRIX = np.linalg.inv(INTRINSIC_MATRIX) 87 | VELODYNE_MATRIX = np.array([[0, 0, 1], [1, 0, 0], [0, -1, 0]]) 88 | UNREAL_MATRIX = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) 89 | INV_UNREAL_MATRIX = np.linalg.inv(UNREAL_MATRIX) 90 | INV_VELODYNE_MATRIX = np.linalg.inv(VELODYNE_MATRIX) 91 | 92 | 93 | def get_predictions(obstacles, obstacle_with_locations): 94 | """Extracts obstacle predictions out of the message. 95 | This method is useful to build obstacle predictions when 96 | the operator directly receives detections instead of predictions. 97 | The method assumes that the obstacles are static. 98 | """ 99 | predictions = [] 100 | # Transform the obstacle into a prediction. 101 | for obstacle, location in zip(obstacles, obstacle_with_locations): 102 | obstacle = np.append(location, obstacle[-2:]) 103 | predictions.append(obstacle) 104 | 105 | return predictions 106 | 107 | 108 | class Operator: 109 | """ 110 | Compute the location of obstacles, given 2D `bbox`, LIDAR point cloud and a position. 
111 | """ 112 | 113 | def __init__(self): 114 | self.point_cloud = [] 115 | self.camera_point_cloud = [] 116 | self.ground_point_cloud = [] 117 | self.camera_ground_point_cloud = [] 118 | self.last_point_cloud = [] 119 | self.last_camera_point_cloud = [] 120 | self.obstacles = [] 121 | self.obstacles_bbox = [] 122 | self.position = [] 123 | self.lanes = [] 124 | 125 | def on_event( 126 | self, 127 | dora_event: dict, 128 | send_output: Callable[[str, bytes], None], 129 | ) -> DoraStatus: 130 | if dora_event["type"] == "INPUT": 131 | return self.on_input(dora_event, send_output) 132 | return DoraStatus.CONTINUE 133 | 134 | def on_input( 135 | self, 136 | dora_input: dict, 137 | send_output: Callable[[str, bytes], None], 138 | ): 139 | if "lidar_pc" == dora_input["id"]: 140 | point_cloud = np.array(dora_input["value"]) 141 | point_cloud = point_cloud.reshape((-1, 3)) 142 | 143 | # From Velodyne axis to Camera axis 144 | # from Velodyne axis: 145 | # x -> forward, y -> right, z -> top 146 | # to Camera axis: 147 | # x -> right, y -> bottom, z -> forward 148 | point_cloud = np.dot( 149 | point_cloud, 150 | VELODYNE_MATRIX, 151 | ) 152 | 153 | # Forward points only ( forward = z > 0.1 ) 154 | point_cloud = point_cloud[np.where(point_cloud[:, 2] > 0.1)] 155 | 156 | # Remove ground points. Above lidar only ( bottom = y < 1.0 ) 157 | above_ground_point_index = np.where(point_cloud[:, 1] < 1.0) 158 | point_cloud = point_cloud[above_ground_point_index] 159 | self.ground_point_cloud = point_cloud[above_ground_point_index == False] 160 | 161 | # 3D array -> 2D array with index_x -> pixel x, index_y -> pixel_y, value -> z 162 | camera_point_cloud = local_points_to_camera_view( 163 | point_cloud, INTRINSIC_MATRIX 164 | ).T 165 | self.camera_ground_point_cloud = local_points_to_camera_view( 166 | self.ground_point_cloud, INTRINSIC_MATRIX 167 | ).T 168 | 169 | self.camera_point_cloud = camera_point_cloud 170 | self.point_cloud = point_cloud 171 | 172 | elif "position" == dora_input["id"]: 173 | # Add sensor transform 174 | self.position = dora_input["value"].to_numpy() 175 | self.extrinsic_matrix = get_extrinsic_matrix( 176 | get_projection_matrix(self.position) 177 | ) 178 | 179 | elif "lanes" == dora_input["id"]: 180 | lanes = np.array(dora_input["value"]).reshape((-1, 60, 2)) 181 | 182 | knnr = KNeighborsRegressor(n_neighbors=4) 183 | knnr.fit(self.camera_ground_point_cloud[:, :2], self.ground_point_cloud) 184 | 185 | processed_lanes = [] 186 | for lane in lanes: 187 | lane_location = knnr.predict(lane) 188 | lane_location = np.array(lane_location) 189 | 190 | lane_location = np.hstack( 191 | ( 192 | lane_location, 193 | np.ones((lane_location.shape[0], 1)), 194 | ) 195 | ) 196 | lane_location = np.dot(lane_location, self.extrinsic_matrix.T)[:, :3] 197 | processed_lanes.append(lane_location) 198 | processed_lanes = pa.array(np.array(processed_lanes, np.float32).ravel()) 199 | 200 | send_output("global_lanes", processed_lanes, dora_input["metadata"]) 201 | 202 | elif "obstacles_bbox" == dora_input["id"]: 203 | if len(self.position) == 0 or len(self.point_cloud) == 0: 204 | return DoraStatus.CONTINUE 205 | 206 | # bbox = np.array([[min_x, max_x, min_y, max_y, confidence, label], ... n_bbox ... 
]) 207 | self.obstacles_bbox = np.array(dora_input["value"]).reshape((-1, 6)) 208 | 209 | obstacles_with_location = [] 210 | for obstacle_bb in self.obstacles_bbox: 211 | [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb 212 | z_points = self.point_cloud[ 213 | np.where( 214 | (self.camera_point_cloud[:, 0] > min_x) 215 | & (self.camera_point_cloud[:, 0] < max_x) 216 | & (self.camera_point_cloud[:, 1] > min_y) 217 | & (self.camera_point_cloud[:, 1] < max_y) 218 | ) 219 | ] 220 | if len(z_points) > 0: 221 | closest_point = z_points[ 222 | z_points[:, 2].argsort()[int(len(z_points) / 4)] 223 | ] 224 | obstacles_with_location.append(closest_point) 225 | if len(obstacles_with_location) > 0: 226 | obstacles_with_location = np.array(obstacles_with_location) 227 | obstacles_with_location = np.hstack( 228 | ( 229 | obstacles_with_location, 230 | np.ones((obstacles_with_location.shape[0], 1)), 231 | ) 232 | ) 233 | obstacles_with_location = np.dot( 234 | obstacles_with_location, self.extrinsic_matrix.T 235 | )[:, :3] 236 | 237 | predictions = get_predictions( 238 | self.obstacles_bbox, obstacles_with_location 239 | ) 240 | predictions_bytes = pa.array(np.array(predictions, np.float32).ravel()) 241 | 242 | send_output("obstacles", predictions_bytes, dora_input["metadata"]) 243 | else: 244 | send_output( 245 | "obstacles", 246 | pa.array(np.array([]).ravel()), 247 | dora_input["metadata"], 248 | ) 249 | return DoraStatus.CONTINUE 250 | -------------------------------------------------------------------------------- /docs/src/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2022] [The Dora-rs Authors] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /carla/oasis_agent.py: -------------------------------------------------------------------------------- 1 | import math 2 | import os.path 3 | import xml.etree.ElementTree as ET 4 | 5 | import numpy as np 6 | import pyarrow as pa 7 | from autoagents.autonomous_agent import AutonomousAgent 8 | from dora import Node 9 | from dora_tracing import propagator, serialize_context, tracer 10 | from scipy.spatial.transform import Rotation as R 11 | 12 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 13 | from carla import VehicleControl 14 | 15 | IMAGE_WIDTH = 1920 16 | IMAGE_HEIGHT = 1080 17 | STEER_GAIN = 2 18 | AVERAGE_WINDOW = 10 19 | 20 | node = Node() 21 | 22 | 23 | def _get_latlon_ref(xodr): 24 | """ 25 | Convert from waypoints world coordinates to CARLA GPS coordinates 26 | :return: tuple with lat and lon coordinates 27 | """ 28 | tree = ET.ElementTree(ET.fromstring(xodr)) 29 | 30 | # default reference 31 | lat_ref = 42.0 32 | lon_ref = 2.0 33 | 34 | for opendrive in tree.iter("OpenDRIVE"): 35 | for header in opendrive.iter("header"): 36 | for georef in header.iter("geoReference"): 37 | if georef.text: 38 | str_list = georef.text.split(" ") 39 | for item in str_list: 40 | if "+lat_0" in item: 41 | lat_ref = float(item.split("=")[1]) 42 | if "+lon_0" in item: 43 | lon_ref = float(item.split("=")[1]) 44 | return lat_ref, lon_ref 45 | 46 | 47 | def from_gps_to_world_coordinate(lat, lon, lat_ref, lon_ref): 48 | EARTH_RADIUS_EQUA = 6378137.0 # pylint: disable=invalid-name 49 | scale = math.cos(lat_ref * math.pi / 180.0) 50 | mx_initial = scale * lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0 51 | my_initial = ( 52 | scale 53 | * EARTH_RADIUS_EQUA 54 | * math.log(math.tan((90.0 + lat_ref) * math.pi / 360.0)) 55 | ) 56 | 57 | # lon = mx * 180.0 / (math.pi * EARTH_RADIUS_EQUA * scale) 58 | # lat = 360.0 * math.atan(math.exp(my / (EARTH_RADIUS_EQUA * scale))) / math.pi - 90.0 59 | 60 | mx = lon / 180.0 * (math.pi * EARTH_RADIUS_EQUA * scale) 61 | my = math.log(math.tan((lat + 90.0) * math.pi / 360.0)) * ( 62 | EARTH_RADIUS_EQUA * scale 63 | ) 64 | x = mx - mx_initial 65 | y = -(my - my_initial) 66 | 67 | return [x, y] 68 | 69 | 70 | def radians_to_steer(rad: float, steer_gain: float): 71 | """Converts radians to steer input. 72 | 73 | Returns: 74 | :obj:`float`: Between [-1.0, 1.0]. 
75 | """ 76 | steer = steer_gain * rad 77 | if steer > 0: 78 | steer = min(steer, 1) 79 | else: 80 | steer = max(steer, -1) 81 | return steer 82 | 83 | 84 | def get_entry_point(): 85 | return "DoraAgent" 86 | 87 | 88 | class DoraAgent(AutonomousAgent): 89 | def setup(self, destination, path_to_conf_file): 90 | """ 91 | Setup the agent parameters 92 | """ 93 | self.previous_positions = [] 94 | self.destination = destination 95 | self.lat_ref = None 96 | self.lon_ref = None 97 | self.opendrive_map = None 98 | self.last_metadata = "" 99 | 100 | def sensors(self): # pylint: disable=no-self-use 101 | """ 102 | Define the sensor suite required by the agent 103 | return: a list containing the required sensors in the following format: 104 | [ 105 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 106 | width': 300, 'height': 200, 'fov': 100, 'id': 'Left'}, 107 | {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, 108 | width': 300, 'height': 200, 'fov': 100, 'id': 'Right'}, 109 | {'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, 110 | id': 'LIDAR'} 111 | ] 112 | """ 113 | sensors = [ 114 | { 115 | "type": "sensor.camera.rgb", 116 | "id": "camera.center", 117 | "x": 2.0, 118 | "y": 0.0, 119 | "z": 1.5, 120 | "roll": 0.0, 121 | "pitch": 0.0, 122 | "yaw": 0.0, 123 | "width": IMAGE_WIDTH, 124 | "height": IMAGE_HEIGHT, 125 | "fov": 90, 126 | }, 127 | { 128 | "type": "sensor.lidar.ray_cast", 129 | "id": "LIDAR", 130 | "x": 2.0, 131 | "y": 0.0, 132 | "z": 1.5, 133 | "roll": 0.0, 134 | "pitch": 0.0, 135 | "yaw": 0.0, 136 | }, 137 | # {'type': 'sensor.other.radar', 'id': 'RADAR', 138 | #'x': 0.7, 'y': -0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': -45.0, 'fov': 30}, 139 | { 140 | "type": "sensor.other.gnss", 141 | "id": "GPS", 142 | "x": 2, 143 | "y": 0, 144 | "z": 1.5, 145 | }, 146 | { 147 | "type": "sensor.other.imu", 148 | "id": "IMU", 149 | "x": 2, 150 | "y": 0, 151 | "z": 1.5, 152 | "roll": 0.0, 153 | "pitch": 0.0, 154 | "yaw": 0.0, 155 | }, 156 | { 157 | "type": "sensor.opendrive_map", 158 | "id": "高精地图传感器", 159 | "reading_frequency": 1, 160 | }, 161 | {"type": "sensor.speedometer", "id": "速度传感器"}, 162 | ] 163 | 164 | return sensors 165 | 166 | def save_input_data(self, keys, inputdata): 167 | import json 168 | 169 | data = {keys: inputdata} 170 | opendrive_file = "/home/dora/workspace/simulate/inputdata_log.txt" 171 | if os.path.exists(opendrive_file): 172 | os.remove(opendrive_file) 173 | with open(opendrive_file, "w") as f: 174 | f.write(json.dumps(data)) 175 | f.close() 176 | 177 | def run_step(self, input_data, timestamp): 178 | """ 179 | Execute one step of navigation. 
180 | :return: control 181 | """ 182 | with tracer.start_as_current_span(name="run_step") as child_span: 183 | output = {} 184 | propagator.inject(output) 185 | metadata = {"open_telemetry_context": serialize_context(output)} 186 | ### Opendrive preprocessing 187 | if "高精地图传感器" in input_data.keys(): 188 | if self.opendrive_map is None: 189 | opendrive_map = input_data["高精地图传感器"][1]["opendrive"] 190 | self.save_input_data("高精地图传感器", input_data["高精地图传感器"]) 191 | 192 | self.opendrive_map = opendrive_map 193 | self.lat_ref, self.lon_ref = _get_latlon_ref(opendrive_map) 194 | node.send_output("opendrive", pa.array([opendrive_map]), metadata) 195 | if "速度传感器" in input_data.keys(): 196 | node.send_output( 197 | "speed", 198 | pa.array( 199 | np.array(input_data["速度传感器"][1]["speed"].ravel(), np.float32) 200 | ), 201 | metadata, 202 | ) 203 | 204 | if self.lat_ref is None: 205 | return VehicleControl( 206 | steer=0.0, 207 | throttle=0.0, 208 | brake=0.0, 209 | hand_brake=False, 210 | ) 211 | 212 | ### Position preprocessing 213 | [lat, lon, z] = input_data["GPS"][1] 214 | [x, y] = from_gps_to_world_coordinate(lat, lon, self.lat_ref, self.lon_ref) 215 | yaw = input_data["IMU"][1][-1] - np.pi / 2 216 | roll = 0.0 217 | pitch = 0.0 218 | [[qx, qy, qz, qw]] = R.from_euler( 219 | "xyz", [[roll, pitch, yaw]], degrees=False 220 | ).as_quat() 221 | try: 222 | R.from_quat([qx, qy, qz, qw]) 223 | except: 224 | print("Error in quaternion.") 225 | return VehicleControl( 226 | steer=0.0, 227 | throttle=0.0, 228 | brake=0.0, 229 | hand_brake=False, 230 | ) 231 | 232 | self.previous_positions.append([x, y]) 233 | 234 | ## Accumulate previous position until having window size average. 235 | if len(self.previous_positions) < AVERAGE_WINDOW: 236 | return VehicleControl( 237 | steer=0.0, 238 | throttle=0.0, 239 | brake=0.0, 240 | hand_brake=False, 241 | ) 242 | self.previous_positions = self.previous_positions[-AVERAGE_WINDOW:] 243 | 244 | ## Average last 5 position 245 | [avg_x, avg_y] = np.array(self.previous_positions).mean(axis=0) 246 | position = np.array([avg_x, avg_y, 0.0, qx, qy, qz, qw], np.float32) 247 | 248 | ### Camera preprocessing 249 | frame_raw_data = input_data["camera.center"][1] 250 | ## frame = np.frombuffer(frame_raw_data, np.uint8) 251 | ## frame = np.reshape(frame, (IMAGE_HEIGHT, IMAGE_WIDTH, 4)) 252 | camera_frame = pa.array(frame_raw_data.ravel()) 253 | 254 | ### LIDAR preprocessing 255 | frame_raw_data = input_data["LIDAR"][1] 256 | frame = np.frombuffer(frame_raw_data, np.float32) 257 | point_cloud = np.reshape(frame, (-1, 4)) 258 | point_cloud = point_cloud[:, :3] 259 | lidar_pc = pa.array(point_cloud.ravel()) 260 | 261 | ### Waypoints preprocessing 262 | waypoints_xyz = np.array( 263 | [ 264 | [ 265 | self.destination.x, 266 | self.destination.y, 267 | 0.0, 268 | ] 269 | ], 270 | np.float32, 271 | ) 272 | 273 | ## Sending data into the dataflow 274 | node.send_output( 275 | "position", 276 | pa.array(position.ravel()), 277 | metadata, 278 | ) 279 | node.send_output("image", camera_frame, metadata) 280 | node.send_output("lidar_pc", lidar_pc, metadata) 281 | node.send_output( 282 | "objective_waypoints", 283 | pa.array(waypoints_xyz.ravel()), 284 | metadata, 285 | ) 286 | 287 | # Receiving back control information 288 | ## Using tick to avoid deadlock due to unreceived input. 
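            # Exchange protocol (assumption: the graph wires a periodic `tick` input
            # and the `control` output of the PID operator back into this node):
            # each iteration blocks on `node.next()`; `tick` events only count down
            # the timeout, while a `control` event carries the
            # [throttle, target_angle, brake] command and breaks out of the loop.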
289 | for iteration in range(5): 290 | event = node.next() 291 | if event["type"] == "INPUT": 292 | input_id = event["id"] 293 | value = event["value"] 294 | 295 | if input_id == "tick" and iteration > 0 and iteration < 4: 296 | print(f"Did not receive control after {iteration} ticks...") 297 | elif input_id == "tick" and iteration == 4: 298 | print( 299 | f"Sending null control after waiting {iteration} ticks..." 300 | ) 301 | value = np.array([0.0, 0.0, 0.0], np.float16) 302 | elif input_id == "control": 303 | break 304 | 305 | [throttle, target_angle, brake] = np.array(value) 306 | 307 | steer = radians_to_steer(target_angle, STEER_GAIN) 308 | vec_control = VehicleControl( 309 | steer=float(steer), 310 | throttle=float(throttle), 311 | brake=float(brake), 312 | hand_brake=False, 313 | ) 314 | 315 | return vec_control 316 | -------------------------------------------------------------------------------- /carla/_generate_world.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import time 4 | 5 | from carla import command 6 | 7 | IMAGE_WIDTH = 1920 8 | IMAGE_HEIGHT = 1080 9 | AUTOPILOT = os.environ.get("SET_AUTOPILOT") == "true" 10 | 11 | 12 | def add_lidar(world, transform, callback, vehicle): 13 | lidar_blueprint = world.get_blueprint_library().find( 14 | "sensor.lidar.ray_cast" 15 | ) 16 | lidar_blueprint.set_attribute("channels", "32") 17 | lidar_blueprint.set_attribute("range", "10000") 18 | lidar_blueprint.set_attribute("points_per_second", "500000") 19 | lidar_blueprint.set_attribute("rotation_frequency", "20") 20 | lidar_blueprint.set_attribute("upper_fov", "15") 21 | lidar_blueprint.set_attribute("lower_fov", "-30") 22 | lidar = world.spawn_actor(lidar_blueprint, transform, attach_to=vehicle) 23 | # Register callback to be invoked when a new point cloud is received. 24 | lidar.listen(callback) 25 | return lidar 26 | 27 | 28 | def add_depth_camera(world, transform, callback, vehicle): 29 | depth_blueprint = world.get_blueprint_library().find("sensor.camera.depth") 30 | depth_blueprint.set_attribute("image_size_x", str(IMAGE_WIDTH)) 31 | depth_blueprint.set_attribute("image_size_y", str(IMAGE_HEIGHT)) 32 | depth_blueprint.set_attribute("fov", str(90.0)) 33 | depth_camera = world.spawn_actor( 34 | depth_blueprint, transform, attach_to=vehicle 35 | ) 36 | # Register callback to be invoked when a new frame is received. 37 | depth_camera.listen(callback) 38 | return depth_camera 39 | 40 | 41 | def add_camera(world, transform, callback, vehicle): 42 | camera_blueprint = world.get_blueprint_library().find("sensor.camera.rgb") 43 | camera_blueprint.set_attribute("image_size_x", str(IMAGE_WIDTH)) 44 | camera_blueprint.set_attribute("image_size_y", str(IMAGE_HEIGHT)) 45 | camera = world.spawn_actor(camera_blueprint, transform, attach_to=vehicle) 46 | # Register callback to be invoked when a new frame is received. 
47 | camera.listen(callback) 48 | return camera 49 | 50 | 51 | def add_segmented_camera(world, transform, callback, vehicle): 52 | segmented_blueprint = world.get_blueprint_library().find( 53 | "sensor.camera.semantic_segmentation" 54 | ) 55 | segmented_blueprint.set_attribute("image_size_x", str(IMAGE_WIDTH)) 56 | segmented_blueprint.set_attribute("image_size_y", str(IMAGE_HEIGHT)) 57 | segmented_blueprint.set_attribute("fov", str(90.0)) 58 | segmented_camera = world.spawn_actor( 59 | segmented_blueprint, transform, attach_to=vehicle 60 | ) 61 | segmented_camera.listen(callback) 62 | return segmented_camera 63 | 64 | 65 | def spawn_driving_vehicle(client, world): 66 | """This function spawns the driving vehicle and puts it into 67 | an autopilot mode. 68 | Args: 69 | client: The Client instance representing the simulation to 70 | connect to. 71 | world: The world inside the current simulation. 72 | Returns: 73 | A Actor instance representing the vehicle that was just spawned. 74 | """ 75 | # Get the blueprint of the vehicle and set it to AutoPilot. 76 | vehicle_bp = random.choice( 77 | world.get_blueprint_library().filter("vehicle.*") 78 | ) 79 | while ( 80 | not vehicle_bp.has_attribute("number_of_wheels") 81 | or int(vehicle_bp.get_attribute("number_of_wheels")) != 4 82 | ): 83 | vehicle_bp = random.choice( 84 | world.get_blueprint_library().filter("vehicle.*") 85 | ) 86 | if AUTOPILOT: 87 | vehicle_bp.set_attribute("role_name", "autopilot") 88 | print("setting carla autopilot") 89 | 90 | # Get the spawn point of the vehicle. 91 | start_pose = random.choice(world.get_map().get_spawn_points()) 92 | 93 | # Spawn the vehicle. 94 | batch = [ 95 | command.SpawnActor(vehicle_bp, start_pose).then( 96 | command.SetAutopilot(command.FutureActor, AUTOPILOT) 97 | ) 98 | ] 99 | 100 | vehicle_id = client.apply_batch_sync(batch)[0].actor_id 101 | while world.get_actors().find(vehicle_id) is None: 102 | 103 | # Find the vehicle and return the Actor instance. 104 | time.sleep( 105 | 5 106 | ) # This is so that the vehicle gets registered in the actors. 
107 | print("waiting for ego vehicle to create") 108 | return world.get_actors().find(vehicle_id), vehicle_id 109 | 110 | 111 | def check_simulator_version( 112 | simulator_version: str, 113 | required_major: int = 0, 114 | required_minor: int = 9, 115 | required_patch: int = 1, 116 | ): 117 | """Checks if the simulator meets the minimum version requirements.""" 118 | ver_strs = simulator_version.split(".") 119 | if len(ver_strs) < 2 or len(ver_strs) > 3: 120 | print( 121 | "WARNING: Assuming that installed CARLA {} API is compatible " 122 | "with CARLA 0.9.13 API".format(simulator_version) 123 | ) 124 | ver_strs = "0.9.13".split(".") 125 | major = int(ver_strs[0]) 126 | minor = int(ver_strs[1]) 127 | if major != required_major: 128 | return major > required_major 129 | else: 130 | if minor != required_minor: 131 | return minor > required_minor 132 | else: 133 | if len(ver_strs) == 3: 134 | patch = int(ver_strs[2]) 135 | return patch >= required_patch 136 | else: 137 | return True 138 | 139 | 140 | def spawn_actors( 141 | client, 142 | world, 143 | traffic_manager_port: int, 144 | simulator_version: str, 145 | ego_spawn_point_index: int, 146 | auto_pilot: bool, 147 | num_people: int, 148 | num_vehicles: int, 149 | logger, 150 | ): 151 | vehicle_ids = spawn_vehicles( 152 | client, world, traffic_manager_port, num_vehicles, logger 153 | ) 154 | ego_vehicle = spawn_ego_vehicle( 155 | world, traffic_manager_port, ego_spawn_point_index, auto_pilot 156 | ) 157 | people = [] 158 | 159 | if check_simulator_version( 160 | simulator_version, required_minor=9, required_patch=6 161 | ): 162 | # People do not move in versions older than 0.9.6. 163 | (people, people_control_ids) = spawn_people( 164 | client, world, num_people, logger 165 | ) 166 | people_actors = world.get_actors(people_control_ids) 167 | for i, ped_control_id in enumerate(people_control_ids): 168 | # Start person. 169 | people_actors[i].start() 170 | people_actors[i].go_to_location( 171 | world.get_random_location_from_navigation() 172 | ) 173 | return ego_vehicle, vehicle_ids, people 174 | 175 | 176 | def spawn_ego_vehicle( 177 | world, 178 | traffic_manager_port: int, 179 | spawn_point_index: int, 180 | auto_pilot: bool, 181 | blueprint: str = "vehicle.lincoln.mkz_2017", 182 | ): 183 | v_blueprint = world.get_blueprint_library().filter(blueprint)[0] 184 | ego_vehicle = None 185 | while not ego_vehicle: 186 | if spawn_point_index == -1: 187 | # Pick a random spawn point. 188 | start_pose = random.choice(world.get_map().get_spawn_points()) 189 | else: 190 | spawn_points = world.get_map().get_spawn_points() 191 | assert spawn_point_index < len(spawn_points), ( 192 | "Spawn point index is too big. " 193 | "Town does not have sufficient spawn points." 194 | ) 195 | start_pose = spawn_points[spawn_point_index] 196 | 197 | ego_vehicle = world.try_spawn_actor(v_blueprint, start_pose) 198 | if auto_pilot: 199 | ego_vehicle.set_autopilot(True, traffic_manager_port) 200 | return ego_vehicle 201 | 202 | 203 | def spawn_people(client, world, num_people: int, logger): 204 | """Spawns people at random locations inside the world. 205 | 206 | Args: 207 | num_people: The number of people to spawn. 208 | """ 209 | from carla import Transform 210 | 211 | p_blueprints = world.get_blueprint_library().filter("walker.pedestrian.*") 212 | unique_locs = set([]) 213 | spawn_points = [] 214 | # Get unique spawn points. 
215 | for i in range(num_people): 216 | attempt = 0 217 | while attempt < 10: 218 | spawn_point = Transform() 219 | loc = world.get_random_location_from_navigation() 220 | if loc is not None: 221 | # Transform to tuple so that location is comparable. 222 | p_loc = (loc.x, loc.y, loc.z) 223 | if p_loc not in unique_locs: 224 | spawn_point.location = loc 225 | spawn_points.append(spawn_point) 226 | unique_locs.add(p_loc) 227 | break 228 | attempt += 1 229 | if attempt == 10: 230 | logger.error("Could not find unique person spawn point") 231 | # Spawn the people. 232 | batch = [] 233 | for spawn_point in spawn_points: 234 | p_blueprint = random.choice(p_blueprints) 235 | if p_blueprint.has_attribute("is_invincible"): 236 | p_blueprint.set_attribute("is_invincible", "false") 237 | batch.append(command.SpawnActor(p_blueprint, spawn_point)) 238 | # Apply the batch and retrieve the identifiers. 239 | ped_ids = [] 240 | for response in client.apply_batch_sync(batch, True): 241 | if response.error: 242 | logger.info( 243 | "Received an error while spawning a person: {}".format( 244 | response.error 245 | ) 246 | ) 247 | else: 248 | ped_ids.append(response.actor_id) 249 | # Spawn the person controllers 250 | ped_controller_bp = world.get_blueprint_library().find( 251 | "controller.ai.walker" 252 | ) 253 | batch = [] 254 | for ped_id in ped_ids: 255 | batch.append(command.SpawnActor(ped_controller_bp, Transform(), ped_id)) 256 | ped_control_ids = [] 257 | for response in client.apply_batch_sync(batch, True): 258 | if response.error: 259 | logger.info( 260 | "Error while spawning a person controller: {}".format( 261 | response.error 262 | ) 263 | ) 264 | else: 265 | ped_control_ids.append(response.actor_id) 266 | 267 | return (ped_ids, ped_control_ids) 268 | 269 | 270 | def spawn_vehicles( 271 | client, world, traffic_manager_port: int, num_vehicles: int, logger 272 | ): 273 | """Spawns vehicles at random locations inside the world. 274 | 275 | Args: 276 | num_vehicles: The number of vehicles to spawn. 277 | """ 278 | 279 | logger.debug("Trying to spawn {} vehicles.".format(num_vehicles)) 280 | # Get the spawn points and ensure that the number of vehicles 281 | # requested are less than the number of spawn points. 282 | spawn_points = world.get_map().get_spawn_points() 283 | if num_vehicles >= len(spawn_points): 284 | logger.warning( 285 | "Requested {} vehicles but only found {} spawn points".format( 286 | num_vehicles, len(spawn_points) 287 | ) 288 | ) 289 | num_vehicles = len(spawn_points) 290 | else: 291 | random.shuffle(spawn_points) 292 | 293 | # Get all the possible vehicle blueprints inside the world. 294 | v_blueprints = world.get_blueprint_library().filter("vehicle.*") 295 | 296 | # Construct a batch message that spawns the vehicles. 297 | batch = [] 298 | for transform in spawn_points[:num_vehicles]: 299 | blueprint = random.choice(v_blueprints) 300 | 301 | # Change the color of the vehicle. 302 | if blueprint.has_attribute("color"): 303 | color = random.choice( 304 | blueprint.get_attribute("color").recommended_values 305 | ) 306 | blueprint.set_attribute("color", color) 307 | 308 | # Let the vehicle drive itself. 309 | blueprint.set_attribute("role_name", "autopilot") 310 | 311 | batch.append( 312 | command.SpawnActor(blueprint, transform).then( 313 | command.SetAutopilot( 314 | command.FutureActor, True, traffic_manager_port 315 | ) 316 | ) 317 | ) 318 | 319 | # Apply the batch and retrieve the identifiers. 
320 | vehicle_ids = [] 321 | for response in client.apply_batch_sync(batch, True): 322 | if response.error: 323 | logger.info( 324 | "Received an error while spawning a vehicle: {}".format( 325 | response.error 326 | ) 327 | ) 328 | else: 329 | vehicle_ids.append(response.actor_id) 330 | return vehicle_ids 331 | -------------------------------------------------------------------------------- /operators/yolop_op.py: -------------------------------------------------------------------------------- 1 | """ 2 | # yolop operator 3 | 4 | `yolop` recognizes lanes, and drivable area from a specific images. 5 | 6 | More info here: [https://github.com/hustvl/YOLOP](https://github.com/hustvl/YOLOP) 7 | 8 | You can also choose to allocate the model in GPU using the environment variable: 9 | - `PYTORCH_DEVICE: cuda # or cpu` 10 | 11 | ## Inputs 12 | 13 | - image: HEIGHT x WIDTH x BGR array. 14 | 15 | ## Outputs 16 | 17 | - drivable_area: drivable area as contour points 18 | - lanes: lanes as 60 points representing the lanes 19 | 20 | ## Example plot ( lanes in red, drivable area in green) 21 | 22 | ![Imgur](https://i.imgur.com/I531NIT.gif) 23 | 24 | ## Graph Description 25 | 26 | ```yaml 27 | - id: yolop 28 | operator: 29 | outputs: 30 | - lanes 31 | - drivable_area 32 | inputs: 33 | image: webcam/image 34 | python: ../../operators/yolop_op.py 35 | ``` 36 | """ 37 | 38 | import os 39 | from typing import Callable 40 | 41 | import cv2 42 | import numpy as np 43 | import torch 44 | import torchvision 45 | import torchvision.transforms as transforms 46 | from dora import DoraStatus 47 | 48 | DEVICE = os.environ.get("PYTORCH_DEVICE") or "cpu" 49 | 50 | 51 | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 52 | 53 | transform = transforms.Compose([transforms.ToTensor(), normalize]) 54 | 55 | 56 | def xywh2xyxy(x): 57 | # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right 58 | y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x) 59 | y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x 60 | y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y 61 | y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x 62 | y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y 63 | return y 64 | 65 | 66 | def non_max_suppression( 67 | prediction, 68 | conf_thres=0.85, 69 | iou_thres=0.15, 70 | ): 71 | """Performs Non-Maximum Suppression (NMS) on inference results 72 | Returns: 73 | detections with shape: nx6 (x1, y1, x2, y2, conf, cls) 74 | """ 75 | 76 | output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] 77 | 78 | for xi, x in enumerate(prediction): # image index, image inference 79 | # Apply constraints 80 | # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height 81 | 82 | # Compute conf 83 | confidence = x[:, 5:] * x[:, 4:5] # conf = obj_conf * cls_conf 84 | 85 | # Box (center x, center y, width, height) to (x1, y1, x2, y2) 86 | box = xywh2xyxy(x[:, :4]) 87 | 88 | # Detections matrix nx6 (xyxy, conf, cls) 89 | conf, j = confidence.max(1, keepdim=True) 90 | x = torch.cat((box, conf, j.float()), dim=1)[conf.view(-1) > conf_thres] 91 | 92 | # Check shape 93 | if not x.shape[0]: # no boxes 94 | continue 95 | # Batched NMS 96 | boxes, scores = x[:, :4], x[:, 4] # boxes (offset by class), scores 97 | i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS 98 | 99 | output[xi] = x[i] 100 | return output 101 | 102 | 103 | def morphological_process(image, kernel_size=5, func_type=cv2.MORPH_CLOSE): 104 
| """ 105 | morphological process to fill the hole in the binary segmentation result 106 | :param image: 107 | :param kernel_size: 108 | :return: 109 | """ 110 | if len(image.shape) == 3: 111 | raise ValueError( 112 | "Binary segmentation result image should be a single channel image" 113 | ) 114 | 115 | if image.dtype is not np.uint8: 116 | image = np.array(image, np.uint8) 117 | 118 | kernel = cv2.getStructuringElement( 119 | shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size) 120 | ) 121 | 122 | # close operation fille hole 123 | closing = cv2.morphologyEx(image, func_type, kernel, iterations=1) 124 | 125 | return closing 126 | 127 | 128 | def if_y(samples_x): 129 | for sample_x in samples_x: 130 | if len(sample_x): 131 | # if len(sample_x) != (sample_x[-1] - sample_x[0] + 1) or sample_x[-1] == sample_x[0]: 132 | if sample_x[-1] == sample_x[0]: 133 | return False 134 | return True 135 | 136 | 137 | def fitlane(mask, sel_labels, labels, stats): 138 | H, W = mask.shape 139 | lanes = [] 140 | for label_group in sel_labels: 141 | states = [stats[k] for k in label_group] 142 | x, y, w, h, _ = states[0] 143 | # if len(label_group) > 1: 144 | # print('in') 145 | # for m in range(len(label_group)-1): 146 | # labels[labels == label_group[m+1]] = label_group[0] 147 | t = label_group[0] 148 | # samples_y = np.linspace(y, H-1, 60) 149 | # else: 150 | samples_y = np.linspace(y, y + h - 1, 60) 151 | 152 | samples_x = [np.where(labels[int(sample_y)] == t)[0] for sample_y in samples_y] 153 | 154 | if if_y(samples_x): 155 | samples_x = [ 156 | int(np.mean(sample_x)) if len(sample_x) else -1 157 | for sample_x in samples_x 158 | ] 159 | samples_x = np.array(samples_x) 160 | samples_y = np.array(samples_y) 161 | samples_y = samples_y[samples_x != -1] 162 | samples_x = samples_x[samples_x != -1] 163 | func = np.polyfit(samples_y, samples_x, 2) 164 | x_limits = np.polyval(func, H - 1) 165 | # if (y_max + h - 1) >= 720: 166 | 167 | draw_y = np.linspace(y, y + h - 1, 60) 168 | draw_x = np.polyval(func, draw_y) 169 | # draw_y = draw_y[draw_x < W] 170 | # draw_x = draw_x[draw_x < W] 171 | lanes.append((np.asarray([draw_x, draw_y]).T).astype(np.int32)) 172 | else: 173 | # if ( + w - 1) >= 1280: 174 | samples_x = np.linspace(x, W - 1, 60) 175 | # else: 176 | # samples_x = np.linspace(x, x_max+w-1, 60) 177 | samples_y = [ 178 | np.where(labels[:, int(sample_x)] == t)[0] for sample_x in samples_x 179 | ] 180 | samples_y = [ 181 | int(np.mean(sample_y)) if len(sample_y) else -1 182 | for sample_y in samples_y 183 | ] 184 | samples_x = np.array(samples_x) 185 | samples_y = np.array(samples_y) 186 | samples_x = samples_x[samples_y != -1] 187 | samples_y = samples_y[samples_y != -1] 188 | try: 189 | func = np.polyfit(samples_x, samples_y, 2) 190 | except: 191 | print("polyfit did not work") 192 | # y_limits = np.polyval(func, 0) 193 | # if y_limits > 720 or y_limits < 0: 194 | # if (x + w - 1) >= 1280: 195 | # draw_x = np.linspace(x, 1280-1, 1280-x) 196 | # else: 197 | draw_x = np.linspace(x, x + w - 1, 60) 198 | draw_y = np.polyval(func, draw_x) 199 | lanes.append((np.asarray([draw_x, draw_y]).T).astype(np.int32)) 200 | # cv2.polylines(mask, [draw_points], False, 1, thickness=3) 201 | return lanes 202 | 203 | 204 | def connect_lane(image, shadow_height=0, kernel_size=7, func_type=cv2.MORPH_OPEN): 205 | if len(image.shape) == 3: 206 | gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 207 | else: 208 | gray_image = image 209 | if shadow_height: 210 | image[:shadow_height] = 0 211 | mask = 
np.zeros((image.shape[0], image.shape[1]), np.uint8) 212 | 213 | num_labels, labels, stats, centers = cv2.connectedComponentsWithStats( 214 | gray_image, connectivity=8, ltype=cv2.CV_32S 215 | ) 216 | # ratios = [] 217 | selected_label = [] 218 | 219 | for t in range(1, num_labels, 1): 220 | _, _, _, _, area = stats[t] 221 | if area > 400: 222 | selected_label.append(t) 223 | if len(selected_label) == 0: 224 | return mask 225 | else: 226 | split_labels = [ 227 | [ 228 | label, 229 | ] 230 | for label in selected_label 231 | ] 232 | points = fitlane(mask, split_labels, labels, stats) 233 | return points 234 | 235 | # close operation fill hole 236 | closing = cv2.morphologyEx(image, func_type, kernel_size, iterations=1) 237 | 238 | return closing 239 | 240 | 241 | def letterbox_for_img( 242 | img, 243 | new_shape=(640, 640), 244 | color=(114, 114, 114), 245 | auto=True, 246 | scaleFill=False, 247 | scaleup=True, 248 | ): 249 | # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 250 | shape = img.shape[:2] # current shape [height, width] 251 | if isinstance(new_shape, int): 252 | new_shape = (new_shape, new_shape) 253 | 254 | # Scale ratio (new / old) 255 | r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) 256 | if not scaleup: # only scale down, do not scale up (for better test mAP) 257 | r = min(r, 1.0) 258 | 259 | # Compute padding 260 | ratio = r, r # width, height ratios 261 | new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) 262 | 263 | dw, dh = ( 264 | new_shape[1] - new_unpad[0], 265 | new_shape[0] - new_unpad[1], 266 | ) # wh padding 267 | 268 | if auto: # minimum rectangle 269 | dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding 270 | 271 | elif scaleFill: # stretch 272 | dw, dh = 0.0, 0.0 273 | new_unpad = (new_shape[1], new_shape[0]) 274 | ratio = ( 275 | new_shape[1] / shape[1], 276 | new_shape[0] / shape[0], 277 | ) # width, height ratios 278 | 279 | dw /= 2 # divide padding into 2 sides 280 | dh /= 2 281 | if shape[::-1] != new_unpad: # resize 282 | img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA) 283 | 284 | top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) 285 | left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) 286 | img = cv2.copyMakeBorder( 287 | img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color 288 | ) # add border 289 | return img, ratio, (dw, dh) 290 | 291 | 292 | class Operator: 293 | """ 294 | Infering object from images 295 | """ 296 | 297 | def __init__(self): 298 | self.model = torch.hub.load("hustvl/yolop", "yolop", pretrained=True) 299 | self.model.to(torch.device(DEVICE)) 300 | self.model.eval() 301 | 302 | def on_event( 303 | self, 304 | dora_event: dict, 305 | send_output: Callable[[str, bytes], None], 306 | ) -> DoraStatus: 307 | if dora_event["type"] == "INPUT": 308 | return self.on_input(dora_event, send_output) 309 | return DoraStatus.CONTINUE 310 | 311 | def on_input( 312 | self, 313 | dora_input: dict, 314 | send_output: Callable[[str, bytes], None], 315 | ) -> DoraStatus: 316 | # inference 317 | frame = cv2.imdecode( 318 | np.frombuffer( 319 | dora_input["data"], 320 | np.uint8, 321 | ), 322 | -1, 323 | ) 324 | 325 | frame = frame[:, :, :3] 326 | h0, w0, _ = frame.shape 327 | h, w = (640, 640) 328 | frame, _, (pad_w, pad_h) = letterbox_for_img(frame) 329 | ratio = w / w0 330 | pad_h, pad_w = (int(pad_h), int(pad_w)) 331 | 332 | img = torch.unsqueeze(transform(frame), dim=0) 333 | half = False # half precision only supported on CUDA 334 | img = 
img.half() if half else img.float()  # uint8 to fp16/32
335 |         img = img.to(torch.device(DEVICE))
336 |         det_out, da_seg_out, ll_seg_out = self.model(img)
337 | 
338 |         # det_out = [pred.reshape((1, -1, 6)) for pred in det_out]
339 |         # inf_out = torch.cat(det_out, dim=1)
340 | 
341 |         # det_pred = non_max_suppression(
342 |         #     inf_out,
343 |         # )
344 |         # det = det_pred[0]
345 | 
346 |         da_predict = da_seg_out[:, :, pad_h : (h0 - pad_h), pad_w : (w0 - pad_w)]
347 |         da_seg_mask = torch.nn.functional.interpolate(
348 |             da_predict, scale_factor=1 / ratio, mode="bilinear"
349 |         )
350 |         _, da_seg_mask = torch.max(da_seg_mask, 1)
351 |         da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy()
352 |         da_seg_mask = morphological_process(da_seg_mask, kernel_size=7)
353 | 
354 |         contours, _ = cv2.findContours(
355 |             da_seg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
356 |         )
357 |         if len(contours) != 0:
358 |             contour = max(contours, key=cv2.contourArea)
359 |             contour = contour.astype(np.int32)
360 |             send_output("drivable_area", contour.tobytes(), dora_input["metadata"])
361 |         else:
362 |             send_output("drivable_area", np.array([]).tobytes(), dora_input["metadata"])
363 | 
364 |         ll_predict = ll_seg_out[:, :, pad_h : (h0 - pad_h), pad_w : (w0 - pad_w)]
365 | 
366 |         ll_seg_mask = torch.nn.functional.interpolate(
367 |             ll_predict, scale_factor=1 / ratio, mode="bilinear"
368 |         )
369 | 
370 |         _, ll_seg_mask = torch.max(ll_seg_mask, 1)
371 |         ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy()
372 |         # Lane line post-processing
373 |         ll_seg_mask = morphological_process(
374 |             ll_seg_mask, kernel_size=7, func_type=cv2.MORPH_OPEN
375 |         )
376 |         ll_seg_points = np.array(connect_lane(ll_seg_mask), np.int32)
377 |         send_output("lanes", ll_seg_points.tobytes(), dora_input["metadata"])
378 |         return DoraStatus.CONTINUE
379 | 
-------------------------------------------------------------------------------- /operators/fot_op.py: --------------------------------------------------------------------------------
1 | """
2 | # FOT operator
3 | 
4 | The Frenet Optimal Planner operator is based on [https://github.com/erdos-project/frenet_optimal_trajectory_planner/](https://github.com/erdos-project/frenet_optimal_trajectory_planner/) and wraps the different elements `obstacles`, `position`, `speed`, ... into a format the frenet planner can consume.
5 | 
6 | 
7 | The FOT initial conditions are:
8 | ```python
9 | initial_conditions = {
10 |     "ps": 0,
11 |     "target_speed": # The target speed
12 |     "pos": # The x, y current position
13 |     "vel": # The vx, vy current speed
14 |     "wp": # [[x, y], ... n_waypoints ] desired waypoints
15 |     "obs": # [[min_x, min_y, max_x, max_y], ... ] obstacles on the way
16 | }
17 | ```
18 | There is also a set of hyperparameters, described below; a minimal invocation example is sketched after the input list.
19 | 
20 | As our obstacles are defined as 3D dots, we need to transform those dots into the `[min_x, min_y, max_x, max_y]` format. We do that within the `get_obstacle_list` function. This approximation is very basic and probably needs to be revisited.
21 | 
22 | The output is either a successful trajectory that we can feed into the PID controller, or a failure, in which case we send the current position as the waypoint.
23 | 
24 | ## Inputs
25 | 
26 | - position, speed, obstacles and gps_waypoints (see the graph description below for their exact sources).
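As a reference, a minimal, hypothetical invocation of the underlying planner could look as follows. The values are illustrative; the operator below builds `initial_conditions` from its dataflow inputs and uses the hyperparameters listed in its `__init__`:

```python
import numpy as np
from frenetoptimaltrajectory import fot_wrapper

# Same hyperparameter dict as the Operator below.
hyperparameters = {
    "max_speed": 25.0, "max_accel": 45.0, "max_curvature": 55.0,
    "max_road_width_l": 0.1, "max_road_width_r": 0.1, "d_road_w": 0.5,
    "dt": 0.5, "maxt": 5.0, "mint": 2.0, "d_t_s": 5, "n_s_sample": 2.0,
    "obstacle_clearance": 0.1, "kd": 1.0, "kv": 0.1, "ka": 0.1, "kj": 0.1,
    "kt": 0.1, "ko": 0.1, "klat": 1.0, "klon": 1.0, "num_threads": 0,
}

initial_conditions = {
    "ps": 0,
    "target_speed": 10,
    "pos": np.array([0.0, 0.0]),       # current x, y (illustrative)
    "vel": np.array([2.0, 0.0]),       # current vx, vy (illustrative)
    "wp": np.array([[0.0, 0.0], [10.0, 0.0], [20.0, 0.0]]),
    "obs": np.array([[9.0, -1.5, 11.0, 1.5]]),  # one box straddling the path
}

(result_x, result_y, speeds, ix, iy, iyaw, d, s, speeds_x, speeds_y,
 params, costs, success) = fot_wrapper.run_fot(initial_conditions, hyperparameters)

if success:
    # The operator packs x, y and speed per waypoint before sending them out.
    waypoints = np.stack([result_x, result_y, speeds], axis=1)
```

The operator below does exactly this, but fills `pos`, `vel`, `wp` and `obs` from its inputs and falls back to the current position when `success` is false.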
27 | 28 | ## Outputs 29 | 30 | - waypoints: x_points, y_points, speeds 31 | 32 | ## Example Image 33 | 34 | ![Imgur](https://i.imgur.com/9AGDlQY.gif) 35 | 36 | ## Graph Description 37 | 38 | ```yaml 39 | - id: fot_op 40 | operator: 41 | python: ../../operators/fot_op.py 42 | outputs: 43 | - waypoints 44 | inputs: 45 | position: oasis_agent/position 46 | speed: oasis_agent/speed 47 | obstacles: obstacle_location_op/obstacles 48 | gps_waypoints: carla_gps_op/gps_waypoints 49 | ``` 50 | 51 | ## Graph Viz 52 | 53 | ```mermaid 54 | flowchart TB 55 | oasis_agent 56 | subgraph carla_gps_op 57 | carla_gps_op/op[op] 58 | end 59 | subgraph fot_op 60 | fot_op/op[op] 61 | end 62 | subgraph obstacle_location_op 63 | obstacle_location_op/op[op] 64 | end 65 | subgraph pid_control_op 66 | pid_control_op/op[op] 67 | end 68 | carla_gps_op/op -- gps_waypoints --> fot_op/op 69 | obstacle_location_op/op -- obstacles --> fot_op/op 70 | oasis_agent -- position --> fot_op/op 71 | oasis_agent -- speed --> fot_op/op 72 | fot_op/op -- waypoints --> pid_control_op/op 73 | ``` 74 | """ 75 | 76 | from typing import Callable 77 | 78 | import numpy as np 79 | import pyarrow as pa 80 | from dora import DoraStatus 81 | from dora_utils import LABELS 82 | from frenetoptimaltrajectory import fot_wrapper 83 | from numpy import linalg as LA 84 | from scipy.spatial.transform import Rotation as R 85 | from sklearn.metrics import pairwise_distances 86 | 87 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 88 | 89 | # Planning general 90 | TARGET_SPEED = 10 91 | 92 | OBSTACLE_CLEARANCE = 3 93 | OBSTACLE_RADIUS = 1 94 | OBSTACLE_RADIUS_TANGENT = 1.5 95 | MAX_CURBATURE = np.pi / 6 96 | 97 | 98 | def get_lane_list(position, lanes, waypoints): 99 | lane_list = [] 100 | 101 | lanes_xy = lanes[:, :, :2] 102 | 103 | for lane in lanes_xy: 104 | lane_list.append(np.concatenate([lane - 0.1, lane + 0.1], axis=1)) 105 | 106 | if len(lane_list) > 0: 107 | return np.concatenate(lane_list) 108 | else: 109 | return np.array([]) 110 | 111 | 112 | def get_obstacle_list(position, obstacle_predictions, waypoints): 113 | [x_ego, y_ego, z, rx, ry, rz, rw] = position 114 | [pitch, roll, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler("xyz", degrees=False) 115 | 116 | if len(obstacle_predictions) == 0 or len(waypoints) == 0: 117 | return np.empty((0, 4)) 118 | obstacle_list = [] 119 | 120 | distances = pairwise_distances(waypoints, obstacle_predictions[:, :2]).min(0) 121 | for distance, prediction in zip(distances, obstacle_predictions): 122 | # Use all prediction times as potential obstacles. 123 | [x, y, _, _confidence, _label] = prediction 124 | angle = np.arctan2(y - y_ego, x - x_ego) 125 | diff_angle = np.arctan2(np.sin(angle - yaw), np.cos(angle - yaw)) 126 | 127 | if distance < OBSTACLE_CLEARANCE and np.abs(diff_angle) < MAX_CURBATURE: 128 | obstacle_size = np.array( 129 | [ 130 | x 131 | - OBSTACLE_RADIUS * np.cos(angle) / 2 132 | - OBSTACLE_RADIUS_TANGENT * np.sin(angle) / 2, 133 | y 134 | - OBSTACLE_RADIUS * np.sin(angle) / 2 135 | - OBSTACLE_RADIUS_TANGENT * np.cos(angle) / 2, 136 | x 137 | + OBSTACLE_RADIUS * np.cos(angle) / 2 138 | + OBSTACLE_RADIUS_TANGENT * np.sin(angle) / 2, 139 | y 140 | + OBSTACLE_RADIUS * np.sin(angle) / 2 141 | + OBSTACLE_RADIUS_TANGENT * np.cos(angle) / 2, 142 | ] 143 | ) 144 | 145 | # Remove traffic light. TODO: Take into account traffic light. 
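            # Assumption: label 9 is the traffic-light class in the COCO label set
            # used by the yolov5 detector, which is why such detections are skipped.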
146 | if _label != 9: 147 | obstacle_list.append(obstacle_size) 148 | 149 | if len(obstacle_list) == 0: 150 | return np.empty((0, 4)) 151 | return np.array(obstacle_list) 152 | 153 | 154 | class Operator: 155 | """Send the frenet optimal trajectory `waypoints` given some initial conditions in 156 | cartesian space. 157 | Args: 158 | initial_conditions (dict): dict containing the following items 159 | ps (float): previous longitudinal position 160 | target_speed (float): target speed [m/s] 161 | pos (np.ndarray([float, float])): initial position in global coord 162 | vel (np.ndarray([float, float])): initial velocity [m/s] 163 | wp (np.ndarray([float, float])): list of global waypoints 164 | obs (np.ndarray([float, float, float, float])): list of obstacles 165 | as: [lower left x, lower left y, upper right x, upper right y] 166 | hyperparameters (dict): a dict of optional hyperparameters 167 | max_speed (float): maximum speed [m/s] 168 | max_accel (float): maximum acceleration [m/s^2] 169 | max_curvature (float): maximum curvature [1/m] 170 | max_road_width_l (float): maximum road width to the left [m] 171 | max_road_width_r (float): maximum road width to the right [m] 172 | d_road_w (float): road width sampling discretization [m] 173 | dt (float): time sampling discretization [s] 174 | maxt (float): max prediction horizon [s] 175 | mint (float): min prediction horizon [s] 176 | d_t_s (float): target speed sampling discretization [m/s] 177 | n_s_sample (float): sampling number of target speed 178 | obstacle_clearance (float): obstacle radius [m] 179 | kd (float): positional deviation cost 180 | kv (float): velocity cost 181 | ka (float): acceleration cost 182 | kj (float): jerk cost 183 | kt (float): time cost 184 | ko (float): dist to obstacle cost 185 | klat (float): lateral cost 186 | klon (float): longitudinal cost 187 | Returns: 188 | result_x (np.ndarray(float)): x positions of fot, if it exists 189 | result_y (np.ndarray(float)): y positions of fot, if it exists 190 | speeds (np.ndarray(float)): speeds of fot, if it exists 191 | ix (np.ndarray(float)): spline x of fot, if it exists 192 | iy (np.ndarray(float)): spline y of fot, if it exists 193 | iyaw (np.ndarray(float)): spline yaws of fot, if it exists 194 | d (np.ndarray(float)): lateral offset of fot, if it exists 195 | s (np.ndarray(float)): longitudinal offset of fot, if it exists 196 | speeds_x (np.ndarray(float)): x speeds of fot, if it exists 197 | speeds_y (np.ndarray(float)): y speeds of fot, if it exists 198 | params (dict): next frenet coordinates, if they exist 199 | costs (dict): costs of best frenet path, if it exists 200 | success (bool): whether a fot was found or not 201 | """ 202 | 203 | def __init__(self): 204 | self.obstacles = np.array([]) 205 | self.lanes = np.array([]) 206 | self.position = [] 207 | self.speed = [] 208 | self.last_position = [] 209 | self.waypoints = [] 210 | self.gps_waypoints = np.array([]) 211 | self.last_obstacles = np.array([]) 212 | self.obstacle_metadata = {} 213 | self.gps_metadata = {} 214 | self.metadata = {} 215 | self.orientation = None 216 | self.outputs = [] 217 | self.hyperparameters = { 218 | "max_speed": 25.0, 219 | "max_accel": 45.0, 220 | "max_curvature": 55.0, 221 | "max_road_width_l": 0.1, 222 | "max_road_width_r": 0.1, 223 | "d_road_w": 0.5, 224 | "dt": 0.5, 225 | "maxt": 5.0, 226 | "mint": 2.0, 227 | "d_t_s": 5, 228 | "n_s_sample": 2.0, 229 | "obstacle_clearance": 0.1, 230 | "kd": 1.0, 231 | "kv": 0.1, 232 | "ka": 0.1, 233 | "kj": 0.1, 234 | "kt": 0.1, 235 | "ko": 0.1, 
236 | "klat": 1.0, 237 | "klon": 1.0, 238 | "num_threads": 0, # set 0 to avoid using threaded algorithm 239 | } 240 | self.conds = { 241 | "s0": 0, 242 | "target_speed": TARGET_SPEED, 243 | } # paste output from debug log 244 | 245 | def on_event( 246 | self, 247 | dora_event: dict, 248 | send_output: Callable[[str, bytes], None], 249 | ) -> DoraStatus: 250 | if dora_event["type"] == "INPUT": 251 | return self.on_input(dora_event, send_output) 252 | return DoraStatus.CONTINUE 253 | 254 | def on_input( 255 | self, 256 | dora_input: dict, 257 | send_output: Callable[[str, bytes], None], 258 | ): 259 | if dora_input["id"] == "position": 260 | self.last_position = self.position 261 | self.position = np.array(dora_input["value"]) 262 | if len(self.last_position) == 0: 263 | self.last_position = self.position 264 | return DoraStatus.CONTINUE 265 | 266 | elif dora_input["id"] == "speed": 267 | self.speed = np.array(dora_input["value"]) 268 | return DoraStatus.CONTINUE 269 | 270 | elif dora_input["id"] == "obstacles": 271 | obstacles = np.array(dora_input["value"]).reshape((-1, 5)) 272 | if len(self.last_obstacles) > 0: 273 | self.obstacles = np.concatenate([self.last_obstacles, obstacles]) 274 | else: 275 | self.obstacles = obstacles 276 | 277 | elif dora_input["id"] == "global_lanes": 278 | lanes = np.array(dora_input["value"]).reshape((-1, 60, 3)) 279 | self.lanes = lanes 280 | return DoraStatus.CONTINUE 281 | 282 | elif "gps_waypoints" == dora_input["id"]: 283 | waypoints = np.array(dora_input["value"]) 284 | waypoints = waypoints.reshape((-1, 3))[:, :2] 285 | self.gps_waypoints = waypoints 286 | return DoraStatus.CONTINUE 287 | 288 | if len(self.gps_waypoints) == 0: 289 | print("No waypoints") 290 | send_output( 291 | "waypoints", 292 | self.gps_waypoints.tobytes(), 293 | dora_input["metadata"], 294 | ) 295 | return DoraStatus.CONTINUE 296 | 297 | elif len(self.position) == 0 or len(self.speed) == 0: 298 | return DoraStatus.CONTINUE 299 | 300 | [x, y, z, rx, ry, rz, rw] = self.position 301 | [_, _, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler("xyz", degrees=False) 302 | 303 | gps_obstacles = get_obstacle_list( 304 | self.position, self.obstacles, self.gps_waypoints 305 | ) 306 | 307 | if len(self.lanes) > 0: 308 | lanes = get_lane_list(self.position, self.lanes, self.gps_waypoints) 309 | obstacles = np.concatenate([gps_obstacles, lanes]) 310 | else: 311 | obstacles = gps_obstacles 312 | initial_conditions = { 313 | "ps": 0, 314 | "target_speed": self.conds["target_speed"], 315 | "pos": self.position[:2], 316 | "vel": (np.clip(LA.norm(self.speed), 0.5, 40)) 317 | * np.array([np.cos(yaw), np.sin(yaw)]), 318 | "wp": self.gps_waypoints, 319 | "obs": obstacles, 320 | } 321 | 322 | ( 323 | result_x, 324 | result_y, 325 | speeds, 326 | ix, 327 | iy, 328 | iyaw, 329 | d, 330 | s, 331 | speeds_x, 332 | speeds_y, 333 | misc, 334 | costs, 335 | success, 336 | ) = fot_wrapper.run_fot(initial_conditions, self.hyperparameters) 337 | 338 | if not success: 339 | initial_conditions["wp"] = initial_conditions["wp"][:5] 340 | print(f"fot failed. 
stopping with {initial_conditions}.") 341 | target_distance = LA.norm(self.gps_waypoints[-1] - self.position[:2]) 342 | print(f"Distance to target: {target_distance}") 343 | for obstacle in self.obstacles: 344 | print(f"obstacles:{obstacle}, label: {LABELS[int(obstacle[-1])]}") 345 | 346 | send_output( 347 | "waypoints", 348 | pa.array(np.array([x, y, 0.0], np.float32)), 349 | dora_input["metadata"], 350 | ) 351 | return DoraStatus.CONTINUE 352 | 353 | self.waypoints = np.concatenate([result_x, result_y]).reshape((2, -1)).T 354 | 355 | self.outputs = np.ascontiguousarray( 356 | np.concatenate([result_x, result_y, speeds]) 357 | .reshape((3, -1)) 358 | .T.astype(np.float32) 359 | ) 360 | send_output( 361 | "waypoints", 362 | pa.array(self.outputs.ravel()), 363 | dora_input["metadata"], 364 | ) 365 | return DoraStatus.CONTINUE 366 | -------------------------------------------------------------------------------- /operators/plot.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Plot operator 3 | 4 | Plot operator takes outputs from the graph and plot it on the camera frame. 5 | """ 6 | import time 7 | from typing import Callable 8 | 9 | import cv2 10 | import numpy as np 11 | import pyarrow as pa 12 | from dora import DoraStatus 13 | from dora_utils import ( 14 | LABELS, 15 | get_extrinsic_matrix, 16 | get_intrinsic_matrix, 17 | get_projection_matrix, 18 | local_points_to_camera_view, 19 | location_to_camera_view, 20 | ) 21 | from scipy.spatial.transform import Rotation as R 22 | 23 | pa.array([]) # See: https://github.com/apache/arrow/issues/34994 24 | 25 | CAMERA_WIDTH = 1920 26 | CAMERA_HEIGHT = 1080 27 | DEPTH_IMAGE_WIDTH = 1920 28 | DEPTH_IMAGE_HEIGHT = 1080 29 | DEPTH_FOV = 90 30 | SENSOR_POSITION = np.array([3, 0, 1]) 31 | 32 | VELODYNE_MATRIX = np.array([[0, 0, 1], [1, 0, 0], [0, -1, 0]]) 33 | UNREAL_MATRIX = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) 34 | INV_VELODYNE_MATRIX = np.linalg.inv(VELODYNE_MATRIX) 35 | INTRINSIC_MATRIX = get_intrinsic_matrix( 36 | DEPTH_IMAGE_WIDTH, DEPTH_IMAGE_HEIGHT, DEPTH_FOV 37 | ) 38 | 39 | VERBOSE = True 40 | NO_DISPLAY = False 41 | 42 | writer = cv2.VideoWriter( 43 | "output01.avi", 44 | cv2.VideoWriter_fourcc(*"MJPG"), 45 | 30, 46 | (CAMERA_WIDTH, CAMERA_HEIGHT), 47 | ) 48 | 49 | font = cv2.FONT_HERSHEY_SIMPLEX 50 | bottomLeftCornerOfText = (10, 30) 51 | fontScale = 0.6 52 | fontColor = (255, 0, 255) 53 | thickness = 2 54 | lineType = 2 55 | 56 | 57 | class Operator: 58 | """ 59 | Plot inputs using cv2.imshow 60 | """ 61 | 62 | def __init__(self): 63 | self.waypoints = [] 64 | self.gps_waypoints = [] 65 | self.obstacles = [] 66 | self.raw_obstacles = [] 67 | self.obstacles_bbox = [] 68 | self.obstacles_id = [] 69 | self.lanes = [] 70 | self.global_lanes = [] 71 | self.drivable_area = [] 72 | self.last_timestamp = time.time() 73 | self.position = [] 74 | self.last_position = [] 75 | self.camera_frame = [] 76 | self.traffic_sign_bbox = [] 77 | self.point_cloud = np.array([]) 78 | self.control = [] 79 | self.last_time = time.time() 80 | self.current_speed = [] 81 | 82 | def on_event( 83 | self, 84 | dora_event: dict, 85 | send_output: Callable[[str, bytes], None], 86 | ) -> DoraStatus: 87 | if dora_event["type"] == "INPUT": 88 | return self.on_input(dora_event, send_output) 89 | return DoraStatus.CONTINUE 90 | 91 | def on_input( 92 | self, 93 | dora_input: dict, 94 | send_output: Callable[[str, bytes], None], 95 | ): 96 | if "waypoints" == dora_input["id"]: 97 | waypoints = 
np.array(dora_input["value"]) 98 | waypoints = waypoints.reshape((-1, 3)) 99 | waypoints = waypoints[:, :2] 100 | # Adding z axis for plot 101 | waypoints = np.hstack((waypoints, -0.5 + np.zeros((waypoints.shape[0], 1)))) 102 | self.waypoints = waypoints 103 | 104 | elif "gps_waypoints" == dora_input["id"]: 105 | gps_waypoints = np.array(dora_input["value"]) 106 | gps_waypoints = gps_waypoints.reshape((-1, 3)) 107 | gps_waypoints = gps_waypoints[:, :2] 108 | # Adding z axis for plot 109 | gps_waypoints = np.hstack( 110 | (gps_waypoints, -0.5 + np.zeros((gps_waypoints.shape[0], 1))) 111 | ) 112 | self.gps_waypoints = gps_waypoints 113 | 114 | elif "control" == dora_input["id"]: 115 | self.control = np.array(dora_input["value"]) 116 | 117 | elif "obstacles_bbox" == dora_input["id"]: 118 | self.obstacles_bbox = dora_input["value"].to_numpy().reshape((-1, 6)) 119 | 120 | elif "traffic_sign_bbox" == dora_input["id"]: 121 | self.traffic_sign_bbox = np.array(dora_input["value"]).reshape((-1, 6)) 122 | 123 | elif "obstacles_id" == dora_input["id"]: 124 | self.obstacles_id = np.array(dora_input["value"]).reshape((-1, 7)) 125 | 126 | elif "obstacles" == dora_input["id"]: 127 | obstacles = np.array(dora_input["value"]).reshape((-1, 5))[:, :3] 128 | self.obstacles = obstacles 129 | 130 | elif "lanes" == dora_input["id"]: 131 | lanes = np.array(dora_input["value"]).reshape((-1, 30, 2)) 132 | self.lanes = lanes 133 | 134 | elif "global_lanes" == dora_input["id"]: 135 | global_lanes = np.array(dora_input["value"]).reshape((-1, 3)) 136 | self.global_lanes = global_lanes 137 | 138 | elif "drivable_area" == dora_input["id"]: 139 | drivable_area = np.array(dora_input["value"]).reshape((1, -1, 2)) 140 | self.drivable_area = drivable_area 141 | 142 | elif "position" == dora_input["id"]: 143 | # Add sensor transform 144 | 145 | self.last_position = self.position 146 | self.position = np.array(dora_input["value"]) 147 | if len(self.last_position) == 0: 148 | return DoraStatus.CONTINUE 149 | 150 | self.current_speed = (self.position[:2] - self.last_position[:2]) * 20 151 | 152 | elif "lidar_pc" == dora_input["id"]: 153 | point_cloud = np.array(dora_input["value"]) 154 | point_cloud = point_cloud.reshape((-1, 3)) 155 | # To camera coordinate 156 | # The latest coordinate space is the unreal space. 
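            # Descriptive note (hedged): the multiplication below remaps each
            # lidar point [x, y, z] to [y, -z, x]. This appears to take the
            # lidar frame (x forward, y right, z up, in CARLA's left-handed
            # convention) into the camera frame (x right, y down, z forward),
            # so the `z > 0.1` filter that follows keeps only points in front
            # of the camera before they are projected with INTRINSIC_MATRIX.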
157 | point_cloud = np.dot( 158 | point_cloud, 159 | VELODYNE_MATRIX, 160 | ) 161 | point_cloud = point_cloud[np.where(point_cloud[:, 2] > 0.1)] 162 | point_cloud = local_points_to_camera_view(point_cloud, INTRINSIC_MATRIX) 163 | 164 | if len(point_cloud) != 0: 165 | self.point_cloud = point_cloud.T 166 | 167 | elif "image" == dora_input["id"]: 168 | self.camera_frame = ( 169 | dora_input["value"] 170 | .to_numpy() 171 | .copy() 172 | .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 4)) 173 | ) 174 | 175 | if "image" != dora_input["id"] or isinstance(self.camera_frame, list): 176 | return DoraStatus.CONTINUE 177 | 178 | if len(self.position) != 0: 179 | inv_extrinsic_matrix = np.linalg.inv( 180 | get_extrinsic_matrix(get_projection_matrix(self.position)) 181 | ) 182 | else: 183 | inv_extrinsic_matrix = None 184 | # print("no position messages.") 185 | 186 | resized_image = self.camera_frame[:, :, :3] 187 | resized_image = np.ascontiguousarray(resized_image, np.uint8) 188 | 189 | ## Drawing waypoints on frame 190 | if inv_extrinsic_matrix is not None: 191 | waypoints = location_to_camera_view( 192 | self.waypoints, INTRINSIC_MATRIX, inv_extrinsic_matrix 193 | ).T 194 | waypoints = np.clip(waypoints, 0, 1_000_000) 195 | for id, waypoint in enumerate(waypoints): 196 | if np.isnan(waypoint).any(): 197 | break 198 | 199 | cv2.circle( 200 | resized_image, 201 | (int(waypoint[0]), int(waypoint[1])), 202 | 3, 203 | ( 204 | int(np.clip(255 - waypoint[2] * 100, 0, 255)), 205 | int(np.clip(waypoint[2], 0, 255)), 206 | 255, 207 | ), 208 | -1, 209 | ) 210 | if VERBOSE: 211 | [x, y, z] = self.waypoints[id] 212 | cv2.putText( 213 | resized_image, 214 | f"x: {x:.2f}, y: {y:.2f}", 215 | (int(waypoint[0]), int(waypoint[1])), 216 | font, 217 | 0.5, 218 | ( 219 | int(np.clip(255 - waypoint[2] * 100, 0, 255)), 220 | int(np.clip(waypoint[2], 0, 255)), 221 | 255, 222 | ), 223 | 2, 224 | 1, 225 | ) 226 | 227 | ## Drawing gps waypoints on frame 228 | if inv_extrinsic_matrix is not None: 229 | gps_waypoints = location_to_camera_view( 230 | self.gps_waypoints, INTRINSIC_MATRIX, inv_extrinsic_matrix 231 | ).T 232 | 233 | for waypoint in gps_waypoints: 234 | if np.isnan(waypoint).any(): 235 | break 236 | cv2.circle( 237 | resized_image, 238 | (int(waypoint[0]), int(waypoint[1])), 239 | 3, 240 | ( 241 | int(np.clip(255 - waypoint[2] * 100, 0, 255)), 242 | int(np.clip(waypoint[2], 0, 255)), 243 | 122, 244 | ), 245 | -1, 246 | ) 247 | 248 | ## Drawing lanes on frame 249 | if inv_extrinsic_matrix is not None: 250 | lanes = location_to_camera_view( 251 | self.global_lanes, INTRINSIC_MATRIX, inv_extrinsic_matrix 252 | ).T 253 | 254 | for lane_dot in lanes: 255 | if np.isnan(lane_dot).any(): 256 | break 257 | cv2.circle( 258 | resized_image, 259 | (int(lane_dot[0]), int(lane_dot[1])), 260 | 3, 261 | ( 262 | 100, 263 | 100, 264 | 100, 265 | ), 266 | -1, 267 | ) 268 | 269 | ## Draw obstacle dot 270 | if inv_extrinsic_matrix is not None: 271 | obstacles = location_to_camera_view( 272 | self.obstacles, INTRINSIC_MATRIX, inv_extrinsic_matrix 273 | ).T 274 | 275 | for id, obstacle in enumerate(obstacles): 276 | [x, y, z] = obstacle 277 | location = [x, y, z] 278 | cv2.circle( 279 | resized_image, 280 | (int(location[0]), int(location[1])), 281 | 3, 282 | ( 283 | 0, 284 | 200, 285 | 0, 286 | ), 287 | -1, 288 | ) 289 | 290 | if VERBOSE: 291 | [x, y, z] = self.obstacles[id] 292 | cv2.putText( 293 | resized_image, 294 | f"x: {x:.2f}, y: {y:.2f}", 295 | (int(location[0]), int(location[1])), 296 | font, 297 | 0.5, 298 | (0, 200, 0), 299 | 2, 300 
| 1, 301 | ) 302 | 303 | for point in self.point_cloud: 304 | cv2.circle( 305 | resized_image, 306 | (int(point[0]), int(point[1])), 307 | 3, 308 | ( 309 | 0, 310 | int(max(255 - point[2] * 100, 0)), 311 | int(min(point[2] * 10, 255)), 312 | ), 313 | -1, 314 | ) 315 | 316 | for obstacle_bb in self.obstacles_bbox: 317 | [min_x, max_x, min_y, max_y, confidence, label] = obstacle_bb 318 | 319 | start = (int(min_x), int(min_y)) 320 | end = (int(max_x), int(max_y)) 321 | cv2.rectangle(resized_image, start, end, (0, 255, 0), 2) 322 | if VERBOSE: 323 | cv2.putText( 324 | resized_image, 325 | LABELS[label] + f", {confidence}%", 326 | (int(min_x), int(max_y)), 327 | font, 328 | 0.5, 329 | (0, 255, 0), 330 | 2, 331 | 1, 332 | ) 333 | 334 | for obstacle_id in self.obstacles_id: 335 | [ 336 | min_x, 337 | max_x, 338 | min_y, 339 | max_y, 340 | track_id, 341 | confidence, 342 | label, 343 | ] = obstacle_id 344 | start = (int(min_x), int(min_y)) 345 | end = (int(max_x), int(max_y)) 346 | # cv2.rectangle(resized_image, start, end, (0, 255, 0), 2) 347 | 348 | cv2.putText( 349 | resized_image, 350 | f"#{track_id}", 351 | (int(max_x), int(max_y + 20)), 352 | font, 353 | 0.75, 354 | (255, 140, 0), 355 | 2, 356 | 1, 357 | ) 358 | 359 | # for lane in self.lanes: 360 | # cv2.polylines(resized_image, [lane], False, (0, 0, 255), 3) 361 | 362 | for contour in self.drivable_area: 363 | if len(contour) != 0: 364 | back = resized_image.copy() 365 | cv2.drawContours(back, [contour], 0, (0, 255, 0), -1) 366 | 367 | # blend with original image 368 | alpha = 0.25 369 | resized_image = cv2.addWeighted( 370 | resized_image, 1 - alpha, back, alpha, 0 371 | ) 372 | if not isinstance(self.position, list): 373 | [x, y, z, rx, ry, rz, rw] = self.position 374 | [pitch, roll, yaw] = R.from_quat([rx, ry, rz, rw]).as_euler( 375 | "xyz", degrees=True 376 | ) 377 | 378 | cv2.putText( 379 | resized_image, 380 | f"""cur: x: {x:.2f}, y: {y:.2f}, pitch: {pitch:.2f}, roll: {roll:.2f}, yaw: {yaw:.2f}""", 381 | (10, 30), 382 | font, 383 | fontScale, 384 | fontColor, 385 | thickness, 386 | lineType, 387 | ) 388 | 389 | if len(self.current_speed) != 0: 390 | cv2.putText( 391 | resized_image, 392 | f"""vx: {self.current_speed[0]:.2f}, vy: {self.current_speed[1]:.2f}""", 393 | (10, 50), 394 | font, 395 | fontScale, 396 | fontColor, 397 | thickness, 398 | lineType, 399 | ) 400 | 401 | if len(self.control) != 0: 402 | cv2.putText( 403 | resized_image, 404 | f"""throttle: {self.control[0]:.2f}, brake: {self.control[2]:.2f}, steering: {np.degrees(self.control[1]):.2f} """, 405 | (10, 70), 406 | font, 407 | fontScale, 408 | fontColor, 409 | thickness, 410 | lineType, 411 | ) 412 | 413 | # cv2.putText( 414 | # resized_image, 415 | # f"""latency: {(time.time() - self.last_time) * 1000:.2f} ms""", 416 | # (10, 105), 417 | # font, 418 | # fontScale, 419 | # fontColor, 420 | # thickness, 421 | # lineType, 422 | # ) 423 | writer.write(resized_image) 424 | resized_image = cv2.resize(resized_image, (800, 600)) 425 | if not NO_DISPLAY: 426 | cv2.imshow("image", resized_image) 427 | cv2.waitKey(1) 428 | self.last_time = time.time() 429 | ## send_output("plot_status", b"") 430 | return DoraStatus.CONTINUE 431 | --------------------------------------------------------------------------------
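Note on the `waypoints` wire format shared by fot_op.py and plot.py above: fot_op.py packs one float32 [x, y, speed] triple per waypoint and sends the flattened array through pyarrow, and plot.py reads it back with reshape((-1, 3)). The following minimal sketch is standalone and not part of the repository; the encode_waypoints/decode_waypoints helper names are hypothetical, and it assumes only numpy and pyarrow are installed. It illustrates the round-trip that the two operators perform.

import numpy as np
import pyarrow as pa


def encode_waypoints(result_x, result_y, speeds):
    # Pack one [x, y, speed] float32 triple per waypoint, row-major,
    # mirroring how fot_op.py builds `self.outputs` before send_output.
    packed = (
        np.concatenate([result_x, result_y, speeds])
        .reshape((3, -1))
        .T.astype(np.float32)
    )
    return pa.array(np.ascontiguousarray(packed).ravel())


def decode_waypoints(value):
    # Mirror plot.py: read the flat array back into (N, 3) rows,
    # then split positions from speeds.
    waypoints = np.array(value).reshape((-1, 3))
    return waypoints[:, :2], waypoints[:, 2]


if __name__ == "__main__":
    xy, speeds = decode_waypoints(
        encode_waypoints(
            np.array([1.0, 2.0]),   # result_x
            np.array([0.5, 0.6]),   # result_y
            np.array([3.0, 3.1]),   # speeds
        )
    )
    assert xy.shape == (2, 2) and speeds.shape == (2,)

The fallback branch in fot_op.py (the "fot failed" case) sends a single [x, y, 0.0] triple in the same layout, so the same decoding applies to it as well.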