├── .gitignore
├── isaac_ros_image_proc
│   ├── test
│   │   ├── test_cases
│   │   │   ├── image_proc
│   │   │   │   ├── dense_grid
│   │   │   │   │   ├── image_raw.json
│   │   │   │   │   ├── image_color.jpg
│   │   │   │   │   ├── image_mono.jpg
│   │   │   │   │   ├── image_raw.jpg
│   │   │   │   │   ├── image_rect.jpg
│   │   │   │   │   ├── image_rect_color.jpg
│   │   │   │   │   └── camera_info.json
│   │   │   │   ├── white_grid
│   │   │   │   │   ├── image_raw.json
│   │   │   │   │   ├── image_color.jpg
│   │   │   │   │   ├── image_mono.jpg
│   │   │   │   │   ├── image_raw.jpg
│   │   │   │   │   ├── image_rect.jpg
│   │   │   │   │   ├── image_rect_color.jpg
│   │   │   │   │   └── camera_info.json
│   │   │   │   └── nvidia_green_grid
│   │   │   │       ├── image_raw.json
│   │   │   │       ├── image_color.jpg
│   │   │   │       ├── image_mono.jpg
│   │   │   │       ├── image_raw.jpg
│   │   │   │       ├── image_rect.jpg
│   │   │   │       ├── image_rect_color.jpg
│   │   │   │       └── camera_info.json
│   │   │   ├── resize
│   │   │   │   └── profile
│   │   │   │       ├── image.json
│   │   │   │       ├── NVIDIAprofile.png
│   │   │   │       └── camera_info.json
│   │   │   ├── NVIDIAprofile.png
│   │   │   ├── rectify
│   │   │   │   ├── chessboard0
│   │   │   │   │   ├── image_raw.json
│   │   │   │   │   ├── image_raw.jpg
│   │   │   │   │   ├── image_rect.jpg
│   │   │   │   │   └── camera_info.json
│   │   │   │   └── chessboard1
│   │   │   │       ├── image_raw.json
│   │   │   │       ├── image_raw.jpg
│   │   │   │       ├── image_rect.jpg
│   │   │   │       └── camera_info.json
│   │   │   ├── camera_info.json
│   │   │   └── generate_test_cases.py
│   │   ├── isaac_ros_image_format_converter_test.py
│   │   ├── isaac_ros_image_format_converter_grayscale_test.py
│   │   ├── isaac_ros_image_format_converter_4_channel_test.py
│   │   ├── isaac_ros_resize_test.py
│   │   ├── isaac_ros_resize_aspect_ratio_test.py
│   │   ├── isaac_ros_rectify_test.py
│   │   ├── isaac_ros_resize_invalid_test.py
│   │   └── isaac_ros_image_proc_test.py
│   ├── package.xml
│   ├── include
│   │   └── isaac_ros_image_proc
│   │       ├── image_format_converter_node.hpp
│   │       ├── resize_node.hpp
│   │       └── rectify_node.hpp
│   ├── src
│   │   ├── image_proc_main.cpp
│   │   ├── resize_node.cpp
│   │   ├── image_format_converter_node.cpp
│   │   └── rectify_node.cpp
│   └── CMakeLists.txt
├── isaac_ros_stereo_image_proc
│   ├── test
│   │   ├── test_cases
│   │   │   └── stereo_images_chair
│   │   │       ├── image_left.json
│   │   │       ├── image_right.json
│   │   │       ├── image_left.png
│   │   │       ├── image_right.png
│   │   │       ├── test_disparity.png
│   │   │       └── camera_info.json
│   │   ├── isaac_ros_stereo_disparity_pol.py
│   │   ├── isaac_ros_stereo_pipeline_pol.py
│   │   ├── isaac_ros_stereo_point_cloud_pol.py
│   │   ├── isaac_ros_stereo_point_cloud_OSS_test.py
│   │   └── isaac_ros_stereo_point_cloud_output_compare.py
│   ├── src
│   │   ├── stereo_image_proc_main.cpp
│   │   ├── disparity_node.cpp
│   │   ├── point_cloud_node_cuda.cu
│   │   └── point_cloud_node.cpp
│   ├── package.xml
│   ├── include
│   │   └── isaac_ros_stereo_image_proc
│   │       ├── nvPointCloud.h
│   │       ├── point_cloud_node_cuda.hpp
│   │       ├── disparity_node.hpp
│   │       └── point_cloud_node.hpp
│   ├── launch
│   │   └── isaac_ros_stereo_image_pipeline_launch.py
│   └── CMakeLists.txt
├── .gitattributes
├── isaac_ros_image_pipeline
│   ├── CMakeLists.txt
│   └── package.xml
├── giistr-cla.md
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore all pycache files
2 | **/__pycache__/**
3 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_raw.jpg",
3 | "encoding": "bgr8"
4 | }
--------------------------------------------------------------------------------
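
The image_raw.json files above pair a test image filename with the ROS encoding the resulting message should advertise. The tests load them through the isaac_ros_test.JSONConversion helper, which is not part of this dump; the sketch below shows one plausible way such a descriptor could be turned into a sensor_msgs/Image, assuming OpenCV and cv_bridge are available and that json_path is a pathlib.Path.

import json

import cv2
from cv_bridge import CvBridge


def load_image_from_json(json_path):
    """Build a sensor_msgs/Image from an image_raw.json-style descriptor (sketch)."""
    with open(json_path) as f:
        spec = json.load(f)
    # The JSON names the image file (relative to the JSON) and the desired encoding.
    image = cv2.imread(str(json_path.parent / spec['image']))
    return CvBridge().cv2_to_imgmsg(image, encoding=spec['encoding'])
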
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_raw.jpg",
3 | "encoding": "bgr8"
4 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/resize/profile/image.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "NVIDIAprofile.png",
3 | "encoding": "bgr8"
4 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_raw.jpg",
3 | "encoding": "bgr8"
4 | }
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/image_left.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_left.png",
3 | "encoding": "bgr8"
4 | }
5 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/image_right.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_right.png",
3 | "encoding": "bgr8"
4 | }
5 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/NVIDIAprofile.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:aa345be1c5972fa1ad601db83ec2fd56c5fb428e09de166b76b72d266c4cb346
3 | size 94919
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard0/image_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_raw.jpg",
3 | "encoding": "bgr8",
4 | "chessboard": {
5 | "width": 9,
6 | "height": 6
7 | }
8 | }
--------------------------------------------------------------------------------
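
The extra "chessboard" block records the inner-corner count of the calibration target in the image. One plausible use, not taken from the test code in this dump, is locating those corners with OpenCV so a rectification test can check that they fall on straight lines after undistortion:

import cv2


def find_chessboard_corners(gray_image, spec):
    """Locate the inner chessboard corners described by an image_raw.json entry (sketch)."""
    pattern_size = (spec['chessboard']['width'], spec['chessboard']['height'])
    found, corners = cv2.findChessboardCorners(gray_image, pattern_size)
    return corners if found else None
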
/isaac_ros_image_proc/test/test_cases/rectify/chessboard1/image_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "image_raw.jpg",
3 | "encoding": "bgr8",
4 | "chessboard": {
5 | "width": 9,
6 | "height": 6
7 | }
8 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard0/image_raw.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:a6c09cf9f81165b8ba4548778f91ab4d2e5e4595b3a5a570294feec6b59f9c56
3 | size 107022
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard1/image_raw.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:8d7361b05b4310ae35c9969783abaabe2b74e7e195ef8749779bcbea975aad96
3 | size 104743
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/resize/profile/NVIDIAprofile.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:aa345be1c5972fa1ad601db83ec2fd56c5fb428e09de166b76b72d266c4cb346
3 | size 94919
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:6ffa4278dedf9fb52250d21de7ff92ab0ab9c9a8d0a2d9a9adfd6e4a512c2cad
3 | size 280346
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_mono.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f4f2a4095ffafeb3f637da9c8dbaea23873c7fbeba8fe3a6bc89bffb03e2fca5
3 | size 278808
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_raw.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:6ffa4278dedf9fb52250d21de7ff92ab0ab9c9a8d0a2d9a9adfd6e4a512c2cad
3 | size 280346
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_rect.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ca4d6fe93e342b1f054cce9c3dec87281463fff6b4b6f870096c74b25ddb38e1
3 | size 211178
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:81898d205edb74fbffb0b7c74ad6f3ac9a7ffba4de51605773063c9cedb6b7a4
3 | size 59014
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_mono.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f9b156d631bbc9bcfa31acd357db5cd609307bc140a58164b1d3330d4b250bba
3 | size 56288
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_raw.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:81898d205edb74fbffb0b7c74ad6f3ac9a7ffba4de51605773063c9cedb6b7a4
3 | size 59014
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_rect.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ebcd5841e72c4d7ad95e1682fabf00489e033deacaeec86177df13f335c4bdf7
3 | size 70079
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard0/image_rect.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ec593a972bb9c3131bef689f96f3edd7047daace6379762dd0ab0417bc2fbc0c
3 | size 256505
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard1/image_rect.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:18f594fff19b44d9fad0f08937460a98e9c1b5f2fa211f0ea303fc9fed21297d
3 | size 234942
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/image_rect_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:86c0b731e6fb100164cfcdf15d5c3e4b9ef123b390f9e33df077f36ae58b6094
3 | size 212774
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:5c0250ad9b654ed31cc4b977b45a9dca3eff8dee9079ccf113fedd0d33abb956
3 | size 74074
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_mono.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:02506568a5acd7c87b4435429917fbd8b1fc95b6cf0ddb36ad558dba2855b912
3 | size 50576
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_raw.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:5c0250ad9b654ed31cc4b977b45a9dca3eff8dee9079ccf113fedd0d33abb956
3 | size 74074
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_rect.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:28a6b46b33f421f1a6dc87f03567aa1b51b347b319086b83fd7edba505687118
3 | size 57464
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/image_rect_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d2893426bd607f0a82a61309286bee7637c672b38db59eb4d68c4dff5d26bde3
3 | size 72263
4 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/image_left.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4780d36f05640f73a25cfa9def04e1873781e580c4095bf965d0d13f7933ed4a
3 | size 210446
4 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/image_right.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:fa62ac2156ef0c4f9838545eade0c81bf9c8eb67ce424502a3609e76839dfb8d
3 | size 206410
4 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/image_rect_color.jpg:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:5e581a719d1f65db10b2c74454301f03fb9bde50220a8ff0ee968bc3e243dcb3
3 | size 78805
4 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/test_disparity.png:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0eb2acbd497508cfa8c1b877942addf8e8b52403d32d0629531029111b82202c
3 | size 26878
4 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Images
2 | *.gif filter=lfs diff=lfs merge=lfs -text
3 | *.jpg filter=lfs diff=lfs merge=lfs -text
4 | *.png filter=lfs diff=lfs merge=lfs -text
5 | *.psd filter=lfs diff=lfs merge=lfs -text
6 |
7 |
8 | # Archives
9 | *.gz filter=lfs diff=lfs merge=lfs -text
10 | *.tar filter=lfs diff=lfs merge=lfs -text
11 | *.zip filter=lfs diff=lfs merge=lfs -text
12 |
13 | # Documents
14 | *.pdf filter=lfs diff=lfs merge=lfs -text
15 |
16 |
--------------------------------------------------------------------------------
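
Because .jpg and .png files are tracked with Git LFS (see the rules above), a clone made without running `git lfs pull` contains only small pointer files like the ones reproduced throughout this listing. A pre-flight check along these lines can confirm the test assets were actually fetched; the helper name and root path are illustrative.

from pathlib import Path


def unfetched_lfs_pointers(root):
    """Return image files that are still Git LFS pointer stubs rather than real data."""
    stale = []
    for path in Path(root).rglob('*'):
        if path.is_file() and path.suffix.lower() in ('.jpg', '.png'):
            with open(path, 'rb') as f:
                first_line = f.readline()
            if first_line.startswith(b'version https://git-lfs'):
                stale.append(path)
    return stale
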
/isaac_ros_image_proc/test/test_cases/rectify/chessboard0/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 1280,
6 | "height": 800,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | 0.039843,
10 | -0.174998,
11 | 0.159336,
12 | -0.048084,
13 | 0.0
14 | ],
15 | "K": [
16 | 434.943999,
17 | 0.000000,
18 | 651.073921,
19 | 0.0,
20 | 431.741273,
21 | 441.878037,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 1.0,
28 | 0.0,
29 | 0.0,
30 | 0.0,
31 | 1.0,
32 | 0.0,
33 | 0.0,
34 | 0.0,
35 | 1.0
36 | ],
37 | "P": [
38 | 1.0,
39 | 0.0,
40 | 0.0,
41 | 0.0,
42 | 0.0,
43 | 1.0,
44 | 0.0,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/rectify/chessboard1/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 1280,
6 | "height": 800,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | 0.039843,
10 | -0.174998,
11 | 0.159336,
12 | -0.048084,
13 | 0.0
14 | ],
15 | "K": [
16 | 434.943999,
17 | 0.000000,
18 | 651.073921,
19 | 0.0,
20 | 431.741273,
21 | 441.878037,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 1.0,
28 | 0.0,
29 | 0.0,
30 | 0.0,
31 | 1.0,
32 | 0.0,
33 | 0.0,
34 | 0.0,
35 | 1.0
36 | ],
37 | "P": [
38 | 1.0,
39 | 0.0,
40 | 0.0,
41 | 0.0,
42 | 0.0,
43 | 1.0,
44 | 0.0,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/test_cases/stereo_images_chair/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 1920,
6 | "height": 1080,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | 0.0,
10 | 0.0,
11 | 0.0,
12 | 0.0,
13 | 0.0
14 | ],
15 | "K": [
16 | 434.943999,
17 | 0.000000,
18 | 651.073921,
19 | 0.0,
20 | 431.741273,
21 | 441.878037,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 1.0,
28 | 0.0,
29 | 0.0,
30 | 0.0,
31 | 1.0,
32 | 0.0,
33 | 0.0,
34 | 0.0,
35 | 1.0
36 | ],
37 | "P": [
38 | 434.943999,
39 | 0.0,
40 | 651.073921,
41 | 160.0,
42 | 0.0,
43 | 431.741273,
44 | 441.878037,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
52 |
53 |
--------------------------------------------------------------------------------
/isaac_ros_image_pipeline/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | cmake_minimum_required(VERSION 3.5)
10 | project(isaac_ros_image_pipeline)
11 |
12 | # Default to C++14
13 | if(NOT CMAKE_CXX_STANDARD)
14 | set(CMAKE_CXX_STANDARD 14)
15 | endif()
16 |
17 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
18 | add_compile_options(-Wall -Wextra -Wpedantic)
19 | endif()
20 |
21 | find_package(ament_cmake REQUIRED)
22 |
23 | if(BUILD_TESTING)
24 | find_package(ament_lint_auto REQUIRED)
25 | ament_lint_auto_find_test_dependencies()
26 | endif()
27 |
28 | ament_package()
29 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "height": 480,
6 | "width": 640,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | -0.363528858080088,
10 | 0.16117037733986861,
11 | -8.1109585007538829e-05,
12 | -0.00044776712298447841,
13 | 0.0
14 | ],
15 | "K": [
16 | 430.15433020105519,
17 | 0.0,
18 | 311.71339830549732,
19 | 0.0,
20 | 430.60920415473657,
21 | 221.06824942698509,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 0.99806560714807102,
28 | 0.0068562422224214027,
29 | 0.061790256276695904,
30 | -0.0067522959054715113,
31 | 0.99997541519165112,
32 | -0.0018909025066874664,
33 | -0.061801701660692349,
34 | 0.0014700186639396652,
35 | 0.99808736527268516
36 | ],
37 | "P": [
38 | 295.53402059708782,
39 | 0.0,
40 | 285.55760765075684,
41 | 0.0,
42 | 0.0,
43 | 295.53402059708782,
44 | 223.29617881774902,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
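
These camera_info.json files carry the fields of a sensor_msgs/CameraInfo message (plumb_bob distortion D, intrinsics K, rectification R, projection P). The tests read them via isaac_ros_test.JSONConversion, which is not included in this dump; a minimal stand-in that maps the fields onto the ROS2 message could look like this, assuming the one-to-one field naming shown above.

import json

from sensor_msgs.msg import CameraInfo


def load_camera_info_from_json(json_path):
    """Build a sensor_msgs/CameraInfo from a camera_info.json descriptor (sketch)."""
    with open(json_path) as f:
        data = json.load(f)
    msg = CameraInfo()
    msg.header.frame_id = data['header']['frame_id']
    msg.width = data['width']
    msg.height = data['height']
    msg.distortion_model = data['distortion_model']
    msg.d = data['D']  # distortion coefficients
    msg.k = data['K']  # 3x3 intrinsic matrix, row-major
    msg.r = data['R']  # 3x3 rectification matrix, row-major
    msg.p = data['P']  # 3x4 projection matrix, row-major
    return msg
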
/isaac_ros_image_proc/test/test_cases/image_proc/dense_grid/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 640,
6 | "height": 480,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | -0.363528858080088,
10 | 0.1611703773398686,
11 | -8.110958500753883e-05,
12 | -0.0004477671229844784,
13 | 0.0
14 | ],
15 | "K": [
16 | 430.1543302010552,
17 | 0.0,
18 | 311.7133983054973,
19 | 0.0,
20 | 430.6092041547366,
21 | 221.0682494269851,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 0.998065607148071,
28 | 0.006856242222421403,
29 | 0.061790256276695904,
30 | -0.006752295905471511,
31 | 0.9999754151916511,
32 | -0.0018909025066874664,
33 | -0.06180170166069235,
34 | 0.0014700186639396652,
35 | 0.9980873652726852
36 | ],
37 | "P": [
38 | 295.5340205970878,
39 | 0.0,
40 | 285.55760765075684,
41 | 0.0,
42 | 0.0,
43 | 295.5340205970878,
44 | 223.29617881774902,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/white_grid/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 640,
6 | "height": 480,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | -0.363528858080088,
10 | 0.1611703773398686,
11 | -8.110958500753883e-05,
12 | -0.0004477671229844784,
13 | 0.0
14 | ],
15 | "K": [
16 | 430.1543302010552,
17 | 0.0,
18 | 311.7133983054973,
19 | 0.0,
20 | 430.6092041547366,
21 | 221.0682494269851,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 0.998065607148071,
28 | 0.006856242222421403,
29 | 0.061790256276695904,
30 | -0.006752295905471511,
31 | 0.9999754151916511,
32 | -0.0018909025066874664,
33 | -0.06180170166069235,
34 | 0.0014700186639396652,
35 | 0.9980873652726852
36 | ],
37 | "P": [
38 | 295.5340205970878,
39 | 0.0,
40 | 285.55760765075684,
41 | 0.0,
42 | 0.0,
43 | 295.5340205970878,
44 | 223.29617881774902,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/image_proc/nvidia_green_grid/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "width": 640,
6 | "height": 480,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | -0.363528858080088,
10 | 0.1611703773398686,
11 | -8.110958500753883e-05,
12 | -0.0004477671229844784,
13 | 0.0
14 | ],
15 | "K": [
16 | 430.1543302010552,
17 | 0.0,
18 | 311.7133983054973,
19 | 0.0,
20 | 430.6092041547366,
21 | 221.0682494269851,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 0.998065607148071,
28 | 0.006856242222421403,
29 | 0.061790256276695904,
30 | -0.006752295905471511,
31 | 0.9999754151916511,
32 | -0.0018909025066874664,
33 | -0.06180170166069235,
34 | 0.0014700186639396652,
35 | 0.9980873652726852
36 | ],
37 | "P": [
38 | 295.5340205970878,
39 | 0.0,
40 | 285.55760765075684,
41 | 0.0,
42 | 0.0,
43 | 295.5340205970878,
44 | 223.29617881774902,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/test_cases/resize/profile/camera_info.json:
--------------------------------------------------------------------------------
1 | {
2 | "header": {
3 | "frame_id": "tf_camera"
4 | },
5 | "height": 460,
6 | "width": 460,
7 | "distortion_model": "plumb_bob",
8 | "D": [
9 | -0.363528858080088,
10 | 0.16117037733986861,
11 | -8.1109585007538829e-05,
12 | -0.00044776712298447841,
13 | 0.0
14 | ],
15 | "K": [
16 | 430.15433020105519,
17 | 0.0,
18 | 311.71339830549732,
19 | 0.0,
20 | 430.60920415473657,
21 | 221.06824942698509,
22 | 0.0,
23 | 0.0,
24 | 1.0
25 | ],
26 | "R": [
27 | 0.99806560714807102,
28 | 0.0068562422224214027,
29 | 0.061790256276695904,
30 | -0.0067522959054715113,
31 | 0.99997541519165112,
32 | -0.0018909025066874664,
33 | -0.061801701660692349,
34 | 0.0014700186639396652,
35 | 0.99808736527268516
36 | ],
37 | "P": [
38 | 295.53402059708782,
39 | 0.0,
40 | 285.55760765075684,
41 | 0.0,
42 | 0.0,
43 | 295.53402059708782,
44 | 223.29617881774902,
45 | 0.0,
46 | 0.0,
47 | 0.0,
48 | 1.0,
49 | 0.0
50 | ]
51 | }
--------------------------------------------------------------------------------
/isaac_ros_image_pipeline/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
12 |
13 |
14 |
15 | isaac_ros_image_pipeline
16 | 0.9.0
17 | Core image processing
18 |
19 | Hemal Shah
20 | JetPack EULA
21 | https://developer.nvidia.com/blog/accelerating-ai-modules-for-ros-and-ros-2-on-jetson/
22 | Jaiveer Singh
23 |
24 | ament_cmake
25 |
26 | isaac_ros_image_proc
27 | isaac_ros_stereo_image_proc
28 |
29 | ament_lint_auto
30 | ament_lint_common
31 |
32 |
33 | ament_cmake
34 |
35 |
36 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
12 |
13 |
14 |
15 | isaac_ros_image_proc
16 | 0.9.0
17 | Single image rectification and color processing
18 |
19 | Hemal Shah
20 | JetPack EULA
21 | https://developer.nvidia.com/blog/accelerating-ai-modules-for-ros-and-ros-2-on-jetson/
22 | Jaiveer Singh
23 | Piero Orderique
24 | Tobi Alade
25 |
26 | ament_cmake_auto
27 |
28 | rclcpp
29 | rclcpp_components
30 | image_transport
31 | cv_bridge
32 | isaac_ros_common
33 |
34 | ament_lint_auto
35 | ament_lint_common
36 | isaac_ros_test
37 |
38 |
39 | ament_cmake
40 |
41 |
42 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/src/stereo_image_proc_main.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
 11 | #include <memory>
 12 | #include <string>
13 | #include "isaac_ros_stereo_image_proc/disparity_node.hpp"
14 | #include "isaac_ros_stereo_image_proc/point_cloud_node.hpp"
15 |
16 |
17 | int main(int argc, char * argv[])
18 | {
19 | rclcpp::init(argc, argv);
20 |
21 | rclcpp::executors::SingleThreadedExecutor exec;
22 |
23 | // Find the disparity image from the left and right rectified images
24 | rclcpp::NodeOptions disparity_options;
 25 | auto disparity_node = std::make_shared<isaac_ros::stereo_image_proc::DisparityNode>(
26 | disparity_options);
27 | exec.add_node(disparity_node);
28 |
29 | // Find the pointcloud from the disparity image
30 | rclcpp::NodeOptions point_cloud_options;
31 | point_cloud_options.arguments(
32 | {
33 | "--ros-args",
34 | "-r", "left/image_rect_color:=left/image_rect"
35 | });
 36 | auto point_cloud_node = std::make_shared<isaac_ros::stereo_image_proc::PointCloudNode>(
37 | point_cloud_options);
38 | exec.add_node(point_cloud_node);
39 |
40 | // Spin with all the components loaded
41 | exec.spin();
42 |
43 | rclcpp::shutdown();
44 | return 0;
45 | }
46 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/include/isaac_ros_image_proc/image_format_converter_node.hpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_IMAGE_PROC__IMAGE_FORMAT_CONVERTER_NODE_HPP_
12 | #define ISAAC_ROS_IMAGE_PROC__IMAGE_FORMAT_CONVERTER_NODE_HPP_
13 |
 14 | #include <string>
15 |
16 | #include "image_transport/image_transport.hpp"
17 | #include "rclcpp/rclcpp.hpp"
18 |
19 | namespace isaac_ros
20 | {
21 | namespace image_proc
22 | {
23 |
24 | class ImageFormatConverterNode : public rclcpp::Node
25 | {
26 | public:
27 | explicit ImageFormatConverterNode(const rclcpp::NodeOptions &);
28 |
29 | private:
30 | /**
31 | * @brief Callback to change the format of the image received by subscription
32 | *
33 | * @param image_msg The image message received
34 | */
35 | void FormatCallback(const sensor_msgs::msg::Image::ConstSharedPtr & image_msg);
36 |
37 | // ROS2 Image subscriber for input and Image publisher for output
38 | image_transport::Subscriber sub_;
39 | image_transport::Publisher pub_;
40 |
41 | // ROS2 parameter for specifying desired encoding
42 | std::string encoding_desired_{};
43 |
44 | // ROS2 parameter for VPI backend flags
45 | uint32_t vpi_backends_{};
46 | };
47 |
48 | } // namespace image_proc
49 | } // namespace isaac_ros
50 |
51 | #endif // ISAAC_ROS_IMAGE_PROC__IMAGE_FORMAT_CONVERTER_NODE_HPP_
52 |
--------------------------------------------------------------------------------
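
The converter exposes an encoding_desired parameter (set to mono8 via --ros-args in image_proc_main.cpp later in this listing) and is registered as the component isaac_ros::image_proc::ImageFormatConverterNode in the package CMakeLists.txt. A hedged sketch of composing it from a Python launch file, mirroring the image -> image_mono remapping used by the executable:

import launch
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode


def generate_launch_description():
    # Convert the subscribed image to mono8 and republish it on image_mono.
    converter_node = ComposableNode(
        name='image_format_mono',
        package='isaac_ros_image_proc',
        plugin='isaac_ros::image_proc::ImageFormatConverterNode',
        parameters=[{'encoding_desired': 'mono8'}],
        remappings=[('image', 'image_mono')])

    container = ComposableNodeContainer(
        name='image_format_container',
        namespace='',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[converter_node],
        output='screen')

    return launch.LaunchDescription([container])
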
/isaac_ros_stereo_image_proc/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
12 |
13 |
14 |
15 | isaac_ros_stereo_image_proc
16 | 0.9.0
17 | Stereo and disparity processing
18 |
19 | Hemal Shah
20 | JetPack EULA
21 | https://developer.nvidia.com/blog/accelerating-ai-modules-for-ros-and-ros-2-on-jetson/
22 | Arjun Bhorkar
23 | Kajanan Chinniah
24 |
25 | ament_cmake_auto
26 |
27 | rclcpp
28 | image_transport
29 | image_geometry
30 | vpi
31 | cv_bridge
32 | message_filters
33 | rclcpp_components
34 | sensor_msgs
35 | stereo_msgs
36 | isaac_ros_common
37 |
38 | ament_lint_auto
39 | ament_lint_common
40 | isaac_ros_test
41 |
42 |
43 | ament_cmake
44 |
45 |
46 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/include/isaac_ros_stereo_image_proc/nvPointCloud.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_STEREO_IMAGE_PROC__NVPOINTCLOUD_H_
12 | #define ISAAC_ROS_STEREO_IMAGE_PROC__NVPOINTCLOUD_H_
13 |
14 | /**
15 | * @brief Struct that holds relevant camera intrinsics
16 | *
17 | */
18 | typedef struct nvPointCloudIntrinsics
19 | {
20 | double reproject_matrix[4][4]; // 4x4 reprojection matrix
21 | int reproject_matrix_rows; // Number of rows of reprojection matrix
22 | int reproject_matrix_cols; // Number of cols of reprojection matrix
23 | int height; // Height of the camera image
24 | int width; // Width of the camera image
25 | float unit_scaling; // Unit to scale the reprojected 3D points
26 | } nvPointCloudIntrinsics;
27 |
28 | /**
29 | * @brief struct that holds relevant cloud properties
30 | *
31 | */
32 | typedef struct nvPointCloudProperties
33 | {
34 | int point_row_step; // Length of a row
35 | int point_step; // Length of a point
36 | int x_offset; // Position of x for one point
37 | int y_offset; // Position of y for one point
38 | int z_offset; // Position of z for one point
39 | int rgb_offset; // Position of rgb for one point
40 | int buffer_size; // Size of pointcloud buffer (in bytes)
41 | bool is_bigendian; // Is byte stream big or little endian
42 | float bad_point; // Representation of a bad point in the cloud
43 | } nvPointCloudProperties;
44 |
45 | #endif // ISAAC_ROS_STEREO_IMAGE_PROC__NVPOINTCLOUD_H_
46 |
--------------------------------------------------------------------------------
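
The nvPointCloudProperties offsets and steps describe the same packed, byte-addressed point layout that sensor_msgs/PointCloud2 uses: a point starts at index * point_step and each field sits at a fixed offset inside it. Below is a small illustration of reading one point back out of such a buffer; the concrete offsets and step are assumptions for the example, not values taken from this repository.

import struct


def read_point(buffer, index, point_step=32,
               x_offset=0, y_offset=4, z_offset=8, rgb_offset=16,
               is_bigendian=False):
    """Unpack one packed point (x, y, z, rgb as float32) from a byte buffer (sketch)."""
    base = index * point_step
    fmt = '>f' if is_bigendian else '<f'
    x = struct.unpack_from(fmt, buffer, base + x_offset)[0]
    y = struct.unpack_from(fmt, buffer, base + y_offset)[0]
    z = struct.unpack_from(fmt, buffer, base + z_offset)[0]
    rgb = struct.unpack_from(fmt, buffer, base + rgb_offset)[0]
    return x, y, z, rgb
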
/isaac_ros_image_proc/include/isaac_ros_image_proc/resize_node.hpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_IMAGE_PROC__RESIZE_NODE_HPP_
12 | #define ISAAC_ROS_IMAGE_PROC__RESIZE_NODE_HPP_
13 |
 14 | #include <string>
15 |
16 | #include "image_transport/image_transport.hpp"
17 | #include "rclcpp/rclcpp.hpp"
18 | #include "vpi/VPI.h"
19 |
20 | namespace isaac_ros
21 | {
22 | namespace image_proc
23 | {
24 |
25 | class ResizeNode : public rclcpp::Node
26 | {
27 | public:
28 | explicit ResizeNode(const rclcpp::NodeOptions &);
29 |
30 | private:
31 | /**
32 | * @brief Callback to resize the image received by subscription
33 | *
34 | * @param image_msg The image message received
35 | * @param info_msg The information of the camera that produced the image
36 | */
37 | void ResizeCallback(
38 | const sensor_msgs::msg::Image::ConstSharedPtr & image_msg,
39 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg);
40 |
41 | // ROS2 Camera subscriber for input and Image publisher for output
42 | image_transport::CameraSubscriber sub_;
43 | image_transport::CameraPublisher pub_;
44 |
45 | // ROS2 parameters for configuring the resize operation
46 | bool use_relative_scale_{};
47 | double scale_height_{};
48 | double scale_width_{};
49 | int height_{};
50 | int width_{};
51 |
52 | // ROS2 parameter for VPI backend flags
53 | uint32_t vpi_backends_{};
54 | };
55 |
56 | } // namespace image_proc
57 | } // namespace isaac_ros
58 |
59 | #endif // ISAAC_ROS_IMAGE_PROC__RESIZE_NODE_HPP_
60 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/launch/isaac_ros_stereo_image_pipeline_launch.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import launch
10 | from launch_ros.actions import ComposableNodeContainer, Node
11 | from launch_ros.descriptions import ComposableNode
12 |
13 |
14 | def generate_launch_description():
15 | disparity_node = ComposableNode(
16 | name='disparity',
17 | package='isaac_ros_stereo_image_proc',
18 | plugin='isaac_ros::stereo_image_proc::DisparityNode',
19 | parameters=[{
20 | 'backends': 'CUDA',
21 | 'window_size': 5,
22 | 'max_disparity': 64,
23 | }])
24 |
25 | pointcloud_node = ComposableNode(
26 | package='isaac_ros_stereo_image_proc',
27 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
28 | parameters=[{
29 | 'use_color': False,
30 | }],
31 | remappings=[('/left/image_rect_color', '/left/image_rect')])
32 |
33 | realsense_camera_node = Node(
34 | package='realsense2_camera',
35 | node_executable='realsense2_camera_node',
36 | parameters=[{
37 | 'infra_height': 270,
38 | 'infra_width': 480,
39 | 'enable_color': False,
40 | 'enable_depth': False,
41 | }],
42 | remappings=[('/infra1/image_rect_raw', '/left/image_rect'),
43 | ('/infra1/camera_info', '/left/camera_info'),
44 | ('/infra2/image_rect_raw', '/right/image_rect'),
45 | ('/infra2/camera_info', '/right/camera_info')])
46 |
47 | container = ComposableNodeContainer(
48 | name='disparity_container',
49 | namespace='disparity',
50 | package='rclcpp_components',
51 | executable='component_container',
52 | composable_node_descriptions=[disparity_node, pointcloud_node],
53 | output='screen'
54 | )
55 |
56 | return (launch.LaunchDescription([container, realsense_camera_node]))
57 |
--------------------------------------------------------------------------------
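
Since the stereo package installs its launch directory to the package share (see ament_auto_package(INSTALL_TO_SHARE launch) in its CMakeLists.txt later in this listing), the pipeline launch file above can be reused from another launch description. A usage sketch:

import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    pipeline_launch_path = os.path.join(
        get_package_share_directory('isaac_ros_stereo_image_proc'),
        'launch', 'isaac_ros_stereo_image_pipeline_launch.py')

    # Include the stereo disparity + point cloud pipeline defined above.
    return LaunchDescription([
        IncludeLaunchDescription(PythonLaunchDescriptionSource(pipeline_launch_path)),
    ])
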
/isaac_ros_image_proc/src/image_proc_main.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
 11 | #include <memory>
12 |
13 | #include "rclcpp/rclcpp.hpp"
14 |
15 | #include "isaac_ros_image_proc/image_format_converter_node.hpp"
16 | #include "isaac_ros_image_proc/rectify_node.hpp"
17 |
18 | int main(int argc, char * argv[])
19 | {
20 | rclcpp::init(argc, argv);
21 |
22 | rclcpp::executors::MultiThreadedExecutor exec;
23 |
24 | // Make the raw images available in grayscale from /image_raw to /image_mono
25 | rclcpp::NodeOptions format_mono_options;
26 | format_mono_options.arguments(
27 | {
28 | "--ros-args",
29 | "-r", "__node:=image_format_mono",
30 | "-r", "image:=image_mono",
31 | "-p", "encoding_desired:=mono8"
32 | });
33 | auto format_mono_node =
 34 | std::make_shared<isaac_ros::image_proc::ImageFormatConverterNode>(format_mono_options);
35 | exec.add_node(format_mono_node);
36 |
37 | // Also make the raw images available in color from /image_raw to /image_color
38 | rclcpp::NodeOptions format_color_options;
39 | format_color_options.arguments(
40 | {
41 | "--ros-args",
42 | "-r", "__node:=image_format_color",
43 | "-r", "image:=image_color"
44 | });
45 | auto format_color_node =
 46 | std::make_shared<isaac_ros::image_proc::ImageFormatConverterNode>(format_color_options);
47 | exec.add_node(format_color_node);
48 |
49 | // Rectify and undistort grayscale images from /image_mono to /image_rect
50 | rclcpp::NodeOptions rectify_mono_options;
51 | rectify_mono_options.arguments(
52 | {
53 | "--ros-args",
54 | "-r", "__node:=rectify_mono",
55 | "-r", "image:=image_mono",
56 | "-r", "image_rect:=image_rect"
57 | });
 58 | auto rectify_mono_node = std::make_shared<isaac_ros::image_proc::RectifyNode>(
59 | rectify_mono_options);
60 | exec.add_node(rectify_mono_node);
61 |
62 | // Also rectify and undistort color images from /image_color to /image_rect_color
63 | rclcpp::NodeOptions rectify_color_options;
64 | rectify_color_options.arguments(
65 | {
66 | "--ros-args",
67 | "-r", "__node:=rectify_color",
68 | "-r", "image:=image_color",
69 | "-r", "image_rect:=image_rect_color"
70 | });
 71 | auto rectify_color_node = std::make_shared<isaac_ros::image_proc::RectifyNode>(
72 | rectify_color_options);
73 | exec.add_node(rectify_color_node);
74 |
75 | // Spin with all the components loaded
76 | exec.spin();
77 |
78 | rclcpp::shutdown();
79 | return 0;
80 | }
81 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | cmake_minimum_required(VERSION 3.5)
10 | project(isaac_ros_stereo_image_proc CUDA CXX)
11 |
12 | # Default to C++17
13 | if(NOT CMAKE_CXX_STANDARD)
14 | set(CMAKE_CXX_STANDARD 17)
15 | endif()
16 |
17 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
18 | add_compile_options(-Wall -Wextra -Wpedantic)
19 | endif()
20 |
21 | find_package(ament_cmake_auto REQUIRED)
22 | ament_auto_find_build_dependencies()
23 |
24 | set(CUDA_MIN_VERSION "10.2")
25 | find_package(CUDA ${CUDA_MIN_VERSION} REQUIRED)
26 |
27 | # Explicitly include directories for NVCC
28 | include_directories(${CUDA_INCLUDE_DIRS})
29 | include_directories(include)
30 |
31 | # point_cloud_node_cuda
32 | cuda_add_library(point_cloud_node_cuda SHARED
33 | src/point_cloud_node_cuda.cu)
34 | ament_target_dependencies(point_cloud_node_cuda CUDA)
35 |
36 | # point_cloud_node
37 | ament_auto_add_library(point_cloud_node SHARED
38 | src/point_cloud_node.cpp)
39 | add_dependencies(point_cloud_node point_cloud_node_cuda)
40 | target_compile_definitions(point_cloud_node
41 | PRIVATE "COMPOSITION_BUILDING_DLL"
42 | )
43 | target_link_libraries(point_cloud_node point_cloud_node_cuda ${CUDA_LIBRARIES})
44 | rclcpp_components_register_nodes(point_cloud_node "isaac_ros::stereo_image_proc::PointCloudNode")
 45 | set(node_plugins "${node_plugins}isaac_ros::stereo_image_proc::PointCloudNode;$<TARGET_FILE:point_cloud_node>\n")
46 |
47 | # disparity_node
48 | ament_auto_add_library(disparity_node SHARED
49 | src/disparity_node.cpp)
50 | target_compile_definitions(disparity_node
51 | PRIVATE "COMPOSITION_BUILDING_DLL"
52 | )
53 | target_link_libraries(disparity_node vpi)
54 | rclcpp_components_register_nodes(disparity_node "isaac_ros::stereo_image_proc::DisparityNode")
 55 | set(node_plugins "${node_plugins}isaac_ros::stereo_image_proc::DisparityNode;$<TARGET_FILE:disparity_node>\n")
56 |
57 | # isaac_ros_stereo_image_proc executable
58 | ament_auto_add_executable(${PROJECT_NAME}
59 | src/stereo_image_proc_main.cpp
60 | )
61 |
62 | file(GLOB SOURCES "src/*.cpp")
63 |
64 | install(TARGETS disparity_node point_cloud_node point_cloud_node_cuda
65 | ARCHIVE DESTINATION lib
66 | LIBRARY DESTINATION lib
67 | RUNTIME DESTINATION bin
68 | )
69 |
70 | if(BUILD_TESTING)
71 | find_package(ament_lint_auto REQUIRED)
72 |
73 | # Ignore copyright notices since we use custom JetPack EULA
74 | set(ament_cmake_copyright_FOUND TRUE)
75 |
76 | ament_lint_auto_find_test_dependencies()
77 |
78 | find_package(launch_testing_ament_cmake REQUIRED)
79 | add_launch_test(test/isaac_ros_stereo_disparity_pol.py)
80 | add_launch_test(test/isaac_ros_stereo_pipeline_pol.py)
81 | add_launch_test(test/isaac_ros_stereo_point_cloud_OSS_test.py)
82 | add_launch_test(test/isaac_ros_stereo_point_cloud_pol.py)
83 | endif()
84 |
85 | ament_auto_package(INSTALL_TO_SHARE launch)
86 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/include/isaac_ros_image_proc/rectify_node.hpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_IMAGE_PROC__RECTIFY_NODE_HPP_
12 | #define ISAAC_ROS_IMAGE_PROC__RECTIFY_NODE_HPP_
13 |
 14 | #include <string>
 15 | #include <utility>
16 |
17 | #include "cv_bridge/cv_bridge.h"
18 | #include "image_transport/image_transport.hpp"
19 | #include "opencv2/opencv.hpp"
20 | #include "rclcpp/rclcpp.hpp"
21 | #include "vpi/VPI.h"
22 |
23 | namespace isaac_ros
24 | {
25 | namespace image_proc
26 | {
27 |
28 | class RectifyNode : public rclcpp::Node
29 | {
30 | public:
31 | explicit RectifyNode(const rclcpp::NodeOptions &);
32 |
33 | ~RectifyNode();
34 |
35 | RectifyNode(const RectifyNode &) = delete;
36 |
37 | RectifyNode & operator=(const RectifyNode &) = delete;
38 |
39 | private:
40 | /**
41 | * @brief Callback to rectify the image received by subscription
42 | *
43 | * @param image_msg The image message received
44 | * @param info_msg The information of the camera that produced the image
45 | */
46 | void RectifyCallback(
47 | const sensor_msgs::msg::Image::ConstSharedPtr & image_msg,
48 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg);
49 |
50 | /**
51 | * @brief Rectify the input image in-place according to the distortion parameters
52 | *
53 | * @param image_ptr [in/out] Pointer to the image to be rectified in-place
54 | * @param K_mat The 3x3 K matrix of camera intrinsics
55 | * @param P_mat The 3x4 P matrix mapping from world points to image points
56 | * @param D_mat The distortion parameters
57 | * @param distortion_model The distortion model according to which to interpret the distortion parameters
58 | * @param roi The region of interest to crop the final rectified image to
59 | */
60 | void RectifyImage(
61 | cv_bridge::CvImagePtr & image_ptr, const cv::Matx33d & K_mat, const cv::Matx34d & P_mat,
 62 | const cv::Mat_<double> & D_mat, const std::string & distortion_model, cv::Rect roi);
63 |
64 | // ROS2 Camera subscriber for input and Image publisher for output
65 | image_transport::CameraSubscriber sub_;
66 | image_transport::Publisher pub_;
67 |
68 | // ROS2 parameter for specifying interpolation type
69 | VPIInterpolationType interpolation_;
70 |
71 | // ROS2 parameter for VPI backend flags
72 | uint32_t vpi_backends_{};
73 |
74 | // Shared VPI resources for all callbacks
75 | VPIStream stream_{};
76 | VPIImage image_{};
77 | VPIPayload remap_{};
78 |
79 | // Temporary input and output images in NV12 format
80 | VPIImage tmp_in_{}, tmp_out_{};
81 |
82 | // Cached VPI camera intrinsics and extrinsics
83 | VPICameraIntrinsic K_{};
84 | VPICameraExtrinsic X_{};
85 |
86 | // Cached values from CameraInfo message to avoid unnecessary computation
 87 | std::pair<int, int> image_dims_{-1, -1};
88 | cv::Matx33d curr_K_mat_{};
89 | cv::Matx34d curr_P_mat_{};
 90 | cv::Mat_<double> curr_D_mat_{};
91 | cv::Rect current_roi_{};
92 | };
93 |
94 | } // namespace image_proc
95 | } // namespace isaac_ros
96 |
97 | #endif // ISAAC_ROS_IMAGE_PROC__RECTIFY_NODE_HPP_
98 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | cmake_minimum_required(VERSION 3.5)
10 | project(isaac_ros_image_proc)
11 |
12 | # Default to C++17
13 | if(NOT CMAKE_CXX_STANDARD)
14 | set(CMAKE_CXX_STANDARD 17)
15 | endif()
16 |
17 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
18 | add_compile_options(-Wall -Wextra -Wpedantic)
19 | endif()
20 |
21 | find_package(ament_cmake_auto REQUIRED)
22 | ament_auto_find_build_dependencies()
23 |
24 | # Find VPI dependency
25 | find_package(vpi REQUIRED)
26 |
27 | # image_format_converter_node
28 | ament_auto_add_library(image_format_converter_node SHARED
29 | src/image_format_converter_node.cpp)
30 | target_compile_definitions(image_format_converter_node
31 | PRIVATE "COMPOSITION_BUILDING_DLL"
32 | )
33 | target_link_libraries(image_format_converter_node vpi)
34 | rclcpp_components_register_nodes(image_format_converter_node "isaac_ros::image_proc::ImageFormatConverterNode")
 35 | set(node_plugins "${node_plugins}isaac_ros::image_proc::ImageFormatConverterNode;$<TARGET_FILE:image_format_converter_node>\n")
36 |
37 | # rectify_node
38 | ament_auto_add_library(rectify_node SHARED
39 | src/rectify_node.cpp)
40 | target_compile_definitions(rectify_node
41 | PRIVATE "COMPOSITION_BUILDING_DLL"
42 | )
43 | target_link_libraries(rectify_node vpi)
44 | rclcpp_components_register_nodes(rectify_node "isaac_ros::image_proc::RectifyNode")
 45 | set(node_plugins "${node_plugins}isaac_ros::image_proc::RectifyNode;$<TARGET_FILE:rectify_node>\n")
46 |
47 | # resize_node
48 | ament_auto_add_library(resize_node SHARED
49 | src/resize_node.cpp)
50 | target_compile_definitions(resize_node
51 | PRIVATE "COMPOSITION_BUILDING_DLL"
52 | )
53 | target_link_libraries(resize_node vpi)
54 | rclcpp_components_register_nodes(resize_node "isaac_ros::image_proc::ResizeNode")
 55 | set(node_plugins "${node_plugins}isaac_ros::image_proc::ResizeNode;$<TARGET_FILE:resize_node>\n")
56 |
57 | # isaac_ros_image_proc executable
58 | ament_auto_add_executable(${PROJECT_NAME}
59 | src/image_proc_main.cpp
60 | )
61 | target_link_libraries(${PROJECT_NAME}
62 | image_format_converter_node
63 | rectify_node
64 | resize_node
65 | ament_index_cpp::ament_index_cpp
66 | vpi
67 | )
68 |
69 | install(TARGETS ${PROJECT_NAME}
70 | ARCHIVE DESTINATION lib
71 | LIBRARY DESTINATION lib
72 | RUNTIME DESTINATION bin
73 | )
74 | if(BUILD_TESTING)
75 | find_package(ament_lint_auto REQUIRED)
76 |
77 | # Ignore copyright notices since we use custom JetPack EULA
78 | set(ament_cmake_copyright_FOUND TRUE)
79 |
80 | ament_lint_auto_find_test_dependencies()
81 |
82 | find_package(launch_testing_ament_cmake REQUIRED)
83 | add_launch_test(test/isaac_ros_image_proc_test.py)
84 | add_launch_test(test/isaac_ros_image_format_converter_test.py)
85 | add_launch_test(test/isaac_ros_image_format_converter_4_channel_test.py)
86 | add_launch_test(test/isaac_ros_image_format_converter_grayscale_test.py)
87 | add_launch_test(test/isaac_ros_rectify_test.py)
88 | add_launch_test(test/isaac_ros_resize_test.py)
89 | add_launch_test(test/isaac_ros_resize_aspect_ratio_test.py)
90 | add_launch_test(test/isaac_ros_resize_invalid_test.py)
91 |
92 | endif()
93 |
94 | ament_auto_package()
95 |
--------------------------------------------------------------------------------
/giistr-cla.md:
--------------------------------------------------------------------------------
1 | ## Individual Contributor License Agreement (CLA)
2 |
3 | **Thank you for submitting your contributions to this project.**
4 |
5 | By signing this CLA, you agree that the following terms apply to all of your past, present and future contributions
6 | to the project.
7 |
8 | ### License.
9 |
10 | You hereby represent that all present, past and future contributions are governed by the
11 | [MIT License](https://opensource.org/licenses/MIT)
12 | copyright statement.
13 |
14 | This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights
15 | of the code or documents you contribute to the project itself or its maintainers.
 16 | Furthermore, you also represent that you have the authority to perform the above waiver
 17 | with respect to the entirety of your contributions.
18 |
19 | ### Moral Rights.
20 |
21 | To the fullest extent permitted under applicable law, you hereby waive, and agree not to
22 | assert, all of your “moral rights” in or relating to your contributions for the benefit of the project.
23 |
24 | ### Third Party Content.
25 |
26 | If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools,
27 | specifications, documentation, data, materials, feedback, information or other works of authorship that were not
28 | authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary
29 | rights associated with your Contribution (“Third Party Rights”),
30 | then you agree to include with the submission of your Contribution full details respecting such Third Party
31 | Content and Third Party Rights, including, without limitation, identification of which aspects of your
32 | Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the
33 | Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable
34 | third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater
35 | certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights
36 | do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.
37 |
38 | ### Representations.
39 |
40 | You represent that, other than the Third Party Content and Third Party Rights identified by
41 | you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled
42 | to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were
43 | created in the course of your employment with your past or present employer(s), you represent that such
44 | employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer
45 | (s) has waived all of their right, title or interest in or to your Contributions.
46 |
47 | ### Disclaimer.
48 |
49 | To the fullest extent permitted under applicable law, your Contributions are provided on an "as is"
50 | basis, without any warranties or conditions, express or implied, including, without limitation, any implied
51 | warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not
52 | required to provide support for your Contributions, except to the extent you desire to provide support.
53 |
54 | ### No Obligation.
55 |
56 | You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions
57 | into the project. The decision to use or incorporate your contributions into the project will be made at the
58 | sole discretion of the maintainers or their authorized delegates.
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/isaac_ros_stereo_disparity_pol.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import os
10 | import pathlib
11 | import time
12 |
13 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
14 |
15 | from launch_ros.actions import ComposableNodeContainer
16 | from launch_ros.descriptions import ComposableNode
17 |
18 | import pytest
19 | import rclpy
20 |
21 | from sensor_msgs.msg import CameraInfo, Image
22 | from stereo_msgs.msg import DisparityImage
23 |
24 |
25 | @pytest.mark.rostest
26 | def generate_test_description():
27 |
28 | disparity_node = ComposableNode(
29 | name='disparity',
30 | package='isaac_ros_stereo_image_proc',
31 | plugin='isaac_ros::stereo_image_proc::DisparityNode',
32 | namespace=IsaacROSDisparityTest.generate_namespace(),
33 | parameters=[{
34 | 'backends': 'CUDA',
35 | 'window_size': 5,
36 | 'max_disparity': 64,
37 | }])
38 |
39 | container = ComposableNodeContainer(
40 | name='disparity_container',
41 | namespace='',
42 | package='rclcpp_components',
43 | executable='component_container',
44 | composable_node_descriptions=[disparity_node],
45 | output='screen'
46 | )
47 | return IsaacROSDisparityTest.generate_test_description([container])
48 |
49 |
50 | class IsaacROSDisparityTest(IsaacROSBaseTest):
51 | filepath = pathlib.Path(os.path.dirname(__file__))
52 |
53 | @IsaacROSBaseTest.for_each_test_case()
54 | def test_image_disparity(self, test_folder):
55 | TIMEOUT = 10
56 | received_messages = {}
57 |
58 | self.generate_namespace_lookup(['left/image_rect', 'right/image_rect',
59 | 'left/camera_info', 'right/camera_info',
60 | 'disparity'])
61 |
62 | subs = self.create_logging_subscribers(
63 | [('disparity', DisparityImage)], received_messages)
64 |
65 | image_left_pub = self.node.create_publisher(
66 | Image, self.namespaces['left/image_rect'], self.DEFAULT_QOS
67 | )
68 | image_right_pub = self.node.create_publisher(
69 | Image, self.namespaces['right/image_rect'], self.DEFAULT_QOS
70 | )
71 | camera_info_right = self.node.create_publisher(
72 | CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS
73 | )
74 | camera_info_left = self.node.create_publisher(
75 | CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS
76 | )
77 |
78 | try:
79 | image_left = JSONConversion.load_image_from_json(test_folder / 'image_left.json')
80 | image_right = JSONConversion.load_image_from_json(test_folder / 'image_right.json')
81 | camera_info = JSONConversion.load_camera_info_from_json(
82 | test_folder / 'camera_info.json')
83 |
84 | end_time = time.time() + TIMEOUT
85 | done = False
86 |
87 | while time.time() < end_time:
88 |
89 | image_left_pub.publish(image_left)
90 | image_right_pub.publish(image_right)
91 | camera_info_left.publish(camera_info)
92 | camera_info_right.publish(camera_info)
93 |
94 | rclpy.spin_once(self.node, timeout_sec=0.1)
95 |
96 | if 'disparity' in received_messages:
97 | done = True
98 | break
99 | self.assertTrue(done, "Didn't receive output on disparity topic")
100 |
101 | finally:
102 | [self.node.destroy_subscription(sub) for sub in subs]
103 | self.node.destroy_publisher(image_left_pub)
104 | self.node.destroy_publisher(image_right_pub)
105 | self.node.destroy_publisher(camera_info_right)
106 | self.node.destroy_publisher(camera_info_left)
107 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/include/isaac_ros_stereo_image_proc/point_cloud_node_cuda.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_CUDA_HPP_
12 | #define ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_CUDA_HPP_
13 |
14 | #include "nvPointCloud.h"
15 |
16 | #include <cuda.h>
17 | #include <cuda_runtime.h>
18 |
19 | #include <cstdint>
20 | #include <stdexcept>
21 | #include <string>
23 |
24 | /**
25 | * @namespace PointCloudNode_CUDA
26 | *
27 | * @brief Functions for calling CUDA code for the PointCloudNode
28 | *
29 | */
30 | namespace isaac_ros
31 | {
32 | namespace stereo_image_proc
33 | {
34 | namespace PointCloudNode_CUDA
35 | {
36 |
37 | /**
38 | * @brief Macro that calls the _checkCudaErrors function
39 | *
40 | * @param result The result from the CUDA function call
41 | */
42 | #define throwIfCudaError(result) {_throwIfCudaError((result), __FILE__, __LINE__); \
43 | }
44 |
45 | /**
46 | * @brief Checks if a CUDA error has occurred. If so, throws a runtime error.
47 | *
48 | * @note This does not perform any ROS logging and thus should be used
49 | * for CUDA-only files
50 | *
51 | * @param result The result of a CUDA function call
52 | * @param filename The name of the file calling
53 | * @param line_number The line number that the error occurred
54 | */
55 | inline void _throwIfCudaError(cudaError_t result, const char * filename, int line_number)
56 | {
57 | if (result != cudaSuccess) {
58 | throw std::runtime_error(
59 | "CUDA Error: " + std::string(cudaGetErrorString(result)) + " (error code: " +
60 | std::to_string(result) + ") at " + std::string(filename) + " in line " +
61 | std::to_string(line_number));
62 | }
63 | }
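// Illustrative usage only (not part of the original header; device_buffer, host_buffer,
// num_floats and stream are hypothetical names): wrap each CUDA runtime call so failures
// surface as exceptions with file/line context, e.g.
//   float * device_buffer = nullptr;
//   throwIfCudaError(cudaMalloc(reinterpret_cast<void **>(&device_buffer),
//                               num_floats * sizeof(float)));
//   throwIfCudaError(cudaMemcpyAsync(device_buffer, host_buffer, num_floats * sizeof(float),
//                                    cudaMemcpyHostToDevice, stream));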
64 |
65 | /**
66 | * @brief Host function that calls the addColorToPointCloud kernel
67 | *
68 | * @warning This function only synchronizes with respect to the input stream
69 | *
70 | * @param pointcloud_data The input point cloud data buffer that will be modified
71 | * @param rgb The RGB image represented as a 1D raw byte stream
72 | * @param rgb_row_step The step size for a row of the RGB image
73 | * @param red_offset The offset of the red pixel for one pixel
74 | * @param green_offset The offset of the green pixel for one pixel
75 | * @param blue_offset The offset of the blue pixel for one pixel
76 | * @param color_step The number of points in one pixel
77 | * @param intrinsics A struct that holds relevant camera intrinsic information
78 | * @param cloud_properties A struct that holds relevant point cloud properties
79 | * @param stream The CUDA stream to execute the kernel
80 | */
81 |
82 | void addColorToPointCloud(
83 | float * pointcloud_data, const uint8_t * rgb, const int rgb_row_step,
84 | const int red_offset, const int green_offset, const int blue_offset, const int color_step,
85 | const nvPointCloudIntrinsics * intrinsics,
86 | const nvPointCloudProperties * cloud_properties,
87 | cudaStream_t stream);
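// Illustrative call (hypothetical argument names): for an rgb8 left image the per-pixel byte
// layout is R,G,B, so red_offset=0, green_offset=1, blue_offset=2 and color_step=3; a bgr8
// image would instead use red_offset=2, green_offset=1, blue_offset=0.
//   addColorToPointCloud(pointcloud_data, rgb_data, rgb_row_step, 0, 1, 2, 3,
//                        &intrinsics, &cloud_properties, stream);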
88 |
89 | /**
90 | * @brief Host function that calls the convertDisparityToPointCloud kernel
91 | *
92 | * @warning This function only synchronizes with respect to the input stream
93 | *
94 | * @tparam T The data type of the disparity image
95 | * @param pointcloud_data The input point cloud data buffer that will be modified
96 | * @param disparity_img The disparity image array, represented as a 1D array
97 | * @param disparity_row_step The step size for a row of the disparity image
98 | * @param intrinsics A struct that holds relevant camera intrinsic information
99 | * @param cloud_properties A struct that holds relevant point cloud properties
100 | * @param stream The CUDA stream to execute the kernel
101 | */
102 | template<typename T>
103 | void convertDisparityToPointCloud(
104 | float * pointcloud_data, const T * disparity_img,
105 | const int disparity_row_step, const nvPointCloudIntrinsics * intrinsics,
106 | const nvPointCloudProperties * cloud_properties,
107 | cudaStream_t stream);
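// Illustrative instantiation (hypothetical argument names): a 32FC1 disparity image corresponds
// to T = float, so a caller would typically invoke
//   convertDisparityToPointCloud<float>(pointcloud_data, disparity_data, disparity_row_step,
//                                       &intrinsics, &cloud_properties, stream);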
108 |
109 | } // namespace PointCloudNode_CUDA
110 | } // namespace stereo_image_proc
111 | } // namespace isaac_ros
112 |
113 | #endif // ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_CUDA_HPP_
114 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_image_format_converter_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """3-channel color conversion test for the Isaac ROS Image Format Converter node."""
10 |
11 | import time
12 |
13 | import cv2
14 | from cv_bridge import CvBridge
15 | from isaac_ros_test import IsaacROSBaseTest
16 | import launch_ros
17 | import numpy as np
18 | import pytest
19 | import rclpy
20 | from sensor_msgs.msg import CameraInfo, Image
21 |
22 | ENCODING_DESIRED = 'bgr8'
23 | BACKENDS = 'CUDA'
24 |
25 |
26 | @pytest.mark.rostest
27 | def generate_test_description():
28 | """Generate launch description with all ROS2 nodes for testing."""
29 | composable_nodes = [
30 | launch_ros.descriptions.ComposableNode(
31 | package='isaac_ros_image_proc',
32 | plugin='isaac_ros::image_proc::ImageFormatConverterNode',
33 | name='image_format_node',
34 | namespace=IsaacROSFormatTest.generate_namespace(),
35 | parameters=[{
36 | 'encoding_desired': ENCODING_DESIRED,
37 | 'backends': BACKENDS
38 | }])]
39 |
40 | format_container = launch_ros.actions.ComposableNodeContainer(
41 | name='format_container',
42 | namespace='',
43 | package='rclcpp_components',
44 | executable='component_container',
45 | composable_node_descriptions=composable_nodes,
46 | output='screen'
47 | )
48 |
49 | return IsaacROSFormatTest.generate_test_description([format_container])
50 |
51 |
52 | class IsaacROSFormatTest(IsaacROSBaseTest):
53 | """Validate format conversion to the bgr8 format."""
54 |
55 | def test_rgb_to_bgr_conversion(self) -> None:
56 | """Expect the node to convert rgb8 input images into the bgr8 format."""
57 | self.generate_namespace_lookup(['image_raw', 'image'])
58 | received_messages = {}
59 |
60 | image_sub, = self.create_logging_subscribers(
61 | subscription_requests=[('image', Image)],
62 | received_messages=received_messages
63 | )
64 |
65 | image_raw_pub = self.node.create_publisher(
66 | Image, self.generate_namespace('image_raw'), self.DEFAULT_QOS)
67 |
68 | try:
69 | # Generate an input image in RGB encoding
70 | cv_image = np.zeros((300, 300, 3), np.uint8)
71 | cv_image[:] = (255, 0, 0) # Full red
72 |
73 | timestamp = self.node.get_clock().now().to_msg()
74 | image_raw = CvBridge().cv2_to_imgmsg(cv_image)
75 | image_raw.header.stamp = timestamp
76 | image_raw.encoding = 'rgb8' # Set image encoding explicitly
77 | camera_info = CameraInfo()
78 | camera_info.header.stamp = timestamp
79 | # Arbitrary valid K matrix that will satisfy calibration check
80 | camera_info.k = np.eye(3).reshape(9)
81 |
82 | # Publish test case over single topic
83 | image_raw_pub.publish(image_raw)
84 |
85 | # Wait at most TIMEOUT seconds for subscriber to respond
86 | TIMEOUT = 2
87 | end_time = time.time() + TIMEOUT
88 |
89 | done = False
90 | while time.time() < end_time:
91 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
92 |
93 | # If we have received a message on the output topic, break
94 | if 'image' in received_messages:
95 | done = True
96 | break
97 |
98 | self.assertTrue(done, "Didn't receive output on image topic!")
99 |
100 | image = received_messages['image']
101 |
102 | # Make sure that the output image has the desired encoding
103 | self.assertEqual(image.encoding, ENCODING_DESIRED, 'Incorrect output encoding!')
104 |
105 | # Make sure output image pixels match OpenCV result
106 | image_mono_actual = CvBridge().imgmsg_to_cv2(image)
107 | image_mono_expected = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
108 | self.assertImagesEqual(image_mono_actual, image_mono_expected)
109 |
110 | finally:
111 | self.node.destroy_subscription(image_sub)
112 | self.node.destroy_publisher(image_raw_pub)
113 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_image_format_converter_grayscale_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Grayscale conversion test for the Isaac ROS Image Format Converter node."""
10 |
11 | import time
12 |
13 | import cv2
14 | from cv_bridge import CvBridge
15 | from isaac_ros_test import IsaacROSBaseTest
16 | import launch_ros
17 | import numpy as np
18 | import pytest
19 | import rclpy
20 | from sensor_msgs.msg import CameraInfo, Image
21 |
22 | ENCODING_DESIRED = 'mono8'
23 | BACKENDS = 'CUDA'
24 |
25 |
26 | @pytest.mark.rostest
27 | def generate_test_description():
28 | """Generate launch description with all ROS2 nodes for testing."""
29 | composable_nodes = [
30 | launch_ros.descriptions.ComposableNode(
31 | package='isaac_ros_image_proc',
32 | plugin='isaac_ros::image_proc::ImageFormatConverterNode',
33 | name='image_format_node',
34 | namespace=IsaacROSFormatMono8Test.generate_namespace(),
35 | parameters=[{
36 | 'encoding_desired': ENCODING_DESIRED,
37 | 'backends': BACKENDS
38 | }])]
39 |
40 | format_container = launch_ros.actions.ComposableNodeContainer(
41 | name='format_container',
42 | namespace='',
43 | package='rclcpp_components',
44 | executable='component_container',
45 | composable_node_descriptions=composable_nodes,
46 | output='screen'
47 | )
48 |
49 | return IsaacROSFormatMono8Test.generate_test_description([format_container])
50 |
51 |
52 | class IsaacROSFormatMono8Test(IsaacROSBaseTest):
53 | """Validate format conversion to the mono8 format."""
54 |
55 | def test_rgba_to_mono_conversion(self) -> None:
56 | """Expect the node to convert rgba8 input images into the mono8 format."""
57 | self.generate_namespace_lookup(['image_raw', 'image'])
58 | received_messages = {}
59 |
60 | image_sub, = self.create_logging_subscribers(
61 | subscription_requests=[('image', Image)],
62 | received_messages=received_messages
63 | )
64 |
65 | image_raw_pub = self.node.create_publisher(
66 | Image, self.generate_namespace('image_raw'), self.DEFAULT_QOS)
67 |
68 | try:
69 | # Generate an input image in RGBA encoding
70 | cv_image = np.zeros((300, 300, 4), np.uint8)
71 | cv_image[:] = (255, 0, 0, 200) # Full red, partial opacity
72 |
73 | timestamp = self.node.get_clock().now().to_msg()
74 | image_raw = CvBridge().cv2_to_imgmsg(cv_image)
75 | image_raw.header.stamp = timestamp
76 | image_raw.encoding = 'rgba8' # Set image encoding explicitly
77 | camera_info = CameraInfo()
78 | camera_info.header.stamp = timestamp
79 | # Arbitrary valid K matrix that will satisfy calibration check
80 | camera_info.k = np.eye(3).reshape(9)
81 |
82 | # Publish test case over single topic
83 | image_raw_pub.publish(image_raw)
84 |
85 | # Wait at most TIMEOUT seconds for subscriber to respond
86 | TIMEOUT = 2
87 | end_time = time.time() + TIMEOUT
88 |
89 | done = False
90 | while time.time() < end_time:
91 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
92 |
93 | # If we have received a message on the output topic, break
94 | if 'image' in received_messages:
95 | done = True
96 | break
97 |
98 | self.assertTrue(done, "Didn't receive output on image topic!")
99 |
100 | image = received_messages['image']
101 |
102 | # Make sure that the output image has the desired encoding
103 | self.assertEqual(image.encoding, ENCODING_DESIRED, 'Incorrect output encoding!')
104 |
105 | # Make sure output image pixels match OpenCV result
106 | image_mono_actual = CvBridge().imgmsg_to_cv2(image)
107 | image_mono_expected = cv2.cvtColor(cv_image, cv2.COLOR_RGBA2GRAY)
108 | self.assertImagesEqual(image_mono_actual, image_mono_expected)
109 |
110 | finally:
111 | self.node.destroy_subscription(image_sub)
112 | self.node.destroy_publisher(image_raw_pub)
113 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/isaac_ros_stereo_pipeline_pol.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import os
10 | import pathlib
11 | import time
12 |
13 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
14 |
15 | from launch_ros.actions import ComposableNodeContainer
16 | from launch_ros.descriptions import ComposableNode
17 |
18 | import pytest
19 | import rclpy
20 |
21 | from sensor_msgs.msg import CameraInfo, Image, PointCloud2
22 |
23 |
24 | @pytest.mark.rostest
25 | def generate_test_description():
26 |
27 | disparity_node = ComposableNode(
28 | name='disparity',
29 | package='isaac_ros_stereo_image_proc',
30 | plugin='isaac_ros::stereo_image_proc::DisparityNode',
31 | namespace=IsaacROSStereoPipelineTest.generate_namespace(),
32 | parameters=[{
33 | 'backends': 'CUDA',
34 | 'window_size': 5,
35 | 'max_disparity': 64,
36 | }])
37 |
38 | pointcloud_node = ComposableNode(
39 | package='isaac_ros_stereo_image_proc',
40 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
41 | namespace=IsaacROSStereoPipelineTest.generate_namespace(),
42 | parameters=[{
43 | 'use_color': False,
44 | }],
45 | remappings=[('left/image_rect_color', 'left/image_rect')])
46 |
47 | container = ComposableNodeContainer(
48 | name='disparity_container',
49 | namespace='',
50 | package='rclcpp_components',
51 | executable='component_container',
52 | composable_node_descriptions=[disparity_node, pointcloud_node],
53 | output='screen'
54 | )
55 | return IsaacROSStereoPipelineTest.generate_test_description([container])
56 |
57 |
58 | class IsaacROSStereoPipelineTest(IsaacROSBaseTest):
59 | filepath = pathlib.Path(os.path.dirname(__file__))
60 |
61 | @IsaacROSBaseTest.for_each_test_case()
62 | def test_image_pointcloud(self, test_folder):
63 | TIMEOUT = 10
64 | received_messages = {}
65 |
66 | self.generate_namespace_lookup(['left/image_rect', 'right/image_rect',
67 | 'left/camera_info', 'right/camera_info',
68 | 'points2'])
69 |
70 | subs = self.create_logging_subscribers(
71 | [('points2', PointCloud2)], received_messages)
72 |
73 | image_left_pub = self.node.create_publisher(
74 | Image, self.namespaces['left/image_rect'], self.DEFAULT_QOS
75 | )
76 | image_right_pub = self.node.create_publisher(
77 | Image, self.namespaces['right/image_rect'], self.DEFAULT_QOS
78 | )
79 | camera_info_right = self.node.create_publisher(
80 | CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS
81 | )
82 | camera_info_left = self.node.create_publisher(
83 | CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS
84 | )
85 |
86 | try:
87 | image_left = JSONConversion.load_image_from_json(test_folder / 'image_left.json')
88 | image_right = JSONConversion.load_image_from_json(test_folder / 'image_right.json')
89 | camera_info = JSONConversion.load_camera_info_from_json(
90 | test_folder / 'camera_info.json')
91 |
92 | end_time = time.time() + TIMEOUT
93 | done = False
94 |
95 | while time.time() < end_time:
96 |
97 | image_left_pub.publish(image_left)
98 | image_right_pub.publish(image_right)
99 | camera_info_left.publish(camera_info)
100 | camera_info_right.publish(camera_info)
101 |
102 | rclpy.spin_once(self.node, timeout_sec=0.1)
103 |
104 | if 'points2' in received_messages:
105 | done = True
106 | break
107 |
108 | self.assertTrue(done, "Didn't receive output on points2 topic")
109 |
110 | finally:
111 | [self.node.destroy_subscription(sub) for sub in subs]
112 | self.node.destroy_publisher(image_left_pub)
113 | self.node.destroy_publisher(image_right_pub)
114 | self.node.destroy_publisher(camera_info_right)
115 | self.node.destroy_publisher(camera_info_left)
116 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_image_format_converter_4_channel_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """4-channel color conversion test for the Isaac ROS Image Format Converter node."""
10 |
11 | import time
12 |
13 | import cv2
14 | from cv_bridge import CvBridge
15 | from isaac_ros_test import IsaacROSBaseTest
16 | import launch_ros
17 | import numpy as np
18 | import pytest
19 | import rclpy
20 | from sensor_msgs.msg import CameraInfo, Image
21 |
22 | ENCODING_DESIRED = 'bgra8'
23 | BACKENDS = 'CUDA'
24 |
25 |
26 | @pytest.mark.rostest
27 | def generate_test_description():
28 | """Generate launch description with all ROS2 nodes for testing."""
29 | composable_nodes = [
30 | launch_ros.descriptions.ComposableNode(
31 | package='isaac_ros_image_proc',
32 | plugin='isaac_ros::image_proc::ImageFormatConverterNode',
33 | name='image_format_node',
34 | namespace=IsaacROSFormatBGRATest.generate_namespace(),
35 | parameters=[{
36 | 'encoding_desired': ENCODING_DESIRED,
37 | 'backends': BACKENDS
38 | }])]
39 |
40 | format_container = launch_ros.actions.ComposableNodeContainer(
41 | name='format_container',
42 | namespace='',
43 | package='rclcpp_components',
44 | executable='component_container',
45 | composable_node_descriptions=composable_nodes,
46 | output='screen'
47 | )
48 |
49 | return IsaacROSFormatBGRATest.generate_test_description([format_container])
50 |
51 |
52 | class IsaacROSFormatBGRATest(IsaacROSBaseTest):
53 | """Validate format conversion to the bgra8 format."""
54 |
55 | def test_rgba_to_bgra_conversion(self) -> None:
56 | """Expect the node to convert rgba8 input images into the bgra8 format."""
57 | self.generate_namespace_lookup(['image_raw', 'image'])
58 | received_messages = {}
59 |
60 | image_sub, = self.create_logging_subscribers(
61 | subscription_requests=[('image', Image)],
62 | received_messages=received_messages
63 | )
64 |
65 | image_raw_pub = self.node.create_publisher(
66 | Image, self.generate_namespace('image_raw'), self.DEFAULT_QOS)
67 |
68 | try:
69 | # Generate an input image in RGBA encoding
70 | cv_image = np.zeros((300, 300, 4), np.uint8)
71 | cv_image[:] = (255, 0, 0, 200) # Full red, partial opacity
72 |
73 | timestamp = self.node.get_clock().now().to_msg()
74 | image_raw = CvBridge().cv2_to_imgmsg(cv_image)
75 | image_raw.header.stamp = timestamp
76 | image_raw.encoding = 'rgba8' # Set image encoding explicitly
77 | camera_info = CameraInfo()
78 | camera_info.header.stamp = timestamp
79 | # Arbitrary valid K matrix that will satisfy calibration check
80 | camera_info.k = np.eye(3).reshape(9)
81 |
82 | # Publish test case over single topic
83 | image_raw_pub.publish(image_raw)
84 |
85 | # Wait at most TIMEOUT seconds for subscriber to respond
86 | TIMEOUT = 2
87 | end_time = time.time() + TIMEOUT
88 |
89 | done = False
90 | while time.time() < end_time:
91 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
92 |
93 | # If we have received a message on the output topic, break
94 | if 'image' in received_messages:
95 | done = True
96 | break
97 |
98 | self.assertTrue(done, "Didn't receive output on image topic!")
99 |
100 | image = received_messages['image']
101 |
102 | # Make sure that the output image has the desired encoding
103 | self.assertEqual(image.encoding, ENCODING_DESIRED, 'Incorrect output encoding!')
104 |
105 | # Make sure output image pixels match OpenCV result
106 | image_mono_actual = CvBridge().imgmsg_to_cv2(image)
107 | image_mono_expected = cv2.cvtColor(cv_image, cv2.COLOR_RGBA2BGRA)
108 | self.assertImagesEqual(image_mono_actual, image_mono_expected)
109 |
110 | finally:
111 | self.node.destroy_subscription(image_sub)
112 | self.node.destroy_publisher(image_raw_pub)
113 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/isaac_ros_stereo_point_cloud_pol.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 |
10 | import os
11 | import pathlib
12 | import time
13 |
14 | import cv2
15 | from cv_bridge import CvBridge
16 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
17 |
18 | from launch_ros.actions import ComposableNodeContainer
19 | from launch_ros.descriptions import ComposableNode
20 |
21 | import numpy as np
22 | import pytest
23 | import rclpy
24 |
25 | from sensor_msgs.msg import CameraInfo, Image
26 | from sensor_msgs.msg import PointCloud2
27 | from stereo_msgs.msg import DisparityImage
28 |
29 |
30 | @pytest.mark.rostest
31 | def generate_test_description():
32 | pointcloud_node = ComposableNode(
33 | name='point_cloud',
34 | package='isaac_ros_stereo_image_proc',
35 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
36 | namespace=IsaacROSPointCloudTest.generate_namespace(),
37 | parameters=[{
38 | 'use_color': True,
39 | }])
40 |
41 | container = ComposableNodeContainer(
42 | name='point_cloud_container',
43 | namespace='',
44 | package='rclcpp_components',
45 | executable='component_container',
46 | composable_node_descriptions=[pointcloud_node],
47 | output='screen'
48 | )
49 |
50 | return IsaacROSPointCloudTest.generate_test_description([container])
51 |
52 |
53 | class IsaacROSPointCloudTest(IsaacROSBaseTest):
54 | filepath = pathlib.Path(os.path.dirname(__file__))
55 |
56 | @IsaacROSBaseTest.for_each_test_case()
57 | def test_rgbxyz_point_cloud(self, test_folder):
58 | TIMEOUT = 10
59 | received_messages = {}
60 |
61 | self.generate_namespace_lookup(['left/image_rect_color', 'disparity',
62 | 'left/camera_info', 'right/camera_info',
63 | 'points2'])
64 |
65 | subs = self.create_logging_subscribers(
66 | [('points2', PointCloud2)], received_messages)
67 |
68 | image_left_pub = self.node.create_publisher(
69 | Image, self.namespaces['left/image_rect_color'], self.DEFAULT_QOS
70 | )
71 | disparity_pub = self.node.create_publisher(
72 | DisparityImage, self.namespaces['disparity'], self.DEFAULT_QOS
73 | )
74 | camera_info_left = self.node.create_publisher(
75 | CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS
76 | )
77 | camera_info_right = self.node.create_publisher(
78 | CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS
79 | )
80 |
81 | try:
82 | image_left = JSONConversion.load_image_from_json(
83 | test_folder / 'image_left.json')
84 |
85 | disparity_image = DisparityImage()
86 | disp_img = cv2.imread(os.path.join(
87 | self.filepath, 'test_cases', 'stereo_images_chair', 'test_disparity.png'),
88 | cv2.IMREAD_UNCHANGED).astype(np.float32)
89 | disparity_image.image = CvBridge().cv2_to_imgmsg(disp_img, '32FC1')
90 | disparity_image.min_disparity = float(np.min(disp_img))
91 |
92 | camera_info = JSONConversion.load_camera_info_from_json(
93 | test_folder / 'camera_info.json')
94 |
95 | end_time = time.time() + TIMEOUT
96 | done = False
97 |
98 | while time.time() < end_time:
99 | image_left_pub.publish(image_left)
100 | disparity_pub.publish(disparity_image)
101 | camera_info_left.publish(camera_info)
102 | camera_info_right.publish(camera_info)
103 |
104 | rclpy.spin_once(self.node, timeout_sec=0.1)
105 |
106 | if 'points2' in received_messages:
107 | done = True
108 | break
109 |
110 | self.assertTrue(done, 'Appropriate output not received')
111 |
112 | finally:
113 | [self.node.destroy_subscription(sub) for sub in subs]
114 | self.node.destroy_publisher(image_left_pub)
115 | self.node.destroy_publisher(disparity_pub)
116 | self.node.destroy_publisher(camera_info_right)
117 | self.node.destroy_publisher(camera_info_left)
118 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_resize_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Proof-of-Life test for the Isaac ROS Resize node."""
10 |
11 | import os
12 | import pathlib
13 | import time
14 |
15 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
16 | import launch_ros
17 | import pytest
18 | import rclpy
19 | from sensor_msgs.msg import CameraInfo, Image
20 |
21 | USE_RELATIVE_SCALE = False
22 | SCALE_HEIGHT = 2.0
23 | SCALE_WIDTH = 2.0
24 | HEIGHT = 20
25 | WIDTH = 20
26 | BACKENDS = 'CUDA'
27 |
28 |
29 | @pytest.mark.rostest
30 | def generate_test_description():
31 | """Generate launch description with all ROS2 nodes for testing."""
32 | composable_nodes = [
33 | launch_ros.descriptions.ComposableNode(
34 | package='isaac_ros_image_proc',
35 | plugin='isaac_ros::image_proc::ResizeNode',
36 | name='resize_node',
37 | namespace=IsaacROSResizeTest.generate_namespace(),
38 | parameters=[{
39 | 'use_relative_scale': USE_RELATIVE_SCALE,
40 | 'scale_height': SCALE_HEIGHT,
41 | 'scale_width': SCALE_WIDTH,
42 | 'height': HEIGHT,
43 | 'width': WIDTH,
44 | 'backends': BACKENDS,
45 | }])]
46 |
47 | resize_container = launch_ros.actions.ComposableNodeContainer(
48 | package='rclcpp_components',
49 | name='resize_container',
50 | namespace='',
51 | executable='component_container',
52 | composable_node_descriptions=composable_nodes,
53 | output='screen'
54 | )
55 |
56 | return IsaacROSResizeTest.generate_test_description([resize_container])
57 |
58 |
59 | class IsaacROSResizeTest(IsaacROSBaseTest):
60 | """Validate image resizing in typical case."""
61 |
62 | filepath = pathlib.Path(os.path.dirname(__file__))
63 |
64 | @IsaacROSBaseTest.for_each_test_case(subfolder='resize')
65 | def test_resize_typical(self, test_folder) -> None:
66 | """Expect the node to output images with correctly resized dimensions."""
67 | self.generate_namespace_lookup(['image', 'camera_info', 'resized/image'])
68 | received_messages = {}
69 |
70 | resized_image_sub, = self.create_logging_subscribers(
71 | subscription_requests=[('resized/image', Image)],
72 | received_messages=received_messages
73 | )
74 |
75 | image_pub = self.node.create_publisher(
76 | Image, self.namespaces['image'], self.DEFAULT_QOS)
77 | camera_info_pub = self.node.create_publisher(
78 | CameraInfo, self.namespaces['camera_info'], self.DEFAULT_QOS)
79 |
80 | try:
81 | image = JSONConversion.load_image_from_json(
82 | test_folder / 'image.json')
83 | camera_info = JSONConversion.load_camera_info_from_json(
84 | test_folder / 'camera_info.json')
85 |
86 | # Publish test case over both topics
87 | image_pub.publish(image)
88 | camera_info_pub.publish(camera_info)
89 |
90 | # Wait at most TIMEOUT seconds for subscriber to respond
91 | TIMEOUT = 2
92 | end_time = time.time() + TIMEOUT
93 |
94 | done = False
95 | while time.time() < end_time:
96 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
97 |
98 | # If we have received a message on the output topic, break
99 | if 'resized/image' in received_messages:
100 | done = True
101 | break
102 |
103 | self.assertTrue(done, "Didn't receive output on resized/image topic!")
104 |
105 | resized_image = received_messages['resized/image']
106 |
107 | # Make sure that the output image size is set to desired dimensions
108 | desired_height = image.height * SCALE_HEIGHT if USE_RELATIVE_SCALE else HEIGHT
109 | desired_width = image.width * SCALE_WIDTH if USE_RELATIVE_SCALE else WIDTH
110 | self.assertEqual(resized_image.height, desired_height,
111 | f'Height is not {desired_height}!')
112 | self.assertEqual(resized_image.width, desired_width, f'Width is not {desired_width}!')
113 |
114 | finally:
115 | self.node.destroy_subscription(resized_image_sub)
116 | self.node.destroy_publisher(image_pub)
117 | self.node.destroy_publisher(camera_info_pub)
118 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_resize_aspect_ratio_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Aspect ratio test for the Isaac ROS Resize node."""
10 |
11 | import os
12 | import pathlib
13 | import time
14 |
15 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
16 | import launch_ros
17 | import pytest
18 | import rclpy
19 | from sensor_msgs.msg import CameraInfo, Image
20 |
21 | USE_RELATIVE_SCALE = True
22 | SCALE_HEIGHT = 2.0
23 | SCALE_WIDTH = 3.0
24 | HEIGHT = 20
25 | WIDTH = 30
26 | BACKENDS = 'CUDA'
27 |
28 |
29 | @pytest.mark.rostest
30 | def generate_test_description():
31 | """Generate launch description with all ROS2 nodes for testing."""
32 | composable_nodes = [
33 | launch_ros.descriptions.ComposableNode(
34 | package='isaac_ros_image_proc',
35 | plugin='isaac_ros::image_proc::ResizeNode',
36 | name='resize_node',
37 | namespace=IsaacROSResizeAspectRatioTest.generate_namespace(),
38 | parameters=[{
39 | 'use_relative_scale': USE_RELATIVE_SCALE,
40 | 'scale_height': SCALE_HEIGHT,
41 | 'scale_width': SCALE_WIDTH,
42 | 'height': HEIGHT,
43 | 'width': WIDTH,
44 | 'backends': BACKENDS,
45 | }])]
46 |
47 | resize_container = launch_ros.actions.ComposableNodeContainer(
48 | package='rclcpp_components',
49 | name='resize_container',
50 | namespace='',
51 | executable='component_container',
52 | composable_node_descriptions=composable_nodes,
53 | output='screen'
54 | )
55 |
56 | return IsaacROSResizeAspectRatioTest.generate_test_description([resize_container])
57 |
58 |
59 | class IsaacROSResizeAspectRatioTest(IsaacROSBaseTest):
60 | """Validate resizing with different aspect ratio."""
61 |
62 | filepath = pathlib.Path(os.path.dirname(__file__))
63 |
64 | @IsaacROSBaseTest.for_each_test_case(subfolder='resize')
65 | def test_resize_image_forwarded(self, test_folder) -> None:
66 | """Expect the image to be resized into a different aspect ratio."""
67 | self.generate_namespace_lookup(['image', 'camera_info', 'resized/image'])
68 | received_messages = {}
69 |
70 | resized_image_sub, = self.create_logging_subscribers(
71 | subscription_requests=[('resized/image', Image)],
72 | received_messages=received_messages
73 | )
74 |
75 | image_pub = self.node.create_publisher(
76 | Image, self.namespaces['image'], self.DEFAULT_QOS)
77 | camera_info_pub = self.node.create_publisher(
78 | CameraInfo, self.namespaces['camera_info'], self.DEFAULT_QOS)
79 |
80 | try:
81 | image = JSONConversion.load_image_from_json(
82 | test_folder / 'image.json')
83 | camera_info = JSONConversion.load_camera_info_from_json(
84 | test_folder / 'camera_info.json')
85 |
86 | # Publish test case over both topics
87 | image_pub.publish(image)
88 | camera_info_pub.publish(camera_info)
89 |
90 | # Wait at most TIMEOUT seconds for subscriber to respond
91 | TIMEOUT = 2
92 | end_time = time.time() + TIMEOUT
93 |
94 | done = False
95 | while time.time() < end_time:
96 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
97 |
98 | # If we have received a message on the output topic, break
99 | if 'resized/image' in received_messages:
100 | done = True
101 | break
102 |
103 | self.assertTrue(done, "Didn't receive output on resized/image topic!")
104 |
105 | resized_image = received_messages['resized/image']
106 |
107 | # Make sure that the output image size is set to desired dimensions
108 | desired_height = image.height * SCALE_HEIGHT if USE_RELATIVE_SCALE else HEIGHT
109 | desired_width = image.width * SCALE_WIDTH if USE_RELATIVE_SCALE else WIDTH
110 | self.assertEqual(resized_image.height, desired_height,
111 | f'Height is not {desired_height}!')
112 | self.assertEqual(resized_image.width, desired_width, f'Width is not {desired_width}!')
113 |
114 | finally:
115 | self.node.destroy_subscription(resized_image_sub)
116 | self.node.destroy_publisher(image_pub)
117 | self.node.destroy_publisher(camera_info_pub)
118 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/include/isaac_ros_stereo_image_proc/disparity_node.hpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #ifndef ISAAC_ROS_STEREO_IMAGE_PROC__DISPARITY_NODE_HPP_
12 | #define ISAAC_ROS_STEREO_IMAGE_PROC__DISPARITY_NODE_HPP_
13 |
14 | #include
15 | #include
16 | #include
17 | #include
18 |
19 | #include
20 |
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 |
28 | #include
29 | #include
30 | #include
31 | #include
32 | #include
33 | #include
34 | #include
35 | #include
36 |
37 | #include
38 | #include
39 |
40 | namespace isaac_ros
41 | {
42 | namespace stereo_image_proc
43 | {
44 |
45 | class DisparityNode : public rclcpp::Node
46 | {
47 | public:
48 | /**
49 | * @brief Construct a new Disparity Node object
50 | */
51 | explicit DisparityNode(const rclcpp::NodeOptions &);
52 |
53 | ~DisparityNode();
54 |
55 | private:
56 | message_filters::Subscriber<sensor_msgs::msg::Image> sub_left_image_;  // Left rectified image
57 | message_filters::Subscriber<sensor_msgs::msg::Image> sub_right_image_;  // Right rectified image
58 | message_filters::Subscriber<sensor_msgs::msg::CameraInfo> sub_left_info_;  // Left image info
59 | message_filters::Subscriber<sensor_msgs::msg::CameraInfo> sub_right_info_;  // Right image info
60 |
61 | using ExactPolicy = message_filters::sync_policies::ExactTime<
62 | sensor_msgs::msg::Image,
63 | sensor_msgs::msg::Image,
64 | sensor_msgs::msg::CameraInfo,
65 | sensor_msgs::msg::CameraInfo>;
66 |
67 | using ExactSync = message_filters::Synchronizer<ExactPolicy>;
68 |
69 | std::shared_ptr<ExactSync> exact_sync_;  // The message synchronizer
70 |
71 | std::shared_ptr<rclcpp::Publisher<stereo_msgs::msg::DisparityImage>> pub_disparity_;
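// Note: the four subscribers above are joined by the exact-time synchronizer, which delivers
// timestamp-matched (left image, right image, left info, right info) tuples to the cam_cb
// callback wired up in the node implementation.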
72 |
73 | /**
74 | * @brief Callback for the left and right rectified image subscriber
75 | *
76 | * @param left_rectified The left rectified image message received
77 | * @param right_rectified The right rectified image message received
78 | * @param sub_left_info_ The left image info message received
79 | * @param sub_right_info_ The right image info message received
80 | */
81 | void cam_cb(
82 | const sensor_msgs::msg::Image::ConstSharedPtr & left_rectified_,
83 | const sensor_msgs::msg::Image::ConstSharedPtr & right_rectified_,
84 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & sub_left_info_,
85 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & sub_right_info_);
86 |
87 | VPIStream vpi_stream_ = nullptr; // The VPI stream for VPI tasks
88 | VPIImage vpi_left_ = nullptr; // The VPI object for left image
89 | VPIImage vpi_right_ = nullptr;  // The VPI object for right image
90 | VPIImage vpi_disparity_ = nullptr; // The VPI object for disparity image
91 | VPIImage tmp_left_ = nullptr; // The VPI object for temporary image conversions
92 | VPIImage tmp_right_ = nullptr; // The VPI object for temporary image conversions
93 | VPIImage confidence_map_ = nullptr; // The VPI object for confidence map
94 | VPIImage temp_scale_ = nullptr; // The VPI object for final scaling
95 | VPIImage stereo_left_ = nullptr; // The VPI object for stereo output
96 | VPIImage stereo_right_ = nullptr; // The VPI object for stereo output
97 | uint32_t backends_; // VPI backends flags
98 | VPIStereoDisparityEstimatorCreationParams params_; // Disparity sgm parameters
99 | VPIConvertImageFormatParams conv_params_; // Image conversion params
100 | VPIConvertImageFormatParams scale_params_; // Image conversion params
101 | VPIPayload stereo_payload_ = nullptr; // The VPI object for disparity job payload
102 | VPIImageFormat stereo_format_; // The VPI object for format specification
103 |
104 | int max_disparity_ = 0; // Maximum value for pixel in disparity image
105 | int prev_height_ = 0; // Cached height
106 | int prev_width_ = 0; // Cached width
107 | int stereo_width_ = 0;  // Stereo image width
108 | int stereo_height_ = 0;  // Stereo image height
109 | };
110 |
111 | } // namespace stereo_image_proc
112 | } // namespace isaac_ros
113 |
114 | #endif // ISAAC_ROS_STEREO_IMAGE_PROC__DISPARITY_NODE_HPP_
115 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_rectify_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Proof-of-Life test for the Isaac ROS Rectify node."""
10 |
11 | import os
12 | import pathlib
13 | import time
14 |
15 | import cv2
16 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
17 | import launch_ros
18 | import pytest
19 | import rclpy
20 | from sensor_msgs.msg import CameraInfo, Image
21 | from sklearn.linear_model import LinearRegression
22 |
23 |
24 | @pytest.mark.rostest
25 | def generate_test_description():
26 | """Generate launch description with all ROS2 nodes for testing."""
27 | composable_nodes = [
28 | launch_ros.descriptions.ComposableNode(
29 | package='isaac_ros_image_proc',
30 | plugin='isaac_ros::image_proc::RectifyNode',
31 | name='rectify_node',
32 | namespace=IsaacROSRectifyTest.generate_namespace(),
33 | remappings=[('image', 'image_raw')]
34 | )]
35 |
36 | rectify_container = launch_ros.actions.ComposableNodeContainer(
37 | name='rectify_container',
38 | namespace='',
39 | package='rclcpp_components',
40 | executable='component_container',
41 | composable_node_descriptions=composable_nodes,
42 | output='screen'
43 | )
44 |
45 | return IsaacROSRectifyTest.generate_test_description([rectify_container])
46 |
47 |
48 | class IsaacROSRectifyTest(IsaacROSBaseTest):
49 | """Validate image rectification with chessboard images."""
50 |
51 | filepath = pathlib.Path(os.path.dirname(__file__))
52 |
53 | @IsaacROSBaseTest.for_each_test_case(subfolder='rectify')
54 | def test_rectify_chessboard(self, test_folder) -> None:
55 | """Expect the node to rectify chessboard images."""
56 | self.generate_namespace_lookup(['image_raw', 'camera_info', 'image_rect'])
57 |
58 | image_raw_pub = self.node.create_publisher(
59 | Image, self.namespaces['image_raw'], self.DEFAULT_QOS)
60 | camera_info_pub = self.node.create_publisher(
61 | CameraInfo, self.namespaces['camera_info'], self.DEFAULT_QOS)
62 |
63 | received_messages = {}
64 | image_rect_sub, = self.create_logging_subscribers(
65 | [('image_rect', Image)], received_messages)
66 |
67 | try:
68 | image_raw, chessboard_dims = JSONConversion.load_chessboard_image_from_json(
69 | test_folder / 'image_raw.json')
70 | camera_info = JSONConversion.load_camera_info_from_json(
71 | test_folder / 'camera_info.json')
72 |
73 | # Publish test case over both topics
74 | image_raw_pub.publish(image_raw)
75 | camera_info_pub.publish(camera_info)
76 |
77 | # Wait at most TIMEOUT seconds for subscriber to respond
78 | TIMEOUT = 2
79 | end_time = time.time() + TIMEOUT
80 |
81 | done = False
82 | while time.time() < end_time:
83 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
84 |
85 | # If we have received a message on the output topic, break
86 | if 'image_rect' in received_messages:
87 | done = True
88 | break
89 |
90 | self.assertTrue(done, "Didn't receive output on image_rect topic!")
91 |
92 | # Collect received image
93 | image_rect = self.bridge.imgmsg_to_cv2(received_messages['image_rect'])
94 |
95 | # Convert to grayscale and find chessboard corners in rectified image
96 | image_rect_gray = cv2.cvtColor(image_rect, cv2.COLOR_BGR2GRAY)
97 | ret, corners = cv2.findChessboardCorners(image_rect_gray, chessboard_dims, None)
98 | self.assertTrue(ret, "Couldn't find chessboard corners in output image!")
99 |
100 | # Make sure that each row of chessboard in rectified image is close to a straight line
101 | # Since the chessboard image is initially very distorted, this check confirms that the
102 | # image was actually rectified
103 | R_SQUARED_THRESHOLD = 0.81 # Allows R = 0.9
104 | for i in range(chessboard_dims[1]):
105 | row = corners[chessboard_dims[0]*i:chessboard_dims[0]*(i+1)]
106 | x, y = row[:, :, 0], row[:, :, 1]
107 | reg = LinearRegression().fit(x, y)
108 | self.assertGreaterEqual(
109 | reg.score(x, y), R_SQUARED_THRESHOLD,
110 | f'Rectified chessboard corners for row {i} were not in a straight line!')
111 |
112 | finally:
113 | self.assertTrue(self.node.destroy_subscription(image_rect_sub))
114 | self.assertTrue(self.node.destroy_publisher(image_raw_pub))
115 | self.assertTrue(self.node.destroy_publisher(camera_info_pub))
116 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_resize_invalid_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Edge case test for the Isaac ROS Resize node."""
10 |
11 | import os
12 | import pathlib
13 | import time
14 |
15 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
16 | import launch_ros
17 | import pytest
18 | from rcl_interfaces.msg import Log
19 | import rclpy
20 | from rclpy.logging import LoggingSeverity
21 | from sensor_msgs.msg import CameraInfo, Image
22 |
23 | USE_RELATIVE_SCALE = True
24 | SCALE_HEIGHT = -2.0
25 | SCALE_WIDTH = -3.0
26 | HEIGHT = -20
27 | WIDTH = -20
28 | BACKENDS = 'CUDA'
29 |
30 |
31 | @pytest.mark.rostest
32 | def generate_test_description():
33 | """Generate launch description with all ROS2 nodes for testing."""
34 | composable_nodes = [
35 | launch_ros.descriptions.ComposableNode(
36 | package='isaac_ros_image_proc',
37 | plugin='isaac_ros::image_proc::ResizeNode',
38 | name='resize_node',
39 | namespace=IsaacROSResizeInvalidTest.generate_namespace(),
40 | parameters=[{
41 | 'use_relative_scale': USE_RELATIVE_SCALE,
42 | 'scale_height': SCALE_HEIGHT,
43 | 'scale_width': SCALE_WIDTH,
44 | 'height': HEIGHT,
45 | 'width': WIDTH,
46 | 'backends': BACKENDS,
47 | }])]
48 |
49 | resize_container = launch_ros.actions.ComposableNodeContainer(
50 | name='resize_container',
51 | namespace='',
52 | package='rclcpp_components',
53 | executable='component_container',
54 | composable_node_descriptions=composable_nodes,
55 | output='screen'
56 | )
57 |
58 | return IsaacROSResizeInvalidTest.generate_test_description([resize_container])
59 |
60 |
61 | class IsaacROSResizeInvalidTest(IsaacROSBaseTest):
62 | """Validate error-catching behavior with invalid numbers."""
63 |
64 | filepath = pathlib.Path(os.path.dirname(__file__))
65 |
66 | @IsaacROSBaseTest.for_each_test_case(subfolder='resize')
67 | def test_resize_invalid(self, test_folder) -> None:
68 | """Expect the node to log an error when given invalid input."""
69 | self.generate_namespace_lookup(['image', 'camera_info', 'resized/image'])
70 | received_messages = {}
71 |
72 | resized_image_sub, rosout_sub = self.create_logging_subscribers(
73 | subscription_requests=[(self.namespaces['resized/image'], Image), ('/rosout', Log)],
74 | use_namespace_lookup=False,
75 | received_messages=received_messages
76 | )
77 |
78 | image_pub = self.node.create_publisher(
79 | Image, self.namespaces['image'], self.DEFAULT_QOS)
80 | camera_info_pub = self.node.create_publisher(
81 | CameraInfo, self.namespaces['camera_info'], self.DEFAULT_QOS)
82 |
83 | try:
84 | image = JSONConversion.load_image_from_json(
85 | test_folder / 'image.json')
86 | camera_info = JSONConversion.load_camera_info_from_json(
87 | test_folder / 'camera_info.json')
88 |
89 | # Publish test case over both topics
90 | image_pub.publish(image)
91 | camera_info_pub.publish(camera_info)
92 |
93 | # Wait at most TIMEOUT seconds for subscriber to respond
94 | TIMEOUT = 2
95 | end_time = time.time() + TIMEOUT
96 |
97 | done = False
98 | while time.time() < end_time:
99 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
100 |
101 | # If we have received a message on the output topic, break
102 | if '/rosout' in received_messages:
103 | done = True
104 | break
105 |
106 | self.assertTrue(done, "Didn't receive output on /rosout topic!")
107 |
108 | rosout = received_messages['/rosout']
109 |
110 | # Make sure that the output log message size is a non-empty error
111 | self.assertEqual(LoggingSeverity(rosout.level), LoggingSeverity.ERROR,
112 | 'Output did not have Error severity!')
113 | self.assertGreater(len(rosout.msg), 0, 'Output had empty message!')
114 |
115 | # Make sure no output image was received in the error case
116 | self.assertNotIn(
117 | self.namespaces['resized/image'], received_messages,
118 | 'Resized image was received despite error!')
119 |
120 | finally:
121 | self.node.destroy_subscription(resized_image_sub)
122 | self.node.destroy_subscription(rosout_sub)
124 | self.node.destroy_publisher(image_pub)
125 | self.node.destroy_publisher(camera_info_pub)
126 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/test/isaac_ros_image_proc_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Proof-of-Life test for the precompiled Isaac ROS Image Proc executable."""
10 |
11 | import os
12 | import pathlib
13 | import time
14 |
15 | import cv2
16 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
17 | import launch_ros
18 | import pytest
19 | import rclpy
20 | from sensor_msgs.msg import CameraInfo, Image
21 |
22 |
23 | @pytest.mark.rostest
24 | def generate_test_description():
25 | """Generate launch description with all ROS2 nodes for testing."""
26 | nodes = [
27 | launch_ros.actions.Node(
28 | package='isaac_ros_image_proc',
29 | executable='isaac_ros_image_proc',
30 | name='isaac_ros_image_proc',
31 | namespace=IsaacROSImageProcTest.generate_namespace()
32 | )]
33 |
34 | return IsaacROSImageProcTest.generate_test_description(nodes)
35 |
36 |
37 | class IsaacROSImageProcTest(IsaacROSBaseTest):
38 | """Validate standard image proc results with sample images."""
39 |
40 | filepath = pathlib.Path(os.path.dirname(__file__))
41 |
42 | @IsaacROSBaseTest.for_each_test_case(subfolder='image_proc')
43 | def test_image_proc(self, test_folder) -> None:
44 | """Expect the node to forward images from input to all output topics."""
45 | self.generate_namespace_lookup([
46 | 'image_raw', 'camera_info',
47 | 'image_mono', 'image_rect',
48 | 'image_color', 'image_rect_color'
49 | ])
50 |
51 | image_raw_pub = self.node.create_publisher(
52 | Image, self.namespaces['image_raw'], self.DEFAULT_QOS)
53 | camera_info_pub = self.node.create_publisher(
54 | CameraInfo, self.namespaces['camera_info'], self.DEFAULT_QOS)
55 |
56 | received_messages = {}
57 | image_mono_sub, image_rect_sub, image_color_sub, image_rect_color_sub = \
58 | self.create_logging_subscribers([
59 | ('image_mono', Image),
60 | ('image_rect', Image),
61 | ('image_color', Image),
62 | ('image_rect_color', Image),
63 | ], received_messages)
64 |
65 | try:
66 | timestamp = self.node.get_clock().now().to_msg()
67 | image_raw = JSONConversion.load_image_from_json(test_folder / 'image_raw.json')
68 | image_raw.header.stamp = timestamp
69 | camera_info = JSONConversion.load_camera_info_from_json(
70 | test_folder / 'camera_info.json')
71 | camera_info.header.stamp = timestamp
72 |
73 | # Publish test case over both topics
74 | image_raw_pub.publish(image_raw)
75 | camera_info_pub.publish(camera_info)
76 |
77 | # Wait at most TIMEOUT seconds for subscriber to respond
78 | TIMEOUT = 2
79 | end_time = time.time() + TIMEOUT
80 |
81 | done = False
82 | output_topics = ['image_mono', 'image_rect', 'image_color', 'image_rect_color']
83 | while time.time() < end_time:
84 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
85 |
86 | # If we have received exactly one message on each output topic, break
87 | if all([topic in received_messages for topic in output_topics]):
88 | done = True
89 | break
90 |
91 | self.assertTrue(done,
92 | "Didn't receive output on each topic!\n"
93 | 'Expected messages on:\n'
94 | + '\n'.join(output_topics)
95 | + '\nReceived messages on: \n'
96 | + '\n'.join(received_messages.keys()))
97 |
98 | # Collect received images and compare to baseline
99 | image_mono_actual = self.bridge.imgmsg_to_cv2(received_messages['image_mono'])
100 | image_rect_actual = self.bridge.imgmsg_to_cv2(received_messages['image_rect'])
101 | image_color_actual = self.bridge.imgmsg_to_cv2(received_messages['image_color'])
102 | image_rect_color_actual = self.bridge.imgmsg_to_cv2(
103 | received_messages['image_rect_color'])
104 |
105 | image_mono_expected = cv2.imread(
106 | str(test_folder / 'image_mono.jpg'), cv2.IMREAD_GRAYSCALE)
107 | image_rect_expected = cv2.imread(
108 | str(test_folder / 'image_rect.jpg'), cv2.IMREAD_GRAYSCALE)
109 | image_color_expected = cv2.imread(str(test_folder / 'image_color.jpg'))
110 | image_rect_color_expected = cv2.imread(str(test_folder / 'image_rect_color.jpg'))
111 |
112 | self.assertImagesEqual(image_mono_actual, image_mono_expected)
113 | self.assertImagesEqual(image_rect_actual, image_rect_expected)
114 | self.assertImagesEqual(image_color_actual, image_color_expected)
115 | self.assertImagesEqual(image_rect_color_actual, image_rect_color_expected)
116 |
117 | finally:
118 | self.assertTrue(self.node.destroy_subscription(image_mono_sub))
119 | self.assertTrue(self.node.destroy_subscription(image_rect_sub))
120 | self.assertTrue(self.node.destroy_subscription(image_color_sub))
121 | self.assertTrue(self.node.destroy_subscription(image_rect_color_sub))
122 | self.assertTrue(self.node.destroy_publisher(image_raw_pub))
123 | self.assertTrue(self.node.destroy_publisher(camera_info_pub))
124 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/test/isaac_ros_stereo_point_cloud_OSS_test.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | import os
10 | import pathlib
11 | import time
12 |
13 | import cv2
14 | from cv_bridge import CvBridge
15 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
16 |
17 | from launch_ros.actions import ComposableNodeContainer
18 | from launch_ros.descriptions import ComposableNode
19 |
20 | import numpy as np
21 | import pytest
22 | import rclpy
23 |
24 | from sensor_msgs.msg import CameraInfo, Image
25 | from sensor_msgs.msg import PointCloud2
26 | from stereo_msgs.msg import DisparityImage
27 |
28 |
29 | @pytest.mark.rostest
30 | def generate_test_description():
31 | pointcloud_node = ComposableNode(
32 | name='point_cloud',
33 | package='isaac_ros_stereo_image_proc',
34 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
35 | namespace=IsaacROSPointCloudOSSTest.generate_namespace(),
36 | parameters=[{
37 | 'use_color': True,
38 | }],)
39 |
40 | container = ComposableNodeContainer(
41 | name='point_cloud_container',
42 | namespace='',
43 | package='rclcpp_components',
44 | executable='component_container',
45 | composable_node_descriptions=[pointcloud_node],
46 | output='screen',
47 | )
48 |
49 | pointcloud_node_xyz = ComposableNode(
50 | name='point_cloud_node_xyz',
51 | package='isaac_ros_stereo_image_proc',
52 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
53 | namespace=IsaacROSPointCloudOSSTest.generate_namespace(),
54 | parameters=[{
55 | 'use_color': False,
56 | }],
57 | remappings=[
58 | ('points2', 'xyz/points2'),
59 | ])
60 |
61 | container_xyz = ComposableNodeContainer(
62 | name='point_cloud_xyz_container',
63 | namespace='',
64 | package='rclcpp_components',
65 | executable='component_container',
66 | composable_node_descriptions=[pointcloud_node_xyz],
67 | output='screen',
68 | )
69 |
70 | return IsaacROSPointCloudOSSTest.generate_test_description([container, container_xyz])
71 |
72 |
73 | class IsaacROSPointCloudOSSTest(IsaacROSBaseTest):
74 | filepath = pathlib.Path(os.path.dirname(__file__))
75 |
76 | @IsaacROSBaseTest.for_each_test_case()
77 | def test_comparison(self, test_folder):
78 | TIMEOUT = 20
79 | received_messages = {}
80 |
81 | self.generate_namespace_lookup(['left/image_rect_color', 'disparity',
82 | 'left/camera_info', 'right/camera_info',
83 | 'points2', 'xyz/points2'])
84 |
85 | rgb_sub, xyz_sub = \
86 | self.create_logging_subscribers([
87 | ('points2', PointCloud2),
88 | ('xyz/points2', PointCloud2),
89 | ], received_messages,
90 | accept_multiple_messages=True)
91 |
92 | image_left_pub = self.node.create_publisher(
93 | Image, self.namespaces['left/image_rect_color'], self.DEFAULT_QOS
94 | )
95 | disparity_pub = self.node.create_publisher(
96 | DisparityImage, self.namespaces['disparity'], self.DEFAULT_QOS
97 | )
98 | camera_info_left = self.node.create_publisher(
99 | CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS
100 | )
101 | camera_info_right = self.node.create_publisher(
102 | CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS
103 | )
104 |
105 | try:
106 | image_left = JSONConversion.load_image_from_json(
107 | test_folder / 'image_left.json')
108 |
109 | disparity_image = DisparityImage()
110 | disp_img = cv2.imread(os.path.join(
111 | self.filepath, 'test_cases', 'stereo_images_chair', 'test_disparity.png'),
112 | cv2.IMREAD_UNCHANGED).astype(np.float32)
113 | disparity_image.image = CvBridge().cv2_to_imgmsg(disp_img, '32FC1')
114 | disparity_image.min_disparity = float(np.min(disp_img))
115 |
116 | camera_info = JSONConversion.load_camera_info_from_json(
117 | test_folder / 'camera_info.json')
118 |
119 | end_time = time.time() + TIMEOUT
120 | done = False
121 |
122 | while time.time() < end_time:
123 | image_left_pub.publish(image_left)
124 | disparity_pub.publish(disparity_image)
125 | camera_info_left.publish(camera_info)
126 | camera_info_right.publish(camera_info)
127 |
128 | rclpy.spin_once(self.node, timeout_sec=(0.1))
129 |
130 | if all([
131 | len(messages) >= 1
132 | for messages in received_messages.values()
133 | ]):
134 | done = True
135 | break
136 | self.assertTrue(done)
137 | for rgb_msg in received_messages['points2']:
138 | self.assertEqual(rgb_msg.point_step, 16)
139 |
140 | for xyz_msg in received_messages['xyz/points2']:
141 | self.assertEqual(xyz_msg.point_step, 12)
142 | finally:
143 | self.node.destroy_subscription(rgb_sub)
144 | self.node.destroy_subscription(xyz_sub)
145 | self.node.destroy_publisher(image_left_pub)
146 | self.node.destroy_publisher(disparity_pub)
147 | self.node.destroy_publisher(camera_info_right)
148 | self.node.destroy_publisher(camera_info_left)
149 |
--------------------------------------------------------------------------------
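The two point_step assertions in the test above encode the output layouts: with use_color each point packs four 32-bit fields (x, y, z and an rgb value reinterpreted as a float), without color only the three coordinates. A minimal sketch of that arithmetic (the helper name is illustrative, not part of the test utilities):

    def expected_point_step(fields):
        # Every field in the node's output is a 32-bit (4-byte) value.
        return 4 * len(fields)

    assert expected_point_step(('x', 'y', 'z', 'rgb')) == 16  # colored cloud
    assert expected_point_step(('x', 'y', 'z')) == 12         # xyz-only cloud
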
/isaac_ros_image_proc/src/resize_node.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_image_proc/resize_node.hpp"
12 |
13 | #include "cv_bridge/cv_bridge.h"
14 | #include "image_transport/image_transport.hpp"
15 | #include "opencv2/opencv.hpp"
16 | #include "rclcpp/rclcpp.hpp"
17 | #include "sensor_msgs/msg/camera_info.hpp"
18 | #include "sensor_msgs/msg/image.hpp"
19 | #include "vpi/algo/ConvertImageFormat.h"
20 | #include "vpi/algo/Rescale.h"
21 | #include "vpi/OpenCVInterop.hpp"
22 | #include "vpi/VPI.h"
23 |
24 | #include "isaac_ros_common/vpi_utilities.hpp"
25 |
26 | namespace isaac_ros
27 | {
28 | namespace image_proc
29 | {
30 |
31 | ResizeNode::ResizeNode(const rclcpp::NodeOptions & options)
32 | : rclcpp::Node("ResizeNode", options),
33 | sub_{image_transport::create_camera_subscription(
34 | this, "image", std::bind(
35 | &ResizeNode::ResizeCallback,
36 | this, std::placeholders::_1, std::placeholders::_2), "raw")},
37 | pub_{image_transport::create_camera_publisher(this, "resized/image")},
38 | use_relative_scale_{declare_parameter("use_relative_scale", true)},
39 | scale_height_{declare_parameter("scale_height", 1.0)},
40 | scale_width_{declare_parameter("scale_width", 1.0)},
41 | height_{static_cast<int>(declare_parameter("height", -1))},
42 | width_{static_cast<int>(declare_parameter("width", -1))},
43 | vpi_backends_{isaac_ros::common::DeclareVPIBackendParameter(this, VPI_BACKEND_CUDA)} {}
44 |
45 | void ResizeNode::ResizeCallback(
46 | const sensor_msgs::msg::Image::ConstSharedPtr & image_msg,
47 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg)
48 | {
49 | cv_bridge::CvImagePtr image_ptr;
50 | try {
51 | image_ptr = cv_bridge::toCvCopy(image_msg);
52 | } catch (cv_bridge::Exception & e) {
53 | RCLCPP_ERROR(get_logger(), "cv_bridge exception: %s", e.what());
54 | return;
55 | }
56 |
57 | // Set the output image's header and encoding
58 | cv_bridge::CvImage output_image;
59 | output_image.header = image_ptr->header;
60 | output_image.encoding = image_ptr->encoding;
61 |
62 | VPIImage input{};
63 | CHECK_STATUS(vpiImageCreateOpenCVMatWrapper(image_ptr->image, 0, &input));
64 |
65 | VPIImageFormat type{VPI_IMAGE_FORMAT_U8};
66 | CHECK_STATUS(vpiImageGetFormat(input, &type));
67 |
68 | // Initialize VPI stream for all VPI operations
69 | VPIStream stream{};
70 | CHECK_STATUS(vpiStreamCreate(vpi_backends_, &stream));
71 |
72 | // Prepare CameraInfo output with desired size
73 | // The original dimensions will either be scaled or replaced entirely
74 | double scale_y, scale_x;
75 | sensor_msgs::msg::CameraInfo output_info_msg{*info_msg};
76 | if (use_relative_scale_) {
77 | if (scale_height_ <= 0 || scale_width_ <= 0) {
78 | RCLCPP_ERROR(get_logger(), "scale_height and scale_width must be greater than 0");
79 | return;
80 | }
81 |
82 | scale_y = scale_height_;
83 | scale_x = scale_width_;
84 | output_info_msg.height = static_cast<uint32_t>(info_msg->height * scale_height_);
85 | output_info_msg.width = static_cast<uint32_t>(info_msg->width * scale_width_);
86 | } else {
87 | if (height_ <= 0 || width_ <= 0) {
88 | RCLCPP_ERROR(get_logger(), "height and width must be greater than 0");
89 | return;
90 | }
91 |
92 | scale_y = static_cast<double>(height_) / info_msg->height;
93 | scale_x = static_cast<double>(width_) / info_msg->width;
94 | output_info_msg.height = height_;
95 | output_info_msg.width = width_;
96 | }
97 |
98 | // Rescale the relevant entries of the intrinsic and extrinsic matrices
99 | output_info_msg.k[0] = output_info_msg.k[0] * scale_x; // fx
100 | output_info_msg.k[2] = output_info_msg.k[2] * scale_x; // cx
101 | output_info_msg.k[4] = output_info_msg.k[4] * scale_y; // fy
102 | output_info_msg.k[5] = output_info_msg.k[5] * scale_y; // cy
103 |
104 | output_info_msg.p[0] = output_info_msg.p[0] * scale_x; // fx
105 | output_info_msg.p[2] = output_info_msg.p[2] * scale_x; // cx
106 | output_info_msg.p[3] = output_info_msg.p[3] * scale_x; // T
107 | output_info_msg.p[5] = output_info_msg.p[5] * scale_y; // fy
108 | output_info_msg.p[6] = output_info_msg.p[6] * scale_y; // cy
109 |
110 | // Prepare intermediate image with input dimensions
111 | VPIImage tmp_in{};
112 | CHECK_STATUS(
113 | vpiImageCreate(
114 | info_msg->width, info_msg->height, VPI_IMAGE_FORMAT_NV12_ER, 0,
115 | &tmp_in));
116 |
117 | // Prepare intermediate and output images with output dimensions
118 | VPIImage tmp_out{}, output{};
119 | CHECK_STATUS(
120 | vpiImageCreate(
121 | output_info_msg.width, output_info_msg.height, VPI_IMAGE_FORMAT_NV12_ER, 0,
122 | &tmp_out));
123 | CHECK_STATUS(
124 | vpiImageCreate(
125 | output_info_msg.width, output_info_msg.height, type, 0,
126 | &output));
127 |
128 | // Convert input to NV12 format
129 | CHECK_STATUS(vpiSubmitConvertImageFormat(stream, vpi_backends_, input, tmp_in, nullptr));
130 |
131 | // Rescale while still in NV12 format
132 | CHECK_STATUS(
133 | vpiSubmitRescale(
134 | stream, vpi_backends_, tmp_in, tmp_out, VPI_INTERP_LINEAR, VPI_BORDER_ZERO,
135 | 0));
136 |
137 | // Convert output to original format
138 | CHECK_STATUS(vpiSubmitConvertImageFormat(stream, vpi_backends_, tmp_out, output, nullptr));
139 |
140 | // Wait until all operations are complete
141 | CHECK_STATUS(vpiStreamSync(stream));
142 |
143 | // Transfer VPI output to image output
144 | VPIImageData outData{};
145 | CHECK_STATUS(vpiImageLock(output, VPI_LOCK_READ, &outData));
146 | output_image.image =
147 | cv::Mat{outData.planes[0].height, outData.planes[0].width, CV_8UC3, outData.planes[0].data,
148 | static_cast<size_t>(outData.planes[0].pitchBytes)};
149 | CHECK_STATUS(vpiImageUnlock(output));
150 |
151 | pub_.publish(*output_image.toImageMsg(), output_info_msg);
152 |
153 | vpiStreamDestroy(stream);
154 | vpiImageDestroy(input);
155 | vpiImageDestroy(output);
156 | vpiImageDestroy(tmp_in);
157 | vpiImageDestroy(tmp_out);
158 | }
159 |
160 | } // namespace image_proc
161 | } // namespace isaac_ros
162 |
163 | // Register as a component
164 | #include "rclcpp_components/register_node_macro.hpp"
165 | RCLCPP_COMPONENTS_REGISTER_NODE(isaac_ros::image_proc::ResizeNode)
166 |
--------------------------------------------------------------------------------
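The CameraInfo adjustment in resize_node.cpp above touches only the focal lengths, the principal point and the baseline-scaled translation term. A minimal NumPy sketch of the same bookkeeping, assuming k is the flattened row-major 3x3 intrinsic matrix and p the flattened row-major 3x4 projection matrix (the helper name is illustrative):

    import numpy as np

    def rescale_camera_info(k, p, scale_x, scale_y):
        k = np.array(k, dtype=float)
        p = np.array(p, dtype=float)
        k[0] *= scale_x  # fx
        k[2] *= scale_x  # cx
        k[4] *= scale_y  # fy
        k[5] *= scale_y  # cy
        p[0] *= scale_x  # fx
        p[2] *= scale_x  # cx
        p[3] *= scale_x  # Tx (baseline term)
        p[5] *= scale_y  # fy
        p[6] *= scale_y  # cy
        return k, p
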
/isaac_ros_image_proc/src/image_format_converter_node.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_image_proc/image_format_converter_node.hpp"
12 |
13 | #include <memory>
14 | #include <string>
15 | #include <unordered_map>
16 | #include <unordered_set>
17 | #include <utility>
18 | #include <vector>
19 |
20 | #include "cv_bridge/cv_bridge.h"
21 | #include "image_transport/image_transport.hpp"
22 | #include "rclcpp/rclcpp.hpp"
23 | #include "rclcpp/qos.hpp"
24 | #include "sensor_msgs/image_encodings.hpp"
25 | #include "sensor_msgs/msg/camera_info.hpp"
26 | #include "sensor_msgs/msg/image.hpp"
27 | #include "vpi/algo/ConvertImageFormat.h"
28 | #include "vpi/Image.h"
29 | #include "vpi/ImageFormat.h"
30 | #include "vpi/OpenCVInterop.hpp"
31 | #include "vpi/Stream.h"
32 |
33 | #include "isaac_ros_common/vpi_utilities.hpp"
34 |
35 | namespace
36 | {
37 |
38 | // Create OpenCV Supported Conversions
39 | // (https://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython)
40 | const std::unordered_set<std::string> g_cv_supported_types(
41 | {"mono8", "mono16", "bgr8", "rgb8", "bgra8", "rgba8"});
42 |
43 | // Map the encoding desired string to the VPI Image Format needed
44 | const std::unordered_map<std::string, VPIImageFormat> g_str_to_vpi_format({
45 | {"mono8", VPI_IMAGE_FORMAT_U8},
46 | {"mono16", VPI_IMAGE_FORMAT_U16},
47 | {"bgr8", VPI_IMAGE_FORMAT_BGR8},
48 | {"rgb8", VPI_IMAGE_FORMAT_RGB8},
49 | {"bgra8", VPI_IMAGE_FORMAT_BGRA8},
50 | {"rgba8", VPI_IMAGE_FORMAT_RGBA8}});
51 |
52 | // Map the encoding desired string to the number of channels needed
53 | const std::unordered_map<std::string, int> g_str_to_channels({
54 | {"mono8", CV_8UC1},
55 | {"mono16", CV_16UC1},
56 | {"bgr8", CV_8UC3},
57 | {"rgb8", CV_8UC3},
58 | {"bgra8", CV_8UC4},
59 | {"rgba8", CV_8UC4}});
60 |
61 | // Perform image format conversion using VPI
62 | cv::Mat GetConvertedMat(
63 | VPIImage & input, VPIImage & output, VPIStream & stream, const cv::Mat & cv_image,
64 | const uint32_t backends, const std::string encoding_current, const std::string encoding_desired)
65 | {
66 | CHECK_STATUS(vpiStreamCreate(backends, &stream));
67 | CHECK_STATUS(
68 | vpiImageCreateOpenCVMatWrapper(
69 | cv_image, g_str_to_vpi_format.at(encoding_current), 0, &input));
70 | CHECK_STATUS(
71 | vpiImageCreate(
72 | cv_image.cols, cv_image.rows, g_str_to_vpi_format.at(encoding_desired), 0, &output));
73 |
74 | // Convert input from current encoding to encoding desired
75 | CHECK_STATUS(vpiSubmitConvertImageFormat(stream, backends, input, output, nullptr));
76 |
77 | CHECK_STATUS(vpiStreamSync(stream));
78 |
79 | // Retrieve the output image contents into OpenCV matrix and return result
80 | VPIImageData out_data;
81 | CHECK_STATUS(vpiImageLock(output, VPI_LOCK_READ, &out_data));
82 | cv::Mat output_mat{out_data.planes[0].height, out_data.planes[0].width,
83 | g_str_to_channels.at(encoding_desired), out_data.planes[0].data,
84 | static_cast<size_t>(out_data.planes[0].pitchBytes)};
85 | CHECK_STATUS(vpiImageUnlock(output));
86 |
87 | return output_mat;
88 | }
89 | } // namespace
90 |
91 | namespace isaac_ros
92 | {
93 | namespace image_proc
94 | {
95 |
96 | ImageFormatConverterNode::ImageFormatConverterNode(const rclcpp::NodeOptions & options)
97 | : rclcpp::Node("ImageFormatConverterNode", options),
98 | sub_{image_transport::create_subscription(
99 | this, "image_raw", std::bind(
100 | &ImageFormatConverterNode::FormatCallback,
101 | this, std::placeholders::_1), "raw")},
102 | pub_{image_transport::create_publisher(this, "image")},
103 | encoding_desired_{static_cast<std::string>(
104 | this->declare_parameter("encoding_desired", "bgr8"))},
105 | vpi_backends_{isaac_ros::common::DeclareVPIBackendParameter(this, VPI_BACKEND_CUDA)} {}
106 |
107 | void ImageFormatConverterNode::FormatCallback(
108 | const sensor_msgs::msg::Image::ConstSharedPtr & image_msg)
109 | {
110 | cv_bridge::CvImagePtr image_ptr;
111 | try {
112 | image_ptr = cv_bridge::toCvCopy(image_msg);
113 | } catch (cv_bridge::Exception & e) {
114 | RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
115 | return;
116 | }
117 |
118 | // Identify source encoding from the image message
119 | std::string encoding_current{image_ptr->encoding};
120 |
121 | // Skip processing if image is already in desired encoding
122 | if (encoding_current == encoding_desired_) {
123 | pub_.publish(*image_msg);
124 | return;
125 | }
126 |
127 | const bool cv_supported = g_cv_supported_types.find(encoding_current) !=
128 | g_cv_supported_types.end() &&
129 | g_cv_supported_types.find(encoding_desired_) != g_cv_supported_types.end();
130 |
131 | VPIImage input = nullptr;
132 | VPIImage output = nullptr;
133 | VPIStream stream = nullptr;
134 |
135 | cv_bridge::CvImage output_image;
136 | output_image.header = image_ptr->header;
137 | output_image.encoding = encoding_desired_;
138 |
139 | bool publish = true;
140 | try {
141 | // Convert image from source to target encoding using VPI
142 | output_image.image = GetConvertedMat(
143 | input, output, stream, image_ptr->image, vpi_backends_, encoding_current,
144 | encoding_desired_);
145 | } catch (std::runtime_error & e) {
146 | // If there is an error converting the image using VPI, use OpenCV instead
147 | auto & clk = *get_clock();
148 | RCLCPP_WARN_THROTTLE(get_logger(), clk, 5000, "Exception: %s", e.what());
149 | if (cv_supported) {
150 | RCLCPP_INFO_THROTTLE(get_logger(), clk, 5000, "Attempting conversion using OpenCV");
151 | output_image = *cv_bridge::cvtColor(image_ptr, encoding_desired_);
152 | } else {
153 | publish = false;
154 | }
155 | }
156 |
157 | if (publish) {
158 | pub_.publish(output_image.toImageMsg());
159 | } else {
160 | auto & clk = *get_clock();
161 | RCLCPP_ERROR_THROTTLE(
162 | get_logger(), clk, 5000,
163 | "Image format conversion from '%s' to '%s' is not supported for both VPI and OpenCV",
164 | encoding_current, encoding_desired_);
165 | }
166 | vpiStreamDestroy(stream);
167 | vpiImageDestroy(input);
168 | vpiImageDestroy(output);
169 | }
170 |
171 | } // namespace image_proc
172 | } // namespace isaac_ros
173 |
174 | // Register as a component
175 | #include "rclcpp_components/register_node_macro.hpp"
176 | RCLCPP_COMPONENTS_REGISTER_NODE(isaac_ros::image_proc::ImageFormatConverterNode)
177 |
--------------------------------------------------------------------------------
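When VPI cannot handle a conversion, the node above falls back to cv_bridge's CPU path. A rough Python sketch of what that fallback amounts to for a few of the supported encodings, using plain OpenCV (the conversion table and helper are illustrative, not the node's API):

    import cv2

    CV_CONVERSIONS = {
        ('bgr8', 'rgb8'): cv2.COLOR_BGR2RGB,
        ('bgr8', 'mono8'): cv2.COLOR_BGR2GRAY,
        ('rgb8', 'bgra8'): cv2.COLOR_RGB2BGRA,
    }

    def convert_encoding(image, src, dst):
        # Mirror the node's early-out: identical encodings pass through untouched.
        if src == dst:
            return image
        return cv2.cvtColor(image, CV_CONVERSIONS[(src, dst)])
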
/isaac_ros_stereo_image_proc/test/isaac_ros_stereo_point_cloud_output_compare.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 |
10 | import os
11 | import pathlib
12 | import time
13 |
14 | import cv2
15 | from cv_bridge import CvBridge
16 | from isaac_ros_test import IsaacROSBaseTest, JSONConversion
17 |
18 | from launch_ros.actions import ComposableNodeContainer, Node
19 | from launch_ros.descriptions import ComposableNode
20 |
21 | import numpy as np
22 | import pytest
23 | import rclpy
24 |
25 | from sensor_msgs.msg import CameraInfo, Image
26 | from sensor_msgs.msg import PointCloud2
27 | from sensor_msgs_py import point_cloud2
28 | from stereo_msgs.msg import DisparityImage
29 |
30 | USE_COLOR = True
31 | AVOID_PADDING = False
32 |
33 |
34 | @pytest.mark.rostest
35 | def generate_test_description():
36 | pointcloud_node = ComposableNode(
37 | name='point_cloud',
38 | package='isaac_ros_stereo_image_proc',
39 | plugin='isaac_ros::stereo_image_proc::PointCloudNode',
40 | namespace=IsaacROSPointCloudComparisonTest.generate_namespace(),
41 | parameters=[{
42 | 'use_color': USE_COLOR,
43 | }],
44 | remappings=[
45 | ('points2', 'isaac_ros/points2'),
46 | ])
47 |
48 | container = ComposableNodeContainer(
49 | name='point_cloud_container',
50 | namespace='',
51 | package='rclcpp_components',
52 | executable='component_container',
53 | composable_node_descriptions=[pointcloud_node],
54 | output='screen',
55 | )
56 |
57 | # Reference Node
58 | ref = Node(
59 | package='stereo_image_proc',
60 | executable='point_cloud_node',
61 | name='point_cloud_node_proc',
62 | namespace=IsaacROSPointCloudComparisonTest.generate_namespace(),
63 | output='screen',
64 | parameters=[{
65 | 'use_color': USE_COLOR,
66 | 'use_system_default_qos': True,
67 | 'avoid_point_cloud_padding': AVOID_PADDING
68 | }],
69 | remappings=[
70 | ('points2', 'proc/points2'),
71 | ]
72 | )
73 |
74 | return IsaacROSPointCloudComparisonTest.generate_test_description([container, ref])
75 |
76 |
77 | class IsaacROSPointCloudComparisonTest(IsaacROSBaseTest):
78 | filepath = pathlib.Path(os.path.dirname(__file__))
79 |
80 | @IsaacROSBaseTest.for_each_test_case()
81 | def test_comparison(self, test_folder):
82 | TIMEOUT = 20
83 | received_messages = {}
84 |
85 | self.generate_namespace_lookup(['left/image_rect_color', 'disparity',
86 | 'left/camera_info', 'right/camera_info',
87 | 'isaac_ros/points2', 'proc/points2'])
88 | isaac_ros_sub, ref_sub = \
89 | self.create_logging_subscribers([
90 | ('isaac_ros/points2', PointCloud2),
91 | ('proc/points2', PointCloud2),
92 | ], received_messages,
93 | accept_multiple_messages=True)
94 |
95 | image_left_pub = self.node.create_publisher(
96 | Image, self.namespaces['left/image_rect_color'], self.DEFAULT_QOS
97 | )
98 | disparity_pub = self.node.create_publisher(
99 | DisparityImage, self.namespaces['disparity'], self.DEFAULT_QOS
100 | )
101 | camera_info_left = self.node.create_publisher(
102 | CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS
103 | )
104 | camera_info_right = self.node.create_publisher(
105 | CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS
106 | )
107 |
108 | try:
109 | image_left = JSONConversion.load_image_from_json(
110 | test_folder / 'image_left.json')
111 |
112 | disparity_image = DisparityImage()
113 | disp_img = cv2.imread(os.path.join(
114 | self.filepath, 'test_cases', 'stereo_images_chair', 'test_disparity.png'),
115 | cv2.IMREAD_UNCHANGED).astype(np.float32)
116 | disparity_image.image = CvBridge().cv2_to_imgmsg(disp_img, '32FC1')
117 | disparity_image.min_disparity = float(np.min(disp_img))
118 |
119 | camera_info = JSONConversion.load_camera_info_from_json(
120 | test_folder / 'camera_info.json')
121 |
122 | end_time = time.time() + TIMEOUT
123 | done = False
124 |
125 | while time.time() < end_time:
126 | image_left_pub.publish(image_left)
127 | disparity_pub.publish(disparity_image)
128 | camera_info_left.publish(camera_info)
129 | camera_info_right.publish(camera_info)
130 |
131 | rclpy.spin_once(self.node, timeout_sec=(0.1))
132 |
133 | if all([
134 | len(messages) >= 1
135 | for messages in received_messages.values()
136 | ]):
137 | done = True
138 | break
139 | self.assertTrue(done)
140 |
141 | for isaac_ros_msg, ref_msg in \
142 | zip(received_messages['isaac_ros/points2'], received_messages['proc/points2']):
143 | if USE_COLOR:
144 | isaac_ros_pts = point_cloud2.read_points(
145 | isaac_ros_msg, field_names=('x', 'y', 'z', 'rgb'), skip_nans=True)
146 | ref_pts = point_cloud2.read_points(
147 | ref_msg, field_names=('x', 'y', 'z', 'rgb'), skip_nans=True)
148 | else:
149 | isaac_ros_pts = point_cloud2.read_points(
150 | isaac_ros_msg, field_names=('x', 'y', 'z'), skip_nans=True)
151 | ref_pts = point_cloud2.read_points(
152 | ref_msg, field_names=('x', 'y', 'z'), skip_nans=True)
153 | xyz_error = 0
154 | rgb_error = 0
155 | n = 0
156 | for (isaac_ros_pt, ref_pt) in zip(isaac_ros_pts, ref_pts):
157 | xyz_error += (isaac_ros_pt[0] - ref_pt[0])**2
158 | xyz_error += (isaac_ros_pt[1] - ref_pt[1])**2
159 | xyz_error += (isaac_ros_pt[2] - ref_pt[2])**2
160 |
161 | if USE_COLOR:
162 | rgb_error += (isaac_ros_pt[3] - ref_pt[3])**2
163 |
164 | n += 1
165 | self.assertGreater(n, 0)
166 | # MSE error
167 | xyz_error = xyz_error / n
168 | self.node.get_logger().info(f'XYZ Error: {xyz_error}')
169 | self.node.get_logger().info(f'RGB Error: {rgb_error}')
170 | self.assertLess(xyz_error, 1e-6)
171 | self.assertEqual(rgb_error, 0)
172 | finally:
173 | self.node.destroy_subscription(isaac_ros_sub)
174 | self.node.destroy_subscription(ref_sub)
175 | self.node.destroy_publisher(image_left_pub)
176 | self.node.destroy_publisher(disparity_pub)
177 | self.node.destroy_publisher(camera_info_right)
178 | self.node.destroy_publisher(camera_info_left)
179 |
--------------------------------------------------------------------------------
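The comparison loop above accumulates the squared XYZ differences point by point and divides by the point count, i.e. it computes the mean squared Euclidean distance between the two clouds. A vectorized NumPy sketch of the same check, assuming two equal-length (N, 3) arrays of matched points:

    import numpy as np

    def xyz_mse(points_a, points_b):
        a = np.asarray(points_a, dtype=float)
        b = np.asarray(points_b, dtype=float)
        # Per-point squared Euclidean distance, averaged over the cloud
        return np.mean(np.sum((a - b) ** 2, axis=1))
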
/isaac_ros_image_proc/test/test_cases/generate_test_cases.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION is strictly prohibited.
8 |
9 | """Tool to generate test cases for the isaac_ros_image_proc ROS2 package."""
10 |
11 | import os
12 | from pathlib import Path
13 | import time
14 | from typing import Tuple
15 |
16 | import cv2
17 | from cv_bridge import CvBridge
18 | from isaac_ros_test import JSONConversion
19 | import numpy as np
20 | import rclpy
21 | from sensor_msgs.msg import CameraInfo, Image
22 |
23 |
24 | class TestCaseGenerator:
25 | """Class for test case generation utilities."""
26 |
27 | def __init__(self, test_cases_filepath: Path) -> None:
28 | """
29 | Create a TestCaseGenerator.
30 |
31 | Parameters
32 | ----------
33 | test_cases_filepath : Path
34 | The path to the folder in which test cases should be populated.
35 |
36 | """
37 | rclpy.init()
38 | self.node = rclpy.create_node('generate_test_cases')
39 | self.bridge = CvBridge()
40 | self.test_cases_filepath = test_cases_filepath
41 |
42 | def shutdown(self) -> None:
43 | """Shut down the test case generator tool."""
44 | rclpy.shutdown()
45 |
46 | def remove_all_test_cases(self) -> None:
47 | """Remove all test cases in this generator's test cases folder."""
48 | for test_case in self.test_cases_filepath.iterdir():
49 | for file in test_case.iterdir():
50 | file.unlink()
51 | test_case.rmdir()
52 |
53 | def add_test_case(
54 | self, name: str, image: np.ndarray,
55 | encoding: str = 'bgr8', camera_info: CameraInfo = None) -> None:
56 | """
57 | Add a test case to this generator's test cases folder.
58 |
59 | Parameters
60 | ----------
61 | name : str
62 | The name of the folder for this test case
63 | image : np.ndarray
64 | The input image for this test case, as NumPy array
65 | encoding : str, optional
66 | The image encoding to use, by default 'bgr8'
67 | camera_info : CameraInfo, optional
68 | The camera calibration parameters to use, by default None.
69 | If None, a default set of parameters is loaded from file.
70 |
71 | """
72 | (self.test_cases_filepath / name).mkdir(exist_ok=True)
73 | image_msg = self.bridge.cv2_to_imgmsg(image)
74 | image_msg.encoding = encoding
75 |
76 | if camera_info is None:
77 | # If no camera_info was specified, load default values from the test_cases/ folder
78 | # TODO(jaiveers): Should we instead randomly generate a valid camera_info message?
79 | camera_info = JSONConversion.load_camera_info_from_json(
80 | self.test_cases_filepath.parent / 'camera_info.json')
81 |
82 | JSONConversion.save_camera_info_to_json(
83 | camera_info, self.test_cases_filepath / name / 'camera_info.json')
84 | JSONConversion.save_image_to_json(
85 | image_msg, self.test_cases_filepath / name / 'image_raw.json')
86 |
87 | self.generate_ground_truth(image_msg, camera_info, self.test_cases_filepath / name)
88 |
89 | def add_grid_test_case(self,
90 | name: str,
91 | width: int = 640, height: int = 480,
92 | horizontal_lines: int = 10, vertical_lines: int = 10,
93 | background_color: Tuple[int, int, int] = (0, 0, 0),
94 | line_color: Tuple[int, int, int] = (255, 255, 255),
95 | line_thickness: int = 7
96 | ) -> None:
97 | """
98 | Add a grid-based test case.
99 |
100 | Parameters
101 | ----------
102 | name : str
103 | The name of this test case
104 | width : int, optional
105 | The width of the grid image, by default 640
106 | height : int, optional
107 | The height of the grid image, by default 480
108 | horizontal_lines : int, optional
109 | The number of horizontal lines to draw, by default 10
110 | vertical_lines : int, optional
111 | The number of vertical lines to draw, by default 10
112 | background_color : Tuple[int, int, int], optional
113 | The background color of the grid in BGR, by default (0, 0, 0)
114 | line_color : Tuple[int, int, int], optional
115 | The line color of the grid in BGR, by default (255, 255, 255)
116 | line_thickness : int, optional
117 | The thickness of the lines in pixels, by default 7
118 |
119 | """
120 | # Create 3-channel image that will be distorted by the processor
121 | distorted_image = np.zeros((height, width, 3), np.uint8)
122 | distorted_image[:, :] = background_color
123 |
124 | # Add horizontal lines
125 | for y in np.linspace(0, height, num=horizontal_lines):
126 | y = int(y)
127 | cv2.line(distorted_image, (0, y), (width, y), line_color, line_thickness)
128 |
129 | # Add vertical lines
130 | for x in np.linspace(0, width, num=vertical_lines):
131 | x = int(x)
132 | cv2.line(distorted_image, (x, 0), (x, height), line_color, line_thickness)
133 |
134 | self.add_test_case(name, distorted_image)
135 |
136 | def add_image_file_test_case(self, name: str, image_filepath: Path) -> None:
137 | """
138 | Add an image file-based test case.
139 |
140 | Parameters
141 | ----------
142 | name : str
143 | The name of this test case
144 | image_filepath : Path
145 | The path to the image file to use
146 |
147 | """
148 | image = cv2.imread(str(image_filepath))
149 | self.add_test_case(name, image)
150 |
151 | def generate_ground_truth(
152 | self, image_raw: Image, camera_info: CameraInfo, test_folder: Path) -> None:
153 | """
154 | Publish the test input and receive the test output to save as ground truth.
155 |
156 | Parameters
157 | ----------
158 | image_raw : Image
159 | The input ROS2 Image message to send
160 | camera_info : CameraInfo
161 | The input ROS2 Camera Info message to send
162 | test_folder : Path
163 | The test case folder to save the ground truth outputs to
164 |
165 | """
166 | QOS = 10 # Default Quality of Service queue length
167 |
168 | image_raw_pub = self.node.create_publisher(
169 | Image, 'image_raw', QOS)
170 | camera_info_pub = self.node.create_publisher(
171 | CameraInfo, 'camera_info', QOS)
172 |
173 | received_images = {}
174 | image_mono_sub = self.node.create_subscription(
175 | Image, 'image_mono', lambda msg: received_images.update({'image_mono': msg}), QOS)
176 | image_rect_sub = self.node.create_subscription(
177 | Image, 'image_rect', lambda msg: received_images.update({'image_rect': msg}), QOS)
178 | image_color_sub = self.node.create_subscription(
179 | Image, 'image_color', lambda msg: received_images.update({'image_color': msg}), QOS)
180 | image_rect_color_sub = self.node.create_subscription(
181 | Image, 'image_rect_color',
182 | lambda msg: received_images.update({'image_rect_color': msg}), QOS)
183 |
184 | try:
185 | # Publish test case over both topics
186 | image_raw_pub.publish(image_raw)
187 | camera_info_pub.publish(camera_info)
188 |
189 | # Wait at most TIMEOUT seconds to receive ground truth images
190 | TIMEOUT = 2
191 | end_time = time.time() + TIMEOUT
192 |
193 | done = False
194 | output_topics = ['image_mono', 'image_rect', 'image_color', 'image_rect_color']
195 | while time.time() < end_time:
196 | rclpy.spin_once(self.node, timeout_sec=TIMEOUT)
197 |
198 | # Break once we have received a message on each output topic
199 | if all([topic in received_images for topic in output_topics]):
200 | done = True
201 | break
202 | assert done, "Didn't receive output messages on all subscribers! " \
203 | 'Make sure image_proc is running!'
204 |
205 | for topic in output_topics:
206 | cv2.imwrite(str(test_folder / f'{topic}.jpg'),
207 | self.bridge.imgmsg_to_cv2(received_images[topic]))
208 |
209 | finally:
210 | self.node.destroy_subscription(image_mono_sub)
211 | self.node.destroy_subscription(image_rect_sub)
212 | self.node.destroy_subscription(image_color_sub)
213 | self.node.destroy_subscription(image_rect_color_sub)
214 | self.node.destroy_publisher(image_raw_pub)
215 | self.node.destroy_publisher(camera_info_pub)
216 |
217 |
218 | if __name__ == '__main__':
219 | gen = TestCaseGenerator(Path(os.path.dirname(__file__)) / 'test_cases')
220 |
221 | gen.remove_all_test_cases()
222 | gen.add_grid_test_case('white_grid')
223 | gen.add_grid_test_case('nvidia_green_grid', line_color=(0, 185, 118)) # NVIDIA green
224 | gen.add_grid_test_case('dense_grid', horizontal_lines=50,
225 | vertical_lines=50, line_thickness=2)
226 | gen.add_image_file_test_case('nvidia_icon', Path(
227 | os.path.dirname(__file__)) / 'test_cases' / 'NVIDIAprofile.png')
228 | gen.shutdown()
229 |
--------------------------------------------------------------------------------
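The generator above only records ground truth if an image_proc pipeline is already running and publishing on the bare topic names it subscribes to. Assuming that setup, adding a custom case is a short script against the TestCaseGenerator API (the case name and BGR colors below are illustrative):

    from pathlib import Path

    from generate_test_cases import TestCaseGenerator

    gen = TestCaseGenerator(Path('test_cases'))
    # Blue grid lines on a white background (colors are BGR)
    gen.add_grid_test_case('blue_on_white_grid',
                           background_color=(255, 255, 255),
                           line_color=(255, 0, 0))
    gen.shutdown()
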
/isaac_ros_stereo_image_proc/src/disparity_node.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_stereo_image_proc/disparity_node.hpp"
12 |
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 |
21 | #include
22 | #include
23 |
24 | #include "isaac_ros_common/vpi_utilities.hpp"
25 |
26 | // VPI status check macro
27 | #define CHECK_STATUS(STMT) \
28 | do { \
29 | VPIStatus status = (STMT); \
30 | if (status != VPI_SUCCESS) { \
31 | char buffer[VPI_MAX_STATUS_MESSAGE_LENGTH]; \
32 | vpiGetLastStatusMessage(buffer, sizeof(buffer)); \
33 | std::ostringstream ss; \
34 | ss << vpiStatusGetName(status) << ": " << buffer; \
35 | throw std::runtime_error(ss.str()); \
36 | } \
37 | } while (0);
38 |
39 | namespace isaac_ros
40 | {
41 | namespace stereo_image_proc
42 | {
43 |
44 | DisparityNode::DisparityNode(const rclcpp::NodeOptions & options)
45 | : Node("disp_node", options),
46 | backends_{isaac_ros::common::DeclareVPIBackendParameter(this, VPI_BACKEND_CUDA)}
47 | {
48 | using namespace std::placeholders;
49 |
50 | // Initialize message time sync filter
51 | exact_sync_.reset(
52 | new ExactSync(
53 | ExactPolicy(5),
54 | sub_left_image_, sub_right_image_, sub_left_info_, sub_right_info_));
55 | exact_sync_->registerCallback(&DisparityNode::cam_cb, this);
56 |
57 | // Publisher
58 | pub_disparity_ = create_publisher<stereo_msgs::msg::DisparityImage>("disparity", 1);
59 |
60 | // Subscriber
61 | sub_left_image_.subscribe(this, "left/image_rect");
62 | sub_right_image_.subscribe(this, "right/image_rect");
63 | sub_left_info_.subscribe(this, "left/camera_info");
64 | sub_right_info_.subscribe(this, "right/camera_info");
65 |
66 | // Get parameters
67 | max_disparity_ = declare_parameter("max_disparity", 64);
68 | if (max_disparity_ < 1) {
69 | RCLCPP_ERROR(this->get_logger(), "max_disparity must be greater than 0");
70 | return;
71 | }
72 |
73 | // Initialize parameters
74 | CHECK_STATUS(vpiInitConvertImageFormatParams(&conv_params_));
75 | CHECK_STATUS(vpiInitConvertImageFormatParams(&scale_params_));
76 | CHECK_STATUS(vpiInitStereoDisparityEstimatorCreationParams(¶ms_));
77 | params_.maxDisparity = max_disparity_;
78 | scale_params_.scale = 255.0 / (32 * params_.maxDisparity);
79 |
80 | // Print out backend used
81 | std::string backend_string = "CUDA";
82 | if (backends_ == VPI_BACKEND_TEGRA) {
83 | backend_string = "PVA-NVENC-VIC";
84 | }
85 | RCLCPP_INFO(this->get_logger(), "Using backend %s", backend_string.c_str());
86 |
87 | // Initialize VPI Stream
88 | vpiStreamCreate(0, &vpi_stream_);
89 | }
90 |
91 | DisparityNode::~DisparityNode()
92 | {
93 | // Sync the VPI stream if it is still open
94 | if (vpi_stream_) {
95 | vpiStreamSync(vpi_stream_);
96 | }
97 | if (backends_ == VPI_BACKEND_TEGRA) {
98 | vpiImageDestroy(confidence_map_);
99 | vpiImageDestroy(tmp_left_);
100 | vpiImageDestroy(tmp_right_);
101 | }
102 | vpiPayloadDestroy(stereo_payload_);
103 | vpiImageDestroy(temp_scale_);
104 | vpiImageDestroy(stereo_left_);
105 | vpiImageDestroy(stereo_right_);
106 | vpiImageDestroy(vpi_disparity_);
107 | vpiStreamDestroy(vpi_stream_);
108 | vpiImageDestroy(vpi_left_);
109 | vpiImageDestroy(vpi_right_);
110 | }
111 |
112 | void DisparityNode::cam_cb(
113 | const sensor_msgs::msg::Image::ConstSharedPtr & left_rectified_,
114 | const sensor_msgs::msg::Image::ConstSharedPtr & right_rectified_,
115 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & sub_left_info_,
116 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & sub_right_info_)
117 | {
118 | // Convert images to mono8
119 | const cv::Mat left_mono8 = cv_bridge::toCvShare(left_rectified_, "mono8")->image;
120 | const cv::Mat right_mono8 = cv_bridge::toCvShare(right_rectified_, "mono8")->image;
121 | int inputWidth, inputHeight;
122 | inputWidth = left_mono8.cols;
123 | inputHeight = left_mono8.rows;
124 |
125 | const bool resolution_change =
126 | prev_height_ != inputHeight || prev_width_ != inputWidth;
127 | const bool resolution_cache_check = prev_height_ != 0 || prev_width_ != 0;
128 | if ((!vpi_left_ || !vpi_right_ ) || resolution_change) {
129 | if (resolution_change && resolution_cache_check) {
130 | vpiImageDestroy(vpi_left_);
131 | vpiImageDestroy(vpi_right_);
132 | }
133 | vpiImageCreateOpenCVMatWrapper(left_mono8, 0, &vpi_left_);
134 | vpiImageCreateOpenCVMatWrapper(right_mono8, 0, &vpi_right_);
135 | } else {
136 | vpiImageSetWrappedOpenCVMat(vpi_left_, left_mono8);
137 | vpiImageSetWrappedOpenCVMat(vpi_right_, right_mono8);
138 | }
139 |
140 | if (!stereo_payload_ || resolution_change) {
141 | if (resolution_change && resolution_cache_check) {
142 | vpiPayloadDestroy(stereo_payload_);
143 | vpiImageDestroy(stereo_left_);
144 | vpiImageDestroy(stereo_right_);
145 | vpiImageDestroy(vpi_disparity_);
146 | }
147 |
148 | if (backends_ == VPI_BACKEND_TEGRA) {
149 | if (resolution_change && resolution_cache_check) {
150 | vpiImageDestroy(confidence_map_);
151 | vpiImageDestroy(tmp_left_);
152 | vpiImageDestroy(tmp_right_);
153 | }
154 |
155 | // PVA-NVENC-VIC backend only accepts 1920x1080 images and Y16 Block linear format.
156 | stereo_format_ = VPI_IMAGE_FORMAT_Y16_ER_BL;
157 | stereo_width_ = 1920;
158 | stereo_height_ = 1080;
159 | conv_params_.scale = 256;
160 | CHECK_STATUS(
161 | vpiImageCreate(
162 | stereo_width_, stereo_height_, VPI_IMAGE_FORMAT_U16, 0,
163 | &confidence_map_));
164 | CHECK_STATUS(vpiImageCreate(inputWidth, inputHeight, VPI_IMAGE_FORMAT_Y16_ER, 0, &tmp_left_));
165 | CHECK_STATUS(
166 | vpiImageCreate(
167 | inputWidth, inputHeight, VPI_IMAGE_FORMAT_Y16_ER, 0,
168 | &tmp_right_));
169 | } else {
170 | stereo_format_ = VPI_IMAGE_FORMAT_Y16_ER;
171 | stereo_width_ = inputWidth;
172 | stereo_height_ = inputHeight;
173 | confidence_map_ = nullptr;
174 | }
175 | prev_height_ = inputHeight;
176 | prev_width_ = inputWidth;
177 | CHECK_STATUS(
178 | vpiCreateStereoDisparityEstimator(
179 | backends_, stereo_width_, stereo_height_,
180 | stereo_format_, ¶ms_, &stereo_payload_));
181 | CHECK_STATUS(
182 | vpiImageCreate(
183 | stereo_width_, stereo_height_, VPI_IMAGE_FORMAT_U16,
184 | 0, &vpi_disparity_));
185 | CHECK_STATUS(vpiImageCreate(stereo_width_, stereo_height_, stereo_format_, 0, &stereo_left_));
186 | CHECK_STATUS(vpiImageCreate(stereo_width_, stereo_height_, stereo_format_, 0, &stereo_right_));
187 | }
188 |
189 | if (backends_ == VPI_BACKEND_TEGRA) {
190 | CHECK_STATUS(
191 | vpiSubmitConvertImageFormat(
192 | vpi_stream_, VPI_BACKEND_CUDA,
193 | vpi_left_, tmp_left_, &conv_params_));
194 | CHECK_STATUS(
195 | vpiSubmitConvertImageFormat(
196 | vpi_stream_, VPI_BACKEND_CUDA,
197 | vpi_right_, tmp_right_, &conv_params_));
198 | CHECK_STATUS(
199 | vpiSubmitRescale(
200 | vpi_stream_, VPI_BACKEND_VIC, tmp_left_, stereo_left_,
201 | VPI_INTERP_LINEAR, VPI_BORDER_CLAMP, 0));
202 | CHECK_STATUS(
203 | vpiSubmitRescale(
204 | vpi_stream_, VPI_BACKEND_VIC, tmp_right_, stereo_right_,
205 | VPI_INTERP_LINEAR, VPI_BORDER_CLAMP, 0));
206 | } else {
207 | CHECK_STATUS(
208 | vpiSubmitConvertImageFormat(
209 | vpi_stream_, VPI_BACKEND_CUDA,
210 | vpi_left_, stereo_left_, &conv_params_));
211 | CHECK_STATUS(
212 | vpiSubmitConvertImageFormat(
213 | vpi_stream_, VPI_BACKEND_CUDA,
214 | vpi_right_, stereo_right_, &conv_params_));
215 | }
216 | CHECK_STATUS(
217 | vpiSubmitStereoDisparityEstimator(
218 | vpi_stream_, backends_,
219 | stereo_payload_, stereo_left_, stereo_right_,
220 | vpi_disparity_, confidence_map_, nullptr));
221 |
222 | cv::Mat cvOut;
223 | if (!temp_scale_) {
224 | CHECK_STATUS(
225 | vpiImageCreate(
226 | stereo_width_, stereo_height_, VPI_IMAGE_FORMAT_U16, 0,
227 | &temp_scale_));
228 | }
229 | CHECK_STATUS(
230 | vpiSubmitConvertImageFormat(
231 | vpi_stream_, VPI_BACKEND_CUDA,
232 | vpi_disparity_, temp_scale_, &scale_params_));
233 | VPIImageData data;
234 |
235 | CHECK_STATUS(vpiStreamSync(vpi_stream_));
236 | CHECK_STATUS(vpiImageLock(temp_scale_, VPI_LOCK_READ, &data));
237 |
238 | try {
239 | CHECK_STATUS(vpiImageDataExportOpenCVMat(data, &cvOut));
240 |
241 | stereo_msgs::msg::DisparityImage disparity_image;
242 | disparity_image.header = sub_left_info_->header;
243 | disparity_image.image =
244 | *cv_bridge::CvImage(sub_left_info_->header, "mono16", cvOut).toImageMsg();
245 | disparity_image.f = sub_right_info_->p[0];
246 | disparity_image.t = sub_right_info_->p[3];
247 | disparity_image.min_disparity = 0;
248 | disparity_image.max_disparity = params_.maxDisparity;
249 | pub_disparity_->publish(disparity_image);
250 | } catch (...) {
251 | vpiImageUnlock(temp_scale_);
252 | RCLCPP_ERROR(this->get_logger(), "Exception occurred with locked image");
253 | }
254 | vpiImageUnlock(temp_scale_);
255 | }
256 |
257 | } // namespace stereo_image_proc
258 | } // namespace isaac_ros
259 |
260 | #include "rclcpp_components/register_node_macro.hpp"
261 | RCLCPP_COMPONENTS_REGISTER_NODE(isaac_ros::stereo_image_proc::DisparityNode)
262 |
--------------------------------------------------------------------------------
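Before publishing, the node above rescales the raw disparity map by 255 / (32 * max_disparity), which assumes VPI reports disparities in fixed point with 5 fractional bits (the factor of 32 in scale_params_.scale). Under that assumption, recovering pixel disparities from the published mono16 image is a single multiplication; a small NumPy sketch:

    import numpy as np

    def published_to_pixel_disparity(disparity_mono16, max_disparity=64):
        # published = (pixel_disparity * 32) * 255 / (32 * max_disparity)
        #           = pixel_disparity * 255 / max_disparity
        return np.asarray(disparity_mono16, dtype=np.float32) * max_disparity / 255.0
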
/isaac_ros_stereo_image_proc/src/point_cloud_node_cuda.cu:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_stereo_image_proc/point_cloud_node_cuda.hpp"
12 | #include
13 | #include
14 |
15 | namespace isaac_ros
16 | {
17 | namespace stereo_image_proc
18 | {
19 | namespace PointCloudNode_CUDA
20 | {
21 | __global__ void addColorToPointCloud_CUDA(
22 | float * pointcloud_data, const uint8_t * rgb, const int rgb_row_step,
23 | const int red_offset, const int green_offset, const int blue_offset, const int color_step,
24 | const nvPointCloudIntrinsics * intrinsics, const nvPointCloudProperties * cloud_properties)
25 | {
26 | /**
27 | * @brief CUDA function that writes RGB values from a 8-bit RGB byte stream into a pointcloud data buffer
28 | *
29 | * @param pointcloud_data The input point cloud data buffer that will be modified
30 | * @param rgb The RGB image represented as a 1D raw byte stream
31 | * @param rgb_row_step The step size for a row of the RGB image
32 | * @param red_offset The offset of the red pixel for one pixel
33 | * @param green_offset The offset of the green pixel for one pixel
34 | * @param blue_offset The offset of the blue pixel for one pixel
35 | * @param color_step The number of points in one pixel
36 | * @param intrinsics A struct that holds relevant camera intrinsic information
37 | * @param cloud_properties A struct that holds relevant point cloud properties
38 | */
39 |
40 | // Calculate the index and stride for the loop
41 | int u_idx = blockIdx.x * blockDim.x + threadIdx.x;
42 | int u_stride = gridDim.x * blockDim.x;
43 |
44 | int v_idx = blockIdx.y * blockDim.y + threadIdx.y;
45 | int v_stride = gridDim.y * blockDim.y;
46 |
47 | for (int v = v_idx; v < intrinsics->height; v += v_stride) {
48 | for (int u = u_idx; u < intrinsics->width; u += u_stride) {
49 | // Compute the location that we should access
50 | // And then access the R,G and B pixels
51 | int base_idx = v * rgb_row_step + u * color_step;
52 | uint8_t r_pixel = rgb[base_idx + red_offset];
53 | uint8_t g_pixel = rgb[base_idx + green_offset];
54 | uint8_t b_pixel = rgb[base_idx + blue_offset];
55 |
56 | uint32_t pixel;
57 |
58 | // Little endian: DC, R, G, B (DC = don't care)
59 | if (!cloud_properties->is_bigendian) {
60 | // Assumes 32-bit
61 | pixel = (static_cast<uint32_t>(r_pixel) << 16 |
62 | static_cast<uint32_t>(g_pixel) << 8 |
63 | static_cast<uint32_t>(b_pixel));
64 | }
65 | // Big endian: B, G, R, DC (DC = don't care)
66 | else {
67 | pixel = (static_cast<uint32_t>(b_pixel) << 24 |
68 | static_cast<uint32_t>(g_pixel) << 16 |
69 | static_cast<uint32_t>(r_pixel) << 8);
70 | }
71 |
72 | // Compute the point cloud data index to write to and write to it
73 | // Assumes 32-bit floating point data buffer
74 | int pointcloud_idx = v * cloud_properties->point_row_step + u * cloud_properties->point_step +
75 | cloud_properties->rgb_offset;
76 | pointcloud_data[pointcloud_idx] = *reinterpret_cast<float *>(&pixel);
77 | }
78 | }
79 | }
80 |
81 | void addColorToPointCloud(
82 | float * pointcloud_data, const uint8_t * rgb, int rgb_row_step,
83 | int red_offset, int green_offset, int blue_offset, int color_step,
84 | const nvPointCloudIntrinsics * intrinsics, const nvPointCloudProperties * cloud_properties,
85 | cudaStream_t stream)
86 | {
87 |
88 | // Threads per block in each dimension, and the number of blocks needed to cover the image
89 | const int num_threads_per_block_dim_x = 16;
90 | const int num_threads_per_block_dim_y = 16;
91 | int num_blocks_x = (intrinsics->width + num_threads_per_block_dim_x - 1) / num_threads_per_block_dim_x;
92 | int num_blocks_y = (intrinsics->height + num_threads_per_block_dim_y - 1) / num_threads_per_block_dim_y;
93 |
94 | dim3 block_num(num_blocks_x, num_blocks_y, 1);
95 | dim3 threads_per_block(num_threads_per_block_dim_x, num_threads_per_block_dim_y, 1);
96 |
97 | addColorToPointCloud_CUDA<<<block_num, threads_per_block, 0, stream>>>(pointcloud_data, rgb, rgb_row_step,
98 | red_offset, green_offset, blue_offset, color_step, intrinsics, cloud_properties);
99 |
100 | cudaError_t cuda_result = cudaGetLastError();
101 | throwIfCudaError(cuda_result);
102 |
103 | // Wait for stream to finish before continuing
104 | cuda_result = cudaStreamSynchronize(stream);
105 | throwIfCudaError(cuda_result);
106 | }
107 |
108 | template <typename T>
109 | __global__ void convertDisparityToPointCloud_CUDA(
110 | float * pointcloud_data, const T * disparity_img, const int disparity_row_step,
111 | const nvPointCloudIntrinsics * intrinsics,
112 | const nvPointCloudProperties * cloud_properties)
113 | {
114 | /**
115 | * @brief CUDA function that writes computes 3D points from a disparity image and writes
116 | * them to point cloud data buffer
117 | *
118 | * @param T The data type of the disparity image
119 | * @param pointcloud_data The input point cloud data buffer that will be modified
120 | * @param disparity_img The disparity image array, represented as a 1D array
121 | * @param disparity_row_step The step size for a row of the disparity image
122 | * @param intrinsics A struct that holds relevant camera intrinsic information
123 | * @param cloud_properties A struct that holds relevant point cloud properties
124 | */
125 |
126 | // Calculate the index and stride for the loop
127 | int u_idx = blockIdx.x * blockDim.x + threadIdx.x;
128 | int u_stride = gridDim.x * blockDim.x;
129 |
130 | int v_idx = blockIdx.y * blockDim.y + threadIdx.y;
131 | int v_stride = gridDim.y * blockDim.y;
132 |
133 | for (int v = v_idx; v < intrinsics->height; v += v_stride) {
134 | for (int u = u_idx; u < intrinsics->width; u += u_stride) {
135 | // Cast the disparity sample to a 32-bit float (the buffer type T may be
136 | // uint8, uint16 or float) before reprojecting it
137 | float disparity = static_cast<float>(disparity_img[v * disparity_row_step + u]);
138 | float X, Y, Z, W;
139 |
140 | // Calculate the X, Y, Z and W (scaling factor) values using the reprojection matrix
141 | W =
142 | static_cast<float>(intrinsics->reproject_matrix[3][2] * disparity +
143 | intrinsics->reproject_matrix[3][3]);
144 | X =
145 | static_cast<float>((intrinsics->reproject_matrix[0][0] * static_cast<float>(u) +
146 | intrinsics->reproject_matrix[0][3]) / W);
147 | Y =
148 | static_cast<float>((intrinsics->reproject_matrix[1][1] * static_cast<float>(v) +
149 | intrinsics->reproject_matrix[1][3]) / W);
150 | Z = static_cast<float>(intrinsics->reproject_matrix[2][3] / W);
151 |
152 | // The computed value is not finite; it's a bad point
153 | if (!isfinite(Z) || !isfinite(W) || !isfinite(X) || !isfinite(Y)) {
154 | X = cloud_properties->bad_point;
155 | Z = cloud_properties->bad_point;
156 | Y = cloud_properties->bad_point;
157 | }
158 |
159 | // Compute the point cloud data index to write to and write to it
160 | // Also scales it by the unit scaling (if necessary)
161 | int base_idx = v * cloud_properties->point_row_step + u * cloud_properties->point_step;
162 | pointcloud_data[base_idx +
163 | cloud_properties->x_offset] = static_cast<float>(X * intrinsics->unit_scaling);
164 | pointcloud_data[base_idx +
165 | cloud_properties->y_offset] = static_cast<float>(Y * intrinsics->unit_scaling);
166 | pointcloud_data[base_idx +
167 | cloud_properties->z_offset] = static_cast<float>(Z * intrinsics->unit_scaling);
168 | }
169 | }
170 | }
171 |
172 | template <typename T>
173 | void convertDisparityToPointCloud(
174 | float * pointcloud_data, const T * disparity_img, const int disparity_row_step,
175 | const nvPointCloudIntrinsics * intrinsics, const nvPointCloudProperties * cloud_properties,
176 | cudaStream_t stream)
177 | {
178 | // Threads per block in each dimension, and the number of blocks needed to cover the image
179 | const int num_threads_per_block_dim_x = 16;
180 | const int num_threads_per_block_dim_y = 16;
181 | int num_blocks_x = (intrinsics->width + num_threads_per_block_dim_x - 1) / num_threads_per_block_dim_x;
182 | int num_blocks_y = (intrinsics->height + num_threads_per_block_dim_y - 1) / num_threads_per_block_dim_y;
183 |
184 | dim3 block_num(num_blocks_x, num_blocks_y, 1);
185 | dim3 threads_per_block(num_threads_per_block_dim_x, num_threads_per_block_dim_y, 1);
186 | convertDisparityToPointCloud_CUDA<<< block_num, threads_per_block, 0, stream>>>
187 | (pointcloud_data, disparity_img, disparity_row_step, intrinsics, cloud_properties);
188 |
189 | cudaError_t cuda_result = cudaGetLastError();
190 | throwIfCudaError(cuda_result);
191 |
192 | // Wait for stream to finish before continuing
193 | cuda_result = cudaStreamSynchronize(stream);
194 | throwIfCudaError(cuda_result);
195 | }
196 |
197 | // Template initialization
198 | template void convertDisparityToPointCloud(
199 | float * pointcloud_data, const uint8_t * disparity_img,
200 | const int disparity_row_step, const nvPointCloudIntrinsics * intrinsics,
201 | const nvPointCloudProperties * cloud_properties, cudaStream_t stream);
202 |
203 | template void convertDisparityToPointCloud(
204 | float * pointcloud_data, const float * disparity_img,
205 | const int disparity_row_step, const nvPointCloudIntrinsics * intrinsics,
206 | const nvPointCloudProperties * cloud_properties, cudaStream_t stream);
207 |
208 | template void convertDisparityToPointCloud(
209 | float * pointcloud_data, const uint16_t * disparity_img,
210 | const int disparity_row_step, const nvPointCloudIntrinsics * intrinsics,
211 | const nvPointCloudProperties * cloud_properties, cudaStream_t stream);
212 |
213 | } // namespace PointCloudNode_CUDA
214 | } // namespace stereo_image_proc
215 | } // namespace isaac_ros
216 |
--------------------------------------------------------------------------------
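convertDisparityToPointCloud_CUDA above applies the standard 4x4 reprojection matrix to each (u, v, disparity) sample. The same arithmetic for a single pixel, as a NumPy sketch (Q stands for the reprojection matrix held in the intrinsics struct):

    import numpy as np

    def reproject_pixel(u, v, disparity, Q, unit_scaling=1.0, bad_point=float('nan')):
        W = Q[3][2] * disparity + Q[3][3]
        X = (Q[0][0] * u + Q[0][3]) / W
        Y = (Q[1][1] * v + Q[1][3]) / W
        Z = Q[2][3] / W
        if not np.all(np.isfinite([X, Y, Z, W])):
            X = Y = Z = bad_point  # mark non-finite results as bad points
        return X * unit_scaling, Y * unit_scaling, Z * unit_scaling
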
/isaac_ros_stereo_image_proc/include/isaac_ros_stereo_image_proc/point_cloud_node.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 | #ifndef ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_HPP_
11 | #define ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_HPP_
12 |
13 | #include "nvPointCloud.h"
14 | #include "point_cloud_node_cuda.hpp"
15 |
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include
23 |
24 | #include
25 | #include
26 |
27 | #include
28 | #include
29 | #include
30 | #include
31 | #include
32 |
33 | #include
34 | #include
35 |
36 | #include
37 | #include
38 | #include
39 |
40 | namespace isaac_ros
41 | {
42 | namespace stereo_image_proc
43 | {
44 | /**
45 | * @class PointCloudNode
46 | * @brief This node combines a disparity image message and a rectified color image and outputs a point cloud
47 | */
48 |
49 | class PointCloudNode : public rclcpp::Node
50 | {
51 | public:
52 | /**
53 | * @brief Construct a new Point Cloud Node object
54 | */
55 | explicit PointCloudNode(const rclcpp::NodeOptions & options);
56 |
57 | /**
58 | * @brief Destroy the Point Cloud Node object
59 | */
60 | ~PointCloudNode();
61 |
62 | private:
63 | /**
64 | * @brief Function that assigns ROS parameters to member variables
65 | *
66 | * The following parameters are available:
67 | * @param queue_size Determines the queue size of the subscriber
68 | * @param unit_scaling Determines the amount to scale the point cloud xyz points by
69 | * @param use_color Determines whether the output point cloud should have color or not
70 | */
71 | void initializeParameters();
72 |
73 | /**
74 | * @brief Function that sets up the message sync policy and subscribes the node to the relevant subscribers
75 | *
76 | */
77 | void setupSubscribers();
78 |
79 | /**
80 | * @brief Function that sets up the topic that the node will publish to
81 | *
82 | */
83 | void setupPublishers();
84 |
85 | message_filters::Subscriber<sensor_msgs::msg::Image> sub_left_image_; // Left rectified image
86 | message_filters::Subscriber<sensor_msgs::msg::CameraInfo> sub_left_info_; // Left camera info
87 | message_filters::Subscriber<sensor_msgs::msg::CameraInfo> sub_right_info_; // Right camera info
88 | message_filters::Subscriber<stereo_msgs::msg::DisparityImage> sub_disparity_; // Disparity image
89 |
90 | using ExactPolicy = message_filters::sync_policies::ExactTime<
91 | sensor_msgs::msg::Image,
92 | sensor_msgs::msg::CameraInfo,
93 | sensor_msgs::msg::CameraInfo,
94 | stereo_msgs::msg::DisparityImage>;
95 | using ExactSync = message_filters::Synchronizer<ExactPolicy>;
96 |
97 | std::shared_ptr<ExactSync> exact_sync_; // Exact message synchronizer
98 |
99 | std::shared_ptr<
100 | rclcpp::Publisher<sensor_msgs::msg::PointCloud2>> pub_points2_; // PointCloud2 publisher
101 |
102 | /**
103 | * @brief Callback function for the left image, left camera info,
104 | * right camera info and disparity image subscriber.
105 | * This method will also publish the point cloud to the relevant topic
106 | *
107 | * @param left_image_msg The left rectified image received
108 | * @param left_info_msg The left image info message received
109 | * @param right_info_msg The right image info message received
110 | * @param disp_msg The disparity image message received
111 | */
112 | void image_cb(
113 | const sensor_msgs::msg::Image::ConstSharedPtr & left_image_msg,
114 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & left_info_msg,
115 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & right_info_msg,
116 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg);
117 |
118 | /**
119 | * @brief Initializes an empty PointCloud2 message using information from
120 | * the disparity image and use_color parameters
121 | *
122 | * @warning Method does not fill the PointCloud2 message with points
123 | *
124 | * @param cloud_msg The input PointCloud2 message that will be modified
125 | * @param disp_msg The disparity image message that determines the PointCloud2 size
126 | */
127 | void formatPointCloudMessage(
128 | sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg,
129 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg);
130 |
131 | /**
132 | * @brief Updates RGB, Disparity and PointCloud2 data buffer sizes for use in GPU
133 | *
134 | * @note If the buffers are not initialized, this method will call initializeCuda
135 | *
136 | * @param cloud_msg The PointCloud2 message that determines the PointCloud2 data buffer size
137 | * @param left_image_msg The left rectified image that determines the RGB image buffer size
138 | * @param disp_msg The disparity image that determines the disparity image buffer size
139 | */
140 | void updateCudaBuffers(
141 | const sensor_msgs::msg::PointCloud2::ConstSharedPtr & cloud_msg,
142 | const sensor_msgs::msg::Image::ConstSharedPtr & left_image_msg,
143 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg);
144 |
145 | /**
146 | * @brief Initializes the PointCloud2, RGB and disparity data buffers,
147 | * and the intrinsics and cloud properties structs for use in GPU.
148 | * This function also initializes the CUDA streams
149 | *
150 | * @param cloud_size The PointCloud2 data buffer size that will be allocated
151 | * @param rgb_image_size The RGB image data buffer size that will be allocated
152 | * @param disparity_image_size The disparity image data buffer size that will be allocated
153 | */
154 | void initializeCuda(int cloud_size, int rgb_image_size, int disparity_image_size);
155 |
156 | /**
157 | * @brief Macro that calls the _checkCudaErrors function
158 | *
159 | * @param result The result from the CUDA function call
160 | */
161 | #define checkCudaErrors(result) {_checkCudaErrors((result), __FILE__, __LINE__); \
162 | }
163 | /**
164 | * @brief Checks if a CUDA error occurred. If so, reports the error and terminates the program
165 | *
166 | * @param result The result from a CUDA function call
167 | * @param filename The file that called the CUDA function
168 | * @param line_number The line number at which the error occurred
169 | */
170 | void _checkCudaErrors(cudaError_t result, const char * filename, int line_number);
171 |
172 | /**
173 | * @brief Updates the camera intrinsics struct variables
174 | *
175 | * @param disp_msg The disparity image that updates the height,
176 | * width and minimum disparity
177 | */
178 | void updateIntrinsics(
179 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg);
180 |
181 | /**
182 | * @brief Updates the cloud properties struct variables
183 | *
184 | * @param cloud_msg The PointCloud2 message to extract the cloud properties from
185 | */
186 | void updateCloudProperties(
187 | const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg);
188 |
189 | /**
190 | * @brief Converts a disparity image into 3D points
191 | * and writes them into a PointCloud2 data buffer
192 | *
193 | * @note This method internally copies the disparity image into GPU memory
194 | * @note Only MONO8, 8UC1, MONO16, 16UC1 and 32FC1 disparity image encodings are supported
195 | * @note The parameter T is used to decode the disparity image's buffer
196 | *
197 | * @param T Template type that represents the type of the disparity image's pixels
198 | * @param cloud_buffer Empty float array that represents the PointCloud2's data buffer
199 | * @param disp_msg The disparity image message that will be used to generate 3D points
200 | */
201 | template<typename T>
202 | void convertDisparityToPointCloud(
203 | float * cloud_buffer,
204 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg);
205 |
206 | /**
207 | * @brief Writes RGB from the left rectified image into the PointCloud2 data buffer
208 | *
209 | * @note This method internally copies the RGB image into GPU memory
210 | * @note Only RGB8, BGR8 and MONO8 are supported
211 | *
212 | * @param cloud_buffer Float array that represents the Pointcloud2's data buffer
213 | * @param rgb_image_msg The image that will be used to extract RGB values
214 | */
215 | void addColorToPointCloud(
216 | float * cloud_buffer,
217 | const sensor_msgs::msg::Image::ConstSharedPtr & rgb_image_msg);
218 |
219 | /**
220 | * @brief Copies the GPU PointCloud data buffer into a PointCloud2 message
221 | *
222 | * @param cloud_msg The input PointCloud2 message with a data buffer
223 | * that will be overwritten
224 | */
225 | void copyPointCloudBufferToMessage(
226 | sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg);
227 |
228 | nvPointCloudIntrinsics * intrinsics_ = nullptr; // Relevant camera info struct
229 | nvPointCloudProperties * cloud_properties_ = nullptr; // Relevant cloud properties struct
230 |
231 | uint8_t * disparity_image_buffer_ = nullptr; // GPU disparity image data buffer
232 | int disparity_image_buffer_size_ = 0; // GPU disparity image data buffer size
233 | cudaStream_t stream_; // CUDA stream
234 |
235 | uint8_t * rgb_image_buffer_ = nullptr; // GPU RGB image data buffer
236 | int rgb_image_buffer_size_ = 0; // GPU RGB image data buffer size
237 |
238 | uint8_t * pointcloud_data_buffer_ = nullptr; // GPU point cloud data buffer
239 | int pointcloud_data_buffer_size_ = 0; // GPU point cloud data buffer size
240 |
241 | image_geometry::StereoCameraModel model_; // Stereo image geometry
242 | bool cuda_initialized_ = false; // Boolean to check if CUDA memory has been initialized
243 | float unit_scaling_; // Parameter to scale the x, y and z output of the node
244 | int queue_size_; // Queue size of the subscriber
245 | bool use_color_; // Boolean to decide whether to use color or not
246 | };
247 |
248 | } // namespace stereo_image_proc
249 | } // namespace isaac_ros
250 | #endif // ISAAC_ROS_STEREO_IMAGE_PROC__POINT_CLOUD_NODE_HPP_
251 |
--------------------------------------------------------------------------------
/isaac_ros_image_proc/src/rectify_node.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_image_proc/rectify_node.hpp"
12 |
13 | #include
14 | #include
15 |
16 | #include "cv_bridge/cv_bridge.h"
17 | #include "image_transport/image_transport.hpp"
18 | #include "opencv2/opencv.hpp"
19 | #include "opencv2/calib3d/calib3d.hpp"
20 | #include "rclcpp/rclcpp.hpp"
21 | #include "sensor_msgs/distortion_models.hpp"
22 | #include "vpi/algo/ConvertImageFormat.h"
23 | #include "vpi/algo/Remap.h"
24 | #include "vpi/LensDistortionModels.h"
25 | #include "vpi/OpenCVInterop.hpp"
26 | #include "vpi/VPI.h"
27 |
28 | #include "isaac_ros_common/vpi_utilities.hpp"
29 |
30 | namespace
31 | {
32 | // Map the encoding desired string to the VPI Image Format needed
33 | const std::unordered_map<std::string, VPIImageFormat> g_str_to_vpi_format({
34 | {"mono8", VPI_IMAGE_FORMAT_U8},
35 | {"mono16", VPI_IMAGE_FORMAT_U16},
36 | {"bgr8", VPI_IMAGE_FORMAT_BGR8},
37 | {"rgb8", VPI_IMAGE_FORMAT_RGB8},
38 | {"bgra8", VPI_IMAGE_FORMAT_BGRA8},
39 | {"rgba8", VPI_IMAGE_FORMAT_RGBA8}});
40 | } // namespace
41 |
42 | namespace isaac_ros
43 | {
44 | namespace image_proc
45 | {
46 |
47 | RectifyNode::RectifyNode(const rclcpp::NodeOptions & options)
48 | : Node("RectifyNode", options),
49 | sub_{image_transport::create_camera_subscription(
50 | this, "image", std::bind(
51 | &RectifyNode::RectifyCallback,
52 | this, std::placeholders::_1, std::placeholders::_2), "raw")},
53 | pub_{image_transport::create_publisher(this, "image_rect")},
54 | interpolation_{static_cast<VPIInterpolationType>(declare_parameter("interpolation",
55 | static_cast<int>(VPI_INTERP_CATMULL_ROM)))},
56 | vpi_backends_{isaac_ros::common::DeclareVPIBackendParameter(this, VPI_BACKEND_CUDA)}
57 | {
58 | try {
59 | std::string backend_str = "CUDA";
60 | if (vpi_backends_ == VPI_BACKEND_VIC) {
61 | backend_str = "VIC";
62 | }
63 | RCLCPP_INFO(get_logger(), "Using backend %s", backend_str.c_str());
64 |
65 | // Create stream on specified backend
66 | CHECK_STATUS(vpiStreamCreate(vpi_backends_, &stream_));
67 | } catch (std::runtime_error & e) {
68 | RCLCPP_ERROR(get_logger(), "Error while initializing Rectify Node: %s", e.what());
69 | }
70 | }
71 |
72 | RectifyNode::~RectifyNode()
73 | {
74 | vpiImageDestroy(image_);
75 | vpiStreamDestroy(stream_);
76 | vpiPayloadDestroy(remap_);
77 | vpiImageDestroy(tmp_in_);
78 | vpiImageDestroy(tmp_out_);
79 | }
80 |
81 | void RectifyNode::RectifyCallback(
82 | const sensor_msgs::msg::Image::ConstSharedPtr & image_msg,
83 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg)
84 | {
85 | // If there are no consumers for the rectified image, then don't spend compute resources
86 | if (pub_.getNumSubscribers() < 1) {
87 | return;
88 | }
89 |
90 | // Check focal length to ensure that the camera is calibrated
91 | if (info_msg->k[0] == 0.0) {
92 | RCLCPP_ERROR(
93 | get_logger(), "Rectified topic '%s' requested but camera publishing '%s' "
94 | "is uncalibrated", pub_.getTopic().c_str(), sub_.getInfoTopic().c_str());
95 | return;
96 | }
97 |
98 | // Make sure this is a supported image encoding
99 | if (g_str_to_vpi_format.find(image_msg->encoding) == g_str_to_vpi_format.end()) {
100 | RCLCPP_ERROR(
101 | get_logger(), "Requested image format %s has no known VPI correspondence",
102 | image_msg->encoding.c_str());
103 | return;
104 | }
105 |
106 | // If the image is undistorted (all distortion coeffs are 0), then no need to rectify
107 | // Simply republish the image
108 | if (std::all_of(info_msg->d.begin(), info_msg->d.end(), [](auto el) {return el == 0.0;})) {
109 | pub_.publish(image_msg);
110 | return;
111 | }
112 |
113 | // Collect pixel binning parameters from camera info
114 | uint32_t binning_x = info_msg->binning_x ? info_msg->binning_x : 1;
115 | uint32_t binning_y = info_msg->binning_y ? info_msg->binning_y : 1;
116 | cv::Size binned_resolution{static_cast<int>(info_msg->width / binning_x),
117 | static_cast<int>(info_msg->height / binning_y)};
118 |
119 | // Collect ROI parameters from camera info
120 | auto roi_msg = info_msg->roi;
121 | if (roi_msg.x_offset == 0 && roi_msg.y_offset == 0 && roi_msg.width == 0 && roi_msg.height == 0) {
122 | // ROI of all zeroes is treated as full resolution
123 | roi_msg.width = info_msg->width;
124 | roi_msg.height = info_msg->height;
125 | }
126 | cv::Rect roi{static_cast<int>(roi_msg.x_offset), static_cast<int>(roi_msg.y_offset),
127 | static_cast<int>(roi_msg.width), static_cast<int>(roi_msg.height)};
128 |
129 | // Convert K, P, D from arrays to cv::Mat (R is omitted)
130 | cv::Matx33d K_mat{&info_msg->k[0]};
131 | cv::Matx34d P_mat{&info_msg->p[0]};
132 | cv::Mat_<double> D_mat(info_msg->d);
133 |
134 | // Since the ROI is given in full-image coordinates, adjust offset before scaling
135 | if (roi.x != 0 || roi.y != 0) {
136 | // Move principal point by the offset
137 | K_mat(0, 2) -= roi.x;
138 | K_mat(1, 2) -= roi.y;
139 | P_mat(0, 2) -= roi.x;
140 | P_mat(1, 2) -= roi.y;
141 | }
142 | if (binning_x > 1) {
143 | // Scale for pixel binning in x dimension
144 | K_mat(0, 0) /= binning_x;
145 | K_mat(0, 2) /= binning_x;
146 | P_mat(0, 0) /= binning_x;
147 | P_mat(0, 2) /= binning_x;
148 | P_mat(0, 3) /= binning_x;
149 | roi.x /= binning_x;
150 | roi.width /= binning_x;
151 | }
152 | if (binning_y > 1) {
153 | // Scale for pixel binning in y dimension
154 | K_mat(1, 1) /= binning_y;
155 | K_mat(1, 2) /= binning_y;
156 | P_mat(1, 1) /= binning_y;
157 | P_mat(1, 2) /= binning_y;
158 | P_mat(1, 3) /= binning_y;
159 | roi.y /= binning_y;
160 | roi.height /= binning_y;
161 | }
162 |
163 | // Create input/output CV Image
164 | auto image_ptr = cv_bridge::toCvCopy(image_msg);
165 |
166 | // Use VPI to rectify image
167 | RectifyImage(image_ptr, K_mat, P_mat, D_mat, info_msg->distortion_model, roi);
168 |
169 | // Allocate and publish new rectified image message
170 | pub_.publish(
171 | cv_bridge::CvImage(
172 | image_msg->header, image_msg->encoding,
173 | image_ptr->image).toImageMsg());
174 | }
175 |
176 | void RectifyNode::RectifyImage(
177 | cv_bridge::CvImagePtr & image_ptr, const cv::Matx33d & K_mat, const cv::Matx34d & P_mat,
178 | const cv::Mat_<double> & D_mat,
179 | const std::string & distortion_model,
180 | cv::Rect roi)
181 | {
182 | // Flag to track whether or not to regenerate warp map
183 | bool update_map = false;
184 |
185 | try {
186 | // Update warp map only when the camera intrinsics and extrinsics matrices are changed
187 | if (K_mat != curr_K_mat_) {
188 | curr_K_mat_ = K_mat;
189 | // Prepare camera intrinsics
190 | for (int i = 0; i < 2; ++i) {
191 | // Note that VPI doesn't require the bottom [0, 0, 1] row of intrinsic matrix
192 | for (int j = 0; j < 3; ++j) {
193 | K_[i][j] = static_cast(K_mat(i, j));
194 | }
195 | }
196 | update_map = true;
197 | }
198 | if (P_mat != curr_P_mat_) {
199 | curr_P_mat_ = P_mat;
200 | // Prepare camera extrinsics
201 | for (int i = 0; i < 3; ++i) {
202 | for (int j = 0; j < 4; ++j) {
203 | X_[i][j] = static_cast(P_mat(i, j));
204 | }
205 | }
206 | update_map = true;
207 | }
208 | if (curr_D_mat_.empty() || cv::countNonZero(D_mat != curr_D_mat_) > 0) {
209 | curr_D_mat_ = D_mat;
210 | update_map = true;
211 | }
212 |
213 | if (current_roi_ != roi) {
214 | current_roi_ = roi;
215 | update_map = true;
216 | }
217 |
218 | // If either map or the camera intrinsics and extrinsics changed, regenerate warp maps
219 | if (update_map) {
220 | VPIWarpMap map{};
221 | map.grid.numHorizRegions = 1;
222 | map.grid.numVertRegions = 1;
223 | map.grid.horizInterval[0] = 8;
224 | map.grid.vertInterval[0] = 8;
225 | map.grid.regionWidth[0] = roi.width;
226 | map.grid.regionHeight[0] = roi.height;
227 |
228 | CHECK_STATUS(vpiWarpMapAllocData(&map));
229 | if (distortion_model == sensor_msgs::distortion_models::PLUMB_BOB) {
230 | // The VPI Fisheye model uses 4 parameters, while the ROS2 Plumb Bob model uses 5 parameters
231 | // We drop the 5th parameter when converting from ROS2 to VPI
232 | VPIFisheyeLensDistortionModel fisheye{};
233 | fisheye.mapping = VPI_FISHEYE_EQUIDISTANT;
234 | fisheye.k1 = D_mat(0);
235 | fisheye.k2 = D_mat(1);
236 | fisheye.k3 = D_mat(2);
237 | fisheye.k4 = D_mat(3);
238 | CHECK_STATUS(
239 | vpiWarpMapGenerateFromFisheyeLensDistortionModel(K_, X_, K_, &fisheye, &map));
240 | } else if (distortion_model == sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL) {
241 | VPIPolynomialLensDistortionModel polynomial{};
242 | polynomial.k1 = D_mat(0);
243 | polynomial.k2 = D_mat(1);
244 | polynomial.k3 = D_mat(2);
245 | polynomial.k4 = D_mat(3);
246 | polynomial.k5 = D_mat(4);
247 | polynomial.k6 = D_mat(5);
248 | polynomial.p1 = D_mat(6);
249 | polynomial.p2 = D_mat(7);
250 | CHECK_STATUS(
251 | vpiWarpMapGenerateFromPolynomialLensDistortionModel(K_, X_, K_, &polynomial, &map));
252 | } else { // Unknown distortion model
253 | throw std::runtime_error("Unrecognized distortion model: " + distortion_model);
254 | }
255 |
256 | // Create remap to undistort based on the generated distortion map
257 | if (remap_ != nullptr) {
258 | vpiPayloadDestroy(remap_);
259 | }
260 | CHECK_STATUS(vpiCreateRemap(vpi_backends_, &map, &remap_));
261 |
262 | vpiWarpMapFreeData(&map);
263 | // Prepare temporary VPI images for conversion to NV12 format
264 | CHECK_STATUS(
265 | vpiImageCreate(
266 | roi.width, roi.height, VPI_IMAGE_FORMAT_NV12_ER, 0,
267 | &tmp_in_));
268 | CHECK_STATUS(
269 | vpiImageCreate(
270 | roi.width, roi.height, VPI_IMAGE_FORMAT_NV12_ER, 0,
271 | &tmp_out_));
272 | }
273 | const bool consistent_dimensions = (image_dims_.first == image_ptr->image.rows &&
274 | image_dims_.second == image_ptr->image.cols);
275 | if (image_ == nullptr || !consistent_dimensions) {
276 | // Image can be "destroyed" even if it is null
277 | vpiImageDestroy(image_);
278 | CHECK_STATUS(
279 | vpiImageCreateOpenCVMatWrapper(
280 | image_ptr->image, g_str_to_vpi_format.at(image_ptr->encoding), 0, &image_));
281 | image_dims_ = {image_ptr->image.rows, image_ptr->image.cols};
282 | } else {
283 | CHECK_STATUS(vpiImageSetWrappedOpenCVMat(image_, image_ptr->image));
284 | }
285 |
286 | // Convert input to NV12 format, using CUDA backend for best performance
287 | CHECK_STATUS(vpiSubmitConvertImageFormat(stream_, VPI_BACKEND_CUDA, image_, tmp_in_, nullptr));
288 |
289 | // Undistort image
290 | CHECK_STATUS(
291 | vpiSubmitRemap(
292 | stream_, vpi_backends_, remap_, tmp_in_, tmp_out_, interpolation_,
293 | VPI_BORDER_ZERO, 0));
294 |
295 | // Convert output to original format, writing to original image for in-place modification
296 | // Using CUDA backend for best performance
297 | CHECK_STATUS(vpiSubmitConvertImageFormat(stream_, VPI_BACKEND_CUDA, tmp_out_, image_, nullptr));
298 |
299 | // Wait until all operations are complete
300 | CHECK_STATUS(vpiStreamSync(stream_));
301 | } catch (std::runtime_error & e) {
302 | RCLCPP_ERROR(get_logger(), "Error while rectifying image: %s", e.what());
303 | }
304 | }
305 |
306 | } // namespace image_proc
307 | } // namespace isaac_ros
308 |
309 | // Register as a component
310 | #include "rclcpp_components/register_node_macro.hpp"
311 | RCLCPP_COMPONENTS_REGISTER_NODE(isaac_ros::image_proc::RectifyNode)
312 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # `isaac_ros_image_pipeline`
2 |
3 | ---
4 | **WARNING**
5 |
6 | Isaac ROS Image Pipeline has moved to [NVIDIA-ISAAC-ROS](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_image_pipeline) (2021-10-20). Please visit [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_image_pipeline) to find new development and file issues.
7 |
8 | ---
9 |
10 | ## Overview
11 | This metapackage offers similar functionality to the standard, CPU-based [`image_pipeline` metapackage](http://wiki.ros.org/image_pipeline), but does so by leveraging the Jetson platform's specialized computer vision hardware. Considerable effort has been made to ensure that replacing `image_pipeline` with `isaac_ros_image_pipeline` on a Jetson device is as painless a transition as possible.
12 |
13 | ## System Requirements
14 | This Isaac ROS package is designed and tested to be compatible with ROS2 Foxy on Jetson hardware.
15 | ### Jetson
16 | - AGX Xavier or Xavier NX
17 | - JetPack 4.6
18 |
19 | ### x86_64
20 | - CUDA 10.2/11.2 supported discrete GPU
21 | - VPI 1.1.11
22 | - Ubuntu 18.04+
23 |
24 | ### Docker
25 | Precompiled ROS2 Foxy packages are not available for JetPack 4.6 (based on Ubuntu 18.04 Bionic). You can either manually compile ROS2 Foxy and required dependent packages from source or use the Isaac ROS development Docker image from [Isaac ROS Common](https://github.com/NVIDIA-AI-IOT/isaac_ros_common), based on images from [jetson-containers](https://github.com/dusty-nv/jetson-containers). The Docker images support both Jetson and x86_64 platforms. The x86_64 Docker image includes VPI Debian packages for CUDA 11.2.
26 |
27 | Run the following script in `isaac_ros_common` to build the image and launch the container:
28 |
29 | `$ scripts/run_dev.sh `
30 |
31 | You can either provide an optional path to mirror in your host ROS workspace with Isaac ROS packages, which will be made available in the container as `/workspaces/isaac_ros-dev`, or you can set up a new workspace in the container.
32 |
33 | ### Package Dependencies
34 | - [isaac_ros_common](https://github.com/NVIDIA-AI-IOT/isaac_ros_common)
35 | - [image_common](https://github.com/ros-perception/image_common.git)
36 | - [vision_cv](https://github.com/ros-perception/vision_opencv.git)
37 | - [OpenCV 4.5+](https://opencv.org/)
38 | - [scikit-learn](https://scikit-learn.org/stable/install.html)
39 | - [Eigen3](https://eigen.tuxfamily.org/index.php?title=Main_Page)
40 |
41 | **Note:** `isaac_ros_common` is used for running tests and/or creating a development container. It also contains VPI Debian packages that can be installed natively on a development machine without a container.
42 |
43 | ## Quickstart
44 | 1. Create a ROS2 workspace if one is not already prepared:
45 | `mkdir -p your_ws/src`
46 | **Note:** The workspace can have any name; the quickstart assumes you name it `your_ws`.
47 | 2. Clone this metapackage repository to `your_ws/src/isaac_ros_image_pipeline`. Check that you have [Git LFS](https://git-lfs.github.com/) installed before cloning to pull down all large files.
48 | `cd your_ws/src && git clone https://github.com/NVIDIA-AI-IOT/isaac_ros_image_pipeline`
49 | 3. Build and source the workspace:
50 | `cd your_ws && colcon build --symlink-install && source install/setup.bash`
51 | 4. (Optional) Run tests to verify complete and correct installation:
52 | `colcon test`
53 | 5. Start `isaac_ros_image_proc` using the prebuilt executable:
54 | `ros2 run isaac_ros_image_proc isaac_ros_image_proc`
55 | 6. In a separate terminal, spin up a **calibrated** camera publisher to `/image_raw` and `/camera_info` using any package (for example, `v4l2_camera`):
56 | `ros2 run v4l2_camera v4l2_camera_node`
57 | 7. Observe the rectified image output in grayscale and color on `/image_rect` and `/image_rect_color`, respectively:
58 | `ros2 run image_view image_view --ros-args -r image:=image_rect`
59 | `ros2 run image_view image_view --ros-args -r image:=image_rect_color`
60 |
61 | ### Replacing `image_pipeline` with `isaac_ros_image_pipeline`
62 | 1. Add a dependency on `isaac_ros_image_pipeline` to `your_package/package.xml` and `your_package/CMakeLists.txt`. If all desired packages under an existing `image_pipeline` dependency have Isaac ROS alternatives (see **Supported Packages**), then the original `image_pipeline` dependency may be removed entirely.
63 | 2. Change the package and plugin names in any `*.launch.py` launch files to use `[package name]` and `isaac_ros::image_proc::[component_name]`, respectively, as sketched in the snippet below. For a list of all packages, see **Supported Packages**. For a list of all ROS2 Components made available, see the per-package detailed documentation below.
64 |
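As a minimal sketch of step 2 (the container, node name, and topic remapping below are illustrative, not prescribed by this package), swapping in the hardware-accelerated rectify component only changes the `package` and `plugin` fields of an existing `ComposableNode` description:

```python
from launch import LaunchDescription
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode


def generate_launch_description():
    # Only 'package' and 'plugin' differ from the equivalent image_proc description
    rectify_node = ComposableNode(
        package='isaac_ros_image_proc',               # was: 'image_proc'
        plugin='isaac_ros::image_proc::RectifyNode',  # was: 'image_proc::RectifyNode'
        name='rectify',
        remappings=[('image', 'image_raw')],
    )

    container = ComposableNodeContainer(
        name='rectify_container',
        namespace='',
        package='rclcpp_components',
        executable='component_container',
        composable_node_descriptions=[rectify_node],
        output='screen',
    )

    return LaunchDescription([container])
```

The same substitution applies to the other components listed under **Supported Packages**.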
65 | ## Supported Packages
66 | At this time, the packages under the standard `image_pipeline` have the following support:
67 |
68 | | Existing Package | Isaac ROS Alternative |
69 | | -------------------- | --------------------------------- |
70 | | `image_pipeline` | See `isaac_ros_image_pipeline` |
71 | | `image_proc` | See `isaac_ros_image_proc` |
72 | | `stereo_image_proc` | See `isaac_ros_stereo_image_proc` |
73 | | `depth_image_proc` | On roadmap |
74 | | `camera_calibration` | Continue using existing package |
75 | | `image_publisher` | Continue using existing package |
76 | | `image_view` | Continue using existing package |
77 | | `image_rotate` | Continue using existing package |
78 |
79 | See also:
80 | - `isaac_ros_apriltag`: Accelerated ROS2 wrapper for Apriltag detection
81 | - `isaac_ros_common`: Utilities for robust ROS2 testing, in conjunction with `launch_test`
82 |
83 | ## Tutorial - Stereo Image Pipeline
84 | 1. Connect a compatible Realsense camera (D435, D455) to your host machine.
85 | 2. Build and source the workspace:
86 | `cd your_ws && colcon build --symlink-install && source install/setup.bash`
87 | 3. Spin up the stereo image pipeline and Realsense camera node with the launchfile:
88 | `ros2 launch isaac_ros_stereo_image_proc isaac_ros_stereo_image_pipeline_launch.py`
89 |
90 | **Note:** For best performance on Jetson, ensure that power settings are configured appropriately ([Power Management for Jetson](https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/power_management_jetson_xavier.html#wwpID0EUHA)).
91 |
92 | # ROS2 Package API
93 | ## `isaac_ros_image_proc`
94 | ### Overview
95 | The `isaac_ros_image_proc` package offers functionality for rectifying/undistorting images from a monocular camera setup, resizing the image, and changing the image format. It largely replaces the `image_proc` package, though the image format conversion facility also functions as a way to replace the CPU-based image format conversion in `cv_bridge`.
96 |
97 | ### Available Components
98 |
99 | | Component | Topics Subscribed | Topics Published | Parameters |
100 | | -------------------------- | --------------------------------------------------- | ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
101 | | `ImageFormatConverterNode` | `image_raw`, `camera_info`: The input camera stream | `image`: The converted image | `backends`: The VPI backend to use, which is CUDA by default (options: "CPU", "CUDA", "VIC") <br> `encoding_desired`: Target encoding to convert to. Note: VIC does not support RGB8 and BGR8 for either input or output encoding. |
102 | | `RectifyNode` | `image`, `camera_info`: The input camera stream | `image_rect`: The rectified image | `interpolation`: The VPI interpolation scheme to use during undistortion, which is Catmull-Rom Spline by default <br> `backends`: The VPI backend to use, which is CUDA by default (options: "CUDA", "VIC") |
103 | | `ResizeNode` | `image`, `camera_info`: The input camera stream | `resized/image`, `resized/camera_info`: The resized camera stream | `use_relative_scale`: Whether to scale in a relative fashion, which is true by default <br> `scale_height`: The fraction to relatively scale height by <br> `scale_width`: The fraction to relatively scale width by <br> `height`: The absolute height to resize to <br> `width`: The absolute width to resize to <br> `backends`: The VPI backend to use, which is CUDA by default (options: "CPU", "CUDA", "VIC") |
104 |
105 | ## `isaac_ros_stereo_image_proc`
106 | ### Overview
107 | The `isaac_ros_stereo_image_proc` package offers functionality for handling image pairs from a binocular/stereo camera setup, calculating the disparity between the two images, and producing a point cloud with depth information. It largely replaces the `stereo_image_proc` package.
108 |
109 | ### Available Components
110 |
111 | | Component | Topics Subscribed | Topics Published | Parameters |
112 | | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
113 | | `DisparityNode` | `left/image_rect`, `left/camera_info`: The left camera stream <br> `right/image_rect`, `right/camera_info`: The right camera stream | `disparity`: The disparity between the two cameras | `max_disparity`: The maximum value for disparity per pixel, which is 64 by default. With the TEGRA backend, this value must be 256. <br> `window_size`: The window size for SGM, which is 5 by default <br> `backends`: The VPI backend to use, which is CUDA by default (options: "CUDA", "TEGRA") |
114 | | `PointCloudNode` | `left/image_rect_color`: The coloring for the point cloud <br> `left/camera_info`: The left camera info <br> `right/camera_info`: The right camera info <br> `disparity`: The disparity between the two cameras | `points2`: The output point cloud | `queue_size`: The length of the subscription queues, which is `rmw_qos_profile_default.depth` by default <br> `use_color`: Whether or not the output point cloud should have color. The default value is true. <br> `unit_scaling`: The amount to scale the xyz points by |
115 |
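As a rough consumer-side sketch (not part of this package, and assuming the default `points2` topic name), the x, y, z fields described above can be decoded directly from a `PointCloud2` message using its `point_step`, `row_step`, and field offsets; points with invalid disparity are filled with NaN (the `bad_point` value in the sources):

```python
import struct

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import PointCloud2


class PointCloudListener(Node):
    """Example subscriber that decodes x, y, z from the PointCloudNode output."""

    def __init__(self):
        super().__init__('point_cloud_listener')
        self.subscription = self.create_subscription(
            PointCloud2, 'points2', self.callback, 10)

    def callback(self, msg):
        # Byte offsets of each field inside a point record (x, y, z are FLOAT32)
        offsets = {field.name: field.offset for field in msg.fields}
        fmt = '>f' if msg.is_bigendian else '<f'
        for v in range(msg.height):
            for u in range(msg.width):
                base = v * msg.row_step + u * msg.point_step
                x, = struct.unpack_from(fmt, msg.data, base + offsets['x'])
                y, = struct.unpack_from(fmt, msg.data, base + offsets['y'])
                z, = struct.unpack_from(fmt, msg.data, base + offsets['z'])
                if z == z:  # skip NaN points produced for invalid disparities
                    self.get_logger().info(f'First valid point: ({x:.3f}, {y:.3f}, {z:.3f})')
                    return


def main():
    rclpy.init()
    rclpy.spin(PointCloudListener())


if __name__ == '__main__':
    main()
```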
116 |
117 | # References
118 | [1] D. Scharstein, H. Hirschmüller, Y. Kitajima, G. Krathwohl, N. Nesic, X. Wang, and P. Westling. [High-resolution stereo datasets with subpixel-accurate ground truth](http://www.cs.middlebury.edu/~schar/papers/datasets-gcpr2014.pdf). In German Conference on Pattern Recognition (GCPR 2014), Münster, Germany, September 2014.
119 |
--------------------------------------------------------------------------------
/isaac_ros_stereo_image_proc/src/point_cloud_node.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * NVIDIA CORPORATION and its licensors retain all intellectual property
5 | * and proprietary rights in and to this software, related documentation
6 | * and any modifications thereto. Any use, reproduction, disclosure or
7 | * distribution of this software and related documentation without an express
8 | * license agreement from NVIDIA CORPORATION is strictly prohibited.
9 | */
10 |
11 | #include "isaac_ros_stereo_image_proc/point_cloud_node.hpp"
12 |
13 | #include
14 | #include
15 |
16 | #include
17 | #include
18 | #include
19 |
20 | namespace isaac_ros
21 | {
22 | namespace stereo_image_proc
23 | {
24 | PointCloudNode::PointCloudNode(const rclcpp::NodeOptions & options)
25 | : Node("point_cloud_node", options)
26 | {
27 | // Make sure assumption of 32 bit floating point value is valid
28 | if (!std::numeric_limits<float>::is_iec559) {
29 | throw std::runtime_error(
30 | "Hardware does not support 32-bit IEEE754 floating point standard");
31 | }
32 |
33 | initializeParameters();
34 | setupSubscribers();
35 | setupPublishers();
36 | }
37 |
38 | PointCloudNode::~PointCloudNode()
39 | {
40 | cudaStreamDestroy(stream_);
41 | cudaFree(intrinsics_);
42 | cudaFree(cloud_properties_);
43 | cudaFree(disparity_image_buffer_);
44 | cudaFree(rgb_image_buffer_);
45 | cudaFree(pointcloud_data_buffer_);
46 | }
47 |
48 | void PointCloudNode::initializeParameters()
49 | {
50 | // Declare ROS parameters
51 | queue_size_ = declare_parameter("queue_size", rmw_qos_profile_default.depth);
52 | use_color_ = declare_parameter("use_color", false);
53 | unit_scaling_ = declare_parameter("unit_scaling", 1.0f);
54 | }
55 |
56 | void PointCloudNode::setupSubscribers()
57 | {
58 | using namespace std::placeholders;
59 |
60 | // Initialize message sync policy
61 | exact_sync_.reset(
62 | new ExactSync(
63 | ExactPolicy(queue_size_),
64 | sub_left_image_, sub_left_info_,
65 | sub_right_info_, sub_disparity_));
66 |
67 | exact_sync_->registerCallback(
68 | std::bind(&PointCloudNode::image_cb, this, _1, _2, _3, _4));
69 |
70 | // Subscribe to the relevant topics
71 | sub_left_image_.subscribe(this, "left/image_rect_color");
72 | sub_left_info_.subscribe(this, "left/camera_info");
73 | sub_right_info_.subscribe(this, "right/camera_info");
74 | sub_disparity_.subscribe(this, "disparity");
75 | }
76 |
77 | void PointCloudNode::setupPublishers()
78 | {
79 | // Create a publisher to the relevant topic
80 | pub_points2_ = create_publisher<sensor_msgs::msg::PointCloud2>("points2", 1);
81 | }
82 |
83 | void PointCloudNode::image_cb(
84 | const sensor_msgs::msg::Image::ConstSharedPtr & left_image_msg,
85 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & left_info_msg,
86 | const sensor_msgs::msg::CameraInfo::ConstSharedPtr & right_info_msg,
87 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg)
88 | {
89 | auto cloud_msg = std::make_shared<sensor_msgs::msg::PointCloud2>();
90 | formatPointCloudMessage(cloud_msg, disp_msg);
91 |
92 | updateCudaBuffers(cloud_msg, left_image_msg, disp_msg);
93 |
94 | // Update camera model to get reprojection matrix
95 | model_.fromCameraInfo(left_info_msg, right_info_msg);
96 | updateIntrinsics(disp_msg);
97 |
98 | // Reinterpret the point cloud data buffer as a float
99 | float * pointcloud_data_buffer = reinterpret_cast<float *>(pointcloud_data_buffer_);
100 | updateCloudProperties(cloud_msg);
101 |
102 | // Determine the image format of the disparity image and convert
103 | // point cloud according to the datatype
104 | if (disp_msg->image.encoding == sensor_msgs::image_encodings::TYPE_8UC1 ||
105 | disp_msg->image.encoding == sensor_msgs::image_encodings::MONO8)
106 | {
107 | convertDisparityToPointCloud<uint8_t>(pointcloud_data_buffer, disp_msg);
108 | } else if (disp_msg->image.encoding == sensor_msgs::image_encodings::TYPE_16UC1) {
109 | convertDisparityToPointCloud<uint16_t>(pointcloud_data_buffer, disp_msg);
110 | } else if (disp_msg->image.encoding == sensor_msgs::image_encodings::MONO16) {
111 | convertDisparityToPointCloud<uint16_t>(pointcloud_data_buffer, disp_msg);
112 | } else if (disp_msg->image.encoding == sensor_msgs::image_encodings::TYPE_32FC1) {
113 | convertDisparityToPointCloud<float>(pointcloud_data_buffer, disp_msg);
114 | } else {
115 | RCLCPP_ERROR(
116 | this->get_logger(),
117 | "Unsupported image encoding [%s]. Not publishing",
118 | disp_msg->image.encoding.c_str());
119 | return;
120 | }
121 |
122 | if (use_color_) {
123 | addColorToPointCloud(pointcloud_data_buffer, left_image_msg);
124 | }
125 |
126 | // Wait for CUDA to finish before continuing
127 | cudaError_t cuda_result = cudaStreamSynchronize(stream_);
128 | checkCudaErrors(cuda_result);
129 |
130 | copyPointCloudBufferToMessage(cloud_msg);
131 |
132 | pub_points2_->publish(*cloud_msg);
133 | }
134 |
135 | void PointCloudNode::formatPointCloudMessage(
136 | sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg,
137 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg)
138 | {
139 | cloud_msg->header = disp_msg->header;
140 | cloud_msg->height = disp_msg->image.height;
141 | cloud_msg->width = disp_msg->image.width;
142 | cloud_msg->is_bigendian = false;
143 | cloud_msg->is_dense = false;
144 |
145 | sensor_msgs::PointCloud2Modifier pc2_modifier(*cloud_msg);
146 |
147 | // DC = don't care, all items are 32-bit floats
148 | if (use_color_) {
149 | // Data format: x, y, z, rgb
150 | // 16 bytes per point
151 | pc2_modifier.setPointCloud2Fields(
152 | 4,
153 | "x", 1, sensor_msgs::msg::PointField::FLOAT32,
154 | "y", 1, sensor_msgs::msg::PointField::FLOAT32,
155 | "z", 1, sensor_msgs::msg::PointField::FLOAT32,
156 | "rgb", 1, sensor_msgs::msg::PointField::FLOAT32);
157 | } else {
158 | // Data format: x, y, z
159 | // 12 bytes per point
160 | pc2_modifier.setPointCloud2Fields(
161 | 3,
162 | "x", 1, sensor_msgs::msg::PointField::FLOAT32,
163 | "y", 1, sensor_msgs::msg::PointField::FLOAT32,
164 | "z", 1, sensor_msgs::msg::PointField::FLOAT32);
165 | }
166 | }
167 |
168 | void PointCloudNode::updateCudaBuffers(
169 | const sensor_msgs::msg::PointCloud2::ConstSharedPtr & cloud_msg,
170 | const sensor_msgs::msg::Image::ConstSharedPtr & left_image_msg,
171 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg)
172 | {
173 | // Calculate the size of each buffer (in bytes)
174 | int cloud_size = cloud_msg->row_step * cloud_msg->height;
175 | int rgb_image_size = left_image_msg->step * left_image_msg->height;
176 | int disparity_image_size = disp_msg->image.step * disp_msg->image.height;
177 | cudaError_t cuda_result;
178 |
179 | if (!cuda_initialized_) {
180 | initializeCuda(cloud_size, rgb_image_size, disparity_image_size);
181 | cuda_initialized_ = true;
182 | return;
183 | }
184 |
185 | // Checks if every buffer is the correct size. If not, update it.
186 | if (disparity_image_buffer_size_ < disparity_image_size) {
187 | cuda_result = cudaFree(disparity_image_buffer_);
188 | checkCudaErrors(cuda_result);
189 |
190 | cuda_result =
191 | cudaMallocManaged(&disparity_image_buffer_, disparity_image_size, cudaMemAttachHost);
192 | checkCudaErrors(cuda_result);
193 |
194 | cuda_result = cudaStreamAttachMemAsync(stream_, disparity_image_buffer_);
195 | checkCudaErrors(cuda_result);
196 |
197 | disparity_image_buffer_size_ = disparity_image_size;
198 | }
199 |
200 | // If color isn't necessary, avoid allocating the buffer
201 | if (rgb_image_buffer_size_ < rgb_image_size && use_color_) {
202 | cuda_result = cudaFree(rgb_image_buffer_);
203 | checkCudaErrors(cuda_result);
204 |
205 | cuda_result = cudaMallocManaged(&rgb_image_buffer_, rgb_image_size, cudaMemAttachHost);
206 | checkCudaErrors(cuda_result);
207 |
208 | cuda_result = cudaStreamAttachMemAsync(stream_, rgb_image_buffer_);
209 | checkCudaErrors(cuda_result);
210 |
211 | rgb_image_buffer_size_ = rgb_image_size;
212 | }
213 |
214 | if (pointcloud_data_buffer_size_ < cloud_size) {
215 | cuda_result = cudaFree(pointcloud_data_buffer_);
216 | checkCudaErrors(cuda_result);
217 |
218 | cuda_result = cudaMallocManaged(&pointcloud_data_buffer_, cloud_size, cudaMemAttachHost);
219 | checkCudaErrors(cuda_result);
220 |
221 | cuda_result = cudaStreamAttachMemAsync(stream_, pointcloud_data_buffer_);
222 | checkCudaErrors(cuda_result);
223 |
224 | pointcloud_data_buffer_size_ = cloud_size;
225 | }
226 |
227 | cuda_result = cudaStreamSynchronize(stream_);
228 | checkCudaErrors(cuda_result);
229 | }
230 |
231 | void PointCloudNode::initializeCuda(int cloud_size, int rgb_image_size, int disparity_image_size)
232 | {
233 | // Allocates memory in GPU for every struct and buffer needed in the GPU
234 | cudaError_t cuda_result = cudaMallocManaged(&intrinsics_, sizeof(nvPointCloudIntrinsics));
235 | checkCudaErrors(cuda_result);
236 |
237 | cuda_result = cudaMallocManaged(&cloud_properties_, sizeof(nvPointCloudProperties));
238 | checkCudaErrors(cuda_result);
239 |
240 | cuda_result =
241 | cudaMallocManaged(&disparity_image_buffer_, disparity_image_size, cudaMemAttachHost);
242 | checkCudaErrors(cuda_result);
243 | disparity_image_buffer_size_ = disparity_image_size;
244 |
245 | // If color isn't necessary, don't allocate the buffer
246 | if (use_color_) {
247 | cuda_result = cudaMallocManaged(&rgb_image_buffer_, rgb_image_size, cudaMemAttachHost);
248 | checkCudaErrors(cuda_result);
249 | rgb_image_buffer_size_ = rgb_image_size;
250 | }
251 |
252 | cuda_result = cudaMallocManaged(&pointcloud_data_buffer_, cloud_size, cudaMemAttachHost);
253 | checkCudaErrors(cuda_result);
254 | pointcloud_data_buffer_size_ = cloud_size;
255 |
256 | // Create the CUDA streams; do both just in case color parameter changes
257 | cuda_result = cudaStreamCreate(&stream_);
258 | checkCudaErrors(cuda_result);
259 |
260 | if (use_color_) {
261 | cuda_result = cudaStreamAttachMemAsync(stream_, rgb_image_buffer_);
262 | checkCudaErrors(cuda_result);
263 | }
264 |
265 | cuda_result = cudaStreamAttachMemAsync(stream_, disparity_image_buffer_);
266 | checkCudaErrors(cuda_result);
267 |
268 | cuda_result = cudaStreamAttachMemAsync(stream_, pointcloud_data_buffer_);
269 | checkCudaErrors(cuda_result);
270 |
271 | cuda_result = cudaStreamSynchronize(stream_);
272 | checkCudaErrors(cuda_result);
273 | }
274 |
275 | inline void PointCloudNode::_checkCudaErrors(
276 | cudaError_t result, const char * filename, int line_number)
277 | {
278 | if (result != cudaSuccess) {
279 | RCLCPP_ERROR(
280 | this->get_logger(),
281 | "CUDA Error: [%s] (error code: [%d]) at [%s] in line [%d]",
282 | cudaGetErrorString(result), result, filename, line_number);
283 |
284 | throw std::runtime_error(
285 | "CUDA Error: " + std::string(cudaGetErrorString(result)) + " (error code: " +
286 | std::to_string(result) + ") at " + std::string(filename) + " in line " +
287 | std::to_string(line_number));
288 | }
289 | }
290 |
291 | void PointCloudNode::updateIntrinsics(
292 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg)
293 | {
294 | const cv::Matx44d reproject_matrix = model_.reprojectionMatrix();
295 |
296 | // Only getting the relevant entries
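// The reprojection (Q) matrix from image_geometry is sparse; the entries copied below are the
// only ones it populates, so [X Y Z W]^T = Q * [u v disparity 1]^T can be evaluated from them alone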
297 | intrinsics_->reproject_matrix_rows = 4;
298 | intrinsics_->reproject_matrix_cols = 4;
299 | intrinsics_->reproject_matrix[0][0] = reproject_matrix(0, 0);
300 | intrinsics_->reproject_matrix[0][3] = reproject_matrix(0, 3);
301 | intrinsics_->reproject_matrix[1][1] = reproject_matrix(1, 1);
302 | intrinsics_->reproject_matrix[1][3] = reproject_matrix(1, 3);
303 | intrinsics_->reproject_matrix[2][3] = reproject_matrix(2, 3);
304 | intrinsics_->reproject_matrix[3][2] = reproject_matrix(3, 2);
305 | intrinsics_->reproject_matrix[3][3] = reproject_matrix(3, 3);
306 |
307 | intrinsics_->height = disp_msg->image.height;
308 | intrinsics_->width = disp_msg->image.width;
309 | intrinsics_->unit_scaling = unit_scaling_;
310 | }
311 |
312 | void PointCloudNode::updateCloudProperties(
313 | const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg)
314 | {
315 | // The offsets are given in bytes, but the cloud data will be in floats
316 | // The byte unit conversion is to ensure that the parameters work with a float array
317 | const int byte_unit_conversion_factor = sizeof(float);
318 | cloud_properties_->point_row_step = cloud_msg->row_step / byte_unit_conversion_factor;
319 | cloud_properties_->point_step = cloud_msg->point_step / byte_unit_conversion_factor;
320 | cloud_properties_->x_offset = cloud_msg->fields[0].offset / byte_unit_conversion_factor;
321 | cloud_properties_->y_offset = cloud_msg->fields[1].offset / byte_unit_conversion_factor;
322 | cloud_properties_->z_offset = cloud_msg->fields[2].offset / byte_unit_conversion_factor;
323 | cloud_properties_->is_bigendian = cloud_msg->is_bigendian;
324 | cloud_properties_->bad_point = std::numeric_limits<float>::quiet_NaN();
325 |
326 | if (use_color_) {
327 | cloud_properties_->rgb_offset = cloud_msg->fields[3].offset / byte_unit_conversion_factor;
328 | }
329 |
330 | cloud_properties_->buffer_size = cloud_msg->row_step * cloud_msg->height;
331 | }
332 |
333 | void PointCloudNode::copyPointCloudBufferToMessage(
334 | sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg)
335 | {
336 | // Resize the cloud message buffer and then copy from GPU to CPU
337 | cloud_msg->data.resize(cloud_properties_->buffer_size);
338 | cudaError_t cuda_result = cudaMemcpyAsync(
339 | cloud_msg->data.data(),
340 | pointcloud_data_buffer_,
341 | cloud_properties_->buffer_size, cudaMemcpyDeviceToHost, stream_);
342 | checkCudaErrors(cuda_result);
343 |
344 | cuda_result = cudaStreamSynchronize(stream_);
345 | checkCudaErrors(cuda_result);
346 | }
347 |
348 | template<typename T>
349 | void PointCloudNode::convertDisparityToPointCloud(
350 | float * pointcloud_data,
351 | const stereo_msgs::msg::DisparityImage::ConstSharedPtr & disp_msg)
352 | {
353 | int disparity_image_size = disp_msg->image.step * disp_msg->image.height;
354 |
355 | // Redundant sanity check to ensure that memory accesses are valid
356 | if (disparity_image_buffer_size_ < disparity_image_size ||
357 | pointcloud_data_buffer_size_ < cloud_properties_->buffer_size)
358 | {
359 | RCLCPP_ERROR(
360 | this->get_logger(),
361 | "Size allocated to CUDA buffer cannot fit message's data");
362 | throw std::runtime_error(
363 | "Size allocated to CUDA buffer cannot fit message's data");
364 | }
365 |
366 | // Copy the disparity image into GPU
367 | cudaError_t cuda_result;
368 | cuda_result = cudaMemcpyAsync(
369 | disparity_image_buffer_,
370 | disp_msg->image.data.data(),
371 | disparity_image_size, cudaMemcpyHostToDevice,
372 | stream_);
373 | checkCudaErrors(cuda_result);
374 |
375 | cuda_result = cudaStreamSynchronize(stream_);
376 | checkCudaErrors(cuda_result);
377 |
378 | // Reinterpret the disparity buffer as template type T
379 | // And adjust the step size accordingly
380 | const T * disparity_buffer = reinterpret_cast<const T *>(disparity_image_buffer_);
381 | const int disparity_row_step = disp_msg->image.step / sizeof(T);
382 |
383 | PointCloudNode_CUDA::convertDisparityToPointCloud(
384 | pointcloud_data,
385 | disparity_buffer, disparity_row_step, intrinsics_, cloud_properties_, stream_);
386 | }
387 |
388 | void PointCloudNode::addColorToPointCloud(
389 | float * pointcloud_data,
390 | const sensor_msgs::msg::Image::ConstSharedPtr & rgb_image_msg)
391 | {
392 | const int rgb_image_size = rgb_image_msg->step * rgb_image_msg->height;
393 |
394 | // Redundant sanity check to ensure that memory accesses are valid
395 | if (rgb_image_buffer_size_ < rgb_image_size ||
396 | pointcloud_data_buffer_size_ < cloud_properties_->buffer_size)
397 | {
398 | RCLCPP_ERROR(
399 | this->get_logger(),
400 | "Size allocated to CUDA buffer cannot fit message's data");
401 | throw std::runtime_error("Size allocated to CUDA buffer cannot fit message's data");
402 | }
403 |
404 | // Copy the RGB image into GPU
405 | cudaError_t cuda_result;
406 | cuda_result = cudaMemcpyAsync(
407 | rgb_image_buffer_,
408 | rgb_image_msg->data.data(),
409 | rgb_image_size, cudaMemcpyHostToDevice,
410 | stream_);
411 | checkCudaErrors(cuda_result);
412 |
413 | cuda_result = cudaStreamSynchronize(stream_);
414 | checkCudaErrors(cuda_result);
415 |
416 | int red_offset, green_offset, blue_offset, color_step;
417 | const int rgb_row_step = rgb_image_msg->step;
418 |
419 | // Determine the R, G, B offsets and the pixel step size based on the image encoding
420 | if (rgb_image_msg->encoding == sensor_msgs::image_encodings::RGB8) {
421 | red_offset = 0;
422 | green_offset = 1;
423 | blue_offset = 2;
424 | color_step = 3;
425 | } else if (rgb_image_msg->encoding == sensor_msgs::image_encodings::BGR8) {
426 | red_offset = 2;
427 | green_offset = 1;
428 | blue_offset = 0;
429 | color_step = 3;
430 | } else if (rgb_image_msg->encoding == sensor_msgs::image_encodings::MONO8) {
431 | red_offset = 0;
432 | green_offset = 0;
433 | blue_offset = 0;
434 | color_step = 1;
435 | } else {
436 | RCLCPP_ERROR(
437 | this->get_logger(),
438 | "Unsupported data encoding [%s]. Publishing without color",
439 | rgb_image_msg->encoding.c_str());
440 | return;
441 | }
442 |
443 | PointCloudNode_CUDA::addColorToPointCloud(
444 | pointcloud_data, rgb_image_buffer_,
445 | rgb_row_step, red_offset, green_offset, blue_offset,
446 | color_step, intrinsics_, cloud_properties_, stream_);
447 | }
448 | } // namespace stereo_image_proc
449 | } // namespace isaac_ros
450 |
451 | #include "rclcpp_components/register_node_macro.hpp"
452 | RCLCPP_COMPONENTS_REGISTER_NODE(isaac_ros::stereo_image_proc::PointCloudNode)
453 |
--------------------------------------------------------------------------------