├── .editorconfig
├── .gitignore
├── CMakeLists.txt
├── README.md
├── config
└── .gitkeep
├── launch
├── detectnet.ros1.launch
├── detectnet.ros2.launch
├── imagenet.ros1.launch
├── imagenet.ros2.launch
├── segnet.ros1.launch
├── segnet.ros2.launch
├── video_output.ros1.launch
├── video_output.ros2.launch
├── video_source.ros1.launch
├── video_source.ros2.launch
├── video_viewer.ros1.launch
└── video_viewer.ros2.launch
├── package.ros1.xml
├── package.ros2.xml
├── package.xml
├── ros_deep_learning_nodelets.xml
└── src
├── image_converter.cpp
├── image_converter.h
├── node_detectnet.cpp
├── node_imagenet.cpp
├── node_segnet.cpp
├── node_video_output.cpp
├── node_video_source.cpp
├── nodelet_imagenet.cpp
├── ros_compat.cpp
└── ros_compat.h
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | [*.{c,cpp,cu,h,hpp,inl}]
4 | indent_style = tab
5 | indent_size = 5
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | package.xml
2 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | 
2 | cmake_minimum_required(VERSION 3.5)
3 | project(ros_deep_learning)
4 | 
5 | # locate dependencies
6 | find_package(jetson-utils REQUIRED)
7 | find_package(jetson-inference REQUIRED)
8 | 
9 | find_package(CUDA REQUIRED)  # legacy FindCUDA module; provides CUDA_INCLUDE_DIRS used below
10 | 
11 | # detect ROS1 vs ROS2
12 | find_package(catkin QUIET)
13 | 
14 | # detect ROS version
15 | string(TOUPPER $ENV{ROS_DISTRO} ROS_DISTRO)  # NOTE(review): unquoted $ENV{ROS_DISTRO} — configure errors out if the env var is unset; prefer "$ENV{ROS_DISTRO}"
16 | set(ROS_DISTRO "ROS_${ROS_DISTRO}")  # e.g. "ROS_HUMBLE"; baked into the build as a preprocessor macro below
17 | message("detected ROS_DISTRO=${ROS_DISTRO}")
18 | 
19 | find_package(VPI 2.0)  # optional (no REQUIRED); result unused below — presumably satisfies a transitive jetson-inference need, TODO confirm
20 | link_directories(/usr/lib/aarch64-linux-gnu/tegra)  # hardcoded Jetson/Tegra driver path (aarch64-only); directory-scoped so every target below sees it
21 | 
22 | if( catkin_FOUND )
23 | message("detected ROS1 (catkin_make)")
24 | 
25 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/package.ros1.xml ${CMAKE_CURRENT_SOURCE_DIR}/package.xml COPYONLY)  # NOTE(review): writes package.xml into the SOURCE tree (hence the .gitignore entry)
26 | 
27 | find_package(catkin REQUIRED COMPONENTS
28 | image_transport
29 | roscpp
30 | sensor_msgs
31 | vision_msgs
32 | std_msgs
33 | )
34 | 
35 | catkin_package(
36 | LIBRARIES ros_deep_learning_nodelets
37 | CATKIN_DEPENDS nodelet roscpp image_transport sensor_msgs  # NOTE(review): nodelet exported here but absent from the find_package(catkin) COMPONENTS above — verify
38 | )
39 | 
40 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") # enable c++11 (TensorRT requirement)
41 | include_directories(${catkin_INCLUDE_DIRS} ${CUDA_INCLUDE_DIRS})  # directory-scoped; modern CMake would prefer target_include_directories
42 | add_definitions(-DROS1 -DROS_DISTRO=${ROS_DISTRO})  # presumably consumed by src/ros_compat.h to select the ROS1 code path — TODO confirm
43 | 
44 | else()
45 | find_package(ament_cmake QUIET)
46 | 
47 | if( ament_cmake_FOUND )
48 | message("detected ROS2 (ament_cmake)")
49 | 
50 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/package.ros2.xml ${CMAKE_CURRENT_SOURCE_DIR}/package.xml COPYONLY)
51 | 
52 | find_package(rclcpp REQUIRED)
53 | find_package(std_msgs REQUIRED)
54 | find_package(sensor_msgs REQUIRED)
55 | find_package(vision_msgs REQUIRED)
56 | 
57 | set(CMAKE_CXX_STANDARD 14)  # ROS2 path builds as C++14 (vs the -std=c++11 flag appended on the ROS1 path)
58 | include_directories(${CUDA_INCLUDE_DIRS})
59 | add_definitions(-DROS2 -DROS_DISTRO=${ROS_DISTRO})
60 | 
61 | else()
62 | message(FATAL_ERROR "could not find either ROS1 (catkin_make) or ROS2 (ament_cmake) packages")
63 | endif()
64 | endif()
65 | 
66 | # sources shared across nodes
67 | set(common_src src/image_converter.cpp src/ros_compat.cpp)
68 | 
69 | # inference nodes
70 | add_executable(imagenet src/node_imagenet.cpp ${common_src})
71 | target_link_libraries(imagenet ${catkin_LIBRARIES} jetson-inference)  # ${catkin_LIBRARIES} expands empty on ROS2, so this line serves both paths
72 | 
73 | add_executable(detectnet src/node_detectnet.cpp ${common_src})
74 | target_link_libraries(detectnet ${catkin_LIBRARIES} jetson-inference)
75 | 
76 | add_executable(segnet src/node_segnet.cpp ${common_src})
77 | target_link_libraries(segnet ${catkin_LIBRARIES} jetson-inference)
78 | 
79 | add_executable(video_source src/node_video_source.cpp ${common_src})
80 | target_link_libraries(video_source ${catkin_LIBRARIES} jetson-inference)
81 | 
82 | add_executable(video_output src/node_video_output.cpp ${common_src})
83 | target_link_libraries(video_output ${catkin_LIBRARIES} jetson-inference)
84 | 
85 | if( catkin_FOUND )
86 | add_library(ros_deep_learning_nodelets src/nodelet_imagenet.cpp src/image_converter.cpp)  # NOTE(review): omits src/ros_compat.cpp that the executables share — confirm intentional
87 | target_link_libraries(ros_deep_learning_nodelets ${catkin_LIBRARIES} jetson-inference)
88 | 
89 | if(catkin_EXPORTED_LIBRARIES)
90 | add_dependencies(ros_deep_learning_nodelets ${catkin_EXPORTED_LIBRARIES})  # build ordering only (e.g. generated message targets); not a link dependency
91 | endif()
92 | else()
93 | ament_target_dependencies(imagenet rclcpp std_msgs sensor_msgs vision_msgs)
94 | install(TARGETS imagenet DESTINATION lib/${PROJECT_NAME})
95 | 
96 | ament_target_dependencies(detectnet rclcpp std_msgs sensor_msgs vision_msgs)
97 | install(TARGETS detectnet DESTINATION lib/${PROJECT_NAME})
98 | 
99 | ament_target_dependencies(segnet rclcpp std_msgs sensor_msgs vision_msgs)
100 | install(TARGETS segnet DESTINATION lib/${PROJECT_NAME})
101 | 
102 | ament_target_dependencies(video_source rclcpp std_msgs sensor_msgs vision_msgs)
103 | install(TARGETS video_source DESTINATION lib/${PROJECT_NAME})
104 | 
105 | ament_target_dependencies(video_output rclcpp std_msgs sensor_msgs vision_msgs)
106 | install(TARGETS video_output DESTINATION lib/${PROJECT_NAME})
107 | 
108 | install(DIRECTORY launch DESTINATION share/${PROJECT_NAME}/)
109 | 
110 | ament_package()  # must be the last call on the ament path
111 | endif()
112 | 
113 | 
114 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DNN Inference Nodes for ROS/ROS2
2 | This package contains DNN inference nodes and camera/video streaming nodes for ROS/ROS2 with support for NVIDIA **[Jetson Nano / TX1 / TX2 / Xavier / Orin](https://developer.nvidia.com/embedded-computing)** devices and TensorRT.
3 |
4 | The nodes use the image recognition, object detection, and semantic segmentation DNN's from the [`jetson-inference`](https://github.com/dusty-nv/jetson-inference) library and NVIDIA [Hello AI World](https://github.com/dusty-nv/jetson-inference#hello-ai-world) tutorial, which come with several built-in pretrained networks for classification, detection, and segmentation and the ability to load customized user-trained models.
5 |
6 | The camera & video streaming nodes support the following [input/output interfaces](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md):
7 |
8 | * MIPI CSI cameras
9 | * V4L2 cameras
10 | * RTP / RTSP streams
11 | * WebRTC streams
12 | * Videos & Images
13 | * Image sequences
14 | * OpenGL windows
15 |
16 | Various distributions of ROS are supported, either from source or through containers (including Melodic, Noetic, Foxy, Galactic, Humble, and Iron). The same branch supports both ROS1 and ROS2.
17 |
18 | ### Table of Contents
19 |
20 | * [Installation](#installation)
21 | * [Testing](#testing)
22 | * [Video Viewer](#video-viewer)
23 | * [imagenet Node](#imagenet-node)
24 | * [detectnet Node](#detectnet-node)
25 | * [segnet Node](#segnet-node)
26 | * [Topics & Parameters](#topics--parameters)
27 | * [imagenet Node](#imagenet-node-1)
28 | * [detectnet Node](#detectnet-node-1)
29 | * [segnet Node](#segnet-node-1)
30 | * [video_source Node](#video_source-node)
31 | * [video_output Node](#video_output-node)
32 |
33 | ## Installation
34 |
35 | The easiest way to get up and running is by cloning [jetson-inference](https://github.com/dusty-nv/jetson-inference) (which ros_deep_learning is a submodule of) and running the pre-built container, which automatically mounts the required model directories and devices:
36 |
37 | ``` bash
38 | $ git clone --recursive --depth=1 https://github.com/dusty-nv/jetson-inference
39 | $ cd jetson-inference
40 | $ docker/run.sh --ros=humble # noetic, foxy, galactic, humble, iron
41 | ```
42 |
43 | > **note**: the ros_deep_learning nodes rely on data from the jetson-inference tree for storing models, so clone and mount `jetson-inference/data` if you're using your own container or source installation method.
44 |
45 | The `--ros` argument to the [`docker/run.sh`](https://github.com/dusty-nv/jetson-inference/blob/master/docker/run.sh) script selects the ROS distro to use. These in turn use the `ros:$ROS_DISTRO-pytorch` container images from [jetson-containers](https://github.com/dusty-nv/jetson-containers), which include jetson-inference and this package.
46 |
47 | For information about building the ros_deep_learning package for an uncontainerized ROS installation, expand the section below (the parts about installing ROS may require adapting for the particular version of ROS/ROS2 that you want to install).
48 |
49 |
50 | Legacy Install Instructions
51 |
52 | ### jetson-inference
53 |
54 | These ROS nodes use the DNN objects from the [`jetson-inference`](https://github.com/dusty-nv/jetson-inference) project (aka Hello AI World). To build and install jetson-inference, see [this page](https://github.com/dusty-nv/jetson-inference/blob/master/docs/building-repo-2.md) or run the commands below:
55 |
56 | ```bash
57 | $ cd ~
58 | $ sudo apt-get install git cmake
59 | $ git clone --recursive --depth=1 https://github.com/dusty-nv/jetson-inference
60 | $ cd jetson-inference
61 | $ mkdir build
62 | $ cd build
63 | $ cmake ../
64 | $ make -j$(nproc)
65 | $ sudo make install
66 | $ sudo ldconfig
67 | ```
68 | Before proceeding, it's worthwhile to test that `jetson-inference` is working properly on your system by following this step of the Hello AI World tutorial:
69 | * [Classifying Images with ImageNet](https://github.com/dusty-nv/jetson-inference/blob/master/docs/imagenet-console-2.md)
70 |
71 | ### ROS/ROS2
72 |
73 | Install the `ros-melodic-ros-base` or `ros-eloquent-ros-base` package on your Jetson following these directions:
74 |
75 | * ROS Melodic - [ROS Install Instructions](http://wiki.ros.org/melodic/Installation/Ubuntu)
76 | * ROS2 Eloquent - [ROS2 Install Instructions](https://index.ros.org/doc/ros2/Installation/Eloquent/Linux-Install-Debians/)
77 |
78 | Depending on which version of ROS you're using, install some additional dependencies and create a workspace:
79 |
80 | #### ROS Melodic
81 | ```bash
82 | $ sudo apt-get install ros-melodic-image-transport ros-melodic-vision-msgs
83 | ```
84 |
85 | For ROS Melodic, create a Catkin workspace (`~/ros_workspace`) using these steps:
86 | http://wiki.ros.org/ROS/Tutorials/InstallingandConfiguringROSEnvironment#Create_a_ROS_Workspace
87 |
88 | #### ROS Eloquent
89 | ```bash
90 | $ sudo apt-get install ros-eloquent-vision-msgs \
91 | ros-eloquent-launch-xml \
92 | ros-eloquent-launch-yaml \
93 | python3-colcon-common-extensions
94 | ```
95 |
96 | For ROS Eloquent, create a workspace (`~/ros_workspace`) to use:
97 |
98 | ```bash
99 | $ mkdir -p ~/ros_workspace/src
100 | ```
101 |
102 | ### ros_deep_learning
103 |
104 | Next, navigate into your ROS workspace's `src` directory and clone `ros_deep_learning`:
105 |
106 | ```bash
107 | $ cd ~/ros_workspace/src
108 | $ git clone https://github.com/dusty-nv/ros_deep_learning
109 | ```
110 |
111 | Then build it - if you are using ROS Melodic, use `catkin_make`. If you are using ROS2 Eloquent, use `colcon build`:
112 |
113 | ```bash
114 | $ cd ~/ros_workspace/
115 |
116 | # ROS Melodic
117 | $ catkin_make
118 | $ source devel/setup.bash
119 |
120 | # ROS2 Eloquent
121 | $ colcon build
122 | $ source install/local_setup.bash
123 | ```
124 |
125 | The nodes should now be built and ready to use. Remember to source the overlay as shown above so that ROS can find the nodes.
126 |
127 |
128 |
129 | ## Testing
130 |
131 | Before proceeding, if you're using ROS Melodic make sure that `roscore` is running first:
132 |
133 | ```bash
134 | $ roscore
135 | ```
136 |
137 | If you're using ROS2, running the core service is no longer required.
138 |
139 | ### Video Viewer
140 |
141 | First, it's recommended to test that you can stream a video feed using the [`video_source`](#video_source-node) and [`video_output`](#video_output-node) nodes. See [Camera Streaming & Multimedia](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md) for valid input/output streams, and substitute your desired `input` and `output` argument below. For example, you can use video files for the input or output, or use V4L2 cameras instead of MIPI CSI cameras. You can also use RTP/RTSP streams over the network.
142 |
143 | ```bash
144 | # ROS
145 | $ roslaunch ros_deep_learning video_viewer.ros1.launch input:=csi://0 output:=display://0
146 |
147 | # ROS2
148 | $ ros2 launch ros_deep_learning video_viewer.ros2.launch input:=csi://0 output:=display://0
149 | ```
150 |
151 | ### imagenet Node
152 |
153 | You can launch a classification demo with the following commands - substitute your desired camera or video path to the `input` argument below (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md) for valid input/output streams).
154 |
155 | Note that the `imagenet` node also publishes classification metadata on the `imagenet/classification` topic in a [`vision_msgs/Classification2D`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/Classification2D.html) message -- see the [Topics & Parameters](#imagenet-node-1) section below for more info.
156 |
157 | ```bash
158 | # ROS
159 | $ roslaunch ros_deep_learning imagenet.ros1.launch input:=csi://0 output:=display://0
160 |
161 | # ROS2
162 | $ ros2 launch ros_deep_learning imagenet.ros2.launch input:=csi://0 output:=display://0
163 | ```
164 |
165 | ### detectnet Node
166 |
167 | To launch an object detection demo, substitute your desired camera or video path to the `input` argument below (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md) for valid input/output streams). Note that the `detectnet` node also publishes the metadata in a `vision_msgs/Detection2DArray` message -- see the [Topics & Parameters](#detectnet-node-1) section below for more info.
168 |
169 | 
170 |
171 | ```bash
172 | # ROS
173 | $ roslaunch ros_deep_learning detectnet.ros1.launch input:=csi://0 output:=display://0
174 |
175 | # ROS2
176 | $ ros2 launch ros_deep_learning detectnet.ros2.launch input:=csi://0 output:=display://0
177 | ```
178 |
179 | ### segnet Node
180 |
181 | To launch a semantic segmentation demo, substitute your desired camera or video path to the `input` argument below (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md) for valid input/output streams). Note that the `segnet` node also publishes raw segmentation results to the `segnet/class_mask` topic -- see the [Topics & Parameters](#segnet-node-1) section below for more info.
182 |
183 | ```bash
184 | # ROS
185 | $ roslaunch ros_deep_learning segnet.ros1.launch input:=csi://0 output:=display://0
186 |
187 | # ROS2
188 | $ ros2 launch ros_deep_learning segnet.ros2.launch input:=csi://0 output:=display://0
189 | ```
190 |
191 | ## Topics & Parameters
192 |
193 | Below are the message topics and parameters that each node implements.
194 |
195 | ### imagenet Node
196 |
197 | | Topic Name | I/O | Message Type | Description |
198 | |----------------|:------:|--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|
199 | | image_in | Input | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Raw input image |
200 | | classification | Output | [`vision_msgs/Classification2D`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/Classification2D.html) | Classification results (class ID + confidence) |
201 | | vision_info | Output | [`vision_msgs/VisionInfo`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/VisionInfo.html) | Vision metadata (class labels parameter list name) |
202 | | overlay | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Input image overlayed with the classification results |
203 |
204 | | Parameter Name | Type | Default | Description |
205 | |-------------------|:----------------:|:-------------:|--------------------------------------------------------------------------------------------------------------------|
206 | | model_name | `string` | `"googlenet"` | Built-in model name (see [here](https://github.com/dusty-nv/jetson-inference#image-recognition) for valid values) |
207 | | model_path | `string` | `""` | Path to custom caffe or ONNX model |
208 | | prototxt_path | `string` | `""` | Path to custom caffe prototxt file |
209 | | input_blob | `string` | `"data"` | Name of DNN input layer |
210 | | output_blob | `string` | `"prob"` | Name of DNN output layer |
211 | | class_labels_path | `string` | `""` | Path to custom class labels file |
212 | | class_labels_HASH | `vector` | class names | List of class labels, where HASH is model-specific (actual name of parameter is found via the `vision_info` topic) |
213 |
214 | ### detectnet Node
215 |
216 | | Topic Name | I/O | Message Type | Description |
217 | |-------------|:------:|--------------------------------------------------------------------------------------------------------------|------------------------------------------------------------|
218 | | image_in | Input | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Raw input image |
219 | | detections | Output | [`vision_msgs/Detection2DArray`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/Detection2DArray.html) | Detection results (bounding boxes, class IDs, confidences) |
220 | | vision_info | Output | [`vision_msgs/VisionInfo`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/VisionInfo.html) | Vision metadata (class labels parameter list name) |
221 | | overlay | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Input image overlayed with the detection results |
222 |
223 | | Parameter Name | Type | Default | Description |
224 | |-------------------|:----------------:|:--------------------:|--------------------------------------------------------------------------------------------------------------------|
225 | | model_name | `string` | `"ssd-mobilenet-v2"` | Built-in model name (see [here](https://github.com/dusty-nv/jetson-inference#object-detection) for valid values) |
226 | | model_path | `string` | `""` | Path to custom caffe or ONNX model |
227 | | prototxt_path | `string` | `""` | Path to custom caffe prototxt file |
228 | | input_blob | `string` | `"data"` | Name of DNN input layer |
229 | | output_cvg | `string` | `"coverage"` | Name of DNN output layer (coverage/scores) |
230 | | output_bbox | `string` | `"bboxes"` | Name of DNN output layer (bounding boxes) |
231 | | class_labels_path | `string` | `""` | Path to custom class labels file |
232 | | class_labels_HASH | `vector` | class names | List of class labels, where HASH is model-specific (actual name of parameter is found via the `vision_info` topic) |
233 | | overlay_flags | `string` | `"box,labels,conf"` | Flags used to generate the overlay (some combination of `none,box,labels,conf`) |
234 | | mean_pixel_value | `float` | 0.0 | Mean pixel subtraction value to be applied to input (normally 0) |
235 | | threshold | `float` | 0.5 | Minimum confidence value for positive detections (0.0 - 1.0) |
236 |
237 | ### segnet Node
238 |
239 | | Topic Name | I/O | Message Type | Description |
240 | |-------------|:------:|--------------------------------------------------------------------------------------------------|----------------------------------------------------------|
241 | | image_in | Input | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Raw input image |
242 | | vision_info | Output | [`vision_msgs/VisionInfo`](http://docs.ros.org/melodic/api/vision_msgs/html/msg/VisionInfo.html) | Vision metadata (class labels parameter list name) |
243 | | overlay | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Input image overlayed with the classification results |
244 | | color_mask | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Colorized segmentation class mask out |
245 | | class_mask | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | 8-bit single-channel image where each pixel is a classID |
246 |
247 | | Parameter Name | Type | Default | Description |
248 | |-------------------|:----------------:|:------------------------------------:|-----------------------------------------------------------------------------------------------------------------------|
249 | | model_name | `string` | `"fcn-resnet18-cityscapes-1024x512"` | Built-in model name (see [here](https://github.com/dusty-nv/jetson-inference#semantic-segmentation) for valid values) |
250 | | model_path | `string` | `""` | Path to custom caffe or ONNX model |
251 | | prototxt_path | `string` | `""` | Path to custom caffe prototxt file |
252 | | input_blob | `string` | `"data"` | Name of DNN input layer |
253 | | output_blob | `string` | `"score_fr_21classes"` | Name of DNN output layer |
254 | | class_colors_path | `string` | `""` | Path to custom class colors file |
255 | | class_labels_path | `string` | `""` | Path to custom class labels file |
256 | | class_labels_HASH | `vector` | class names | List of class labels, where HASH is model-specific (actual name of parameter is found via the `vision_info` topic) |
257 | | mask_filter | `string` | `"linear"` | Filtering to apply to color_mask topic (`linear` or `point`) |
258 | | overlay_filter | `string` | `"linear"` | Filtering to apply to overlay topic (`linear` or `point`) |
259 | | overlay_alpha | `float` | `180.0` | Alpha blending value used by overlay topic (0.0 - 255.0) |
260 |
261 | ### video_source Node
262 |
263 | | Topic Name | I/O | Message Type | Description |
264 | |------------|:------:|----------------------------------------------------------------------------------------|-------------------------|
265 | | raw | Output | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Raw output image (BGR8) |
266 |
267 | | Parameter | Type | Default | Description |
268 | |----------------|:--------:|:-----------:|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
269 | | resource | `string` | `"csi://0"` | Input stream URI (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#input-streams) for valid protocols) |
270 | | codec | `string` | `""` | Manually specify codec for compressed streams (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#input-options) for valid values) |
271 | | width | `int` | 0 | Manually specify desired width of stream (0 = stream default) |
272 | | height | `int` | 0 | Manually specify desired height of stream (0 = stream default) |
273 | | framerate | `int` | 0 | Manually specify desired framerate of stream (0 = stream default) |
274 | | loop | `int` | 0 | For video files: `0` = don't loop, `>0` = # of loops, `-1` = loop forever |
275 | | flip | `string` | `""` | Set the flip method for MIPI CSI cameras (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#input-options) for valid values) |
276 | ### video_output Node
277 |
278 | | Topic Name | I/O | Message Type | Description |
279 | |------------|:-----:|----------------------------------------------------------------------------------------|-----------------|
280 | | image_in | Input | [`sensor_msgs/Image`](http://docs.ros.org/melodic/api/sensor_msgs/html/msg/Image.html) | Raw input image |
281 |
282 | | Parameter | Type | Default | Description |
283 | |----------------|:--------:|:---------------:|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
284 | | resource | `string` | `"display://0"` | Output stream URI (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#output-streams) for valid protocols) |
285 | | codec | `string` | `"h264"` | Codec used for compressed streams (see [here](https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#input-options) for valid values) |
286 | | bitrate | `int` | 4000000 | Target VBR bitrate of encoded streams (in bits per second) |
287 |
288 |
--------------------------------------------------------------------------------
/config/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dusty-nv/ros_deep_learning/5229849ff50cf43bb9c7c3e0f3ae21b3646b68e0/config/.gitkeep
--------------------------------------------------------------------------------
/launch/detectnet.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/launch/detectnet.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/launch/imagenet.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/launch/imagenet.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/launch/segnet.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/launch/segnet.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/launch/video_output.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/launch/video_output.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/launch/video_source.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/launch/video_source.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/launch/video_viewer.ros1.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/launch/video_viewer.ros2.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/package.ros1.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | ros_deep_learning
4 | 0.0.0
5 | Deep-learning nodes for ROS with support for NVIDIA Jetson and TensorRT, based on the jetson-inference library
6 | Dustin Franklin
7 | Open
8 |
9 | catkin
10 | cv_bridge
11 | image_transport
12 | roscpp
13 | sensor_msgs
14 | std_msgs
15 | nodelet
16 | cv_bridge
17 | image_transport
18 | roscpp
19 | sensor_msgs
20 | std_msgs
21 | nodelet
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/package.ros2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | ros_deep_learning
4 | 0.0.0
5 | Deep-learning nodes for ROS with support for NVIDIA Jetson and TensorRT, based on the jetson-inference library
6 | Dustin Franklin
7 | Open
8 |
9 | ament_cmake
10 |
11 | ament_index_cpp
12 | rclcpp
13 | std_msgs
14 | sensor_msgs
15 | vision_msgs
16 |
17 |
18 | ament_cmake
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | ros_deep_learning
4 | 0.0.0
5 | Deep-learning nodes for ROS with support for NVIDIA Jetson and TensorRT, based on the jetson-inference library
6 | Dustin Franklin
7 | Open
8 |
9 | catkin
10 | cv_bridge
11 | image_transport
12 | roscpp
13 | sensor_msgs
14 | std_msgs
15 | nodelet
16 | cv_bridge
17 | image_transport
18 | roscpp
19 | sensor_msgs
20 | std_msgs
21 | nodelet
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/ros_deep_learning_nodelets.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | A node to classify images according to ILSVRC classes
4 |
5 |
6 |
--------------------------------------------------------------------------------
/src/image_converter.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "image_converter.h"
24 |
25 | #include
26 | #include
27 |
28 |
29 |
30 | static imageFormat imageFormatFromEncoding( const std::string& encoding )
31 | {
32 | if( encoding == sensor_msgs::image_encodings::BGR8 )
33 | return IMAGE_BGR8;
34 | else if( encoding == sensor_msgs::image_encodings::BGRA8 )
35 | return IMAGE_BGRA8;
36 | else if( encoding == sensor_msgs::image_encodings::RGB8 )
37 | return IMAGE_RGB8;
38 | else if( encoding == sensor_msgs::image_encodings::RGBA8 )
39 | return IMAGE_RGBA8;
40 | else if( encoding == sensor_msgs::image_encodings::MONO8 )
41 | return IMAGE_GRAY8;
42 | else if( encoding == sensor_msgs::image_encodings::YUV422 )
43 | return IMAGE_UYVY;
44 | else if( encoding == sensor_msgs::image_encodings::BAYER_RGGB8 )
45 | return IMAGE_BAYER_RGGB;
46 | else if( encoding == sensor_msgs::image_encodings::BAYER_BGGR8 )
47 | return IMAGE_BAYER_BGGR;
48 | else if( encoding == sensor_msgs::image_encodings::BAYER_GBRG8 )
49 | return IMAGE_BAYER_GBRG;
50 | else if( encoding == sensor_msgs::image_encodings::BAYER_GRBG8 )
51 | return IMAGE_BAYER_GRBG;
52 |
53 | return IMAGE_UNKNOWN;
54 | }
55 |
56 | static std::string imageFormatToEncoding( imageFormat fmt )
57 | {
58 | switch(fmt)
59 | {
60 | case IMAGE_BGR8: return sensor_msgs::image_encodings::BGR8;
61 | case IMAGE_BGRA8: return sensor_msgs::image_encodings::BGRA8;
62 | case IMAGE_RGB8: return sensor_msgs::image_encodings::RGB8;
63 | case IMAGE_RGBA8: return sensor_msgs::image_encodings::RGBA8;
64 | case IMAGE_GRAY8: return sensor_msgs::image_encodings::MONO8;
65 | case IMAGE_UYVY: return sensor_msgs::image_encodings::YUV422;
66 | case IMAGE_BAYER_RGGB: return sensor_msgs::image_encodings::BAYER_RGGB8;
67 | case IMAGE_BAYER_BGGR: return sensor_msgs::image_encodings::BAYER_BGGR8;
68 | case IMAGE_BAYER_GBRG: return sensor_msgs::image_encodings::BAYER_GBRG8;
69 | case IMAGE_BAYER_GRBG: return sensor_msgs::image_encodings::BAYER_GRBG8;
70 | }
71 |
72 | return "invalid";
73 | }
74 |
75 |
76 | // constructor
77 | imageConverter::imageConverter()
78 | {
79 | mWidth = 0;
80 | mHeight = 0;
81 | mSizeInput = 0;
82 | mSizeOutput = 0;
83 |
84 | mInputCPU = NULL;
85 | mInputGPU = NULL;
86 |
87 | mOutputCPU = NULL;
88 | mOutputGPU = NULL;
89 | }
90 |
91 |
// destructor — release any CUDA buffers still held
imageConverter::~imageConverter()
{
	Free();
}
97 |
98 |
99 | // Free
100 | void imageConverter::Free()
101 | {
102 | if( mInputCPU != NULL )
103 | {
104 | CUDA(cudaFreeHost(mInputCPU));
105 |
106 | mInputCPU = NULL;
107 | mInputGPU = NULL;
108 | }
109 |
110 | if( mOutputCPU != NULL )
111 | {
112 | CUDA(cudaFreeHost(mOutputCPU));
113 |
114 | mOutputCPU = NULL;
115 | mOutputGPU = NULL;
116 | }
117 | }
118 |
// Convert
// Upload a ROS image message into mapped CPU/GPU memory and convert it
// into the internal CUDA format (InternalFormat = IMAGE_RGB8).
// Returns false if the encoding is unsupported, allocation fails,
// or the CUDA conversion fails.
bool imageConverter::Convert( const sensor_msgs::ImageConstPtr& input )
{
	ROS_DEBUG("converting %ux%u %s image", input->width, input->height, input->encoding.c_str());

	// parse the input format
	const imageFormat input_format = imageFormatFromEncoding(input->encoding);

	if( input_format == IMAGE_UNKNOWN )
	{
		ROS_ERROR("image encoding %s is not a compatible format to use with ros_deep_learning", input->encoding.c_str());
		return false;
	}

	// assure memory allocation (no-op if buffers already match this size/format)
	if( !Resize(input->width, input->height, input_format) )
		return false;

	// copy input to shared memory — mInputCPU is zero-copy mapped, so the GPU
	// sees these pixels through mInputGPU without an explicit cudaMemcpy
	memcpy(mInputCPU, input->data.data(), imageFormatSize(input_format, input->width, input->height));

	// convert image format (writes the internal-format result into mOutputGPU)
	if( CUDA_FAILED(cudaConvertColor(mInputGPU, input_format, mOutputGPU, InternalFormat, input->width, input->height)) )
	{
		ROS_ERROR("failed to convert %ux%u image (from %s to %s) with CUDA", mWidth, mHeight, imageFormatToStr(input_format), imageFormatToStr(InternalFormat));
		return false;
	}

	return true;
}
149 |
150 |
// Convert
// Convenience overload — converts the converter's own internal image
// (ImageGPU()) into the requested format and packs it into the ROS msg.
bool imageConverter::Convert( sensor_msgs::Image& msg, imageFormat format )
{
	return Convert(msg, format, ImageGPU());
}
156 |
157 |
158 | // Convert
159 | bool imageConverter::Convert( sensor_msgs::Image& msg, imageFormat format, PixelType* imageGPU )
160 | {
161 | if( !mInputCPU || !imageGPU || mWidth == 0 || mHeight == 0 || mSizeInput == 0 || mSizeOutput == 0 )
162 | return false;
163 |
164 | // perform colorspace conversion into the desired encoding
165 | // in this direction, we reverse use of input/output pointers
166 | if( CUDA_FAILED(cudaConvertColor(imageGPU, InternalFormat, mInputGPU, format, mWidth, mHeight)) )
167 | {
168 | ROS_ERROR("failed to convert %ux%u image (from %s to %s) with CUDA", mWidth, mHeight, imageFormatToStr(InternalFormat), imageFormatToStr(format));
169 | return false;
170 | }
171 |
172 | // calculate size of the msg
173 | const size_t msg_size = imageFormatSize(format, mWidth, mHeight);
174 |
175 | // allocate msg storage
176 | msg.data.resize(msg_size);
177 |
178 | // copy the converted image into the msg
179 | if (format != IMAGE_GRAY8)
180 | {
181 | memcpy(msg.data.data(), mInputCPU, msg_size);
182 | }
183 | else
184 | {
185 | memcpy(msg.data.data(), mOutputCPU, msg_size);
186 | }
187 |
188 | // populate metadata
189 | msg.width = mWidth;
190 | msg.height = mHeight;
191 | msg.step = (mWidth * imageFormatDepth(format)) / 8;
192 |
193 | msg.encoding = imageFormatToEncoding(format);
194 | msg.is_bigendian = false;
195 |
196 | return true;
197 | }
198 |
199 |
200 | // Resize
201 | bool imageConverter::Resize( uint32_t width, uint32_t height, imageFormat inputFormat )
202 | {
203 | const size_t input_size = imageFormatSize(inputFormat, width, height);
204 | const size_t output_size = imageFormatSize(InternalFormat, width, height);
205 |
206 | if( input_size != mSizeInput || output_size != mSizeOutput || mWidth != width || mHeight != height )
207 | {
208 | Free();
209 |
210 | if( !cudaAllocMapped((void**)&mInputCPU, (void**)&mInputGPU, input_size) ||
211 | !cudaAllocMapped((void**)&mOutputCPU, (void**)&mOutputGPU, output_size) )
212 | {
213 | ROS_ERROR("failed to allocate memory for %ux%u image conversion", width, height);
214 | return false;
215 | }
216 |
217 | ROS_INFO("allocated CUDA memory for %ux%u image conversion", width, height);
218 |
219 | mWidth = width;
220 | mHeight = height;
221 | mSizeInput = input_size;
222 | mSizeOutput = output_size;
223 | }
224 |
225 | return true;
226 | }
227 |
228 |
229 |
--------------------------------------------------------------------------------
/src/image_converter.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #ifndef __ROS_DEEP_LEARNING_IMAGE_CONVERTER_H_
24 | #define __ROS_DEEP_LEARNING_IMAGE_CONVERTER_H_
25 |
26 | #include
27 | #include
28 |
29 | #include "ros_compat.h"
30 |
31 |
/**
 * GPU image conversion between ROS sensor_msgs images and CUDA buffers.
 * Input images are staged through zero-copy (mapped CPU/GPU) memory.
 */
class imageConverter
{
public:
	/**
	 * Output image pixel type (matches InternalFormat = IMAGE_RGB8)
	 */
	typedef uchar3 PixelType;

	/**
	 * Image format used for internal CUDA processing
	 */
	static const imageFormat InternalFormat = IMAGE_RGB8;

	/**
	 * Image format used for outputting ROS image messages
	 */
	static const imageFormat ROSOutputFormat = IMAGE_BGR8;

	/**
	 * Constructor
	 */
	imageConverter();

	/**
	 * Destructor
	 */
	~imageConverter();

	/**
	 * Free the memory
	 */
	void Free();

	/**
	 * Convert an input ROS image message into the internal CUDA buffer
	 * (InternalFormat, i.e. IMAGE_RGB8 / uchar3 — see Convert() in the .cpp)
	 */
	bool Convert( const sensor_msgs::ImageConstPtr& input );

	/**
	 * Convert the internal image (ImageGPU()) to a ROS sensor_msgs::Image message
	 */
	bool Convert( sensor_msgs::Image& msg_out, imageFormat outputFormat );

	/**
	 * Convert the given GPU image to a ROS sensor_msgs::Image message
	 */
	bool Convert( sensor_msgs::Image& msg_out, imageFormat outputFormat, PixelType* imageGPU );

	/**
	 * Resize the memory (if necessary)
	 */
	bool Resize( uint32_t width, uint32_t height, imageFormat inputFormat );

	/**
	 * Retrieve the converted image width
	 */
	inline uint32_t GetWidth() const		{ return mWidth; }

	/**
	 * Retrieve the converted image height
	 */
	inline uint32_t GetHeight() const		{ return mHeight; }

	/**
	 * Retrieve the GPU pointer of the converted image
	 */
	inline PixelType* ImageGPU() const		{ return mOutputGPU; }

private:

	uint32_t mWidth;		// current image width (pixels)
	uint32_t mHeight;		// current image height (pixels)
	size_t   mSizeInput;	// byte size of the input buffer
	size_t   mSizeOutput;	// byte size of the internal-format buffer

	void* mInputCPU;		// zero-copy host pointer (input staging)
	void* mInputGPU;		// device alias of mInputCPU

	PixelType* mOutputCPU;	// zero-copy host pointer (internal-format image)
	PixelType* mOutputGPU;	// device alias of mOutputCPU
};
116 |
117 | #endif
118 |
119 |
--------------------------------------------------------------------------------
/src/node_detectnet.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 | #include "image_converter.h"
25 |
26 | #include
27 |
28 | #include
29 |
30 |
// globals
detectNet* net = NULL;							// TensorRT object detection network
uint32_t overlay_flags = detectNet::OVERLAY_NONE;	// bitmask parsed from the overlay_flags param

imageConverter* input_cvt = NULL;		// converts incoming ROS images to CUDA memory
imageConverter* overlay_cvt = NULL;		// converts the rendered overlay back to a ROS image

Publisher detection_pub = NULL;	// publishes Detection2DArray on "detections"
Publisher overlay_pub = NULL;		// publishes the annotated image on "overlay"
Publisher info_pub = NULL;		// publishes VisionInfo to newly-connected subscribers

vision_msgs::VisionInfo info_msg;	// class-label metadata, populated in main()
43 |
44 |
// triggered when a new subscriber connected —
// re-sends the VisionInfo msg so late joiners receive the metadata
void info_callback()
{
	ROS_INFO("new subscriber connected to vision_info topic, sending VisionInfo msg");
	info_pub->publish(info_msg);
}
51 |
52 |
53 | // publish overlay image
54 | bool publish_overlay( detectNet::Detection* detections, int numDetections )
55 | {
56 | // get the image dimensions
57 | const uint32_t width = input_cvt->GetWidth();
58 | const uint32_t height = input_cvt->GetHeight();
59 |
60 | // assure correct image size
61 | if( !overlay_cvt->Resize(width, height, imageConverter::ROSOutputFormat) )
62 | return false;
63 |
64 | // generate the overlay
65 | if( !net->Overlay(input_cvt->ImageGPU(), overlay_cvt->ImageGPU(), width, height,
66 | imageConverter::InternalFormat, detections, numDetections, overlay_flags) )
67 | {
68 | return false;
69 | }
70 |
71 | // populate the message
72 | sensor_msgs::Image msg;
73 |
74 | if( !overlay_cvt->Convert(msg, imageConverter::ROSOutputFormat) )
75 | return false;
76 |
77 | // populate timestamp in header field
78 | msg.header.stamp = ROS_TIME_NOW();
79 |
80 | // publish the message
81 | overlay_pub->publish(msg);
82 | ROS_DEBUG("publishing %ux%u overlay image", width, height);
83 | }
84 |
85 |
// input image subscriber callback —
// runs detection on each incoming frame and publishes the results
void img_callback( const sensor_msgs::ImageConstPtr input )
{
	// convert the image to reside on GPU
	if( !input_cvt || !input_cvt->Convert(input) )
	{
		ROS_INFO("failed to convert %ux%u %s image", input->width, input->height, input->encoding.c_str());
		return;
	}

	// run object detection on the image
	// (detections points into memory owned by the network — do not free)
	detectNet::Detection* detections = NULL;

	const int numDetections = net->Detect(input_cvt->ImageGPU(), input_cvt->GetWidth(), input_cvt->GetHeight(), &detections, detectNet::OVERLAY_NONE);

	// verify success (negative return indicates an error)
	if( numDetections < 0 )
	{
		ROS_ERROR("failed to run object detection on %ux%u image", input->width, input->height);
		return;
	}

	// if objects were detected, send out message
	if( numDetections > 0 )
	{
		ROS_INFO("detected %i objects in %ux%u image", numDetections, input->width, input->height);

		// create a detection for each bounding box
		vision_msgs::Detection2DArray msg;

		for( int n=0; n < numDetections; n++ )
		{
			detectNet::Detection* det = detections + n;

			ROS_INFO("object %i class #%u (%s) confidence=%f", n, det->ClassID, net->GetClassDesc(det->ClassID), det->Confidence);
			ROS_INFO("object %i bounding box (%f, %f) (%f, %f) w=%f h=%f", n, det->Left, det->Top, det->Right, det->Bottom, det->Width(), det->Height());

			// create a detection sub-message
			vision_msgs::Detection2D detMsg;

			detMsg.bbox.size_x = det->Width();
			detMsg.bbox.size_y = det->Height();

			float cx, cy;
			det->Center(&cx, &cy);

			// the bbox center field moved under .position in newer vision_msgs
#if ROS_DISTRO >= ROS_HUMBLE
			detMsg.bbox.center.position.x = cx;
			detMsg.bbox.center.position.y = cy;
#else
			detMsg.bbox.center.x = cx;
			detMsg.bbox.center.y = cy;
#endif

			detMsg.bbox.center.theta = 0.0f;     // TODO optionally output object image

			// create classification hypothesis
			vision_msgs::ObjectHypothesisWithPose hyp;

			// ObjectHypothesisWithPose was restructured in ROS2 Galactic
#if ROS_DISTRO >= ROS_GALACTIC
			hyp.hypothesis.class_id = det->ClassID;
			hyp.hypothesis.score = det->Confidence;
#else
			hyp.id = det->ClassID;
			hyp.score = det->Confidence;
#endif

			detMsg.results.push_back(hyp);
			msg.detections.push_back(detMsg);
		}

		// populate timestamp in header field
		msg.header.stamp = ROS_TIME_NOW();

		// publish the detection message
		detection_pub->publish(msg);
	}

	// generate the overlay (if there are subscribers)
	if( ROS_NUM_SUBSCRIBERS(overlay_pub) > 0 )
		publish_overlay(detections, numDetections);
}
168 |
169 |
// node main loop
int main(int argc, char **argv)
{
	/*
	 * create node instance
	 */
	ROS_CREATE_NODE("detectnet");

	/*
	 * declare parameters (with their default values)
	 */
	std::string model_name = "ssd-mobilenet-v2";
	std::string model_path;
	std::string prototxt_path;
	std::string class_labels_path;

	std::string input_blob = DETECTNET_DEFAULT_INPUT;
	std::string output_cvg = DETECTNET_DEFAULT_COVERAGE;
	std::string output_bbox = DETECTNET_DEFAULT_BBOX;
	std::string overlay_str = "box,labels,conf";

	float mean_pixel = 0.0f;
	float threshold = DETECTNET_DEFAULT_THRESHOLD;

	ROS_DECLARE_PARAMETER("model_name", model_name);
	ROS_DECLARE_PARAMETER("model_path", model_path);
	ROS_DECLARE_PARAMETER("prototxt_path", prototxt_path);
	ROS_DECLARE_PARAMETER("class_labels_path", class_labels_path);
	ROS_DECLARE_PARAMETER("input_blob", input_blob);
	ROS_DECLARE_PARAMETER("output_cvg", output_cvg);
	ROS_DECLARE_PARAMETER("output_bbox", output_bbox);
	ROS_DECLARE_PARAMETER("overlay_flags", overlay_str);
	ROS_DECLARE_PARAMETER("mean_pixel_value", mean_pixel);
	ROS_DECLARE_PARAMETER("threshold", threshold);


	/*
	 * retrieve parameters
	 */
	ROS_GET_PARAMETER("model_name", model_name);
	ROS_GET_PARAMETER("model_path", model_path);
	ROS_GET_PARAMETER("prototxt_path", prototxt_path);
	ROS_GET_PARAMETER("class_labels_path", class_labels_path);
	ROS_GET_PARAMETER("input_blob", input_blob);
	ROS_GET_PARAMETER("output_cvg", output_cvg);
	ROS_GET_PARAMETER("output_bbox", output_bbox);
	ROS_GET_PARAMETER("overlay_flags", overlay_str);
	ROS_GET_PARAMETER("mean_pixel_value", mean_pixel);
	ROS_GET_PARAMETER("threshold", threshold);

	// parse the comma-separated overlay string (e.g. "box,labels,conf") into a bitmask
	overlay_flags = detectNet::OverlayFlagsFromStr(overlay_str.c_str());


	/*
	 * load object detection network
	 */
	if( model_path.size() > 0 )
	{
		// create network using custom model paths
		net = detectNet::Create(prototxt_path.c_str(), model_path.c_str(),
						    mean_pixel, class_labels_path.c_str(), threshold,
						    input_blob.c_str(), output_cvg.c_str(), output_bbox.c_str());
	}
	else
	{
		// create network using the built-in model
		net = detectNet::Create(model_name.c_str());
	}

	if( !net )
	{
		ROS_ERROR("failed to load detectNet model");
		return 0;
	}


	/*
	 * create the class labels parameter vector
	 */
	std::hash model_hasher;  // hash the model path to avoid collisions on the param server
	std::string model_hash_str = std::string(net->GetModelPath()) + std::string(net->GetClassPath());
	const size_t model_hash = model_hasher(model_hash_str);

	ROS_INFO("model hash => %zu", model_hash);
	ROS_INFO("hash string => %s", model_hash_str.c_str());

	// obtain the list of class descriptions
	std::vector class_descriptions;
	const uint32_t num_classes = net->GetNumClasses();

	for( uint32_t n=0; n < num_classes; n++ )
		class_descriptions.push_back(net->GetClassDesc(n));

	// create the key on the param server
	std::string class_key = std::string("class_labels_") + std::to_string(model_hash);

	ROS_DECLARE_PARAMETER(class_key, class_descriptions);
	ROS_SET_PARAMETER(class_key, class_descriptions);

	// populate the vision info msg (points consumers at the class-labels param)
	std::string node_namespace = ROS_GET_NAMESPACE();
	ROS_INFO("node namespace => %s", node_namespace.c_str());

	info_msg.database_location = node_namespace + std::string("/") + class_key;
	info_msg.database_version = 0;
	info_msg.method = net->GetModelPath();

	ROS_INFO("class labels => %s", info_msg.database_location.c_str());


	/*
	 * create image converter objects
	 */
	input_cvt = new imageConverter();
	overlay_cvt = new imageConverter();

	if( !input_cvt || !overlay_cvt )
	{
		ROS_ERROR("failed to create imageConverter objects");
		return 0;
	}


	/*
	 * advertise publisher topics
	 */
	ROS_CREATE_PUBLISHER(vision_msgs::Detection2DArray, "detections", 25, detection_pub);
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "overlay", 2, overlay_pub);

	// vision_info is re-sent to each new subscriber via info_callback()
	ROS_CREATE_PUBLISHER_STATUS(vision_msgs::VisionInfo, "vision_info", 1, info_callback, info_pub);


	/*
	 * subscribe to image topic
	 */
	auto img_sub = ROS_CREATE_SUBSCRIBER(sensor_msgs::Image, "image_in", 5, img_callback);


	/*
	 * wait for messages
	 */
	ROS_INFO("detectnet node initialized, waiting for messages");
	ROS_SPIN();


	/*
	 * free resources
	 */
	delete net;
	delete input_cvt;
	delete overlay_cvt;

	return 0;
}
324 |
325 |
--------------------------------------------------------------------------------
/src/node_imagenet.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 | #include "image_converter.h"
25 |
26 | #include
27 | #include
28 |
29 | #include
30 |
31 |
// globals
imageNet* net = NULL;		// TensorRT image classification network
cudaFont* font = NULL;		// overlay font (created in publish_overlay)

imageConverter* input_cvt = NULL;		// converts incoming ROS images to CUDA memory
imageConverter* overlay_cvt = NULL;		// converts the overlay image back to a ROS message

Publisher classify_pub = NULL;	// publishes Classification2D results
Publisher overlay_pub = NULL;		// publishes the annotated overlay image
Publisher info_pub = NULL;		// publishes VisionInfo to newly-connected subscribers

vision_msgs::VisionInfo info_msg;	// class-label metadata, populated in main()
44 |
45 |
// triggered when a new subscriber connected —
// re-sends the VisionInfo msg so late joiners receive the metadata
void info_callback()
{
	ROS_INFO("new subscriber connected to vision_info topic, sending VisionInfo msg");
	info_pub->publish(info_msg);
}
52 |
53 |
54 | // publish overlay image
55 | bool publish_overlay( int img_class, float confidence )
56 | {
57 | // create font for image overlay
58 | font = cudaFont::Create();
59 |
60 | if( !font )
61 | {
62 | ROS_ERROR("failed to load font for overlay");
63 | return false;
64 | }
65 |
66 | // get the image dimensions
67 | const uint32_t width = input_cvt->GetWidth();
68 | const uint32_t height = input_cvt->GetHeight();
69 |
70 | // assure correct image size
71 | if( !overlay_cvt->Resize(width, height, imageConverter::ROSOutputFormat) )
72 | return false;
73 |
74 | // generate the overlay
75 | char str[256];
76 | sprintf(str, "%05.2f%% %s", confidence * 100.0f, net->GetClassDesc(img_class));
77 |
78 | font->OverlayText(input_cvt->ImageGPU(), width, height,
79 | str, 5, 5, make_float4(255, 255, 255, 255), make_float4(0, 0, 0, 100));
80 |
81 | // populate the message
82 | sensor_msgs::Image msg;
83 |
84 | if( !overlay_cvt->Convert(msg, imageConverter::ROSOutputFormat, input_cvt->ImageGPU()) )
85 | return false;
86 |
87 | // populate timestamp in header field
88 | msg.header.stamp = ROS_TIME_NOW();
89 |
90 | // publish the message
91 | overlay_pub->publish(msg);
92 | ROS_DEBUG("publishing %ux%u overlay image", width, height);
93 | }
94 |
95 |
// triggered when received a new image on input topic —
// classifies the frame and publishes the top result
void img_callback( const sensor_msgs::ImageConstPtr input )
{
	// convert the image to reside on GPU
	if( !input_cvt || !input_cvt->Convert(input) )
	{
		ROS_INFO("failed to convert %ux%u %s image", input->width, input->height, input->encoding.c_str());
		return;
	}

	// classify the image (returns the top class index, or < 0 on error)
	float confidence = 0.0f;
	const int img_class = net->Classify(input_cvt->ImageGPU(), input_cvt->GetWidth(), input_cvt->GetHeight(), &confidence);

	// verify the output
	if( img_class >= 0 )
	{
		ROS_INFO("classified image, %f %s (class=%i)", confidence, net->GetClassDesc(img_class), img_class);

		// create the classification message
		vision_msgs::Classification2D msg;
		vision_msgs::ObjectHypothesis obj;

		// ObjectHypothesis field names changed in ROS2 Galactic
#if ROS_DISTRO >= ROS_GALACTIC
		obj.class_id = img_class;
#else
		obj.id = img_class;
#endif
		obj.score = confidence;

		msg.results.push_back(obj);	// TODO optionally add source image to msg

		// populate timestamp in header field
		msg.header.stamp = ROS_TIME_NOW();

		// publish the classification message
		classify_pub->publish(msg);

		// generate the overlay (if there are subscribers)
		if( ROS_NUM_SUBSCRIBERS(overlay_pub) > 0 )
			publish_overlay(img_class, confidence);
	}
	else
	{
		// an error occurred if the output class is < 0
		ROS_ERROR("failed to classify %ux%u image", input->width, input->height);
	}
}
144 |
145 |
// node main loop
int main(int argc, char **argv)
{
	/*
	 * create node instance
	 */
	ROS_CREATE_NODE("imagenet");


	/*
	 * declare parameters (with their default values)
	 */
	std::string model_name = "googlenet";
	std::string model_path;
	std::string prototxt_path;
	std::string class_labels_path;

	std::string input_blob = IMAGENET_DEFAULT_INPUT;
	std::string output_blob = IMAGENET_DEFAULT_OUTPUT;

	ROS_DECLARE_PARAMETER("model_name", model_name);
	ROS_DECLARE_PARAMETER("model_path", model_path);
	ROS_DECLARE_PARAMETER("prototxt_path", prototxt_path);
	ROS_DECLARE_PARAMETER("class_labels_path", class_labels_path);
	ROS_DECLARE_PARAMETER("input_blob", input_blob);
	ROS_DECLARE_PARAMETER("output_blob", output_blob);


	/*
	 * retrieve parameters
	 */
	ROS_GET_PARAMETER("model_name", model_name);
	ROS_GET_PARAMETER("model_path", model_path);
	ROS_GET_PARAMETER("prototxt_path", prototxt_path);
	ROS_GET_PARAMETER("class_labels_path", class_labels_path);
	ROS_GET_PARAMETER("input_blob", input_blob);
	ROS_GET_PARAMETER("output_blob", output_blob);


	/*
	 * load image recognition network
	 */
	if( model_path.size() > 0 )
	{
		// create network using custom model paths
		net = imageNet::Create(prototxt_path.c_str(), model_path.c_str(),
						   NULL, class_labels_path.c_str(),
						   input_blob.c_str(), output_blob.c_str());

	}
	else
	{
		// create network using the built-in model
		net = imageNet::Create(model_name.c_str());
	}

	if( !net )
	{
		ROS_ERROR("failed to load imageNet model");
		return 0;
	}


	/*
	 * create the class labels parameter vector
	 */
	std::hash model_hasher;  // hash the model path to avoid collisions on the param server
	std::string model_hash_str = std::string(net->GetModelPath()) + std::string(net->GetClassPath());
	const size_t model_hash = model_hasher(model_hash_str);

	ROS_INFO("model hash => %zu", model_hash);
	ROS_INFO("hash string => %s", model_hash_str.c_str());

	// obtain the list of class descriptions
	std::vector class_descriptions;
	const uint32_t num_classes = net->GetNumClasses();

	for( uint32_t n=0; n < num_classes; n++ )
		class_descriptions.push_back(net->GetClassDesc(n));


	// create the key on the param server
	std::string class_key = std::string("class_labels_") + std::to_string(model_hash);

	ROS_DECLARE_PARAMETER(class_key, class_descriptions);
	ROS_SET_PARAMETER(class_key, class_descriptions);

	// populate the vision info msg (points consumers at the class-labels param)
	const std::string node_namespace = ROS_GET_NAMESPACE();
	ROS_INFO("node namespace => %s", node_namespace.c_str());

	info_msg.database_location = node_namespace + std::string("/") + class_key;
	info_msg.database_version = 0;
	info_msg.method = net->GetModelPath();

	ROS_INFO("class labels => %s", info_msg.database_location.c_str());


	/*
	 * create image converter objects
	 */
	input_cvt = new imageConverter();
	overlay_cvt = new imageConverter();

	if( !input_cvt || !overlay_cvt )
	{
		ROS_ERROR("failed to create imageConverter objects");
		return 0;
	}


	/*
	 * advertise publisher topics
	 */
	ROS_CREATE_PUBLISHER(vision_msgs::Classification2D, "classification", 5, classify_pub);
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "overlay", 2, overlay_pub);

	// vision_info is re-sent to each new subscriber via info_callback()
	ROS_CREATE_PUBLISHER_STATUS(vision_msgs::VisionInfo, "vision_info", 1, info_callback, info_pub);


	/*
	 * subscribe to image topic
	 */
	auto img_sub = ROS_CREATE_SUBSCRIBER(sensor_msgs::Image, "image_in", 5, img_callback);


	/*
	 * wait for messages
	 */
	ROS_INFO("imagenet node initialized, waiting for messages");
	ROS_SPIN();


	/*
	 * free resources
	 */
	delete net;
	delete input_cvt;
	delete overlay_cvt;

	return 0;
}
288 |
289 |
--------------------------------------------------------------------------------
/src/node_segnet.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 | #include "image_converter.h"
25 |
26 | #include
27 |
28 | #include
29 |
30 |
// globals
segNet* net = NULL;	// TensorRT semantic segmentation network

segNet::FilterMode overlay_filter = segNet::FILTER_LINEAR;	// filtering mode for the overlay
segNet::FilterMode mask_filter = segNet::FILTER_LINEAR;		// filtering mode for the masks

imageConverter* input_cvt = NULL;		// converts incoming ROS images to CUDA memory
imageConverter* overlay_cvt = NULL;		// output converter for the blended overlay
imageConverter* mask_color_cvt = NULL;	// output converter for the colorized mask
imageConverter* mask_class_cvt = NULL;	// output converter for the class mask

Publisher overlay_pub = NULL;		// publishes the blended segmentation overlay
Publisher mask_color_pub = NULL;	// publishes the colorized mask
Publisher mask_class_pub = NULL;	// publishes the class mask

Publisher info_pub = NULL;		// publishes VisionInfo to newly-connected subscribers
vision_msgs::VisionInfo info_msg;	// class-label metadata sent via info_callback()
48 |
49 |
// Triggered when a new subscriber connects to the vision_info topic.
// Re-publishes the cached VisionInfo message so late-joining subscribers
// still receive the class-label metadata (latched-style behavior).
void info_callback()
{
	ROS_INFO("new subscriber connected to vision_info topic, sending VisionInfo msg");
	info_pub->publish(info_msg);
}
56 |
57 |
58 | // publish overlay image
59 | bool publish_overlay( uint32_t width, uint32_t height )
60 | {
61 | // assure correct image size
62 | if( !overlay_cvt->Resize(width, height, imageConverter::ROSOutputFormat) )
63 | return false;
64 |
65 | // generate the overlay
66 | if( !net->Overlay(overlay_cvt->ImageGPU(), width, height, overlay_filter) )
67 | return false;
68 |
69 | // populate the message
70 | sensor_msgs::Image msg;
71 |
72 | if( !overlay_cvt->Convert(msg, imageConverter::ROSOutputFormat) )
73 | return false;
74 |
75 | // populate timestamp in header field
76 | msg.header.stamp = ROS_TIME_NOW();
77 |
78 | // publish the message
79 | overlay_pub->publish(msg);
80 | }
81 |
82 |
83 | // publish color mask
84 | bool publish_mask_color( uint32_t width, uint32_t height )
85 | {
86 | // assure correct image size
87 | if( !mask_color_cvt->Resize(width, height, imageConverter::ROSOutputFormat) )
88 | return false;
89 |
90 | // generate the overlay
91 | if( !net->Mask(mask_color_cvt->ImageGPU(), width, height, mask_filter) )
92 | return false;
93 |
94 | // populate the message
95 | sensor_msgs::Image msg;
96 |
97 | if( !mask_color_cvt->Convert(msg, imageConverter::ROSOutputFormat) )
98 | return false;
99 |
100 | // populate timestamp in header field
101 | msg.header.stamp = ROS_TIME_NOW();
102 |
103 | // publish the message
104 | mask_color_pub->publish(msg);
105 | }
106 |
107 |
108 | // publish class mask
109 | bool publish_mask_class( uint32_t width, uint32_t height )
110 | {
111 | // assure correct image size
112 | if( !mask_class_cvt->Resize(width, height, IMAGE_GRAY8) )
113 | return false;
114 |
115 | // generate the overlay
116 | if( !net->Mask((uint8_t*)mask_class_cvt->ImageGPU(), width, height) )
117 | return false;
118 |
119 | // populate the message
120 | sensor_msgs::Image msg;
121 |
122 | if( !mask_class_cvt->Convert(msg, IMAGE_GRAY8) )
123 | return false;
124 |
125 | // populate timestamp in header field
126 | msg.header.stamp = ROS_TIME_NOW();
127 |
128 | // publish the message
129 | mask_class_pub->publish(msg);
130 | }
131 |
132 |
133 | // input image subscriber callback
134 | void img_callback( const sensor_msgs::ImageConstPtr input )
135 | {
136 | // convert the image to reside on GPU
137 | if( !input_cvt || !input_cvt->Convert(input) )
138 | {
139 | ROS_INFO("failed to convert %ux%u %s image", input->width, input->height, input->encoding.c_str());
140 | return;
141 | }
142 |
143 | // process the segmentation network
144 | if( !net->Process(input_cvt->ImageGPU(), input_cvt->GetWidth(), input_cvt->GetHeight()) )
145 | {
146 | ROS_ERROR("failed to process segmentation on %ux%u image", input->width, input->height);
147 | return;
148 | }
149 |
150 | // color overlay
151 | if( ROS_NUM_SUBSCRIBERS(overlay_pub) > 0 )
152 | publish_overlay(input->width, input->height);
153 |
154 | // color mask
155 | if( ROS_NUM_SUBSCRIBERS(mask_color_pub) > 0 )
156 | publish_mask_color(input->width, input->height);
157 |
158 | // class mask
159 | if( ROS_NUM_SUBSCRIBERS(mask_class_pub) > 0 )
160 | publish_mask_class(net->GetGridWidth(), net->GetGridHeight());
161 | }
162 |
163 |
// node main loop
//
// Entry point for the segnet node: creates the ROS node (ROS1 or ROS2 via
// the ros_compat macros), declares/retrieves parameters, loads the segNet
// model, publishes the class labels + VisionInfo metadata, and spins on the
// image_in subscription until shutdown.
int main(int argc, char **argv)
{
	/*
	 * create node instance
	 */
	ROS_CREATE_NODE("segnet");


	/*
	 * declare parameters
	 */
	std::string model_name = "fcn-resnet18-cityscapes-1024x512";
	std::string model_path;
	std::string prototxt_path;
	std::string class_labels_path;
	std::string class_colors_path;

	std::string input_blob = SEGNET_DEFAULT_INPUT;
	std::string output_blob = SEGNET_DEFAULT_OUTPUT;

	std::string mask_filter_str = "linear";
	std::string overlay_filter_str = "linear";

	// alpha in [0,255] applied to classes without an explicit alpha
	float overlay_alpha = 180.0f;

	ROS_DECLARE_PARAMETER("model_name", model_name);
	ROS_DECLARE_PARAMETER("model_path", model_path);
	ROS_DECLARE_PARAMETER("prototxt_path", prototxt_path);
	ROS_DECLARE_PARAMETER("class_labels_path", class_labels_path);
	ROS_DECLARE_PARAMETER("class_colors_path", class_colors_path);
	ROS_DECLARE_PARAMETER("input_blob", input_blob);
	ROS_DECLARE_PARAMETER("output_blob", output_blob);
	ROS_DECLARE_PARAMETER("mask_filter", mask_filter_str);
	ROS_DECLARE_PARAMETER("overlay_filter", overlay_filter_str);
	ROS_DECLARE_PARAMETER("overlay_alpha", overlay_alpha);

	/*
	 * retrieve parameters
	 */
	ROS_GET_PARAMETER("model_name", model_name);
	ROS_GET_PARAMETER("model_path", model_path);
	ROS_GET_PARAMETER("prototxt_path", prototxt_path);
	ROS_GET_PARAMETER("class_labels_path", class_labels_path);
	ROS_GET_PARAMETER("class_colors_path", class_colors_path);
	ROS_GET_PARAMETER("input_blob", input_blob);
	ROS_GET_PARAMETER("output_blob", output_blob);
	ROS_GET_PARAMETER("mask_filter", mask_filter_str);
	ROS_GET_PARAMETER("overlay_filter", overlay_filter_str);
	ROS_GET_PARAMETER("overlay_alpha", overlay_alpha);

	// unrecognized filter strings fall back to FILTER_LINEAR
	overlay_filter = segNet::FilterModeFromStr(overlay_filter_str.c_str(), segNet::FILTER_LINEAR);
	mask_filter = segNet::FilterModeFromStr(mask_filter_str.c_str(), segNet::FILTER_LINEAR);


	/*
	 * load segmentation network
	 */
	if( model_path.size() > 0 )
	{
		// create network using custom model paths
		net = segNet::Create(prototxt_path.c_str(), model_path.c_str(),
						 class_labels_path.c_str(), class_colors_path.c_str(),
						 input_blob.c_str(), output_blob.c_str());
	}
	else
	{
		// create network using the built-in model
		net = segNet::Create(model_name.c_str());
	}

	if( !net )
	{
		// NOTE(review): error paths return 0 (success exit code) throughout
		// this node — consider returning non-zero so launch supervisors notice
		ROS_ERROR("failed to load segNet model");
		return 0;
	}

	// set alpha blending value for classes that don't explicitly already have an alpha
	net->SetOverlayAlpha(overlay_alpha);


	/*
	 * create the class labels parameter vector
	 */
	// NOTE(review): template argument appears stripped in transcription
	// (upstream: std::hash<std::string>) — confirm against original source
	std::hash model_hasher; // hash the model path to avoid collisions on the param server
	std::string model_hash_str = std::string(net->GetModelPath()) + std::string(net->GetClassPath());
	const size_t model_hash = model_hasher(model_hash_str);

	ROS_INFO("model hash => %zu", model_hash);
	ROS_INFO("hash string => %s", model_hash_str.c_str());

	// obtain the list of class descriptions
	// NOTE(review): element type appears stripped (upstream: std::vector<std::string>)
	std::vector class_descriptions;
	const uint32_t num_classes = net->GetNumClasses();

	for( uint32_t n=0; n < num_classes; n++ )
	{
		const char* label = net->GetClassDesc(n);

		if( label != NULL )
			class_descriptions.push_back(label);
	}

	// create the key on the param server (hash-suffixed so different models
	// running under the same namespace don't overwrite each other's labels)
	std::string class_key = std::string("class_labels_") + std::to_string(model_hash);

	ROS_DECLARE_PARAMETER(class_key, class_descriptions);
	ROS_SET_PARAMETER(class_key, class_descriptions);

	// populate the vision info msg
	std::string node_namespace = ROS_GET_NAMESPACE();
	ROS_INFO("node namespace => %s", node_namespace.c_str());

	// database_location points subscribers at the param-server key above
	info_msg.database_location = node_namespace + std::string("/") + class_key;
	info_msg.database_version = 0;
	info_msg.method = net->GetModelPath();

	ROS_INFO("class labels => %s", info_msg.database_location.c_str());


	/*
	 * create image converters
	 */
	input_cvt = new imageConverter();
	overlay_cvt = new imageConverter();
	mask_color_cvt = new imageConverter();
	mask_class_cvt = new imageConverter();

	if( !input_cvt || !overlay_cvt || !mask_color_cvt || !mask_class_cvt )
	{
		ROS_ERROR("failed to create imageConverter objects");
		return 0;
	}


	/*
	 * advertise publisher topics
	 */
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "overlay", 2, overlay_pub);
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "color_mask", 2, mask_color_pub);
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "class_mask", 2, mask_class_pub);

	// vision_info re-publishes info_msg whenever a new subscriber connects
	ROS_CREATE_PUBLISHER_STATUS(vision_msgs::VisionInfo, "vision_info", 1, info_callback, info_pub);


	/*
	 * subscribe to image topic
	 */
	auto img_sub = ROS_CREATE_SUBSCRIBER(sensor_msgs::Image, "image_in", 5, img_callback);


	/*
	 * wait for messages
	 */
	ROS_INFO("segnet node initialized, waiting for messages");
	ROS_SPIN();


	/*
	 * free resources
	 */
	delete net;
	delete input_cvt;
	delete overlay_cvt;
	delete mask_color_cvt;
	delete mask_class_cvt;

	return 0;
}
333 |
334 |
--------------------------------------------------------------------------------
/src/node_video_output.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 | #include "image_converter.h"
25 |
26 | #include
27 |
28 |
29 |
30 | // globals
31 | videoOutput* stream = NULL;
32 | imageConverter* image_cvt = NULL;
33 |
34 | std::string topic_name;
35 |
36 |
37 | // input image subscriber callback
38 | void img_callback( const sensor_msgs::ImageConstPtr input )
39 | {
40 | // convert the image to reside on GPU
41 | if( !image_cvt || !image_cvt->Convert(input) )
42 | {
43 | ROS_INFO("failed to convert %ux%u %s image", input->width, input->height, input->encoding.c_str());
44 | return;
45 | }
46 |
47 | // render the image
48 | stream->Render(image_cvt->ImageGPU(), image_cvt->GetWidth(), image_cvt->GetHeight());
49 |
50 | // update status bar
51 | char str[256];
52 | sprintf(str, "%s (%ux%u) | %.1f FPS", topic_name.c_str(), image_cvt->GetWidth(), image_cvt->GetHeight(), stream->GetFrameRate());
53 | stream->SetStatus(str);
54 |
55 | // check for EOS
56 | if( !stream->IsStreaming() )
57 | ROS_SHUTDOWN();
58 | }
59 |
60 |
// node main loop
//
// Entry point for the video_output node: opens a videoOutput stream
// (display window / RTP / file, per the resource URI), subscribes to
// image_in, and renders frames via img_callback until shutdown.
int main(int argc, char **argv)
{
	/*
	 * create node instance
	 */
	ROS_CREATE_NODE("video_output");


	/*
	 * declare parameters
	 */
	videoOptions video_options;

	std::string resource_str;
	std::string codec_str;

	// default bitrate comes from videoOptions' own default
	int video_bitrate = video_options.bitRate;

	ROS_DECLARE_PARAMETER("resource", resource_str);
	ROS_DECLARE_PARAMETER("codec", codec_str);
	ROS_DECLARE_PARAMETER("bitrate", video_bitrate);

	/*
	 * retrieve parameters
	 */
	ROS_GET_PARAMETER("resource", resource_str);
	ROS_GET_PARAMETER("codec", codec_str);
	ROS_GET_PARAMETER("bitrate", video_bitrate);

	if( resource_str.size() == 0 )
	{
		// NOTE(review): message says "input device" but this is the output
		// node — looks copy-pasted from node_video_source; confirm upstream
		ROS_ERROR("resource param wasn't set - please set the node's resource parameter to the input device/filename/URL");
		return 0;
	}

	if( codec_str.size() != 0 )
		video_options.codec = videoOptions::CodecFromStr(codec_str.c_str());

	video_options.bitRate = video_bitrate;

	ROS_INFO("opening video output: %s", resource_str.c_str());


	/*
	 * create stream
	 */
	stream = videoOutput::Create(resource_str.c_str(), video_options);

	if( !stream )
	{
		ROS_ERROR("failed to open video output");
		return 0;
	}


	/*
	 * create image converter
	 */
	image_cvt = new imageConverter();

	if( !image_cvt )
	{
		ROS_ERROR("failed to create imageConverter");
		return 0;
	}


	/*
	 * subscribe to image topic
	 */
	auto img_sub = ROS_CREATE_SUBSCRIBER(sensor_msgs::Image, "image_in", 5, img_callback);

	// remember the resolved topic name for the status-bar text
	topic_name = ROS_SUBSCRIBER_TOPIC(img_sub);


	/*
	 * start streaming
	 */
	if( !stream->Open() )
	{
		// NOTE(review): "video source" here should presumably read "video output"
		ROS_ERROR("failed to start streaming video source");
		return 0;
	}


	/*
	 * start publishing video frames
	 */
	ROS_INFO("video_output node initialized, waiting for messages");
	ROS_SPIN();


	/*
	 * free resources
	 */
	delete stream;
	delete image_cvt;

	return 0;
}
162 |
163 |
--------------------------------------------------------------------------------
/src/node_video_source.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 | #include "image_converter.h"
25 |
26 | #include
27 |
28 |
29 |
30 | // globals
31 | videoSource* stream = NULL;
32 | imageConverter* image_cvt = NULL;
33 | Publisher image_pub = NULL;
34 |
35 |
36 | // aquire and publish camera frame
37 | bool aquireFrame()
38 | {
39 | imageConverter::PixelType* nextFrame = NULL;
40 |
41 | // get the latest frame
42 | if( !stream->Capture(&nextFrame, 1000) )
43 | {
44 | ROS_ERROR("failed to capture next frame");
45 | return false;
46 | }
47 |
48 | // assure correct image size
49 | if( !image_cvt->Resize(stream->GetWidth(), stream->GetHeight(), imageConverter::ROSOutputFormat) )
50 | {
51 | ROS_ERROR("failed to resize camera image converter");
52 | return false;
53 | }
54 |
55 | // populate the message
56 | sensor_msgs::Image msg;
57 |
58 | if( !image_cvt->Convert(msg, imageConverter::ROSOutputFormat, nextFrame) )
59 | {
60 | ROS_ERROR("failed to convert video stream frame to sensor_msgs::Image");
61 | return false;
62 | }
63 |
64 | // populate timestamp in header field
65 | msg.header.stamp = ROS_TIME_NOW();
66 |
67 | // publish the message
68 | image_pub->publish(msg);
69 | ROS_DEBUG("published %ux%u video frame", stream->GetWidth(), stream->GetHeight());
70 |
71 | return true;
72 | }
73 |
74 |
// node main loop
//
// Entry point for the video_source node: opens a videoSource (camera /
// file / network stream per the resource URI), then loops capturing and
// publishing frames on the raw topic until shutdown or end-of-stream.
int main(int argc, char **argv)
{
	/*
	 * create node instance
	 */
	ROS_CREATE_NODE("video_source");

	/*
	 * declare parameters
	 */
	videoOptions video_options;

	std::string resource_str;
	std::string codec_str;
	std::string flip_str;

	// defaults come from videoOptions' own defaults
	int video_width = video_options.width;
	int video_height = video_options.height;
	int latency = video_options.latency;

	ROS_DECLARE_PARAMETER("resource", resource_str);
	ROS_DECLARE_PARAMETER("codec", codec_str);
	ROS_DECLARE_PARAMETER("width", video_width);
	ROS_DECLARE_PARAMETER("height", video_height);
	ROS_DECLARE_PARAMETER("framerate", video_options.frameRate);
	ROS_DECLARE_PARAMETER("loop", video_options.loop);
	ROS_DECLARE_PARAMETER("flip", flip_str);
	ROS_DECLARE_PARAMETER("latency", latency);

	/*
	 * retrieve parameters
	 */
	ROS_GET_PARAMETER("resource", resource_str);
	ROS_GET_PARAMETER("codec", codec_str);
	ROS_GET_PARAMETER("width", video_width);
	ROS_GET_PARAMETER("height", video_height);
	ROS_GET_PARAMETER("framerate", video_options.frameRate);
	ROS_GET_PARAMETER("loop", video_options.loop);
	ROS_GET_PARAMETER("flip", flip_str);
	ROS_GET_PARAMETER("latency", latency);

	if( resource_str.size() == 0 )
	{
		ROS_ERROR("resource param wasn't set - please set the node's resource parameter to the input device/filename/URL");
		return 0;
	}

	if( codec_str.size() != 0 )
		video_options.codec = videoOptions::CodecFromStr(codec_str.c_str());

	if( flip_str.size() != 0 )
		video_options.flipMethod = videoOptions::FlipMethodFromStr(flip_str.c_str());

	video_options.width = video_width;
	video_options.height = video_height;
	video_options.latency = latency;

	ROS_INFO("opening video source: %s", resource_str.c_str());

	/*
	 * open video source
	 */
	stream = videoSource::Create(resource_str.c_str(), video_options);

	if( !stream )
	{
		ROS_ERROR("failed to open video source");
		return 0;
	}


	/*
	 * create image converter
	 */
	image_cvt = new imageConverter();

	if( !image_cvt )
	{
		ROS_ERROR("failed to create imageConverter");
		return 0;
	}


	/*
	 * advertise publisher topics
	 */
	ROS_CREATE_PUBLISHER(sensor_msgs::Image, "raw", 2, image_pub);


	/*
	 * start the camera streaming
	 */
	if( !stream->Open() )
	{
		ROS_ERROR("failed to start streaming video source");
		return 0;
	}


	/*
	 * start publishing video frames
	 */
	while( ROS_OK() )
	{
		// a failed capture is only fatal if the stream actually closed
		// (transient timeouts fall through and we retry on the next loop)
		if( !aquireFrame() )
		{
			if( !stream->IsStreaming() )
			{
				ROS_INFO("stream is closed or reached EOS, exiting node...");
				break;
			}
		}

		if( ROS_OK() )
			ROS_SPIN_ONCE();
	}


	/*
	 * free resources
	 */
	delete stream;
	delete image_cvt;

	return 0;
}
202 |
203 |
--------------------------------------------------------------------------------
/src/nodelet_imagenet.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 |
4 | #include
5 | #include
6 |
7 | #include
8 | #include "image_converter.h"
9 |
10 | #include
11 | #include
12 |
13 | #include
14 | #include
15 |
16 | namespace ros_deep_learning{
17 |
18 | class ros_imagenet : public nodelet::Nodelet
19 | {
20 | public:
21 | ~ros_imagenet()
22 | {
23 | ROS_INFO("\nshutting down...\n");
24 | delete net;
25 | }
26 | void onInit()
27 | {
28 | // get a private nodehandle
29 | ros::NodeHandle& private_nh = getPrivateNodeHandle();
30 | // get parameters from server, checking for errors as it goes
31 | std::string prototxt_path, model_path, mean_binary_path, class_labels_path;
32 | if(! private_nh.getParam("prototxt_path", prototxt_path) )
33 | ROS_ERROR("unable to read prototxt_path for imagenet node");
34 | if(! private_nh.getParam("model_path", model_path) )
35 | ROS_ERROR("unable to read model_path for imagenet node");
36 | if(! private_nh.getParam("class_labels_path", class_labels_path) )
37 | ROS_ERROR("unable to read class_labels_path for imagenet node");
38 |
39 | // make sure files exist (and we can read them)
40 | if( access(prototxt_path.c_str(),R_OK) )
41 | ROS_ERROR("unable to read file \"%s\", check filename and permissions",prototxt_path.c_str());
42 | if( access(model_path.c_str(),R_OK) )
43 | ROS_ERROR("unable to read file \"%s\", check filename and permissions",model_path.c_str());
44 | if( access(class_labels_path.c_str(),R_OK) )
45 | ROS_ERROR("unable to read file \"%s\", check filename and permissions",class_labels_path.c_str());
46 |
47 | // create imageNet
48 | net = imageNet::Create(prototxt_path.c_str(),model_path.c_str(),NULL,class_labels_path.c_str());
49 | if( !net )
50 | {
51 | ROS_ERROR("imagenet: failed to initialize imageNet model");
52 | return;
53 | }
54 |
55 | // create image converter
56 | imgCvt = new imageConverter();
57 |
58 | if( !imgCvt )
59 | {
60 | ROS_ERROR("failed to create imageConverter object");
61 | return;
62 | }
63 |
64 | // setup image transport
65 | image_transport::ImageTransport it(private_nh);
66 | // subscriber for passing in images
67 | imsub = it.subscribe("imin", 10, &ros_imagenet::callback, this);
68 | // publisher for classifier output
69 | class_pub = private_nh.advertise("class",10);
70 | // publisher for human-readable classifier output
71 | class_str_pub = private_nh.advertise("class_str",10);
72 | }
73 |
74 |
75 | private:
76 | void callback(const sensor_msgs::ImageConstPtr& input)
77 | {
78 | // convert the image to reside on GPU
79 | if( !imgCvt || !imgCvt->Convert(input) )
80 | {
81 | ROS_INFO("failed to convert %ux%u %s image", input->width, input->height, input->encoding.c_str());
82 | return;
83 | }
84 |
85 | float confidence = 0.0f;
86 |
87 | // classify image
88 | const int img_class = net->Classify(imgCvt->ImageGPU(), imgCvt->GetWidth(), imgCvt->GetHeight(), &confidence);
89 |
90 | // publish class
91 | std_msgs::Int32 class_msg;
92 | class_msg.data = img_class;
93 | class_pub.publish(class_msg);
94 | // publish class string
95 | std_msgs::String class_msg_str;
96 | class_msg_str.data = net->GetClassDesc(img_class);
97 | class_str_pub.publish(class_msg_str);
98 |
99 | if( img_class < 0 )
100 | ROS_ERROR("imagenet: failed to classify (result=%i)", img_class);
101 | }
102 |
103 | // private variables
104 | image_transport::Subscriber imsub;
105 | imageConverter* imgCvt;
106 | ros::Publisher class_pub;
107 | ros::Publisher class_str_pub;
108 | imageNet* net;
109 | };
110 |
111 | #if ROS_VERSION_MINIMUM(1, 14, 0)
112 | PLUGINLIB_EXPORT_CLASS(ros_deep_learning::ros_imagenet, nodelet::Nodelet);
113 | #else
114 | PLUGINLIB_DECLARE_CLASS(ros_deep_learning, ros_imagenet, ros_deep_learning::ros_imagenet, nodelet::Nodelet);
115 | #endif
116 |
117 | } // namespace ros_deep_learning
118 |
--------------------------------------------------------------------------------
/src/ros_compat.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
23 | #include "ros_compat.h"
24 |
#ifdef ROS2

// Name of the running node (set by ROS_CREATE_NODE); read by the
// ROS_INFO/ROS_DEBUG/ROS_ERROR logging macros in ros_compat.h.
std::string __node_name_;
// Process-wide ROS clock (created by ROS_CREATE_NODE); read by ROS_TIME_NOW().
rclcpp::Clock::SharedPtr __global_clock_;

#endif
31 |
32 |
33 |
--------------------------------------------------------------------------------
/src/ros_compat.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
3 | *
4 | * Permission is hereby granted, free of charge, to any person obtaining a
5 | * copy of this software and associated documentation files (the "Software"),
6 | * to deal in the Software without restriction, including without limitation
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 | * and/or sell copies of the Software, and to permit persons to whom the
9 | * Software is furnished to do so, subject to the following conditions:
10 | *
11 | * The above copyright notice and this permission notice shall be included in
12 | * all copies or substantial portions of the Software.
13 | *
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 | * DEALINGS IN THE SOFTWARE.
21 | */
22 |
#ifndef __ROS_DEEP_LEARNING_COMPATABILITY_H_
#define __ROS_DEEP_LEARNING_COMPATABILITY_H_

// ordinal IDs for ROS distros, compared against the ROS_DISTRO define
// that the build system derives from $ENV{ROS_DISTRO}
#define ROS_KINETIC 0
#define ROS_MELODIC 1
#define ROS_NOETIC 2
#define ROS_DASHING 3
#define ROS_ELOQUENT 4
#define ROS_FOXY 5
#define ROS_GALACTIC 6
#define ROS_HUMBLE 7
#define ROS_IRON 8

#ifdef ROS1

// NOTE(review): the header names in these #include directives appear to have
// been stripped in transcription (likely <ros/ros.h> and message headers) —
// restore from the upstream source
#include
#include
#include
#include
#include
#include

// creates the node plus public/private NodeHandles (ROS1)
#define ROS_CREATE_NODE(name) \
		ros::init(argc, argv, name); \
		ros::NodeHandle nh; \
		ros::NodeHandle private_nh("~");

// NOTE(review): template parameter list appears stripped here
// (upstream: template<class MessageType>)
template
using Publisher = ros::Publisher*;

// NOTE(review): the advertise<...> template arguments also appear stripped below
#define ROS_CREATE_PUBLISHER(msg, topic, queue, ptr) ros::Publisher __publisher_##ptr = private_nh.advertise(topic, queue); ptr = &__publisher_##ptr
#define ROS_CREATE_PUBLISHER_STATUS(msg, topic, queue, callback, ptr) ros::Publisher __publisher_##ptr = private_nh.advertise(topic, queue, [&](const ros::SingleSubscriberPublisher& connect_msg){callback();}); ptr = &__publisher_##ptr

#define ROS_CREATE_SUBSCRIBER(msg, topic, queue, callback) private_nh.subscribe(topic, queue, callback)
#define ROS_SUBSCRIBER_TOPIC(subscriber) subscriber.getTopic()

#define ROS_NUM_SUBSCRIBERS(publisher) publisher->getNumSubscribers()
#define ROS_GET_NAMESPACE() private_nh.getNamespace()
#define ROS_GET_PARAMETER(name, val) private_nh.getParam(name, val)
#define ROS_GET_PARAMETER_OR(name, val, alt) private_nh.param(name, val, alt)
#define ROS_SET_PARAMETER(name, val) private_nh.setParam(name, val)

// declare-parameter shim for ROS1: only writes the default when the
// parameter isn't already on the server (mirrors ROS2 declare_parameter)
// NOTE(review): template parameter list appears stripped (upstream: template<typename T>)
template static void __ros_declare_parameter( ros::NodeHandle& nh, const std::string& name, const T& default_value )
{
	if( !nh.hasParam(name) )
		nh.setParam(name, default_value);
}

#define ROS_DECLARE_PARAMETER(name, default_value) __ros_declare_parameter(private_nh, name, default_value)

#define ROS_TIME_NOW() ros::Time::now()
#define ROS_SPIN() ros::spin()
#define ROS_SPIN_ONCE() ros::spinOnce()
#define ROS_OK() ros::ok()
#define ROS_SHUTDOWN() ros::shutdown()

#elif ROS2

// NOTE(review): header names stripped in transcription (likely
// <rclcpp/rclcpp.h> and message headers) — restore from upstream
#include
#include
#include

// vision_msgs renamed Classification2D -> Classification in Galactic+
#if ROS_DISTRO >= ROS_GALACTIC
#include
#else
#include
#endif

#include
#include

// aliases so node code can use the ROS1-style non-versioned message names
namespace vision_msgs
{
#if ROS_DISTRO >= ROS_GALACTIC
	typedef msg::Classification Classification2D;
#else
	typedef msg::Classification2D Classification2D;
#endif
	typedef msg::Detection2D Detection2D;
	typedef msg::Detection2DArray Detection2DArray;
	typedef msg::ObjectHypothesis ObjectHypothesis;
	typedef msg::ObjectHypothesisWithPose ObjectHypothesisWithPose;
	typedef msg::VisionInfo VisionInfo;
}

namespace sensor_msgs
{
	typedef msg::Image Image;
	typedef msg::Image::SharedPtr ImagePtr;
	typedef msg::Image::ConstSharedPtr ImageConstPtr;
}

namespace ros = rclcpp;

extern std::string __node_name_;

#define ROS_INFO(...) RCUTILS_LOG_INFO_NAMED(__node_name_.c_str(), __VA_ARGS__)
#define ROS_DEBUG(...) RCUTILS_LOG_DEBUG_NAMED(__node_name_.c_str(), __VA_ARGS__)
#define ROS_ERROR(...) RCUTILS_LOG_ERROR_NAMED(__node_name_.c_str(), __VA_ARGS__)

#define ROS_CREATE_NODE(name) \
		rclcpp::init(argc, argv); \
		auto node = rclcpp::Node::make_shared(name, "/" name); \
		__node_name_ = name; \
		__global_clock_ = std::make_shared(RCL_ROS_TIME);

// NOTE(review): template arguments appear stripped here and in the
// create_publisher/create_subscription macros below — restore from upstream
template
using Publisher = std::shared_ptr>;

#define ROS_CREATE_PUBLISHER(msg, topic, queue, ptr) ptr = node->create_publisher(topic, queue)
#define ROS_CREATE_PUBLISHER_STATUS(msg, topic, queue, callback, ptr) ptr = node->create_publisher(topic, queue); rclcpp::TimerBase::SharedPtr __timer_publisher_##ptr = node->create_wall_timer(std::chrono::milliseconds(500), \
				[&](){ static int __subscribers_##ptr=0; const size_t __subscription_count=ptr->get_subscription_count(); if(__subscribers_##ptr != __subscription_count) { if(__subscription_count > __subscribers_##ptr) callback(); __subscribers_##ptr=__subscription_count; }})

#define ROS_CREATE_SUBSCRIBER(msg, topic, queue, callback) node->create_subscription(topic, queue, callback)
// BUGFIX: removed stray trailing ';' from the macro body — it made the macro
// unusable in expression context and was inconsistent with the ROS1 variant
#define ROS_SUBSCRIBER_TOPIC(subscriber) subscriber->get_topic_name()

#define ROS_NUM_SUBSCRIBERS(publisher) publisher->get_subscription_count()
#define ROS_GET_NAMESPACE() node->get_namespace()
#define ROS_GET_PARAMETER(name, val) node->get_parameter(name, val)
#define ROS_GET_PARAMETER_OR(name, val, alt) node->get_parameter_or(name, val, alt) // TODO set undefined params in param server
#define ROS_SET_PARAMETER(name, val) node->set_parameter(rclcpp::Parameter(name, val))

#define ROS_DECLARE_PARAMETER(name, default_value) node->declare_parameter(name, default_value)

extern rclcpp::Clock::SharedPtr __global_clock_;

#define ROS_TIME_NOW() __global_clock_->now()
#define ROS_SPIN() rclcpp::spin(node)
#define ROS_SPIN_ONCE() rclcpp::spin_some(node)
#define ROS_OK() rclcpp::ok()
#define ROS_SHUTDOWN() rclcpp::shutdown()

#endif
#endif
157 |
158 |
--------------------------------------------------------------------------------