├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README.md
├── cfg
├── depth_completion.rviz
├── depth_completion_params.cfg
├── stereo_vision.rviz
└── stereo_vision_params.cfg
├── docs
├── images
│ ├── depth_completion.png
│ └── stereo_pointcloud.png
├── index.md
└── videos
│ ├── depth_completion.gif
│ └── stereo_vision.gif
├── include
└── sensor_processing
│ ├── depth_completion.h
│ └── stereo_vision.h
├── launch
├── depth_completion_demo.launch
├── stereo_vision_demo.launch
└── tools.launch
├── mkdocs.yml
├── package.xml
├── src
├── depth_completion.cpp
├── depth_completion.py
├── depth_map_utils.py
├── nodes
│ ├── depth_completion_node.cpp
│ └── stereo_vision_node.cpp
├── pystereo.py
└── stereo_vision.cpp
├── statick_config
└── rsc
│ ├── _clang-format
│ ├── cccc.opt
│ ├── config.yaml
│ ├── exceptions.yaml
│ └── profile.yaml
└── test
├── test_node_example_listener.cpp
├── test_node_example_listener.test
├── test_node_example_talker.cpp
└── test_node_example_talker.test
/.gitignore:
--------------------------------------------------------------------------------
1 | *.cmake
2 | *.bin
3 | *.pyc
4 | bin
5 | build
6 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(sensor_processing)
3 |
4 | find_package(catkin REQUIRED
5 | dynamic_reconfigure
6 | message_generation
7 | rosgraph_msgs
8 | rospy
9 | roscpp
10 | std_msgs
11 | message_filters
12 | image_geometry
13 | cv_bridge
14 | pcl_ros
15 | )
16 |
17 | find_package( OpenCV REQUIRED )
18 | include_directories( ${OpenCV_INCLUDE_DIRS} )
19 |
20 | generate_dynamic_reconfigure_options(
21 | cfg/stereo_vision_params.cfg
22 | cfg/depth_completion_params.cfg
23 | )
24 |
25 | catkin_package(
26 | CATKIN_DEPENDS dynamic_reconfigure message_runtime rosgraph_msgs rospy roscpp std_msgs image_geometry
27 | )
28 |
29 | include_directories(include ${catkin_INCLUDE_DIRS})
30 |
31 | add_executable(stereo_vision src/nodes/stereo_vision_node.cpp src/stereo_vision.cpp)
32 | target_link_libraries(stereo_vision ${catkin_LIBRARIES})
33 | add_dependencies(stereo_vision stereo_image_gencfg)
34 |
35 | add_executable(depth_completion src/nodes/depth_completion_node.cpp src/depth_completion.cpp)
36 | target_link_libraries(depth_completion ${catkin_LIBRARIES})
37 | add_dependencies(depth_completion depth_completion_gencfg)
38 |
39 | if(CATKIN_ENABLE_TESTING)
40 | find_package(rostest REQUIRED)
41 |
42 | endif()
43 |
44 |
45 | install(DIRECTORY include/${PROJECT_NAME}/
46 | DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
47 | FILES_MATCHING PATTERN "*.h"
48 | )
49 |
50 | install(PROGRAMS src/pystereo.py DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
51 |
52 | install(TARGETS stereo_vision
53 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
54 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
55 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
56 | )
57 |
58 | install(DIRECTORY launch
59 | DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
60 | )
61 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014, Thomas Denewiler
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | * Neither the name of Thomas Denewiler nor the names of its
15 | contributors may be used to endorse or promote products derived from
16 | this software without specific prior written permission.
17 |
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # SASensorProcessing
2 |
3 | ROS node to create pointcloud out of stereo images and complete sparse pointclouds from the KITTI Vision Benchmark Suite
4 |
5 | ## Demos
6 | ### Stereo Vision
7 |
8 |
9 |
10 |
11 |
12 | ### Depth Completion
13 |
14 |
15 |
16 |
17 |
18 |
19 | ## Run
20 |
21 | ```
22 | roslaunch sensor_processing stereo_vision_demo.launch home_dir:=YOUR_HOME_DIRECTORY scenario:=0060
23 | roslaunch sensor_processing depth_completion_demo.launch home_dir:=YOUR_HOME_DIRECTORY scenario:=0060
24 | ```
25 |
26 | ## Background & Set up
27 |
28 | This is a side project of my main project: https://github.com/appinho/SARosPerceptionKitti
29 | Refer to that repository to set up the project and acquire the data.
30 |
31 | To run it use the following commands:
32 |
33 | ```
34 | cd ~/catkin_ws/src
35 | git clone https://github.com/appinho/SASensorProcessing.git
36 | cd ~/catkin_ws
37 | catkin_make
38 | source devel/setup.bash
39 | ```
40 |
41 | ## Sources
42 |
43 | * [ROS Image processing wiki](http://wiki.ros.org/stereo_image_proc)
44 | * [Depth completion paper](https://arxiv.org/pdf/1802.00036v1.pdf)
45 |
--------------------------------------------------------------------------------
/cfg/depth_completion.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 0
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /Global Options1
8 | - /Status1
9 | - /PointCloud22
10 | Splitter Ratio: 0.5
11 | Tree Height: 307
12 | - Class: rviz/Selection
13 | Name: Selection
14 | - Class: rviz/Tool Properties
15 | Expanded:
16 | - /2D Pose Estimate1
17 | - /2D Nav Goal1
18 | - /Publish Point1
19 | Name: Tool Properties
20 | Splitter Ratio: 0.588679016
21 | - Class: rviz/Views
22 | Expanded:
23 | - /Current View1
24 | Name: Views
25 | Splitter Ratio: 0.5
26 | - Class: rviz/Time
27 | Experimental: false
28 | Name: Time
29 | SyncMode: 0
30 | SyncSource: Image
31 | Toolbars:
32 | toolButtonStyle: 2
33 | Visualization Manager:
34 | Class: ""
35 | Displays:
36 | - Alpha: 0.5
37 | Cell Size: 1
38 | Class: rviz/Grid
39 | Color: 160; 160; 164
40 | Enabled: false
41 | Line Style:
42 | Line Width: 0.0299999993
43 | Value: Lines
44 | Name: Grid
45 | Normal Cell Count: 0
46 | Offset:
47 | X: 0
48 | Y: 0
49 | Z: 0
50 | Plane: XY
51 | Plane Cell Count: 10
52 | Reference Frame:
53 | Value: false
54 | - Class: rviz/Image
55 | Enabled: true
56 | Image Topic: /kitti/camera_color_left/image_raw
57 | Max Value: 1
58 | Median window: 5
59 | Min Value: 0
60 | Name: Image
61 | Normalize Range: true
62 | Queue Size: 2
63 | Transport Hint: raw
64 | Unreliable: false
65 | Value: true
66 | - Class: rviz/Image
67 | Enabled: true
68 | Image Topic: /kitti/depth_image
69 | Max Value: 1
70 | Median window: 5
71 | Min Value: 0
72 | Name: Image
73 | Normalize Range: true
74 | Queue Size: 2
75 | Transport Hint: raw
76 | Unreliable: false
77 | Value: true
78 | - Alpha: 1
79 | Autocompute Intensity Bounds: true
80 | Autocompute Value Bounds:
81 | Max Value: 10
82 | Min Value: -10
83 | Value: true
84 | Axis: Z
85 | Channel Name: intensity
86 | Class: rviz/PointCloud2
87 | Color: 255; 120; 0
88 | Color Transformer: FlatColor
89 | Decay Time: 0
90 | Enabled: false
91 | Invert Rainbow: false
92 | Max Color: 255; 255; 255
93 | Max Intensity: 4096
94 | Min Color: 0; 0; 0
95 | Min Intensity: 0
96 | Name: PointCloud2
97 | Position Transformer: XYZ
98 | Queue Size: 10
99 | Selectable: true
100 | Size (Pixels): 3
101 | Size (m): 0.100000001
102 | Style: Points
103 | Topic: /kitti/velo/pointcloud
104 | Unreliable: false
105 | Use Fixed Frame: true
106 | Use rainbow: true
107 | Value: false
108 | - Alpha: 1
109 | Autocompute Intensity Bounds: true
110 | Autocompute Value Bounds:
111 | Max Value: 10
112 | Min Value: -10
113 | Value: true
114 | Axis: Z
115 | Channel Name: intensity
116 | Class: rviz/PointCloud2
117 | Color: 255; 255; 0
118 | Color Transformer: RGB8
119 | Decay Time: 0
120 | Enabled: true
121 | Invert Rainbow: false
122 | Max Color: 255; 255; 255
123 | Max Intensity: 4096
124 | Min Color: 0; 0; 0
125 | Min Intensity: 0
126 | Name: PointCloud2
127 | Position Transformer: XYZ
128 | Queue Size: 10
129 | Selectable: true
130 | Size (Pixels): 2
131 | Size (m): 0.00999999978
132 | Style: Points
133 | Topic: /kitti/completed_pointcloud
134 | Unreliable: false
135 | Use Fixed Frame: true
136 | Use rainbow: true
137 | Value: true
138 | - Class: rviz/Image
139 | Enabled: true
140 | Image Topic: /kitti/completed_image
141 | Max Value: 1
142 | Median window: 5
143 | Min Value: 0
144 | Name: Image
145 | Normalize Range: true
146 | Queue Size: 2
147 | Transport Hint: raw
148 | Unreliable: false
149 | Value: true
150 | Enabled: true
151 | Global Options:
152 | Background Color: 48; 48; 48
153 | Default Light: true
154 | Fixed Frame: velo_link
155 | Frame Rate: 30
156 | Name: root
157 | Tools:
158 | - Class: rviz/Interact
159 | Hide Inactive Objects: true
160 | - Class: rviz/MoveCamera
161 | - Class: rviz/Select
162 | - Class: rviz/FocusCamera
163 | - Class: rviz/Measure
164 | - Class: rviz/SetInitialPose
165 | Topic: /initialpose
166 | - Class: rviz/SetGoal
167 | Topic: /move_base_simple/goal
168 | - Class: rviz/PublishPoint
169 | Single click: true
170 | Topic: /clicked_point
171 | Value: true
172 | Views:
173 | Current:
174 | Class: rviz/Orbit
175 | Distance: 36.0129204
176 | Enable Stereo Rendering:
177 | Stereo Eye Separation: 0.0599999987
178 | Stereo Focal Distance: 1
179 | Swap Stereo Eyes: false
180 | Value: false
181 | Focal Point:
182 | X: 27.7509384
183 | Y: -3.01130486
184 | Z: -1.52654934
185 | Focal Shape Fixed Size: true
186 | Focal Shape Size: 0.0500000007
187 | Invert Z Axis: false
188 | Name: Current View
189 | Near Clip Distance: 0.00999999978
190 | Pitch: 0.124796875
191 | Target Frame:
192 | Value: Orbit (rviz)
193 | Yaw: 3.05930853
194 | Saved: ~
195 | Window Geometry:
196 | Displays:
197 | collapsed: false
198 | Height: 1407
199 | Hide Left Dock: false
200 | Hide Right Dock: true
201 | Image:
202 | collapsed: false
203 | QMainWindow State: 000000ff00000000fd000000040000000000000343000004dcfc020000000ffb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006600fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003100000181000000e400fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000001ba000001160000001b00fffffffb0000000a0049006d00610067006501000002d8000001180000001b00fffffffb0000000a0049006d00610067006501000003f8000001150000001b00fffffffb0000000a0049006d00610067006501000001af000000e00000000000000000fb0000000a0049006d0061006700650100000295000000b40000000000000000fb0000000a0049006d006100670065010000034f0000006f0000000000000000fb0000000a0049006d00610067006501000003a30000016a0000000000000000000000010000010f00000396fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002800000396000000c200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000009ba00000046fc0100000002fb0000000800540069006d00650100000000000009ba000003de00fffffffb0000000800540069006d006501000000000000045000000000000000000000066f000004dc00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
204 | Selection:
205 | collapsed: false
206 | Time:
207 | collapsed: false
208 | Tool Properties:
209 | collapsed: false
210 | Views:
211 | collapsed: true
212 | Width: 2490
213 | X: 70
214 | Y: 33
215 |
--------------------------------------------------------------------------------
/cfg/depth_completion_params.cfg:
--------------------------------------------------------------------------------
 1 | #! /usr/bin/env python
 2 | 
   | # Dynamic-reconfigure definition for the depth completion node.
   | # At build time this generates DepthCompletionParamsConfig.h, which is
   | # included by include/sensor_processing/depth_completion.h.
 3 | PACKAGE='sensor_processing'
 4 | import roslib
   | # NOTE(review): load_manifest is a rosbuild-era call and is normally not
   | # needed with parameter_generator_catkin — confirm before removing.
 5 | roslib.load_manifest(PACKAGE)
 6 | 
 7 | from dynamic_reconfigure.parameter_generator_catkin import *
 8 | 
 9 | gen = ParameterGenerator()
10 | # Name Type Level Description Default Min Max
   | # Master on/off switch for the completion pipeline.
11 | gen.add("enable", bool_t, 0, "Enable depth completion", True)
   | # Morphological kernel size steps; presumably mapped to actual pixel sizes
   | # by DepthCompletion::convertKernelSize in the node — verify against the cpp.
12 | gen.add("diamondKernelSize", int_t, 0, "Size for diamond kernel", 2, 0, 5)
13 | gen.add("fullKernelSize", int_t, 0, "Size for full kernel", 1, 0, 5)
14 | gen.add("closureKernelSize", int_t, 0, "Size for closure kernel", 1, 0, 5)
15 | gen.add("fillKernelSize", int_t, 0, "Size for fill kernel", 1, 0, 5)
16 | gen.add("medianKernelSize", int_t, 0, "Size for median kernel", 2, 0, 2)
17 | 
   | # Blur stage selector: 0 = none, 1 = Gaussian, 2 = bilateral (default 2).
18 | size_enum = gen.enum([ gen.const("None", int_t, 0, "No blurring"),
19 | gen.const("Gaussian", int_t, 1, "Gaussian blurring"),
20 | gen.const("Bilateral", int_t, 2, "Bilateral Filtering")],
21 | "Different blurring options")
22 | gen.add("blurMethod", int_t, 0, "Blurring method", 2, 0, 2, edit_method=size_enum)
23 | gen.add("blurKernelSize", int_t, 0, "Size for blur kernel", 1, 0, 5)
   | # bilateralSigma is only meaningful when blurMethod == Bilateral.
24 | gen.add("bilateralSigma", double_t, 0,"Sigma of bilateral filter", 4.5, 0,50.0)
25 | 
   | # Last argument is the prefix of the generated Config class name.
26 | exit(gen.generate(PACKAGE, "sensor_processing", "DepthCompletionParams"))
--------------------------------------------------------------------------------
/cfg/stereo_vision.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 0
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /Global Options1
8 | - /Status1
9 | - /PointCloud21
10 | - /PointCloud22
11 | Splitter Ratio: 0.5
12 | Tree Height: 644
13 | - Class: rviz/Selection
14 | Name: Selection
15 | - Class: rviz/Tool Properties
16 | Expanded:
17 | - /2D Pose Estimate1
18 | - /2D Nav Goal1
19 | - /Publish Point1
20 | Name: Tool Properties
21 | Splitter Ratio: 0.588679016
22 | - Class: rviz/Views
23 | Expanded:
24 | - /Current View1
25 | Name: Views
26 | Splitter Ratio: 0.5
27 | - Class: rviz/Time
28 | Experimental: false
29 | Name: Time
30 | SyncMode: 0
31 | SyncSource: Image
32 | Toolbars:
33 | toolButtonStyle: 2
34 | Visualization Manager:
35 | Class: ""
36 | Displays:
37 | - Alpha: 0.5
38 | Cell Size: 1
39 | Class: rviz/Grid
40 | Color: 160; 160; 164
41 | Enabled: true
42 | Line Style:
43 | Line Width: 0.0299999993
44 | Value: Lines
45 | Name: Grid
46 | Normal Cell Count: 0
47 | Offset:
48 | X: 0
49 | Y: 0
50 | Z: 0
51 | Plane: XY
52 | Plane Cell Count: 10
53 | Reference Frame:
54 | Value: true
55 | - Class: rviz/Image
56 | Enabled: true
57 | Image Topic: /kitti/camera_color_left/image_raw
58 | Max Value: 1
59 | Median window: 5
60 | Min Value: 0
61 | Name: Image
62 | Normalize Range: true
63 | Queue Size: 2
64 | Transport Hint: raw
65 | Unreliable: false
66 | Value: true
67 | - Class: rviz/Image
68 | Enabled: false
69 | Image Topic: /kitti/disparity_image
70 | Max Value: 1
71 | Median window: 5
72 | Min Value: 0
73 | Name: Image
74 | Normalize Range: true
75 | Queue Size: 2
76 | Transport Hint: raw
77 | Unreliable: false
78 | Value: false
79 | - Class: rviz/Image
80 | Enabled: false
81 | Image Topic: /kitti/depth_image
82 | Max Value: 1
83 | Median window: 5
84 | Min Value: 0
85 | Name: Image
86 | Normalize Range: true
87 | Queue Size: 2
88 | Transport Hint: raw
89 | Unreliable: false
90 | Value: false
91 | - Alpha: 1
92 | Autocompute Intensity Bounds: true
93 | Autocompute Value Bounds:
94 | Max Value: 10
95 | Min Value: -10
96 | Value: true
97 | Axis: Z
98 | Channel Name: intensity
99 | Class: rviz/PointCloud2
100 | Color: 255; 0; 255
101 | Color Transformer: FlatColor
102 | Decay Time: 0
103 | Enabled: true
104 | Invert Rainbow: false
105 | Max Color: 255; 255; 255
106 | Max Intensity: 4096
107 | Min Color: 0; 0; 0
108 | Min Intensity: 0
109 | Name: PointCloud2
110 | Position Transformer: XYZ
111 | Queue Size: 10
112 | Selectable: true
113 | Size (Pixels): 3
114 | Size (m): 0.100000001
115 | Style: Points
116 | Topic: /kitti/velo/pointcloud
117 | Unreliable: false
118 | Use Fixed Frame: true
119 | Use rainbow: true
120 | Value: true
121 | - Alpha: 1
122 | Autocompute Intensity Bounds: true
123 | Autocompute Value Bounds:
124 | Max Value: 10
125 | Min Value: -10
126 | Value: true
127 | Axis: Z
128 | Channel Name: intensity
129 | Class: rviz/PointCloud2
130 | Color: 255; 255; 255
131 | Color Transformer: MONO8
132 | Decay Time: 0
133 | Enabled: true
134 | Invert Rainbow: false
135 | Max Color: 255; 255; 255
136 | Max Intensity: 4096
137 | Min Color: 0; 0; 0
138 | Min Intensity: 0
139 | Name: PointCloud2
140 | Position Transformer: XYZ
141 | Queue Size: 10
142 | Selectable: true
143 | Size (Pixels): 2
144 | Size (m): 0.00999999978
145 | Style: Points
146 | Topic: /kitti/stereo_pointcloud
147 | Unreliable: false
148 | Use Fixed Frame: true
149 | Use rainbow: true
150 | Value: true
151 | Enabled: true
152 | Global Options:
153 | Background Color: 48; 48; 48
154 | Default Light: true
155 | Fixed Frame: velo_link
156 | Frame Rate: 30
157 | Name: root
158 | Tools:
159 | - Class: rviz/Interact
160 | Hide Inactive Objects: true
161 | - Class: rviz/MoveCamera
162 | - Class: rviz/Select
163 | - Class: rviz/FocusCamera
164 | - Class: rviz/Measure
165 | - Class: rviz/SetInitialPose
166 | Topic: /initialpose
167 | - Class: rviz/SetGoal
168 | Topic: /move_base_simple/goal
169 | - Class: rviz/PublishPoint
170 | Single click: true
171 | Topic: /clicked_point
172 | Value: true
173 | Views:
174 | Current:
175 | Class: rviz/Orbit
176 | Distance: 90.7981415
177 | Enable Stereo Rendering:
178 | Stereo Eye Separation: 0.0599999987
179 | Stereo Focal Distance: 1
180 | Swap Stereo Eyes: false
181 | Value: false
182 | Focal Point:
183 | X: 47.5785599
184 | Y: 2.81667185
185 | Z: 2.89683628
186 | Focal Shape Fixed Size: true
187 | Focal Shape Size: 0.0500000007
188 | Invert Z Axis: false
189 | Name: Current View
190 | Near Clip Distance: 0.00999999978
191 | Pitch: 0.0847872719
192 | Target Frame:
193 | Value: Orbit (rviz)
194 | Yaw: 3.10554981
195 | Saved: ~
196 | Window Geometry:
197 | Displays:
198 | collapsed: false
199 | Height: 1407
200 | Hide Left Dock: false
201 | Hide Right Dock: true
202 | Image:
203 | collapsed: false
204 | QMainWindow State: 000000ff00000000fd0000000400000000000003f2000004dcfc020000000efb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006600fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730100000031000002d2000000e400fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d006100670065000000027e000000ea0000001b00fffffffb0000000a0049006d0061006700650000000234000001680000001b00fffffffb0000000a0049006d006100670065010000030b000002020000001b00fffffffb0000000a0049006d00610067006501000001af000000e00000000000000000fb0000000a0049006d0061006700650100000295000000b40000000000000000fb0000000a0049006d006100670065010000034f0000006f0000000000000000000000010000010f00000396fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000002800000396000000c200fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000009ba00000046fc0100000002fb0000000800540069006d00650100000000000009ba000003de00fffffffb0000000800540069006d00650100000000000004500000000000000000000005c0000004dc00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
205 | Selection:
206 | collapsed: false
207 | Time:
208 | collapsed: false
209 | Tool Properties:
210 | collapsed: false
211 | Views:
212 | collapsed: true
213 | Width: 2490
214 | X: 70
215 | Y: 33
216 |
--------------------------------------------------------------------------------
/cfg/stereo_vision_params.cfg:
--------------------------------------------------------------------------------
 1 | #! /usr/bin/env python
 2 | 
   | # Dynamic-reconfigure definition for the stereo vision node.
   | # At build time this generates StereoVisionParamsConfig.h, which is
   | # included by include/sensor_processing/stereo_vision.h.
 3 | PACKAGE='sensor_processing'
 4 | import roslib
   | # NOTE(review): load_manifest is a rosbuild-era call and is normally not
   | # needed with parameter_generator_catkin — confirm before removing.
 5 | roslib.load_manifest(PACKAGE)
 6 | 
 7 | from dynamic_reconfigure.parameter_generator_catkin import *
 8 | 
 9 | gen = ParameterGenerator()
10 | # Name Type Level Description Default Min Max
   | # Raw slider steps; presumably translated to OpenCV-legal values by
   | # StereoVision::convertNumDisparities / convertBlockSize — verify in the cpp.
11 | gen.add("numDisparities", int_t, 0, "Number of Disparity", 3, 1, 5)
12 | gen.add("blockSize", int_t, 0, "Block Size", 12, 0, 125)
13 | 
   | # Last argument is the prefix of the generated Config class name.
14 | exit(gen.generate(PACKAGE, "sensor_processing", "StereoVisionParams"))
15 | 
--------------------------------------------------------------------------------
/docs/images/depth_completion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appinho/SASensorProcessing/acba5b23bf2080f3eb65f459339aa04ad27b6f14/docs/images/depth_completion.png
--------------------------------------------------------------------------------
/docs/images/stereo_pointcloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appinho/SASensorProcessing/acba5b23bf2080f3eb65f459339aa04ad27b6f14/docs/images/stereo_pointcloud.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Node Example
2 |
3 | [ROS](http://ros.org) allows for creating nodes that communicate with each other.
4 | It is very common to use C++ and Python to write these nodes.
5 |
6 | This package contains example nodes written in C++ and Python that show minimal examples of using
7 | some very basic but powerful features of ROS.
8 | Those features include:
9 |
10 | * [parameter server](http://wiki.ros.org/Parameter%20Server)
11 | * [dynamic reconfigure](http://wiki.ros.org/dynamic_reconfigure/Tutorials)
12 | * [timers](http://wiki.ros.org/roscpp/Overview/Timers)
13 | * [custom messages](http://wiki.ros.org/ROS/Tutorials/DefiningCustomMessages)
14 | * classes with callback functions for
15 | [publishers and subscribers](http://wiki.ros.org/roscpp/Overview/Publishers%20and%20Subscribers)
16 | * [remap](http://wiki.ros.org/roslaunch/XML/remap) topic names
17 |
18 | More ideas that are explored are deploying documentation using [GitHub Pages](https://pages.github.com/),
19 | writing unit tests, and checking build status and code coverage.
20 |
21 | ## Description
22 |
23 | There are several launch files included, the main one being `node_example.launch`.
24 | This will start a talker and listener written in C++ and a talker and listener written in Python.
25 | One GUI will open allowing you to see what messages are being received by the listeners and another GUI will allow
26 | you to change the values sent from each talker.
27 | Both listener nodes receive messages from both talkers, showing that the languages used to write the talkers and
28 | listeners can be mixed.
29 |
30 | ## Usage
31 |
32 | [Build a workspace](http://wiki.ros.org/catkin/Tutorials/create_a_workspace) containing this repository.
33 | A `node_example.rosinstall` file has been included for convenience with [`wstool`](http://wiki.ros.org/wstool).
34 |
35 | To start all the nodes run
36 |
37 | roslaunch node_example node_example.launch
38 |
39 | You should see two windows open: `rqt_reconfigure` and `rqt_console`.
40 | They will look like the following screenshots.
41 |
42 | 
43 |
44 | 
45 |
46 | At this point you can modify the strings or numbers in the reconfigure GUI and you should see those changes show
47 | up in the console GUI.
48 | There are `enable` parameters in each of the talker nodes so that the nodes can effectively be paused during runtime.
49 | This is a nice feature that allows easily turning system components on and off during operation for whatever reason
50 | (such as wanting to run multiple similar nodes side-by-side for comparison without using too many CPU/RAM resources,
51 | only running certain nodes when some conditions are met, etc.).
52 |
53 | ## Branches
54 |
55 | The `master` branch will try to keep up with the latest long-term support release version of ROS (currently Kinetic).
56 | The `hydro-dev` branch was tested on ROS Hydro, Indigo, and Kinetic.
57 | The `fuerte-dev` branch was tested on ROS Fuerte.
58 |
59 | ## Testing
60 |
61 | During development there are large benefits to employing unit tests to verify that code changes do not break existing functionality.
62 | This package contains unit tests for each of the C++ nodes.
63 | The unit tests are run using the `*.test` files in the `test/` directory.
64 | The `*.test` files start the node to be tested plus the unit test code.
65 | The unit test code is written such that it publishes and subscribes to the topics that correspond to the interfaces of the node under test.
66 | Callbacks are used to verify that the expected data is available on the specified topics.
67 |
68 | There are several methods of running the unit tests.
69 | Running the tests with continuous integration services for pull requests is a common method used to ensure pull requests can be safely merged.
70 | A popular continuous integration provider for open source projects is [Travis CI](https://travis-ci.org).
71 | The build and test results for this package can be found in the table at the top of this page.
72 |
73 | Unit tests are not magic bullets.
74 | The inputs to the nodes must take on enough values to verify that functions return valid values.
75 | This will be different for each function and is not fully covered here.
76 | Another aspect of unit tests is to ensure that all lines of code are exercised by unit tests, also referred to as code coverage.
77 |
78 | A popular code coverage provider for open source projects is [codecov](https://codecov.io).
79 | The code coverage results for this package can be found in the table at the top of this page.
80 | This tool provides some measure of confidence that the existing unit tests will catch any issues, and that new changes are introduced with unit test code.
81 |
82 | The configuration file for Travis is in this repository at [.travis.yml](.travis.yml).
83 | That file contains build flags to ensure that unit tests run and that code coverage results can be calculated.
84 |
--------------------------------------------------------------------------------
/docs/videos/depth_completion.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appinho/SASensorProcessing/acba5b23bf2080f3eb65f459339aa04ad27b6f14/docs/videos/depth_completion.gif
--------------------------------------------------------------------------------
/docs/videos/stereo_vision.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/appinho/SASensorProcessing/acba5b23bf2080f3eb65f459339aa04ad27b6f14/docs/videos/stereo_vision.gif
--------------------------------------------------------------------------------
/include/sensor_processing/depth_completion.h:
--------------------------------------------------------------------------------
 1 | #ifndef DEPTHCOMPLETION_H
 2 | #define DEPTHCOMPLETION_H
 3 | 
 4 | #include "sensor_processing/DepthCompletionParamsConfig.h"
 5 | 
   | // NOTE(review): the #include targets below and every template argument in
   | // this header appear to have been stripped by text extraction (angle-bracket
   | // content removed). Restore from the original repository file before building.
 6 | #include
 7 | #include
 8 | 
 9 | #include
10 | #include
11 | 
12 | #include
13 | #include
14 | #include
15 | 
16 | #include
17 | #include
18 | #include
19 | 
20 | namespace sensor_processing
21 | {
22 | 
   | // Using-directives for brevity of sensor_msgs typedefs used below.
23 | using namespace sensor_msgs;
24 | using namespace message_filters::sync_policies;
25 | 
   | // Node class: synchronizes a point cloud with the left color image and its
   | // camera info, densifies the projected depth image, and republishes a
   | // completed depth image plus an RGB point cloud (see publishers below).
26 | class DepthCompletion
27 | {
28 | public:
29 | //! Constructor.
30 | explicit DepthCompletion(ros::NodeHandle nh);
31 | 
32 | private:
33 | 
   | // Entry point for one time-synchronized (cloud, camera info, image) triple.
34 | void callback(const PointCloud2ConstPtr& pc_msg,
35 | const CameraInfoConstPtr& l_info_msg,
36 | const ImageConstPtr& l_image_msg);
   | // Projects the cloud into the camera to build a sparse depth image
   | // (per the name — implementation lives in src/depth_completion.cpp).
37 | void pointCloudToDepthImage(
38 | const PointCloud2ConstPtr& pc,
39 | const CameraInfoConstPtr& cam_info,
40 | cv::Mat& depth_image);
   | // Fills holes in the sparse depth image using the kernel/blur parameters.
41 | void processDepthCompletion(
42 | const CameraInfoConstPtr& cam_info,
43 | const cv::Mat depth_image,
44 | cv::Mat & depth_completion_image);
   | // Back-projects the completed depth image, coloring points from image_msg.
45 | void depthImageToRGBPointCloud(
46 | const cv::Mat depth_image,
47 | const ImageConstPtr& image_msg,
48 | const CameraInfoConstPtr& cam_info,
49 | PointCloud2 & pc);
50 | 
   | // True if pixel (u, v) lies inside the image bounds given by cam_info.
51 | bool inImage(const CameraInfoConstPtr& cam_info, const int u, const int v);
   | // Converts a float depth image to a displayable 8-bit mono image.
52 | void depthToCV8UC1(const cv::Mat& float_img, cv::Mat& mono8_img);
53 | 
   | // dynamic_reconfigure hook: copies config values into the members below.
54 | void configCallback(sensor_processing::DepthCompletionParamsConfig &config, uint32_t level);
   | // Maps a reconfigure kernel-size step to an actual kernel size.
55 | int convertKernelSize(const int full_kernel_size);
56 | 
57 | 
58 | ros::NodeHandle nh_;
59 | 
   | // Exact-time-synchronized inputs (template arguments stripped — see NOTE).
60 | message_filters::Subscriber sub_pointcloud_;
61 | message_filters::Subscriber sub_left_color_camera_info_;
62 | message_filters::Subscriber sub_left_color_image_;
63 | typedef ExactTime ExactPolicy;
64 | typedef message_filters::Synchronizer ExactSync;
65 | ExactSync exact_sync_;
66 | 
   | // Outputs: sparse depth image, completed depth image, completed RGB cloud.
67 | ros::Publisher pub_depth_image_;
68 | ros::Publisher pub_completion_image_;
69 | ros::Publisher pub_completed_pointcloud_;
70 | 
71 | dynamic_reconfigure::Server dr_srv_;
72 | 
73 | tf::TransformListener listener_;
74 | 
   | // Parameters mirrored from DepthCompletionParamsConfig via configCallback.
75 | bool enable_;
76 | int diamond_kernel_size_;
77 | int full_kernel_size_;
78 | int closure_kernel_size_;
79 | int fill_kernel_size_;
80 | int median_kernel_size_;
81 | int blur_method_;
82 | int blur_kernel_size_;
83 | double bilateral_sigma_;
84 | };
85 | }
86 | 
87 | #endif // DEPTHCOMPLETION_H
88 | 
--------------------------------------------------------------------------------
/include/sensor_processing/stereo_vision.h:
--------------------------------------------------------------------------------
1 | #ifndef STEREOVISION_H
2 | #define STEREOVISION_H
3 |
4 | #include "sensor_processing/StereoVisionParamsConfig.h"
5 |
6 | #include
7 | #include
8 |
9 | #include
10 | #include
11 |
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 |
18 | namespace sensor_processing
19 | {
20 |
21 | using namespace sensor_msgs;
22 | using namespace stereo_msgs;
23 | using namespace message_filters::sync_policies;
24 |
25 | class StereoVision
26 | {
27 | public:
28 | //! Constructor.
29 | explicit StereoVision(ros::NodeHandle nh);
30 |
31 | private:
32 | void configCallback(sensor_processing::StereoVisionParamsConfig &config, uint32_t level);
33 |
34 | void callback(const ImageConstPtr& l_image_msg,
35 | const CameraInfoConstPtr& l_info_msg,
36 | const ImageConstPtr& r_image_msg,
37 | const CameraInfoConstPtr& r_info_msg);
38 |
39 | void processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect,
40 | DisparityImage& disparity);
41 | void processPoints2(const DisparityImage& disparity,
42 | const cv::Mat& color, const std::string& encoding,
43 | PointCloud2& points);
44 | bool isValidPoint(const cv::Vec3f& pt);
45 | int convertNumDisparities(const int num_disparities);
46 | int convertBlockSize(const int block_size);
47 |
48 | ros::NodeHandle nh_;
49 |
50 | message_filters::Subscriber sub_left_image_;
51 | message_filters::Subscriber sub_left_camera_info_;
52 | message_filters::Subscriber sub_right_image_;
53 | message_filters::Subscriber sub_right_camera_info_;
54 | typedef ExactTime ExactPolicy;
55 | typedef message_filters::Synchronizer ExactSync;
56 | ExactSync exact_sync_;
57 |
58 | ros::Publisher pub_disparity_;
59 | ros::Publisher pub_disparity_image_;;
60 | ros::Publisher pub_points2_;
61 |
62 | dynamic_reconfigure::Server dr_srv_;
63 |
64 | image_geometry::StereoCameraModel model_;
65 | cv::Ptr block_matcher_;
66 | cv::Ptr sg_block_matcher_;
67 |
68 | cv::Mat_ disparity16_;
69 | cv::Mat_ dense_points_;
70 |
71 | int num_disparities_;
72 | int block_size_;
73 | };
74 | }
75 |
76 | #endif // STEREOVISION_H
77 |
--------------------------------------------------------------------------------
/launch/depth_completion_demo.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/launch/stereo_vision_demo.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/launch/tools.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: SASensorProcessing
2 | repo_url: https://github.com/appinho/SASensorProcessing
3 | site_url: https://appinho.github.io/SASensorProcessing
4 | theme: material
5 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 | sensor_processing
3 | 0.0.1
4 |
5 | Stereo depth image and lidar completion package
6 |
7 | Simon Appel
8 | Simon Appel
9 | MIT
10 | https://github.com/appinho/SASensorProcessing
11 |
12 | catkin
13 | dynamic_reconfigure
14 | message_runtime
15 | message_generation
16 | rosgraph_msgs
17 | rospy
18 | roscpp
19 | std_msgs
20 | image_geometry
21 | cv_bridge
22 | pcl_ros
23 |
24 | dynamic_reconfigure
25 | message_runtime
26 | message_generation
27 | rosgraph_msgs
28 | rospy
29 | roscpp
30 | std_msgs
31 | image_geometry
32 | cv_bridge
33 | pcl_ros
34 |
35 | rostest
36 |
37 |
--------------------------------------------------------------------------------
/src/depth_completion.cpp:
--------------------------------------------------------------------------------
1 | #include "sensor_processing/depth_completion.h"
2 |
3 | #include
4 | #include
5 | #include
6 |
7 | namespace sensor_processing
8 | {
9 | DepthCompletion::DepthCompletion(ros::NodeHandle nh) :
10 | nh_(nh),
11 | exact_sync_(ExactPolicy(10),
12 | sub_pointcloud_, sub_left_color_camera_info_, sub_left_color_image_)
13 | {
14 | // Set up a dynamic reconfigure server.
15 | // Do this before parameter server, else some of the parameter server values can be overwritten.
16 | dynamic_reconfigure::Server::CallbackType cb;
17 | cb = boost::bind(&DepthCompletion::configCallback, this, _1, _2);
18 | dr_srv_.setCallback(cb);
19 |
20 | // Initialize node parameters from launch file or command line. Use a private node handle so that multiple instances
21 | // of the node can be run simultaneously while using different parameters.
22 | ros::NodeHandle pnh("~");
23 | int kernel;
24 | pnh.param("diamondKernelSize", kernel, 2);
25 | diamond_kernel_size_ = convertKernelSize(kernel);
26 | pnh.param("fullKernelSize", kernel, 2);
27 | full_kernel_size_ = convertKernelSize(kernel);
28 | pnh.param("closureKernelSize", kernel, 2);
29 | closure_kernel_size_ = convertKernelSize(kernel);
30 | pnh.param("fillKernelSize", kernel, 2);
31 | fill_kernel_size_ = convertKernelSize(kernel);
32 | pnh.param("medianKernelSize", kernel, 2);
33 | median_kernel_size_ = convertKernelSize(kernel);
34 | pnh.param("blurMethod", blur_method_);
35 | pnh.param("blurKernelSize", kernel, 1);
36 | blur_kernel_size_ = convertKernelSize(kernel);
37 | pnh.param("bilateralSigma", bilateral_sigma_, 10.0);
38 |
39 | sub_pointcloud_.subscribe(nh_, "/kitti/velo/pointcloud", 1);
40 | sub_left_color_camera_info_.subscribe(nh_, "/kitti/camera_color_left/camera_info", 1);
41 | sub_left_color_image_.subscribe(nh_, "/kitti/camera_color_left/image_raw", 1);
42 | exact_sync_.registerCallback(boost::bind(&DepthCompletion::callback,
43 | this, _1, _2, _3));
44 |
45 | pub_depth_image_ = nh_.advertise("/kitti/depth_image", 1);
46 | pub_completion_image_ = nh_.advertise("/kitti/completed_image", 1);
47 | pub_completed_pointcloud_ = nh_.advertise("/kitti/completed_pointcloud", 1);
48 |
49 | }
50 |
51 | void DepthCompletion::callback(const PointCloud2ConstPtr& pc_msg,
52 | const CameraInfoConstPtr& l_info_msg,
53 | const ImageConstPtr& l_image_msg)
54 | {
55 |
56 | cv::Mat depth_image;
57 | pointCloudToDepthImage(pc_msg, l_info_msg, depth_image);
58 | cv::Mat depth_image_8;
59 | depthToCV8UC1(depth_image, depth_image_8);
60 | cv_bridge::CvImage cv_bridge_depth_image;
61 | cv_bridge_depth_image.image = depth_image_8;
62 | cv_bridge_depth_image.encoding = "mono8";
63 | cv_bridge_depth_image.header.stamp = l_info_msg->header.stamp;
64 | pub_depth_image_.publish(cv_bridge_depth_image.toImageMsg());
65 |
66 |
67 | cv::Mat depth_completion_image;
68 | processDepthCompletion(l_info_msg, depth_image, depth_completion_image);
69 | cv::Mat depth_completion_image_8;
70 | depthToCV8UC1(depth_completion_image, depth_completion_image_8);
71 | cv_bridge::CvImage cv_bridge_depth_completion_image;
72 | cv_bridge_depth_completion_image.image = depth_completion_image_8;
73 | cv_bridge_depth_completion_image.encoding = "mono8";
74 | cv_bridge_depth_completion_image.header.stamp = l_info_msg->header.stamp;
75 | pub_completion_image_.publish(cv_bridge_depth_completion_image.toImageMsg());
76 |
77 | PointCloud2 pc;
78 | pc.header.frame_id = pc_msg->header.frame_id;
79 | if(enable_)
80 | depthImageToRGBPointCloud(depth_completion_image, l_image_msg, l_info_msg, pc);
81 | else
82 | depthImageToRGBPointCloud(depth_image, l_image_msg, l_info_msg, pc);
83 | pc.header.stamp = ros::Time::now();
84 | pub_completed_pointcloud_.publish(pc);
85 |
86 | }
87 |
// Projects a lidar point cloud into a sparse depth image: TF transform into
// the camera frame, rotation by the rectification matrix R, projection with
// P, then per-pixel nearest-depth selection.
void DepthCompletion::pointCloudToDepthImage(
    const PointCloud2ConstPtr& pc,
    const CameraInfoConstPtr& cam_info,
    cv::Mat& depth_image)
{
  // 1. To cam frame
  PointCloud2 pc_cam;
  std::string target_frame = cam_info->header.frame_id;
  // NOTE(review): the TF success flag is ignored; on lookup failure pc_cam
  // may be empty and the published depth image silently blank — consider
  // checking `transformed`.
  bool transformed = pcl_ros::transformPointCloud(target_frame, *pc, pc_cam, listener_);

  // 2. To rect cam frame
  // Embed the 3x3 rectification matrix R into a homogeneous 4x4 transform.
  PointCloud2 pc_rect_cam;
  Eigen::Matrix4f R = Eigen::Matrix4f();
  R << cam_info->R[0], cam_info->R[1], cam_info->R[2], 0,
       cam_info->R[3], cam_info->R[4], cam_info->R[5], 0,
       cam_info->R[6], cam_info->R[7], cam_info->R[8], 0,
       0, 0, 0, 1;
  pcl_ros::transformPointCloud(R, pc_cam, pc_rect_cam);

  // 3. To image frame
  // Apply the 3x4 projection matrix P (padded to 4x4); after this step the
  // point coordinates are homogeneous pixel coordinates (u*z, v*z, z).
  PointCloud2 pc_img;
  Eigen::Matrix4f P = Eigen::Matrix4f();
  P << cam_info->P[0], cam_info->P[1], cam_info->P[2], cam_info->P[3],
       cam_info->P[4], cam_info->P[5], cam_info->P[6], cam_info->P[7],
       cam_info->P[8], cam_info->P[9], cam_info->P[10], cam_info->P[11],
       0, 0, 0, 1;
  pcl_ros::transformPointCloud(P, pc_rect_cam, pc_img);

  // Init depth image
  // NOTE(review): cv::Mat::zeros takes (rows, cols); passing (width, height)
  // yields a transposed image. This matches the swapped bounds in inImage(),
  // so the convention is at least self-consistent within this file — confirm
  // against the camera image layout before changing either side in isolation.
  if(depth_image.rows == 0 || depth_image.cols == 0)
  {
    depth_image = cv::Mat::zeros(cam_info->width, cam_info->height, CV_32FC1);
  }

  // Iterate the rectified-frame depth (z) in lockstep with the projected
  // homogeneous pixel coordinates.
  PointCloud2Iterator iter_cam_z(pc_rect_cam, "z");
  PointCloud2Iterator iter_img_x(pc_img, "x");
  PointCloud2Iterator iter_img_y(pc_img, "y");
  PointCloud2Iterator iter_img_z(pc_img, "z");

  for (; iter_cam_z != iter_cam_z.end();
       ++iter_cam_z, ++iter_img_x, ++iter_img_y, ++iter_img_z)
  {
    // Points behind the camera cannot appear in the image.
    const float& depth = *iter_cam_z;
    if (depth <= 0) continue;

    const float& img_x = *iter_img_x;
    const float& img_y = *iter_img_y;
    const float& img_z = *iter_img_z;

    // Homogeneous divide to obtain integer pixel coordinates.
    if (img_z == 0) continue;
    const int u = img_x / img_z;
    const int v = img_y / img_z;

    // Keep the nearest depth when several lidar points land on one pixel.
    if (inImage(cam_info, u, v))
    {
      if (depth_image.at(v, u) == 0 ||
          depth_image.at(v, u) > depth)
      {
        depth_image.at(v, u) = depth;
      }
    }
  }
}
153 |
// Back-projects a depth image into a colorized point cloud, reversing the
// projection chain of pointCloudToDepthImage: build homogeneous pixel points
// (u*z, v*z, z), apply P^-1, then R^-1, then TF back into pc's frame_id.
// The caller provides the target frame via pc.header.frame_id.
void DepthCompletion::depthImageToRGBPointCloud(
    const cv::Mat depth_image,
    const ImageConstPtr& image_msg,
    const CameraInfoConstPtr& cam_info,
    PointCloud2 & pc)
{
  const cv::Mat_ cv_image = cv_bridge::toCvShare(image_msg, image_encodings::BGR8)->image;
  pcl::PointCloud pcl_img;
  for(int u = 0; u < depth_image.cols; u++)
  {
    for(int v = 0; v < depth_image.rows; v++)
    {
      // Zero means "no depth measured" for this pixel.
      const float & depth = depth_image.at(v, u);
      if (depth == 0) continue;
      // Homogeneous pixel coordinates: (u*z, v*z, z).
      pcl::PointXYZRGB point;
      const float img_x = u * depth;
      const float img_y = v * depth;
      const float img_z = depth;
      point.x = img_x;
      point.y = img_y;
      point.z = img_z;
      // NOTE(review): depth_image appears to be allocated transposed
      // (see cv::Mat::zeros(width, height) elsewhere) while cv_image is in
      // sensor layout; cv_image.at(v, u) may then index out of bounds for
      // v >= image height — verify the two layouts agree.
      point.r = cv_image.at(v, u)[2];
      point.g = cv_image.at(v, u)[1];
      point.b = cv_image.at(v, u)[0];
      pcl_img.points.push_back(point);
    }
  }
  PointCloud2 pc_img;
  pcl::toROSMsg(pcl_img, pc_img);

  // Undo the projection: image frame -> rectified camera frame.
  Eigen::Matrix4f P = Eigen::Matrix4f();
  P << cam_info->P[0], cam_info->P[1], cam_info->P[2], cam_info->P[3],
       cam_info->P[4], cam_info->P[5], cam_info->P[6], cam_info->P[7],
       cam_info->P[8], cam_info->P[9], cam_info->P[10], cam_info->P[11],
       0, 0, 0, 1;
  PointCloud2 pc_rect_cam;
  pcl_ros::transformPointCloud(P.inverse(), pc_img, pc_rect_cam);

  // Undo the rectification: rectified camera frame -> camera frame.
  PointCloud2 pc_cam;
  Eigen::Matrix4f R = Eigen::Matrix4f();
  R << cam_info->R[0], cam_info->R[1], cam_info->R[2], 0,
       cam_info->R[3], cam_info->R[4], cam_info->R[5], 0,
       cam_info->R[6], cam_info->R[7], cam_info->R[8], 0,
       0, 0, 0, 1;
  pcl_ros::transformPointCloud(R.inverse(), pc_rect_cam, pc_cam);

  // TF back into the caller-requested frame.
  // NOTE(review): the TF success flag is ignored, as in
  // pointCloudToDepthImage — consider checking `transformed`.
  std::string target_frame = pc.header.frame_id;
  std::string source_frame = cam_info->header.frame_id;
  pc_cam.header.stamp = ros::Time::now();
  pc_cam.header.frame_id = source_frame;
  bool transformed = pcl_ros::transformPointCloud(target_frame, pc_cam, pc, listener_);
}
207 |
// Densifies a sparse depth image using the morphology pipeline of
// Ku et al., "In Defense of Classical Image Processing: Fast Depth
// Completion on the CPU": invert depths, dilate with a diamond kernel,
// close small holes, fill remaining holes, blur, and invert back.
// The constant 100 acts as the assumed maximum depth for the inversion.
void DepthCompletion::processDepthCompletion(
    const CameraInfoConstPtr& cam_info,
    const cv::Mat depth_image,
    cv::Mat & depth_completion_image)
{
  // 1. Depth Inversion
  // Invert valid depths (d -> 100 - d) so that dilation favors NEAR values;
  // zero pixels stay zero and mean "unknown".
  // NOTE(review): cv::Mat::zeros takes (rows, cols); (width, height) keeps
  // the transposed layout used throughout this file — see inImage().
  cv::Mat inv_depth_image = cv::Mat::zeros(cam_info->width, cam_info->height, CV_32FC1);
  for(int u = 0; u < depth_image.cols; u++)
  {
    for(int v = 0; v < depth_image.rows; v++)
    {
      const float & depth = depth_image.at(v, u);
      if (depth == 0) continue;
      inv_depth_image.at(v, u) = 100 - depth;
    }
  }

  // 2. Custom Kernel Dilation
  // Build a diamond (L1-ball) structuring element of half-width
  // diamond_kernel_size_ and dilate with it. Note diamond_kernel_size_ is
  // stored as a half-width and expanded here via convertKernelSize().
  // NOTE(review): this guard tests != 0 while the later steps test > 0 —
  // confirm negative config values are impossible or align the checks.
  // TODO: Move kernel to constructor once parameter are fix
  cv::Mat dilated_depth_image_1;
  if(diamond_kernel_size_){
    cv::Mat diamond_kernel =
        cv::Mat::zeros(convertKernelSize(diamond_kernel_size_),
                       convertKernelSize(diamond_kernel_size_ ), CV_8UC1);
    for(int i = -diamond_kernel_size_; i <=diamond_kernel_size_; i++)
    {
      // Row i of the diamond spans half-width r = k - |i| around the center.
      int r = diamond_kernel_size_ - std::abs(i);
      for(int j = -r; j <= r; j++)
      {
        int x = diamond_kernel_size_ + i;
        int y = diamond_kernel_size_ + j;
        diamond_kernel.at(y, x) = 1;
      }
    }
    cv::dilate(inv_depth_image, dilated_depth_image_1, diamond_kernel);
  }
  else{
    dilated_depth_image_1 = inv_depth_image;
  }

  // 3. Small Hole Closure
  // Morphological closing (dilate then erode) with a full square kernel.
  cv::Mat dilated_depth_image_2;
  if(closure_kernel_size_ > 0){
    cv::Mat closure_kernel = cv::Mat::ones(
        closure_kernel_size_, closure_kernel_size_, CV_8UC1);
    cv::morphologyEx(dilated_depth_image_1, dilated_depth_image_2,
                     cv::MORPH_CLOSE, closure_kernel);
  }
  else{
    dilated_depth_image_2 = dilated_depth_image_1;
  }

  // 4. Small Hole Fill
  // Dilate a copy and paste the dilated values into pixels that are still
  // empty (== 0); already-valid pixels are left untouched.
  cv::Mat empty_depth_image;
  if(fill_kernel_size_ > 0){
    cv::Mat fill_kernel = cv::Mat::ones(
        fill_kernel_size_, fill_kernel_size_, CV_8UC1);
    cv::dilate(dilated_depth_image_2, empty_depth_image, fill_kernel);
    for(int u = 0; u < dilated_depth_image_2.cols; u++)
    {
      for(int v = 0; v < dilated_depth_image_2.rows; v++)
      {
        const float & depth = dilated_depth_image_2.at(v, u);
        if (depth != 0) continue;
        dilated_depth_image_2.at(v, u) = empty_depth_image.at(v, u);
      }
    }
  }

  // 5. Extension to top of frame
  // 6. Large Hole Fill
  // (Not implemented; see the reference pipeline.)

  // 7. Blurring
  // Median blur to remove outliers, then an optional edge-preserving
  // (bilateral, blur_method_ == 2) or Gaussian (blur_method_ == 1) blur.
  cv::Mat blurred_depth_image;
  if(median_kernel_size_ > 0){
    cv::medianBlur(dilated_depth_image_2, blurred_depth_image, median_kernel_size_);
  }
  else{
    blurred_depth_image = dilated_depth_image_2;
  }
  cv::Mat final_depth_image;
  if(blur_method_ == 2){
    cv::bilateralFilter(blurred_depth_image, final_depth_image, blur_kernel_size_,
                        bilateral_sigma_, bilateral_sigma_);
  }
  else if(blur_method_ == 1){
    cv::GaussianBlur(blurred_depth_image, final_depth_image,
                     cv::Size(blur_kernel_size_, blur_kernel_size_), 0);
  }
  else{
    final_depth_image = blurred_depth_image;
  }

  // 8. Depth Inversion
  // Undo the inversion of step 1 on all non-empty pixels.
  depth_completion_image = cv::Mat::zeros(cam_info->width, cam_info->height, CV_32FC1);
  for(int u = 0; u < final_depth_image.cols; u++)
  {
    for(int v = 0; v < final_depth_image.rows; v++)
    {
      const float & depth = final_depth_image.at(v, u);
      if (depth == 0) continue;
      depth_completion_image.at(v, u) = 100 - final_depth_image.at(v, u);
    }
  }
}
313 |
314 | bool DepthCompletion::inImage(const CameraInfoConstPtr& cam_info, const int u, const int v)
315 | {
316 | return(u >= 0 && u < cam_info->height && v >= 0 && v < cam_info->width);
317 | }
318 |
319 | void DepthCompletion::depthToCV8UC1(const cv::Mat& float_img, cv::Mat& mono8_img){
320 | //Process images
321 | if(mono8_img.rows != float_img.rows || mono8_img.cols != float_img.cols)
322 | {
323 | mono8_img = cv::Mat(float_img.size(), CV_8UC1);
324 | }
325 | //The following doesn't work if there are NaNs
326 | double minVal, maxVal;
327 | minMaxLoc(float_img, &minVal, &maxVal);
328 | ROS_INFO("Minimum/Maximum Depth in current image: %f/%f", minVal, maxVal);
329 | cv::convertScaleAbs(float_img, mono8_img, 2.55, 0.0);
330 | }
331 |
// Dynamic reconfigure hook: copies the new configuration into the members
// and logs every value. Most kernel sizes arrive as half-widths and are
// expanded to full odd widths via convertKernelSize() here;
// diamond_kernel_size_ is deliberately kept as a half-width because
// processDepthCompletion() expands it at the point of use.
void DepthCompletion::configCallback(sensor_processing::DepthCompletionParamsConfig &config, uint32_t level)
{
  enable_ = config.enable;
  diamond_kernel_size_ = config.diamondKernelSize;
  full_kernel_size_ = convertKernelSize(config.fullKernelSize);
  closure_kernel_size_ = convertKernelSize(config.closureKernelSize);
  fill_kernel_size_ = convertKernelSize(config.fillKernelSize);
  median_kernel_size_ = convertKernelSize(config.medianKernelSize);
  blur_method_ = config.blurMethod;
  blur_kernel_size_ = convertKernelSize(config.blurKernelSize);
  bilateral_sigma_ = config.bilateralSigma;
  ROS_INFO("Reconfigure Request");
  ROS_INFO("enable %d", enable_);
  ROS_INFO("diamondKernelSize %d", diamond_kernel_size_);
  ROS_INFO("fullKernelSize %d", full_kernel_size_);
  ROS_INFO("closureKernelSize %d", closure_kernel_size_);
  ROS_INFO("fillKernelSize %d", fill_kernel_size_);
  ROS_INFO("medianKernelSize %d", median_kernel_size_);
  ROS_INFO("blurMethod %d", blur_method_);
  ROS_INFO("blurKernelSize %d", blur_kernel_size_);
  ROS_INFO("bilateralSigma %f", bilateral_sigma_);
}
354 |
355 | int DepthCompletion::convertKernelSize(const int kernel_size){
356 | return 2 * kernel_size + 1;
357 | }
358 |
359 | }
360 |
--------------------------------------------------------------------------------
/src/depth_completion.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import rospy
5 |
6 | # from dynamic_reconfigure.server import Server as DynamicReconfigureServer
7 | from sensor_msgs.msg import PointCloud2, Image, CameraInfo
8 | from cv_bridge import CvBridge, CvBridgeError
9 |
10 | # from stereo_image.cfg import DepthCompletionConfig as ConfigType
11 |
12 | import glob
13 | import os
14 | import sys
15 | import time
16 |
17 | import cv2
18 | import numpy as np
19 | # import png
20 |
21 | #import depth_map_utils
22 | #import vis_utils
23 |
24 |
25 |
class DepthCompletion(object):
    """Skeleton ROS node for lidar depth completion.

    Subscribes to the KITTI point cloud and camera-info topics and publishes
    a placeholder image; the actual completion is not implemented here.
    """

    def __init__(self):
        # Converts between ROS Image messages and OpenCV/numpy arrays.
        self.bridge = CvBridge()

        self.sub_pc = rospy.Subscriber('/kitti/velo/pointcloud', PointCloud2, self.callback_pc)
        self.sub_ci = rospy.Subscriber('/kitti/camera_gray_left/camera_info', CameraInfo, self.callback_ci)
        #self.server = DynamicReconfigureServer(ConfigType, self.reconfigure_cb)

        self.pub = rospy.Publisher('/kitti/depth_completion', Image, queue_size=10)

    def callback_pc(self, pointcloud):
        # Placeholder: the incoming cloud is ignored and a black 400x600 BGR
        # image is published instead.
        cv_image = np.zeros([400, 600, 3], dtype=np.uint8)

        try:
            self.pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)

    def callback_ci(self, camera_info):
        # camera_info.R rectifies the camera frame; camera_info.P projects
        # rectified 3D points to pixel coordinates. Currently only printed.
        cam_to_rectcam = camera_info.R
        rectcam_to_image = camera_info.P
        print(camera_info.P)

    def reconfigure_cb(self, config, dummy):
        # Dynamic-reconfigure callback; the body below is commented out and
        # the config is returned unchanged.
        """
        self.message = config["message"]
        self.int_a = config["a"]
        self.int_b = config["b"]

        # Check to see if node should be started or stopped.
        if self.enable != config["enable"]:
            if config["enable"]:
                self.start()
            else:
                self.stop()
        self.enable = config["enable"]

        # Return the new variables.
        """
        return config
72 |
if __name__ == '__main__':
    # Initialize the node, construct the (self-registering) subscriber
    # object, and hand control to the ROS event loop.
    rospy.init_node('lidar_depth_completion')

    try:
        DepthCompletion()
    except rospy.ROSInterruptException as e:
        print(e)

    rospy.spin()

# BUG FIX: a second `if __name__ == "__main__": main()` guard was removed
# here — `main` is only defined inside the dead string literal below, so the
# duplicate guard raised NameError after rospy.spin() returned.
87 |
88 | """
89 | def main():
90 |
91 | ##############################
92 | # Options
93 | ##############################
94 | # Validation set
95 | input_depth_dir = os.path.expanduser(
96 | '~/Kitti/depth/depth_selection/val_selection_cropped/velodyne_raw')
97 | data_split = 'val'
98 |
99 | # Test set
100 | # input_depth_dir = os.path.expanduser(
101 | # '~/Kitti/depth/depth_selection/test_depth_completion_anonymous/velodyne_raw')
102 | # data_split = 'test'
103 |
104 | # Fast fill with Gaussian blur @90Hz (paper result)
105 | fill_type = 'fast'
106 | extrapolate = True
107 | blur_type = 'gaussian'
108 |
109 | # Fast Fill with bilateral blur, no extrapolation @87Hz (recommended)
110 | # fill_type = 'fast'
111 | # extrapolate = False
112 | # blur_type = 'bilateral'
113 |
114 | # Multi-scale dilations with extra noise removal, no extrapolation @ 30Hz
115 | # fill_type = 'multiscale'
116 | # extrapolate = False
117 | # blur_type = 'bilateral'
118 |
119 | # Save output to disk or show process
120 | save_output = True
121 |
122 | ##############################
123 | # Processing
124 | ##############################
125 | if save_output:
126 | # Save to Disk
127 | show_process = False
128 | save_depth_maps = True
129 | else:
130 | if fill_type == 'fast':
131 | raise ValueError('"fast" fill does not support show_process')
132 |
133 | # Show Process
134 | show_process = True
135 | save_depth_maps = False
136 |
137 | # Create output folder
138 | this_file_path = os.path.dirname(os.path.realpath(__file__))
139 | outputs_dir = this_file_path + '/outputs'
140 | os.makedirs(outputs_dir, exist_ok=True)
141 |
142 | output_folder_prefix = 'depth_' + data_split
143 | output_list = sorted(os.listdir(outputs_dir))
144 | if len(output_list) > 0:
145 | split_folders = [folder for folder in output_list
146 | if folder.startswith(output_folder_prefix)]
147 | if len(split_folders) > 0:
148 | last_output_folder = split_folders[-1]
149 | last_output_index = int(last_output_folder.split('_')[-1])
150 | else:
151 | last_output_index = -1
152 | else:
153 | last_output_index = -1
154 | output_depth_dir = outputs_dir + '/{}_{:03d}'.format(
155 | output_folder_prefix, last_output_index + 1)
156 |
157 | if save_output:
158 | if not os.path.exists(output_depth_dir):
159 | os.makedirs(output_depth_dir)
160 | else:
161 | raise FileExistsError('Already exists!')
162 | print('Output dir:', output_depth_dir)
163 |
164 | # Get images in sorted order
165 | images_to_use = sorted(glob.glob(input_depth_dir + '/*'))
166 |
167 | # Rolling average array of times for time estimation
168 | avg_time_arr_length = 10
169 | last_fill_times = np.repeat([1.0], avg_time_arr_length)
170 | last_total_times = np.repeat([1.0], avg_time_arr_length)
171 |
172 | num_images = len(images_to_use)
173 | for i in range(num_images):
174 |
175 | depth_image_path = images_to_use[i]
176 |
177 | # Calculate average time with last n fill times
178 | avg_fill_time = np.mean(last_fill_times)
179 | avg_total_time = np.mean(last_total_times)
180 |
181 | # Show progress
182 | sys.stdout.write('\rProcessing {} / {}, '
183 | 'Avg Fill Time: {:.5f}s, '
184 | 'Avg Total Time: {:.5f}s, '
185 | 'Est Time Remaining: {:.3f}s'.format(
186 | i, num_images - 1, avg_fill_time, avg_total_time,
187 | avg_total_time * (num_images - i)))
188 | sys.stdout.flush()
189 |
190 | # Start timing
191 | start_total_time = time.time()
192 |
193 | # Load depth projections from uint16 image
194 | depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
195 | projected_depths = np.float32(depth_image / 256.0)
196 |
197 | # Fill in
198 | start_fill_time = time.time()
199 | if fill_type == 'fast':
200 | final_depths = depth_map_utils.fill_in_fast(
201 | projected_depths, extrapolate=extrapolate, blur_type=blur_type)
202 | elif fill_type == 'multiscale':
203 | final_depths, process_dict = depth_map_utils.fill_in_multiscale(
204 | projected_depths, extrapolate=extrapolate, blur_type=blur_type,
205 | show_process=show_process)
206 | else:
207 | raise ValueError('Invalid fill_type {}'.format(fill_type))
208 | end_fill_time = time.time()
209 |
210 | # Display images from process_dict
211 | if fill_type == 'multiscale' and show_process:
212 | img_size = (570, 165)
213 |
214 | x_start = 80
215 | y_start = 50
216 | x_offset = img_size[0]
217 | y_offset = img_size[1]
218 | x_padding = 0
219 | y_padding = 28
220 |
221 | img_x = x_start
222 | img_y = y_start
223 | max_x = 1900
224 |
225 | row_idx = 0
226 | for key, value in process_dict.items():
227 |
228 | image_jet = cv2.applyColorMap(
229 | np.uint8(value / np.amax(value) * 255),
230 | cv2.COLORMAP_JET)
231 | vis_utils.cv2_show_image(
232 | key, image_jet,
233 | img_size, (img_x, img_y))
234 |
235 | img_x += x_offset + x_padding
236 | if (img_x + x_offset + x_padding) > max_x:
237 | img_x = x_start
238 | row_idx += 1
239 | img_y = y_start + row_idx * (y_offset + y_padding)
240 |
241 | # Save process images
242 | cv2.imwrite('process/' + key + '.png', image_jet)
243 |
244 | cv2.waitKey()
245 |
246 | # Save depth images to disk
247 | if save_depth_maps:
248 | depth_image_file_name = os.path.split(depth_image_path)[1]
249 |
250 | # Save depth map to a uint16 png (same format as disparity maps)
251 | file_path = output_depth_dir + '/' + depth_image_file_name
252 | with open(file_path, 'wb') as f:
253 | depth_image = (final_depths * 256).astype(np.uint16)
254 |
255 | # pypng is used because cv2 cannot save uint16 format images
256 | writer = png.Writer(width=depth_image.shape[1],
257 | height=depth_image.shape[0],
258 | bitdepth=16,
259 | greyscale=True)
260 | writer.write(f, depth_image)
261 |
262 | end_total_time = time.time()
263 |
264 | # Update fill times
265 | last_fill_times = np.roll(last_fill_times, -1)
266 | last_fill_times[-1] = end_fill_time - start_fill_time
267 |
268 | # Update total times
269 | last_total_times = np.roll(last_total_times, -1)
270 | last_total_times[-1] = end_total_time - start_total_time
271 | """
--------------------------------------------------------------------------------
/src/depth_map_utils.py:
--------------------------------------------------------------------------------
1 | import collections
2 |
3 | import cv2
4 | import numpy as np
5 |
# Structuring elements for the OpenCV morphology operations below: uint8
# masks where 1 marks a pixel belonging to the kernel footprint.

# Full kernels
FULL_KERNEL_3 = np.ones((3, 3), np.uint8)
FULL_KERNEL_5 = np.ones((5, 5), np.uint8)
FULL_KERNEL_7 = np.ones((7, 7), np.uint8)
FULL_KERNEL_9 = np.ones((9, 9), np.uint8)
FULL_KERNEL_31 = np.ones((31, 31), np.uint8)

# 3x3 cross kernel
CROSS_KERNEL_3 = np.asarray(
    [
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0],
    ], dtype=np.uint8)

# 5x5 cross kernel
CROSS_KERNEL_5 = np.asarray(
    [
        [0, 0, 1, 0, 0],
        [0, 0, 1, 0, 0],
        [1, 1, 1, 1, 1],
        [0, 0, 1, 0, 0],
        [0, 0, 1, 0, 0],
    ], dtype=np.uint8)

# 5x5 diamond kernel
DIAMOND_KERNEL_5 = np.array(
    [
        [0, 0, 1, 0, 0],
        [0, 1, 1, 1, 0],
        [1, 1, 1, 1, 1],
        [0, 1, 1, 1, 0],
        [0, 0, 1, 0, 0],
    ], dtype=np.uint8)

# 7x7 cross kernel
CROSS_KERNEL_7 = np.asarray(
    [
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [1, 1, 1, 1, 1, 1, 1],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
    ], dtype=np.uint8)

# 7x7 diamond kernel
DIAMOND_KERNEL_7 = np.asarray(
    [
        [0, 0, 0, 1, 0, 0, 0],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 1, 1, 1, 1, 0],
        [1, 1, 1, 1, 1, 1, 1],
        [0, 1, 1, 1, 1, 1, 0],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 0, 0, 1, 0, 0, 0],
    ], dtype=np.uint8)
64 |
65 |
def fill_in_fast(depth_map, max_depth=100.0, custom_kernel=DIAMOND_KERNEL_5,
                 extrapolate=False, blur_type='bilateral'):
    """Fast depth completion.

    Note: the input array is modified in place by the masked assignments;
    the returned array may or may not alias the input because the OpenCV
    calls rebind ``depth_map``.

    Args:
        depth_map: projected depths
        max_depth: max depth value for inversion
        custom_kernel: kernel to apply initial dilation
        extrapolate: whether to extrapolate by extending depths to top of
            the frame, and applying a 31x31 full kernel dilation
        blur_type:
            'bilateral' - preserves local structure (recommended)
            'gaussian' - provides lower RMSE
    Returns:
        depth_map: dense depth map
    Raises:
        ValueError: if blur_type is neither 'bilateral' nor 'gaussian'.
    """
    # BUG FIX: previously an unrecognized blur_type silently skipped the
    # final blur; validate up front, mirroring the fill_type check upstream.
    if blur_type not in ('bilateral', 'gaussian'):
        raise ValueError('Invalid blur_type {}'.format(blur_type))

    # Invert: d -> max_depth - d, so that dilation favors NEAR depths.
    valid_pixels = (depth_map > 0.1)
    depth_map[valid_pixels] = max_depth - depth_map[valid_pixels]

    # Dilate
    depth_map = cv2.dilate(depth_map, custom_kernel)

    # Hole closing
    depth_map = cv2.morphologyEx(depth_map, cv2.MORPH_CLOSE, FULL_KERNEL_5)

    # Fill empty spaces with dilated values
    empty_pixels = (depth_map < 0.1)
    dilated = cv2.dilate(depth_map, FULL_KERNEL_7)
    depth_map[empty_pixels] = dilated[empty_pixels]

    # Extend highest pixel to top of image
    if extrapolate:
        # argmax finds the first (topmost) valid row per column.
        top_row_pixels = np.argmax(depth_map > 0.1, axis=0)
        top_pixel_values = depth_map[top_row_pixels, range(depth_map.shape[1])]

        for pixel_col_idx in range(depth_map.shape[1]):
            depth_map[0:top_row_pixels[pixel_col_idx], pixel_col_idx] = \
                top_pixel_values[pixel_col_idx]

        # Large Fill
        empty_pixels = depth_map < 0.1
        dilated = cv2.dilate(depth_map, FULL_KERNEL_31)
        depth_map[empty_pixels] = dilated[empty_pixels]

    # Median blur to knock out isolated outliers.
    depth_map = cv2.medianBlur(depth_map, 5)

    # Bilateral or Gaussian blur
    if blur_type == 'bilateral':
        # Bilateral blur
        depth_map = cv2.bilateralFilter(depth_map, 5, 1.5, 2.0)
    elif blur_type == 'gaussian':
        # Gaussian blur, applied only where depths are valid.
        valid_pixels = (depth_map > 0.1)
        blurred = cv2.GaussianBlur(depth_map, (5, 5), 0)
        depth_map[valid_pixels] = blurred[valid_pixels]

    # Invert back to metric depths.
    valid_pixels = (depth_map > 0.1)
    depth_map[valid_pixels] = max_depth - depth_map[valid_pixels]

    return depth_map
129 |
130 |
131 | def fill_in_multiscale(depth_map, max_depth=100.0,
132 | dilation_kernel_far=CROSS_KERNEL_3,
133 | dilation_kernel_med=CROSS_KERNEL_5,
134 | dilation_kernel_near=CROSS_KERNEL_7,
135 | extrapolate=False,
136 | blur_type='bilateral',
137 | show_process=False):
138 | """Slower, multi-scale dilation version with additional noise removal that
139 | provides better qualitative results.
140 | Args:
141 | depth_map: projected depths
142 | max_depth: max depth value for inversion
143 | dilation_kernel_far: dilation kernel to use for 30.0 < depths < 80.0 m
144 | dilation_kernel_med: dilation kernel to use for 15.0 < depths < 30.0 m
145 | dilation_kernel_near: dilation kernel to use for 0.1 < depths < 15.0 m
146 | extrapolate:whether to extrapolate by extending depths to top of
147 | the frame, and applying a 31x31 full kernel dilation
148 | blur_type:
149 | 'gaussian' - provides lower RMSE
150 | 'bilateral' - preserves local structure (recommended)
151 | show_process: saves process images into an OrderedDict
152 | Returns:
153 | depth_map: dense depth map
154 | process_dict: OrderedDict of process images
155 | """
156 |
157 | # Convert to float32
158 | depths_in = np.float32(depth_map)
159 |
160 | # Calculate bin masks before inversion
161 | valid_pixels_near = (depths_in > 0.1) & (depths_in <= 15.0)
162 | valid_pixels_med = (depths_in > 15.0) & (depths_in <= 30.0)
163 | valid_pixels_far = (depths_in > 30.0)
164 |
165 | # Invert (and offset)
166 | s1_inverted_depths = np.copy(depths_in)
167 | valid_pixels = (s1_inverted_depths > 0.1)
168 | s1_inverted_depths[valid_pixels] = \
169 | max_depth - s1_inverted_depths[valid_pixels]
170 |
171 | # Multi-scale dilation
172 | dilated_far = cv2.dilate(
173 | np.multiply(s1_inverted_depths, valid_pixels_far),
174 | dilation_kernel_far)
175 | dilated_med = cv2.dilate(
176 | np.multiply(s1_inverted_depths, valid_pixels_med),
177 | dilation_kernel_med)
178 | dilated_near = cv2.dilate(
179 | np.multiply(s1_inverted_depths, valid_pixels_near),
180 | dilation_kernel_near)
181 |
182 | # Find valid pixels for each binned dilation
183 | valid_pixels_near = (dilated_near > 0.1)
184 | valid_pixels_med = (dilated_med > 0.1)
185 | valid_pixels_far = (dilated_far > 0.1)
186 |
187 | # Combine dilated versions, starting farthest to nearest
188 | s2_dilated_depths = np.copy(s1_inverted_depths)
189 | s2_dilated_depths[valid_pixels_far] = dilated_far[valid_pixels_far]
190 | s2_dilated_depths[valid_pixels_med] = dilated_med[valid_pixels_med]
191 | s2_dilated_depths[valid_pixels_near] = dilated_near[valid_pixels_near]
192 |
193 | # Small hole closure
194 | s3_closed_depths = cv2.morphologyEx(
195 | s2_dilated_depths, cv2.MORPH_CLOSE, FULL_KERNEL_5)
196 |
197 | # Median blur to remove outliers
198 | s4_blurred_depths = np.copy(s3_closed_depths)
199 | blurred = cv2.medianBlur(s3_closed_depths, 5)
200 | valid_pixels = (s3_closed_depths > 0.1)
201 | s4_blurred_depths[valid_pixels] = blurred[valid_pixels]
202 |
203 | # Calculate a top mask
204 | top_mask = np.ones(depths_in.shape, dtype=np.bool)
205 | for pixel_col_idx in range(s4_blurred_depths.shape[1]):
206 | pixel_col = s4_blurred_depths[:, pixel_col_idx]
207 | top_pixel_row = np.argmax(pixel_col > 0.1)
208 | top_mask[0:top_pixel_row, pixel_col_idx] = False
209 |
210 | # Get empty mask
211 | valid_pixels = (s4_blurred_depths > 0.1)
212 | empty_pixels = ~valid_pixels & top_mask
213 |
214 | # Hole fill
215 | dilated = cv2.dilate(s4_blurred_depths, FULL_KERNEL_9)
216 | s5_dilated_depths = np.copy(s4_blurred_depths)
217 | s5_dilated_depths[empty_pixels] = dilated[empty_pixels]
218 |
219 | # Extend highest pixel to top of image or create top mask
220 | s6_extended_depths = np.copy(s5_dilated_depths)
221 | top_mask = np.ones(s5_dilated_depths.shape, dtype=np.bool)
222 |
223 | top_row_pixels = np.argmax(s5_dilated_depths > 0.1, axis=0)
224 | top_pixel_values = s5_dilated_depths[top_row_pixels,
225 | range(s5_dilated_depths.shape[1])]
226 |
227 | for pixel_col_idx in range(s5_dilated_depths.shape[1]):
228 | if extrapolate:
229 | s6_extended_depths[0:top_row_pixels[pixel_col_idx],
230 | pixel_col_idx] = top_pixel_values[pixel_col_idx]
231 | else:
232 | # Create top mask
233 | top_mask[0:top_row_pixels[pixel_col_idx], pixel_col_idx] = False
234 |
235 | # Fill large holes with masked dilations
236 | s7_blurred_depths = np.copy(s6_extended_depths)
237 | for i in range(6):
238 | empty_pixels = (s7_blurred_depths < 0.1) & top_mask
239 | dilated = cv2.dilate(s7_blurred_depths, FULL_KERNEL_5)
240 | s7_blurred_depths[empty_pixels] = dilated[empty_pixels]
241 |
242 | # Median blur
243 | blurred = cv2.medianBlur(s7_blurred_depths, 5)
244 | valid_pixels = (s7_blurred_depths > 0.1) & top_mask
245 | s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
246 |
247 | if blur_type == 'gaussian':
248 | # Gaussian blur
249 | blurred = cv2.GaussianBlur(s7_blurred_depths, (5, 5), 0)
250 | valid_pixels = (s7_blurred_depths > 0.1) & top_mask
251 | s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
252 | elif blur_type == 'bilateral':
253 | # Bilateral blur
254 | blurred = cv2.bilateralFilter(s7_blurred_depths, 5, 0.5, 2.0)
255 | s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
256 |
257 | # Invert (and offset)
258 | s8_inverted_depths = np.copy(s7_blurred_depths)
259 | valid_pixels = np.where(s8_inverted_depths > 0.1)
260 | s8_inverted_depths[valid_pixels] = \
261 | max_depth - s8_inverted_depths[valid_pixels]
262 |
263 | depths_out = s8_inverted_depths
264 |
265 | process_dict = None
266 | if show_process:
267 | process_dict = collections.OrderedDict()
268 |
269 | process_dict['s0_depths_in'] = depths_in
270 |
271 | process_dict['s1_inverted_depths'] = s1_inverted_depths
272 | process_dict['s2_dilated_depths'] = s2_dilated_depths
273 | process_dict['s3_closed_depths'] = s3_closed_depths
274 | process_dict['s4_blurred_depths'] = s4_blurred_depths
275 | process_dict['s5_combined_depths'] = s5_dilated_depths
276 | process_dict['s6_extended_depths'] = s6_extended_depths
277 | process_dict['s7_blurred_depths'] = s7_blurred_depths
278 | process_dict['s8_inverted_depths'] = s8_inverted_depths
279 |
280 | process_dict['s9_depths_out'] = depths_out
281 |
282 | return depths_out, process_dict
283 |
--------------------------------------------------------------------------------
/src/nodes/depth_completion_node.cpp:
--------------------------------------------------------------------------------
1 | #include "sensor_processing/depth_completion.h"
2 |
// Entry point for the depth_completion node.
int main(int argc, char **argv)
{
  // Set up ROS.
  ros::init(argc, argv, "depth_completion");
  ros::NodeHandle nh;

  // Create the DepthCompletion node object; its constructor registers all
  // subscribers/publishers on the given node handle.
  sensor_processing::DepthCompletion node(nh);

  // Let ROS handle all callbacks.
  ros::spin();

  return 0;
}  // end main()
--------------------------------------------------------------------------------
/src/nodes/stereo_vision_node.cpp:
--------------------------------------------------------------------------------
1 | #include "sensor_processing/stereo_vision.h"
2 |
// Entry point for the stereo_vision node.
int main(int argc, char **argv)
{
  // Set up ROS.
  ros::init(argc, argv, "stereo_vision");
  ros::NodeHandle nh;

  // Create the StereoVision node object; its constructor registers all
  // subscribers/publishers on the given node handle.
  sensor_processing::StereoVision node(nh);

  // Let ROS handle all callbacks.
  ros::spin();

  return 0;
}  // end main()
--------------------------------------------------------------------------------
/src/pystereo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | from __future__ import division
4 |
5 | import roslib
6 | roslib.load_manifest('stereo_image')
7 |
8 | from dynamic_reconfigure.server import Server as DynamicReconfigureServer
9 | from stereo_image.cfg import StereoParamsConfig as ConfigType
10 | import cv2
11 | from image_geometry import StereoCameraModel
12 | from cv_bridge import CvBridge, CvBridgeError
13 | import message_filters
14 | from sensor_msgs.msg import Image, CameraInfo
15 | import rospy
16 | import numpy as np
17 | from sensor_msgs.msg import PointCloud
18 | from geometry_msgs.msg import Point32
19 | import std_msgs.msg
20 |
21 | import matplotlib.pyplot as plt
22 |
class StereoImageNode(object):
    """ROS node that computes a stereo disparity image, a depth image, and a
    sparse PointCloud from time-synchronized KITTI stereo image pairs using
    OpenCV block matching (cv2.StereoBM)."""

    def __init__(self):
        """Set up dynamic reconfigure, the block matcher and camera model,
        the synchronized stereo subscribers, and the output publishers."""

        # Parameters
        self.server = DynamicReconfigureServer(ConfigType, self.reconfigure_callback)
        # Raw parameter values are mapped to valid StereoBM arguments:
        # numDisparities -> 16*n (must be a multiple of 16),
        # blockSize      -> 2*n+5 (must be odd and >= 5).
        self.numDisparities = self.convert_num_disparities(rospy.get_param('~numDisparities', 2))
        self.blockSize = self.convert_block_size(rospy.get_param('~blockSize', 8))

        # Stereo matcher and model
        self.block_matcher = cv2.StereoBM_create(
            numDisparities=self.numDisparities,
            blockSize=self.blockSize)
        self.model = StereoCameraModel()
        # TODO sg_block_matcher_ = cv::StereoSGBM::create(1, 1, 10);

        # Subscriber: exact-time synchronization of the four KITTI topics.
        self.bridge = CvBridge()
        left_image_sub = message_filters.Subscriber('/kitti/camera_gray_left/image_raw', Image)
        right_image_sub = message_filters.Subscriber('/kitti/camera_gray_right/image_raw', Image)
        left_caminfo_sub = message_filters.Subscriber('/kitti/camera_gray_left/camera_info', CameraInfo)
        right_caminfo_sub = message_filters.Subscriber('/kitti/camera_gray_right/camera_info', CameraInfo)
        ts = message_filters.TimeSynchronizer([left_image_sub, right_image_sub, left_caminfo_sub, right_caminfo_sub], 10)
        ts.registerCallback(self.callback)

        # Publisher
        self.disparity_image_pub = rospy.Publisher("/kitti/disparity_image", Image, queue_size=10)
        self.depth_image_pub = rospy.Publisher("/kitti/depth_image", Image, queue_size=10)
        self.stereo_pointcloud_pub = rospy.Publisher("/kitti/stereo_pointcloud", PointCloud, queue_size=10)

    def callback(self, left_image, right_image, left_caminfo, right_caminfo):
        """Synchronized-message callback: refresh the stereo camera model,
        convert both image messages to mono8 OpenCV arrays, then compute and
        publish the disparity image and the point cloud."""

        self.model.fromCameraInfo(left_caminfo, right_caminfo)

        try:
            left_cv_image = self.bridge.imgmsg_to_cv2(left_image, "mono8")
        except CvBridgeError as e:
            print(e)
        try:
            right_cv_image = self.bridge.imgmsg_to_cv2(right_image, "mono8")
        except CvBridgeError as e:
            print(e)
        # NOTE(review): if a conversion above fails, the corresponding local
        # is unbound and the call below raises NameError instead of skipping
        # the frame — consider returning early on CvBridgeError.

        disparity = self.processDisparity(left_cv_image, right_cv_image)

        pointcloud = self.processPointCloud(disparity, left_image.header.frame_id)

    def processDisparity(self, left_image, right_image):
        """Compute block-matching disparity for a rectified mono8 stereo pair.

        Returns a float32 disparity image in pixels (fixed-point /16, adjusted
        for the principal-point x-offset). Also publishes an 8-bit
        visualization on /kitti/disparity_image."""

        # StereoBM produces 16-bit signed fixed-point disparity (d_fp = 16*d);
        # invalid pixels are negative.
        disparity16 = self.block_matcher.compute(left_image, right_image)
        rospy.loginfo("DISP16 %d %d" % (np.amin(disparity16), np.amax(disparity16)))
        rospy.loginfo(disparity16.dtype)
        # We convert from fixed-point to float disparity and also adjust for any x-offset between
        # the principal points: d = d_fp*inv_dpp - (cx_l - cx_r)
        # rospy.loginfo("%f %f" % (self.model.left.cx(), self.model.right.cx()))
        disparity = np.float32(disparity16) * (1.0 / 16) - (self.model.left.cx() - self.model.right.cx())
        rospy.loginfo("DISP %d %d" % (np.amin(disparity), np.amax(disparity)))
        rospy.loginfo(disparity.dtype)

        # 8-bit image purely for visualization.
        # NOTE(review): the +15 offset shifts the invalid value (-16) toward 0,
        # but the uint8 cast wraps any value outside [0, 255] — confirm this
        # rendering is intended rather than a scaled/clipped conversion.
        disparity8 = (disparity16 + 15).astype(np.uint8)
        rospy.loginfo("DISP8 %d %d" % (np.amin(disparity8), np.amax(disparity8)))
        rospy.loginfo(disparity8.dtype)

        try:
            self.disparity_image_pub.publish(
                self.bridge.cv2_to_imgmsg(disparity8, "mono8"))
        except CvBridgeError as e:
            print(e)

        return disparity

    def processPointCloud(self, disparity, frame_id):
        """Project every positive-disparity pixel to 3D, publish a depth image
        and a PointCloud (points kept only for 0 < depth < 70 m), and log
        min/max extents for debugging.

        Returns the published PointCloud message."""

        stereo_pointcloud = PointCloud()
        counter = 0
        # Trackers for the extreme point coordinates: [value, u, v, disparity].
        min_x = [1000,0,0,-1]
        max_x = [-1000,0,0,-1]
        min_y = [1000,0,0,-1]
        max_y = [-1000,0,0,-1]
        min_z = [1000,0,0,-1]
        max_z = [-1000,0,0,-1]
        depth_image = np.zeros(disparity.shape)
        # NOTE(review): u iterates rows and v iterates columns here, but
        # image_geometry's projectPixelTo3d expects (u, v) = (column, row);
        # the pixel coordinates below look transposed — confirm against the
        # image_geometry API before relying on x/y values.
        for u, row in enumerate(disparity):
            for v, pixel in enumerate(row):
                if pixel > 0:
                    point = self.model.projectPixelTo3d((u,v), pixel)
                    depth = point[2]
                    depth_image[u][v] = depth
                    if depth > 0 and depth < 70:
                        point32 = Point32(point[0], point[1], point[2])
                        stereo_pointcloud.points.append(point32)
                        counter +=1
                        if min_x[0] > point32.x:
                            min_x = [point32.x,u,v,pixel]
                            #print("MINX",u,v,point)
                        if min_y[0] > point32.y:
                            min_y = [point32.y,u,v,pixel]
                            #print("MINY", u,v,point)
                        if min_z[0] > point32.z:
                            min_z = [point32.z,u,v,pixel]
                            #print("MINZ",u,v,point)
                        if max_x[0] < point32.x:
                            max_x = [point32.x,u,v,pixel]
                            #print("MAXX",u,v,point)
                        if max_y[0] < point32.y:
                            max_y = [point32.y,u,v,pixel]
                            #print("MAXY",u,v,point)
                        if max_z[0] < point32.z:
                            max_z = [point32.z,u,v,pixel]
                            #print("MAXZ",u,v,point)
                    #if counter % 10000 == 0:
                    #    print(u,v,pixel,point)
        print("X",min_x, max_x)
        print("Y",min_y, max_y)
        print("Z",min_z, max_z)
        rospy.loginfo("Depth completition rate %f", counter / disparity.shape[0] / disparity.shape[1])
        """
        plt.figure(1)
        plt.subplot(211)
        plt.imshow(disparity, 'gray')
        plt.subplot(212)
        plt.imshow(depth_image, vmax=70)
        mng = plt.get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        plt.show()
        """
        # NOTE(review): depths are in meters, so the raw uint8 cast below maps
        # metric depth directly to gray levels (values >255 wrap) — confirm
        # this is the intended visualization scaling.
        depth_image8 = depth_image.astype(np.uint8)
        try:
            self.depth_image_pub.publish(
                self.bridge.cv2_to_imgmsg(depth_image8, "mono8"))
        except CvBridgeError as e:
            print(e)

        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = frame_id
        stereo_pointcloud.header = header
        self.stereo_pointcloud_pub.publish(stereo_pointcloud)

        return stereo_pointcloud


    def reconfigure_callback(self, config, dummy):
        """Dynamic reconfigure callback: rebuild the block matcher with the
        converted numDisparities/blockSize values and return the config."""

        self.numDisparities = self.convert_num_disparities(config["numDisparities"])
        self.blockSize = self.convert_block_size(config["blockSize"])
        self.block_matcher = cv2.StereoBM_create(
            numDisparities=self.numDisparities,
            blockSize=self.blockSize)
        rospy.loginfo("""Reconfigure Request: {numDisparities}, {blockSize},""".format(**config))
        # Check to see if node should be started or stopped.
        # if self.enable != config["enable"]:
        #     if config["enable"]:
        #         self.start()
        #     else:
        #         self.stop()
        # self.enable = config["enable"]

        # Return the new variables.
        return config

    def convert_num_disparities(self, numDisparities):
        """Map the raw parameter to a StereoBM-valid multiple of 16."""
        return 16 * numDisparities

    def convert_block_size(self, blockSize):
        """Map the raw parameter to a StereoBM-valid odd block size >= 5."""
        return 2 * blockSize + 5
189 |
if __name__ == '__main__':

    # Initialize the node, construct the stereo pipeline, and hand control to
    # the ROS event loop. rospy.spin() is inside the try block so that a
    # ROSInterruptException raised on shutdown (e.g. Ctrl-C) is swallowed
    # cleanly instead of propagating as an unhandled exception; previously
    # spin() ran after the except block, so a shutdown during spinning crashed
    # and an interrupted construction still attempted to spin.
    rospy.init_node('pystereo')
    try:
        StereoImageNode()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
198 |
--------------------------------------------------------------------------------
/src/stereo_vision.cpp:
--------------------------------------------------------------------------------
1 | #include "sensor_processing/stereo_vision.h"
2 |
3 | #include
4 |
// NOTE(review): angle-bracket template arguments (e.g. the types in
// dynamic_reconfigure::Server<...>, advertise<...>, boost::make_shared<...>,
// cv::Mat_<...>, color.at<...>) appear to have been stripped from this copy
// of the file by the export tool — restore them from version control before
// building.
namespace sensor_processing
{
// Constructor: wires up dynamic reconfigure, reads the matcher parameters,
// subscribes to the four synchronized KITTI stereo topics, advertises the
// outputs, and builds the OpenCV block matchers.
StereoVision::StereoVision(ros::NodeHandle nh) :
  nh_(nh),
  exact_sync_(ExactPolicy(10),
    sub_left_image_, sub_left_camera_info_,
    sub_right_image_, sub_right_camera_info_)
{
  // Set up a dynamic reconfigure server.
  // Do this before parameter server, else some of the parameter server values can be overwritten.
  dynamic_reconfigure::Server::CallbackType cb;
  cb = boost::bind(&StereoVision::configCallback, this, _1, _2);
  dr_srv_.setCallback(cb);

  // Initialize node parameters from launch file or command line. Use a private node handle so that multiple instances
  // of the node can be run simultaneously while using different parameters.
  ros::NodeHandle pnh("~");
  int block_size;
  int num_disparities;
  // NOTE(review): each uninitialized local is passed as its own default, so
  // if the parameter is not set the result is an indeterminate value
  // (undefined behavior). Initialize both locals with sane defaults.
  pnh.param("numDisparities", num_disparities, num_disparities);
  pnh.param("blockSize", block_size, block_size);
  num_disparities_ = convertNumDisparities(num_disparities);
  block_size_ = convertBlockSize(block_size);

  // NOTE(review): this topic lacks the leading '/' the other three have —
  // it resolves relative to the node's namespace; confirm intended.
  sub_left_image_.subscribe(nh_, "kitti/camera_gray_left/image_raw", 1);
  sub_left_camera_info_.subscribe(nh_, "/kitti/camera_gray_left/camera_info", 1);
  sub_right_image_.subscribe(nh_, "/kitti/camera_gray_right/image_raw", 1);
  sub_right_camera_info_.subscribe(nh_, "/kitti/camera_gray_right/camera_info", 1);
  exact_sync_.registerCallback(boost::bind(&StereoVision::callback,
    this, _1, _2, _3, _4));

  // NOTE(review): the first advertise uses the constructor parameter `nh`
  // while the others use the member `nh_` — both refer to the same handle
  // value here, but prefer `nh_` consistently.
  pub_disparity_ = nh.advertise("/kitti/disparity", 1);
  pub_disparity_image_ = nh_.advertise("/kitti/disparity_image", 1);
  pub_points2_ = nh_.advertise("/kitti/stereo_pointcloud", 1);

  block_matcher_ = cv::StereoBM::create(num_disparities_, block_size_);
  sg_block_matcher_ = cv::StereoSGBM::create(1, 1, 10);

}

// Synchronized-image callback: computes a DisparityImage from the rectified
// stereo pair, publishes it, publishes a raw 16-bit disparity visualization,
// and projects the disparity to a PointCloud2.
void StereoVision::callback(const ImageConstPtr& l_image_msg,
  const CameraInfoConstPtr& l_info_msg,
  const ImageConstPtr& r_image_msg,
  const CameraInfoConstPtr& r_info_msg)
{

  // Update the camera model
  model_.fromCameraInfo(l_info_msg, r_info_msg);

  // Allocate new disparity image message
  DisparityImagePtr disp_msg = boost::make_shared();
  disp_msg->header = l_info_msg->header;
  disp_msg->image.header = l_info_msg->header;

  // Compute window of (potentially) valid disparities
  // NOTE(review): disp_msg->image.width/height are still 0 here — they are
  // only filled in later by processDisparity() — so this valid_window is
  // computed from zero dimensions. Move this below processDisparity().
  int border = block_size_ / 2;
  int left = num_disparities_ + block_matcher_->getMinDisparity() + border - 1;
  // Margin of invalid disparities on the right edge of the image
  // (depends on the sign of the minimum disparity).
  int wtf = (block_matcher_->getMinDisparity() >= 0) ? border + block_matcher_->getMinDisparity() : std::max(border, -block_matcher_->getMinDisparity());
  int right = disp_msg->image.width - 1 - wtf;
  int top = border;
  int bottom = disp_msg->image.height - 1 - border;
  disp_msg->valid_window.x_offset = left;
  disp_msg->valid_window.y_offset = top;
  disp_msg->valid_window.width = right - left;
  disp_msg->valid_window.height = bottom - top;

  // Create cv::Mat views onto all buffers
  const cv::Mat_ l_image = cv_bridge::toCvShare(l_image_msg, image_encodings::MONO8)->image;
  const cv::Mat_ r_image = cv_bridge::toCvShare(r_image_msg, image_encodings::MONO8)->image;

  // Perform block matching to find the disparities
  // TODO PASS MODEL_
  processDisparity(l_image, r_image, *disp_msg);

  // Adjust for any x-offset between the principal points: d' = d - (cx_l - cx_r)
  // NOTE(review): processDisparity() already applies this exact offset in its
  // convertTo() call, so when cx_l != cx_r the correction is applied twice —
  // confirm and remove one of the two.
  double cx_l = model_.left().cx();
  double cx_r = model_.right().cx();
  if (cx_l != cx_r) {
    cv::Mat_ disp_image(disp_msg->image.height, disp_msg->image.width,
      reinterpret_cast(&disp_msg->image.data[0]),
      disp_msg->image.step);
    cv::subtract(disp_image, cv::Scalar(cx_l - cx_r), disp_image);
  }

  pub_disparity_.publish(disp_msg);

  // OPTIONAL FOR VIZ
  // NOTE(review): disparity16_ holds StereoBM output, which is 16-bit
  // *signed* fixed-point; publishing it as "mono16" (unsigned) makes negative
  // (invalid) disparities wrap to large values in viewers — confirm intended.
  cv_bridge::CvImage cv_bridge_disparity_image;
  cv_bridge_disparity_image.image = disparity16_;
  cv_bridge_disparity_image.encoding = "mono16";
  cv_bridge_disparity_image.header.stamp = l_image_msg->header.stamp;
  pub_disparity_image_.publish(cv_bridge_disparity_image.toImageMsg());


  PointCloud2Ptr points_msg = boost::make_shared();
  points_msg->header = disp_msg->header;
  // Calculate point cloud
  points_msg->height = disp_msg->image.height;
  points_msg->width = disp_msg->image.width;
  points_msg->is_bigendian = false;
  points_msg->is_dense = false; // there may be invalid points
  // TODO PASS MODEL_
  processPoints2(*disp_msg, l_image, "mono8", *points_msg);

  // TODO DIE POINTS RGB
  pub_points2_.publish(points_msg);

}

// Runs the block matcher on a rectified pair and fills the DisparityImage:
// 32-bit float disparity data plus the stereo geometry (f, T) and the
// disparity search range.
void StereoVision::processDisparity(const cv::Mat& left_rect, const cv::Mat& right_rect,
  DisparityImage& disparity)
{

  // Fixed-point disparity is 16 times the true value: d = d_fp / 16.0 = x_l - x_r.
  static const int DPP = 16; // disparities per pixel
  static const double inv_dpp = 1.0 / DPP;

  // Block matcher produces 16-bit signed (fixed point) disparity image
  block_matcher_->compute(left_rect, right_rect, disparity16_);
  //sg_block_matcher_->compute(left_rect, right_rect, disparity16_);

  // Fill in DisparityImage image data, converting to 32-bit float
  Image& dimage = disparity.image;
  dimage.height = disparity16_.rows;
  dimage.width = disparity16_.cols;
  dimage.encoding = image_encodings::TYPE_32FC1;
  dimage.step = dimage.width * sizeof(float);
  dimage.data.resize(dimage.step * dimage.height);
  // Wrap the message buffer so convertTo() writes directly into it (no copy).
  cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  // We convert from fixed-point to float disparity and also adjust for any x-offset between
  // the principal points: d = d_fp*inv_dpp - (cx_l - cx_r)
  disparity16_.convertTo(dmat, dmat.type(), inv_dpp, -(model_.left().cx() - model_.right().cx()));
  ROS_ASSERT(dmat.data == &dimage.data[0]);
  /// @todo is_bigendian? :)

  // StereoVision parameters
  disparity.f = model_.right().fx();
  disparity.T = model_.baseline();

  /// @todo Window of (potentially) valid disparities

  // Disparity search range
  disparity.min_disparity = block_matcher_->getMinDisparity();
  disparity.max_disparity = block_matcher_->getMinDisparity() + num_disparities_ - 1;
  disparity.delta_d = inv_dpp;
}

// Projects a DisparityImage to a dense organized PointCloud2 (x, y, z, rgb;
// 16 bytes per point). Invalid points are written as quiet NaN; the rgb
// channel is filled from `color` according to `encoding` (mono8/rgb8/bgr8).
void StereoVision::processPoints2(const DisparityImage& disparity,
  const cv::Mat& color, const std::string& encoding,
  PointCloud2& points)
{

  // Calculate dense point cloud
  const Image& dimage = disparity.image;
  const cv::Mat_ dmat(dimage.height, dimage.width, (float*)&dimage.data[0], dimage.step);
  model_.projectDisparityImageTo3d(dmat, dense_points_, true);

  // Fill in sparse point cloud message
  points.height = dense_points_.rows;
  points.width = dense_points_.cols;
  points.fields.resize (4);
  points.fields[0].name = "x";
  points.fields[0].offset = 0;
  points.fields[0].count = 1;
  points.fields[0].datatype = PointField::FLOAT32;
  points.fields[1].name = "y";
  points.fields[1].offset = 4;
  points.fields[1].count = 1;
  points.fields[1].datatype = PointField::FLOAT32;
  points.fields[2].name = "z";
  points.fields[2].offset = 8;
  points.fields[2].count = 1;
  points.fields[2].datatype = PointField::FLOAT32;
  points.fields[3].name = "rgb";
  points.fields[3].offset = 12;
  points.fields[3].count = 1;
  points.fields[3].datatype = PointField::FLOAT32;
  //points.is_bigendian = false; ???
  points.point_step = 16;
  points.row_step = points.point_step * points.width;
  points.data.resize (points.row_step * points.height);
  points.is_dense = false; // there may be invalid points

  float bad_point = std::numeric_limits::quiet_NaN ();
  int i = 0;
  // First pass: copy x/y/z for valid points, NaN-fill invalid ones.
  for (int32_t u = 0; u < dense_points_.rows; ++u) {
    for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
      if (isValidPoint(dense_points_(u,v))) {
        // x,y,z,rgba
        memcpy (&points.data[i * points.point_step + 0], &dense_points_(u,v)[0], sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &dense_points_(u,v)[1], sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &dense_points_(u,v)[2], sizeof (float));
      }
      else {
        memcpy (&points.data[i * points.point_step + 0], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 4], &bad_point, sizeof (float));
        memcpy (&points.data[i * points.point_step + 8], &bad_point, sizeof (float));
      }
    }
  }

  // Second pass: fill in the packed 0x00RRGGBB color channel.
  namespace enc = image_encodings;
  i = 0;
  if (encoding == enc::MONO8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          // Replicate the gray value into all three channels.
          uint8_t g = color.at(u,v);
          int32_t rgb = (g << 16) | (g << 8) | g;
          memcpy (&points.data[i * points.point_step + 12], &rgb, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::RGB8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          const cv::Vec3b& rgb = color.at(u,v);
          int32_t rgb_packed = (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else if (encoding == enc::BGR8) {
    for (int32_t u = 0; u < dense_points_.rows; ++u) {
      for (int32_t v = 0; v < dense_points_.cols; ++v, ++i) {
        if (isValidPoint(dense_points_(u,v))) {
          // Swap BGR byte order into the packed RGB layout.
          const cv::Vec3b& bgr = color.at(u,v);
          int32_t rgb_packed = (bgr[2] << 16) | (bgr[1] << 8) | bgr[0];
          memcpy (&points.data[i * points.point_step + 12], &rgb_packed, sizeof (int32_t));
        }
        else {
          memcpy (&points.data[i * points.point_step + 12], &bad_point, sizeof (float));
        }
      }
    }
  }
  else {
    ROS_WARN("Could not fill color channel of the point cloud, unrecognized encoding '%s'", encoding.c_str());
  }

}

// True if the reprojected point has usable depth.
bool StereoVision::isValidPoint(const cv::Vec3f& pt)
{
  // Check both for disparities explicitly marked as invalid (where OpenCV maps pt.z to MISSING_Z)
  // and zero disparities (point mapped to infinity).
  return pt[2] != image_geometry::StereoCameraModel::MISSING_Z && !std::isinf(pt[2]);
}

// Dynamic reconfigure callback: converts the raw slider values to valid
// StereoBM arguments and rebuilds the block matcher.
void StereoVision::configCallback(sensor_processing::StereoVisionParamsConfig &config, uint32_t level __attribute__((unused)))
{
  num_disparities_ = convertNumDisparities(config.numDisparities);
  block_size_ = convertBlockSize(config.blockSize);
  ROS_DEBUG("Reconfigure Request");
  ROS_DEBUG("numDisparities %d", num_disparities_);
  ROS_DEBUG("blockSize %d", block_size_);
  block_matcher_ = cv::StereoBM::create(num_disparities_, block_size_);
}

// Map the raw parameter to a StereoBM-valid multiple of 16.
int StereoVision::convertNumDisparities(const int num_disparities){
  return 16 * num_disparities;
}

// Map the raw parameter to a StereoBM-valid odd block size >= 5.
int StereoVision::convertBlockSize(const int block_size){
  return 2 * block_size + 5;
}

}
283 |
--------------------------------------------------------------------------------
/statick_config/rsc/_clang-format:
--------------------------------------------------------------------------------
1 | ---
2 | Language: Cpp
3 | AccessModifierOffset: -1
4 | AlignAfterOpenBracket: Align
5 | AlignConsecutiveAssignments: false
6 | AlignConsecutiveDeclarations: false
7 | AlignEscapedNewlinesLeft: true
8 | AlignOperands: true
9 | AlignTrailingComments: true
10 | AllowAllParametersOfDeclarationOnNextLine: true
11 | AllowShortBlocksOnASingleLine: false
12 | AllowShortCaseLabelsOnASingleLine: false
13 | AllowShortFunctionsOnASingleLine: None
14 | AllowShortIfStatementsOnASingleLine: false
15 | AllowShortLoopsOnASingleLine: false
16 | AlwaysBreakAfterDefinitionReturnType: None
17 | AlwaysBreakAfterReturnType: None
18 | AlwaysBreakBeforeMultilineStrings: true
19 | AlwaysBreakTemplateDeclarations: true
20 | BinPackArguments: true
21 | BinPackParameters: true
22 | BraceWrapping:
23 | AfterClass: true
24 | AfterControlStatement: true
25 | AfterEnum: true
26 | AfterFunction: true
27 | AfterNamespace: true
28 | AfterObjCDeclaration: true
29 | AfterStruct: true
30 | AfterUnion: true
31 | BeforeCatch: true
32 | BeforeElse: true
33 | IndentBraces: false
34 | BreakBeforeBinaryOperators: None
35 | BreakBeforeBraces: Custom
36 | BreakBeforeTernaryOperators: true
37 | BreakConstructorInitializersBeforeComma: false
38 | BreakAfterJavaFieldAnnotations: false
39 | BreakStringLiterals: true
40 | ColumnLimit: 120
41 | CommentPragmas: '^ IWYU pragma:'
42 | ConstructorInitializerAllOnOneLineOrOnePerLine: true
43 | ConstructorInitializerIndentWidth: 4
44 | ContinuationIndentWidth: 4
45 | Cpp11BracedListStyle: true
46 | DerivePointerAlignment: false
47 | DisableFormat: false
48 | ExperimentalAutoDetectBinPacking: false
49 | ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
50 | IncludeCategories:
51 | - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
52 | Priority: 2
53 | - Regex: '^(<|"(gtest|isl|json)/)'
54 | Priority: 3
55 | - Regex: '.*'
56 | Priority: 1
57 | IncludeIsMainRegex: '$'
58 | IndentCaseLabels: true
59 | IndentWidth: 2
60 | IndentWrappedFunctionNames: false
61 | JavaScriptQuotes: Leave
62 | JavaScriptWrapImports: true
63 | KeepEmptyLinesAtTheStartOfBlocks: false
64 | MacroBlockBegin: ''
65 | MacroBlockEnd: ''
66 | MaxEmptyLinesToKeep: 1
67 | NamespaceIndentation: None
68 | ObjCBlockIndentWidth: 2
69 | ObjCSpaceAfterProperty: false
70 | ObjCSpaceBeforeProtocolList: false
71 | PenaltyBreakBeforeFirstCallParameter: 1
72 | PenaltyBreakComment: 300
73 | PenaltyBreakFirstLessLess: 120
74 | PenaltyBreakString: 1000
75 | PenaltyExcessCharacter: 1000000
76 | PenaltyReturnTypeOnItsOwnLine: 200
77 | PointerAlignment: Right
78 | ReflowComments: true
79 | SortIncludes: false
80 | SpaceAfterCStyleCast: false
81 | SpaceBeforeAssignmentOperators: true
82 | SpaceBeforeParens: ControlStatements
83 | SpaceInEmptyParentheses: false
84 | SpacesBeforeTrailingComments: 2
85 | SpacesInAngles: false
86 | SpacesInContainerLiterals: true
87 | SpacesInCStyleCastParentheses: false
88 | SpacesInParentheses: false
89 | SpacesInSquareBrackets: false
90 | Standard: Auto
91 | TabWidth: 2
92 | UseTab: Never
93 | ...
94 |
--------------------------------------------------------------------------------
/statick_config/rsc/cccc.opt:
--------------------------------------------------------------------------------
1 | CCCC_FileExt@.ADA@ada.95@
2 | CCCC_FileExt@.ADB@ada.95@
3 | CCCC_FileExt@.ADS@ada.95@
4 | CCCC_FileExt@.C@c++.ansi@
5 | CCCC_FileExt@.CC@c++.ansi@
6 | CCCC_FileExt@.CPP@c++.ansi@
7 | CCCC_FileExt@.CXX@c++.ansi@
8 | CCCC_FileExt@.H@c++.ansi@
9 | CCCC_FileExt@.H++@c++.ansi@
10 | CCCC_FileExt@.HH@c++.ansi@
11 | CCCC_FileExt@.HPP@c++.ansi@
12 | CCCC_FileExt@.HXX@c++.ansi@
13 | CCCC_FileExt@.J@java@
14 | CCCC_FileExt@.JAV@java@
15 | CCCC_FileExt@.JAVA@java@
16 | CCCC_FileExt@.ada@ada.95@
17 | CCCC_FileExt@.adb@ada.95@
18 | CCCC_FileExt@.ads@ada.95@
19 | CCCC_FileExt@.c@c.ansi@
20 | CCCC_FileExt@.c++@c++.ansi@
21 | CCCC_FileExt@.cc@c++.ansi@
22 | CCCC_FileExt@.cpp@c++.ansi@
23 | CCCC_FileExt@.cxx@c++.ansi@
24 | CCCC_FileExt@.h@c++.ansi@
25 | CCCC_FileExt@.h++@c++.ansi@
26 | CCCC_FileExt@.hh@c++.ansi@
27 | CCCC_FileExt@.hpp@c++.ansi@
28 | CCCC_FileExt@.hxx@c++.ansi@
29 | CCCC_FileExt@.j@java@
30 | CCCC_FileExt@.jav@java@
31 | CCCC_FileExt@.java@java@
32 | CCCC_MetTmnt@8.3@999999.000000@999999.000000@0@8@3@General format for fixed precision 3 d.p.@
33 | CCCC_MetTmnt@CBO@12.000000@30.000000@0@6@0@Coupling between objects@
34 | CCCC_MetTmnt@COM@999999.000000@999999.000000@0@6@0@Comment lines@
35 | CCCC_MetTmnt@COMper@999999.000000@999999.000000@0@6@3@Comment lines (averaged)@
36 | CCCC_MetTmnt@DIT@3.000000@6.000000@0@6@0@Depth of Inheritance Tree@
37 | CCCC_MetTmnt@FI@12.000000@20.000000@0@6@0@Fan in (overall)@
38 | CCCC_MetTmnt@FIc@10.000000@12.000000@0@6@0@Fan in (concrete uses only)@
39 | CCCC_MetTmnt@FIv@10.000000@12.000000@0@6@0@Fan in (visible uses only)@
40 | CCCC_MetTmnt@FO@12.000000@20.000000@0@6@0@Fan out (overall)@
41 | CCCC_MetTmnt@FOc@6.000000@12.000000@0@6@0@Fan out (concrete uses only)@
42 | CCCC_MetTmnt@FOv@6.000000@12.000000@0@6@0@Fan out (visible uses only)@
43 | CCCC_MetTmnt@IF4@100.000000@1000.000000@0@6@0@Henry-Kafura/Shepperd measure (overall)@
44 | CCCC_MetTmnt@IF4c@30.000000@100.000000@0@6@0@Henry-Kafura/Shepperd measure (concrete)@
45 | CCCC_MetTmnt@IF4v@30.000000@100.000000@0@6@0@Henry-Kafura/Shepperd measure (visible)@
46 | CCCC_MetTmnt@LOCf@30.000000@100.000000@0@6@0@Lines of code/function@
47 | CCCC_MetTmnt@LOCm@500.000000@2000.000000@0@6@0@Lines of code/single module@
48 | CCCC_MetTmnt@LOCp@999999.000000@999999.000000@0@6@0@Lines of code/project@
49 | CCCC_MetTmnt@LOCper@500.000000@2000.000000@0@6@3@Lines of code/average module@
50 | CCCC_MetTmnt@L_C@7.000000@30.000000@20@6@3@LOC/COM Lines of code/comment line@
51 | CCCC_MetTmnt@MVGf@10.000000@30.000000@0@6@0@Cyclomatic complexity/function@
52 | CCCC_MetTmnt@MVGm@200.000000@1000.000000@0@6@0@Cyclomatic complexity/single module@
53 | CCCC_MetTmnt@MVGp@999999.000000@999999.000000@0@6@0@Cyclomatic complexity/project@
54 | CCCC_MetTmnt@MVGper@200.000000@1000.000000@0@6@3@Cyclomatic complexity/average module@
55 | CCCC_MetTmnt@M_C@5.000000@10.000000@5@6@3@MVG/COM McCabe/comment line@
56 | CCCC_MetTmnt@NOC@4.000000@15.000000@0@6@0@Number of children@
57 | CCCC_MetTmnt@WMC1@30.000000@100.000000@0@6@0@Weighting function=1 unit per method@
58 | CCCC_MetTmnt@WMCv@10.000000@30.000000@0@6@0@Weighting function=1 unit per visible method@
59 | CCCC_Dialect@c++.mfc@BEGIN_MESSAGE_MAP@start_skipping@
60 | CCCC_Dialect@c++.mfc@END_MESSAGE_MAP@stop_skipping@
61 | CCCC_Dialect@c++.stl@__STL_BEGIN_NAMESPACE@ignore@
62 | CCCC_Dialect@c++.stl@__STL_END_NAMESPACE@ignore@
63 |
--------------------------------------------------------------------------------
/statick_config/rsc/config.yaml:
--------------------------------------------------------------------------------
1 | levels:
2 | local:
3 | tool:
4 | bandit:
5 | flags: ""
6 | catkin_lint:
7 | flags: "-W2 --ignore DESCRIPTION_BOILERPLATE,DESCRIPTION_MEANINGLESS,GLOBAL_VAR_COLLISION,LINK_DIRECTORY,LITERAL_PROJECT_NAME,TARGET_NAME_COLLISION"
8 | cccc:
9 | flags: ""
10 | clang-format:
11 | flags: ""
12 | version: 3.9
13 | clang-tidy:
14 | flags: "-checks='cert*,clang-analyzer-alpha.cplusplus.virtualCall,clang-analyzer-alpha.security.ArrayBoundV2,clang-analyzer-core.UndefinedBinaryOperatorResult,clang-analyzer-cplusplus.NewDelete,clang-analyzer-cplusplus.NewDeleteLeaks,clang-analyzer-unix.MismatchedDeallocatorclang-analyzer-unix.Malloc,misc-new-delete-overheads,misc-non-copyable-objects,misc-static-assert,clang-diagnostic-dangling-initializer-list,clang-diagnostic-delete-incomplete,clang-diagnostic-delete-non-virtual-dtor,clang-diagnostic-dynamic-class-memaccess,clang-diagnostic-exceptions,clang-diagnostic-implicit-int,clang-diagnostic-incompatible-pointer-types,clang-diagnostic-incompatible-pointer-types-discards-qualifiers,clang-diagnostic-int-conversion,clang-diagnostic-int-to-pointer-castclang-diagnostic-invalid-noreturn,clang-diagnostic-invalid-offsetof,clang-diagnostic-mismatched-new-delete,clang-diagnostic-parentheses,clang-diagnostic-reorder,clang-diagnostic-reserved-id-macro,clang-diagnostic-return-type,clang-diagnostic-sometimes-uninitialized,clang-diagnostic-unevaluated-expression,clang-diagnostic-uninitialized,clang-diagnostic-unsequenced,clang-diagnostic-user-defined-literals,clang-diagnostic-varargs,clang-diagnostic-vexing-parse,clang-diagnostic-trigraphs,clang-diagnostic-unreachable-code,clang-diagnostic-format,clang-diagnostic-cast-align,clang-diagnostic-cast-qual,clang-diagnostic-format-security,clang-diagnostic-sequence-point,clang-diagnostic-char-subscripts,clang-diagnostic-unused-parameter,clang-diagnostic-unused-value,-boost-use-to-string,-cert-err58-cpp,-cert-err60-cpp,-clang-analyzer-deadcode.DeadStores,-clang-analyzer-alpha.deadcode.UnreachableCode,-clang-analyzer-optin.performance.Padding,-cppcoreguidelines-*,-google-default-arguments,-google-readability-namespace-comments,-google-runtime-int,-google-runtime-references,-llvm-include-order,-llvm-namespace-comment,-modernize-*,-misc-misplaced-widening-cast,-misc-unused-parameters,-readability-else-after-return'"
15 | cmakelint:
16 | flags: "--spaces=2 --filter=-linelength,-whitespace/indent"
17 | cppcheck:
18 | flags: "-j 4 --enable=warning --template='[{file}:{line}]: ({severity} {id}) {message}' --suppress=unreadVariable --suppress=unusedPrivateFunction --suppress=unusedStructMember"
19 | cpplint:
20 | flags: "--filter=-build/header_guard,-build/include,-build/include_order,-build/c++11,-readability/function,-readability/streams,-readability/todo,-readability/namespace,-readability/multiline_comment,-readability/fn_size,-readability/alt_tokens,-readability/braces,-readability/inheritance,-runtime/indentation_namespace,-runtime/int,-runtime/threadsafe_fn,-runtime/references,-runtime/array,-whitespace,-legal"
21 | # Not using findbugs since it does not install nicely on 14.04 (Travis).
22 | # findbugs:
23 | # flags: ""
24 | # include: "findBugs-security.xml"
25 | flawfinder:
26 | flags: ""
27 | lizard:
28 | flags: ""
29 | make:
30 | flags: "-Wcast-align -Wchar-subscripts -Wformat-security -Wformat -Wimplicit-int -Wint-to-pointer-cast -Wparentheses -Winvalid-offsetof -Wsequence-point -Wdelete-non-virtual-dtor -Wreorder -Wreturn-type -Wuninitialized -Wunused-parameter -Wunreachable-code -Wunused-value -Wvarargs -Wtrigraphs -Wcast-qual -Wall -Wextra -Woverloaded-virtual -Wnon-virtual-dtor -Wold-style-cast -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-parameter"
31 | pycodestyle:
32 | flags: ""
33 | pydocstyle:
34 | flags: ""
35 | pyflakes:
36 | flags: ""
37 | pylint:
38 | flags: ""
39 | # Not using uncrustify since it is duplicated and not totally compatible with clang-format.
40 | # uncrustify:
41 | # flags: ""
42 | # Not using xmllint since it does not install nicely on 14.04 (Travis).
43 | # xmllint:
44 | # flags: ""
45 | yamllint:
46 | flags: "-d '{extends: default,
47 | rules: {
48 | colons: {max-spaces-before: 0, max-spaces-after: -1},
49 | commas: disable,
50 | document-start: disable,
51 | line-length: disable}}'"
52 |
--------------------------------------------------------------------------------
/statick_config/rsc/exceptions.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | packages:
3 | node_example:
4 | exceptions:
5 | file:
6 | # System headers
7 | - tools: all
8 | globs: ["/opt/ros/*", "/opt/pleora/*", "/usr/*"]
9 | # ROS auto-generated files
10 | - tools: all
11 | globs: ["*/devel/*", "*/build/*", "*/logs/*", "*/site/*", "*/.catkin_tools/*"]
12 | # Statick auto-generated files
13 | - tools: all
14 | globs: ["*/.cccc/*"]
15 | message_regex:
16 | # Python 2/3 compatibility requires inheriting from `object`.
17 | - tools: [pylint]
18 | regex: ".+: Class .+ inherits from object, can be safely removed from bases in python3"
19 | # Lizard finds some ROS auto-generated files
20 | - tools: [lizard]
21 | regex: "_rollback_env_variable.+"
22 | - tools: [lizard]
23 | regex: "find_env_hooks.+"
24 |
25 | ignore_packages:
26 | - catkin_tools_prebuild
27 |
--------------------------------------------------------------------------------
/statick_config/rsc/profile.yaml:
--------------------------------------------------------------------------------
1 | default: local
2 |
3 | packages:
4 |
5 | # You can do individual package level overrides like:
6 | # packages:
7 | # my_package: objective
8 |
--------------------------------------------------------------------------------
/test/test_node_example_listener.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 |
4 | #include
5 | #include
6 |
7 | #include
8 |
9 | class Helper
10 | {
11 | public:
12 | Helper() : got_msg_(false)
13 | {
14 | rosout_sub_ = nh_.subscribe("rosout", 1, &Helper::rosoutCallback, this);
15 | example_pub_ = nh_.advertise("example", 1, true);
16 |
17 | spinForTime(0.5);
18 | join();
19 | }
20 |
21 | void spinForTime(double secs)
22 | {
23 | spin_secs_ = secs;
24 | spin_thread_.reset(new boost::thread(boost::bind(&Helper::spinThread, this)));
25 | }
26 |
27 | void join()
28 | {
29 | spin_thread_->join();
30 | }
31 |
32 | bool gotMsg()
33 | {
34 | return got_msg_;
35 | }
36 |
37 | void sendData(double a, double b, const std::string &message)
38 | {
39 | node_example::NodeExampleData msg;
40 | msg.a = a;
41 | msg.b = b;
42 | msg.message = message;
43 | example_pub_.publish(msg);
44 | spinForTime(1.0);
45 | join();
46 | }
47 |
48 | private:
49 | void spinThread()
50 | {
51 | ros::Rate r(10.0);
52 | ros::Time start = ros::Time::now();
53 |
54 | while ((ros::Time::now() - start) < ros::Duration(spin_secs_))
55 | {
56 | ros::spinOnce();
57 | r.sleep();
58 | }
59 | }
60 |
61 | void rosoutCallback(const rosgraph_msgs::LogConstPtr &msg __attribute__((unused)))
62 | {
63 | got_msg_ = true;
64 | }
65 |
66 | private:
67 | ros::NodeHandle nh_;
68 | ros::Subscriber rosout_sub_;
69 | ros::Publisher example_pub_;
70 |
71 | bool got_msg_;
72 | double spin_secs_;
73 | boost::shared_ptr spin_thread_;
74 | };
75 |
76 | TEST(NodeExampleTest, rosoutPopulated)
77 | {
78 | Helper h;
79 | h.sendData(1, 1, "test");
80 | EXPECT_TRUE(h.gotMsg());
81 | }
82 |
83 | int main(int argc, char **argv)
84 | {
85 | testing::InitGoogleTest(&argc, argv);
86 |
87 | ros::init(argc, argv, "test_node_example_listener");
88 | ros::NodeHandle nh;
89 |
90 | return RUN_ALL_TESTS();
91 | }
92 |
--------------------------------------------------------------------------------
/test/test_node_example_listener.test:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/test/test_node_example_talker.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 |
9 | #include
10 |
11 | class Helper
12 | {
13 | public:
14 | Helper() : got_msg_(false)
15 | {
16 | example_sub_ = nh_.subscribe("example", 1, &Helper::exampleCallback, this);
17 | }
18 |
19 | void spinForTime(double secs)
20 | {
21 | spin_secs_ = secs;
22 | spin_thread_.reset(new boost::thread(boost::bind(&Helper::spinThread, this)));
23 | join();
24 | }
25 |
26 | void join()
27 | {
28 | spin_thread_->join();
29 | }
30 |
31 | bool gotMsg()
32 | {
33 | return got_msg_;
34 | }
35 |
36 | void reset()
37 | {
38 | got_msg_ = false;
39 | }
40 |
41 | void setEnable(bool enable)
42 | {
43 | dynamic_reconfigure::ReconfigureRequest srv_req;
44 | dynamic_reconfigure::ReconfigureResponse srv_resp;
45 | dynamic_reconfigure::BoolParameter bool_param;
46 | dynamic_reconfigure::Config conf;
47 |
48 | bool_param.name = "enable";
49 | bool_param.value = enable;
50 | conf.bools.push_back(bool_param);
51 |
52 | srv_req.config = conf;
53 |
54 | ros::service::call("/talker/set_parameters", srv_req, srv_resp);
55 | }
56 |
57 | private:
58 | void spinThread()
59 | {
60 | ros::Rate r(10.0);
61 | ros::Time start = ros::Time::now();
62 |
63 | while ((ros::Time::now() - start) < ros::Duration(spin_secs_))
64 | {
65 | ros::spinOnce();
66 | r.sleep();
67 | }
68 | }
69 |
70 | void exampleCallback(const node_example::NodeExampleDataConstPtr &msg __attribute__((unused)))
71 | {
72 | got_msg_ = true;
73 | }
74 |
75 | private:
76 | ros::NodeHandle nh_;
77 | ros::Subscriber example_sub_;
78 |
79 | bool got_msg_;
80 | double spin_secs_;
81 | boost::shared_ptr spin_thread_;
82 | };
83 |
84 | TEST(NodeExampleTest, getMessage)
85 | {
86 | Helper h;
87 | h.spinForTime(1.0);
88 | EXPECT_TRUE(h.gotMsg());
89 | }
90 |
91 | TEST(NodeExampleTest, stopNode)
92 | {
93 | Helper h;
94 | h.setEnable(false);
95 | h.reset();
96 | h.spinForTime(1.0);
97 | EXPECT_FALSE(h.gotMsg());
98 | }
99 |
100 | TEST(NodeExampleTest, restartNode)
101 | {
102 | Helper h;
103 | h.setEnable(false);
104 | h.spinForTime(0.5);
105 | h.setEnable(true);
106 | h.reset();
107 | h.spinForTime(1.0);
108 | EXPECT_TRUE(h.gotMsg());
109 | }
110 |
111 | int main(int argc, char **argv)
112 | {
113 | testing::InitGoogleTest(&argc, argv);
114 |
115 | ros::init(argc, argv, "test_node_example_talker");
116 | ros::NodeHandle nh;
117 |
118 | return RUN_ALL_TESTS();
119 | }
120 |
--------------------------------------------------------------------------------
/test/test_node_example_talker.test:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------