├── .circleci
└── config.yml
├── .gitignore
├── CMakeLists.txt
├── LICENSE.md
├── README.md
├── cfg
├── flame.rviz
├── flame_nodelet.yaml
├── flame_offline_asl.yaml
├── flame_offline_tum.yaml
└── kinect.yaml
├── flame_ros_nodelets.xml
├── flame_ros_plugins.xml
├── launch
├── flame_nodelet.launch
├── flame_offline_asl.launch
├── flame_offline_tum.launch
└── frames.launch
├── msg
├── FlameNodeletStats.msg
└── FlameStats.msg
├── package.xml
├── scripts
├── Dockerfile
└── flame_docker_example.sh
└── src
├── CMakeLists.txt
├── dataset_utils
├── asl
│ ├── dataset.h
│ └── types.h
├── utils.cc
└── utils.h
├── flame_nodelet.cc
├── flame_offline_asl.cc
├── flame_offline_tum.cc
├── flame_rviz_plugins
├── surface_normals_visual.cc
├── surface_normals_visual.h
├── textured_mesh_display.cc
├── textured_mesh_display.h
├── textured_mesh_visual.cc
├── textured_mesh_visual.h
└── visual.h
├── ros_sensor_streams
├── asl_rgbd_offline_stream.cc
├── asl_rgbd_offline_stream.h
├── conversions.h
├── thread_safe_queue.h
├── tracked_image_stream.cc
├── tracked_image_stream.h
├── tum_rgbd_offline_stream.cc
└── tum_rgbd_offline_stream.h
├── utils.cc
└── utils.h
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Configuration file for CircleCI 2.0.
2 | version: 2
3 | jobs:
4 | # Main build job.
5 | build:
6 | working_directory: ~/flame_ws
7 | docker:
8 | - image: osrf/ros:kinetic-desktop-full
9 | steps:
10 | # Commands to checkout/build repo.
11 | - checkout:
12 | path: ~/flame_ws/src/flame_ros
13 |
14 | # Install apt dependencies.
15 | - run: apt-get update
16 | - run: apt-get install -y sudo apt-utils lsb-release git openssh-client wget
17 | - run: apt-get install -y libboost-all-dev libpcl-dev
18 |
19 | # Install catkin_tools.
20 | - run: sh -c 'echo "deb http://packages.ros.org/ros/ubuntu `lsb_release -sc` main" > /etc/apt/sources.list.d/ros-latest.list'
21 | - run: wget http://packages.ros.org/ros.key -O - | sudo apt-key add -
22 | - run: apt-get update && apt-get install -y python-catkin-tools
23 |
24 | # Checkout flame.
25 | - run: mkdir ~/.ssh/ && echo -e "Host github.com\n\tStrictHostKeyChecking no\n" > ~/.ssh/config
26 | - run: cd src && git clone git@github.com:robustrobotics/flame.git
27 |
28 | # Init workspace and install rosdep dependencies.
29 | - run: source /opt/ros/kinetic/setup.sh && catkin init
30 | - run: source /opt/ros/kinetic/setup.sh && rosdep install -iy --from-paths ./src
31 |
32 | # Install dependencies from source.
33 | - run: mkdir -p ./dependencies/src
34 | - run: ./src/flame/scripts/eigen.sh ./dependencies/src ./dependencies
35 | - run: ./src/flame/scripts/sophus.sh ./dependencies/src ./dependencies
36 | - run: cp ./src/flame/scripts/env.sh ./dependencies
37 |
38 | # Build!
39 | - run: source /opt/ros/kinetic/setup.sh && source ./dependencies/env.sh && catkin build --no-status --no-notify -j2 --mem-limit 3G
40 |
41 | # Two main workflows: One run per commit, one to run master nightly.
42 | workflows:
43 | version: 2
44 | commit-workflow:
45 | jobs:
46 | - build
47 | nightly-workflow:
48 | triggers:
49 | - schedule:
50 | cron: "0 1 * * *"
51 | filters:
52 | branches:
53 | only: master
54 | jobs:
55 | - build
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | id_rsa
2 | cpplint.txt
3 | cppcheck.xml
4 |
5 | # Compiled Object files
6 | *.slo
7 | *.lo
8 | *.o
9 | *.obj
10 |
11 | # Precompiled Headers
12 | *.gch
13 | *.pch
14 |
15 | # Compiled Dynamic libraries
16 | *.so
17 | *.dylib
18 | *.dll
19 |
20 | # Fortran module files
21 | *.mod
22 |
23 | # Compiled Static libraries
24 | *.lai
25 | *.la
26 | *.a
27 | *.lib
28 |
29 | # Executables
30 | *.exe
31 | *.out
32 | *.app
33 |
34 | # Temporary files.
35 | *~
36 |
37 | # Build artifacts.
38 | build
39 | devel
40 | install
41 | logs
42 | dependencies
43 | .catkin_workspace
44 | .catkin_tools
45 | src/drivers/realsense
46 |
47 | # lcmgen puts auto-generated messages source files here.
48 | src/utilities/common_lcmtypes/src
49 |
50 | # eclipse settings
51 | *.cproject
52 | *.project
53 | *.settings
54 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 |
3 | # Configure CCache if available
4 | find_program(CCACHE_FOUND ccache)
5 | if (CCACHE_FOUND)
6 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
7 | set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
8 | endif (CCACHE_FOUND)
9 |
10 | project(flame_ros)
11 |
12 | option(FLAME_WITH_FLA "Compile with FLA modifications." OFF)
13 | if (FLAME_WITH_FLA)
14 | message(STATUS "Compiling with FLA modifications.")
15 | set(REQ_CATKIN_PKGS ${REQ_CATKIN_PKGS} fla_msgs)
16 | add_definitions("-DFLAME_WITH_FLA")
17 | endif(FLAME_WITH_FLA)
18 |
19 | option(WITH_COVERAGE "Compile with code coverage" OFF)
20 | if (WITH_COVERAGE)
21 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g -O0 -fprofile-arcs -ftest-coverage")
22 | set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -g -O0 -fprofile-arcs -ftest-coverage")
23 | set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} -fprofile-arcs -ftest-coverage")
24 | endif ()
25 |
26 | add_definitions("-std=c++11")
27 | if (NOT CMAKE_BUILD_TYPE)
28 | set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." FORCE)
29 | endif (NOT CMAKE_BUILD_TYPE)
30 |
31 | ## Find catkin macros and libraries
32 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
33 | ## is used, also find other catkin packages
34 | set(REQ_CATKIN_PKGS
35 | roscpp
36 | rviz
37 | std_msgs
38 | nav_msgs
39 | sensor_msgs
40 | pcl_ros
41 | pcl_msgs
42 | pcl_conversions
43 | nodelet
44 | cv_bridge
45 | message_generation
46 | camera_info_manager
47 | image_geometry
48 | )
49 | find_package(catkin REQUIRED COMPONENTS ${REQ_CATKIN_PKGS})
50 |
51 | ## System dependencies are found with CMake's conventions
52 | # find_package(Boost REQUIRED COMPONENTS system)
53 | find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc features2d calib3d video)
54 | find_package(PCL 1.7 REQUIRED COMPONENTS common)
55 | find_package(Boost 1.54 REQUIRED)
56 | find_package(Sophus REQUIRED)
57 | find_package(flame REQUIRED)
58 |
59 | find_package(PkgConfig REQUIRED)
60 | pkg_check_modules(EIGEN3 REQUIRED eigen3>=3.2)
61 | pkg_check_modules(YAML_CPP REQUIRED yaml-cpp>=0.5)
62 |
63 | option(FLAME_WITH_RVIZ "Compile flame_ros RViz plugin." ON)
64 | if (FLAME_WITH_RVIZ)
65 | pkg_check_modules(OGRE OGRE)
66 |
67 | ## This plugin includes Qt widgets, so we must include Qt like so:
68 | ## We'll use the version that rviz used so they are compatible.
69 | set(CMAKE_AUTOMOC ON) # This setting causes Qt's "MOC" generation to happen automatically.
70 | if(rviz_QT_VERSION VERSION_LESS "5")
71 | message(STATUS "Using Qt4 based on the rviz_QT_VERSION: ${rviz_QT_VERSION}")
72 | find_package(Qt4 ${rviz_QT_VERSION} EXACT REQUIRED QtCore QtGui)
73 | ## pull in all required include dirs, define QT_LIBRARIES, etc.
74 | include(${QT_USE_FILE})
75 | else()
76 | message(STATUS "Using Qt5 based on the rviz_QT_VERSION: ${rviz_QT_VERSION}")
77 | find_package(Qt5 ${rviz_QT_VERSION} EXACT REQUIRED Core Widgets)
78 | ## make target_link_libraries(${QT_LIBRARIES}) pull in all required dependencies
79 | set(QT_LIBRARIES Qt5::Widgets)
80 | endif()
81 |
82 | ## Prefer the Qt signals and slots to avoid defining "emit", "slots",
83 | ## etc because they can conflict with boost signals, so define QT_NO_KEYWORDS here.
84 | add_definitions(-DQT_NO_KEYWORDS)
85 |
86 | include_directories(${OGRE_INCLUDE_DIRS})
87 | link_directories(${OGRE_LIBRARY_DIRS})
88 | endif ()
89 |
90 | # Set some compiler flags.
91 | add_definitions("-DENABLE_SSE")
 92 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${SSE_FLAGS} -march=native")
 93 | set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${SSE_FLAGS} -march=native")
96 |
97 | ## Uncomment this if the package has a setup.py. This macro ensures
98 | ## modules and global scripts declared therein get installed
99 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
100 | # catkin_python_setup()
101 |
102 | ################################################
103 | ## Declare ROS messages, services and actions ##
104 | ################################################
105 |
106 | ## To declare and build messages, services or actions from within this
107 | ## package, follow these steps:
108 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in
109 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
110 | ## * In the file package.xml:
111 | ## * add a build_depend tag for "message_generation"
112 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET
113 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
114 | ## but can be declared for certainty nonetheless:
115 | ## * add a run_depend tag for "message_runtime"
116 | ## * In this file (CMakeLists.txt):
117 | ## * add "message_generation" and every package in MSG_DEP_SET to
118 | ## find_package(catkin REQUIRED COMPONENTS ...)
119 | ## * add "message_runtime" and every package in MSG_DEP_SET to
120 | ## catkin_package(CATKIN_DEPENDS ...)
121 | ## * uncomment the add_*_files sections below as needed
122 | ## and list every .msg/.srv/.action file to be processed
123 | ## * uncomment the generate_messages entry below
124 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
125 |
126 | ## Generate messages in the 'msg' folder
127 | # add_message_files(
128 | # FILES
129 | # Message1.msg
130 | # Message2.msg
131 | # )
132 | add_message_files(FILES
133 | FlameStats.msg
134 | FlameNodeletStats.msg)
135 |
136 | ## Generate services in the 'srv' folder
137 | # add_service_files(
138 | # FILES
139 | # Service1.srv
140 | # Service2.srv
141 | # )
142 |
143 | ## Generate actions in the 'action' folder
144 | # add_action_files(
145 | # FILES
146 | # Action1.action
147 | # Action2.action
148 | # )
149 |
150 | ## Generate added messages and services with any dependencies listed here
151 | # generate_messages(
152 | # DEPENDENCIES
153 | # std_msgs # Or other packages containing msgs
154 | # )
155 | generate_messages(DEPENDENCIES std_msgs)
156 |
157 | ################################################
158 | ## Declare ROS dynamic reconfigure parameters ##
159 | ################################################
160 |
161 | ## To declare and build dynamic reconfigure parameters within this
162 | ## package, follow these steps:
163 | ## * In the file package.xml:
164 | ## * add a build_depend and a run_depend tag for "dynamic_reconfigure"
165 | ## * In this file (CMakeLists.txt):
166 | ## * add "dynamic_reconfigure" to
167 | ## find_package(catkin REQUIRED COMPONENTS ...)
168 | ## * uncomment the "generate_dynamic_reconfigure_options" section below
169 | ## and list every .cfg file to be processed
170 |
171 | ## Generate dynamic reconfigure parameters in the 'cfg' folder
172 | # generate_dynamic_reconfigure_options(
173 | # cfg/DynReconf1.cfg
174 | # cfg/DynReconf2.cfg
175 | # )
176 |
177 | ###################################
178 | ## catkin specific configuration ##
179 | ###################################
180 | ## The catkin_package macro generates cmake config files for your package
181 | ## Declare things to be passed to dependent projects
182 | ## INCLUDE_DIRS: uncomment this if your package contains header files
183 | ## LIBRARIES: libraries you create in this project that dependent projects also need
184 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need
185 | ## DEPENDS: system dependencies of this project that dependent projects also need
186 | catkin_package(
187 | INCLUDE_DIRS src ${EIGEN3_INCLUDE_DIRS}
188 | LIBRARIES ${PROJECT_NAME}_nodelets
189 | CATKIN_DEPENDS ${REQ_CATKIN_PKGS}
190 | DEPENDS Boost OpenCV PCL Sophus flame
191 | )
192 |
193 | ###########
194 | ## Build ##
195 | ###########
196 |
197 | ## Specify additional locations of header files
198 | ## Your package locations should be listed before other locations
199 | include_directories(./src
200 | ${EIGEN3_INCLUDE_DIRS}
201 | ${YAML_CPP_INCLUDE_DIRS}
202 | ${PCL_INCLUDE_DIRS}
203 | ${Boost_INCLUDE_DIRS}
204 | ${Sophus_INCLUDE_DIRS}
205 | ${flame_INCLUDE_DIRS}
206 | ${catkin_INCLUDE_DIRS})
207 |
208 | add_definitions("-std=c++11")
209 | add_definitions(${PCL_DEFINITIONS})
210 |
211 | ## Declare a C++ library
212 | # add_library(flame_ros
213 | # src/${PROJECT_NAME}/flame_ros.cpp
214 | # )
215 |
216 | ## Add cmake target dependencies of the library
217 | ## as an example, code may need to be generated before libraries
218 | ## either from message generation or dynamic reconfigure
219 | # add_dependencies(flame_ros ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
220 |
221 | ## Declare a C++ executable
222 | # add_executable(flame_ros_node src/flame_ros_node.cpp)
223 |
224 | ## Add cmake target dependencies of the executable
225 | ## same as for the library above
226 | # add_dependencies(flame_ros_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
227 |
228 | ## Specify libraries to link a library or executable target against
229 | # target_link_libraries(flame_ros_node
230 | # ${catkin_LIBRARIES}
231 | # )
232 |
233 | add_subdirectory(./src)
234 |
235 | #############
236 | ## Install ##
237 | #############
238 |
239 | # all install targets should use catkin DESTINATION variables
240 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
241 |
242 | ## Mark executable scripts (Python etc.) for installation
243 | ## in contrast to setup.py, you can choose the destination
244 | # install(PROGRAMS
245 | # scripts/my_python_script
246 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
247 | # )
248 |
249 | ## Mark executables and/or libraries for installation
250 | # install(TARGETS flame_ros flame_ros_node
251 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
252 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
253 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
254 | # )
255 |
256 | ## Mark cpp header files for installation
257 | # install(DIRECTORY include/${PROJECT_NAME}/
258 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
259 | # FILES_MATCHING PATTERN "*.h"
260 | # PATTERN ".svn" EXCLUDE
261 | # )
262 |
263 | ## Mark other files for installation (e.g. launch and bag files, etc.)
264 | # install(FILES
265 | # # myfile1
266 | # # myfile2
267 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
268 | # )
269 |
270 | #############
271 | ## Testing ##
272 | #############
273 |
274 | ## Add gtest based cpp test target and link libraries
275 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_flame_ros.cpp)
276 | # if(TARGET ${PROJECT_NAME}-test)
277 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
278 | # endif()
279 |
280 | ## Add folders to be run by python nosetests
281 | # catkin_add_nosetests(test)
282 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://circleci.com/gh/robustrobotics/flame_ros/tree/master)
2 |
3 | # flame_ros
4 | **FLaME** (Fast Lightweight Mesh Estimation) is a lightweight, CPU-only method
5 | for dense online monocular depth estimation. Given a sequence of camera images
6 | with known poses, **FLaME** is able to reconstruct dense 3D meshes of the
7 | environment by posing the depth estimation problem as a variational optimization
8 | over a Delaunay graph that can be solved at framerate, even on computationally
9 | constrained platforms.
10 |
11 | The `flame_ros` repository contains the ROS bindings, visualization code, and
12 | offline frontends for the algorithm. The core library can be
13 | found [here](https://github.com/robustrobotics/flame.git).
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 | ### Related Publications:
22 | * [**FLaME: Fast Lightweight Mesh Estimation using Variational Smoothing on Delaunay Graphs**](https://groups.csail.mit.edu/rrg/papers/greene_iccv17.pdf),
23 | *W. Nicholas Greene and Nicholas Roy*, ICCV 2017.
24 |
25 | ## Author
26 | - W. Nicholas Greene (wng@csail.mit.edu)
27 |
28 | ## Quickstart
29 | Build the provided [Docker](https://www.docker.com/) image and run an example
30 | dataset (requires [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) for
31 | rviz):
32 | ```bash
33 | # Build the image.
34 | cd flame_ros
35 | docker build --rm -t flame -f scripts/Dockerfile .
36 |
37 | # Run an example dataset.
38 | ./scripts/flame_docker_example.sh
39 | ```
40 | You may need to run `xhost +local:root` in order to forward rviz outside the container.
41 |
42 | ## Dependencies
43 | - Ubuntu 16.04
44 | - ROS Kinetic
45 | - OpenCV 3.2
46 | - Boost 1.54
47 | - PCL 1.7
48 | - Eigen 3.2
49 | - Sophus (SHA: b474f05f839c0f63c281aa4e7ece03145729a2cd)
50 | - [flame](https://github.com/robustrobotics/flame.git)
51 | - catkin_tools (optional)
52 |
53 | ## Installation
54 | **NOTE:** These instructions assume you are running ROS Kinetic on Ubuntu 16.04
55 | and are interested in installing both `flame` and `flame_ros`. See the
56 | installation instructions for `flame` if you only wish to install `flame`.
57 |
58 | 1. Install `apt` dependencies:
59 | ```bash
60 | sudo apt-get install libboost-all-dev libpcl-dev python-catkin-tools
61 | ```
62 |
63 | 2. Create a Catkin workspace using `catkin_tools`:
64 | ```bash
65 | # Source ROS.
66 | source /opt/ros/kinetic/setup.sh
67 |
68 | # Create workspace source folder.
69 | mkdir -p flame_ws/src
70 |
71 | # Checkout flame and flame_ros into workspace.
72 | cd flame_ws/src
73 | git clone https://github.com/robustrobotics/flame.git
74 | git clone https://github.com/robustrobotics/flame_ros.git
75 |
76 | # Initialize workspace.
77 | cd ..
78 | catkin init
79 |
80 | # Install ROS dependencies using rosdep.
81 | rosdep install -iy --from-paths ./src
82 | ```
83 |
84 | 3. Install Eigen 3.2 and Sophus using the scripts provided with `flame`:
85 | ```bash
86 | # Create a dependencies folder.
87 | mkdir -p flame_ws/dependencies/src
88 |
89 | # Checkout Eigen and Sophus into ./dependencies/src and install into ./dependencies.
90 | cd flame_ws
91 | ./src/flame/scripts/eigen.sh ./dependencies/src ./dependencies
92 | ./src/flame/scripts/sophus.sh ./dependencies/src ./dependencies
93 |
94 | # Copy and source environment variable script:
95 | cp ./src/flame/scripts/env.sh ./dependencies/
96 | source ./dependencies/env.sh
97 | ```
98 |
99 | 4. Build workspace:
100 | ```bash
101 | # Build!
102 | catkin build
103 |
104 | # Source workspace.
105 | source ./devel/setup.sh
106 | ```
107 |
108 | ## Offline Processing
109 | Two types of offline nodes are provided, one to process video from
110 | the
111 | [EuRoC MAV Dataset](http://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) and
112 | one to process video from
113 | the
114 | [TUM RGB-D SLAM Benchmark](https://vision.in.tum.de/data/datasets/rgbd-dataset).
115 |
116 | ### EuRoC Data
117 | First, download and extract one of the ASL
118 | datasets
119 | [here](http://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets) (the
120 | Vicon Room datasets should work well).
121 |
122 | Next, update the parameter file in `flame_ros/cfg/flame_offline_asl.yaml` to
123 | point to where you extracted the data:
124 | ```bash
125 | pose_path: /mav0/state_groundtruth_estimate0
126 | rgb_path: /mav0/cam0
127 | ```
128 |
129 | Finally, to process the data launch:
130 | ```bash
131 | roslaunch flame_ros flame_offline_asl.launch
132 | ```
133 | The mesh should be published on the `/flame/mesh` topic. To visualize this topic
134 | in rviz, consult the [Visualization](#Visualization) section.
135 |
136 | ### TUM Data
137 | First, download and extract one of the
138 | datasets [here](https://vision.in.tum.de/data/datasets/rgbd-dataset/download)
139 | (`fr3/structure_texture_far` or `fr3/long_office_household` should work well).
140 | Use the `associate.py`
141 | script [here](https://vision.in.tum.de/data/datasets/rgbd-dataset/tools) to
142 | associate the pose (`groundtruth.txt`) and RGB (`rgb.txt`) files (you can
143 | associate the depthmaps as well).
144 |
145 | A ROS-compliant camera calibration YAML file will be needed. You can use the one
146 | provided in `flame_ros/cfg/kinect.yaml`, which has the default parameters for
147 | the Microsoft Kinect used to collect the TUM datasets.
148 |
149 | Next, update the parameter file in `flame_ros/cfg/flame_offline_tum.yaml` to
150 | point to where you extracted the data:
151 | ```bash
152 | input_file: /groundtruth_rgb.txt
153 | calib_file: /cfg/kinect.yaml
154 | ```
155 |
156 | Finally, to process the data launch:
157 | ```bash
158 | roslaunch flame_ros flame_offline_tum.launch
159 | ```
160 | The mesh should be published on the `/flame/mesh` topic. To visualize this topic
161 | in rviz, consult the [Visualization](#Visualization) section.
162 |
163 | ## Online Processing
164 | The online nodelet can be launched using `flame_nodelet.launch`:
165 | ```bash
166 | roslaunch flame_ros flame_nodelet.launch image:=/image
167 | ```
168 | where `/image` is your live rectified/undistorted image stream. The `frame_id` of
169 | this topic must correspond to a Right-Down-Forward frame attached to the
170 | camera. The `tf` tree must be complete such that the pose of the camera in the
171 | world frame can be resolved by `tf`.
172 |
173 | The mesh should be published on the `/flame/mesh` topic. To visualize this topic
174 | in rviz, consult the [Visualization](#Visualization) section.
175 |
176 | The `flame_nodelet.launch` launch file loads the parameters listed in
177 | `flame_ros/cfg/flame_nodelet.yaml`. You may need to update the
178 | `input/camera_frame_id` param for your data. See the [Parameters](#Parameters)
179 | section for more parameter information.
180 |
181 | ## Visualization
182 | You can use the provided configuration file (`flame_ros/cfg/flame.rviz`) to
183 | visualize the output data in rviz. This approach uses a custom rviz plugin to
184 | render the `/flame/mesh` messages. `flame_ros` can also publish the depth data
185 | in other formats (e.g. as a `sensor_msgs/PointCloud2` or a `sensor_msgs/Image`),
186 | which can be visualized by enabling the corresponding plugins.
187 |
188 | ## Parameters
189 | There are many parameters that control processing, but only a handful are
190 | particularly important:
191 |
192 | - `output/*`: Controls what type of output messages are produced. If you are
193 | concerned about speed, you should prefer publishing only the mesh data
194 | (`output/mesh: True`), but other types of output can be enabled here.
195 |
196 | - `debug/*`: Controls what type of debug images are published. Creating these
197 | images is relatively expensive, so disable for real-time operation.
198 |
199 | - `threading/openmp/*`: Controls OpenMP-accelerated sections. You may wish to
200 | tune the number of threads per parallel section
201 | (`threading/openmp/num_threads`) or the number of chunks per thread
202 | (`threading/openmp/chunk_size`) for your processor.
203 |
204 | - `features/detection/min_grad_mag`: Controls the minimum gradient magnitude for
205 | detected features.
206 |
207 | - `features/detection/win_size`: Features are detected by dividing the image
208 | domain into `win_size x win_size` blocks and selecting the best trackable
209 | pixel inside each block. Set to a large number (e.g. 32) for coarse, but fast
210 | reconstructions, and a small number (e.g. 8) for finer reconstructions.
211 |
212 | - `regularization/nltgv2/data_factor`: Controls the balance between smoothing
213 | and data-fitting in the regularizer. It should be set in relation to the
214 | detection window size. Some good values are 0.1-0.25.
215 |
216 | ### Performance Tips
217 | For best results use a high framerate (>= 30 Hz) camera with VGA-sized
218 | images. Higher resolution images will require more accurate poses. The feature
219 | detection window size (`features/detection/win_size`) and the data scaling term
220 | (`regularization/nltgv2/data_factor`) are the primary knobs for tuning
221 | performance. The default parameters should work well in most cases, but you may
222 | need to tune for your specific data.
223 |
224 | By default, `flame_ros` will publish several debug images. While helpful to
225 | observe during operation, they will slow down the pipeline. Disable them if you
226 | are trying to increase throughput.
227 |
228 | The usual tips for monocular SLAM/depth estimation systems also apply:
229 | - Prefer slow translational motion
230 | - Avoid fast rotations when possible
231 | - Use an accurate pose source (e.g. one of the many available visual
232 | odometry/SLAM packages)
233 | - Prefer texture-rich environments
234 | - Prefer environments with even lighting
235 |
--------------------------------------------------------------------------------
/cfg/flame.rviz:
--------------------------------------------------------------------------------
1 | Panels:
2 | - Class: rviz/Displays
3 | Help Height: 0
4 | Name: Displays
5 | Property Tree Widget:
6 | Expanded:
7 | - /DepthMap1/Autocompute Value Bounds1
8 | - /RawDepths1/Autocompute Value Bounds1
9 | - /TexturedMesh1
10 | Splitter Ratio: 0.5
11 | Tree Height: 655
12 | - Class: rviz/Selection
13 | Name: Selection
14 | - Class: rviz/Tool Properties
15 | Expanded:
16 | - /2D Pose Estimate1
17 | - /2D Nav Goal1
18 | - /Publish Point1
19 | Name: Tool Properties
20 | Splitter Ratio: 0.588679016
21 | - Class: rviz/Views
22 | Expanded:
23 | - /Current View1
24 | - /Current View1/Focal Point1
25 | Name: Views
26 | Splitter Ratio: 0.5
27 | - Class: rviz/Time
28 | Experimental: false
29 | Name: Time
30 | SyncMode: 0
31 | SyncSource: InputImage
32 | Visualization Manager:
33 | Class: ""
34 | Displays:
35 | - Alpha: 0.5
36 | Cell Size: 10
37 | Class: rviz/Grid
38 | Color: 160; 160; 164
39 | Enabled: true
40 | Line Style:
41 | Line Width: 0.0299999993
42 | Value: Lines
43 | Name: Grid
44 | Normal Cell Count: 0
45 | Offset:
46 | X: 0
47 | Y: 0
48 | Z: 0
49 | Plane: XY
50 | Plane Cell Count: 100
51 | Reference Frame:
52 | Value: true
53 | - Class: rviz/TF
54 | Enabled: true
55 | Frame Timeout: 15
56 | Frames:
57 | All Enabled: false
58 | camera:
59 | Value: true
60 | camera_world:
61 | Value: true
62 | world:
63 | Value: true
64 | Marker Scale: 1
65 | Name: TF
66 | Show Arrows: false
67 | Show Axes: true
68 | Show Names: false
69 | Tree:
70 | world:
71 | camera_world:
72 | camera:
73 | {}
74 | Update Interval: 0
75 | Value: true
76 | - Class: rviz/Image
77 | Enabled: true
78 | Image Topic: /camera/rgb/image_rect_color
79 | Max Value: 1
80 | Median window: 5
81 | Min Value: 0
82 | Name: InputImage
83 | Normalize Range: true
84 | Queue Size: 2
85 | Transport Hint: raw
86 | Unreliable: false
87 | Value: true
88 | - Alpha: 1
89 | Auto Size:
90 | Auto Size Factor: 1
91 | Value: true
92 | Autocompute Intensity Bounds: true
93 | Autocompute Value Bounds:
94 | Max Value: 10
95 | Min Value: -10
96 | Value: true
97 | Axis: Z
98 | Channel Name: intensity
99 | Class: rviz/DepthCloud
100 | Color: 255; 255; 255
101 | Color Image Topic: /camera/rgb/image_rect_color
102 | Color Transformer: RGB8
103 | Color Transport Hint: raw
104 | Decay Time: 0
105 | Depth Map Topic: /camera/depth_registered/image_rect
106 | Depth Map Transport Hint: raw
107 | Enabled: false
108 | Invert Rainbow: false
109 | Max Color: 255; 255; 255
110 | Max Intensity: 4096
111 | Min Color: 0; 0; 0
112 | Min Intensity: 0
113 | Name: TrueDepths
114 | Occlusion Compensation:
115 | Occlusion Time-Out: 30
116 | Value: false
117 | Position Transformer: XYZ
118 | Queue Size: 5
119 | Selectable: true
120 | Size (Pixels): 1
121 | Style: Points
122 | Topic Filter: true
123 | Use Fixed Frame: true
124 | Use rainbow: true
125 | Value: false
126 | - Alpha: 1
127 | Auto Size:
128 | Auto Size Factor: 1
129 | Value: true
130 | Autocompute Intensity Bounds: true
131 | Autocompute Value Bounds:
132 | Max Value: 10
133 | Min Value: 0
134 | Value: false
135 | Axis: Z
136 | Channel Name: intensity
137 | Class: rviz/DepthCloud
138 | Color: 255; 255; 255
139 | Color Image Topic: ""
140 | Color Transformer: AxisColor
141 | Color Transport Hint: raw
142 | Decay Time: 0
143 | Depth Map Topic: /flame/depth_registered/image_rect
144 | Depth Map Transport Hint: raw
145 | Enabled: false
146 | Invert Rainbow: false
147 | Max Color: 255; 255; 255
148 | Max Intensity: 4096
149 | Min Color: 0; 0; 0
150 | Min Intensity: 0
151 | Name: DepthMap
152 | Occlusion Compensation:
153 | Occlusion Time-Out: 30
154 | Value: false
155 | Position Transformer: XYZ
156 | Queue Size: 5
157 | Selectable: true
158 | Size (Pixels): 2
159 | Style: Points
160 | Topic Filter: true
161 | Use Fixed Frame: false
162 | Use rainbow: true
163 | Value: false
164 | - Alpha: 1
165 | Auto Size:
166 | Auto Size Factor: 1
167 | Value: true
168 | Autocompute Intensity Bounds: true
169 | Autocompute Value Bounds:
170 | Max Value: 10
171 | Min Value: 0
172 | Value: false
173 | Axis: Z
174 | Channel Name: intensity
175 | Class: rviz/DepthCloud
176 | Color: 255; 0; 255
177 | Color Image Topic: ""
178 | Color Transformer: FlatColor
179 | Color Transport Hint: raw
180 | Decay Time: 0
181 | Depth Map Topic: /flame/depth_registered_raw/image_rect
182 | Depth Map Transport Hint: raw
183 | Enabled: false
184 | Invert Rainbow: false
185 | Max Color: 255; 255; 255
186 | Max Intensity: 4096
187 | Min Color: 0; 0; 0
188 | Min Intensity: 0
189 | Name: RawDepths
190 | Occlusion Compensation:
191 | Occlusion Time-Out: 30
192 | Value: false
193 | Position Transformer: XYZ
194 | Queue Size: 5
195 | Selectable: true
196 | Size (Pixels): 3
197 | Style: Points
198 | Topic Filter: true
199 | Use Fixed Frame: false
200 | Use rainbow: true
201 | Value: false
202 | - Alpha: 1
203 | Autocompute Intensity Bounds: true
204 | Autocompute Value Bounds:
205 | Max Value: 30
206 | Min Value: 1
207 | Value: false
208 | Axis: Z
209 | Channel Name: intensity
210 | Class: rviz/PointCloud2
211 | Color: 255; 255; 255
212 | Color Transformer: AxisColor
213 | Decay Time: 0
214 | Enabled: false
215 | Invert Rainbow: false
216 | Max Color: 255; 255; 255
217 | Max Intensity: 4096
218 | Min Color: 0; 0; 0
219 | Min Intensity: 0
220 | Name: DepthEstCloud
221 | Position Transformer: XYZ
222 | Queue Size: 10
223 | Selectable: true
224 | Size (Pixels): 3
225 | Size (m): 0.00999999978
226 | Style: Points
227 | Topic: /flame/cloud
228 | Unreliable: false
229 | Use Fixed Frame: false
230 | Use rainbow: true
231 | Value: false
232 | - Class: flame_rviz_plugins/TexturedMesh
233 | Enabled: true
234 | Name: TexturedMesh
235 | Normal Size: 0.0500000007
236 | Phong Shading: true
237 | Polygon Mode: SOLID
238 | PolygonMesh Topic: /flame/mesh
239 | Queue Size: 25
240 | Scene Color Scale: 1
241 | Shader Program: TEXTURE
242 | Show Normals: false
243 | Texture Transport Hint: raw
244 | Texture topic: /camera/rgb/image_rect_color
245 | Value: true
246 | - Class: rviz/Image
247 | Enabled: true
248 | Image Topic: /flame/debug/features
249 | Max Value: 1
250 | Median window: 5
251 | Min Value: 0
252 | Name: DebugFeatures
253 | Normalize Range: true
254 | Queue Size: 2
255 | Transport Hint: raw
256 | Unreliable: false
257 | Value: true
258 | - Class: rviz/Image
259 | Enabled: true
260 | Image Topic: /flame/debug/wireframe
261 | Max Value: 1
262 | Median window: 5
263 | Min Value: 0
264 | Name: DebugWireframe
265 | Normalize Range: true
266 | Queue Size: 2
267 | Transport Hint: raw
268 | Unreliable: false
269 | Value: true
270 | - Class: rviz/Image
271 | Enabled: true
272 | Image Topic: /flame/debug/idepthmap
273 | Max Value: 1
274 | Median window: 5
275 | Min Value: 0
276 | Name: DebugDepthMap
277 | Normalize Range: true
278 | Queue Size: 2
279 | Transport Hint: raw
280 | Unreliable: false
281 | Value: true
282 | - Class: rviz/Image
283 | Enabled: false
284 | Image Topic: /flame/debug/matches
285 | Max Value: 1
286 | Median window: 5
287 | Min Value: 0
288 | Name: DebugMatches
289 | Normalize Range: true
290 | Queue Size: 2
291 | Transport Hint: raw
292 | Unreliable: false
293 | Value: false
294 | - Class: rviz/Image
295 | Enabled: false
296 | Image Topic: /flame/debug/detections
297 | Max Value: 1
298 | Median window: 5
299 | Min Value: 0
300 | Name: DebugDetections
301 | Normalize Range: true
302 | Queue Size: 2
303 | Transport Hint: raw
304 | Unreliable: false
305 | Value: false
306 | Enabled: true
307 | Global Options:
308 | Background Color: 48; 48; 48
309 | Fixed Frame: world
310 | Frame Rate: 30
311 | Name: root
312 | Tools:
313 | - Class: rviz/Interact
314 | Hide Inactive Objects: true
315 | - Class: rviz/MoveCamera
316 | - Class: rviz/Select
317 | - Class: rviz/FocusCamera
318 | - Class: rviz/Measure
319 | - Class: rviz/SetInitialPose
320 | Topic: /initialpose
321 | - Class: rviz/SetGoal
322 | Topic: /move_base_simple/goal
323 | - Class: rviz/PublishPoint
324 | Single click: true
325 | Topic: /clicked_point
326 | Value: true
327 | Views:
328 | Current:
329 | Class: rviz/ThirdPersonFollower
330 | Distance: 15.0147667
331 | Enable Stereo Rendering:
332 | Stereo Eye Separation: 0.0599999987
333 | Stereo Focal Distance: 1
334 | Swap Stereo Eyes: false
335 | Value: false
336 | Focal Point:
337 | X: 2.36587143
338 | Y: 9.77760315
339 | Z: -6.79653549
340 | Focal Shape Fixed Size: true
341 | Focal Shape Size: 0.0500000007
342 | Name: Current View
343 | Near Clip Distance: 0.00999999978
344 | Pitch: 0.379798025
345 | Target Frame: camera
346 | Value: ThirdPersonFollower (rviz)
347 | Yaw: 4.52289104
348 | Saved: ~
349 | Window Geometry:
350 | DebugDepthMap:
351 | collapsed: false
352 | DebugDetections:
353 | collapsed: false
354 | DebugFeatures:
355 | collapsed: false
356 | DebugMatches:
357 | collapsed: false
358 | DebugWireframe:
359 | collapsed: false
360 | Displays:
361 | collapsed: false
362 | Height: 1178
363 | Hide Left Dock: false
364 | Hide Right Dock: true
365 | InputImage:
366 | collapsed: false
367 | QMainWindow State: 000000ff00000000fd0000000400000000000001cc000002cffc020000000cfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005e00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730100000019000002cf000000d000fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650000000359000001230000000000000000fb0000000a0049006d00610067006501000003620000011a0000000000000000fb0000000a0049006d006100670065000000016e0000017a0000000000000000fb0000000a0049006d006100670065010000019c0000014c0000000000000000000000010000010f000002cffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000019000002cf000000a900fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000018efc0100000009fb000000140049006e0070007500740049006d0061006700650100000000000001dd0000007500fffffffb0000001a004400650062007500670046006500610074007500720065007301000001e3000001dd0000008900fffffffb0000001c004400650062007500670057006900720065006600720061006d006501000003c6000001dd0000009400fffffffb0000001a0044006500620075006700440065007000740068004d0061007001000005a9000001d70000009400fffffffb0000001800440065006200750067004d00610074006300680065007300000005010000027f0000008800fffffffb0000001e004400650062007500670044006500740065006300740069006
f006e007300000005a6000001da0000009400fffffffc000005a6000001da0000000000fffffffa000000000200000001fb000000140049006e0070007500740049006d0061006700650000000000ffffffff0000000000000000fb0000000800540069006d00650000000000000007800000022400fffffffb0000000800540069006d00650100000000000004500000000000000000000005ae000002cf00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730000000000ffffffff0000000000000000
368 | Selection:
369 | collapsed: false
370 | Time:
371 | collapsed: false
372 | Tool Properties:
373 | collapsed: false
374 | Views:
375 | collapsed: true
376 | Width: 1920
377 | X: 1920
378 | Y: 0
379 |
--------------------------------------------------------------------------------
/cfg/flame_nodelet.yaml:
--------------------------------------------------------------------------------
1 | # FlameNodelet params.
2 | input:
3 | camera_frame_id: openni_rgb_optical_frame
4 | camera_world_frame_id: camera_world
5 | subsample_factor: 1 # Process one out of this many images.
6 | poseframe_subsample_factor: 6 # Create pf every this number of images. Ignored if use_poseframe_updates is true.
7 | use_poseframe_updates: False # Listen to updates for poseframe poses.
8 | poseframe_child_frame_id: body # Frame of pose information in pf messages.
9 | resize_factor: 1 # Downsample image width/height by this factor.
10 |
11 | output:
12 | quiet: false # Turn off terminal output.
13 | mesh: true # Publish mesh as pcl_msgs/PolygonMesh.
14 | idepthmap: false # Publish idepthmap as sensor_msgs/Image.
15 | depthmap: false # Publish depthmap as sensor_msgs/Image.
16 | cloud: false # Publish pointcloud sensor_msgs/PointCloud2. Prefer mesh or depthmap (in that order).
17 | features: false # Publish raw features points as sensors_msgs/Image.
18 | stats: true # Publish statistics message.
19 |
20 | # Integrate load measurements over this many frames before reporting. Timing
21 | # resolution of /proc is only a Jiffy (usually 0.01 sec), so querying faster
22 | # than this is not meaningful.
23 | load_integration_factor: 15
24 |
25 | # Used to scale idepths before they are colormapped. For example, if average
26 | # scene depth is >> 1m, then coloring by idepth will not provide much dynamic
27 | # range. Coloring by scene_color_scale * idepth (if scene_color_scale > 1) is
28 | # much more informative.
29 | scene_color_scale: 1.0
30 |
31 | # Filter out oblique triangles for display. This does not change the
32 | # underlying graph - it only affects the output/display.
33 | filter_oblique_triangles: True
34 | oblique_normal_thresh: 1.57 # 1.39626 # 80 degrees in radians.
35 | oblique_idepth_diff_factor: 0.35 # Relative difference between max/min triangle idepth.
36 | oblique_idepth_diff_abs: 0.1 # Absolute difference between max/min triangle idepth.
37 |
38 | # Filter out triangles with long edges for display. This does not change the
39 | # underlying graph - it only affects the output/display.
40 | filter_long_edges: True
41 | edge_length_thresh: 0.333 # As a fraction of image width.
42 |
43 | # Filter out triangles that are far from the camera for display. This does not
44 | # change the underlying graph - it only affects the output/display.
45 | filter_triangles_by_idepth: True
46 | min_triangle_idepth: 0.01
47 |
48 | # Don't publish output if angular rate exceeds this threshold. Will be ignored if 0.
49 | max_angular_rate: 0.0 # deg/sec
50 |
51 | debug:
52 | wireframe: true # Mesh wireframe colored by idepth.
53 | features: true # Features colored by idepth.
54 | detections: false # Feature detections
55 | matches: false # Epipolar line searches (green success, red failure).
56 | normals: false # Image colored by interpolated normal vectors.
57 | idepthmap: true # Colored idepthmap.
58 | text_overlay: true # Draw text overlays on images.
59 |     flip_images: false # Rotate debug images by 180 degrees for display.
60 |
61 | threading:
62 | openmp:
63 | num_threads: 4 # Number of threads used in parallel sections.
64 | chunk_size: 1024 # Number of items given to each thread.
65 |
66 | features:
67 | do_letterbox: False # Process only middle third of image.
68 | detection:
69 |     min_grad_mag: 5.0 # Minimum gradient magnitude.
70 | min_error: 100 # Minimum photo error.
71 | win_size: 16 # Detection grid resolution (win_size x win_size).
72 | tracking:
73 | win_size: 5 # Feature tracking window size. If not using LK, must be odd.
74 | max_dropouts: 5 # Max number of missed detections before feature is killed.
75 | epipolar_line_var: 4.0 # Epipolar line noise variance.
76 |
77 | regularization:
78 | do_median_filter: false # Apply median filter on graph.
79 | do_lowpass_filter: false # Apply lowpass filter on graph.
80 | do_nltgv2: true # Apply NLTGV2-L1 (i.e. planar) regularization.
81 | nltgv2:
82 | adaptive_data_weights: false # Set vertex data weights to inverse idepth variance.
83 | rescale_data: false # Rescale data to have mean 1.
84 | init_with_prediction: true # Initialize vertex idepths with predicted value from dense idepthmap.
85 | idepth_var_max: 0.01 # Maximum idepth var before feature can be added to graph.
86 | data_factor: 0.15 # Scalar that controls smoothness vs. data (0.1 for lvl5, 0.25 for lvl3).
87 | step_x: 0.001 # Optimization primal step size.
88 | step_q: 125.0 # Optimization dual step size.
89 | theta: 0.25 # Extra-gradient step size.
90 | min_height: -100000000000000.0 # Minimum height of features that are added to graph.
91 | max_height: 100000000000000.0 # Maximum height of features that are added to graph.
92 | check_sticky_obstacles: False
93 |
--------------------------------------------------------------------------------
/cfg/flame_offline_asl.yaml:
--------------------------------------------------------------------------------
1 | # flame_offline parameters.
2 | pose_path: /home/wng/Projects/rrg/data/euroc_mav_datasets/V1_01_easy/mav0/state_groundtruth_estimate0
3 | rgb_path: /home/wng/Projects/rrg/data/euroc_mav_datasets/V1_01_easy/mav0/cam0
4 | depth_path: /home/wng/Projects/rrg/data/euroc_mav_datasets/V1_01_easy/mav0/depthmap0
5 | world_frame: RFU
6 |
7 | input:
8 | camera_frame_id: camera
9 | camera_world_frame_id: camera_world
10 | subsample_factor: 1 # Process one out of this many images.
11 | poseframe_subsample_factor: 6 # Create pf every this number of images.
12 | resize_factor: 1 # Downsample image width/height by this factor.
13 |
14 | # Target rate at which to process images. This allows ROS to clear the
15 | # callback queues so that things get bagged.
16 | rate: 30
17 |
18 | output:
19 | quiet: false # Turn off terminal output.
20 | mesh: true # Publish mesh as pcl_msgs/PolygonMesh.
21 | idepthmap: false # Publish idepthmap as sensor_msgs/Image.
22 | depthmap: false # Publish depthmap as sensor_msgs/Image.
23 | cloud: false # Publish pointcloud sensor_msgs/PointCloud2. Prefer mesh or depthmap (in that order).
24 | features: false # Publish raw features points as sensors_msgs/Image.
25 | stats: true # Publish statistics message.
26 |
27 | # Integrate load measurements over this many frames before reporting. Timing
28 | # resolution of /proc is only a Jiffy (usually 0.01 sec), so querying faster
29 | # than this is not meaningful.
30 | load_integration_factor: 15
31 |
32 | # Used to scale idepths before they are colormapped. For example, if average
33 | # scene depth is >> 1m, then coloring by idepth will not provide much dynamic
34 | # range. Coloring by scene_color_scale * idepth (if scene_color_scale > 1) is
35 | # much more informative.
36 | scene_color_scale: 1.0
37 |
38 | # Filter out oblique triangles for display. This does not change the
39 | # underlying graph - it only affects the output/display.
40 | filter_oblique_triangles: True
41 | oblique_normal_thresh: 1.57 # 1.39626 # 80 degrees in radians.
42 | oblique_idepth_diff_factor: 0.35 # Relative difference between max/min triangle idepth.
43 | oblique_idepth_diff_abs: 0.1 # Absolute difference between max/min triangle idepth.
44 |
45 | # Filter out triangles with long edges for display. This does not change the
46 | # underlying graph - it only affects the output/display.
47 | filter_long_edges: True
48 | edge_length_thresh: 0.333 # As a fraction of image width.
49 |
50 | # Filter out triangles that are far from the camera for display. This does not
51 | # change the underlying graph - it only affects the output/display.
52 | filter_triangles_by_idepth: True
53 | min_triangle_idepth: 0.01
54 |
55 | # Don't publish output if angular rate exceeds this threshold. Will be ignored if 0.
56 | max_angular_rate: 0.0 # deg/sec
57 |
58 | debug:
59 | wireframe: true # Mesh wireframe colored by idepth.
60 | features: true # Features colored by idepth.
61 | detections: false # Feature detections
62 | matches: false # Epipolar line searches (green success, red failure).
63 | normals: false # Image colored by interpolated normal vectors.
64 | idepthmap: true # Colored idepthmap.
65 | text_overlay: true # Draw text overlays on images.
66 |     flip_images: false # Rotate debug images by 180 degrees for display.
67 |
68 | threading:
69 | openmp:
70 | num_threads: 4 # Number of threads used in parallel sections.
71 | chunk_size: 1024 # Number of items given to each thread.
72 |
73 | features:
74 | do_letterbox: False # Process only middle third of image.
75 | detection:
76 |     min_grad_mag: 5.0 # Minimum gradient magnitude.
77 | min_error: 100 # Minimum photo error.
78 | win_size: 16 # Detection grid resolution (win_size x win_size).
79 | tracking:
80 | win_size: 5 # Feature tracking window size. If not using LK, must be odd.
81 | max_dropouts: 5 # Max number of missed detections before feature is killed.
82 | epipolar_line_var: 4.0 # Epipolar line noise variance.
83 |
84 | regularization:
85 | do_median_filter: false # Apply median filter on graph.
86 | do_lowpass_filter: false # Apply lowpass filter on graph.
87 | do_nltgv2: true # Apply NLTGV2-L1 (i.e. planar) regularization.
88 | nltgv2:
89 | adaptive_data_weights: false # Set vertex data weights to inverse idepth variance.
90 | rescale_data: false # Rescale data to have mean 1.
91 | init_with_prediction: true # Initialize vertex idepths with predicted value from dense idepthmap.
92 | idepth_var_max: 0.01 # Maximum idepth var before feature can be added to graph.
93 | data_factor: 0.15 # Scalar that controls smoothness vs. data (0.1 for lvl5, 0.25 for lvl3).
94 | step_x: 0.001 # Optimization primal step size.
95 | step_q: 125.0 # Optimization dual step size.
96 | theta: 0.25 # Extra-gradient step size.
97 | min_height: -100000000000000.0 # Minimum height of features that are added to graph.
98 | max_height: 100000000000000.0 # Maximum height of features that are added to graph.
99 | check_sticky_obstacles: False
100 |
101 | # Not sure if these options still work, so don't set them before checking.
102 | analysis:
103 | pass_in_truth: false
104 |
--------------------------------------------------------------------------------
/cfg/flame_offline_tum.yaml:
--------------------------------------------------------------------------------
1 | # flame_offline_tum parameters.
2 | input_file: /home/wng/Projects/rrg/data/tum_rgbd_benchmarks/rgbd_dataset_freiburg3_long_office_household/groundtruth_rgb_depth.txt
3 | calib_file: /home/wng/Projects/rrg/data/tum_rgbd_benchmarks/rgbd_dataset_freiburg3_long_office_household/kinect.yaml
4 | input_frame: RDF_IN_FLU
5 | depth_scale_factor: 5000.0
6 |
7 | input:
8 | camera_frame_id: camera
9 | camera_world_frame_id: camera_world
10 | subsample_factor: 1 # Process one out of this many images.
11 | poseframe_subsample_factor: 6 # Create pf every this number of images.
12 | resize_factor: 1 # Downsample image width/height by this factor.
13 |
14 | # Target rate at which to process images. This allows ROS to clear the
15 | # callback queues so that things get bagged.
16 | rate: 30
17 |
18 | output:
19 | quiet: false # Turn off terminal output.
20 | mesh: true # Publish mesh as pcl_msgs/PolygonMesh.
21 | idepthmap: false # Publish idepthmap as sensor_msgs/Image.
22 | depthmap: false # Publish depthmap as sensor_msgs/Image.
23 | cloud: false # Publish pointcloud sensor_msgs/PointCloud2. Prefer mesh or depthmap (in that order).
24 | features: false # Publish raw features points as sensors_msgs/Image.
25 | stats: true # Publish statistics message.
26 |
27 | # Integrate load measurements over this many frames before reporting. Timing
28 | # resolution of /proc is only a Jiffy (usually 0.01 sec), so querying faster
29 | # than this is not meaningful.
30 | load_integration_factor: 15
31 |
32 | # Used to scale idepths before they are colormapped. For example, if average
33 | # scene depth is >> 1m, then coloring by idepth will not provide much dynamic
34 | # range. Coloring by scene_color_scale * idepth (if scene_color_scale > 1) is
35 | # much more informative.
36 | scene_color_scale: 1.0
37 |
38 | # Filter out oblique triangles for display. This does not change the
39 | # underlying graph - it only affects the output/display.
40 | filter_oblique_triangles: True
41 | oblique_normal_thresh: 1.57 # 1.39626 # 80 degrees in radians.
42 | oblique_idepth_diff_factor: 0.35 # Relative difference between max/min triangle idepth.
43 | oblique_idepth_diff_abs: 0.1 # Absolute difference between max/min triangle idepth.
44 |
45 | # Filter out triangles with long edges for display. This does not change the
46 | # underlying graph - it only affects the output/display.
47 | filter_long_edges: True
48 | edge_length_thresh: 0.333 # As a fraction of image width.
49 |
50 | # Filter out triangles that are far from the camera for display. This does not
51 | # change the underlying graph - it only affects the output/display.
52 | filter_triangles_by_idepth: True
53 | min_triangle_idepth: 0.01
54 |
55 | # Don't publish output if angular rate exceeds this threshold. Will be ignored if 0.
56 | max_angular_rate: 0.0 # deg/sec
57 |
58 | debug:
59 | wireframe: true # Mesh wireframe colored by idepth.
60 | features: true # Features colored by idepth.
61 | detections: false # Feature detections
62 | matches: false # Epipolar line searches (green success, red failure).
63 | normals: false # Image colored by interpolated normal vectors.
64 | idepthmap: true # Colored idepthmap.
65 | text_overlay: true # Draw text overlays on images.
66 |     flip_images: false # Rotate debug images by 180 degrees for display.
67 |
68 | threading:
69 | openmp:
70 | num_threads: 4 # Number of threads used in parallel sections.
71 | chunk_size: 1024 # Number of items given to each thread.
72 |
73 | features:
74 | do_letterbox: False # Process only middle third of image.
75 | detection:
76 |     min_grad_mag: 5.0 # Minimum gradient magnitude.
77 | min_error: 100 # Minimum photo error.
78 | win_size: 16 # Detection grid resolution (win_size x win_size).
79 | tracking:
80 | win_size: 5 # Feature tracking window size. If not using LK, must be odd.
81 | max_dropouts: 5 # Max number of missed detections before feature is killed.
82 | epipolar_line_var: 4.0 # Epipolar line noise variance.
83 |
84 | regularization:
85 | do_median_filter: false # Apply median filter on graph.
86 | do_lowpass_filter: false # Apply lowpass filter on graph.
87 | do_nltgv2: true # Apply NLTGV2-L1 (i.e. planar) regularization.
88 | nltgv2:
89 | adaptive_data_weights: false # Set vertex data weights to inverse idepth variance.
90 | rescale_data: false # Rescale data to have mean 1.
91 | init_with_prediction: true # Initialize vertex idepths with predicted value from dense idepthmap.
92 | idepth_var_max: 0.01 # Maximum idepth var before feature can be added to graph.
93 | data_factor: 0.15 # Scalar that controls smoothness vs. data (0.1 for lvl5, 0.25 for lvl3).
94 | step_x: 0.001 # Optimization primal step size.
95 | step_q: 125.0 # Optimization dual step size.
96 | theta: 0.25 # Extra-gradient step size.
97 | min_height: -100000000000000.0 # Minimum height of features that are added to graph.
98 | max_height: 100000000000000.0 # Maximum height of features that are added to graph.
99 | check_sticky_obstacles: False
100 |
101 | # Not sure if these options still work, so don't set them before checking.
102 | analysis:
103 | pass_in_truth: false
104 |
--------------------------------------------------------------------------------
/cfg/kinect.yaml:
--------------------------------------------------------------------------------
1 | image_height: 480
2 | image_width: 640
3 | camera_name: kinect
4 | camera_matrix:
5 | rows: 3
6 | cols: 3
7 | data: [525.0, 0.0, 319.5, 0.0, 525.0, 239.5, 0.0, 0.0, 1.0]
8 | distortion_model: plumb_bob
9 | distortion_coefficients:
10 | rows: 1
11 | cols: 5
12 | data: [0.0, 0.0, 0.0, 0.0, 0.0]
13 | rectification_matrix:
14 | rows: 3
15 | cols: 3
16 | data: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
17 | projection_matrix:
18 | rows: 3
19 | cols: 4
20 | data: [525.0, 0.0, 319.5, 0.0, 0.0, 525.0, 239.5, 0.0, 0.0, 0.0,
21 | 1.0, 0.0]
22 |
--------------------------------------------------------------------------------
/flame_ros_nodelets.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 | Nodelet for Fast Lightweight Mesh Estimation (FLAME).
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/flame_ros_plugins.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | Displays a textured triangle mesh. PointCloud2 field of
7 | pcl_msgs/PolygonMesh must have texture coordinates.
8 |
9 | pcl_msgs/PolygonMesh
10 | sensor_msgs/Image
11 |
12 |
13 |
--------------------------------------------------------------------------------
/launch/flame_nodelet.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
25 |
26 |
27 |
28 |
31 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/launch/flame_offline_asl.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
15 |
17 |
18 |
19 |
20 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/launch/flame_offline_tum.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
15 |
17 |
18 |
19 |
20 |
42 |
43 |
44 |
--------------------------------------------------------------------------------
/launch/frames.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/msg/FlameNodeletStats.msg:
--------------------------------------------------------------------------------
1 | # This message contains statistics from FlameNodelet.
2 | std_msgs/Header header
3 |
4 | int32 img_id
5 | float64 timestamp
6 |
7 | # Timing stats
8 | int32 queue_size # Number of items in input queue.
9 | float32 fps # Frames-per-second throughput.
10 | float32 fps_max # Theoretical FPS based on main_ms.
11 | float32 main_ms # Time to execute one iteration of main loop.
12 | float32 waiting_ms # Time waiting for input.
13 | float32 process_frame_ms # Time to process a frame.
14 | float32 publishing_ms # Time to publish output.
15 | float32 debug_publishing_ms # Time to publish debug output.
16 | float32 latency_ms # Difference between getting input and publishing.
17 |
18 | # Max possible load.
19 | float32 max_load_cpu # Number of possible concurrent threads.
20 | float32 max_load_mem # Amount of memory in MB.
21 | float32 max_load_swap # Amount of available SWAP.
22 |
23 | # System load.
24 | float32 sys_load_cpu # CPU load on the system.
25 | float32 sys_load_mem # Memory load on the system.
26 | float32 sys_load_swap # SWAP load on the system.
27 |
28 | # Process load.
29 | int32 pid # Process ID.
30 | float32 pid_load_cpu # CPU load for this process.
31 | float32 pid_load_mem # Memory load for this process.
32 | float32 pid_load_swap # Swap load for this process.
--------------------------------------------------------------------------------
/msg/FlameStats.msg:
--------------------------------------------------------------------------------
1 | # This message contains statistics from Flame.
2 | std_msgs/Header header
3 |
4 | int32 img_id
5 | float64 timestamp
6 |
7 | int32 num_feats
8 | int32 num_vtx
9 | int32 num_tris
10 | int32 num_edges
11 | float32 coverage # Fraction of frame with depths.
12 |
13 | # Depth estimation/tracking stats.
14 | int32 num_idepth_updates
15 | int32 num_fail_max_var
16 | int32 num_fail_max_dropouts
17 | int32 num_fail_ref_patch_grad
18 | int32 num_fail_ambiguous_match
19 | int32 num_fail_max_cost
20 |
21 | # Regularizer stats.
22 | float32 nltgv2_total_smoothness_cost # Total cost.
23 | float32 nltgv2_avg_smoothness_cost # Cost per vertex.
24 | float32 nltgv2_total_data_cost
25 | float32 nltgv2_avg_data_cost
26 |
27 | float32 total_photo_error # Photo error over all valid pixels.
28 | float32 avg_photo_error # Photo error/pixel.
29 |
30 | # Timing stats
31 | float32 fps # Frames-per-second throughput
32 | float32 fps_max # Theoretical FPS based on update time.
33 | float32 update_ms # Main update function runtime.
34 | float32 update_locking_ms # Time waiting for update_lock.
35 | float32 frame_creation_ms
36 | float32 interpolate_ms
37 | float32 keyframe_ms
38 | float32 detection_ms
39 | float32 detection_loop_ms
40 | float32 update_idepths_ms
41 | float32 project_features_ms
42 | float32 project_graph_ms
43 | float32 sync_graph_ms
44 | float32 triangulate_ms
45 | float32 median_filter_ms
46 | float32 lowpass_filter_ms
47 |
48 |
49 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | flame_ros
4 | 1.0.0
5 | ROS bindings for flame.
6 |
7 |
8 |
9 |
10 | W. Nicholas Greene
11 |
12 |
13 |
14 |
15 |
16 | GPLv3
17 |
18 |
19 |
20 |
21 |
22 | https://github.com/robustrobotics/flame_ros
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | catkin
43 |
44 | boost
45 | libpcl-all-dev
46 | roscpp
47 | std_msgs
48 | sensor_msgs
49 | nav_msgs
50 | pcl_ros
51 | pcl_msgs
52 | pcl_conversions
53 | nodelet
54 | cv_bridge
55 | message_generation
56 | image_geometry
57 | camera_info_manager
58 | rviz
59 | flame
60 |
61 | boost
62 | libpcl-all-dev
63 | roscpp
64 | std_msgs
65 | sensor_msgs
66 | nav_msgs
67 | pcl_ros
68 | pcl_msgs
69 | pcl_conversions
70 | nodelet
71 | cv_bridge
72 | camera_info_manager_py
73 | message_generation
74 | message_runtime
75 | image_geometry
76 | camera_info_manager
77 | rviz
78 | flame
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/scripts/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM osrf/ros:kinetic-desktop-full
2 |
3 | MAINTAINER W. Nicholas Greene "wng@csail.mit.edu"
4 |
5 | # nvidia-docker hooks
6 | LABEL com.nvidia.volumes.needed="nvidia_driver"
7 | ENV PATH /usr/local/nvidia/bin:${PATH}
8 | ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
9 |
10 | # Install general software dependencies.
11 | RUN apt-get update
12 | RUN apt-get install -y apt-utils lsb-release
13 | RUN apt-get install -y git openssh-client wget
14 | RUN apt-get install -y sudo && rm -rf /var/lib/apt/lists/*
15 |
16 | # Install flame dependencies.
17 | RUN apt-get install -y libboost-all-dev libpcl-dev
18 |
19 | # Install catkin_tools.
20 | RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu `lsb_release -sc` main" > /etc/apt/sources.list.d/ros-latest.list'
21 | RUN wget http://packages.ros.org/ros.key -O - | sudo apt-key add -
22 | RUN apt-get update && apt-get install -y python-catkin-tools
23 |
24 | # Create a catkin workspace.
25 | RUN mkdir -p flame_ws/src
26 |
27 | # Clone the repo into the docker container.
28 | RUN cd flame_ws/src && git clone git@github.com:robustrobotics/flame.git
29 | RUN cd flame_ws/src && git clone git@github.com:robustrobotics/flame_ros.git
30 |
31 | # Initialize the workspace.
32 | RUN cd flame_ws && catkin init
33 |
34 | # Install rosdeps.
35 | RUN /bin/bash -c "source /opt/ros/kinetic/setup.sh && cd flame_ws && rosdep install -iy --from-paths ./src"
36 |
37 | # Install Eigen and Sophus.
38 | RUN mkdir -p flame_ws/dependencies/src
39 | RUN flame_ws/src/flame/scripts/eigen.sh /flame_ws/dependencies/src /flame_ws/dependencies
40 | RUN flame_ws/src/flame/scripts/sophus.sh /flame_ws/dependencies/src /flame_ws/dependencies
41 | RUN cp flame_ws/src/flame/scripts/env.sh /flame_ws/dependencies/
42 |
43 | # Now build flame and flame_ros.
44 | RUN /bin/bash -c "source /opt/ros/kinetic/setup.sh && cd flame_ws && source ./dependencies/env.sh && catkin build"
45 |
46 | # Add sourcing commands to bashrc.
47 | RUN echo "source /flame_ws/devel/setup.bash" >> ~/.bashrc
48 |
49 | # Download and extract EuRoC dataset.
50 | RUN wget http://robotics.ethz.ch/~asl-datasets/ijrr_euroc_mav_dataset/vicon_room1/V1_01_easy/V1_01_easy.zip
51 | RUN mkdir data && cd data && unzip ../V1_01_easy.zip
52 |
53 | # Edit the configuration yaml to point to the extracted data.
54 | RUN sed 's#/home/wng/Projects/rrg/data/euroc_mav_datasets/V1_01_easy#/data#g' -i /flame_ws/src/flame_ros/cfg/flame_offline_asl.yaml
55 |
56 |
--------------------------------------------------------------------------------
/scripts/flame_docker_example.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # This script will run an example dataset through flame inside a docker
4 | # container and display the output in RViz.
5 | #
6 | # Usage:
7 | #
8 | # >> ./flame_docker_example.sh
9 |
10 | nvidia-docker run -it --rm \
11 | --env="DISPLAY" \
12 | --env="QT_X11_NO_MITSHM=1" \
13 | --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
14 | flame \
15 | bash -ic "roscore & rviz -d /flame_ws/src/flame_ros/cfg/flame.rviz & sleep 5 && roslaunch flame_ros flame_offline_asl.launch"
16 |
--------------------------------------------------------------------------------
/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # flame_ros_nodelets
2 | add_library(flame_ros_nodelets
3 | flame_nodelet.cc
4 | utils.h
5 | utils.cc
6 | ros_sensor_streams/tracked_image_stream.h
7 | ros_sensor_streams/tracked_image_stream.cc
8 | ros_sensor_streams/conversions.h
9 | )
10 | target_link_libraries(flame_ros_nodelets
11 | ${flame_LIBRARIES}
12 | ${OpenCV_LIBS}
13 | ${PCL_LIBRARIES}
14 | ${catkin_LIBRARIES})
15 | install(TARGETS flame_ros_nodelets
16 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
17 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
18 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
19 |
20 | # Needed to ensure flame_ros messages are built before these targets.
21 | add_dependencies(flame_ros_nodelets
22 | ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
23 | if (FLAME_WITH_FLA)
24 | add_dependencies(flame_ros_nodelets fla_msgs_generate_messages_cpp)
25 | endif (FLAME_WITH_FLA)
26 |
27 | # flame_offline_tum.
28 | add_executable(flame_offline_tum flame_offline_tum.cc
29 | utils.h
30 | utils.cc
31 | ros_sensor_streams/tum_rgbd_offline_stream.h
32 | ros_sensor_streams/tum_rgbd_offline_stream.cc)
33 | target_link_libraries(flame_offline_tum
34 | ${flame_LIBRARIES}
35 | ${OpenCV_LIBS}
36 | ${PCL_LIBRARIES}
37 | ${catkin_LIBRARIES})
38 | install(TARGETS flame_offline_tum
39 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
40 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
41 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
42 |
43 | # Needed to ensure flame_ros messages are built before these targets.
44 | add_dependencies(flame_offline_tum
45 | ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
46 |
47 | # flame_offline_asl.
48 | add_executable(flame_offline_asl flame_offline_asl.cc
49 | utils.h
50 | utils.cc
51 | ros_sensor_streams/asl_rgbd_offline_stream.h
52 | ros_sensor_streams/asl_rgbd_offline_stream.cc
53 | dataset_utils/asl/dataset.h
54 | dataset_utils/asl/types.h
55 | dataset_utils/utils.h
56 | dataset_utils/utils.cc)
57 | target_link_libraries(flame_offline_asl
58 | ${flame_LIBRARIES}
59 | ${OpenCV_LIBS}
60 | ${PCL_LIBRARIES}
61 | ${YAML_CPP_LIBRARIES}
62 | ${catkin_LIBRARIES})
63 | install(TARGETS flame_offline_asl
64 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
65 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
66 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
67 |
68 | # Needed to ensure flame_ros messages are built before these targets.
69 | add_dependencies(flame_offline_asl
70 | ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
71 |
72 | if (FLAME_WITH_RVIZ)
73 | ## Here we specify which header files need to be run through "moc",
74 | ## Qt's meta-object compiler.
75 | set(MOC_FILES
76 | flame_rviz_plugins/textured_mesh_display.h)
77 |
78 | ## Declare a cpp library
79 | # add_library(flame_rviz_plugins
80 | # flame_rviz_plugins/flame_rviz_plugins.cpp
81 | # )
82 | add_library(flame_rviz_plugins
83 | flame_rviz_plugins/visual.h
84 | flame_rviz_plugins/textured_mesh_display.cc
85 | flame_rviz_plugins/textured_mesh_visual.h
86 | flame_rviz_plugins/textured_mesh_visual.cc
87 | flame_rviz_plugins/surface_normals_visual.h
88 | flame_rviz_plugins/surface_normals_visual.cc
89 | ${MOC_FILES})
90 |
91 | ## Add cmake target dependencies of the executable/library
92 | ## as an example, message headers may need to be generated before nodes
93 | # add_dependencies(flame_rviz_plugins_node flame_rviz_plugins_generate_messages_cpp)
94 | add_dependencies(flame_rviz_plugins
95 | pcl_msgs_generate_messages_cpp)
96 |
97 | ## Specify libraries to link a library or executable target against
98 | # target_link_libraries(flame_rviz_plugins_node
99 | # ${catkin_LIBRARIES}
100 | # )
101 | target_link_libraries(flame_rviz_plugins
102 | ${OGRE_LIBRARIES}
103 | ${QT_LIBRARIES}
104 | ${catkin_LIBRARIES}
105 | ${OpenCV_LIBS})
106 |
107 | install(TARGETS flame_rviz_plugins
108 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
109 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
110 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
111 |
112 | endif ()
113 |
--------------------------------------------------------------------------------
/src/dataset_utils/asl/dataset.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file dataset.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-26 19:36:17 (Wed)
21 | */
22 |
23 | #pragma once
24 |
25 | #include <string>
26 | #include <vector>
27 | #include <utility>
28 |
29 | #include <yaml-cpp/yaml.h>
30 |
31 | #include "dataset_utils/utils.h"
32 |
33 | namespace dataset_utils {
34 |
35 | namespace asl {
36 |
37 | /**
38 | * @brief Class that represents data in ASL format.
39 | *
40 | * An ASL dataset is a folder containing two files:
41 | * 1. A Yaml file containing metadata.
42 | * 2. A CSV file with one measurement per line.
43 | *
44 | * @tparam Data Type of data contained in this dataset.
45 | */
46 | template <typename Data>
47 | class Dataset final {
48 | public:
49 | Dataset() = default;
50 |
51 | /**
52 | * @brief Construct dataset from disk.
53 | *
54 | * @param[in] path Path to data folder.
55 | * @param[in] yaml Name of yaml file.
56 | * @param[in] csv Name of CSV file.
57 | */
58 | explicit Dataset(const std::string& path,
59 | const std::string& yaml = "sensor.yaml",
60 | const std::string& csv = "data.csv") :
61 | path_(),
62 | metadata_(),
63 | data_() {
64 | read(path, yaml, csv);
65 | return;
66 | }
67 |
68 | ~Dataset() = default;
69 |
70 | Dataset(const Dataset& rhs) = delete;
71 | Dataset& operator=(const Dataset& rhs) = delete;
72 |
73 | Dataset(Dataset&& rhs) = default;
74 | Dataset& operator=(Dataset&& rhs) = default;
75 |
76 | /**
77 | * @brief Read dataset from disk.
78 | *
79 | * @param[in] path Path to data folder.
80 | * @param[in] yaml Name of yaml file.
81 | * @param[in] csv Name of CSV file.
82 | */
83 | void read(const std::string& path,
84 | const std::string& yaml = "sensor.yaml",
85 | const std::string& csv = "data.csv") {
86 | path_ = path;
87 |
88 | // Strip trailing / in path if it exists.
89 | if (path_.back() == '/') {
90 | path_ = path_.substr(0, path_.size() - 1);
91 | }
92 |
93 | // Read in Yaml.
94 | metadata_ = YAML::LoadFile(path_ + "/" + yaml);
95 |
96 | // Read data into vector. Skip first line that describes columns.
97 | data_.clear();
98 | std::vector<std::string> lines = std::move(readLines(path_ + "/" + csv));
99 | for (int ii = 1; ii < lines.size(); ++ii) {
100 | data_.emplace_back(Data(lines[ii]));
101 | }
102 | return;
103 | }
104 |
105 | // Accessors.
106 | const std::string& path() const { return path_; }
107 | const YAML::Node& metadata() const { return metadata_; }
108 | const std::vector<Data>& data() const { return data_; }
109 |
110 | std::string& path() { return path_; }
111 | YAML::Node& metadata() { return metadata_; }
112 | std::vector<Data>& data() { return data_; }
113 |
114 | const Data& operator[](std::size_t idx) const { return data_[idx]; }
115 | Data& operator[](std::size_t idx) { return data_[idx]; }
116 |
117 | const std::size_t size() { return data_.size(); }
118 |
119 | private:
120 | std::string path_; // Path to containing folder.
121 | YAML::Node metadata_; // Metadata from Yaml file.
122 | std::vector<Data> data_; // Vector of parsed data.
123 | };
124 |
125 | } // namespace asl
126 |
127 | } // namespace dataset_utils
128 |
--------------------------------------------------------------------------------
/src/dataset_utils/asl/types.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file types.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-26 13:49:35 (Wed)
21 | */
22 |
23 | #pragma once
24 |
25 | #include <cstdint>
26 | #include <string>
27 |
28 | #include "dataset_utils/utils.h"
29 |
30 | namespace dataset_utils {
31 |
32 | namespace asl {
33 |
34 | /**
35 | * @brief Raw IMU data read from ASL csv file
36 | */
37 | struct IMUData {
38 | IMUData() : timestamp(0), gyro{0, 0, 0}, accel{0, 0, 0} {}
39 |
40 | /**
41 | * @brief Construct from ASL-formatted CSV string.
42 | *
43 | * Format: timestamp,gx,gy,gz,ax,ay,az
44 | */
45 | explicit IMUData(const std::string& csv) :
46 | timestamp(0), gyro{0, 0, 0}, accel{0, 0, 0} {
47 | parse(csv, ',', &timestamp, gyro, gyro + 1, gyro + 2,
48 | accel, accel + 1, accel + 2);
49 | return;
50 | }
51 |
52 | uint64_t timestamp; // Timestamp in nanoseconds.
53 | double gyro[3]; // Angular velocity [rad/sec].
54 | double accel[3]; // Linear acceleration [m/sec^2].
55 | };
56 |
57 | /**
58 | * @brief Pose data (e.g. Vicon or other 6DOF motion capture system).
59 | */
60 | struct PoseData {
61 | PoseData() : timestamp(0), trans{0, 0, 0}, quat{0, 0, 0, 1} {}
62 |
63 | /**
64 | * @brief Construct from ASL-formatted CSV string.
65 | *
66 | * Format: timestamp,tx,ty,tz,qw,qx,qy,qz
67 | */
68 | explicit PoseData(const std::string& csv) :
69 | timestamp(0), trans{0, 0, 0}, quat{0, 0, 0, 1} {
70 | parse(csv, ',', &timestamp, trans, trans + 1, trans + 2,
71 | quat + 3, quat, quat + 1, quat + 2);
72 | return;
73 | }
74 |
75 | uint64_t timestamp; // Timestamp in nanoseconds.
76 | double trans[3]; // Translation.
77 | double quat[4]; // Orientation (xyzw order).
78 | };
79 |
80 | /**
81 | * @brief Position data (e.g. Leica).
82 | */
83 | struct PositionData {
84 | PositionData() : timestamp(0), pos{0, 0, 0} {}
85 |
86 | /**
87 | * @brief Construct from ASL-formatted CSV string.
88 | *
89 | * Format: timestamp,tx,ty,tz
90 | */
91 | explicit PositionData(const std::string& csv) :
92 | timestamp(0), pos{0, 0, 0} {
93 | parse(csv, ',', &timestamp, pos, pos + 1, pos + 2);
94 | return;
95 | }
96 |
97 | uint64_t timestamp; // Timestamp in nanoseconds.
98 | double pos[3]; // Position data.
99 | };
100 |
101 | /**
102 | * @brief File data (e.g. for images or pointclouds stored in binary files).
103 | */
104 | struct FileData {
105 | FileData() : timestamp(0), filename() {}
106 |
107 | /**
108 | * @brief Construct from ASL-formatted CSV string.
109 | *
110 | * Format: timestamp,filename
111 | */
112 | explicit FileData(const std::string& csv) :
113 | timestamp(0), filename() {
114 | parse(csv, ',', &timestamp, &filename);
115 | return;
116 | }
117 |
118 | uint64_t timestamp; // Timestamp in nanoseconds.
119 | std::string filename; // Filename.
120 | };
121 |
122 | } // namespace asl
123 |
124 | } // namespace dataset_utils
125 |
--------------------------------------------------------------------------------
/src/dataset_utils/utils.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file utils.cc
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-26 17:24:19 (Wed)
21 | */
22 |
23 | #include "dataset_utils/utils.h"
24 |
25 | #include <algorithm>
26 | #include <fstream>
27 | #include <sstream>
28 |
29 | namespace dataset_utils {
30 |
31 | std::vector<std::string> readLines(const std::string& file) {
32 | std::ifstream stream(file.c_str());
33 | std::vector<std::string> lines;
34 | std::string line;
35 | while (std::getline(stream, line)) {
36 | // Remove carriage returns.
37 | line.erase(std::remove(line.begin(), line.end(), '\r'), line.end());
38 | lines.push_back(line);
39 | }
40 |
41 | return lines;
42 | }
43 |
44 | std::vector<std::string> split(const std::string &str, char delim) {
45 | std::vector<std::string> tokens;
46 | std::stringstream ss;
47 | ss.str(str);
48 | std::string token;
49 | while (std::getline(ss, token, delim)) {
50 | tokens.push_back(token);
51 | }
52 |
53 | return tokens;
54 | }
55 |
56 | } // namespace dataset_utils
57 |
--------------------------------------------------------------------------------
/src/dataset_utils/utils.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file utils.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-26 17:22:39 (Wed)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 | #include
28 | #include
29 | #include
30 |
31 | #include <yaml-cpp/yaml.h>
32 |
33 | namespace dataset_utils {
34 |
35 | /**
36 | * @brief Associate two vectors of arbitrary types.
37 | *
38 | * Adapted from associate.py in the TUM utilities.
39 | *
40 | * @tparam T Type of first vector.
41 | * @tparam U Type of second vector.
42 | * @tparam DiffFunctor Returns difference between elements of two vectors.
43 | * @param[in] a First vector.
44 | * @param[in] b Second vector.
45 | * @param[out] aidxs Indexes into a that are associated.
46 | * @param[out] bidxs Indexes into b that are associated.
47 | * @param[in] diff Functor that computes difference between vector elements.
48 | * @param[in] max_diff Maximum allowed difference to count as a match.
49 | */
50 | template <typename T, typename U, typename DiffFunctor>
51 | void associate(const std::vector<T>& a, const std::vector<U>& b,
52 | std::vector<std::size_t>* aidxs, std::vector<std::size_t>* bidxs,
53 | DiffFunctor diff, float max_diff = 0.02f) {
54 | // Find potential matches.
55 | std::vector<std::tuple<float, std::size_t, std::size_t> > potential_matches;
56 | for (std::size_t aidx = 0; aidx < a.size(); ++aidx) {
57 | for (std::size_t bidx = 0; bidx < b.size(); ++bidx) {
58 | float diffab = diff(a[aidx], b[bidx]);
59 | if (diffab < max_diff) {
60 | potential_matches.emplace_back(diffab, aidx, bidx);
61 | }
62 | }
63 | }
64 |
65 | // Sort potential matches by distance.
66 | auto comp = [](const std::tuple<float, std::size_t, std::size_t>& x,
67 | const std::tuple<float, std::size_t, std::size_t>& y) {
68 | return std::get<0>(x) < std::get<0>(y);
69 | };
70 | std::sort(potential_matches.begin(), potential_matches.end(), comp);
71 |
72 | // Select best matches with no repeats.
73 | std::unordered_set<std::size_t> as, bs;
74 | aidxs->clear();
75 | bidxs->clear();
76 | for (const auto& match : potential_matches) {
77 | std::size_t aidx = std::get<1>(match);
78 | std::size_t bidx = std::get<2>(match);
79 |
80 | if ((as.count(aidx) == 0) && (bs.count(bidx) == 0)) {
81 | aidxs->push_back(aidx);
82 | bidxs->push_back(bidx);
83 | as.insert(aidx);
84 | bs.insert(bidx);
85 | }
86 | }
87 |
88 | // Finally sort selected matches.
89 | std::sort(aidxs->begin(), aidxs->end());
90 | std::sort(bidxs->begin(), bidxs->end());
91 |
92 | return;
93 | }
94 |
95 | /**
96 | * @brief Read a file and return the lines in a vector.
97 | *
98 | * @param[in] file Input filename.
99 | */
100 | std::vector<std::string> readLines(const std::string& file);
101 |
102 | /**
103 | * @brief Split a string with given delimiter.
104 | *
105 | * Adapted from:
106 | * https://stackoverflow.com/questions/236129/most-elegant-way-to-split-a-string
107 | *
108 | * @param[in] str Input string.
109 | * @param[in] delim Delimiter that separates tokens.
110 | */
111 | std::vector<std::string> split(const std::string &str, char delim = ' ');
112 |
113 | /**
114 | * @brief Read a matrix of a given size from a yaml node.
115 | *
116 | * @tparam Scalar Element type of matrix.
117 | * @param[in] yaml Yaml node.
118 | * @param[in] name Name of matrix in yaml node.
119 | * @param[in] rows Expected matrix rows.
120 | * @param[in] cols Expected matrix cols.
121 | * @param[out] mat Matrix in row-major order (must be pre-allocated).
122 | */
123 | template <typename Scalar>
124 | bool readMatrix(const YAML::Node& yaml, const std::string& name,
125 | const int rows, const int cols, Scalar* mat) {
126 | if ((rows != yaml[name]["rows"].as<int>()) ||
127 | (cols != yaml[name]["cols"].as<int>())) {
128 | return false;
129 | }
130 |
131 | for (int ii = 0; ii < rows; ++ii) {
132 | for (int jj = 0; jj < cols; ++jj) {
133 | int idx = ii * cols + jj;
134 | mat[idx] = yaml[name]["data"][idx].as<Scalar>();
135 | }
136 | }
137 |
138 | return true;
139 | }
140 |
141 | /**
142 | * @brief Recursion base case.
143 | *
144 | * Converts a delimited stringstream to an output argument.
145 | *
146 | * @param[in] ss Input stringstream.
147 | * @param[in] delim Delimiter character.
148 | * @param[out] t0 Output value.
149 | */
150 | template <typename T0>
151 | void parseStream(std::stringstream& ss, char delim, T0* t0) {
152 | std::string token;
153 | std::getline(ss, token, delim);
154 | std::stringstream sst(token);
155 | sst >> (*t0);
156 | return;
157 | }
158 |
159 | /**
160 | * @brief Recursive variadic template function to parse stringstream.
161 | *
162 | * Uses template metaprogramming magic to parse a delimited stringstream into
163 | * given types.
164 | *
165 | * @param[in] ss Input stringstream.
166 | * @param[in] delim Delimiter character.
167 | * @param[out] t0 Output value that will be set.
168 | * @param[out] t1 Output value that will be recursed.
169 | * @param[out] t2s Output values that will be recursed.
170 | */
171 | template <typename T0, typename T1, typename ... T2s>
172 | void parseStream(std::stringstream& ss, char delim, T0* t0, T1* t1, T2s* ... t2s) {
173 | std::string token;
174 | std::getline(ss, token, delim);
175 | std::stringstream sst(token);
176 | sst >> (*t0);
177 | parseStream(ss, delim, t1, t2s...);
178 | return;
179 | }
180 |
181 | /**
182 | * @brief Parse a delimited string into different types.
183 | *
184 | * Uses template metaprogramming magic to parse a delimited string into given
185 | * types.
186 | *
187 | * Usage:
188 | * int a;
189 | * float b;
190 | * char c;
191 | * parse("1,1.1,a", ',', &a, &b, &c);
192 | *
193 | * @param[in] line Input string.
194 | * @param[in] delim Delimiter character.
195 | * @param[out] args Output arguments.
196 | */
197 | template <typename ... Ts>
198 | void parse(const std::string& line, char delim, Ts* ... args) {
199 | std::stringstream ss(line);
200 | parseStream(ss, delim, args...);
201 | return;
202 | }
203 |
204 | } // namespace dataset_utils
205 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/surface_normals_visual.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file surface_normals_visual.cc
19 | * @author W. Nicholas Greene
20 | * @date 2017-03-25 19:22:59 (Sat)
21 | */
22 |
23 | #include "flame_rviz_plugins/surface_normals_visual.h"
24 |
25 | #include
26 |
27 | #include
28 | #include
29 | #include
30 | #include
31 | // #include
32 |
33 | namespace flame_rviz_plugins {
34 |
35 | SurfaceNormalsVisual::SurfaceNormalsVisual(Ogre::SceneManager* scene_manager,
36 | Ogre::SceneNode* parent_node,
37 | Ogre::ColourValue color,
38 | float normal_size) :
39 | Visual(scene_manager, parent_node),
40 | mobject_(scene_manager->createManualObject()),
41 | color_(color),
42 | normal_size_(normal_size) {
43 | scene_node->attachObject(mobject_.get());
44 |
45 | mobject_->setVisible(false);
46 | mobject_->begin("BaseWhiteNoLighting", Ogre::RenderOperation::OT_LINE_LIST);
47 | mobject_->position(0, 0, 0);
48 | mobject_->colour(Ogre::ColourValue::Red);
49 | mobject_->position(5, 0, 0);
50 | mobject_->colour(Ogre::ColourValue::Red);
51 | mobject_->end();
52 |
53 | return;
54 | }
55 |
56 | void SurfaceNormalsVisual::setFromMessage(const pcl_msgs::PolygonMesh::ConstPtr& msg) {
57 | setFromMessage(*msg);
58 | return;
59 | }
60 |
61 | void SurfaceNormalsVisual::setFromMessage(const pcl_msgs::PolygonMesh& msg) {
62 | std::lock_guard<std::mutex> lock(*Visual::getMutex());
63 |
64 | // Grab offset of position and normals in buffer.
65 | int pos_offset = 0;
66 | int normal_offset = 0;
67 | for (int ii = 0; ii < msg.cloud.fields.size(); ++ii) {
68 | std::string name = msg.cloud.fields[ii].name;
69 | if (name == "x") {
70 | // Assume next two fields are yz.
71 | pos_offset = msg.cloud.fields[ii].offset;
72 | } else if (name == "normal_x") {
73 | // Assume next two fields are normal_y and normal_z.
74 | normal_offset = msg.cloud.fields[ii].offset;
75 | }
76 | }
77 |
78 | // Walk over vertices and draw.
79 | mobject_->beginUpdate(0);
80 |
81 | for (int ii = 0; ii < msg.cloud.height; ++ii) {
82 | for (int jj = 0; jj < msg.cloud.width; ++jj) {
83 | // Grab position.
84 | float xyz[3];
85 | int poffset = ii*msg.cloud.row_step + jj*msg.cloud.point_step + pos_offset;
86 | memcpy(&xyz, &(msg.cloud.data[poffset]), sizeof(float) * 3);
87 |
88 | // Grab normal.
89 | float nxyz[3];
90 | int noffset = ii*msg.cloud.row_step + jj*msg.cloud.point_step + normal_offset;
91 | memcpy(&nxyz, &(msg.cloud.data[noffset]), sizeof(float) * 3);
92 |
93 | // Draw.
94 | mobject_->position(xyz[0], xyz[1], xyz[2]);
95 | mobject_->colour(color_);
96 |
97 | mobject_->position(normal_size_*nxyz[0] + xyz[0],
98 | normal_size_*nxyz[1] + xyz[1],
99 | normal_size_*nxyz[2] + xyz[2]);
100 | }
101 | }
102 |
103 | mobject_->end();
104 |
105 | return;
106 | }
107 |
108 | } // namespace flame_rviz_plugins
109 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/surface_normals_visual.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file surface_normals_visual.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-03-25 19:18:48 (Sat)
21 | */
22 |
23 | #pragma once
24 |
25 | #include <memory>
26 | #include <mutex>
27 |
28 | #include
29 | #include
30 |
31 | #include <pcl_msgs/PolygonMesh.h>
32 |
33 | #include "flame_rviz_plugins/visual.h"
34 |
35 | namespace Ogre {
36 | class SceneNode;
37 | class SceneManager;
38 | } // namespace Ogre
39 |
40 | namespace flame_rviz_plugins {
41 |
42 | /**
43 | * \brief Draws surface normals as lines.
44 | */
45 | class SurfaceNormalsVisual : public Visual {
46 | public:
47 | /**
48 | * \brief Constructor.
49 | *
50 | * @param scene_manager[in] The current scene manager.
51 | * @param parent_node[in] The parent node. Only used to create a child node for this visual.
52 | * @param color[in] Color of the frustum.
53 | */
54 | SurfaceNormalsVisual(Ogre::SceneManager* scene_manager, Ogre::SceneNode* parent_node,
55 | Ogre::ColourValue color = Ogre::ColourValue::Red,
56 | float normal_size = 0.05f);
57 |
58 | virtual ~SurfaceNormalsVisual() = default;
59 |
60 | /**
61 | * \brief Set the frustum from a message.
62 | *
63 | * @param msg[in] View message.
64 | */
65 | void setFromMessage(const pcl_msgs::PolygonMesh::ConstPtr& msg);
66 | void setFromMessage(const pcl_msgs::PolygonMesh& msg);
67 |
68 | /**
69 | * @brief Set the size of the normal vectors.
70 | */
71 | void setNormalSize(float size) {
72 | std::lock_guard<std::mutex> lock(*Visual::getMutex());
73 | normal_size_ = size;
74 | return;
75 | }
76 |
77 | /**
78 | * @brief Set visibility of normals.
79 | */
80 | void setVisible(bool visible) {
81 | std::lock_guard<std::mutex> lock(*Visual::getMutex());
82 | mobject_->setVisible(visible);
83 | return;
84 | }
85 |
86 | private:
87 | std::shared_ptr<Ogre::ManualObject> mobject_;
88 | Ogre::ColourValue color_;
89 | float normal_size_;
90 | };
91 |
92 | } // namespace flame_rviz_plugins
93 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/textured_mesh_display.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file textured_mesh_display.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-02-21 15:49:11 (Tue)
21 | */
22 |
23 | #pragma once
24 |
25 | #ifndef Q_MOC_RUN
26 | #include
27 | #include
28 | #include
29 | #include
30 | #include
31 |
32 | #include
33 | #include
34 |
35 | #include
36 | #include
37 |
38 | #include
39 |
40 | #include
41 | #include
42 |
43 | #include
44 | #include
45 | #include
46 | #include
47 | #include
48 |
49 | #include
50 |
51 | #include
52 | #include
53 |
54 | #endif
55 |
56 | namespace flame_rviz_plugins {
57 |
58 | using rviz::EnumProperty; // Have to do this so that Qt can find the slot.
59 |
60 | class TexturedMeshVisual;
61 | class SurfaceNormalsVisual;
62 |
63 | /**
64 | * \brief Class that displays a textured mesh.
65 | *
66 | * Draws a triangle mesh defined in a pcl_msgs/PolygonMesh message. An image
67 | * topic can also be provided to be used as a texture.
68 | *
69 | * The PointCloud2 inside the PolygonMesh should have xyz fields defined as
70 | * floats and normal_xyz fields (also as floats). Texture coordinates are can be
71 | * supplied with uv fields (also as floats).
72 | *
73 | * The mesh can be shaded with different fragment shader programs (with and
74 | * without Phong shading) depending on your preferences.
75 | */
76 | class TexturedMeshDisplay: public rviz::Display {
77 | Q_OBJECT // NOLINT(whitespace/indent)
78 |
79 | public:
80 | TexturedMeshDisplay();
81 |
82 | void onInitialize();
83 |
84 | virtual ~TexturedMeshDisplay();
85 |
86 | void reset();
87 |
88 | private Q_SLOTS: // NOLINT(whitespace/indent)
89 | void updateTopic();
90 | void updateQueueSize();
91 | void updatePolygonMode();
92 | void updateShaderProgram();
93 | void updatePhongShading();
94 | void updateSceneColorScale();
95 | void updateShowNormals();
96 | void updateNormalSize();
97 |
98 | /**
99 | * @brief Fill list of available and working transport options.
100 | * Copied from rviz::DepthCloudDisplay.
101 | */
102 | void fillTransportOptionList(EnumProperty* property);
103 |
104 | protected:
105 | // Scans for available transport plugins. Copied from rviz::DepthCloudDisplay.
106 | void scanForTransportSubscriberPlugins();
107 |
108 | void subscribe();
109 | void unsubscribe();
110 |
111 | void onEnable();
112 | void onDisable();
113 |
114 | void fixedFrameChanged();
115 | void updateFixedFrameTransform(const std_msgs::Header& header);
116 |
117 | void processTextureMessage(const sensor_msgs::Image::ConstPtr& tex_msg);
118 | void processPolygonMeshMessage(const pcl_msgs::PolygonMesh::ConstPtr& msg);
119 | void processTexturedMeshMessages(const pcl_msgs::PolygonMesh::ConstPtr& mesh_msg,
120 | const sensor_msgs::Image::ConstPtr& img_msg);
121 |
122 | // Subscribes 'n stuff.
123 | std::shared_ptr > mesh_filter_;
124 | uint32_t num_meshes_received_;
125 |
126 | std::shared_ptr tex_it_;
127 | std::shared_ptr tex_filter_;
128 |
129 | // Internal message queues used to synchronize meshes and textures.
130 | std::queue<pcl_msgs::PolygonMesh::ConstPtr> mesh_queue_;
131 | std::queue<sensor_msgs::Image::ConstPtr> tex_queue_;
132 |
133 | // Main visual object that draws stuff.
134 | std::shared_ptr<TexturedMeshVisual> visual_;
135 |
136 | // Surface normals visual.
137 | std::shared_ptr<SurfaceNormalsVisual> normals_;
138 |
139 | // RViz properties 'n stuff.
140 | rviz::RosTopicProperty mesh_topic_prop_;
141 | rviz::RosTopicProperty tex_topic_prop_;
142 | std::shared_ptr tex_transport_prop_;
143 | rviz::EnumProperty polygon_mode_prop_;
144 | rviz::EnumProperty shader_program_prop_;
145 | rviz::BoolProperty phong_shading_prop_;
146 | rviz::FloatProperty scene_color_scale_prop_;
147 | rviz::BoolProperty show_normals_prop_;
148 | rviz::FloatProperty normal_size_prop_;
149 | rviz::IntProperty queue_size_prop_;
150 |
151 | int queue_size_;
152 | std::set<std::string> transport_plugin_types_;
153 |
154 | std::recursive_mutex mtx_;
155 | };
156 |
157 | } // end namespace flame_rviz_plugins
158 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/textured_mesh_visual.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 |  * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file textured_mesh_visual.cc
19 | * @author W. Nicholas Greene
20 | * @date 2017-02-21 20:53:05 (Tue)
21 | */
22 |
23 | #include "flame_rviz_plugins/textured_mesh_visual.h"
24 |
25 | #include
26 |
27 | #include
28 | #include
29 |
30 | #include
31 |
32 | #include
33 | #include
34 | #include
35 | #include
36 | #include
37 | #include
38 |
39 | #include
40 |
41 | namespace flame_rviz_plugins {
42 |
43 | TexturedMeshVisual::TexturedMeshVisual(Ogre::SceneManager* scene_manager,
44 | Ogre::SceneNode* parent_node,
45 | Ogre::PolygonMode poly_mode,
46 | ShaderProgram shader_program) :
47 | Visual(scene_manager, parent_node),
48 | mesh_(),
49 | mesh_name_("textured_mesh_visual_mesh"),
50 | entity_(nullptr),
51 | entity_name_("textured_mesh_visual_entity"),
52 | mesh_material_(),
53 | material_name_("textured_mesh_visual_mesh_material"),
54 | tex_img_(),
55 | tex_name_("textured_mesh_mesh_texture"),
56 | mode_(poly_mode),
57 | mesh_visibility_(true),
58 | shader_program_(shader_program),
59 | scene_color_scale_(1.0f),
60 | phong_shading_(true),
61 | vtx_shader_(),
62 | texture_shader_(),
63 | idepth_shader_(),
64 | jet_shader_(),
65 | normal_shader_() {
66 | // Set ambient light.
67 | scene_manager->setAmbientLight(Ogre::ColourValue(1.0, 1.0, 1.0));
68 |
69 | // Create material.
70 | Ogre::MaterialManager& material_manager = Ogre::MaterialManager::getSingleton();
71 | Ogre::String resource_group =
72 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME;
73 |
74 | mesh_material_ = material_manager.create(material_name_, resource_group);
75 |
76 | Ogre::Technique* tech = mesh_material_->getTechnique(0);
77 | Ogre::Pass* pass = tech->getPass(0);
78 | pass->setLightingEnabled(false);
79 | pass->setPolygonMode(mode_);
80 |
81 | // pass->setAmbient(0.5, 0.5, 0.5);
82 | // pass->setDiffuse(0.5, 0.5, 0.5, 1.0);
83 | // // pass->setSpecular(1.0, 0.0, 0.0, 10.0);
84 | // pass->setShadingMode(Ogre::ShadeOptions::SO_PHONG);
85 |
86 | // Vertex shader.
87 | vtx_shader_ = Ogre::HighLevelGpuProgramManager::getSingletonPtr()->
88 | createProgram("vertex_shader",
89 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
90 | "glsl", Ogre::GpuProgramType::GPT_VERTEX_PROGRAM);
91 | vtx_shader_->setSource(vtx_shader_src_);
92 | vtx_shader_->load();
93 | pass->setVertexProgram(vtx_shader_->getName());
94 |
95 | auto vparams = pass->getVertexProgramParameters();
96 | vparams->setNamedAutoConstant("MVP",
97 | Ogre::GpuProgramParameters::AutoConstantType::ACT_WORLDVIEWPROJ_MATRIX);
98 | vparams->setNamedAutoConstant("MV",
99 | Ogre::GpuProgramParameters::AutoConstantType::ACT_WORLDVIEW_MATRIX);
100 | vparams->setNamedAutoConstant("MVinvT",
101 | Ogre::GpuProgramParameters::AutoConstantType::ACT_INVERSE_TRANSPOSE_WORLDVIEW_MATRIX);
102 |
103 | // Texture fragment shader.
104 | texture_shader_ = Ogre::HighLevelGpuProgramManager::getSingletonPtr()->
105 | createProgram("texture_shader",
106 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
107 | "glsl", Ogre::GpuProgramType::GPT_FRAGMENT_PROGRAM);
108 | texture_shader_->setSource(texture_shader_src_);
109 | texture_shader_->load();
110 |
111 | // Inverse depth fragment shader.
112 | idepth_shader_ = Ogre::HighLevelGpuProgramManager::getSingletonPtr()->
113 | createProgram("idepth_shader",
114 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
115 | "glsl", Ogre::GpuProgramType::GPT_FRAGMENT_PROGRAM);
116 | idepth_shader_->setSource(idepth_shader_src_);
117 | idepth_shader_->load();
118 |
119 | // Jet fragment shader.
120 | jet_shader_ = Ogre::HighLevelGpuProgramManager::getSingletonPtr()->
121 | createProgram("jet_shader",
122 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
123 | "glsl", Ogre::GpuProgramType::GPT_FRAGMENT_PROGRAM);
124 | jet_shader_->setSource(jet_shader_src_);
125 | jet_shader_->load();
126 |
127 | // Surface normal fragment shader.
128 | normal_shader_ = Ogre::HighLevelGpuProgramManager::getSingletonPtr()->
129 | createProgram("normal_shader",
130 | Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME,
131 | "glsl", Ogre::GpuProgramType::GPT_FRAGMENT_PROGRAM);
132 | normal_shader_->setSource(normal_shader_src_);
133 | normal_shader_->load();
134 |
135 | // Set default to texture shader.
136 | pass->setFragmentProgram(texture_shader_->getName());
137 | auto fparams = pass->getFragmentProgramParameters();
138 | fparams->setNamedAutoConstant("ambient_color",
139 | Ogre::GpuProgramParameters::AutoConstantType::ACT_AMBIENT_LIGHT_COLOUR);
140 | fparams->setNamedAutoConstant("diffuse_color",
141 | Ogre::GpuProgramParameters::AutoConstantType::ACT_LIGHT_DIFFUSE_COLOUR);
142 | fparams->setNamedAutoConstant("specular_color",
143 | Ogre::GpuProgramParameters::AutoConstantType::ACT_LIGHT_SPECULAR_COLOUR);
144 | fparams->setNamedAutoConstant("light_dir",
145 | Ogre::GpuProgramParameters::AutoConstantType::ACT_LIGHT_DIRECTION_VIEW_SPACE);
146 | fparams->setNamedConstant("scene_color_scale", scene_color_scale_);
147 | fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);
148 |
149 | return;
150 | }
151 |
// Destructor: detaches and destroys the entity, then removes the material
// and mesh from their resource managers.
// NOTE(review): the compiled GPU programs and the loaded texture are NOT
// removed here -- they remain registered with their singleton managers;
// confirm whether that is intentional (it blocks re-construction under the
// same hard-coded names).
TexturedMeshVisual::~TexturedMeshVisual() {
  // Serialize against message-driven updates (base-class mutex).
  std::lock_guard lock(*getMutex());

  if (entity_ != nullptr) {
    getSceneNode()->detachObject(entity_);
    getSceneManager()->destroyEntity(entity_);
  }

  if (!mesh_material_.isNull()) {
    // Drop texture references before removing the material itself.
    mesh_material_->getTechnique(0)->getPass(0)->removeAllTextureUnitStates();
    Ogre::MaterialManager::getSingleton().remove(material_name_);
  }

  if (!mesh_.isNull()) {
    Ogre::MeshManager::getSingleton().remove(mesh_->getHandle());
    mesh_.setNull();
  }

  return;
}
172 |
// Set the fragment shader used to color the mesh.
//
// Rebinds the pass's fragment program and re-uploads the uniform constants
// that program expects. For TEXTURE the cached texture image is re-applied;
// every other program drops all texture unit states. The SURFACE_NORMAL
// branch sets only phong_shading (that shader declares no
// scene_color_scale uniform -- see normal_shader_src_ in the header).
void TexturedMeshVisual::setShaderProgram(ShaderProgram shader_program) {
  std::lock_guard lock(*getMutex());
  shader_program_ = shader_program;

  // Material is created in the constructor; nothing to rebind without it.
  if (mesh_material_.isNull()) {
    return;
  }

  Ogre::Pass* pass = mesh_material_->getTechnique(0)->getPass(0);

  if (shader_program_ == ShaderProgram::TEXTURE) {
    pass->setFragmentProgram(texture_shader_->getName());
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("scene_color_scale", scene_color_scale_);
    fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);
    // Re-attach the most recently received texture image.
    updateTexture(mesh_material_, tex_img_);
  } else if (shader_program_ == ShaderProgram::INVERSE_DEPTH) {
    pass->setFragmentProgram(idepth_shader_->getName());
    pass->removeAllTextureUnitStates();
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("scene_color_scale", scene_color_scale_);
    fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);
  } else if (shader_program_ == ShaderProgram::JET) {
    pass->setFragmentProgram(jet_shader_->getName());
    pass->removeAllTextureUnitStates();
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("scene_color_scale", scene_color_scale_);
    fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);
  } else if (shader_program_ == ShaderProgram::SURFACE_NORMAL) {
    pass->setFragmentProgram(normal_shader_->getName());
    pass->removeAllTextureUnitStates();
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);
  } else {
    ROS_WARN("Unrecognized ShaderProgram!");
    return;
  }

  return;
}
213 |
// Adapted from:
// https://grahamedgecombe.com/blog/custom-meshes-in-ogre3d
// http://www.ogre3d.org/tikiwiki/Generating+A+Mesh
//
// Update the visual from a mesh message and an optional texture image.
//
// Lazily creates the Ogre mesh (vertex declaration) and entity on first
// use, then refreshes the vertex and index buffers from the message. When
// the TEXTURE shader is active, tex_msg is decoded and uploaded as the
// mesh texture; it must be non-null in that case.
void TexturedMeshVisual::
setFromMessage(const pcl_msgs::PolygonMesh::ConstPtr& mesh_msg,
               const sensor_msgs::Image::ConstPtr& tex_msg) {
  std::lock_guard lock(*getMutex());

  ROS_DEBUG("Updating mesh!");

  // Vertex data is copied straight into the GPU buffer, so the cloud must
  // be densely packed with no per-row padding.
  if (mesh_msg->cloud.row_step !=
      mesh_msg->cloud.point_step * mesh_msg->cloud.width) {
    ROS_WARN("Row padding not supported!\n");
    return;
  }

  /*==================== Update mesh geometry. ====================*/
  if (mesh_.isNull()) {
    // Create mesh and submesh.
    createMesh(mesh_msg->cloud.fields);
  }

  updateVertexBuffer(mesh_, mesh_msg->cloud);
  updateIndexBuffer(mesh_, mesh_msg->polygons);

  // NOTE(review): bounds are hard-coded instead of computed from the
  // vertices; geometry outside this box may be culled incorrectly.
  mesh_->_setBounds(Ogre::AxisAlignedBox(-100, -100, -100, 100, 100, 100));
  mesh_->load();

  /*==================== Update mesh texture. ====================*/
  if ((shader_program_ == ShaderProgram::TEXTURE) && (tex_msg != nullptr)) {
    // Decompress image.
    tex_img_ = cv_bridge::toCvCopy(tex_msg, "rgb8")->image;
    updateTexture(mesh_material_, tex_img_);
  } else if ((shader_program_ == ShaderProgram::TEXTURE) && (tex_msg == nullptr)) {
    ROS_ERROR("ShaderProgram set to TEXTURE, but texture message is NULL!");
    return;
  }

  /*==================== Create instance and attach. ====================*/
  if (entity_ == nullptr) {
    Ogre::String resource_group =
        Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME;

    entity_ = getSceneManager()->createEntity(entity_name_, mesh_name_,
                                              resource_group);
    entity_->setMaterialName(material_name_, resource_group);
    getSceneNode()->attachObject(entity_);
  }

  getSceneNode()->setVisible(mesh_visibility_);

  ROS_DEBUG("Updated mesh!");

  return;
}
269 |
// Create the manual Ogre mesh and declare its vertex layout.
//
// Maps PointCloud2 fields onto Ogre vertex elements: "x" starts a float3
// position, "normal_x" a float3 normal, and "u" a float2 texture
// coordinate. Assumes the companion fields (y/z, normal_y/normal_z, v)
// immediately follow in the field list -- fields are skipped accordingly.
void TexturedMeshVisual::createMesh(const std::vector& fields) {
  Ogre::String resource_group =
      Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME;

  mesh_ = Ogre::MeshManager::getSingleton().createManual(mesh_name_,
                                                         resource_group);
  mesh_->createSubMesh();
  mesh_->sharedVertexData = new Ogre::VertexData(); // Destroyed by mesh.

  // Declare vertex data.
  Ogre::VertexDeclaration* vtx_dec = mesh_->sharedVertexData->vertexDeclaration;

  // Loop through point fields and add vertex elements.
  int field_idx = 0;
  while (field_idx < fields.size()) {
    std::string name = fields[field_idx].name;

    if (name == "x") {
      // Assume next two fields are yz.
      vtx_dec->addElement(0, fields[field_idx].offset,
                          Ogre::VET_FLOAT3, Ogre::VES_POSITION);
      field_idx += 3;
    } else if (name == "normal_x") {
      // Assume next two fields are normal_y and normal_z.
      vtx_dec->addElement(0, fields[field_idx].offset,
                          Ogre::VET_FLOAT3, Ogre::VES_NORMAL);
      field_idx += 3;
    } else if (name == "u") {
      // Assume next field is v.
      vtx_dec->addElement(0, fields[field_idx].offset,
                          Ogre::VET_FLOAT2, Ogre::VES_TEXTURE_COORDINATES);
      field_idx += 2;
    } else {
      // Unrecognized field; skip it.
      field_idx++;
    }
  }

  return;
}
309 |
// Copy the packed PointCloud2 data into the mesh's hardware vertex buffer.
//
// Reuses the existing buffer when it is large enough; otherwise a new one
// is allocated and bound at source 0. The buffer grows but is never
// shrunk, so repeated updates with smaller clouds do not reallocate.
void TexturedMeshVisual::
updateVertexBuffer(const Ogre::MeshPtr& mesh,
                   const sensor_msgs::PointCloud2& cloud) {
  Ogre::HardwareVertexBufferSharedPtr vtx_buffer;

  if (mesh->sharedVertexData->vertexBufferBinding->getBufferCount() > 0) {
    vtx_buffer = mesh->sharedVertexData->vertexBufferBinding->getBuffer(0);
  }

  if ((vtx_buffer.isNull()) || (vtx_buffer->getSizeInBytes() < cloud.data.size())) {
    // Create a new vertex buffer.
    Ogre::HardwareBufferManager& hw_manager =
        Ogre::HardwareBufferManager::getSingleton();
    vtx_buffer = hw_manager.createVertexBuffer(cloud.point_step,
                                               cloud.height * cloud.width,
                                               Ogre::HardwareBuffer::HBU_STATIC_WRITE_ONLY);

    mesh->sharedVertexData->vertexBufferBinding->setBinding(0, vtx_buffer);
  }

  // Copy data; the final argument discards the previous buffer contents.
  vtx_buffer->writeData(0, cloud.data.size(), cloud.data.data(), true);

  // Update listed number of vertices.
  mesh->sharedVertexData->vertexCount = cloud.height * cloud.width;

  return;
}
338 |
// Copy the polygon indices into the submesh's 32-bit hardware index buffer.
//
// Reuses the existing buffer when it holds enough indices; otherwise a new
// one is allocated. Assumes every element of `indices` is a triangle
// (exactly 3 vertex indices) -- NOTE(review): confirm upstream always
// publishes triangulated meshes; extra vertices would be silently dropped
// and fewer would read out of bounds.
void TexturedMeshVisual::updateIndexBuffer(const Ogre::MeshPtr& mesh,
                                           const std::vector& indices) {
  Ogre::SubMesh* sub_mesh = mesh->getSubMesh(0);
  sub_mesh->useSharedVertices = true;
  sub_mesh->indexData->indexCount = indices.size() * 3;
  sub_mesh->indexData->indexStart = 0;

  auto idx_buffer = sub_mesh->indexData->indexBuffer;

  if (idx_buffer.isNull() ||
      (idx_buffer->getNumIndexes() < sub_mesh->indexData->indexCount)) {
    // Create new index buffer.
    Ogre::HardwareBufferManager& hw_manager =
        Ogre::HardwareBufferManager::getSingleton();
    idx_buffer =
        hw_manager.createIndexBuffer(Ogre::HardwareIndexBuffer::IT_32BIT,
                                     indices.size() * 3,
                                     Ogre::HardwareBuffer::HBU_STATIC_WRITE_ONLY);
    sub_mesh->indexData->indexBuffer = idx_buffer;
  }

  // Lock buffer and grab pointer to data.
  uint32_t* idx_data =
      static_cast(idx_buffer->lock(Ogre::HardwareBuffer::HBL_NORMAL));

  // Copy index data, flattening each triangle into three consecutive indices.
  for (int ii = 0; ii < indices.size(); ++ii) {
    idx_data[3*ii] = indices[ii].vertices[0];
    idx_data[3*ii + 1] = indices[ii].vertices[1];
    idx_data[3*ii + 2] = indices[ii].vertices[2];
  }

  // Unlock.
  idx_buffer->unlock();

  return;
}
376 |
// Upload an image as the material's texture.
//
// Replaces any previously loaded texture of the same name and binds the
// new one to the first texture unit of the material's main pass.
//
// NOTE(review): the image is converted to "rgb8" upstream but loaded here
// as PF_B8G8R8; Ogre's packed pixel-format naming makes the in-memory byte
// order platform-dependent, so confirm colors render with correct channel
// order on the target platform.
void TexturedMeshVisual::updateTexture(const Ogre::MaterialPtr& material,
                                       const cv::Mat3b& tex_img) {
  Ogre::Image ogre_img;
  // Presumably wraps tex_img.data without copying -- tex_img must stay
  // alive until loadImage() below completes; verify against Ogre docs.
  ogre_img.loadDynamicImage(tex_img.data, tex_img.cols, tex_img.rows,
                            Ogre::PixelFormat::PF_B8G8R8);

  Ogre::String resource_group = Ogre::ResourceGroupManager::DEFAULT_RESOURCE_GROUP_NAME;
  Ogre::TextureManager* tex_man = Ogre::TextureManager::getSingletonPtr();

  Ogre::TexturePtr tex = tex_man->getByName(tex_name_, resource_group);
  if (!tex.isNull()) {
    // Delete old texture.
    tex_man->remove(tex_name_);
  }

  tex = tex_man->loadImage(tex_name_, resource_group, ogre_img);

  Ogre::Pass* pass = material->getTechnique(0)->getPass(0);
  Ogre::TextureUnitState* tex_unit = nullptr;
  if (pass->getNumTextureUnitStates() == 0) {
    // Create texture unit state.
    tex_unit = pass->createTextureUnitState();
  } else {
    tex_unit = pass->getTextureUnitState(0);
  }

  tex_unit->setTexture(tex);

  return;
}
407 |
408 | } // namespace flame_rviz_plugins
409 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/textured_mesh_visual.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see .
17 | *
18 | * @file textured_mesh_visual.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-02-21 20:57:52 (Tue)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 |
28 | #include
29 |
30 | #include
31 |
32 | #include
33 | #include
34 |
35 | #include
36 |
37 | namespace Ogre {
38 | class SceneNode;
39 | class SceneManager;
40 | class SubMesh;
41 | class TextureUnitState;
42 | } // namespace Ogre
43 |
44 | namespace flame_rviz_plugins {
45 |
/**
 * @brief Class that draws a triangle mesh.
 *
 * Draws a triangle mesh defined in a pcl_msgs/PolygonMesh message. An image
 * topic can also be provided to be used as a texture.
 *
 * The PointCloud2 inside the PolygonMesh should have xyz fields defined as
 * floats and normal_xyz fields (also as floats). Texture coordinates can be
 * supplied with uv fields (also as floats).
 *
 * The mesh can be shaded with different fragment shader programs (with and
 * without Phong shading) depending on your preferences.
 */
class TexturedMeshVisual final : public Visual {
 public:
  /**
   * @brief Shader program for the mesh.
   */
  enum ShaderProgram {
    TEXTURE, // Use texture.
    INVERSE_DEPTH, // Color by inverse depth.
    JET, // Color by inverse depth using jet colormap.
    SURFACE_NORMAL // Color by surface normal.
  };

  /**
   * @brief Constructor.
   *
   * @param[in] scene_manager The Ogre scene manager.
   * @param[in] parent_node The parent scene node in the Ogre scene graph.
   * @param[in] poly_mode The default polygon mode to draw the mesh.
   * @param[in] shader_program The default fragment shader program to draw the mesh.
   */
  TexturedMeshVisual(Ogre::SceneManager* scene_manager,
                     Ogre::SceneNode* parent_node,
                     Ogre::PolygonMode poly_mode = Ogre::PM_WIREFRAME,
                     ShaderProgram shader_program = ShaderProgram::INVERSE_DEPTH);
  ~TexturedMeshVisual();

  // Non-copyable and non-movable (owns named Ogre resources).
  TexturedMeshVisual(const TexturedMeshVisual& rhs) = delete;
  TexturedMeshVisual& operator=(const TexturedMeshVisual& rhs) = delete;

  TexturedMeshVisual(const TexturedMeshVisual&& rhs) = delete;
  TexturedMeshVisual& operator=(const TexturedMeshVisual&& rhs) = delete;

  /**
   * @brief Draw mesh defined by PolygonMesh.
   *
   * @param[in] msg Incoming mesh message.
   * @param[in] tex_msg Optional texture image (required when the TEXTURE
   *                    shader is active).
   */
  void setFromMessage(const pcl_msgs::PolygonMesh::ConstPtr& msg,
                      const sensor_msgs::Image::ConstPtr& tex_msg = nullptr);

  /**
   * @brief Set mesh visibility.
   *
   * @param[in] new_vis Desired visibility.
   */
  void setMeshVisibility(const bool new_vis) {
    std::lock_guard lock(*getMutex());
    mesh_visibility_ = new_vis;
    getSceneNode()->setVisible(new_vis);
    return;
  }

  /**
   * @brief Set the polygon drawing mode.
   *
   * Either points, wireframe, or solid mesh.
   *
   * @param[in] mode New mode.
   */
  void setPolygonMode(Ogre::PolygonMode mode) {
    std::lock_guard lock(*getMutex());
    this->mode_ = mode;
    // Material may not exist yet if called before construction finishes.
    if (mesh_material_.get() != nullptr) {
      mesh_material_->getTechnique(0)->getPass(0)->setPolygonMode(mode_);
    }
    return;
  }

  /**
   * @brief Set the scene color scale used by the idepth/jet/texture shaders.
   */
  void setSceneColorScale(float scale) {
    std::lock_guard lock(*getMutex());
    scene_color_scale_ = scale;

    if (mesh_material_.isNull()) {
      return;
    }

    // Push the new value to the currently bound fragment program.
    Ogre::Pass* pass = mesh_material_->getTechnique(0)->getPass(0);
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("scene_color_scale", scene_color_scale_);

    return;
  }

  /**
   * @brief Enable or disable Phong Shading.
   */
  void setPhongShading(bool phong_shading) {
    std::lock_guard lock(*getMutex());
    phong_shading_ = phong_shading;

    if (mesh_material_.isNull()) {
      return;
    }

    // Push the new value to the currently bound fragment program.
    Ogre::Pass* pass = mesh_material_->getTechnique(0)->getPass(0);
    auto fparams = pass->getFragmentProgramParameters();
    fparams->setNamedConstant("phong_shading", phong_shading_ ? 1 : 0);

    return;
  }

  /**
   * @brief Set the shader for the mesh.
   */
  void setShaderProgram(ShaderProgram shader_program);

 private:
  // Basic vertex shader.
  // This converts each vertex into OpenGL clip coordinates (multiplication with
  // the MVP matrix) and forwards other information to the fragment shader
  // including the position of each fragment in "view coordinates"
  // (multiplication with the MV matrix), the outward surface normal for each
  // fragment in view coordinates (multiplication with the transpose of the
  // inverse of the MV matrix), and the depth of each fragment. Constants to the
  // shader are defined as uniforms, while inputs and outputs are defined as ins
  // and outs (per vertex and per fragment).
  // See here for tutorial: https://learnopengl.com/#!Lighting/Basic-Lighting
  const std::string vtx_shader_src_ =
      "#version 330 core\n"
      "uniform mat4 MVP;\n"
      "uniform mat4 MV;\n"
      "uniform mat4 MVinvT;\n"
      "in vec4 vertex;\n"
      "in vec3 normal;\n"
      "in vec2 uv0;\n"
      "out vec4 frag_pos;\n"
      "out vec3 frag_normal;\n"
      "out float frag_depth;\n"
      "out vec2 out_texcoord;\n"
      "void main() {\n"
      "  frag_pos = MV * vertex;\n"
      "  frag_normal = vec3(MVinvT * vec4(normal, 0.0));\n"
      "  frag_depth = vertex.z;\n"
      "  gl_Position = MVP * vertex;\n"
      "  out_texcoord = uv0;\n"
      "}";

  // Fragment shader using image texture.
  // Optionally performs phong shading.
  // See here for tutorial: https://learnopengl.com/#!Lighting/Basic-Lighting
  const std::string texture_shader_src_ =
      "#version 330 core\n"
      "uniform float scene_color_scale;\n"
      "uniform int phong_shading;\n"
      "uniform vec3 ambient_color;\n"
      "uniform vec3 diffuse_color;\n"
      "uniform vec3 specular_color;\n"
      "uniform vec3 light_dir;\n"
      "uniform sampler2D tex_sampler;\n"
      "in vec4 frag_pos;\n"
      "in vec3 frag_normal;\n"
      "in float frag_depth;\n"
      "in vec2 out_texcoord;\n"
      "out vec4 frag_color;\n"
      ""
      "void main() {\n"
      "  // Color from colormap.\n"
      "  vec3 rgb = texture2D(tex_sampler, out_texcoord).xyz;\n"
      ""
      "  // Ambient color.\n"
      "  vec3 acolor = ambient_color;\n"
      ""
      "  // Diffuse color.\n"
      "  float dfactor = max(dot(frag_normal, -light_dir), 0.0);\n"
      "  vec3 dcolor = dfactor * diffuse_color;\n"
      ""
      "  // Specular color.\n"
      "  vec3 view_dir = normalize(-frag_pos.xyz);\n"
      "  vec3 reflect_dir = reflect(light_dir, frag_normal);\n"
      "  int shininess = 32;"
      "  float sfactor = pow(max(dot(view_dir, reflect_dir), 0.0), shininess);\n"
      "  vec3 scolor = sfactor * specular_color;\n"
      ""
      "  if (phong_shading > 0) {\n"
      "    frag_color = vec4((0.7*acolor + 0.2*dcolor + 0.1*scolor) * rgb, 1.0); \n"
      "  } else {\n"
      "    frag_color = vec4(rgb, 1.0);\n"
      "  }\n"
      "}";

  // Fragment shader using grayscale inverse depth color scheme.
  // Optionally performs phong shading.
  // See here for tutorial: https://learnopengl.com/#!Lighting/Basic-Lighting
  const std::string idepth_shader_src_ =
      "#version 330 core\n"
      "uniform float scene_color_scale;\n"
      "uniform int phong_shading;\n"
      "uniform vec3 ambient_color;\n"
      "uniform vec3 diffuse_color;\n"
      "uniform vec3 specular_color;\n"
      "uniform vec3 light_dir;\n"
      "in vec4 frag_pos;\n"
      "in vec3 frag_normal;\n"
      "in float frag_depth;\n"
      "out vec4 frag_color;\n"
      ""
      "void main() {\n"
      "  // Color from colormap.\n"
      "  vec3 cmap = scene_color_scale * vec3(1.0/frag_depth, 1.0/frag_depth, 1.0/frag_depth);\n"
      ""
      "  // Ambient color.\n"
      "  vec3 acolor = ambient_color;\n"
      ""
      "  // Diffuse color.\n"
      "  float dfactor = max(dot(frag_normal, -light_dir), 0.0);\n"
      "  vec3 dcolor = dfactor * diffuse_color;\n"
      ""
      "  // Specular color.\n"
      "  vec3 view_dir = normalize(-frag_pos.xyz);\n"
      "  vec3 reflect_dir = reflect(light_dir, frag_normal);\n"
      "  int shininess = 32;"
      "  float sfactor = pow(max(dot(view_dir, reflect_dir), 0.0), shininess);\n"
      "  vec3 scolor = sfactor * specular_color;\n"
      ""
      "  if (phong_shading > 0) {\n"
      "    frag_color = vec4((0.7*acolor + 0.2*dcolor + 0.1*scolor) * cmap, 1.0);\n"
      "  } else {\n"
      "    frag_color = vec4(cmap, 1.0);\n"
      "  }\n"
      "}";

  // Fragment shader using jet colormap applied to inverse depth.
  // Optionally performs phong shading.
  // See here for tutorial: https://learnopengl.com/#!Lighting/Basic-Lighting
  const std::string jet_shader_src_ =
      "#version 330 core\n"
      "uniform float scene_color_scale;\n"
      "uniform int phong_shading;\n"
      "uniform vec3 ambient_color;\n"
      "uniform vec3 diffuse_color;\n"
      "uniform vec3 specular_color;\n"
      "uniform vec3 light_dir;\n"
      "in vec4 frag_pos;\n"
      "in vec3 frag_normal;\n"
      "in float frag_depth;\n"
      "out vec4 frag_color;\n"
      ""
      "vec3 jet(float v, float vmin, float vmax) {\n"
      "  vec3 c = vec3(255, 255, 255); // white\n"
      "  float dv;\n"
      ""
      "  if (v < vmin)\n"
      "    v = vmin;\n"
      "  if (v > vmax)\n"
      "    v = vmax;\n"
      "  dv = vmax - vmin;\n"
      ""
      "  if (v < (vmin + 0.25 * dv)) {\n"
      "    c[0] = 0;\n"
      "    c[1] = 255 * (4 * (v - vmin) / dv);\n"
      "  } else if (v < (vmin + 0.5 * dv)) {\n"
      "    c[0] = 0;\n"
      "    c[2] = 255 * (1 + 4 * (vmin + 0.25 * dv - v) / dv);\n"
      "  } else if (v < (vmin + 0.75 * dv)) {\n"
      "    c[0] = 255 * (4 * (v - vmin - 0.5 * dv) / dv);\n"
      "    c[2] = 0;\n"
      "  } else {\n"
      "    c[1] = 255 * (1 + 4 * (vmin + 0.75 * dv - v) / dv);\n"
      "    c[2] = 0;\n"
      "  }\n"
      "  return c;\n"
      "}\n"
      ""
      "void main() {\n"
      "  // Color from colormap.\n"
      "  vec3 cmap = jet(scene_color_scale * 1.0/frag_depth, 0.0, 2.0)/255;\n"
      ""
      "  // Ambient color.\n"
      "  vec3 acolor = ambient_color;\n"
      ""
      "  // Diffuse color.\n"
      "  float dfactor = max(dot(frag_normal, -light_dir), 0.0);\n"
      "  vec3 dcolor = dfactor * diffuse_color;\n"
      ""
      "  // Specular color.\n"
      "  vec3 view_dir = normalize(-frag_pos.xyz);\n"
      "  vec3 reflect_dir = reflect(light_dir, frag_normal);\n"
      "  int shininess = 32;"
      "  float sfactor = pow(max(dot(view_dir, reflect_dir), 0.0), shininess);\n"
      "  vec3 scolor = sfactor * specular_color;\n"
      ""
      "  if (phong_shading > 0) {\n"
      "    frag_color = vec4((0.7*acolor + 0.2*dcolor + 0.1*scolor) * cmap, 1.0);\n"
      "  } else {\n"
      "    frag_color = vec4(cmap, 1.0);\n"
      "  }\n"
      "}";

  // Fragment shader using normal vector color scheme.
  // Optionally performs phong shading.
  // NOTE(review): unlike the other fragment shaders, this one declares no
  // scene_color_scale uniform.
  // See here for tutorial: https://learnopengl.com/#!Lighting/Basic-Lighting
  const std::string normal_shader_src_ =
      "#version 330 core\n"
      "uniform int phong_shading;\n"
      "uniform vec3 ambient_color;\n"
      "uniform vec3 diffuse_color;\n"
      "uniform vec3 specular_color;\n"
      "uniform vec3 light_dir;\n"
      "in vec4 frag_pos;\n"
      "in vec3 frag_normal;\n"
      "in float frag_depth;\n"
      "out vec4 frag_color;\n"
      "void main() {\n"
      "  // Color from colormap.\n"
      "  vec3 cmap = vec3((frag_normal.x + 1)/2, (frag_normal.y + 1)/2, (127 * frag_normal.z + 127)/255);\n"
      ""
      "  // Ambient color.\n"
      "  vec3 acolor = ambient_color;\n"
      ""
      "  // Diffuse color.\n"
      "  float dfactor = max(dot(frag_normal, -light_dir), 0.0);\n"
      "  vec3 dcolor = dfactor * diffuse_color;\n"
      ""
      "  // Specular color.\n"
      "  vec3 view_dir = normalize(-frag_pos.xyz);\n"
      "  vec3 reflect_dir = reflect(light_dir, frag_normal);\n"
      "  int shininess = 32;"
      "  float sfactor = pow(max(dot(view_dir, reflect_dir), 0.0), shininess);\n"
      "  vec3 scolor = sfactor * specular_color;\n"
      ""
      "  if (phong_shading > 0) {\n"
      "    frag_color = vec4((0.7*acolor + 0.2*dcolor + 0.1*scolor) * cmap, 1.0);\n"
      "  } else {\n"
      "    frag_color = vec4(cmap, 1.0);\n"
      "  }\n"
      "}";

  // Methods to create and update the mesh (see .cc for details).
  void createMesh(const std::vector& fields);
  void updateVertexBuffer(const Ogre::MeshPtr& mesh,
                          const sensor_msgs::PointCloud2& cloud);
  void updateIndexBuffer(const Ogre::MeshPtr& mesh,
                         const std::vector& indices);
  void updateTexture(const Ogre::MaterialPtr& material, const cv::Mat3b& tex_img);

  Ogre::MeshPtr mesh_; // Main mesh object.
  std::string mesh_name_; // Ogre string for this mesh.

  Ogre::Entity* entity_; // An instance of a mesh in the scene is called an entity.
  std::string entity_name_; // Ogre string for this entity.

  Ogre::MaterialPtr mesh_material_; // Material for the mesh (how it's textured/lit/shaded/etc).
  std::string material_name_; // Ogre string for this material.

  cv::Mat3b tex_img_; // Texture image.
  std::string tex_name_; // Texture name.

  Ogre::PolygonMode mode_; // Whether to draw as points, wireframe, or solid.

  bool mesh_visibility_; // True if mesh should be visible.

  float scene_color_scale_; // Parameter for color scale.
  bool phong_shading_; // True if phong shading should be applied.

  ShaderProgram shader_program_; // Controls which fragment shader to use.

  Ogre::HighLevelGpuProgramPtr vtx_shader_; // Main vertex shader.
  Ogre::HighLevelGpuProgramPtr texture_shader_; // Image texture fragment shader.
  Ogre::HighLevelGpuProgramPtr idepth_shader_; // IDepth fragment shader.
  Ogre::HighLevelGpuProgramPtr jet_shader_; // Jet fragment shader.
  Ogre::HighLevelGpuProgramPtr normal_shader_; // Normal vector fragment shader.
};
424 |
425 | } // namespace flame_rviz_plugins
426 |
--------------------------------------------------------------------------------
/src/flame_rviz_plugins/visual.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see .
17 | *
18 | * @file visual.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:47:01 (Mon)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 |
27 | #include
28 | #include
29 |
30 | namespace flame_rviz_plugins {
31 |
/**
 * \brief Base class for RVIZ visuals.
 *
 * Creates and owns a reference to a child scene node under the given
 * parent, and exposes accessors for the scene manager, the scene node, and
 * a mutex that subclasses use to serialize access to their state.
 */
class Visual {
 public:
  // Creates a child scene node under parent_node. The Ogre objects are
  // owned by the scene graph, not by this class.
  Visual(Ogre::SceneManager* scene_manager,
         Ogre::SceneNode* parent_node) :
      scene_manager(scene_manager),
      scene_node(parent_node->createChildSceneNode()) {}

  // NOTE(review): the child scene node created in the constructor is not
  // destroyed here -- it remains in the scene graph; confirm intended.
  virtual ~Visual() {}

  // Accessors for the scene manager.
  inline Ogre::SceneManager* getSceneManager() {
    return scene_manager;
  }

  inline const Ogre::SceneManager* getSceneManager() const {
    return scene_manager;
  }

  // Accessors for this visual's scene node.
  inline Ogre::SceneNode* getSceneNode() {
    return scene_node;
  }

  inline const Ogre::SceneNode* getSceneNode() const {
    return scene_node;
  }

  // Mutex guarding subclass state.
  inline std::mutex* getMutex() {
    return &mtx;
  }

 protected:
  Ogre::SceneManager* scene_manager;  // Not owned.
  Ogre::SceneNode* scene_node;        // Child node created in the constructor.
  std::mutex mtx;                     // Guards subclass state.
};
69 |
70 |
71 | } // namespace flame_rviz_plugins
72 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/asl_rgbd_offline_stream.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file asl_rgbd_offline_stream.cc
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-29 19:27:10 (Sat)
21 | */
22 |
23 | #include "ros_sensor_streams/asl_rgbd_offline_stream.h"
24 |
25 | #include
26 |
27 | #include
28 | #include
29 |
30 | #include
31 |
32 | #include
33 | #include
34 | #include
35 |
36 | #include
37 |
38 | #include
39 |
40 | namespace bfs = boost::filesystem;
41 |
42 | namespace du = dataset_utils;
43 | namespace dua = dataset_utils::asl;
44 |
45 | namespace ros_sensor_streams {
46 |
// Constructor: loads ASL-format pose/rgb/(optional) depth datasets, fills in
// camera intrinsics and distortion from the rgb dataset metadata, sets up
// optional ROS publishers, associates the streams by timestamp, and extracts
// the pose-sensor and camera extrinsics relative to the body frame.
//
// NOTE(review): several template argument lists appear stripped in this
// listing (e.g. Dataset<...>, .as<...>()) -- confirm against the original
// source before building.
ASLRGBDOfflineStream::ASLRGBDOfflineStream(ros::NodeHandle& nh,
                                           const std::string& pose_path,
                                           const std::string& rgb_path,
                                           const std::string& depth_path,
                                           const std::string& camera_name,
                                           const std::string& camera_world_frame_id,
                                           const std::string& camera_frame_id,
                                           WorldFrame world_frame,
                                           bool publish) :
    nh_(nh),
    inited_(false),
    camera_name_(camera_name),
    camera_world_frame_id_(camera_world_frame_id),
    camera_frame_id_(camera_frame_id),
    world_frame_(world_frame),
    publish_(publish),
    curr_idx_(0),
    pose_data_(pose_path),
    rgb_data_(rgb_path),
    depth_data_(),
    pose_idxs_(),
    rgb_idxs_(),
    depth_idxs_(),
    q_pose_in_body_(),
    t_pose_in_body_(),
    q_cam_in_body_(),
    t_cam_in_body_(),
    width_(),
    height_(),
    K_(Eigen::Matrix3f::Identity()),
    cinfo_(),
    intensity_to_depth_factor_(),
    tf_pub_(),
    it_(nh),
    rgb_pub_(),
    depth_pub_() {
  // Depth data is optional: only load it if the directory exists.
  bfs::path depth_path_fs(depth_path);
  if (bfs::exists(depth_path_fs)) {
    // Read in depth data if it exists.
    depth_data_ = std::move(dataset_utils::asl::
                            Dataset(depth_path));
  }

  // Set calibration information from the rgb dataset's YAML metadata.
  width_ = rgb_data_.metadata()["resolution"][0].as();
  height_ = rgb_data_.metadata()["resolution"][1].as();
  cinfo_.width = width_;
  cinfo_.height = height_;
  cinfo_.distortion_model = "plumb_bob";

  // Pinhole intrinsics: focal lengths (fu, fv) and principal point (cu, cv).
  float fu = rgb_data_.metadata()["intrinsics"][0].as();
  float fv = rgb_data_.metadata()["intrinsics"][1].as();
  float cu = rgb_data_.metadata()["intrinsics"][2].as();
  float cv = rgb_data_.metadata()["intrinsics"][3].as();

  cinfo_.K = {fu, 0, cu,
              0, fv, cv,
              0, 0, 1};

  K_(0, 0) = fu;
  K_(0, 2) = cu;
  K_(1, 1) = fv;
  K_(1, 2) = cv;

  // Projection matrix P = [K | 0] (images are published rectified).
  cinfo_.P = {fu, 0, cu, 0,
              0, fv, cv, 0,
              0, 0, 1, 0};

  // Plumb-bob distortion (k1, k2, p1, p2); k3 is not provided by the dataset.
  float k1 = rgb_data_.metadata()["distortion_coefficients"][0].as();
  float k2 = rgb_data_.metadata()["distortion_coefficients"][1].as();
  float p1 = rgb_data_.metadata()["distortion_coefficients"][2].as();
  float p2 = rgb_data_.metadata()["distortion_coefficients"][3].as();
  float k3 = 0.0f;

  cinfo_.D = {k1, k2, p1, p2, k3};

  // Raw depth images are scaled integers; this factor converts to meters.
  if (!depth_data_.path().empty()) {
    intensity_to_depth_factor_ = depth_data_.metadata()["depth_scale_factor"].as();
  }

  if (publish_) {
    rgb_pub_ = it_.advertiseCamera("/" + camera_name_ + "/rgb/image_rect_color", 5);

    if (!depth_data_.path().empty()) {
      depth_pub_ = it_.advertiseCamera("/" + camera_name_ + "/depth_registered/image_rect", 5);
    }
  }

  // Synchronize pose/rgb/depth indices by timestamp.
  associateData();

  // Extract transform of pose sensor in body frame (T_BS from metadata).
  Eigen::Matrix T_pose_in_body;
  du::readMatrix(pose_data_.metadata(), "T_BS", 4, 4, T_pose_in_body.data());
  q_pose_in_body_ = Eigen::Quaterniond(T_pose_in_body.block<3, 3>(0, 0));
  t_pose_in_body_ = T_pose_in_body.block<3, 1>(0, 3);

  // Extract transform of camera in body frame (rgb and depth are assumed to
  // share extrinsics).
  Eigen::Matrix T_cam_in_body;
  du::readMatrix(rgb_data_.metadata(), "T_BS", 4, 4, T_cam_in_body.data());
  q_cam_in_body_ = Eigen::Quaterniond(T_cam_in_body.block<3, 3>(0, 0));
  t_cam_in_body_ = T_cam_in_body.block<3, 1>(0, 3);

  return;
}
151 |
// Associate pose, rgb, and (optional) depth entries by nearest timestamp so
// that pose_idxs_[k], rgb_idxs_[k], and (when depth exists) depth_idxs_[k]
// refer to the same instant. Without depth data, depth index lists mirror the
// rgb ones so the intersection below is a no-op.
//
// NOTE(review): container template arguments appear stripped in this listing
// (e.g. std::vector<...>) -- confirm against the original source.
void ASLRGBDOfflineStream::associateData() {
  // Absolute time difference in seconds between two entries; raw timestamps
  // are integer nanoseconds.
  auto diff = [](const dua::FileData& x, const dua::PoseData& y) {
    double tx = static_cast(x.timestamp) * 1e-9;
    double ty = static_cast(y.timestamp) * 1e-9;
    return std::fabs(tx - ty);
  };

  // Match rgb and depth to pose separately.
  std::vector pose_rgb_idxs;
  std::vector rgb_pose_idxs;
  du::associate(rgb_data_.data(), pose_data_.data(), &rgb_pose_idxs,
                &pose_rgb_idxs, diff);

  std::vector pose_depth_idxs;
  std::vector depth_pose_idxs;
  if (!depth_data_.path().empty()) {
    du::associate(depth_data_.data(), pose_data_.data(), &depth_pose_idxs,
                  &pose_depth_idxs, diff);
  } else {
    // No depth data. Just copy rgb data.
    pose_depth_idxs = pose_rgb_idxs;
    depth_pose_idxs = rgb_pose_idxs;
  }

  // Now take the intersection of pose_rgb_idxs and pose_depth_idxs to get the
  // indices that match both.
  // NOTE(review): std::set_intersection requires both ranges to be sorted --
  // presumably du::associate emits indices in increasing order; confirm.
  pose_idxs_.clear();
  std::set_intersection(pose_rgb_idxs.begin(), pose_rgb_idxs.end(),
                        pose_depth_idxs.begin(), pose_depth_idxs.end(),
                        std::back_inserter(pose_idxs_));

  // Now get corresponding rgb and depth idxs. The set gives O(1) membership
  // tests while walking the per-stream match lists in order.
  std::unordered_set pose_idxs_set(pose_idxs_.begin(), pose_idxs_.end());

  rgb_idxs_.clear();
  for (int ii = 0; ii < pose_rgb_idxs.size(); ++ii) {
    if (pose_idxs_set.count(pose_rgb_idxs[ii]) > 0) {
      rgb_idxs_.push_back(rgb_pose_idxs[ii]);
    }
  }

  depth_idxs_.clear();
  if (!depth_data_.path().empty()) {
    for (int ii = 0; ii < pose_depth_idxs.size(); ++ii) {
      if (pose_idxs_set.count(pose_depth_idxs[ii]) > 0) {
        depth_idxs_.push_back(depth_pose_idxs[ii]);
      }
    }
  }

  return;
}
204 |
// Return the next synchronized RGB-D frame with its camera pose, converting
// the pose into optical (Right-Down-Forward) world coordinates, and optionally
// publish everything over tf/ROS topics. Advances the internal index.
//
// NOTE(review): some template/cast argument lists appear stripped in this
// listing (e.g. static_cast<...>, cv::Mat_<...>) -- confirm against the
// original source.
void ASLRGBDOfflineStream::get(uint32_t* id, double* time,
                               cv::Mat3b* rgb, cv::Mat1f* depth,
                               Eigen::Quaterniond* quat,
                               Eigen::Vector3d* trans) {
  // Make sure we actually have data to read in.
  if (empty()) {
    ROS_ERROR("No more data!\n");
    return;
  }

  *id = curr_idx_;
  // Timestamps are integer nanoseconds; convert to seconds.
  *time = static_cast(rgb_data_[rgb_idxs_[curr_idx_]].timestamp) * 1e-9;

  // Load raw pose, which is the pose of the pose sensor wrt a given world
  // frame.
  Eigen::Quaterniond q_pose_in_world(pose_data_[pose_idxs_[curr_idx_]].quat);
  Eigen::Vector3d t_pose_in_world(pose_data_[pose_idxs_[curr_idx_]].trans);
  q_pose_in_world.normalize();

  // Get pose of camera wrt to world frame by chaining:
  // world <- pose-sensor <- body <- camera.
  Eigen::Quaterniond q_body_in_pose(q_pose_in_body_.inverse());
  Eigen::Vector3d t_body_in_pose(-(q_pose_in_body_.inverse() * t_pose_in_body_));

  Eigen::Quaterniond q_body_in_world(q_pose_in_world * q_body_in_pose);
  Eigen::Vector3d t_body_in_world(q_pose_in_world * t_body_in_pose + t_pose_in_world);

  Eigen::Quaterniond q_cam_in_world = q_body_in_world * q_cam_in_body_;
  Eigen::Vector3d t_cam_in_world = q_body_in_world * t_cam_in_body_ + t_body_in_world;

  // Convert poses to optical coordinates depending on the configured input
  // world-frame convention.
  switch (world_frame_) {
    case RDF: {
      // Already optical; pass through.
      *quat = q_cam_in_world;
      *trans = t_cam_in_world;
      break;
    }
    case FLU: {
      // Local RDF frame in global FLU frame.
      // Eigen quaternion constructor order is (w, x, y, z).
      Eigen::Quaterniond q_flu_to_rdf(-0.5, -0.5, 0.5, -0.5);
      *quat = q_flu_to_rdf * q_cam_in_world;
      *trans = q_flu_to_rdf * t_cam_in_world;
      break;
    }
    case FRD: {
      // Local RDF frame in global FRD frame.
      Eigen::Matrix3d R_frd_to_rdf;
      R_frd_to_rdf << 0.0, 1.0, 0.0,
          0.0, 0.0, 1.0,
          1.0, 0.0, 0.0;

      Eigen::Quaterniond q_frd_to_rdf(R_frd_to_rdf);
      *quat = q_frd_to_rdf * q_cam_in_world;
      *trans = q_frd_to_rdf * t_cam_in_world;
      break;
    }
    case RFU: {
      // Local RDF frame in global RFU frame.
      Eigen::Matrix3d R_rfu_to_rdf;
      R_rfu_to_rdf << 1.0, 0.0, 0.0,
          0.0, 0.0, -1.0,
          0.0, 1.0, 0.0;

      Eigen::Quaterniond q_rfu_to_rdf(R_rfu_to_rdf);
      *quat = q_rfu_to_rdf * q_cam_in_world;
      *trans = q_rfu_to_rdf * t_cam_in_world;
      break;
    }
    default:
      ROS_ERROR("Unknown input frame specified!\n");
      return;
  }

  // Load RGB from <dataset>/data/<filename>.
  bfs::path rgb_path = bfs::path(rgb_data_.path()) / bfs::path("data") /
      bfs::path(rgb_data_[rgb_idxs_[curr_idx_]].filename);
  cv::Mat3b rgb_raw = cv::imread(rgb_path.string(), cv::IMREAD_COLOR);

  // Undistort image using the intrinsics/distortion read in the constructor.
  cv::Mat Kcv;
  eigen2cv(K_, Kcv);
  cv::undistort(rgb_raw, *rgb, Kcv, cinfo_.D);

  if (!depth_data_.path().empty()) {
    // Have depth data.
    bfs::path depth_path = bfs::path(depth_data_.path()) / bfs::path("data") /
        bfs::path(depth_data_[depth_idxs_[curr_idx_]].filename);
    cv::Mat_ depth_raw =
        cv::imread(depth_path.string(), cv::IMREAD_ANYDEPTH);

    // Assume depthmaps are undistorted!
    cv::Mat depth_undistorted;
    depth_undistorted = depth_raw.clone();

    // Scale integer depth to meters using the dataset's scale factor.
    depth->create(height_, width_);
    float* depth_ptr = reinterpret_cast(depth->data);
    uint16_t* depth_raw_ptr = reinterpret_cast(depth_undistorted.data);
    for (uint32_t ii = 0; ii < height_ * width_; ++ii) {
      depth_ptr[ii] = static_cast(depth_raw_ptr[ii]) / intensity_to_depth_factor_;
    }
  }

  if (publish_) {
    // Publish pose over tf.
    geometry_msgs::TransformStamped tf;
    tf.header.stamp.fromSec(*time);
    tf.header.frame_id = camera_world_frame_id_;
    tf.child_frame_id = camera_frame_id_;
    tf.transform.rotation.w = quat->w();
    tf.transform.rotation.x = quat->x();
    tf.transform.rotation.y = quat->y();
    tf.transform.rotation.z = quat->z();

    tf.transform.translation.x = (*trans)(0);
    tf.transform.translation.y = (*trans)(1);
    tf.transform.translation.z = (*trans)(2);
    tf_pub_.sendTransform(tf);

    // Publish messages over ROS. All messages share one stamped header.
    std_msgs::Header header;
    header.stamp.fromSec(*time);
    header.frame_id = camera_frame_id_;

    sensor_msgs::CameraInfo::Ptr cinfo_msg(new sensor_msgs::CameraInfo);
    *cinfo_msg = cinfo_;
    cinfo_msg->header = header;

    cv_bridge::CvImage rgb_cvi(header, "bgr8", *rgb);
    rgb_pub_.publish(rgb_cvi.toImageMsg(), cinfo_msg);

    if (!depth_data_.path().empty()) {
      cv_bridge::CvImage depth_cvi(header, "32FC1", *depth);
      depth_pub_.publish(depth_cvi.toImageMsg(), cinfo_msg);
    }
  }

  // Increment counter.
  curr_idx_++;

  return;
}
346 |
347 | } // namespace ros_sensor_streams
348 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/asl_rgbd_offline_stream.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file asl_rgbd_offline_stream.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-07-29 19:27:14 (Sat)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 |
28 | #include
29 | #include
30 |
31 | #include
32 |
33 | #include
34 |
35 | #include
36 |
37 | #include
38 | #include
39 |
40 | #include
41 |
42 | #include
43 | #include
44 |
45 | namespace ros_sensor_streams {
46 |
47 | /**
48 | * @brief Class that represents an input stream of tracked, undistorted RGBD images.
49 | *
50 | * Input is taken from data in ASL format:
51 | * http://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets
52 | *
53 | * Three ASL folders are needed:
54 | * 1. Pose data relative to a given world frame.
55 | * 2. RGB data
56 | * 3. Depthmap data (optional)
57 | *
58 | * The world frame of the input poses can use one of several frame conventions
59 | * (see the WorldFrame enum), but the output poses should always be relative to
60 | * a Right-Down-Forward world frame (optical coordinates).
61 | *
62 | * We assume the RGB and depth images are registered (i.e. they are relative to
63 | * the same sensor, as if they were taken with the same camera).
64 | */
// NOTE(review): template argument lists on several member types appear
// stripped in this listing (e.g. Dataset<...>, std::vector<...>) -- confirm
// against the original source.
class ASLRGBDOfflineStream final {
 public:
  // Enums for the coordinate frame of the input poses. Output poses will always
  // be in Right-Down-Forward.
  enum WorldFrame {
    RDF,  // Pose is the transform from the local RDF frame attached to the
          // camera to the global RDF frame.
    FLU,  // Pose is the transform from the local RDF frame attached to the
          // camera to the global FLU frame.
    FRD,  // Pose is the transform from the local RDF frame attached to camera to
          // global FRD frame.
    RFU,  // Pose is the transform from the local RDF frame attached to the
          // camera to the global RFU frame.
  };

  /**
   * @brief Constructor.
   *
   * @param[in] nh NodeHandle.
   * @param[in] pose_path Path of ASL pose data.
   * @param[in] rgb_path Path of ASL image data.
   * @param[in] depth_path Path of ASL depthmap data.
   * @param[in] camera_name Name of camera.
   * @param[in] camera_world_frame_id Frame ID of the camera world frame.
   * @param[in] camera_frame_id Frame ID of the camera.
   * @param[in] world_frame Frame of the input poses.
   * @param[in] publish Publish data over ROS.
   */
  ASLRGBDOfflineStream(ros::NodeHandle& nh,
                       const std::string& pose_path,
                       const std::string& rgb_path,
                       const std::string& depth_path,
                       const std::string& camera_name,
                       const std::string& camera_world_frame_id,
                       const std::string& camera_frame_id,
                       WorldFrame world_frame,
                       bool publish = true);

  ~ASLRGBDOfflineStream() = default;

  // Non-copyable, non-movable.
  // NOTE(review): const rvalue-reference move operations are unusual, but
  // they are deleted anyway so the effect is the same.
  ASLRGBDOfflineStream(const ASLRGBDOfflineStream& rhs) = delete;
  ASLRGBDOfflineStream& operator=(const ASLRGBDOfflineStream& rhs) = delete;

  ASLRGBDOfflineStream(const ASLRGBDOfflineStream&& rhs) = delete;
  ASLRGBDOfflineStream& operator=(const ASLRGBDOfflineStream&& rhs) = delete;

  /**
   * @brief Get next RGB-D image with pose.
   *
   * @parma[out] id Image ID.
   * @param[out] time Timestamp [sec]
   * @param[out] rgb RGB image.
   * @param[out] depth Depthmap as float image.
   * @param[out] quat Camera orientation.
   * @param[out] trans Camera translation.
   */
  void get(uint32_t* id, double* time, cv::Mat3b* rgb, cv::Mat1f* depth,
           Eigen::Quaterniond* quat, Eigen::Vector3d* trans);

  // Accessors.
  // True once every associated frame has been consumed by get().
  bool empty() const { return curr_idx_ >= static_cast(pose_idxs_.size()); }
  bool inited() const { return inited_; }

  int width() const { return width_; }
  int height() const { return height_; }

  // Camera intrinsics matrix.
  const Eigen::Matrix3f& K() const { return K_; }

  const std::string& camera_frame_id() const { return camera_frame_id_; }

 private:
  // Associate datasets so that each timestamp has one piece of data.
  void associateData();

  ros::NodeHandle& nh_;

  bool inited_;

  std::string camera_name_;
  std::string camera_world_frame_id_;
  std::string camera_frame_id_;
  WorldFrame world_frame_;
  bool publish_;

  // Index of the next frame to return from get().
  int curr_idx_;

  // Raw ASl datasets.
  dataset_utils::asl::Dataset pose_data_;
  dataset_utils::asl::Dataset rgb_data_;
  dataset_utils::asl::Dataset depth_data_;

  // Synchronized indexes into each dataset.
  std::vector pose_idxs_;
  std::vector rgb_idxs_;
  std::vector depth_idxs_;

  // Sensor extrinsics relative to body frame.
  Eigen::Quaterniond q_pose_in_body_;  // Transform of the pose sensor in body frame.
  Eigen::Vector3d t_pose_in_body_;

  Eigen::Quaterniond q_cam_in_body_;  // Assume both rgb/depth images have same extrinsics.
  Eigen::Vector3d t_cam_in_body_;

  int width_;
  int height_;
  Eigen::Matrix3f K_;
  sensor_msgs::CameraInfo cinfo_;
  // Divisor converting raw integer depth values to meters.
  float intensity_to_depth_factor_;

  // Publishers.
  tf2_ros::TransformBroadcaster tf_pub_;
  image_transport::ImageTransport it_;
  image_transport::CameraPublisher rgb_pub_;
  image_transport::CameraPublisher depth_pub_;
};
180 |
181 | } // namespace ros_sensor_streams
182 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/conversions.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file conversions.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:31:38 (Mon)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 |
28 | #include
29 | #include
30 |
31 | #include
32 | #include
33 |
34 | namespace ros_sensor_streams {
35 |
// Format a quaternion as "[w = ..., x = ..., y = ..., z = ...]".
// NOTE(review): the template parameter list appears stripped in this listing.
template
std::string eigenQuaternionToString(const Eigen::Quaternion& q) {
  std::stringstream ss;

  ss << "[w = " << q.w() << ", x = " << q.x()
     << ", y = " << q.y() << ", z = " << q.z() << "]";

  return ss.str();
}
45 |
// Format a 3-vector as "[x, y, z]".
// NOTE(review): the template parameter list appears stripped in this listing.
template
std::string eigenTranslationToString(const Eigen::Matrix& t) {
  std::stringstream ss;

  ss << "[" << t(0) << ", " << t(1) << ", " << t(2) << "]";

  return ss.str();
}
54 |
// Format an SE(3) pose as "quaternion, translation" using the helpers above.
// NOTE(review): the template parameter list appears stripped in this listing.
template
std::string sophusSE3ToString(const Sophus::SE3Group& se3) {
  std::string q_str = eigenQuaternionToString(se3.unit_quaternion());
  std::string t_str = eigenTranslationToString(se3.translation());

  return q_str + ", " + t_str;
}
62 |
// Format a Sim(3) transform as "scale, quaternion, translation"; the
// quaternion is normalized before printing.
// NOTE(review): the template parameter list appears stripped in this listing.
template
std::string sophusSim3ToString(const Sophus::Sim3Group& sim3) {
  Scalar scale = sim3.scale();
  std::string q_str = eigenQuaternionToString(sim3.quaternion().normalized());
  std::string t_str = eigenTranslationToString(sim3.translation());

  return std::to_string(scale) + ", " + q_str + ", " + t_str;
}
71 |
// Copy an SE(3) pose into a geometry_msgs::Transform (rotation + translation).
// NOTE(review): the template parameter list appears stripped in this listing.
template
inline void sophusSE3ToTf(const Sophus::SE3Group& se3,
                          geometry_msgs::Transform* tf) {
  tf->rotation.w = se3.unit_quaternion().w();
  tf->rotation.x = se3.unit_quaternion().x();
  tf->rotation.y = se3.unit_quaternion().y();
  tf->rotation.z = se3.unit_quaternion().z();

  tf->translation.x = se3.translation()(0);
  tf->translation.y = se3.translation()(1);
  tf->translation.z = se3.translation()(2);

  return;
}
86 |
// Build an SE(3) pose from a geometry_msgs::Transform.
// Eigen's quaternion constructor takes (w, x, y, z) in that order.
// NOTE(review): the template parameter list appears stripped in this listing.
template
inline void tfToSophusSE3(const geometry_msgs::Transform& tf,
                          Sophus::SE3Group* se3) {
  Eigen::Quaternion q(tf.rotation.w,
                      tf.rotation.x,
                      tf.rotation.y,
                      tf.rotation.z);
  Eigen::Matrix trans(tf.translation.x,
                      tf.translation.y,
                      tf.translation.z);
  *se3 = Sophus::SE3Group(q, trans);
  return;
}
100 |
// Build an SE(3) pose from a geometry_msgs::Pose.
// Eigen's quaternion constructor takes (w, x, y, z) in that order.
// NOTE(review): the template parameter list appears stripped in this listing.
template
inline void poseToSophusSE3(const geometry_msgs::Pose& pose,
                            Sophus::SE3Group* se3) {
  Eigen::Quaternion q(pose.orientation.w,
                      pose.orientation.x,
                      pose.orientation.y,
                      pose.orientation.z);
  Eigen::Matrix trans(pose.position.x,
                      pose.position.y,
                      pose.position.z);
  *se3 = Sophus::SE3Group(q, trans);
  return;
}
114 |
115 | } // namespace ros_sensor_streams
116 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/thread_safe_queue.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file thread_safe_queue.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:31:26 (Mon)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 | #include
28 |
29 | namespace ros_sensor_streams {
30 |
31 | /**
32 | * \brief Template class for a thread-safe queue.
33 | *
34 | * Has a condition variable that will notify when queue is non-empty.
35 | * Heavily inspired from the NotifyBuffer class from LSD-SLAM.
36 | */
// NOTE(review): template parameter lists appear stripped in this listing
// (class template parameter, std::queue<...>, lock types) -- confirm against
// the original source.
template
class ThreadSafeQueue {
 public:
  /**
   * \brief Constructor.
   * @param max_queue_size Maximum queue size.
   */
  explicit ThreadSafeQueue(int max_queue_size = 8) :
      max_queue_size(max_queue_size),
      queue(),
      non_empty_(),
      mtx() {}

  ThreadSafeQueue(const ThreadSafeQueue& rhs) = delete;

  ThreadSafeQueue& operator=(const ThreadSafeQueue& rhs) = delete;

  virtual ~ThreadSafeQueue() {}

  /**
   * \brief Return number of items in queue.
   * @return Number of items.
   */
  int size() {
    std::lock_guard lock(mtx);
    return queue.size();
  }

  /**
   * \brief Push item to the back of the queue or discard if queue is full.
   * @param[in] item The item to be pushed.
   * @return True if item pushed, False if queue is full.
   */
  bool push(const T& item) {
    std::unique_lock lock(mtx);

    // Drop the item rather than block when the queue is at capacity.
    if (queue.size() == max_queue_size) {
      return false;
    }

    queue.push(item);
    // Unlock before notifying so the woken thread can acquire immediately.
    lock.unlock();

    non_empty_.notify_one();

    return true;
  }

  /**
   * \brief Pop the first item. Blocks until the queue is non-empty.
   */
  void pop() {
    std::unique_lock lock(mtx);
    non_empty_.wait(lock, [this](){ return !queue.empty(); });
    queue.pop();
    return;
  }

  /**
   * \brief Return a reference to the front item. Blocks until non-empty.
   * @return Reference.
   */
  const T& front() {
    std::unique_lock lock(mtx);
    non_empty_.wait(lock, [this](){ return !queue.empty(); });
    return queue.front();
  }

  /**
   * \brief Return a handle to the underlying mutex.
   * @return The mutex.
   */
  inline std::recursive_mutex& mutex() { return mtx; }

  /**
   * \brief Non-empty condition variable.
   *
   * Use to signal that the queue has data.
   *
   * @return Condition variable.
   */
  inline std::condition_variable_any& non_empty() {
    return non_empty_;
  }

 private:
  int max_queue_size;
  std::queue queue;

  // condition_variable_any is required because the lock type is a
  // recursive_mutex, not a plain std::mutex.
  std::condition_variable_any non_empty_;
  std::recursive_mutex mtx;
};
129 |
130 | } // namespace ros_sensor_streams
131 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/tracked_image_stream.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
16 | * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file tracked_image_stream.cc
19 | * @author W. Nicholas Greene
20 | * @date 2016-12-14 11:25:22 (Wed)
21 | */
22 |
23 | #include "ros_sensor_streams/tracked_image_stream.h"
24 |
25 | #include
26 |
27 | #include
28 | #include
29 |
30 | #include
31 |
32 | #include
33 |
34 | #include "ros_sensor_streams/conversions.h"
35 |
36 | namespace ros_sensor_streams {
37 |
38 | TrackedImageStream::TrackedImageStream(const std::string& world_frame_id,
39 | ros::NodeHandle& nh,
40 | const Eigen::Matrix3f& K,
41 | const Eigen::VectorXf& D,
42 | bool undistort,
43 | int resize_factor,
44 | int queue_size) :
45 | nh_(nh),
46 | inited_(false),
47 | use_external_cal_(true),
48 | resize_factor_(resize_factor),
49 | undistort_(undistort),
50 | world_frame_id_(world_frame_id),
51 | live_frame_id_(),
52 | width_(0),
53 | height_(0),
54 | K_(K),
55 | D_(D),
56 | tf_listener_(nullptr),
57 | tf_buffer_(),
58 | image_transport_(nullptr),
59 | cam_sub_(),
60 | queue_(queue_size) {
61 | // Double check intrinsics matrix.
62 | if (K_(0, 0) <= 0) {
63 | ROS_ERROR("Camera intrinsics matrix is probably invalid!\n");
64 | ROS_ERROR_STREAM("K = " << std::endl << K_);
65 | return;
66 | }
67 |
68 | // Subscribe to topics.
69 | image_transport::ImageTransport it_(nh_);
70 | image_transport_.reset(new image_transport::ImageTransport(nh_));
71 |
72 | cam_sub_ = image_transport_->subscribeCamera("image", 10,
73 | &TrackedImageStream::callback,
74 | this);
75 |
76 | // Set up tf.
77 | tf_listener_.reset(new tf2_ros::TransformListener(tf_buffer_));
78 |
79 | return;
80 | }
81 |
82 | TrackedImageStream::TrackedImageStream(const std::string& world_frame_id,
83 | ros::NodeHandle& nh,
84 | int queue_size) :
85 | nh_(nh),
86 | inited_(false),
87 | use_external_cal_(false),
88 | resize_factor_(1),
89 | undistort_(false),
90 | world_frame_id_(world_frame_id),
91 | live_frame_id_(),
92 | width_(0),
93 | height_(0),
94 | K_(),
95 | D_(5),
96 | tf_listener_(nullptr),
97 | tf_buffer_(),
98 | image_transport_(nullptr),
99 | cam_sub_(),
100 | queue_(queue_size) {
101 | // Subscribe to topics.
102 | image_transport::ImageTransport it_(nh_);
103 | image_transport_.reset(new image_transport::ImageTransport(nh_));
104 |
105 | cam_sub_ = image_transport_->subscribeCamera("image", 10,
106 | &TrackedImageStream::callback,
107 | this);
108 |
109 | // Set up tf.
110 | tf_listener_.reset(new tf2_ros::TransformListener(tf_buffer_));
111 |
112 | return;
113 | }
114 |
// Camera callback: converts the incoming image to BGR, optionally resizes and
// undistorts it, lazily initializes calibration from the first message, looks
// up the camera pose over tf at the image timestamp, and pushes the resulting
// Frame onto the queue.
//
// NOTE(review): some cast argument lists appear stripped in this listing
// (e.g. static_cast<...>(rgb.rows)) -- confirm against the original source.
void TrackedImageStream::callback(const sensor_msgs::Image::ConstPtr& rgb_msg,
                                  const sensor_msgs::CameraInfo::ConstPtr& info) {
  ROS_DEBUG("Received image data!");

  // Grab rgb data.
  cv::Mat3b rgb = cv_bridge::toCvCopy(rgb_msg, "bgr8")->image;

  assert(rgb.isContinuous());

  // Optionally downsample by resize_factor_ in each dimension.
  if (resize_factor_ != 1) {
    cv::Mat3b resized_rgb(static_cast(rgb.rows)/resize_factor_,
                          static_cast(rgb.cols)/resize_factor_);
    cv::resize(rgb, resized_rgb, resized_rgb.size());
    rgb = resized_rgb;
  }

  // Lazily initialize size/calibration from the first message.
  if (!inited_) {
    live_frame_id_ = rgb_msg->header.frame_id;

    // Set calibration.
    width_ = rgb.cols;
    height_ = rgb.rows;

    if (!use_external_cal_) {
      // Intrinsics come from the upper-left 3x3 of the row-major 3x4
      // projection matrix P.
      for (int ii = 0; ii < 3; ++ii) {
        for (int jj = 0; jj < 3; ++jj) {
          K_(ii, jj) = info->P[ii*4 + jj];
        }
      }

      if (K_(0, 0) <= 0) {
        ROS_ERROR("Camera intrinsics matrix is probably invalid!\n");
        ROS_ERROR_STREAM("K = " << std::endl << K_);
        return;
      }

      // First five distortion coefficients (k1, k2, p1, p2, k3).
      for (int ii = 0; ii < 5; ++ii) {
        D_(ii) = info->D[ii];
      }
    }

    inited_ = true;

    ROS_DEBUG("Set camera calibration!");
  }

  if (undistort_) {
    cv::Mat1f Kcv, Dcv;
    cv::eigen2cv(K_, Kcv);
    cv::eigen2cv(D_, Dcv);
    cv::Mat3b rgb_undistorted;
    cv::undistort(rgb, rgb_undistorted, Kcv, Dcv);
    rgb = rgb_undistorted;
  }

  // Get pose of camera.
  geometry_msgs::TransformStamped tf;
  try {
    // Need to remove leading "/" if it exists.
    std::string rgb_frame_id = rgb_msg->header.frame_id;
    if (rgb_frame_id[0] == '/') {
      rgb_frame_id = rgb_frame_id.substr(1, rgb_frame_id.size()-1);
    }

    // Wait up to 0.1 s for the transform at the image timestamp.
    tf = tf_buffer_.lookupTransform(world_frame_id_, rgb_frame_id,
                                    ros::Time(rgb_msg->header.stamp),
                                    ros::Duration(1.0/10));
  } catch (tf2::TransformException &ex) {
    ROS_ERROR("%s", ex.what());
    return;
  }

  Sophus::SE3f pose;
  tfToSophusSE3(tf.transform, &pose);

  // Package everything into a Frame and enqueue; the queue drops the frame
  // if it is full.
  Frame frame;
  frame.id = rgb_msg->header.seq;
  frame.time = rgb_msg->header.stamp.toSec();
  frame.quat = pose.unit_quaternion();
  frame.trans = pose.translation();
  frame.img = rgb;

  queue_.push(frame);

  return;
}
201 |
202 | } // namespace ros_sensor_streams
203 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/tracked_image_stream.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file tracked_image_stream.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:31:08 (Mon)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include
27 |
28 | #include
29 |
30 | #include
31 | #include
32 |
33 | #include
34 |
35 | #include
36 | #include
37 |
38 | #include
39 |
40 | #include
41 | #include
42 |
43 | #include
44 |
45 | #include
46 |
47 | namespace ros_sensor_streams {
48 |
49 | /**
50 | * \brief Class that represents an input stream of tracked images.
51 | *
52 | * Designed for use in a nodelet - thus there is no ros::spin() call.
53 | */
54 | class TrackedImageStream final {
55 | public:
56 | /**
57 | * @brief Struct to hold image and pose data.
58 | */
59 | struct Frame {
60 | uint32_t id; // Image ID.
61 | double time; // Timestamp.
62 | Eigen::Quaternionf quat; // Orientation as quaternion.
63 | Eigen::Vector3f trans; // Translsation.
64 | cv::Mat3b img; // RGB image.
65 | };
66 |
67 | /**
68 | * @brief Constructor for ROS-calibrated image stream.
69 | *
70 | * @param[in] world_frame_id Frame ID of the camera world frame.
71 | * @param[in] nh External nodehandle.
72 | * @param[in] queue_size Message queue size.
73 | */
74 | TrackedImageStream(const std::string& world_frame_id, ros::NodeHandle& nh,
75 | int queue_size = 8);
76 |
77 | /**
78 | * @brief Constructor for un-ROS-calibrated image stream.
79 | *
80 | * Use this constructor when CameraInfo messages are not filled in.
81 | *
82 | * @param[in] world_frame_id Frame ID of the camera world.
83 | * @param[in] nh External nodehandle.
84 | * @param[in] K Camera intrinsices.
85 | * @param[in] D Camera distortion parameterse (k1, k2, p1, p2, k3).
86 | * @param[in] undistort True if images should be undistorted before adding to queue.
87 | * @param[in] resize_factor Downsample image in each dimension by this factor.
88 | * @param[in] queue_size Message queue size.
89 | */
90 | TrackedImageStream(const std::string& world_frame_id, ros::NodeHandle& nh,
91 | const Eigen::Matrix3f& K, const Eigen::VectorXf& D,
92 | bool undistort, int resize_factor, int queue_size = 8);
93 |
94 | ~TrackedImageStream() = default;
95 |
96 | TrackedImageStream(const TrackedImageStream& rhs) = delete;
97 | TrackedImageStream& operator=(const TrackedImageStream& rhs) = delete;
98 |
99 | TrackedImageStream(const TrackedImageStream&& rhs) = delete;
100 | TrackedImageStream& operator=(const TrackedImageStream&& rhs) = delete;
101 |
102 | ThreadSafeQueue& queue() {
103 | return queue_;
104 | }
105 |
106 | /**
107 | * \brief Returns true if stream is initialized.
108 | */
109 | bool inited() {
110 | return inited_;
111 | }
112 |
113 | /**
114 | * \brief Get the image width.
115 | */
116 | int width() {
117 | return width_;
118 | }
119 |
120 | /**
121 | * \brief Get the image height.
122 | */
123 | int height() {
124 | return height_;
125 | }
126 |
127 | /**
128 | * \brief Return the camera intrinsic matrix.
129 | */
130 | const Eigen::Matrix3f& K() {
131 | return K_;
132 | }
133 |
134 | /**
135 | * @brief Return the distortion params.
136 | *
137 | * k1, k2, p1, p2, k3
138 | */
139 | const Eigen::VectorXf& D() {
140 | return D_;
141 | }
142 |
143 | /**
144 | * \brief Return id of the world frame.
145 | */
146 | const std::string& world_frame_id() {
147 | return world_frame_id_;
148 | }
149 |
150 | /**
151 | * \brief Return id of the live frame (i.e. the pose of the camera).
152 | */
153 | const std::string& live_frame_id() {
154 | return live_frame_id_;
155 | }
156 |
157 | private:
158 | void callback(const sensor_msgs::Image::ConstPtr& rgb,
159 | const sensor_msgs::CameraInfo::ConstPtr& info);
160 |
161 | ros::NodeHandle& nh_;
162 |
163 | bool inited_;
164 |
165 | // Use an external calibration instead of what's in the camera_info message.
166 | bool use_external_cal_;
167 | int resize_factor_; // Factor to resize image. resize_factor_ = 2 will
168 | // downsample by 2 in each dimension.
169 | bool undistort_; // Whether to undistort images.
170 |
171 | std::string world_frame_id_;
172 | std::string live_frame_id_;
173 | int width_;
174 | int height_;
175 | Eigen::Matrix3f K_; // Camera intrinsics.
176 | Eigen::VectorXf D_; // Distortion params: k1, k2, p1, p2, k3.
177 |
178 | std::shared_ptr tf_listener_;
179 | tf2_ros::Buffer tf_buffer_;
180 |
181 | std::shared_ptr image_transport_;
182 | image_transport::CameraSubscriber cam_sub_;
183 |
184 | ThreadSafeQueue queue_;
185 | };
186 |
187 | } // namespace ros_sensor_streams
188 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/tum_rgbd_offline_stream.cc:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file tum_rgbd_offline_stream.cc
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:36:54 (Mon)
21 | */
22 |
23 | #include "ros_sensor_streams/tum_rgbd_offline_stream.h"
24 |
25 | #include
26 |
27 | #include
28 | #include
29 |
30 | #include
31 |
32 | #include
33 | #include
34 |
35 | #include
36 |
37 | #include
38 |
39 | #include
40 |
41 | namespace fs = boost::filesystem;
42 |
43 | namespace ros_sensor_streams {
44 |
// Constructor: validates the input/calibration files, loads the camera model,
// reads the whole association file into memory, and optionally sets up
// ROS publishers for the rgb/depth streams.
TUMRGBDOfflineStream::TUMRGBDOfflineStream(ros::NodeHandle& nh,
                                           const std::string& input_file,
                                           const std::string& calib_file,
                                           const std::string& camera_name,
                                           const std::string& camera_world_frame_id,
                                           const std::string& camera_frame_id,
                                           InputFrame input_frame,
                                           float depth_scale_factor,
                                           bool publish) :
    nh_(nh),
    inited_(false),
    input_file_(input_file),
    calib_file_(calib_file),
    camera_name_(camera_name),
    camera_world_frame_id_(camera_world_frame_id),
    camera_frame_id_(camera_frame_id),
    input_frame_(input_frame),
    width_(),
    height_(),
    K_(),
    cinfo_(),
    model_(),
    curr_idx_(0),
    input_(),
    base_dir_(),
    depth_scale_factor_(depth_scale_factor),
    publish_(publish),
    tf_pub_(),
    it_(nh),
    rgb_pub_(),
    depth_pub_() {
  // Make sure files exist.
  // NOTE(review): on failure the object is left with inited_ == false and
  // empty input_; callers presumably check empty() before calling get().
  if (!fs::exists(input_file)) {
    ROS_ERROR("Input file does not exist: %s\n", input_file_.c_str());
    return;
  }
  if (!fs::exists(calib_file)) {
    ROS_ERROR("Calibration file does not exist: %s\n", calib_file_.c_str());
    return;
  }

  // Load calibration information.
  camera_info_manager::CameraInfoManager manager(nh_);
  manager.loadCameraInfo("file://" + calib_file);
  cinfo_ = manager.getCameraInfo();
  model_.fromCameraInfo(cinfo_);

  // Set the camera intrinsics matrix. Note that we use elements of the P matrix
  // instead of the K matrix in the CameraInfo message. The upper left corner of
  // the P matrix represents the parameters for the undistored images (what we
  // will pass outside).
  width_ = cinfo_.width;
  height_ = cinfo_.height;

  // P is row-major 3x4; copy its upper-left 3x3 into K_.
  for (int ii = 0; ii < 3; ++ii) {
    for (int jj = 0; jj < 3; ++jj) {
      K_(ii, jj) = cinfo_.P[ii*4 + jj];
    }
  }

  // Read in input data.
  // NOTE(review): every line is kept verbatim, including any '#' comment
  // header lines TUM association files may contain — parseLine would fail on
  // those; confirm input files are comment-free.
  std::ifstream input_stream(input_file_);
  std::string line;
  while (std::getline(input_stream, line)) {
    input_.push_back(line);
  }

  // Get parent directory; image paths in the association file are relative to it.
  fs::path p(input_file_);
  base_dir_ = p.parent_path().string();

  if (publish_) {
    rgb_pub_ = it_.advertiseCamera("/" + camera_name_ + "/rgb/image_rect_color", 5);
    depth_pub_ = it_.advertiseCamera("/" + camera_name_ + "/depth_registered/image_rect", 5);
  }

  return;
}
123 |
124 | void TUMRGBDOfflineStream::get(uint32_t* id, double* time,
125 | cv::Mat3b* rgb, cv::Mat1f* depth,
126 | Eigen::Quaternionf* quat,
127 | Eigen::Vector3f* trans) {
128 | // Make sure we actually have data to read in.
129 | if (empty()) {
130 | ROS_ERROR("No more data!\n");
131 | return;
132 | }
133 |
134 | *id = curr_idx_;
135 |
136 | Eigen::Quaternionf input_quat;
137 | Eigen::Vector3f input_trans;
138 | cv::Mat rgb_raw, depth_raw;
139 | if (!parseLine(input_[curr_idx_], time, &rgb_raw, &depth_raw, &input_quat,
140 | &input_trans)) {
141 | ROS_ERROR("Could not parse input!\n");
142 | }
143 | input_quat.normalize();
144 |
145 | // Convert poses to optical coordinates.
146 | switch (input_frame_) {
147 | case RDF: {
148 | // Right-Down-Forward
149 | *quat = input_quat;
150 | *trans = input_trans;
151 | break;
152 | }
153 | case FLU: {
154 | // Forward-Left-Up
155 | Eigen::Quaternionf q_flu_to_rdf(-0.5, -0.5, 0.5, -0.5);
156 | *quat = q_flu_to_rdf * input_quat * q_flu_to_rdf.inverse();
157 | *trans = q_flu_to_rdf * input_trans;
158 | break;
159 | }
160 | case FRD: {
161 | // Forward-Right-Down
162 | Eigen::Matrix3f R_frd_to_rdf;
163 | R_frd_to_rdf << 0.0f, 1.0f, 0.0f,
164 | 0.0f, 0.0f, 1.0f,
165 | 1.0f, 0.0f, 0.0f;
166 |
167 | Eigen::Quaternionf q_frd_to_rdf(R_frd_to_rdf);
168 | *quat = q_frd_to_rdf * input_quat * q_frd_to_rdf.inverse();
169 | *trans = q_frd_to_rdf * input_trans;
170 | break;
171 | }
172 | case RDF_IN_FLU: {
173 | // Local RDF frame in global FLU frame.
174 | Eigen::Quaternionf q_flu_to_rdf(-0.5, -0.5, 0.5, -0.5);
175 | *quat = q_flu_to_rdf * input_quat;
176 | *trans = q_flu_to_rdf * input_trans;
177 | break;
178 | }
179 | case RDF_IN_FRD: {
180 | // Local RDF frame in global FRD frame.
181 | Eigen::Matrix3f R_frd_to_rdf;
182 | R_frd_to_rdf << 0.0f, 1.0f, 0.0f,
183 | 0.0f, 0.0f, 1.0f,
184 | 1.0f, 0.0f, 0.0f;
185 |
186 | Eigen::Quaternionf q_frd_to_rdf(R_frd_to_rdf);
187 | *quat = q_frd_to_rdf * input_quat;
188 | *trans = q_frd_to_rdf * input_trans;
189 | break;
190 | }
191 | default:
192 | ROS_ERROR("Unknown input frame specified!\n");
193 | return;
194 | }
195 |
196 | // Undistort images.
197 | model_.rectifyImage(rgb_raw, *rgb);
198 |
199 | cv::Mat depth_undistorted;
200 | model_.rectifyImage(depth_raw, depth_undistorted);
201 |
202 | // Scale depth information.
203 | depth->create(height_, width_);
204 | float* depth_ptr = reinterpret_cast(depth->data);
205 | uint16_t* depth_raw_ptr = reinterpret_cast(depth_undistorted.data);
206 | for (uint32_t ii = 0; ii < height_ * width_; ++ii) {
207 | depth_ptr[ii] = static_cast(depth_raw_ptr[ii]) / depth_scale_factor_;
208 | }
209 |
210 | if (publish_) {
211 | // Publish pose over tf.
212 | geometry_msgs::TransformStamped tf;
213 | tf.header.stamp.fromSec(*time);
214 | tf.header.frame_id = camera_world_frame_id_;
215 | tf.child_frame_id = camera_frame_id_;
216 | tf.transform.rotation.w = quat->w();
217 | tf.transform.rotation.x = quat->x();
218 | tf.transform.rotation.y = quat->y();
219 | tf.transform.rotation.z = quat->z();
220 |
221 | tf.transform.translation.x = (*trans)(0);
222 | tf.transform.translation.y = (*trans)(1);
223 | tf.transform.translation.z = (*trans)(2);
224 | tf_pub_.sendTransform(tf);
225 |
226 | // Publish messages over ROS.
227 | std_msgs::Header header;
228 | header.stamp.fromSec(*time);
229 | header.frame_id = camera_frame_id_;
230 |
231 | sensor_msgs::CameraInfo::Ptr cinfo_msg(new sensor_msgs::CameraInfo);
232 | *cinfo_msg = cinfo_;
233 | cinfo_msg->header = header;
234 |
235 | cv_bridge::CvImage rgb_cvi(header, "bgr8", *rgb);
236 | cv_bridge::CvImage depth_cvi(header, "32FC1", *depth);
237 |
238 | rgb_pub_.publish(rgb_cvi.toImageMsg(), cinfo_msg);
239 | depth_pub_.publish(depth_cvi.toImageMsg(), cinfo_msg);
240 | }
241 |
242 | // Increment counter.
243 | curr_idx_++;
244 |
245 | return;
246 | }
247 |
248 | bool TUMRGBDOfflineStream::parseLine(const std::string& line, double* time, cv::Mat* rgb,
249 | cv::Mat* depth, Eigen::Quaternionf* quat,
250 | Eigen::Vector3f* trans) {
251 | // Parse line.
252 | double pose_time;
253 | float tx, ty, tz, qx, qy, qz, qw;
254 | double rgb_time;
255 | char rgb_cstr[256];
256 | double depth_time;
257 | char depth_cstr[256];
258 |
259 | int num_tokens = 0;
260 | std::istringstream ss(line);
261 | if (ss >> pose_time) num_tokens++;
262 | if (ss >> tx) num_tokens++;
263 | if (ss >> ty) num_tokens++;
264 | if (ss >> tz) num_tokens++;
265 | if (ss >> qx) num_tokens++;
266 | if (ss >> qy) num_tokens++;
267 | if (ss >> qz) num_tokens++;
268 | if (ss >> qw) num_tokens++;
269 | if (ss >> rgb_time) num_tokens++;
270 | if (ss >> rgb_cstr) num_tokens++;
271 | if (ss >> depth_time) num_tokens++;
272 | if (ss >> depth_cstr) num_tokens++;
273 |
274 | // Use rgb time as associated time.
275 | *time = rgb_time;
276 |
277 | // Set pose.
278 | quat->x() = qx;
279 | quat->y() = qy;
280 | quat->z() = qz;
281 | quat->w() = qw;
282 | (*trans)(0) = tx;
283 | (*trans)(1) = ty;
284 | (*trans)(2) = tz;
285 |
286 | // Read in rgb image.
287 | *rgb = cv::imread(base_dir_ + "/" + std::string(rgb_cstr), cv::IMREAD_COLOR);
288 |
289 | if (num_tokens < 11) {
290 | // No depth information provided. Create dummy depthmap.
291 | depth->create(rgb->rows, rgb->cols, cv::DataType::type);
292 | *depth = cv::Scalar(0);
293 | } else {
294 | // Read in raw depth image.
295 | *depth = cv::imread(base_dir_ + "/" + std::string(depth_cstr),
296 | cv::IMREAD_ANYDEPTH);
297 | }
298 |
299 | return true;
300 | }
301 |
302 | } // namespace ros_sensor_streams
303 |
--------------------------------------------------------------------------------
/src/ros_sensor_streams/tum_rgbd_offline_stream.h:
--------------------------------------------------------------------------------
1 | /**
2 | * This file is part of flame_ros.
3 | * Copyright (C) 2017 W. Nicholas Greene (wng@csail.mit.edu)
4 | *
5 | * This program is free software; you can redistribute it and/or modify
6 | * it under the terms of the GNU General Public License as published by
7 | * the Free Software Foundation; either version 3 of the License, or
8 | * (at your option) any later version.
9 | *
10 | * This program is distributed in the hope that it will be useful,
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | * GNU General Public License for more details.
14 | *
15 | * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 | *
18 | * @file tum_rgbd_offline_stream.h
19 | * @author W. Nicholas Greene
20 | * @date 2017-08-21 21:36:49 (Mon)
21 | */
22 |
23 | #pragma once
24 |
25 | #include
26 | #include