├── CMakeLists.txt ├── README.md ├── include └── register.hpp ├── package.xml └── src └── structure_driver.cpp /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(structure_core) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | add_compile_options(-std=c++11) 6 | 7 | find_package(catkin REQUIRED COMPONENTS 8 | roscpp 9 | sensor_msgs 10 | std_msgs 11 | ) 12 | 13 | ## System dependencies are found with CMake's conventions 14 | # find_package(Boost REQUIRED COMPONENTS system) 15 | 16 | 17 | ## Uncomment this if the package has a setup.py. This macro ensures 18 | ## modules and global scripts declared therein get installed 19 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 20 | # catkin_python_setup() 21 | 22 | ################################################ 23 | ## Declare ROS messages, services and actions ## 24 | ################################################ 25 | 26 | ## To declare and build messages, services or actions from within this 27 | ## package, follow these steps: 28 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 29 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 30 | ## * In the file package.xml: 31 | ## * add a build_depend tag for "message_generation" 32 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 33 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 34 | ## but can be declared for certainty nonetheless: 35 | ## * add a exec_depend tag for "message_runtime" 36 | ## * In this file (CMakeLists.txt): 37 | ## * add "message_generation" and every package in MSG_DEP_SET to 38 | ## find_package(catkin REQUIRED COMPONENTS ...) 39 | ## * add "message_runtime" and every package in MSG_DEP_SET to 40 | ## catkin_package(CATKIN_DEPENDS ...) 41 | ## * uncomment the add_*_files sections below as needed 42 | ## and list every .msg/.srv/.action file to be processed 43 | ## * uncomment the generate_messages entry below 44 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 45 | 46 | ## Generate messages in the 'msg' folder 47 | # add_message_files( 48 | # FILES 49 | # Message1.msg 50 | # Message2.msg 51 | # ) 52 | 53 | ## Generate services in the 'srv' folder 54 | # add_service_files( 55 | # FILES 56 | # Service1.srv 57 | # Service2.srv 58 | # ) 59 | 60 | ## Generate actions in the 'action' folder 61 | # add_action_files( 62 | # FILES 63 | # Action1.action 64 | # Action2.action 65 | # ) 66 | 67 | ## Generate added messages and services with any dependencies listed here 68 | # generate_messages( 69 | # DEPENDENCIES 70 | # std_msgs # Or other packages containing msgs 71 | # ) 72 | 73 | ################################################ 74 | ## Declare ROS dynamic reconfigure parameters ## 75 | ################################################ 76 | 77 | ## To declare and build dynamic reconfigure parameters within this 78 | ## package, follow these steps: 79 | ## * In the file package.xml: 80 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 81 | ## * In this file (CMakeLists.txt): 82 | ## * add "dynamic_reconfigure" to 83 | ## find_package(catkin REQUIRED COMPONENTS ...) 
84 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 85 | ## and list every .cfg file to be processed 86 | 87 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 88 | # generate_dynamic_reconfigure_options( 89 | # cfg/DynReconf1.cfg 90 | # cfg/DynReconf2.cfg 91 | # ) 92 | 93 | ################################### 94 | ## catkin specific configuration ## 95 | ################################### 96 | ## The catkin_package macro generates cmake config files for your package 97 | ## Declare things to be passed to dependent projects 98 | ## INCLUDE_DIRS: uncomment this if your package contains header files 99 | ## LIBRARIES: libraries you create in this project that dependent projects also need 100 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 101 | ## DEPENDS: system dependencies of this project that dependent projects also need 102 | catkin_package( 103 | # INCLUDE_DIRS include 104 | # LIBRARIES structure_core 105 | CATKIN_DEPENDS roscpp sensor_msgs std_msgs 106 | # DEPENDS system_lib 107 | ) 108 | 109 | ########### 110 | ## Build ## 111 | ########### 112 | 113 | find_library(STRUCTURE_LIB Structure HINTS "lib") 114 | 115 | find_package(Eigen3 QUIET) 116 | if(NOT EIGEN3_FOUND) 117 | find_package(Eigen REQUIRED) 118 | set(EIGEN3_INCLUDE_DIRS ${EIGEN_INCLUDE_DIRS}) 119 | endif() 120 | 121 | ## Specify additional locations of header files 122 | ## Your package locations should be listed before other locations 123 | include_directories( 124 | include 125 | ${catkin_INCLUDE_DIRS} 126 | ${EIGEN3_INCLUDE_DIRS} 127 | ) 128 | 129 | ## Declare a C++ library 130 | # add_library(${PROJECT_NAME} 131 | # src/${PROJECT_NAME}/structure_core.cpp 132 | # ) 133 | 134 | ## Add cmake target dependencies of the library 135 | ## as an example, code may need to be generated before libraries 136 | ## either from message generation or dynamic reconfigure 137 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 138 | 139 | ## Declare a C++ executable 140 | ## With catkin_make all packages are built within a single CMake context 141 | ## The recommended prefix ensures that target names across packages don't collide 142 | add_executable(structure_driver src/structure_driver.cpp) 143 | 144 | ## Rename C++ executable without prefix 145 | ## The above recommended prefix causes long target names, the following renames the 146 | ## target back to the shorter version for ease of user use 147 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 148 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 149 | 150 | ## Add cmake target dependencies of the executable 151 | ## same as for the library above 152 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 153 | 154 | ## Specify libraries to link a library or executable target against 155 | target_link_libraries(structure_driver 156 | ${catkin_LIBRARIES} ${STRUCTURE_LIB} 157 | ) 158 | 159 | ############# 160 | ## Install ## 161 | ############# 162 | 163 | # all install targets should use catkin DESTINATION variables 164 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 165 | 166 | ## Mark executable scripts (Python etc.) 
for installation 167 | ## in contrast to setup.py, you can choose the destination 168 | # install(PROGRAMS 169 | # scripts/my_python_script 170 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 171 | # ) 172 | 173 | ## Mark executables and/or libraries for installation 174 | install(TARGETS structure_driver 175 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 176 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 177 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 178 | ) 179 | 180 | ## Mark cpp header files for installation 181 | # install(DIRECTORY include/${PROJECT_NAME}/ 182 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 183 | # FILES_MATCHING PATTERN "*.h" 184 | # PATTERN ".svn" EXCLUDE 185 | # ) 186 | 187 | ## Mark other files for installation (e.g. launch and bag files, etc.) 188 | # install(FILES 189 | # # myfile1 190 | # # myfile2 191 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 192 | # ) 193 | 194 | ############# 195 | ## Testing ## 196 | ############# 197 | 198 | ## Add gtest based cpp test target and link libraries 199 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_structure_core.cpp) 200 | # if(TARGET ${PROJECT_NAME}-test) 201 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 202 | # endif() 203 | 204 | ## Add folders to be run by python nosetests 205 | # catkin_add_nosetests(test) 206 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | To build and run: 2 | 3 | 1) From your Structure Core SDK download, copy the .so library from: 4 | 5 | Libraries/Structure/Linux/x86_64 to lib/libStructure.so 6 | 7 | 2) Copy the ST folder from 8 | 9 | Libraries/Shared/Headers to include/ST 10 | 11 | in this repo. 12 | 13 | Then run: 14 | 15 | 3) source /opt/ros/DISTRO/setup.bash 16 | (source /opt/ros/kinetic/setup.bash or source /opt/ros/melodic/setup.bash) 17 | 18 | 4) catkin_make -DCMAKE_BUILD_TYPE=Release 19 | 20 | Building in Release mode is important: without it the code is not optimized and may run very poorly or crash. 21 | 22 | -------------------------------------------------------------------------------- /include/register.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | /********************************************************************* 3 | * Software License Agreement (BSD License) 4 | * 5 | * Copyright (c) 2008, Willow Garage, Inc. 6 | * All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions 10 | * are met: 11 | * 12 | * * Redistributions of source code must retain the above copyright 13 | * notice, this list of conditions and the following disclaimer. 14 | * * Redistributions in binary form must reproduce the above 15 | * copyright notice, this list of conditions and the following 16 | * disclaimer in the documentation and/or other materials provided 17 | * with the distribution. 18 | * * Neither the name of the Willow Garage nor the names of its 19 | * contributors may be used to endorse or promote products derived 20 | * from this software without specific prior written permission. 
21 | * 22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 25 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 26 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 28 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 30 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 32 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 | * POSSIBILITY OF SUCH DAMAGE. 34 | *********************************************************************/ 35 | 36 | // Adapted from image_pipeline/depth_image_proc/src/nodelets/register.cpp 37 | 38 | #include <ST/CaptureSession.h> 39 | #include <Eigen/Geometry> 40 | 41 | void register_convert(const ST::DepthFrame& depth_frame, const ST::ColorFrame& color_frame, std::vector<uint8_t>& output) 42 | { 43 | // Allocate memory for registered depth image 44 | output.resize(2*color_frame.width()*color_frame.height(), 0); // Allocate as all zeroes 45 | 46 | // Extract all the parameters we need 47 | double inv_depth_fx = 1.0 / depth_frame.intrinsics().fx; 48 | double inv_depth_fy = 1.0 / depth_frame.intrinsics().fy; 49 | double depth_cx = depth_frame.intrinsics().cx, depth_cy = depth_frame.intrinsics().cy; 50 | double depth_Tx = 0.0, depth_Ty = 0.0; 51 | double rgb_fx = color_frame.intrinsics().fx, rgb_fy = color_frame.intrinsics().fy; 52 | double rgb_cx = color_frame.intrinsics().cx, rgb_cy = color_frame.intrinsics().cy; 53 | double rgb_Tx = 0.0, rgb_Ty = 0.0; 54 | 55 | Eigen::Matrix3d rot; 56 | const ST::Matrix4 pose = depth_frame.visibleCameraPoseInDepthCoordinateFrame(); 57 | for(size_t j = 0; j < 3; j++) 58 | { 59 | for(size_t i = 0; i < 3; i++){ 60 | rot(j, i) = pose.atRowCol(j, i); // Only the 3x3 rotation block of the 4x4 pose 61 | } 62 | } 63 | 64 | Eigen::Affine3d depth_to_rgb = Eigen::Translation3d(pose.atRowCol(0,3), 65 | pose.atRowCol(1,3), 66 | pose.atRowCol(2,3)) * 67 | Eigen::Quaterniond(rot); 68 | 69 | 70 | // Transform the depth values into the RGB frame 71 | /// @todo When RGB is higher res, interpolate by rasterizing depth triangles onto the registered image 72 | const float* depth_row = depth_frame.depthInMillimeters(); 73 | int row_step = depth_frame.width(); 74 | uint16_t* registered_data = reinterpret_cast<uint16_t*>(output.data()); 75 | int raw_index = 0; 76 | for (unsigned v = 0; v < depth_frame.height(); ++v, depth_row += row_step) 77 | { 78 | for (unsigned u = 0; u < depth_frame.width(); ++u, ++raw_index) 79 | { 80 | float raw_depth = depth_row[u]; 81 | if (!std::isfinite(raw_depth)) 82 | continue; 83 | 84 | double depth = raw_depth/1000.0; 85 | 86 | /// @todo Combine all operations into one matrix multiply on (u,v,d) 87 | // Reproject (u,v,Z) to (X,Y,Z,1) in depth camera frame 88 | Eigen::Vector4d xyz_depth; 89 | xyz_depth << ((u - depth_cx)*depth - depth_Tx) * inv_depth_fx, 90 | ((v - depth_cy)*depth - depth_Ty) * inv_depth_fy, 91 | depth, 92 | 1; 93 | 94 | // Transform to RGB camera frame 95 | Eigen::Vector4d xyz_rgb = depth_to_rgb * xyz_depth; 96 | 97 | // Project to (u,v) in RGB image 98 | double inv_Z = 1.0 / xyz_rgb.z(); 99 | int u_rgb = (rgb_fx*xyz_rgb.x() + rgb_Tx)*inv_Z + rgb_cx + 0.5; 100 | int v_rgb = (rgb_fy*xyz_rgb.y() + rgb_Ty)*inv_Z + rgb_cy + 0.5; 101 |
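// For reference, the projection above is the standard pinhole model: u_rgb = fx_rgb*(X/Z) + cx_rgb and v_rgb = fy_rgb*(Y/Z) + cy_rgb; the +0.5 makes the cast to int round to the nearest pixel, and rgb_Tx/rgb_Ty are simply zero here.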
102 | if (u_rgb < 0 || u_rgb >= (int)color_frame.width() || 103 | v_rgb < 0 || v_rgb >= (int)color_frame.height()) 104 | continue; 105 | 106 | uint16_t& reg_depth = registered_data[v_rgb*color_frame.width() + u_rgb]; 107 | uint16_t new_depth = 1000.0*xyz_rgb.z(); 108 | // Validity and Z-buffer checks 109 | if (reg_depth == 0 || reg_depth > new_depth) 110 | reg_depth = new_depth; 111 | } 112 | } 113 | } 114 | 115 | // If you want a depth_frame exactly aligned and same resolution as infrared frame 116 | void register_convert(const ST::DepthFrame& depth_frame, const ST::InfraredFrame& ir_frame, std::vector<uint8_t>& output) 117 | { 118 | // Allocate memory for registered depth image 119 | output.resize(2*(ir_frame.width()/2)*ir_frame.height(), 0); // Allocate as all zeroes 120 | 121 | // Extract all the parameters we need 122 | double inv_depth_fx = 1.0 / depth_frame.intrinsics().fx; 123 | double inv_depth_fy = 1.0 / depth_frame.intrinsics().fy; 124 | double depth_cx = depth_frame.intrinsics().cx, depth_cy = depth_frame.intrinsics().cy; 125 | double depth_Tx = 0.0, depth_Ty = 0.0; 126 | double rgb_fx = ir_frame.intrinsics().fx, rgb_fy = ir_frame.intrinsics().fy; 127 | double rgb_cx = ir_frame.intrinsics().cx, rgb_cy = ir_frame.intrinsics().cy; 128 | double rgb_Tx = 0.0, rgb_Ty = 0.0; 129 | 130 | Eigen::Affine3d depth_to_rgb = Eigen::Translation3d(0, 131 | 0, 132 | 0) * 133 | Eigen::Quaterniond(1.0, 0.0, 0.0, 0.0); 134 | 135 | 136 | // Transform the depth values into the RGB frame 137 | /// @todo When RGB is higher res, interpolate by rasterizing depth triangles onto the registered image 138 | const float* depth_row = depth_frame.depthInMillimeters(); 139 | int row_step = depth_frame.width(); 140 | uint16_t* registered_data = reinterpret_cast<uint16_t*>(output.data()); 141 | int raw_index = 0; 142 | for (unsigned v = 0; v < depth_frame.height(); ++v, depth_row += row_step) 143 | { 144 | for (unsigned u = 0; u < depth_frame.width(); ++u, ++raw_index) 145 | { 146 | float raw_depth = depth_row[u]; 147 | if (!std::isfinite(raw_depth)) 148 | continue; 149 | 150 | double depth = raw_depth/1000.0; 151 | 152 | /// @todo Combine all operations into one matrix multiply on (u,v,d) 153 | // Reproject (u,v,Z) to (X,Y,Z,1) in depth camera frame 154 | Eigen::Vector4d xyz_depth; 155 | xyz_depth << ((u - depth_cx)*depth - depth_Tx) * inv_depth_fx, 156 | ((v - depth_cy)*depth - depth_Ty) * inv_depth_fy, 157 | depth, 158 | 1; 159 | 160 | // Transform to RGB camera frame 161 | Eigen::Vector4d xyz_rgb = depth_to_rgb * xyz_depth; 162 | 163 | // Project to (u,v) in RGB image 164 | double inv_Z = 1.0 / xyz_rgb.z(); 165 | int u_rgb = (rgb_fx*xyz_rgb.x() + rgb_Tx)*inv_Z + rgb_cx + 0.5; 166 | int v_rgb = (rgb_fy*xyz_rgb.y() + rgb_Ty)*inv_Z + rgb_cy + 0.5; 167 | 168 | if (u_rgb < 0 || u_rgb >= (int)ir_frame.width()/2 || 169 | v_rgb < 0 || v_rgb >= (int)ir_frame.height()) 170 | continue; 171 | 172 | uint16_t& reg_depth = registered_data[v_rgb*ir_frame.width()/2 + u_rgb]; 173 | uint16_t new_depth = 1000.0*xyz_rgb.z(); 174 | // Validity and Z-buffer checks 175 | if (reg_depth == 0 || reg_depth > new_depth) 176 | reg_depth = new_depth; 177 | } 178 | } 179 | } -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <package format="2"> 3 | <name>structure_core</name> 4 | <version>0.0.0</version> 5 | <description>The structure_core package</description> 6 | 7 | <maintainer email="">Not Maintained</maintainer> 8 | 9 | <license>Apache 2.0</license> 10 | 11 | 12 | 13 | <author>Chad Rockey</author> 14 | 15 | <buildtool_depend>catkin</buildtool_depend> 16 | <build_depend>roscpp</build_depend> 17 | <build_depend>sensor_msgs</build_depend> 18 | <build_depend>std_msgs</build_depend> 19 | <build_depend>eigen</build_depend>
20 | <exec_depend>roscpp</exec_depend> 21 | <exec_depend>sensor_msgs</exec_depend> 22 | <exec_depend>std_msgs</exec_depend> 23 | <exec_depend>eigen</exec_depend> 24 | 25 | <export> 26 | 27 | </export> 28 | </package> -------------------------------------------------------------------------------- /src/structure_driver.cpp: -------------------------------------------------------------------------------- 1 | #include <ros/ros.h> 2 | #include <sensor_msgs/Image.h> 3 | #include <sensor_msgs/CameraInfo.h> 4 | #include <vector> 5 | 6 | #include <ST/CaptureSession.h> 7 | #include "register.hpp" 8 | 9 | #include <cstdio> 10 | 11 | #include <mutex> 12 | #include <condition_variable> 13 | #include <algorithm> 14 | 15 | class SessionDelegate : public ST::CaptureSessionDelegate { 16 | private: 17 | std::mutex lock; 18 | std::condition_variable cond; 19 | bool ready = false; 20 | bool done = false; 21 | 22 | ros::Publisher depth_image_pub_; 23 | ros::Publisher depth_info_pub_; 24 | 25 | ros::Publisher depth_color_aligned_pub_; 26 | ros::Publisher depth_color_aligned_info_pub_; 27 | 28 | ros::Publisher depth_ir_aligned_pub_; 29 | ros::Publisher depth_ir_aligned_info_pub_; 30 | 31 | ros::Publisher visible_image_pub_; 32 | ros::Publisher visible_info_pub_; 33 | 34 | ros::Publisher left_image_pub_; 35 | ros::Publisher left_info_pub_; 36 | ros::Publisher right_image_pub_; 37 | ros::Publisher right_info_pub_; 38 | 39 | 40 | sensor_msgs::ImagePtr imageFromDepthFrame(const std::string& frame_id, const ST::DepthFrame& f) 41 | { 42 | sensor_msgs::ImagePtr msg(new sensor_msgs::Image); 43 | 44 | msg->header.frame_id = frame_id; 45 | msg->header.stamp.fromSec(f.timestamp()); 46 | 47 | msg->encoding = "16UC1"; 48 | msg->height = f.height(); 49 | msg->width = f.width(); 50 | msg->step = 2*f.width(); 51 | msg->is_bigendian = 0; 52 | 53 | msg->data.resize(msg->height * msg->step); 54 | uint16_t* data_as_shorts = reinterpret_cast<uint16_t*>(msg->data.data()); 55 | std::transform (&f.depthInMillimeters()[0], &f.depthInMillimeters()[0]+(f.height()*f.width()), &data_as_shorts[0], 56 | [](float f)->uint16_t{ 57 | if(std::isnormal(f)) 58 | { 59 | return f; 60 | } 61 | return 0; 62 | } 63 | ); 64 | 65 | return msg; 66 | } 67 | 68 | 69 | template<class frameType> 70 | sensor_msgs::CameraInfoPtr infoFromFrame(const std::string& frame_id, const frameType& f, int width_scale=1) 71 | { 72 | sensor_msgs::CameraInfoPtr info(new sensor_msgs::CameraInfo); 73 | info->header.frame_id = frame_id; 74 | info->header.stamp.fromSec(f.timestamp()); 75 | info->height = f.intrinsics().height; 76 | info->width = f.intrinsics().width/width_scale; 77 | info->distortion_model = "plumb_bob"; 78 | info->D = { f.intrinsics().k1, f.intrinsics().k2, f.intrinsics().p1, f.intrinsics().p2, f.intrinsics().k3 }; 79 | info->K = { f.intrinsics().fx, 0, f.intrinsics().cx, 80 | 0, f.intrinsics().fy, f.intrinsics().cy, 81 | 0, 0, 1. };
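// K above is the 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1]; no rectification is applied in this driver, so R below is left as identity and P is simply K with an appended zero column (zero Tx/Ty baseline terms).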
82 | info->P = { f.intrinsics().fx, 0, f.intrinsics().cx, 0, 83 | 0, f.intrinsics().fy, f.intrinsics().cy, 0, 84 | 0, 0, 1., 0 }; 85 | info->R = { 1, 0, 0, 86 | 0, 1, 0, 87 | 0, 0, 1 }; 88 | 89 | return info; 90 | } 91 | 92 | sensor_msgs::ImagePtr imageFromVisibleFrame(const std::string& frame_id, const ST::ColorFrame& f) 93 | { 94 | sensor_msgs::ImagePtr msg(new sensor_msgs::Image); 95 | 96 | msg->header.frame_id = frame_id; 97 | msg->header.stamp.fromSec(f.timestamp()); 98 | 99 | int num_channels = f.rgbSize()/(f.width()*f.height()); 100 | 101 | if( num_channels == 3){ 102 | msg->encoding = "rgb8"; 103 | } else { 104 | msg->encoding = "mono8"; 105 | } 106 | 107 | msg->height = f.height(); 108 | msg->width = f.width(); 109 | msg->step = f.rgbSize()/f.height(); 110 | msg->is_bigendian = 0; 111 | 112 | msg->data.resize(msg->height * msg->step); 113 | 114 | std::copy(&f.rgbData()[0], &f.rgbData()[0]+f.rgbSize(), &msg->data[0]); 115 | 116 | return msg; 117 | } 118 | 119 | std::vector<sensor_msgs::ImagePtr> imagesFromInfraredFrame(const std::string& left_frame_id, const std::string& right_frame_id, const ST::InfraredFrame& f, bool as_8bit=false) 120 | { 121 | int single_frame_width = f.width()/2; 122 | 123 | sensor_msgs::ImagePtr right(new sensor_msgs::Image); 124 | right->header.frame_id = right_frame_id; 125 | right->header.stamp.fromSec(f.timestamp()); 126 | 127 | right->encoding = "mono16"; 128 | right->height = f.height(); 129 | right->width = single_frame_width; 130 | right->step = 2*single_frame_width; 131 | right->is_bigendian = 0; 132 | right->data.resize(right->height * right->step); 133 | uint16_t* right_as_shorts = reinterpret_cast<uint16_t*>(right->data.data()); 134 | 135 | sensor_msgs::ImagePtr left(new sensor_msgs::Image); 136 | left->header.frame_id = left_frame_id; 137 | left->header.stamp.fromSec(f.timestamp()); 138 | 139 | left->encoding = "mono16"; 140 | left->height = f.height(); 141 | left->width = single_frame_width; 142 | left->step = 2*single_frame_width; 143 | left->is_bigendian = 0; 144 | left->data.resize(left->height * left->step); 145 | uint16_t* left_as_shorts = reinterpret_cast<uint16_t*>(left->data.data()); 146 | 147 | 148 | if(as_8bit){ 149 | left->data.resize(left->height*left->width); 150 | right->data.resize(right->height*right->width); 151 | 152 | left->encoding = "mono8"; 153 | right->encoding = "mono8"; 154 | 155 | left->step = single_frame_width; 156 | right->step = single_frame_width; 157 | 158 | for(int j = 0; j < f.height(); j++){ 159 | for(int i = 0; i < f.width(); i++){ 160 | if(i < single_frame_width){ 161 | right->data[right->width*j + i] = f.data()[f.width()*j + i] >> 3; // Reduce from 11 bits to 8 bits 162 | } else { 163 | left->data[left->width*j + i - single_frame_width] = f.data()[f.width()*j + i] >> 3; // Reduce from 11 bits to 8 bits 164 | } 165 | } 166 | } 167 | } 168 | else // Copy as full 16 bit 169 | { 170 | for(int j = 0; j < f.height(); j++){ 171 | // Right is first and main imager 172 | std::copy(&f.data()[f.width()*j], &f.data()[f.width()*j + 0] + single_frame_width, &right_as_shorts[right->width*j]); 173 | std::copy(&f.data()[f.width()*j+single_frame_width], &f.data()[f.width()*j] + 2*single_frame_width, &left_as_shorts[left->width*j]); 174 | } 175 | } 176 | 177 | return {left, right}; 178 | } 179 | 180 | void publishDepthFrame(const ST::DepthFrame& f) 181 | { 182 | if(not f.isValid() or depth_image_pub_.getNumSubscribers() == 0) 183 | { 184 | return; 185 | } 186 | std::string depth_frame_id = "camera_depth_optical_frame";
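// Depth is published as 16UC1: each pixel is the range in millimeters (see imageFromDepthFrame above), with 0 used for invalid or missing measurements.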
187 | depth_image_pub_.publish(imageFromDepthFrame(depth_frame_id, f)); 188 | depth_info_pub_.publish(infoFromFrame(depth_frame_id, f)); 189 | } 190 | 191 | void publishVisibleFrame(const ST::ColorFrame& f) 192 | { 193 | if(not f.isValid() or visible_image_pub_.getNumSubscribers() == 0) 194 | { 195 | return; 196 | } 197 | std::string visible_frame_id = "camera_visible_optical_frame"; 198 | visible_image_pub_.publish(imageFromVisibleFrame(visible_frame_id, f)); 199 | visible_info_pub_.publish(infoFromFrame(visible_frame_id, f)); 200 | } 201 | 202 | void publishInfraredFrame(const ST::InfraredFrame& f, bool as_8bit=false) 203 | { 204 | if(not f.isValid()) // or (left_image_pub_.getNumSubscribers() == 0 and right_image_pub_.getNumSubscribers() == 0) 205 | { 206 | return; 207 | } 208 | std::string left_frame_id = "camera_left_optical_frame"; 209 | std::string right_frame_id = "camera_depth_optical_frame"; 210 | 211 | std::vector<sensor_msgs::ImagePtr> frames = imagesFromInfraredFrame(left_frame_id, right_frame_id, f, as_8bit); 212 | left_image_pub_.publish(frames[0]); 213 | left_info_pub_.publish(infoFromFrame(left_frame_id, f, 2)); 214 | 215 | right_image_pub_.publish(frames[1]); 216 | right_info_pub_.publish(infoFromFrame(right_frame_id, f, 2)); 217 | } 218 | 219 | void publishDepthAligned(const ST::DepthFrame& depth, const ST::ColorFrame& visual) 220 | { 221 | if(not depth.isValid() or not visual.isValid() or depth_color_aligned_pub_.getNumSubscribers() == 0){ 222 | return; 223 | } 224 | 225 | sensor_msgs::ImagePtr msg(new sensor_msgs::Image); 226 | msg->header.frame_id = "camera_visible_optical_frame"; 227 | msg->header.stamp.fromSec(depth.timestamp()); 228 | msg->encoding = "16UC1"; 229 | msg->height = visual.height(); 230 | msg->width = visual.width(); 231 | msg->step = 2*visual.width(); 232 | msg->is_bigendian = 0; 233 | register_convert(depth, visual, msg->data); 234 | 235 | // Camera info is same as visual, but with depth timestamp 236 | auto info = infoFromFrame(msg->header.frame_id, visual); 237 | info->header.stamp = msg->header.stamp; 238 | 239 | depth_color_aligned_pub_.publish(msg); 240 | depth_color_aligned_info_pub_.publish(info); 241 | } 242 | 243 | void publishDepthIRAligned(const ST::DepthFrame& depth, const ST::InfraredFrame& ir) 244 | { 245 | if(not depth.isValid() or not ir.isValid() or depth_ir_aligned_pub_.getNumSubscribers() == 0){ 246 | return; 247 | } 248 | 249 | sensor_msgs::ImagePtr msg(new sensor_msgs::Image); 250 | msg->header.frame_id = "camera_depth_optical_frame"; 251 | msg->header.stamp.fromSec(depth.timestamp()); 252 | msg->encoding = "16UC1"; 253 | msg->height = ir.height(); 254 | msg->width = ir.width()/2; 255 | msg->step = 2*msg->width; 256 | msg->is_bigendian = 0; 257 | register_convert(depth, ir, msg->data); 258 | 259 | // Camera info is same as the infrared frame, but with depth timestamp 260 | auto info = infoFromFrame(msg->header.frame_id, ir, 2); 261 | info->header.stamp = msg->header.stamp; 262 | 263 | depth_ir_aligned_pub_.publish(msg); 264 | depth_ir_aligned_info_pub_.publish(info); 265 | } 266 | 267 | 268 | public: 269 | 270 | SessionDelegate(ros::NodeHandle& n, ros::NodeHandle& pnh) 271 | { 272 | ros::NodeHandle dn(n, "depth"); 273 | depth_image_pub_ = dn.advertise<sensor_msgs::Image>("image", 10); 274 | depth_info_pub_ = dn.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 275 | 276 | ros::NodeHandle da(n, "depth_aligned"); 277 | depth_color_aligned_pub_ = da.advertise<sensor_msgs::Image>("image", 10); 278 | depth_color_aligned_info_pub_ = da.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 279 | 280 | ros::NodeHandle di(n, "depth_ir_aligned");
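// Each stream gets its own sub-namespace (depth, depth_aligned, depth_ir_aligned, visible, left, right), so the node publishes topics such as depth/image, depth/camera_info and visible/image_raw, all with a queue size of 10.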
281 | depth_ir_aligned_pub_ = di.advertise<sensor_msgs::Image>("image", 10); 282 | depth_ir_aligned_info_pub_ = di.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 283 | 284 | ros::NodeHandle vn(n, "visible"); 285 | visible_image_pub_ = vn.advertise<sensor_msgs::Image>("image_raw", 10); 286 | visible_info_pub_ = vn.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 287 | 288 | ros::NodeHandle ln(n, "left"); 289 | left_image_pub_ = ln.advertise<sensor_msgs::Image>("image_raw", 10); 290 | left_info_pub_ = ln.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 291 | 292 | ros::NodeHandle rn(n, "right"); 293 | right_image_pub_ = rn.advertise<sensor_msgs::Image>("image_raw", 10); 294 | right_info_pub_ = rn.advertise<sensor_msgs::CameraInfo>("camera_info", 10); 295 | } 296 | 297 | void captureSessionEventDidOccur(ST::CaptureSession *, ST::CaptureSessionEventId event) override { 298 | printf("Received capture session event %d (%s)\n", (int)event, ST::CaptureSessionSample::toString(event)); 299 | switch (event) { 300 | case ST::CaptureSessionEventId::Ready: { 301 | std::unique_lock<std::mutex> u(lock); 302 | ready = true; 303 | cond.notify_all(); 304 | } break; 305 | case ST::CaptureSessionEventId::Disconnected: 306 | case ST::CaptureSessionEventId::EndOfFile: 307 | case ST::CaptureSessionEventId::Error: { 308 | std::unique_lock<std::mutex> u(lock); 309 | done = true; 310 | cond.notify_all(); 311 | } break; 312 | default: 313 | printf("Event %d unhandled\n", (int)event); 314 | } 315 | } 316 | 317 | void captureSessionDidOutputSample(ST::CaptureSession *, const ST::CaptureSessionSample& sample) { 318 | //printf("Received capture session sample of type %d (%s)\n", (int)sample.type, ST::CaptureSessionSample::toString(sample.type)); 319 | switch (sample.type) { 320 | case ST::CaptureSessionSample::Type::DepthFrame: 321 | //printf("Depth frame: size %dx%d\n", sample.depthFrame.width(), sample.depthFrame.height()); 322 | publishDepthFrame(sample.depthFrame); 323 | break; 324 | case ST::CaptureSessionSample::Type::VisibleFrame: 325 | //printf("Visible frame: size %dx%d\n", sample.visibleFrame.width(), sample.visibleFrame.height()); 326 | publishVisibleFrame(sample.visibleFrame); 327 | break; 328 | case ST::CaptureSessionSample::Type::InfraredFrame: 329 | //printf("Infrared frame: size %dx%d\n", sample.infraredFrame.width(), sample.infraredFrame.height()); 330 | publishInfraredFrame(sample.infraredFrame); 331 | break; 332 | case ST::CaptureSessionSample::Type::SynchronizedFrames: 333 | { 334 | publishDepthFrame(sample.depthFrame); 335 | 336 | publishVisibleFrame(sample.visibleFrame); 337 | 338 | publishInfraredFrame(sample.infraredFrame, true); 339 | 340 | publishDepthAligned(sample.depthFrame, sample.visibleFrame); 341 | 342 | publishDepthIRAligned(sample.depthFrame, sample.infraredFrame); 343 | } 344 | break; 345 | case ST::CaptureSessionSample::Type::AccelerometerEvent: 346 | break; 347 | case ST::CaptureSessionSample::Type::GyroscopeEvent: 348 | break; 349 | default: 350 | printf("Sample type %d unhandled\n", (int)sample.type); 351 | } 352 | } 353 | 354 | void waitUntilReady() { 355 | std::unique_lock<std::mutex> u(lock); 356 | cond.wait(u, [this]() { 357 | return ready; 358 | }); 359 | } 360 | 361 | void waitUntilDone() { 362 | std::unique_lock<std::mutex> u(lock); 363 | cond.wait(u, [this]() { 364 | return done; 365 | }); 366 | } 367 | }; 368 | 369 | int main(int argc, char **argv) { 370 | 371 | ros::init(argc, argv, "structure_driver"); 372 | 373 | ros::NodeHandle n; 374 | 375 | ros::NodeHandle pnh("~"); 376 | 377 | ST::CaptureSessionSettings settings; 378 | settings.source = ST::CaptureSessionSourceId::StructureCore; 379 |
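// Session lifecycle used below: fill in the CaptureSessionSettings, call startMonitoring(), block in waitUntilReady() until the Ready event arrives, then call startStreaming(); frames are delivered to the SessionDelegate callbacks above while ros::spin() runs.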
380 | /** @brief Set to true to enable frame synchronization between visible or color and depth. */ 381 | settings.frameSyncEnabled = true; 382 | /** @brief Set to true to deliver IMU events on a separate, dedicated background thread. Only supported for Structure Core, currently. */ 383 | settings.lowLatencyIMU = true; 384 | /** @brief Set to true to apply a correction filter to the depth before streaming. This may affect performance. */ 385 | settings.applyExpensiveCorrection = true; 386 | /** @brief Set to true to enable depth streaming. */ 387 | settings.structureCore.depthEnabled = true; 388 | /** @brief Set to true to enable infrared streaming. */ 389 | settings.structureCore.infraredEnabled = false; 390 | /** @brief Set to true to enable visible streaming. */ 391 | settings.structureCore.visibleEnabled = true; 392 | /** @brief Set to true to enable accelerometer streaming. */ 393 | settings.structureCore.accelerometerEnabled = false; 394 | /** @brief Set to true to enable gyroscope streaming. */ 395 | settings.structureCore.gyroscopeEnabled = false; 396 | /** @brief The target resolution for streamed depth frames. @see StructureCoreDepthResolution */ 397 | settings.structureCore.depthResolution = ST::StructureCoreDepthResolution::SXGA; 398 | /** @brief The preset depth range mode for streamed depth frames. Modifies the min/max range of the depth values. */ 399 | settings.structureCore.depthRangeMode = ST::StructureCoreDepthRangeMode::Default; 400 | /** @brief The target resolution for streamed infrared frames. @see StructureCoreInfraredResolution 401 | Non-default infrared and visible resolutions are currently unavailable. 402 | */ 403 | settings.structureCore.infraredResolution = ST::StructureCoreInfraredResolution::Default; 404 | /** @brief The target resolution for streamed visible frames. @see StructureCoreVisibleResolution 405 | Non-default infrared and visible resolutions are currently unavailable. 406 | */ 407 | settings.structureCore.visibleResolution = ST::StructureCoreVisibleResolution::Default; 408 | /** @brief Set to true to apply gamma correction to incoming visible frames. */ 409 | settings.structureCore.visibleApplyGammaCorrection = true; 410 | /** @brief Enable auto-exposure for infrared frames. */ 411 | settings.structureCore.infraredAutoExposureEnabled = true; 412 | /** @brief Specifies how to stream the infrared frames. @see StructureCoreInfraredMode */ 413 | settings.structureCore.infraredMode = ST::StructureCoreInfraredMode::BothCameras; 414 | /** @brief The target stream rate for IMU data. (gyro and accel) */ 415 | settings.structureCore.imuUpdateRate = ST::StructureCoreIMUUpdateRate::Default; 416 | /** @brief Serial number of sensor to stream. If null, the first connected sensor will be used. */ 417 | settings.structureCore.sensorSerial = nullptr; 418 | /** @brief Maximum amount of time (in milliseconds) to wait for a sensor to connect before throwing a timeout error. */ 419 | settings.structureCore.sensorInitializationTimeout = 6000; 420 | /** @brief The target framerate for the infrared camera. If the value is not supported, the default is 30. */ 421 | settings.structureCore.infraredFramerate = 15.f; 422 | /** @brief The target framerate for the depth sensor. If the value is not supported, the default is 30. */ 423 | settings.structureCore.depthFramerate = 15.f; 424 | /** @brief The target framerate for the visible camera. If the value is not supported, the default is 30. */ 425 | settings.structureCore.visibleFramerate = 15.f; 426 | /** @brief The initial visible exposure to start streaming with (milliseconds, but set in seconds). */
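// Note: if this exposure is expressed in seconds, the commented-out 0.033 below corresponds to roughly 33 ms, i.e. about one frame period at 30 fps.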
427 | //settings.structureCore.initialVisibleExposure = 0.033f; 428 | /** @brief The initial visible gain to start streaming with. Can be any number between 1 and 8. */ 429 | //settings.structureCore.initialVisibleGain = 4.0f; 430 | /** @brief The initial infrared exposure to start streaming with. */ 431 | settings.structureCore.initialInfraredExposure = 0.0146f; 432 | /** @brief The initial infrared gain to start streaming with. Can be 0, 1, 2, or 3. */ 433 | settings.structureCore.initialInfraredGain = 3; 434 | /** @brief Setting this to true will eliminate saturation issues, but might result in sparser depth. */ 435 | settings.structureCore.disableInfraredIntensityBalance = true; 436 | /** @brief Setting this to true will reduce latency, but might drop more frames */ 437 | settings.structureCore.latencyReducerEnabled = false; 438 | /** @brief Laser projector power setting from 0.0 to 1.0 inclusive. Projector will only activate if required by streaming configuration. */ 439 | settings.structureCore.initialProjectorPower = 1.0f; 440 | 441 | SessionDelegate delegate(n, pnh); 442 | ST::CaptureSession session; 443 | session.setDelegate(&delegate); 444 | if (!session.startMonitoring(settings)) { 445 | printf("Failed to initialize capture session\n"); 446 | return 1; 447 | } 448 | 449 | printf("Waiting for session to become ready...\n"); 450 | delegate.waitUntilReady(); 451 | session.startStreaming(); 452 | 453 | ros::spin(); 454 | 455 | session.stopStreaming(); 456 | return 0; 457 | } 458 | --------------------------------------------------------------------------------