├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── README.md ├── aluminum_channel_mesh.png ├── exploration_cycle.png ├── nbv_planner ├── CMakeLists.txt ├── config │ └── yak.rviz ├── include │ └── nbv_planner │ │ ├── exploration_controller_node.hpp │ │ ├── nbv_planner_node.hpp │ │ └── octomap_reset_node.hpp ├── launch │ ├── auto_explore.launch │ ├── launch_check.launch │ └── octomap_mapping.launch ├── package.xml ├── src │ ├── exploration_controller_node.cpp │ ├── nbv_planner_node.cpp │ └── octomap_reset_node.cpp └── srv │ └── GetNBV.srv ├── yak ├── CMakeLists.txt ├── cmake │ ├── Modules │ │ └── FindOpenNI.cmake │ ├── Targets.cmake │ └── Utils.cmake ├── include │ └── yak │ │ ├── kfusion │ │ ├── cuda │ │ │ ├── device.hpp │ │ │ ├── device_array.hpp │ │ │ ├── device_memory.hpp │ │ │ ├── imgproc.hpp │ │ │ ├── kernel_containers.hpp │ │ │ ├── projective_icp.hpp │ │ │ ├── temp_utils.hpp │ │ │ ├── texture_binder.hpp │ │ │ └── tsdf_volume.hpp │ │ ├── exports.hpp │ │ ├── internal.hpp │ │ ├── kinfu.hpp │ │ ├── precomp.hpp │ │ ├── safe_call.hpp │ │ └── types.hpp │ │ └── ros │ │ ├── half.hpp │ │ ├── kinfu_server.h │ │ ├── ros_rgbd_camera.hpp │ │ └── ros_sim_camera.hpp ├── launch │ ├── launch_ensenso.launch │ ├── launch_ensenso_big.launch │ ├── launch_sim.launch │ ├── launch_xtion.launch │ ├── launch_xtion_big.launch │ ├── launch_xtion_robot.launch │ ├── start_data_record.launch │ ├── start_dataset.launch │ └── start_my_dataset.launch ├── msg │ ├── SparseTSDF.msg │ └── TSDF.msg ├── package.xml ├── src │ ├── core.cpp │ ├── cuda │ │ ├── imgproc.cu │ │ ├── proj_icp.cu │ │ └── tsdf_volume.cu │ ├── device_memory.cpp │ ├── imgproc.cpp │ ├── kinfu.cpp │ ├── kinfu_node.cpp │ ├── precomp.cpp │ ├── projective_icp.cpp │ ├── ros │ │ ├── kinfu_server.cpp │ │ ├── ros_rgbd_camera.cpp │ │ ├── ros_sim_camera.cpp │ │ ├── volume_marker.cpp │ │ └── volume_tf_broadcaster.cpp │ └── tsdf_volume.cpp └── srv │ ├── GetMesh.srv │ ├── GetSparseTSDF.srv │ ├── GetTSDF.srv │ └── ResetVolume.srv └── 
yak_meshing ├── CMakeLists.txt ├── include └── half.hpp ├── package.xml ├── src └── yak_meshing_node.cpp └── srv └── GetMesh.srv /.gitignore: -------------------------------------------------------------------------------- 1 | *.autosave 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: trusty 3 | language: generic 4 | compiler: 5 | - gcc 6 | notifications: 7 | email: 8 | on_success: never 9 | on_failure: never 10 | 11 | env: 12 | global: 13 | - ROS_DISTRO=kinetic 14 | - DOCKER_IMAGE=rosindustrial/yak:kinetic 15 | - CATKIN_CONFIG='--no-install' 16 | - BEFORE_SCRIPT='-DCMAKE_LIBRARY_PATH=/usr/local/nvidia/lib64/' 17 | matrix: 18 | - ROS_REPOSITORY_PATH=http://packages.ros.org/ros/ubuntu 19 | - ROS_REPOSITORY_PATH=http://packages.ros.org/ros-shadow-fixed/ubuntu 20 | install: 21 | - git clone https://github.com/ros-industrial/industrial_ci.git .ci_config 22 | script: 23 | - source .ci_config/travis.sh 24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #docker build -t rosindustrial/yak:kinetic . 
2 | FROM rosindustrial/noether-nvidia:kinetic 3 | WORKDIR / 4 | RUN apt-get update && apt-get install -y --no-install-recommends \ 5 | ros-kinetic-perception \ 6 | libopenvdb-dev \ 7 | ros-kinetic-interactive-markers \ 8 | libtbb-dev \ 9 | libopenexr-dev \ 10 | ros-kinetic-octomap-ros \ 11 | ros-kinetic-moveit-ros-planning-interface && \ 12 | apt-get clean && \ 13 | rm -rf /var/lib/apt/lists/* 14 | 15 | ENTRYPOINT ["/ros_entrypoint.sh"] 16 | CMD ["bash"] 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Austin Deric 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # yak 2 | 3 | yak (yet another kinfu) is a library and ROS wrapper for Truncated Signed Distance Fields (TSDFs). 4 | 5 | A TSDF is a probabilistic representation of a solid surface in 3D space. It's a useful tool for combining many noisy incomplete sensor readings into a single smooth and complete model. 6 | 7 | To break down the name: 8 | 9 | Distance field: Each voxel in the volume contains a value that represents its metric distance from the closest point on the surface. Voxels very far from the surface have high-magnitude distance values, while those near the surface have values approaching zero. 10 | 11 | Signed: Voxels outside the surface have positive distances, while voxels inside the surface have negative distances. This allows the representation of solid objects. The distance field becomes a gradient that shifts from positive to negative as it crosses the surface. 12 | 13 | Truncated: Only the distance values of voxels very close to the surface are regularly updated. Distances beyond a certain threshold have their values capped at +/- 1. This decreases the cost of integrating new readings, since not every voxel in the volume needs to be updated. 14 | 15 | yak handles two very different use cases. It can reconstruct from an RGBD camera moved around by a human without any knowledge of pose relative to the global frame. It can also reconstruct from a sensor mounted on a robot arm using pose hints provided by TF and robot kinematics. The idea is that this second case doesn't need to deduce sensor motion by comparing the most recent reading to previous readings via ICP, so it should work better in situations with incomplete sensor readings. 16 | 17 | The human-guided situation should work out of the box without any other packages. 
You might need to force the sensor to reset to get the volume positioned in a desirable orientation around your target. The easiest way to do this is to cover and uncover the camera lens. 18 | 19 | The robot-assisted situation is currently partially hardcoded to use the sensors and work cell models from the Godel blending project. This will change soon to make it more generalized! 20 | 21 | # yak_meshing 22 | ![Aluminum part reconstructed with yak and meshed with yak_meshing](/aluminum_channel_mesh.png) 23 | 24 | yak_meshing is a ROS package to mesh TSDF volumes generated by Kinect Fusion-like packages. 25 | 26 | Meshing happens through the /get_mesh service, which in turn calls the kinfu_ros /get_tsdf service. yak_meshing_node expects a serialized TSDF voxel volume, which is a list of TSDF values and weights for every occupied voxel along with a list of the coordinates of each occupied voxel. OpenVDB's voxel meshing algorithm generates a triangular mesh along the zero-value isosurface of the TSDF volume. The mesh is saved as a .obj, which can be viewed and manipulated in a program like Meshlab or Blender. 27 | 28 | # nbv_planner 29 | ![Candidate poses generated and evaluated by nbv_planner](/exploration_cycle.png) 30 | 31 | nbv_planner is a ROS package to perform Next Best View analysis using data provided from RGBD cameras like the Asus Xtion. It uses octomap to track voxel occupancy and integrate new readings. 32 | 33 | Call the /get_nbv service to return a sorted list (best to worst) of candidate poses near the volume that could expose unknown voxels. Currently evaluation of poses is conducted by casting rays corresponding to the camera's field of view into the octomap. More hits on unknowns = better view. 34 | 35 | Executing rosrun nbv_planner exploration_controller_node will execute an exploration routine that will try to move the robot to views that expose unknown regions of a user-specified volume, using the NBV evaluation explained above. 
The octomap server should be running. 36 | 37 | # Operating Instructions for Human-Guided Reconstruction 38 | 1. Start TSDF/KinFu processes: roslaunch yak launch_xtion_default.launch 39 | 2. Launch the drivers for the RGBD camera. For the Asus Xtion, this is roslaunch openni2_launch openni2.launch. 40 | 3. Start mapping! Since yak doesn't have any way to relate the pose of the camera to the global frame, the initial position of the volume will be centered in front of the camera. You might have to force the camera to reset a few times to get the volume positioned where you need it. 41 | 4. When you decide that the reconstruction is good enough: rosservice call /get_mesh 42 | 43 | # Operating Instructions for Autonomous Exploration and Reconstruction 44 | 0. If you intend to use an actual robot, make sure that its state and motion servers are running, and that autonomous motion is allowed (deadman switch engaged, or auto mode). 45 | 1. roslaunch a moveit planning/execution launch file. My command looks like: roslaunch godel_irb2400_moveit_config moveit_planning_execution.launch robot_ip:=192.168.125.1 sim:=False use_ftp:=False. Wait for rviz and moveit to start up. 46 | 2. Launch the TSDF reconstruction nodes. For example, roslaunch yak launch_xtion_robot.launch. 47 | 3. Launch the drivers for the RGBD camera. For the Asus Xtion, this is roslaunch openni2_launch openni2.launch. 48 | 4. Start the octomap server: roslaunch nbv_planner octomap_mapping.launch 49 | 5. When you want to start exploration: rosrun nbv_planner exploration_controller_node 50 | 6. 
When you decide that the reconstruction is good enough: rosservice call /get_mesh 51 | 52 | # Build with Docker: 53 | ``` 54 | nvidia-docker run -v "" rosindustrial/yak:kinetic catkin build --workspace /yak_ws -DCMAKE_LIBRARY_PATH=/usr/local/nvidia/lib64/ 55 | ``` 56 | -------------------------------------------------------------------------------- /aluminum_channel_mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AustinDeric/yak/c6066217aa1703435536cf0638318822e7df95bd/aluminum_channel_mesh.png -------------------------------------------------------------------------------- /exploration_cycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AustinDeric/yak/c6066217aa1703435536cf0638318822e7df95bd/exploration_cycle.png -------------------------------------------------------------------------------- /nbv_planner/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(nbv_planner) 3 | add_definitions(-std=c++11) 4 | 5 | find_package(catkin REQUIRED COMPONENTS 6 | message_generation 7 | geometry_msgs 8 | roscpp 9 | rospy 10 | std_msgs 11 | octomap_ros 12 | pcl_ros 13 | tf 14 | moveit_ros_planning_interface 15 | ) 16 | 17 | find_package(octomap REQUIRED) 18 | 19 | add_service_files( 20 | FILES 21 | GetNBV.srv 22 | ) 23 | 24 | generate_messages( 25 | DEPENDENCIES 26 | std_msgs 27 | geometry_msgs 28 | ) 29 | 30 | catkin_package( 31 | INCLUDE_DIRS 32 | include 33 | CATKIN_DEPENDS 34 | message_runtime 35 | geometry_msgs 36 | roscpp 37 | rospy 38 | std_msgs 39 | octomap_ros 40 | pcl_ros 41 | tf 42 | moveit_ros_planning_interface 43 | ) 44 | 45 | include_directories(include 46 | ${catkin_INCLUDE_DIRS} 47 | ${OCTOMAP_INCLUDE_DIRS}) 48 | 49 | link_libraries(${OCTOMAP_LIBRARIES}) 50 | 51 | add_executable(${PROJECT_NAME}_node 
src/nbv_planner_node.cpp) 52 | add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 53 | 54 | 55 | add_executable(octomap_reset_node src/octomap_reset_node.cpp) 56 | add_dependencies(octomap_reset_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 57 | 58 | 59 | add_executable(exploration_controller_node src/exploration_controller_node.cpp) 60 | add_dependencies(exploration_controller_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 61 | 62 | 63 | target_link_libraries(${PROJECT_NAME}_node 64 | ${catkin_LIBRARIES} 65 | ) 66 | 67 | target_link_libraries(octomap_reset_node 68 | ${catkin_LIBRARIES} 69 | ) 70 | 71 | target_link_libraries(exploration_controller_node 72 | ${catkin_LIBRARIES} 73 | ) 74 | -------------------------------------------------------------------------------- /nbv_planner/config/yak.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 78 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Global Options1 8 | - /Status1 9 | Splitter Ratio: 0.5 10 | Tree Height: 633 11 | - Class: rviz/Selection 12 | Name: Selection 13 | - Class: rviz/Tool Properties 14 | Expanded: 15 | - /2D Pose Estimate1 16 | - /2D Nav Goal1 17 | - /Publish Point1 18 | Name: Tool Properties 19 | Splitter Ratio: 0.588679016 20 | - Class: rviz/Views 21 | Expanded: 22 | - /Current View1 23 | Name: Views 24 | Splitter Ratio: 0.5 25 | - Class: rviz/Time 26 | Experimental: false 27 | Name: Time 28 | SyncMode: 0 29 | SyncSource: "" 30 | Visualization Manager: 31 | Class: "" 32 | Displays: 33 | - Alpha: 0.5 34 | Cell Size: 1 35 | Class: rviz/Grid 36 | Color: 160; 160; 164 37 | Enabled: true 38 | Line Style: 39 | Line Width: 0.0299999993 40 | Value: Lines 41 | Name: Grid 42 | Normal Cell Count: 0 43 | Offset: 44 | X: 0 45 | Y: 0 46 | Z: 0 47 | Plane: XY 48 | Plane Cell Count: 10 49 | Reference Frame: 
50 | Value: true 51 | Enabled: true 52 | Global Options: 53 | Background Color: 48; 48; 48 54 | Fixed Frame: map 55 | Frame Rate: 30 56 | Name: root 57 | Tools: 58 | - Class: rviz/Interact 59 | Hide Inactive Objects: true 60 | - Class: rviz/MoveCamera 61 | - Class: rviz/Select 62 | - Class: rviz/FocusCamera 63 | - Class: rviz/Measure 64 | - Class: rviz/SetInitialPose 65 | Topic: /initialpose 66 | - Class: rviz/SetGoal 67 | Topic: /move_base_simple/goal 68 | - Class: rviz/PublishPoint 69 | Single click: true 70 | Topic: /clicked_point 71 | Value: true 72 | Views: 73 | Current: 74 | Class: rviz/Orbit 75 | Distance: 10 76 | Enable Stereo Rendering: 77 | Stereo Eye Separation: 0.0599999987 78 | Stereo Focal Distance: 1 79 | Swap Stereo Eyes: false 80 | Value: false 81 | Focal Point: 82 | X: 0 83 | Y: 0 84 | Z: 0 85 | Focal Shape Fixed Size: true 86 | Focal Shape Size: 0.0500000007 87 | Invert Z Axis: false 88 | Name: Current View 89 | Near Clip Distance: 0.00999999978 90 | Pitch: 0.785398006 91 | Target Frame: 92 | Value: Orbit (rviz) 93 | Yaw: 0.785398006 94 | Saved: ~ 95 | Window Geometry: 96 | Displays: 97 | collapsed: false 98 | Height: 846 99 | Hide Left Dock: false 100 | Hide Right Dock: false 101 | QMainWindow State: 
000000ff00000000fd00000004000000000000016a00000308fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000308000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002c4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000028000002c4000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650000000000000004b00000030000fffffffb0000000800540069006d00650100000000000004500000000000000000000003400000030800000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 102 | Selection: 103 | collapsed: false 104 | Time: 105 | collapsed: false 106 | Tool Properties: 107 | collapsed: false 108 | Views: 109 | collapsed: false 110 | Width: 1200 111 | X: 65 112 | Y: 60 113 | -------------------------------------------------------------------------------- /nbv_planner/include/nbv_planner/exploration_controller_node.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "nbv_planner/GetNBV.h" 4 | 5 | #include 6 | #include 7 | 8 | #include 9 | 
#include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | #include 20 | 21 | class Explorer { 22 | public: 23 | Explorer(ros::NodeHandle &nh); 24 | 25 | bool MoveToNBVs(moveit::planning_interface::MoveGroupInterface &move_group); 26 | 27 | // moveit::planning_interface::MoveGroupInterface move_group_; 28 | ros::ServiceClient nbv_client_; 29 | tf::TransformListener tfListener_; 30 | ros::NodeHandle node_handle_; 31 | 32 | // ros::ServiceServer exploration_server_; 33 | 34 | private: 35 | }; 36 | -------------------------------------------------------------------------------- /nbv_planner/include/nbv_planner/nbv_planner_node.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include "nbv_planner/GetNBV.h" 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include 15 | 16 | #include 17 | //#include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | 24 | #include 25 | 26 | #include 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | #include 34 | 35 | #include 36 | 37 | #include 38 | 39 | 40 | class NBVSolver { 41 | public: 42 | NBVSolver(ros::NodeHandle& nh); 43 | 44 | bool GetNBV(nbv_planner::GetNBV::Request &req, nbv_planner::GetNBV::Response &res); 45 | 46 | void GenerateViewPosesSpherical(float distance, int slices, float yawMin, float yawMax, float pitchMin, float pitchMax, tf::Transform &origin, std::list &poseList); 47 | 48 | void GenerateViewPosesRandom(int numPoses, float yawMin, float yawMax, float pitchMin, float pitchMax, float dMin, float dMax, std::list &poseList); 49 | 50 | int EvaluateCandidateView(tf::Transform pose, octomap::ColorOcTree &tree, octomap::ColorOcTree &unknownTree); 51 | 52 | float RandInRange(float min, float max); 53 | 54 | void Update(); 55 | 56 | ros::ServiceServer nbv_server_; 57 | 58 | ros::ServiceClient 
octomap_client_; 59 | 60 | sensor_msgs::PointCloud2 unknown_leaf_cloud_; 61 | ros::Publisher unknown_cloud_publisher_; 62 | 63 | visualization_msgs::Marker ray_line_list_; 64 | visualization_msgs::Marker hit_ray_line_list_; 65 | 66 | ros::Publisher all_ray_pub_; 67 | ros::Publisher hit_ray_pub_; 68 | 69 | tf::TransformBroadcaster broadcaster_; 70 | tf::TransformListener listener_; 71 | 72 | int ray_count_; 73 | int num_pose_slices_; 74 | float raycast_distance_; 75 | 76 | octomath::Vector3 bound_min_; 77 | octomath::Vector3 bound_max_; 78 | 79 | std::list candidate_poses_; 80 | }; 81 | -------------------------------------------------------------------------------- /nbv_planner/include/nbv_planner/octomap_reset_node.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | class OctomapResetter { 6 | public: 7 | OctomapResetter(ros::NodeHandle& nh); 8 | 9 | void UpdateCallback(const visualization_msgs::InteractiveMarkerFeedbackConstPtr& feedback); 10 | 11 | ros::Subscriber volume_update_subscriber_; 12 | ros::ServiceClient volume_reset_client_; 13 | 14 | private: 15 | 16 | }; 17 | -------------------------------------------------------------------------------- /nbv_planner/launch/auto_explore.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /nbv_planner/launch/launch_check.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /nbv_planner/launch/octomap_mapping.launch: -------------------------------------------------------------------------------- 1 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 
| 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /nbv_planner/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | nbv_planner 4 | 0.0.0 5 | The nbv_planner package 6 | 7 | AustinDeric 8 | joe 9 | 10 | MIT 11 | 12 | catkin 13 | 14 | message_generation 15 | 16 | geometry_msgs 17 | roscpp 18 | rospy 19 | std_msgs 20 | tf 21 | octomap 22 | octomap_ros 23 | moveit_ros_planning_interface 24 | pcl_ros 25 | 26 | message_runtime 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /nbv_planner/src/exploration_controller_node.cpp: -------------------------------------------------------------------------------- 1 | #include "nbv_planner/exploration_controller_node.hpp" 2 | 3 | Explorer::Explorer(ros::NodeHandle &nh) { 4 | // moveit::planning_interface::MoveGroupInterface move_group("manipulator"); 5 | // move_group_ = move_group; 6 | nbv_client_ = nh.serviceClient("/get_nbv"); 7 | node_handle_ = nh; 8 | // exploration_server_ = nh.advertiseService("do_exploration", &Explorer::MoveToNBVs, this); 9 | 10 | } 11 | 12 | bool Explorer::MoveToNBVs(moveit::planning_interface::MoveGroupInterface &move_group) 13 | { 14 | /* 15 | * While "finished" criteria is False: 16 | * - Get next best view given current information 17 | * - Move to view 18 | */ 19 | while (true) 20 | { 21 | // Make a new call to the GetNBV service to get a list of potentially-good poses to move towards. 
22 | nbv_planner::GetNBV srv; 23 | nbv_client_.call(srv); 24 | if (srv.response.exploration_done) { 25 | ROS_INFO("Exploration reasonably completed"); 26 | break; 27 | } 28 | geometry_msgs::PoseArray move_targets = srv.response.bestViewPose; 29 | 30 | // Start with the most highly ranked pose 31 | int currentPoseIndex = 0; 32 | geometry_msgs::Pose targetPose = move_targets.poses[currentPoseIndex]; 33 | 34 | 35 | // Get the position of the TSDF volume to set constraint on camera orientation. 36 | tf::StampedTransform baseToVolume; 37 | tfListener_.waitForTransform("base_link", "volume_pose", ros::Time::now(), ros::Duration(0.5)); 38 | tfListener_.lookupTransform("base_link", "volume_pose", ros::Time(0), baseToVolume); 39 | 40 | moveit_msgs::Constraints cameraPoseConstraints; 41 | 42 | // First constraint: keep a TF frame ~60cm in front of the camera within a box around the TSDF volume 43 | // This will keep the camera pointed more or less at the volume to be imaged. 44 | moveit_msgs::PositionConstraint volumeBoxPosConstraint; 45 | 46 | geometry_msgs::Quaternion volumePoseQuat; 47 | geometry_msgs::Vector3 volumePoseVec; 48 | tf::vector3TFToMsg(baseToVolume.getOrigin(), volumePoseVec); 49 | tf::quaternionTFToMsg(baseToVolume.getRotation(), volumePoseQuat); 50 | 51 | geometry_msgs::Pose volumePose; 52 | volumePose.orientation = volumePoseQuat; 53 | volumePose.position.x = volumePoseVec.x - 0.3; 54 | volumePose.position.y = volumePoseVec.y - 0.3; 55 | volumePose.position.z = volumePoseVec.z + 0.3; 56 | 57 | shape_msgs::SolidPrimitive boundingBox; 58 | boundingBox.type = boundingBox.BOX; 59 | boundingBox.dimensions.resize(3); 60 | boundingBox.dimensions[0] = 0.6; 61 | boundingBox.dimensions[1] = 0.6; 62 | boundingBox.dimensions[2] = 0.6; 63 | 64 | moveit_msgs::BoundingVolume volume; 65 | volume.primitives.push_back(boundingBox); 66 | volume.primitive_poses.push_back(volumePose); 67 | 68 | volumeBoxPosConstraint.constraint_region = volume; 69 | 
volumeBoxPosConstraint.link_name = "sensor_constraint_frame"; 70 | volumeBoxPosConstraint.header.frame_id = "base_link"; 71 | 72 | cameraPoseConstraints.position_constraints.push_back(volumeBoxPosConstraint); 73 | 74 | 75 | 76 | // Second constraint: allow free rotation of the camera Z axis, since we mostly care about which way it's pointed around X and Y axes. 77 | 78 | // moveit_msgs::OrientationConstraint orientationConstraint; 79 | // orientationConstraint.header.frame_id = "base_link"; 80 | // orientationConstraint.link_name = "ensenso_sensor_optical_frame"; 81 | // orientationConstraint.orientation = targetPose.orientation; 82 | 83 | // orientationConstraint.absolute_z_axis_tolerance = M_PI; 84 | 85 | // cameraPoseConstraints.orientation_constraints.push_back(orientationConstraint); 86 | 87 | 88 | // Set all the constraints 89 | move_group.setPathConstraints(cameraPoseConstraints); 90 | 91 | 92 | // Make a motion planner to test poses in free space without moving the robot. 93 | robot_model_loader::RobotModelLoader robot_model_loader("robot_description"); 94 | robot_model::RobotModelPtr robot_model = robot_model_loader.getModel(); 95 | planning_scene::PlanningScenePtr planning_scene(new planning_scene::PlanningScene(robot_model)); 96 | 97 | planning_pipeline::PlanningPipelinePtr planning_pipeline(new planning_pipeline::PlanningPipeline(robot_model, node_handle_, "planning_plugin", "request_adapters")); 98 | 99 | planning_interface::MotionPlanRequest req; 100 | planning_interface::MotionPlanResponse res; 101 | 102 | req.group_name = "manipulator_ensenso"; 103 | req.allowed_planning_time = 0.5; 104 | 105 | std::vector tolerance_pose(3, 0.01); 106 | std::vector tolerance_angle(3, 0.01); 107 | 108 | moveit_msgs::Constraints goal; 109 | 110 | 111 | // Test poses until we can find one that's reachable in constrained space. 
112 | bool success = false; 113 | while(!success) 114 | { 115 | ROS_INFO_STREAM("Pose index: " << currentPoseIndex); 116 | if (currentPoseIndex >= move_targets.poses.size()) 117 | { 118 | ROS_ERROR("Couldn't reach any of the provided poses in free space!"); 119 | break; 120 | } 121 | // Get next best pose and try to plan a path to it in free space 122 | targetPose = move_targets.poses[currentPoseIndex]; 123 | ROS_INFO_STREAM("Trying next best pose: " << targetPose); 124 | geometry_msgs::PoseStamped targetPoseStamped; 125 | targetPoseStamped.pose = targetPose; 126 | targetPoseStamped.header.frame_id = "base_link"; 127 | goal = kinematic_constraints::constructGoalConstraints("ensenso_sensor_optical_frame", targetPoseStamped, tolerance_pose, tolerance_angle); 128 | req.goal_constraints.clear(); 129 | req.goal_constraints.push_back(goal); 130 | planning_pipeline->generatePlan(planning_scene, req, res); 131 | if (res.error_code_.val == res.error_code_.SUCCESS) 132 | { 133 | // If it's reachable in free space, try to move to it in constrained space. 134 | ROS_INFO("Found a path in free space!"); 135 | move_group.setPoseTarget(targetPose); 136 | if (!move_group.move()) 137 | { 138 | // If not reachable in constrained space, try the next pose in free space. 
139 | ROS_INFO("Couldn't move to this pose in constrained space"); 140 | currentPoseIndex++; 141 | } 142 | else 143 | { 144 | // If reachable in constrained space, move to next step of NBV 145 | ROS_INFO("Moved to the pose in constrained space!"); 146 | success = true; 147 | } 148 | } 149 | else 150 | { 151 | // If not reachable in free space, try the next pose 152 | ROS_ERROR("Couldn't compute a free-space trajectory"); 153 | currentPoseIndex++; 154 | } 155 | } 156 | } 157 | } 158 | 159 | 160 | 161 | int main(int argc, char* argv[]) 162 | { 163 | ros::init(argc, argv, "exploration_controller_node"); 164 | ros::NodeHandle nh; 165 | 166 | 167 | moveit::planning_interface::MoveGroupInterface move_group("manipulator_ensenso"); 168 | move_group.setPlanningTime(30.0); 169 | 170 | Explorer explorer(nh); 171 | 172 | ros::AsyncSpinner async_spinner(1); 173 | async_spinner.start(); 174 | 175 | explorer.MoveToNBVs(move_group); 176 | 177 | return 0; 178 | } 179 | -------------------------------------------------------------------------------- /nbv_planner/src/nbv_planner_node.cpp: -------------------------------------------------------------------------------- 1 | #include "nbv_planner/nbv_planner_node.hpp" 2 | 3 | NBVSolver::NBVSolver(ros::NodeHandle &nh) 4 | { 5 | nbv_server_ = nh.advertiseService("get_nbv", &NBVSolver::GetNBV, this); 6 | octomap_client_ = nh.serviceClient("/octomap_full"); 7 | unknown_cloud_publisher_ = nh.advertise("my_unknown_cloud",1); 8 | 9 | float bound_min_x, bound_min_y, bound_min_z, bound_max_x, bound_max_y, bound_max_z, voxel_res; 10 | int dims_x, dims_y, dims_z; 11 | 12 | // TODO: Don't read from hardcoded values. Use ROS parameters, ideally. 
13 | voxel_res = 0.001; 14 | dims_x = 480; 15 | dims_y = 480; 16 | dims_z = 256; 17 | bound_min_z = bound_min_x = bound_min_y = 0.0; 18 | bound_max_x = voxel_res*(float)dims_x; 19 | bound_max_y = voxel_res*(float)dims_y; 20 | bound_max_z = voxel_res*(float)dims_z; 21 | 22 | bound_min_ = octomath::Vector3(bound_min_x, bound_min_y, bound_min_z); 23 | bound_max_ = octomath::Vector3(bound_max_x, bound_max_y, bound_max_z); 24 | 25 | ROS_INFO_STREAM("Evaluating unknowns in a region from " << bound_min_ << " to " << bound_max_); 26 | 27 | num_pose_slices_ = 8; 28 | ray_count_ = 15; 29 | raycast_distance_ = 0.5; 30 | 31 | 32 | // Set up the line lists and marker publishers for the visualization of the raycast evaluation. 33 | ray_line_list_.header.frame_id = hit_ray_line_list_.header.frame_id = "/volume_pose"; 34 | ray_line_list_.scale.x = hit_ray_line_list_.scale.x = 0.0001; 35 | ray_line_list_.action = hit_ray_line_list_.action = visualization_msgs::Marker::ADD; 36 | ray_line_list_.pose.orientation.w = hit_ray_line_list_.pose.orientation.w = 1.0; 37 | ray_line_list_.type = hit_ray_line_list_.type = visualization_msgs::Marker::LINE_LIST; 38 | ray_line_list_.color.b = 1.0; 39 | hit_ray_line_list_.color.r = 1.0; 40 | ray_line_list_.color.a = hit_ray_line_list_.color.a = 1.0; 41 | 42 | all_ray_pub_ = nh.advertise("all_rays", 10); 43 | hit_ray_pub_ = nh.advertise("hit_rays", 10); 44 | } 45 | bool NBVSolver::GetNBV(nbv_planner::GetNBV::Request &req, nbv_planner::GetNBV::Response &res){ 46 | // Download the octomap from octomap_server 47 | ROS_INFO("Attempting to get octomap"); 48 | octomap_msgs::GetOctomap srv; 49 | if (!octomap_client_.call(srv)) 50 | { 51 | ROS_INFO("Couldn't get octomap"); 52 | return false; 53 | } 54 | ROS_INFO("Got octomap!"); 55 | 56 | octomap::AbstractOcTree* abstract_tree = octomap_msgs::fullMsgToMap(srv.response.map); 57 | octomap::ColorOcTree* my_map = (octomap::ColorOcTree*)abstract_tree; 58 | octomap::ColorOcTree tree = *my_map; 59 | 60 | // int 
numLeaves = tree.calcNumNodes(); 61 | // ROS_INFO_STREAM("Number of nodes: " << numLeaves); 62 | 63 | // Make a new octomap using the coordinates of the centers of the unknown voxels within the specified volume bounds. 64 | octomap::ColorOcTree unknownTree(tree.getResolution()); 65 | std::list unknownLeafs; 66 | 67 | tree.getUnknownLeafCenters(unknownLeafs, bound_min_, bound_max_, 0); 68 | ROS_INFO_STREAM("Number of unknown leaves: " << unknownLeafs.size()); 69 | 70 | // octomath::Vector3 first = unknownLeafs.front(); 71 | // ROS_INFO_STREAM("First coords: " << first.x() << " " << first.y() << " " << first.z()); 72 | 73 | // Convert unknown points from octomap vector3's to pcl points and then to a ROS message so they can be published and visualized. 74 | pcl::PointCloud pclCloud; 75 | for (std::list::const_iterator it = unknownLeafs.begin(); it != unknownLeafs.end(); ++it) 76 | { 77 | pcl::PointXYZ newPoint(it->x(), it->y(), it->z()); 78 | pclCloud.push_back(newPoint); 79 | unknownTree.updateNode(it->x(), it->y(), it->z(), true); 80 | } 81 | sensor_msgs::PointCloud2 unknownLeafCloud; 82 | pcl::toROSMsg(pclCloud, unknownLeafCloud); 83 | ROS_INFO("converted leaf list to point cloud"); 84 | unknownLeafCloud.header.stamp = ros::Time::now(); 85 | unknownLeafCloud.header.frame_id = "map"; 86 | unknown_cloud_publisher_.publish(unknownLeafCloud); 87 | ROS_INFO("Published updated point cloud"); 88 | 89 | 90 | // Generate many random poses near the volume within certain limits 91 | ROS_INFO("Making candidate poses..."); 92 | candidate_poses_.clear(); 93 | std::list poses; 94 | // tf::Transform orbitCenter(tf::Quaternion(0,0,0,1), tf::Vector3((bound_min_.x()-bound_max_.x())/2, (bound_min_.y()-bound_max_.y())/2, (bound_max_.z()-bound_min_.z())/2)); 95 | GenerateViewPosesRandom(64, 0, 2*M_PI, M_PI/16, M_PI/2, 0.5, 0.75, poses); 96 | 97 | 98 | // Evaluate the list of poses to find the ones that can see the most unknown voxels. 
99 | ray_line_list_.points.clear(); 100 | hit_ray_line_list_.points.clear(); 101 | 102 | // std::vector viewMetrics; 103 | std::vector> viewsWithMetrics; 104 | 105 | for (std::list::const_iterator it = poses.begin(); it != poses.end(); ++it) 106 | { 107 | candidate_poses_.push_back(*it); 108 | int fitness = EvaluateCandidateView(*it, tree, unknownTree); 109 | ROS_INFO_STREAM("Casts from this view hit " << fitness << " unseen voxels."); 110 | // viewMetrics.push_back(fitness); 111 | viewsWithMetrics.push_back(std::tuple(*it, fitness)); 112 | } 113 | 114 | // std::vector::iterator result; 115 | 116 | // result = std::max_element(viewMetrics.begin(), viewMetrics.end()); 117 | // int indexOfMax = std::distance(viewMetrics.begin(), result); 118 | 119 | // std::list::iterator poseIt = candidate_poses_.begin(); 120 | // std::advance(poseIt, indexOfMax); 121 | 122 | // ROS_INFO_STREAM("Best view is # " << indexOfMax); 123 | 124 | // Sort the list of views in order from most unknown voxels exposed to least. 125 | std::sort( viewsWithMetrics.begin(), viewsWithMetrics.end(), [ ]( const std::tuple& a, const std::tuple& b ) 126 | { 127 | return std::get<1>(a) > std::get<1>(b); 128 | }); 129 | 130 | // Evaluate the best view against some threshold 131 | if (std::get<1>(viewsWithMetrics[0]) <= 5) 132 | { 133 | // If very few unknown voxels are visible from the best view, then it's probably not worth exploring any more. 134 | res.exploration_done = true; 135 | } 136 | else 137 | { 138 | res.exploration_done = false; 139 | } 140 | 141 | // Return the list of candidate poses. 
142 | tf::StampedTransform base_to_volume; 143 | listener_.waitForTransform("base_link", "volume_pose", ros::Time::now(), ros::Duration(0.5)); 144 | listener_.lookupTransform("base_link", "volume_pose", ros::Time(0), base_to_volume); 145 | 146 | for (int i = 0; i < viewsWithMetrics.size(); i++) { 147 | ROS_INFO_STREAM(std::get<1>(viewsWithMetrics[i])); 148 | geometry_msgs::Pose aPose; 149 | tf::poseTFToMsg(tf::Transform(base_to_volume.getRotation(), base_to_volume.getOrigin()) * std::get<0>(viewsWithMetrics[i]), aPose); 150 | res.bestViewPose.poses.push_back(aPose); 151 | } 152 | ROS_INFO_STREAM("Best pose (base-relative): " << res.bestViewPose.poses.front()); 153 | 154 | abstract_tree->clear(); 155 | 156 | return true; 157 | } 158 | 159 | // Legacy code: generate poses evenly distributed around a sphere at a constant distance from some center point. 160 | void NBVSolver::GenerateViewPosesSpherical(float distance, int slices, float yawMin, float yawMax, float pitchMin, float pitchMax, tf::Transform &origin, std::list &poseList) 161 | { 162 | tf::Transform offset(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(0)), tf::Vector3(-distance, 0, 0)); 163 | 164 | for (float angle = yawMin; angle <= yawMax; angle += (yawMax - yawMin)/(slices-1)) 165 | { 166 | for (float elevation = pitchMin; elevation < pitchMax; elevation += (pitchMax - pitchMin)/slices) 167 | { 168 | tf::Transform yaw(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(angle)), tf::Vector3(0,0,0)); 169 | tf::Transform pitch(tf::Quaternion(tf::Vector3(0,1,0), tfScalar(elevation)), tf::Vector3(0,0,0)); 170 | poseList.push_back(origin*yaw*pitch*offset); 171 | } 172 | } 173 | } 174 | 175 | // Randomly distribute poses facing the volume. 
176 | void NBVSolver::GenerateViewPosesRandom(int numPoses, float yawMin, float yawMax, float pitchMin, float pitchMax, float dMin, float dMax, std::list &poseList) 177 | { 178 | while (poseList.size() < numPoses) 179 | { 180 | // Randomize the center of rotation to be a point somewhere within the volume. 181 | float originX = this->RandInRange(bound_min_.x(), bound_max_.x()); 182 | float originY = this->RandInRange(bound_min_.y(), bound_max_.y()); 183 | float originZ = (bound_max_.z() + bound_min_.z())/2.0; 184 | 185 | tf::Transform origin(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(0)), tf::Vector3(originX, originY, originZ)); 186 | 187 | float distance = this->RandInRange(dMin, dMax); 188 | tf::Transform offset(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(0)), tf::Vector3(0, 0, -distance)); 189 | 190 | float angle = this->RandInRange(yawMin, yawMax); 191 | float elevation = this->RandInRange(pitchMin, pitchMax); 192 | 193 | tf::Transform pitch(tf::Quaternion(tf::Vector3(0,1,0), tfScalar(-elevation + 3*M_PI/2)), tf::Vector3(0,0,0)); 194 | tf::Transform yaw(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(angle)), tf::Vector3(0,0,0)); 195 | tf::Transform roll(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(-M_PI/2)), tf::Vector3(0,0,0)); 196 | poseList.push_back(origin*yaw*pitch*roll*offset); 197 | } 198 | } 199 | 200 | float NBVSolver::RandInRange(float min, float max) { 201 | return min + static_cast (rand()) /( static_cast (RAND_MAX/(max - min))); 202 | } 203 | 204 | // For a given pose, an octree of knowns, and an octree of unknowns, return how many unknown voxels would be exposed if the camera were at that pose. 205 | int NBVSolver::EvaluateCandidateView(tf::Transform pose, octomap::ColorOcTree &tree, octomap::ColorOcTree &unknownTree) 206 | { 207 | float fov = M_PI/6.0; 208 | 209 | int unknownCount = 0; 210 | 211 | // Generate an evenly-distributed array of rays corresponding to the camera's field of view. 
212 | std::list rays; 213 | for (float angleWidth = -fov/2.0; angleWidth <= fov/2.0; angleWidth += fov/ray_count_) 214 | { 215 | for (float angleHeight = -fov/2.0; angleHeight <= fov/2.0; angleHeight += fov/ray_count_) 216 | { 217 | tf::Matrix3x3 rotationMat; 218 | rotationMat.setRPY(tfScalar(angleHeight), tfScalar(angleWidth), tfScalar(0.0)); 219 | 220 | tf::Quaternion rotation; 221 | rotationMat.getRotation(rotation); 222 | 223 | tf::Transform rayTf(tf::Quaternion(tf::Vector3(0,0,1), tfScalar(0)), tf::Vector3(0,0,1)); 224 | tf::Transform rayRotated = tf::Transform(pose.getRotation(), tf::Vector3(0,0,0)) * tf::Transform(rotation, tf::Vector3(0,0,0)) * rayTf; 225 | tf::Vector3 ray = rayRotated.getOrigin(); 226 | rays.push_back(octomap::pointTfToOctomap(ray)); 227 | } 228 | } 229 | 230 | for (std::list::const_iterator it = rays.begin(); it != rays.end(); ++it) 231 | { 232 | octomath::Vector3 origin = octomap::pointTfToOctomap(pose.getOrigin()); 233 | octomath::Vector3 hitKnown; 234 | octomath::Vector3 hitUnknown; 235 | 236 | geometry_msgs::Point p; 237 | p.x = origin.x(); 238 | p.y = origin.y(); 239 | p.z = origin.z(); 240 | ray_line_list_.points.push_back(p); 241 | 242 | geometry_msgs::Point q; 243 | q.x = origin.x() + it->x(); 244 | q.y = origin.y() + it->y(); 245 | q.z = origin.z() + it->z(); 246 | ray_line_list_.points.push_back(q); 247 | 248 | // Need to check for a few cases due to how octomap handles raycasts through unknown space. 249 | // Want to count hits to unknown space that aren't occluded by known occupied space, since those are potentially discoverable. 
250 | bool raycastToKnown = tree.castRay(origin, *it, hitKnown, true, 1.0); 251 | bool raycastToUnknown = unknownTree.castRay(origin, *it, hitUnknown, true, 1.0); 252 | if (raycastToKnown && raycastToUnknown) 253 | { 254 | double distanceKnown = octomath::Vector3(hitKnown - origin).norm(); 255 | double distanceUnknown = octomath::Vector3(hitUnknown - origin).norm(); 256 | if (distanceUnknown < distanceKnown) 257 | { 258 | // Unknown voxel is nearer than an occupied voxel behind it 259 | unknownCount++; 260 | hit_ray_line_list_.points.push_back(p); 261 | hit_ray_line_list_.points.push_back(octomap::pointOctomapToMsg(hitUnknown)); 262 | } 263 | } 264 | else if (!raycastToKnown && raycastToUnknown) 265 | { 266 | // Unknown voxel occludes only free space 267 | unknownCount++; 268 | hit_ray_line_list_.points.push_back(p); 269 | hit_ray_line_list_.points.push_back(octomap::pointOctomapToMsg(hitUnknown)); 270 | } 271 | // Otherwise, there's either only free space up to a known occupied surface, or there's free space up to the edge of the volume bounds. 272 | // In either case, there's nothing new for us to learn along that ray. 
273 | } 274 | return unknownCount; 275 | } 276 | 277 | void NBVSolver::Update() 278 | { 279 | int count = 0; 280 | for (std::list::iterator it = candidate_poses_.begin(); it != candidate_poses_.end(); ++it, count++) 281 | { 282 | tf::StampedTransform transformStamped(*it, ros::Time::now(), "volume_pose", "candidate" + std::to_string(count)); 283 | broadcaster_.sendTransform(transformStamped); 284 | 285 | } 286 | ray_line_list_.header.stamp = ros::Time::now(); 287 | all_ray_pub_.publish(ray_line_list_); 288 | hit_ray_line_list_.header.stamp = ros::Time::now(); 289 | hit_ray_pub_.publish(hit_ray_line_list_); 290 | } 291 | 292 | int main(int argc, char* argv[]) 293 | { 294 | ros::init(argc, argv, "nbv_planner_node"); 295 | ros::NodeHandle nh; 296 | 297 | std::srand(std::time(0)); 298 | 299 | NBVSolver solver(nh); 300 | 301 | ros::Rate rate(10.0); 302 | 303 | while (ros::ok()){ 304 | solver.Update(); 305 | rate.sleep(); 306 | ros::spinOnce(); 307 | } 308 | return 0; 309 | } 310 | -------------------------------------------------------------------------------- /nbv_planner/src/octomap_reset_node.cpp: -------------------------------------------------------------------------------- 1 | #include "nbv_planner/octomap_reset_node.hpp" 2 | 3 | OctomapResetter::OctomapResetter(ros::NodeHandle &nh) { 4 | volume_update_subscriber_ = nh.subscribe("/volume_marker/feedback", 1, &OctomapResetter::UpdateCallback, this); 5 | volume_reset_client_ = nh.serviceClient("/octomap_server/reset"); 6 | } 7 | 8 | void OctomapResetter::UpdateCallback(const visualization_msgs::InteractiveMarkerFeedbackConstPtr& feedback) { 9 | std_srvs::Empty srv; 10 | volume_reset_client_.call(srv); 11 | } 12 | 13 | int main(int argc, char** argv) { 14 | ros::init(argc, argv, "volume_tf_broadcaster"); 15 | ros::NodeHandle nh; 16 | 17 | OctomapResetter resetter(nh); 18 | 19 | while (true) { 20 | ros::spinOnce(); 21 | } 22 | return 0; 23 | } 24 | 
-------------------------------------------------------------------------------- /nbv_planner/srv/GetNBV.srv: --------------------------------------------------------------------------------
#Request
---
#Response
bool exploration_done
geometry_msgs/PoseArray bestViewPose
-------------------------------------------------------------------------------- /yak/CMakeLists.txt: --------------------------------------------------------------------------------
cmake_minimum_required(VERSION 2.8.3)
project(yak)
add_definitions(-std=c++11)

#compute flags macros
# Expand the "X.Y X.Y(Z.W) ..." architecture strings held in ${arch_bin}/${arch_ptx}
# into NVCC -gencode flags, stored into the variable named by ${cuda_nvcc_target_flags}.
MACRO(CUDA_COMPUTE_TARGET_FLAGS arch_bin arch_ptx cuda_nvcc_target_flags)
  # Strip dots: "3.0" -> "30" so the values can be used in compute_NN/sm_NN names.
  string(REGEX REPLACE "\\." "" ARCH_BIN_WITHOUT_DOTS "${${arch_bin}}")
  string(REGEX REPLACE "\\." "" ARCH_PTX_WITHOUT_DOTS "${${arch_ptx}}")

  set(cuda_computer_target_flags_temp "")

  # Tell NVCC to add binaries for the specified GPUs
  string(REGEX MATCHALL "[0-9()]+" ARCH_LIST "${ARCH_BIN_WITHOUT_DOTS}")
  foreach(ARCH IN LISTS ARCH_LIST)
    if (ARCH MATCHES "([0-9]+)\\(([0-9]+)\\)")
      # User explicitly specified PTX for the concrete BIN
      set(cuda_computer_target_flags_temp ${cuda_computer_target_flags_temp} -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
    else()
      # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
      set(cuda_computer_target_flags_temp ${cuda_computer_target_flags_temp} -gencode arch=compute_${ARCH},code=sm_${ARCH})
    endif()
  endforeach()

  # Tell NVCC to add PTX intermediate code for the specified architectures
  string(REGEX MATCHALL "[0-9]+" ARCH_LIST "${ARCH_PTX_WITHOUT_DOTS}")
  foreach(ARCH IN LISTS ARCH_LIST)
    set(cuda_computer_target_flags_temp ${cuda_computer_target_flags_temp} -gencode arch=compute_${ARCH},code=compute_${ARCH})
  endforeach()

  set(${cuda_nvcc_target_flags} ${cuda_computer_target_flags_temp})
ENDMACRO()

# Compute -gencode flags from the CUDA_ARCH_BIN/CUDA_ARCH_PTX cache variables and
# append them to CUDA_NVCC_FLAGS.
MACRO(APPEND_TARGET_ARCH_FLAGS)
  set(cuda_nvcc_target_flags "")
  CUDA_COMPUTE_TARGET_FLAGS(CUDA_ARCH_BIN CUDA_ARCH_PTX cuda_nvcc_target_flags)
  if (cuda_nvcc_target_flags)
    message(STATUS "CUDA NVCC target flags: ${cuda_nvcc_target_flags}")
    list(APPEND CUDA_NVCC_FLAGS ${cuda_nvcc_target_flags})
  endif()
ENDMACRO()


list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/Modules/")
message(STATUS "CMAKE MODULE PATH IS ${CMAKE_MODULE_PATH}")

find_package(OpenCV REQUIRED COMPONENTS core highgui)
message(STATUS "Found OpenCV At ${OpenCV_INCLUDE_DIRS}")

#find_library(OPENVDB_LIBRARY NAMES openvdb)
#find_library(TBB_LIBRARY NAMES tbb)
#find_library(HALF_LIBRARY NAMES Half)
# NOTE(review): find_library() takes no version or COMPONENTS arguments — as written this
# just searches for a library literally named "PCL" and the extra words are treated as
# names/paths. This was presumably meant to be
# find_package(PCL 1.8 REQUIRED COMPONENTS common gpu); verify before changing, since the
# PCL_* variables used below may currently be provided transitively by pcl_ros.
find_library(PCL 1.8 REQUIRED COMPONENTS common gpu)

find_package(CUDA REQUIRED)
if(${CUDA_VERSION_STRING} VERSION_GREATER "4.1")
  set(CUDA_ARCH_BIN "2.0 2.1(2.0) 3.0 5.0 5.2" CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
else()
  set(CUDA_ARCH_BIN "2.0 2.1(2.0)" CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
endif()
APPEND_TARGET_ARCH_FLAGS()
message(STATUS "CUDA_BIN_ARCH ${CUDA_ARCH_BIN}")

find_package(catkin REQUIRED COMPONENTS
  message_generation
  cv_bridge
  image_transport
  rosconsole
  roscpp
  sensor_msgs
  tf
  pcl_ros
  visualization_msgs
  tf_conversions
  interactive_markers

)

add_message_files(
  FILES
  TSDF.msg
  SparseTSDF.msg
)

add_service_files(
  FILES
  GetTSDF.srv
  GetSparseTSDF.srv
  ResetVolume.srv
)


generate_messages(
  DEPENDENCIES
  geometry_msgs
  visualization_msgs
)

catkin_package(
  INCLUDE_DIRS include
  LIBRARIES yak
  CATKIN_DEPENDS
    cv_bridge
    image_transport
    rosconsole
    roscpp
    sensor_msgs
    tf
    pcl_ros
    visualization_msgs
    tf_conversions
    interactive_markers
    message_runtime
)


include_directories(
  include
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  ${CUDA_INCLUDE_DIRS}
  ${PCL_INCLUDE_DIRS}
)

#messages for debugging cuda libraries
#message("debugging")
#message("PCL_INCLUDE_DIRS: ${PCL_INCLUDE_DIRS}")
#message("CUDA_INCLUDE_DIRS: ${CUDA_INCLUDE_DIRS}")
#message("CUDA_CUDA_LIBRARY: ${CUDA_CUDA_LIBRARY}")

set(HAVE_CUDA 1)
#list(APPEND CUDA_NVCC_FLAGS "-arch;compute_20")
# NOTE(review): these hard-coded gencode flags duplicate APPEND_TARGET_ARCH_FLAGS() above,
# and compute_20/sm_20 support was removed in CUDA 9 — this line caps the usable toolkit.
list(APPEND CUDA_NVCC_FLAGS "-gencode;arch=compute_20,code=sm_20;-gencode;arch=compute_20,code=sm_21;-gencode;arch=compute_30,code=sm_30;-gencode;arch=compute_35,code=sm_35")
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler;-fPIC;")

link_directories(${PCL_LIBRARY_DIRS})

cuda_add_library(yak
  src/core.cpp
  src/device_memory.cpp
  src/imgproc.cpp
  src/kinfu.cpp
  src/precomp.cpp
  src/projective_icp.cpp
  src/tsdf_volume.cpp
  src/cuda/imgproc.cu
  src/cuda/proj_icp.cu
  src/cuda/tsdf_volume.cu
  src/ros/ros_rgbd_camera.cpp
  src/ros/kinfu_server.cpp
)
add_dependencies(yak ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})


target_link_libraries(yak
  ${catkin_LIBRARIES}
  ${CUDA_LIBRARIES}
  ${CUDA_CUDA_LIBRARY}
  ${OpenCV_LIBS}
)

add_executable(kinfu_node
  src/kinfu_node.cpp)
add_dependencies(kinfu_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

add_executable(volume_tf_broadcaster
  src/ros/volume_tf_broadcaster.cpp)
add_dependencies(volume_tf_broadcaster ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

add_executable(volume_marker
  src/ros/volume_marker.cpp)
add_dependencies(volume_marker ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

add_definitions(${PCL_DEFINITIONS})

target_link_libraries(kinfu_node
  yak
  ${catkin_LIBRARIES}
  ${CUDA_LIBRARIES}
  ${CUDA_CUDA_LIBRARY}
  ${OpenCV_LIBS}
)

target_link_libraries(volume_tf_broadcaster
  yak
  ${catkin_LIBRARIES}
  ${OpenCV_LIBS}
)

target_link_libraries(volume_marker
  yak
  ${catkin_LIBRARIES}
  ${OpenCV_LIBS}
)

install(TARGETS yak kinfu_node
  ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

install(DIRECTORY include/${PROJECT_NAME}/
  DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
)
-------------------------------------------------------------------------------- /yak/cmake/Modules/FindOpenNI.cmake: --------------------------------------------------------------------------------
###############################################################################
# Find OpenNI
#
# This sets the following variables:
# OPENNI_FOUND - True if OPENNI was found.
# OPENNI_INCLUDE_DIRS - Directories containing the OPENNI include files.
# OPENNI_LIBRARIES - Libraries needed to use OPENNI.
# OPENNI_DEFINITIONS - Compiler flags for OPENNI.
#
# For libusb-1.0, add USB_10_ROOT if not found

find_package(PkgConfig QUIET)

# Find LibUSB
if(NOT WIN32)
  pkg_check_modules(PC_USB_10 libusb-1.0)
  find_path(USB_10_INCLUDE_DIR libusb-1.0/libusb.h
    HINTS ${PC_USB_10_INCLUDEDIR} ${PC_USB_10_INCLUDE_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
    PATH_SUFFIXES libusb-1.0)

  find_library(USB_10_LIBRARY
    NAMES usb-1.0
    HINTS ${PC_USB_10_LIBDIR} ${PC_USB_10_LIBRARY_DIRS} "${USB_10_ROOT}" "$ENV{USB_10_ROOT}"
    PATH_SUFFIXES lib)

  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(USB_10 DEFAULT_MSG USB_10_LIBRARY USB_10_INCLUDE_DIR)

  if(NOT USB_10_FOUND)
    message(STATUS "OpenNI disabled because libusb-1.0 not found.")
    return()
  else()
    include_directories(SYSTEM ${USB_10_INCLUDE_DIR})
  endif()
endif(NOT WIN32)

# Older CMake's pkg_check_modules has no QUIET keyword.
if(${CMAKE_VERSION} VERSION_LESS 2.8.2)
  pkg_check_modules(PC_OPENNI openni-dev)
else()
  pkg_check_modules(PC_OPENNI QUIET openni-dev)
endif()

set(OPENNI_DEFINITIONS ${PC_OPENNI_CFLAGS_OTHER})

# 64-bit Windows installs use suffixed library/env-var names (OpenNI64 etc.).
set(OPENNI_SUFFIX)
if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)
  set(OPENNI_SUFFIX 64)
endif(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8)

#add a hint so that it can find it without the pkg-config
find_path(OPENNI_INCLUDE_DIR XnStatus.h
  HINTS ${PC_OPENNI_INCLUDEDIR} ${PC_OPENNI_INCLUDE_DIRS} /usr/include/openni /usr/include/ni "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
  PATHS "$ENV{OPEN_NI_INSTALL_PATH${OPENNI_SUFFIX}}/Include"
  PATH_SUFFIXES openni include Include)
#add a hint so that it can find it without the pkg-config
find_library(OPENNI_LIBRARY
  NAMES OpenNI${OPENNI_SUFFIX}
  HINTS ${PC_OPENNI_LIBDIR} ${PC_OPENNI_LIBRARY_DIRS} /usr/lib "${OPENNI_ROOT}" "$ENV{OPENNI_ROOT}"
  PATHS "$ENV{OPEN_NI_LIB${OPENNI_SUFFIX}}"
  PATH_SUFFIXES lib Lib Lib64)

if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
  # NOTE(review): LIBUSB_1_LIBRARIES is never set by this module — the libusb probe above
  # defines USB_10_LIBRARY — so this expands empty on Darwin; verify the intended variable.
  set(OPENNI_LIBRARIES ${OPENNI_LIBRARY} ${LIBUSB_1_LIBRARIES})
else()
  set(OPENNI_LIBRARIES ${OPENNI_LIBRARY})
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenNI DEFAULT_MSG OPENNI_LIBRARY OPENNI_INCLUDE_DIR)

mark_as_advanced(OPENNI_LIBRARY OPENNI_INCLUDE_DIR)

if(OPENNI_FOUND)
  # Add the include directories
  set(OPENNI_INCLUDE_DIRS ${OPENNI_INCLUDE_DIR})
  message(STATUS "OpenNI found (include: ${OPENNI_INCLUDE_DIRS}, lib: ${OPENNI_LIBRARY})")
endif(OPENNI_FOUND)

-------------------------------------------------------------------------------- /yak/cmake/Targets.cmake: --------------------------------------------------------------------------------
################################################################################################
# short command to setup source group
function(kf_source_group group)
  cmake_parse_arguments(VW_SOURCE_GROUP "" "" "GLOB" ${ARGN})
  file(GLOB srcs ${VW_SOURCE_GROUP_GLOB})
  #list(LENGTH ${srcs} ___size)
  #if (___size GREATER 0)
  source_group(${group} FILES ${srcs})
  #endif()
endfunction()


################################################################################################
# short command getting sources from standard directories
macro(pickup_std_sources)
  kf_source_group("Include" GLOB "include/${module_name}/*.h*")
  kf_source_group("Include\\cuda" GLOB "include/${module_name}/cuda/*.h*")
  kf_source_group("Source" GLOB "src/*.cpp" "src/*.h*")
  kf_source_group("Source\\utils" GLOB "src/utils/*.cpp" "src/utils/*.h*")
  kf_source_group("Source\\cuda" GLOB "src/cuda/*.c*" "src/cuda/*.h*")
  FILE(GLOB_RECURSE sources include/${module_name}/*.h* src/*.cpp src/*.h* src/cuda/*.h* src/cuda/*.c*)
endmacro()


################################################################################################
# short command for declaring includes from other modules
macro(declare_deps_includes)
  foreach(__arg ${ARGN})
    get_filename_component(___path "${CMAKE_SOURCE_DIR}/modules/${__arg}/include" ABSOLUTE)
    if (EXISTS ${___path})
      include_directories(${___path})
    endif()
  endforeach()

  unset(___path)
  unset(__arg)
endmacro()


################################################################################################
# short command for setting default target properties
# NOTE(review): this function is defined twice in this file (see the identical definition
# further down); the later definition silently wins in CMake.
function(default_properties target)
  set_target_properties(${target} PROPERTIES
    DEBUG_POSTFIX "d"
    ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")

  if (NOT ${target} MATCHES "^test_")
    # NOTE(review): ${the_target} is not this function's parameter (which is `target`);
    # it is only set by the add_test macro below — looks like a copy-paste slip; verify.
    install(TARGETS ${the_target} RUNTIME DESTINATION ".")
  endif()
endfunction()

function(test_props target)
  #os_project_label(${target} "[test]")
  if(USE_PROJECT_FOLDERS)
    set_target_properties(${target} PROPERTIES FOLDER "Tests")
  endif()
endfunction()

function(app_props target)
  #os_project_label(${target} "[app]")
  if(USE_PROJECT_FOLDERS)
    set_target_properties(${target} PROPERTIES FOLDER "Apps")
  endif()
endfunction()


################################################################################################
# short command for setting default target properties
# NOTE(review): duplicate of default_properties above — this definition is the one in effect.
function(default_properties target)
  set_target_properties(${target} PROPERTIES
    DEBUG_POSTFIX "d"
    ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")

  if (NOT ${target} MATCHES "^test_")
    install(TARGETS ${the_target} RUNTIME DESTINATION ".")
  endif()
endfunction()


################################################################################################
# short command for adding library module
macro(add_module_library name)
  set(module_name ${name})
  pickup_std_sources()
  include_directories(include src src/cuda)

  set(__has_cuda OFF)
  check_cuda(__has_cuda)

  # Optional second argument selects the library type; default is STATIC.
  set(__lib_type STATIC)
  if (${ARGV1} MATCHES "SHARED|STATIC")
    set(__lib_type ${ARGV1})
  endif()

  if (__has_cuda)
    cuda_add_library(${module_name} ${__lib_type} ${sources})
  else()
    add_library(${module_name} ${__lib_type} ${sources})
  endif()

  if(MSVC)
    set_target_properties(${module_name} PROPERTIES DEFINE_SYMBOL KFUSION_API_EXPORTS)
  else()
    add_definitions(-DKFUSION_API_EXPORTS)
  endif()

  default_properties(${module_name})

  if(USE_PROJECT_FOLDERS)
    set_target_properties(${module_name} PROPERTIES FOLDER "Libraries")
  endif()

  set_target_properties(${module_name} PROPERTIES INSTALL_NAME_DIR lib)

  install(TARGETS ${module_name}
    RUNTIME DESTINATION bin COMPONENT main
    LIBRARY DESTINATION lib COMPONENT main
    ARCHIVE DESTINATION lib COMPONENT main)

  install(DIRECTORY include/ DESTINATION include/ FILES_MATCHING PATTERN "*.h*")
endmacro()

################################################################################################
# short command for adding application module
macro(add_application target sources)
  add_executable(${target} ${sources})
  default_properties(${target})
  app_props(${target})
endmacro()


################################################################################################
# short command for adding test target
# NOTE(review): this shadows CMake's built-in add_test() command.
macro(add_test the_target)
  add_executable(${the_target} ${ARGN})
  default_properties(${the_target})
  test_props(${the_target})
endmacro()
-------------------------------------------------------------------------------- /yak/cmake/Utils.cmake:
--------------------------------------------------------------------------------
################################################################################################
# short alias for CMake logging
function(dmsg)
  message(STATUS "<<${ARGN}")
endfunction()

################################################################################################
# Command checking if current module has cuda sources to compile
macro(check_cuda var)
  file(GLOB cuda src/cuda/*.cu)
  list(LENGTH cuda ___size)
  if (HAVE_CUDA AND ___size GREATER 0)
    set(${var} ON)
  else()
    set(${var} OFF)
  endif()
  unset(___size)
  unset(cuda)
endmacro()

################################################################################################
# Disable the listed compiler warnings on the given flag variables
# (args starting with CMAKE_ are flag variables; /wd... are MSVC warnings; -W... are GCC warnings).
macro(warnings_disable)
  if(NOT ENABLE_NOISY_WARNINGS)
    set(_flag_vars "")
    set(_msvc_warnings "")
    set(_gxx_warnings "")
    foreach(arg ${ARGN})
      if(arg MATCHES "^CMAKE_")
        list(APPEND _flag_vars ${arg})
      elseif(arg MATCHES "^/wd")
        list(APPEND _msvc_warnings ${arg})
      elseif(arg MATCHES "^-W")
        list(APPEND _gxx_warnings ${arg})
      endif()
    endforeach()
    if(MSVC AND _msvc_warnings AND _flag_vars)
      foreach(var ${_flag_vars})
        foreach(warning ${_msvc_warnings})
          set(${var} "${${var}} ${warning}")
        endforeach()
      endforeach()
    elseif(CV_COMPILER_IS_GNU AND _gxx_warnings AND _flag_vars)
      foreach(var ${_flag_vars})
        foreach(warning ${_gxx_warnings})
          # Turn -Wfoo into -Wno-foo (removing any existing -Wfoo from the flags first).
          if(NOT warning MATCHES "^-Wno-")
            string(REPLACE "${warning}" "" ${var} "${${var}}")
            string(REPLACE "-W" "-Wno-" warning "${warning}")
          endif()
          # NOTE(review): ocv_check_flag_support comes from OpenCV's build scripts and is
          # not defined in this repo's visible cmake files — verify it exists where used.
          ocv_check_flag_support(${var} "${warning}" _varname)
          if(${_varname})
            set(${var} "${${var}} ${warning}")
          endif()
        endforeach()
      endforeach()
    endif()
    unset(_flag_vars)
    unset(_msvc_warnings)
    unset(_gxx_warnings)
  endif(NOT ENABLE_NOISY_WARNINGS)
endmacro()

################################################################################################
# Command for setting options with some preconditions
# (extra args after an IF keyword form a condition; the option only exists when it holds).
macro(kf_option variable description value)
  set(__value ${value})
  set(__condition "")
  set(__varname "__value")
  foreach(arg ${ARGN})
    if(arg STREQUAL "IF" OR arg STREQUAL "if")
      set(__varname "__condition")
    else()
      list(APPEND ${__varname} ${arg})
    endif()
  endforeach()
  unset(__varname)
  if("${__condition}" STREQUAL "")
    # No IF clause given: use an always-true condition.
    set(__condition 2 GREATER 1)
  endif()

  if(${__condition})
    if("${__value}" MATCHES ";")
      if(${__value})
        option(${variable} "${description}" ON)
      else()
        option(${variable} "${description}" OFF)
      endif()
    elseif(DEFINED ${__value})
      if(${__value})
        option(${variable} "${description}" ON)
      else()
        option(${variable} "${description}" OFF)
      endif()
    else()
      option(${variable} "${description}" ${__value})
    endif()
  else()
    unset(${variable} CACHE)
  endif()
  unset(__condition)
  unset(__value)
endmacro()
-------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/device.hpp: --------------------------------------------------------------------------------
#ifndef KFUSION_DEVICE_H_
#define KFUSION_DEVICE_H_

#include "yak/kfusion/internal.hpp"
#include "yak/kfusion/cuda/temp_utils.hpp"

////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// TsdfVolume

//__kf_device__
//kfusion::device::TsdfVolume::TsdfVolume(elem_type* _data, int3 _dims, float3 _voxel_size, float _trunc_dist, int _max_weight)
//    : data(_data), dims(_dims), voxel_size(_voxel_size),
trunc_dist(_trunc_dist), max_weight(_max_weight) {} 13 | 14 | //__kf_device__ 15 | //kfusion::device::TsdfVolume::TsdfVolume(const TsdfVolume& other) 16 | // : data(other.data), dims(other.dims), voxel_size(other.voxel_size), trunc_dist(other.trunc_dist), max_weight(other.max_weight) {} 17 | 18 | __kf_device__ kfusion::device::TsdfVolume::elem_type* kfusion::device::TsdfVolume::operator()(int x, int y, int z) 19 | { 20 | return data + x + y * dims.x + z * dims.y * dims.x; 21 | } 22 | 23 | __kf_device__ const kfusion::device::TsdfVolume::elem_type* kfusion::device::TsdfVolume::operator()(int x, int y, int z) const 24 | { 25 | return data + x + y * dims.x + z * dims.y * dims.x; 26 | } 27 | 28 | __kf_device__ kfusion::device::TsdfVolume::elem_type* kfusion::device::TsdfVolume::beg(int x, int y) const 29 | { 30 | return data + x + dims.x * y; 31 | } 32 | 33 | __kf_device__ kfusion::device::TsdfVolume::elem_type* kfusion::device::TsdfVolume::zstep(elem_type * const ptr) const 34 | { 35 | return ptr + dims.x * dims.y; 36 | } 37 | 38 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 39 | /// Projector 40 | 41 | __kf_device__ float2 kfusion::device::Projector::operator()(const float3& p) const 42 | { 43 | float2 coo; 44 | coo.x = __fmaf_rn(f.x, __fdividef(p.x, p.z), c.x); 45 | coo.y = __fmaf_rn(f.y, __fdividef(p.y, p.z), c.y); 46 | return coo; 47 | } 48 | 49 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 50 | /// Reprojector 51 | 52 | __kf_device__ float3 kfusion::device::Reprojector::operator()(int u, int v, float z) const 53 | { 54 | float x = z * (u - c.x) * finv.x; 55 | float y = z * (v - c.y) * finv.y; 56 | return make_float3(x, y, z); 57 | } 58 | 59 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 60 | /// packing/unpacking tsdf volume element 61 | 62 | __kf_device__ 
ushort2 kfusion::device::pack_tsdf(float tsdf, int weight) 63 | { 64 | return make_ushort2(__float2half_rn(tsdf), weight); 65 | } 66 | 67 | __kf_device__ float kfusion::device::unpack_tsdf(ushort2 value, int& weight) 68 | { 69 | weight = value.y; 70 | return __half2float(value.x); 71 | } 72 | __kf_device__ float kfusion::device::unpack_tsdf(ushort2 value) 73 | { 74 | return __half2float(value.x); 75 | } 76 | 77 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 78 | /// Utility 79 | 80 | namespace kfusion 81 | { 82 | namespace device 83 | { 84 | __kf_device__ Vec3f operator*(const Mat3f& m, const Vec3f& v) 85 | { 86 | return make_float3(dot(m.data[0], v), dot(m.data[1], v), dot(m.data[2], v)); 87 | } 88 | 89 | __kf_device__ Vec3f operator*(const Aff3f& a, const Vec3f& v) 90 | { 91 | return a.R * v + a.t; 92 | } 93 | 94 | __kf_device__ Vec3f tr(const float4& v) 95 | { 96 | return make_float3(v.x, v.y, v.z); 97 | } 98 | 99 | struct plus 100 | { 101 | __kf_device__ 102 | float operator ()(float l, float r) const 103 | { 104 | return l + r; 105 | } 106 | __kf_device__ 107 | double operator ()(double l, double r) const 108 | { 109 | return l + r; 110 | } 111 | }; 112 | 113 | struct gmem 114 | { 115 | template __kf_device__ static T LdCs(T *ptr); 116 | template __kf_device__ static void StCs(const T& val, T *ptr); 117 | }; 118 | 119 | template<> __kf_device__ ushort2 gmem::LdCs(ushort2* ptr); 120 | template<> __kf_device__ void gmem::StCs(const ushort2& val, ushort2* ptr); 121 | } 122 | } 123 | 124 | #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200 125 | 126 | #if defined(_WIN64) || defined(__LP64__) 127 | #define _ASM_PTR_ "l" 128 | #else 129 | #define _ASM_PTR_ "r" 130 | #endif 131 | 132 | template<> __kf_device__ ushort2 kfusion::device::gmem::LdCs(ushort2* ptr) 133 | { 134 | ushort2 val; 135 | asm("ld.global.cs.v2.u16 {%0, %1}, [%2];" : "=h"(reinterpret_cast(val.x)), "=h"(reinterpret_cast(val.y)) : 
_ASM_PTR_(ptr)); 136 | return val; 137 | } 138 | 139 | template<> __kf_device__ void kfusion::device::gmem::StCs(const ushort2& val, ushort2* ptr) 140 | { 141 | short cx = val.x, cy = val.y; 142 | asm("st.global.cs.v2.u16 [%0], {%1, %2};" : : _ASM_PTR_(ptr), "h"(reinterpret_cast(cx)), "h"(reinterpret_cast(cy))); 143 | } 144 | #undef _ASM_PTR_ 145 | 146 | #else 147 | template<> __kf_device__ ushort2 kfusion::device::gmem::LdCs(ushort2* ptr) 148 | { 149 | return *ptr; 150 | } 151 | template<> __kf_device__ void kfusion::device::gmem::StCs(const ushort2& val, ushort2* ptr) 152 | { 153 | *ptr = val; 154 | } 155 | #endif 156 | 157 | #endif 158 | 159 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/imgproc.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KFUSION_IMAGE_PROC_H_ 2 | #define KFUSION_IMAGE_PROC_H_ 3 | 4 | #include 5 | 6 | namespace kfusion 7 | { 8 | namespace cuda 9 | { 10 | KF_EXPORTS void depthBilateralFilter(const Depth& in, Depth& out, int ksz, float sigma_spatial, float sigma_depth); 11 | 12 | KF_EXPORTS void depthTruncation(Depth& depth, float threshold); 13 | 14 | KF_EXPORTS void depthBuildPyramid(const Depth& depth, Depth& pyramid, float sigma_depth); 15 | 16 | KF_EXPORTS void computeNormalsAndMaskDepth(const Intr& intr, Depth& depth, Normals& normals); 17 | 18 | KF_EXPORTS void computePointNormals(const Intr& intr, const Depth& depth, Cloud& points, Normals& normals); 19 | 20 | KF_EXPORTS void computeDists(const Depth& depth, Dists& dists, const Intr& intr); 21 | 22 | KF_EXPORTS void resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out); 23 | 24 | KF_EXPORTS void resizePointsNormals(const Cloud& points, const Normals& normals, Cloud& points_out, Normals& normals_out); 25 | 26 | KF_EXPORTS void waitAllDefaultStream(); 27 | 28 | KF_EXPORTS void renderTangentColors(const Normals& normals, 
Image& image); 29 | 30 | KF_EXPORTS void renderImage(const Depth& depth, const Normals& normals, const Intr& intr, const Vec3f& light_pose, Image& image); 31 | 32 | KF_EXPORTS void renderImage(const Cloud& points, const Normals& normals, const Intr& intr, const Vec3f& light_pose, Image& image); 33 | } 34 | } 35 | #endif 36 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/kernel_containers.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KERNEL_CONTAINERS_H_ 2 | #define KERNEL_CONTAINERS_H_ 3 | 4 | #if defined(__CUDACC__) 5 | #define __kf_hdevice__ __host__ __device__ __forceinline__ 6 | #define __kf_device__ __device__ __forceinline__ 7 | #else 8 | #define __kf_hdevice__ 9 | #define __kf_device__ 10 | #endif 11 | 12 | #include 13 | 14 | namespace kfusion 15 | { 16 | namespace cuda 17 | { 18 | template struct DevPtr 19 | { 20 | typedef T elem_type; 21 | const static size_t elem_size = sizeof(elem_type); 22 | 23 | T* data; 24 | 25 | __kf_hdevice__ 26 | DevPtr() : 27 | data(0) 28 | { 29 | } 30 | __kf_hdevice__ 31 | DevPtr(T* data_arg) : 32 | data(data_arg) 33 | { 34 | } 35 | 36 | __kf_hdevice__ 37 | size_t elemSize() const 38 | { 39 | return elem_size; 40 | } 41 | __kf_hdevice__ 42 | operator T*() 43 | { 44 | return data; 45 | } 46 | __kf_hdevice__ 47 | operator const T*() const 48 | { 49 | return data; 50 | } 51 | }; 52 | 53 | template struct PtrSz: public DevPtr 54 | { 55 | __kf_hdevice__ 56 | PtrSz() : 57 | size(0) 58 | { 59 | } 60 | __kf_hdevice__ 61 | PtrSz(T* data_arg, size_t size_arg) : 62 | DevPtr(data_arg), size(size_arg) 63 | { 64 | } 65 | 66 | size_t size; 67 | }; 68 | 69 | template struct PtrStep: public DevPtr 70 | { 71 | __kf_hdevice__ 72 | PtrStep() : 73 | step(0) 74 | { 75 | } 76 | __kf_hdevice__ 77 | PtrStep(T* data_arg, size_t step_arg) : 78 | DevPtr(data_arg), step(step_arg) 79 | { 80 | } 81 | 82 | /** \brief stride between two 
consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */ 83 | size_t step; 84 | 85 | __kf_hdevice__ 86 | T* ptr(int y = 0) 87 | { 88 | return (T*) ((char*) DevPtr::data + y * step); 89 | } 90 | __kf_hdevice__ 91 | const T* ptr(int y = 0) const 92 | { 93 | return (const T*) ((const char*) DevPtr::data + y * step); 94 | } 95 | 96 | __kf_hdevice__ 97 | T& operator()(int y, int x) 98 | { 99 | return ptr(y)[x]; 100 | } 101 | __kf_hdevice__ 102 | const T& operator()(int y, int x) const 103 | { 104 | return ptr(y)[x]; 105 | } 106 | }; 107 | 108 | template struct PtrStepSz: public PtrStep 109 | { 110 | __kf_hdevice__ 111 | PtrStepSz() : 112 | cols(0), rows(0) 113 | { 114 | } 115 | __kf_hdevice__ 116 | PtrStepSz(int rows_arg, int cols_arg, T* data_arg, size_t step_arg) : 117 | PtrStep(data_arg, step_arg), cols(cols_arg), rows(rows_arg) 118 | { 119 | } 120 | 121 | int cols; 122 | int rows; 123 | }; 124 | } 125 | 126 | namespace device 127 | { 128 | using kfusion::cuda::PtrSz; 129 | using kfusion::cuda::PtrStep; 130 | using kfusion::cuda::PtrStepSz; 131 | } 132 | } 133 | 134 | namespace kf = kfusion; 135 | 136 | #endif 137 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/projective_icp.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KFUSION_PROJECTIVE_ICP_H_ 2 | #define KFUSION_PROJECTIVE_ICP_H_ 3 | 4 | #include 5 | 6 | namespace kfusion 7 | { 8 | namespace cuda 9 | { 10 | class ProjectiveICP 11 | { 12 | public: 13 | enum 14 | { 15 | MAX_PYRAMID_LEVELS = 4 16 | }; 17 | 18 | typedef std::vector DepthPyr; 19 | typedef std::vector PointsPyr; 20 | typedef std::vector NormalsPyr; 21 | 22 | ProjectiveICP(); 23 | virtual ~ProjectiveICP(); 24 | 25 | float getDistThreshold() const; 26 | void setDistThreshold(float distance); 27 | 28 | float getAngleThreshold() const; 29 | void setAngleThreshold(float angle); 30 | 31 | void setIterationsNum(const 
std::vector& iters); 32 | int getUsedLevelsNum() const; 33 | 34 | virtual bool estimateTransform(Affine3f& affine, const Intr& intr, const Frame& curr, const Frame& prev); 35 | 36 | /** The function takes masked depth, i.e. it assumes for performance reasons that 37 | * "if depth(y,x) is not zero, then normals(y,x) surely is not qnan" */ 38 | virtual bool estimateTransform(Affine3f& affine, const Intr& intr, const DepthPyr& dcurr, const NormalsPyr ncurr, const DepthPyr dprev, const NormalsPyr nprev); 39 | virtual bool estimateTransform(const Affine3f& affine, Affine3f& correctedAffine, const Intr& intr, const PointsPyr& vcurr, const NormalsPyr ncurr, const PointsPyr vprev, const NormalsPyr nprev); 40 | 41 | //static Vec3f rodrigues2(const Mat3f& matrix); 42 | private: 43 | std::vector iters_; 44 | float angle_thres_; 45 | float dist_thres_; 46 | DeviceArray2D buffer_; 47 | 48 | struct StreamHelper; 49 | cv::Ptr shelp_; 50 | }; 51 | } 52 | } 53 | #endif 54 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/texture_binder.hpp: -------------------------------------------------------------------------------- 1 | #ifndef TEXTURE_BINDER_H_ 2 | #define TEXTURE_BINDER_H_ 3 | 4 | #include 5 | #include 6 | 7 | namespace kfusion 8 | { 9 | namespace cuda 10 | { 11 | class TextureBinder 12 | { 13 | public: 14 | template 15 | TextureBinder(const DeviceArray2D& arr, const struct texture& tex) : 16 | texref(&tex) 17 | { 18 | cudaChannelFormatDesc desc = cudaCreateChannelDesc(); 19 | cudaSafeCall(cudaBindTexture2D(0, tex, arr.ptr(), desc, arr.cols(), arr.rows(), arr.step())); 20 | } 21 | 22 | template 23 | TextureBinder(const DeviceArray& arr, const struct texture &tex) : 24 | texref(&tex) 25 | { 26 | cudaChannelFormatDesc desc = cudaCreateChannelDesc(); 27 | cudaSafeCall(cudaBindTexture(0, tex, arr.ptr(), desc, arr.sizeBytes())); 28 | } 29 | 30 | template 31 | TextureBinder(const PtrStepSz& arr, const struct texture& 
tex) : 32 | texref(&tex) 33 | { 34 | cudaChannelFormatDesc desc = cudaCreateChannelDesc(); 35 | cudaSafeCall(cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step)); 36 | } 37 | 38 | template 39 | TextureBinder(const A& arr, const struct texture& tex, const cudaChannelFormatDesc& desc) : 40 | texref(&tex) 41 | { 42 | cudaSafeCall(cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step)); 43 | } 44 | 45 | template 46 | TextureBinder(const PtrSz& arr, const struct texture &tex) : 47 | texref(&tex) 48 | { 49 | cudaChannelFormatDesc desc = cudaCreateChannelDesc(); 50 | cudaSafeCall(cudaBindTexture(0, tex, arr.data, desc, arr.size * arr.elemSize())); 51 | } 52 | 53 | ~TextureBinder() 54 | { 55 | cudaSafeCall(cudaUnbindTexture(texref)); 56 | } 57 | private: 58 | const struct textureReference *texref; 59 | }; 60 | } 61 | 62 | namespace device 63 | { 64 | using kfusion::cuda::TextureBinder; 65 | } 66 | } 67 | #endif 68 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/cuda/tsdf_volume.hpp: -------------------------------------------------------------------------------- 1 | #ifndef TSDF_VOLUME_H_ 2 | #define TSDF_VOLUME_H_ 3 | 4 | #include 5 | 6 | namespace kfusion 7 | { 8 | namespace cuda 9 | { 10 | class KF_EXPORTS TsdfVolume 11 | { 12 | public: 13 | TsdfVolume(const cv::Vec3i& dims); 14 | virtual ~TsdfVolume(); 15 | 16 | void create(const Vec3i& dims); 17 | 18 | Vec3i getDims() const; 19 | Vec3f getVoxelSize() const; 20 | 21 | const CudaData data() const; 22 | CudaData data(); 23 | 24 | Vec3f getSize() const; 25 | void setSize(const Vec3f& size); 26 | 27 | float getTruncDist() const; 28 | void setTruncDist(float distance); 29 | 30 | int getMaxWeight() const; 31 | void setMaxWeight(int weight); 32 | 33 | Affine3f getPose() const; 34 | void setPose(const Affine3f& pose); 35 | 36 | float getRaycastStepFactor() const; 37 | void setRaycastStepFactor(float factor); 38 | 39 | float 
getGradientDeltaFactor() const; 40 | void setGradientDeltaFactor(float factor); 41 | 42 | Vec3i getGridOrigin() const; 43 | void setGridOrigin(const Vec3i& origin); 44 | 45 | virtual void clear(); 46 | virtual void applyAffine(const Affine3f& affine); 47 | virtual void integrate(const Dists& dists, const Affine3f& camera_motion_tform, const Intr& intr); 48 | virtual void raycast(const Affine3f& camera_pose, const Intr& intr, Depth& depth, Normals& normals); 49 | virtual void raycast(const Affine3f& camera_pose, const Intr& intr, Cloud& points, Normals& normals); 50 | 51 | void swap(CudaData& data); 52 | 53 | DeviceArray fetchCloud(DeviceArray& cloud_buffer) const; 54 | void fetchNormals(const DeviceArray& cloud, DeviceArray& normals) const; 55 | 56 | struct Entry 57 | { 58 | typedef unsigned short half; 59 | 60 | half tsdf; 61 | unsigned short weight; 62 | 63 | static float half2float(half value); 64 | static half float2half(float value); 65 | }; 66 | private: 67 | CudaData data_; 68 | 69 | float trunc_dist_; 70 | int max_weight_; 71 | Vec3i dims_; 72 | Vec3f size_; 73 | Affine3f pose_; 74 | 75 | float gradient_delta_factor_; 76 | float raycast_step_factor_; 77 | }; 78 | } 79 | } 80 | #endif 81 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/exports.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KFUSION_EXPORTS_H_ 2 | #define KFUSION_EXPORTS_H_ 3 | 4 | #if (defined WIN32 || defined _WIN32 || defined WINCE) && defined KFUSION_API_EXPORTS 5 | #define KF_EXPORTS __declspec(dllexport) 6 | #else 7 | #define KF_EXPORTS 8 | #endif 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/internal.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KFUSION_INTERNAL_H_ 2 | #define KFUSION_INTERNAL_H_ 3 | 4 | #include "yak/kfusion/cuda/device_array.hpp" 
5 | #include "yak/kfusion/safe_call.hpp" 6 | 7 | //#define USE_DEPTH 8 | 9 | namespace kfusion 10 | { 11 | namespace device 12 | { 13 | typedef float4 Normal; 14 | typedef float4 Point; 15 | 16 | typedef unsigned short ushort; 17 | typedef unsigned char uchar; 18 | 19 | typedef PtrStepSz Dists; 20 | typedef DeviceArray2D Depth; 21 | typedef DeviceArray2D Normals; 22 | typedef DeviceArray2D Points; 23 | typedef DeviceArray2D Image; 24 | 25 | typedef int3 Vec3i; 26 | typedef float3 Vec3f; 27 | struct Mat3f 28 | { 29 | float3 data[3]; 30 | }; 31 | struct Aff3f 32 | { 33 | Mat3f R; 34 | Vec3f t; 35 | }; 36 | 37 | struct TsdfVolume 38 | { 39 | public: 40 | typedef ushort2 elem_type; 41 | 42 | elem_type * const data; 43 | const int3 dims; 44 | const float3 voxel_size; 45 | const float trunc_dist; 46 | const int max_weight; 47 | 48 | TsdfVolume(elem_type* data, int3 dims, float3 voxel_size, float trunc_dist, int max_weight); 49 | //TsdfVolume(const TsdfVolume&); 50 | 51 | __kf_device__ 52 | elem_type* operator()(int x, int y, int z);__kf_device__ 53 | const elem_type* operator()(int x, int y, int z) const;__kf_device__ 54 | elem_type* beg(int x, int y) const;__kf_device__ 55 | elem_type* zstep(elem_type * const ptr) const; 56 | private: 57 | TsdfVolume& operator=(const TsdfVolume&); 58 | }; 59 | 60 | struct Projector 61 | { 62 | float2 f, c; 63 | Projector() 64 | { 65 | } 66 | Projector(float fx, float fy, float cx, float cy);__kf_device__ 67 | float2 operator()(const float3& p) const; 68 | }; 69 | 70 | struct Reprojector 71 | { 72 | Reprojector() 73 | { 74 | } 75 | Reprojector(float fx, float fy, float cx, float cy); 76 | float2 finv, c;__kf_device__ 77 | float3 operator()(int x, int y, float z) const; 78 | }; 79 | 80 | struct ComputeIcpHelper 81 | { 82 | struct Policy; 83 | struct PageLockHelper 84 | { 85 | float* data; 86 | PageLockHelper(); 87 | ~PageLockHelper(); 88 | }; 89 | 90 | float min_cosine; 91 | float dist2_thres; 92 | 93 | Aff3f aff; 94 | 95 | float rows, 
cols; 96 | float2 f, c, finv; 97 | 98 | PtrStep dcurr; 99 | PtrStep ncurr; 100 | PtrStep vcurr; 101 | 102 | ComputeIcpHelper(float dist_thres, float angle_thres); 103 | void setLevelIntr(int level_index, float fx, float fy, float cx, float cy); 104 | 105 | void operator()(const Depth& dprev, const Normals& nprev, DeviceArray2D& buffer, float* data, cudaStream_t stream); 106 | void operator()(const Points& vprev, const Normals& nprev, DeviceArray2D& buffer, float* data, cudaStream_t stream); 107 | 108 | static void allocate_buffer(DeviceArray2D& buffer, int partials_count = -1); 109 | 110 | //private: 111 | __kf_device__ 112 | int find_coresp(int x, int y, float3& n, float3& d, float3& s) const;__kf_device__ 113 | void partial_reduce(const float row[7], PtrStep& partial_buffer) const;__kf_device__ 114 | float2 proj(const float3& p) const;__kf_device__ 115 | float3 reproj(float x, float y, float z) const; 116 | }; 117 | 118 | //tsdf volume functions 119 | void clear_volume(TsdfVolume volume); 120 | void integrate(const Dists& depth, TsdfVolume& volume, const Aff3f& aff, const Projector& proj); 121 | 122 | void raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj, Depth& depth, Normals& normals, float step_factor, float delta_factor); 123 | 124 | void raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj, Points& points, Normals& normals, float step_factor, float delta_factor); 125 | __kf_device__ ushort2 pack_tsdf(float tsdf, int weight); 126 | __kf_device__ float unpack_tsdf(ushort2 value, int& weight); 127 | __kf_device__ float unpack_tsdf(ushort2 value); 128 | 129 | //image proc functions 130 | void compute_dists(const Depth& depth, Dists dists, float2 f, float2 c); 131 | 132 | void truncateDepth(Depth& depth, float max_dist /*meters*/); 133 | void bilateralFilter(const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth); 134 | void depthPyr(const 
Depth& source, Depth& pyramid, float sigma_depth); 135 | 136 | void resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out); 137 | void resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out); 138 | 139 | void computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals); 140 | void computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals); 141 | 142 | void renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image); 143 | void renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image); 144 | void renderTangentColors(const Normals& normals, Image& image); 145 | 146 | //exctraction functionality 147 | size_t extractCloud(const TsdfVolume& volume, const Aff3f& aff, PtrSz output); 148 | void extractNormals(const TsdfVolume& volume, const PtrSz& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output); 149 | 150 | struct float8 151 | { 152 | float x, y, z, w, c1, c2, c3, c4; 153 | }; 154 | struct float12 155 | { 156 | float x, y, z, w, normal_x, normal_y, normal_z, n4, c1, c2, c3, c4; 157 | }; 158 | void mergePointNormal(const DeviceArray& cloud, const DeviceArray& normals, const DeviceArray& output); 159 | } 160 | } 161 | #endif 162 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/kinfu.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KINFU_H_ 2 | #define KINFU_H_ 3 | 4 | #include "yak/kfusion/types.hpp" 5 | #include "yak/kfusion/cuda/tsdf_volume.hpp" 6 | #include "yak/kfusion/cuda/projective_icp.hpp" 7 | #include 8 | #include 9 | 10 | namespace kfusion 11 | { 12 | namespace cuda 13 | { 14 | KF_EXPORTS int getCudaEnabledDeviceCount(); 15 | 
KF_EXPORTS void setDevice(int device); 16 | KF_EXPORTS std::string getDeviceName(int device); 17 | KF_EXPORTS bool checkIfPreFermiGPU(int device); 18 | KF_EXPORTS void printCudaDeviceInfo(int device); 19 | KF_EXPORTS void printShortCudaDeviceInfo(int device); 20 | } 21 | 22 | struct KF_EXPORTS KinFuParams 23 | { 24 | static KinFuParams default_params(); 25 | 26 | int cols; //pixels 27 | int rows; //pixels 28 | 29 | Intr intr; //Camera intrinsic parameters 30 | 31 | Vec3i volume_dims; //number of voxels 32 | // Vec3f volume_size; //meters 33 | float volume_resolution; // meters per voxel 34 | 35 | Affine3f volume_pose; //meters, inital pose 36 | 37 | float bilateral_sigma_depth; //meters 38 | float bilateral_sigma_spatial; //pixels 39 | int bilateral_kernel_size; //pixels 40 | 41 | float icp_truncate_depth_dist; //meters 42 | float icp_dist_thres; //meters 43 | float icp_angle_thres; //radians 44 | std::vector icp_iter_num; //iterations for level index 0,1,..,3 45 | 46 | float tsdf_min_camera_movement; //meters, integrate only if exceeds 47 | float tsdf_trunc_dist; //meters; 48 | int tsdf_max_weight; //frames 49 | 50 | float raycast_step_factor; // in voxel sizes 51 | float gradient_delta_factor; // in voxel sizes 52 | 53 | Vec3f light_pose; //meters 54 | 55 | bool use_pose_hints; 56 | bool use_icp; 57 | bool update_via_sensor_motion; 58 | 59 | }; 60 | 61 | class KF_EXPORTS KinFu 62 | { 63 | public: 64 | typedef cv::Ptr Ptr; 65 | 66 | KinFu(const KinFuParams& params); 67 | 68 | const KinFuParams& params() const; 69 | KinFuParams& params(); 70 | 71 | const cuda::TsdfVolume& tsdf() const; 72 | cuda::TsdfVolume& tsdf(); 73 | 74 | const cuda::ProjectiveICP& icp() const; 75 | cuda::ProjectiveICP& icp(); 76 | 77 | void resetPose(); 78 | void resetVolume(); 79 | 80 | bool operator()(const Affine3f& inputCameraMotion, const Affine3f& currentCameraPose, const Affine3f& previousCameraPose, const cuda::Depth& depth, const cuda::Image& image = cuda::Image()); 81 | 82 | void 
renderImage(cuda::Image& image, int flags = 0); 83 | void renderImage(cuda::Image& image, const Affine3f& pose, int flags = 0); 84 | 85 | Affine3f getCameraPose(int time = -1) const; 86 | private: 87 | void allocate_buffers(); 88 | 89 | int frame_counter_; 90 | KinFuParams params_; 91 | 92 | // Sensor pose, currenly calculated via ICP 93 | std::vector poses_; 94 | 95 | cuda::Dists dists_; 96 | cuda::Frame curr_, prev_; 97 | 98 | cuda::Cloud points_; 99 | cuda::Normals normals_; 100 | cuda::Depth depths_; 101 | 102 | cv::Ptr volume_; 103 | cv::Ptr icp_; 104 | }; 105 | } 106 | #endif 107 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/precomp.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KFUSION_PRECOMP_H_ 2 | #define KFUSION_PRECOMP_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "internal.hpp" 10 | #include 11 | #include "vector_functions.h" 12 | 13 | namespace kfusion 14 | { 15 | template 16 | inline D device_cast(const S& source) 17 | { 18 | return *reinterpret_cast(source.val); 19 | } 20 | 21 | template<> 22 | inline device::Aff3f device_cast(const Affine3f& source) 23 | { 24 | device::Aff3f aff; 25 | Mat3f R = source.rotation(); 26 | Vec3f t = source.translation(); 27 | aff.R = device_cast(R); 28 | aff.t = device_cast(t); 29 | return aff; 30 | } 31 | } 32 | #endif 33 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/safe_call.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SAFE_CALL_H_ 2 | #define SAFE_CALL_H_ 3 | 4 | #include "cuda_runtime_api.h" 5 | 6 | namespace kfusion 7 | { 8 | namespace cuda 9 | { 10 | void error(const char *error_string, const char *file, const int line, const char *func); 11 | } 12 | } 13 | 14 | #if defined(__GNUC__) 15 | #define cudaSafeCall(expr) kfusion::cuda::___cudaSafeCall(expr, 
__FILE__, __LINE__, __func__) 16 | #else /* defined(__CUDACC__) || defined(__MSVC__) */ 17 | #define cudaSafeCall(expr) kfusion::cuda::___cudaSafeCall(expr, __FILE__, __LINE__) 18 | #endif 19 | 20 | namespace kfusion 21 | { 22 | namespace cuda 23 | { 24 | static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "") 25 | { 26 | if (cudaSuccess != err) 27 | error(cudaGetErrorString(err), file, line, func); 28 | } 29 | 30 | static inline int divUp(int total, int grain) 31 | { 32 | return (total + grain - 1) / grain; 33 | } 34 | } 35 | 36 | namespace device 37 | { 38 | using kfusion::cuda::divUp; 39 | } 40 | } 41 | 42 | #endif 43 | -------------------------------------------------------------------------------- /yak/include/yak/kfusion/types.hpp: -------------------------------------------------------------------------------- 1 | #ifndef KINFU_TYPES_H_ 2 | #define KINFU_TYPES_H_ 3 | 4 | #include "yak/kfusion/cuda/device_array.hpp" 5 | #include 6 | #include 7 | #include 8 | 9 | struct CUevent_st; 10 | 11 | namespace kfusion 12 | { 13 | typedef cv::Matx33f Mat3f; 14 | typedef cv::Vec3f Vec3f; 15 | typedef cv::Vec3i Vec3i; 16 | typedef cv::Affine3f Affine3f; 17 | 18 | struct KF_EXPORTS Intr 19 | { 20 | float fx, fy, cx, cy; 21 | 22 | Intr(); 23 | Intr(float fx, float fy, float cx, float cy); 24 | Intr operator()(int level_index) const; 25 | }; 26 | 27 | KF_EXPORTS std::ostream& operator <<(std::ostream& os, const Intr& intr); 28 | 29 | struct Point 30 | { 31 | union 32 | { 33 | float data[4]; 34 | struct 35 | { 36 | float x, y, z; 37 | }; 38 | }; 39 | }; 40 | 41 | typedef Point Normal; 42 | 43 | struct RGB 44 | { 45 | union 46 | { 47 | struct 48 | { 49 | unsigned char b, g, r; 50 | }; 51 | int bgra; 52 | }; 53 | }; 54 | 55 | struct PixelRGB 56 | { 57 | unsigned char r, g, b; 58 | }; 59 | 60 | namespace cuda 61 | { 62 | typedef cuda::DeviceMemory CudaData; 63 | typedef cuda::DeviceArray2D Depth; 64 | typedef 
cuda::DeviceArray2D Dists; 65 | typedef cuda::DeviceArray2D Image; 66 | typedef cuda::DeviceArray2D Normals; 67 | typedef cuda::DeviceArray2D Cloud; 68 | 69 | struct Frame 70 | { 71 | bool use_points; 72 | 73 | std::vector depth_pyr; 74 | std::vector points_pyr; 75 | std::vector normals_pyr; 76 | }; 77 | } 78 | 79 | inline float deg2rad(float alpha) 80 | { 81 | return alpha * 0.017453293f; 82 | } 83 | 84 | struct KF_EXPORTS ScopeTime 85 | { 86 | const char* name; 87 | double start; 88 | ScopeTime(const char *name); 89 | ~ScopeTime(); 90 | }; 91 | 92 | struct KF_EXPORTS SampledScopeTime 93 | { 94 | public: 95 | enum 96 | { 97 | EACH = 33 98 | }; 99 | SampledScopeTime(double& time_ms); 100 | ~SampledScopeTime(); 101 | private: 102 | double getTime(); 103 | SampledScopeTime(const SampledScopeTime&); 104 | SampledScopeTime& operator=(const SampledScopeTime&); 105 | 106 | double& time_ms_; 107 | double start; 108 | }; 109 | 110 | } 111 | #endif 112 | -------------------------------------------------------------------------------- /yak/include/yak/ros/kinfu_server.h: -------------------------------------------------------------------------------- 1 | /* 2 | * KinfuServer.h 3 | * 4 | * Created on: Jun 3, 2015 5 | * Author: mklingen 6 | */ 7 | 8 | #ifndef KINFUSERVER_H_ 9 | #define KINFUSERVER_H_ 10 | 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | #include 33 | #include 34 | #include 35 | 36 | #include 37 | #include 38 | 39 | #include "tf/transform_datatypes.h" 40 | #include "Eigen/Core" 41 | #include "Eigen/Geometry" 42 | #include 43 | #include 44 | 45 | namespace kfusion 46 | { 47 | 48 | class KinFuServer 49 | { 50 | public: 51 | KinFuServer(RosRGBDCamera* camera, const std::string& fixedFrame, const std::string& camFrame) ; 52 | 53 | template void 
LoadParam(T& value, const std::string& name) 54 | { 55 | T newValue = camera_->nodeHandle.param(name, value); 56 | value = newValue; 57 | } 58 | 59 | inline void LoadParam(float& value, const std::string& name) 60 | { 61 | double curr = value; 62 | curr = camera_->nodeHandle.param(name, curr); 63 | value = static_cast(curr); 64 | std::cout << name << ": " << value << std::endl; 65 | } 66 | 67 | // Should be called before running the server. 68 | inline void Initialize() 69 | { 70 | ConnectCamera(); 71 | LoadParams(); 72 | } 73 | 74 | // Does one full step of grabbing a camera image, 75 | // then tracking against the model, followed by 76 | // publishing relevant data to ROS. 77 | void Update(); 78 | 79 | // Publish a raycasted image of the scene to ROS 80 | void PublishRaycastImage(); 81 | // Connects to the camera, and runs until told to stop. 82 | bool ExecuteBlocking(); 83 | // Connects to the ROS depth camera, blocking. 84 | bool ConnectCamera(); 85 | // Creates the KinFu server from the ROS parameters 86 | bool LoadParams(); 87 | // Publishes the current camera transform. 88 | bool PublishTransform(); 89 | // Does a single KinFu step given a depth and (optional) color image. 
90 | bool KinFu(const Affine3f& cameraMotionHint, const Affine3f& currentCameraPoseHint, const Affine3f& previousCameraPoseHint, const cv::Mat& depth, const cv::Mat& color); 91 | 92 | inline bool ShouldExit() const { return should_exit_; } 93 | inline void SetExit(bool value) { should_exit_ = value; } 94 | inline void Exit() { should_exit_ = true; } 95 | inline KinFu::Ptr GetKinfu() { return kinfu_; } 96 | inline void SetKinfu(KinFu::Ptr value) { kinfu_ = value; } 97 | inline const RosRGBDCamera* GetCamera() { return camera_; } 98 | inline void SetCamera(RosRGBDCamera* value) { camera_ = value; } 99 | inline const cv::Mat& GetViewHost() { return viewHost_; } 100 | inline const cuda::Image& GetViewDevice() { return viewDevice_; } 101 | inline const cuda::Depth& GetDepthDevice() { return depthDevice_; } 102 | inline const std::string& GetBaseFrame() { return baseFrame_; } 103 | inline const std::string& GetCameraFrame() { return cameraFrame_; } 104 | 105 | // Service calls 106 | 107 | bool GetTSDF(yak::GetTSDFRequest& req, yak::GetTSDFResponse& res); 108 | 109 | bool GetSparseTSDF(yak::GetSparseTSDFRequest& req, yak::GetSparseTSDFResponse& res); 110 | 111 | bool TruncateTSDF(std::vector &data, std::vector &dataOut, std::vector &rows, std::vector &cols, std::vector &sheets, int numVoxelsX, int numVoxelsY, int numVoxelsZ); 112 | 113 | bool GetTSDFData(uint32_t input, half_float::half& voxelValue, uint16_t& voxelWeight); 114 | 115 | Affine3f TransformToAffine(tf::Transform input); 116 | 117 | // tf::Transform SwitchToVolumeFrame(tf::Transform input); 118 | 119 | protected: 120 | bool should_exit_; 121 | KinFu::Ptr kinfu_; 122 | RosRGBDCamera* camera_; 123 | cv::Mat viewHost_; 124 | cuda::Image viewDevice_; 125 | cuda::Depth depthDevice_; 126 | ros::Publisher raycastImgPublisher_; 127 | std::string baseFrame_; 128 | std::string cameraFrame_; 129 | tf::TransformBroadcaster tfBroadcaster_; 130 | tf::TransformListener tfListener_; 131 | 132 | //tf::Transform 
camera_to_tool0_; 133 | 134 | tf::StampedTransform current_volume_to_sensor_transform_; 135 | tf::StampedTransform previous_volume_to_sensor_transform_; 136 | 137 | cv::Mat lastDepth_; 138 | cv::Mat lastColor_; 139 | Affine3f currentCameraMotionHint_; 140 | Affine3f currentCameraPoseHint_; 141 | 142 | ros::ServiceServer get_tsdf_server_; 143 | ros::ServiceServer get_sparse_tsdf_server_; 144 | ros::ServiceServer reset_volume_server_; 145 | 146 | bool use_pose_hints_; 147 | }; 148 | 149 | } /* namespace kfusion */ 150 | #endif /* KINFUSERVER_H_ */ 151 | -------------------------------------------------------------------------------- /yak/include/yak/ros/ros_rgbd_camera.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * ros_rgbd_camera.h 3 | * 4 | * Created on: Jun 2, 2015 5 | * Author: mklingen 6 | */ 7 | 8 | #ifndef ROS_RGBD_CAMERA_H_ 9 | #define ROS_RGBD_CAMERA_H_ 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include "yak/kfusion/types.hpp" 18 | 19 | namespace kfusion 20 | { 21 | class RosRGBDCamera 22 | { 23 | public: 24 | RosRGBDCamera(const ros::NodeHandle& handle); 25 | virtual ~RosRGBDCamera(); 26 | 27 | void SubscribeDepth(const std::string& topic); 28 | void SubscribeRGB(const std::string& topic); 29 | 30 | void DepthCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo); 31 | void RGBCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo); 32 | 33 | bool Grab(cv::Mat& depth, cv::Mat& image); 34 | 35 | kfusion::Intr GetDepthIntrinsics(); 36 | 37 | inline int GetDepthWidth() 38 | { 39 | return lastDepthInfo->width; 40 | } 41 | inline int GetDepthHeight() 42 | { 43 | return lastDepthInfo->height; 44 | } 45 | 46 | ros::NodeHandle nodeHandle; 47 | 48 | image_transport::ImageTransport transport; 49 | image_transport::CameraSubscriber depthSubscriber; 50 | 
image_transport::CameraSubscriber rgbSubscriber; 51 | 52 | sensor_msgs::ImageConstPtr lastDepthImage; 53 | sensor_msgs::CameraInfoConstPtr lastDepthInfo; 54 | sensor_msgs::ImageConstPtr lastRGBImage; 55 | sensor_msgs::CameraInfoConstPtr lastRGBInfo; 56 | 57 | bool hasNewDepth; 58 | bool hasNewRGB; 59 | bool subscribedDepth; 60 | bool subscribedRGB; 61 | 62 | ros::Time lastImageTime; 63 | }; 64 | 65 | } /* namespace kfusion */ 66 | #endif /* ROS_RGBD_CAMERA_H_ */ 67 | -------------------------------------------------------------------------------- /yak/include/yak/ros/ros_sim_camera.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * ros_sim_camera.h 3 | * 4 | * Created on: Jun 13, 2017 5 | * Author: jschornak 6 | */ 7 | 8 | #ifndef ROS_SIM_CAMERA_H_ 9 | #define ROS_SIM_CAMERA_H_ 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | namespace kfusion 20 | { 21 | class RosSimCamera 22 | { 23 | public: 24 | RosSimCamera(const ros::NodeHandle& handle); 25 | virtual ~RosSimCamera(); 26 | 27 | void SubscribeDepth(const std::string& topic); 28 | void SubscribeRGB(const std::string& topic); 29 | 30 | void DepthCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo); 31 | void RGBCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo); 32 | 33 | bool Grab(cv::Mat& depth, cv::Mat& image); 34 | 35 | kfusion::Intr GetDepthIntrinsics(); 36 | 37 | inline int GetDepthWidth() 38 | { 39 | return lastDepthInfo->width; 40 | } 41 | inline int GetDepthHeight() 42 | { 43 | return lastDepthInfo->height; 44 | } 45 | 46 | ros::NodeHandle nodeHandle; 47 | 48 | image_transport::ImageTransport transport; 49 | image_transport::CameraSubscriber depthSubscriber; 50 | image_transport::CameraSubscriber rgbSubscriber; 51 | 52 | sensor_msgs::ImageConstPtr lastDepthImage; 53 | sensor_msgs::CameraInfoConstPtr 
lastDepthInfo; 54 | sensor_msgs::ImageConstPtr lastRGBImage; 55 | sensor_msgs::CameraInfoConstPtr lastRGBInfo; 56 | 57 | bool hasNewDepth; 58 | bool hasNewRGB; 59 | bool subscribedDepth; 60 | bool subscribedRGB; 61 | }; 62 | 63 | } /* namespace kfusion */ 64 | #endif /* ROS_SIM_CAMERA_H_ */ 65 | -------------------------------------------------------------------------------- /yak/launch/launch_ensenso.launch: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 11 | 12 | # The node reports the transform between the fixed frame and 13 | # a given camera frame. 14 | fixed_frame: "/base_link" 15 | camera_frame: "/ensenso_sensor_optical_frame" 16 | 17 | use_icp: True 18 | use_pose_hints: True 19 | update_via_sensor_motion: False 20 | 21 | # The bilateral kernel smooths the depth image and 22 | # filters out noise around the edges 23 | bilateral_kernel_size: 4 24 | bilateral_sigma_depth: 0.04 25 | bilateral_sigma_spatial: 4.5 26 | 27 | # These are parameters for the point-to-plane ICP. The 28 | # thresholds prevent ICP from converging to far away/inconsistent 29 | # data. 30 | icp_angle_thresh: 0.25 31 | icp_dist_thresh: 0.05 32 | icp_truncate_depth_dist: 0 33 | 34 | # ICP gets performed over an image pyramid of depth. These are the 35 | # number of iterations performed at each pyramid size. 36 | icp_iter_num : [10, 5, 4, 0] 37 | 38 | # For raycasting display, this is a multiplier on how far the rays 39 | # march each iteration, (in multiples of voxels?) Smaller numbers 40 | # are more accurate but slower. 41 | raycast_step_factor: 0.05 42 | 43 | # This is the step size, in voxels, used to compute gradients 44 | # in the TSDF. Gradients are linearly interpolated between voxels. 45 | gradient_delta_factor: 0.5 46 | 47 | # The truncated signed distance field stores the dense model. 48 | # The maximum weight value of a voxel (integer). 
Higher values 49 | # cause the TSDF to take in more readings before converging, 50 | # which will result in a smoother reconstruction 51 | tsdf_max_weight: 64 52 | 53 | # Kinfu will only fuse data into the TSDF when the camera movement 54 | # exceeds this distance threshold. 55 | tsdf_min_camera_movement: 0 56 | 57 | # Distance away from surface (in meters) the TSDF will be computed 58 | # Higher values result in smoother reconstructions, but less sharp 59 | # corners and edges. Lower values result in more accurate 60 | # reconstructions, but too low and noise becomes a major issue. 61 | tsdf_trunc_dist: 0.04 62 | 63 | # The size of the TSDF volume in voxels 64 | volume_dims_x: 512 65 | volume_dims_y: 512 66 | volume_dims_z: 512 67 | 68 | volume_resolution: 0.001 69 | 70 | 71 | # The position of the topmost corner of the TSDF volume in meters 72 | volume_pos_x: 0.5 73 | volume_pos_y: 0.5 74 | volume_pos_z: -1.0 75 | 76 | use_icp: False 77 | use_pose_hints: True 78 | 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /yak/launch/launch_ensenso_big.launch: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | use_icp: True 15 | use_pose_hints: False 16 | update_via_sensor_motion: False 17 | 18 | # The node reports the transform between the fixed frame and 19 | # a given camera frame. 20 | fixed_frame: "/map" 21 | camera_frame: "/camera_depth_optical_frame" 22 | 23 | # The bilateral kernel smooths the depth image and 24 | # filters out noise around the edges 25 | bilateral_kernel_size: 10 26 | bilateral_sigma_depth: 0.04 27 | bilateral_sigma_spatial: 4.5 28 | 29 | # These are parameters for the point-to-plane ICP. The 30 | # thresholds prevent ICP from converging to far away/inconsistent 31 | # data. 
32 | icp_angle_thresh: 0.25 33 | icp_dist_thresh: 0.05 34 | icp_truncate_depth_dist: 0 35 | 36 | # ICP gets performed over an image pyramid of depth. These are the 37 | # number of iterations performed at each pyramid size. 38 | icp_iter_num : [10, 5, 4, 0] 39 | 40 | # For raycasting display, this is a multiplier on how far the rays 41 | # march each iteration, (in multiples of voxels?) Smaller numbers 42 | # are more accurate but slower. 43 | raycast_step_factor: 0.05 44 | 45 | # This is the step size, in voxels, used to compute gradients 46 | # in the TSDF. Gradients are linearly interpolated between voxels. 47 | gradient_delta_factor: 0.5 48 | 49 | # The truncated signed distance field stores the dense model. 50 | # The maximum weight value of a voxel (integer). Higher values 51 | # cause the TSDF to take in more readings before converging, 52 | # which will result in a smoother reconstruction 53 | tsdf_max_weight: 64 54 | 55 | # Kinfu will only fuse data into the TSDF when the camera movement 56 | # exceeds this distance threshold. 57 | tsdf_min_camera_movement: 0 58 | 59 | # Distance away from surface (in meters) the TSDF will be computed 60 | # Higher values result in smoother reconstructions, but less sharp 61 | # corners and edges. Lower values result in more accurate 62 | # reconstructions, but too low and noise becomes a major issue. 
63 | tsdf_trunc_dist: 0.005 64 | 65 | # The size of the TSDF volume in voxels 66 | volume_dims_x: 704 67 | volume_dims_y: 1024 68 | volume_dims_z: 1024 69 | 70 | volume_resolution: 0.0005 71 | 72 | # The position of the topmost corner of the TSDF volume in meters 73 | volume_pos_x: 0.25 74 | volume_pos_y: 0.25 75 | volume_pos_z: 0.1 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /yak/launch/launch_sim.launch: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 11 | 12 | use_icp: True 13 | use_pose_hints: False 14 | update_via_sensor_motion: True 15 | # The node reports the transform between the fixed frame and 16 | # a given camera frame. 17 | fixed_frame: "/map" 18 | camera_frame: "/camera_depth_optical_frame" 19 | # The bilateral kernel smooths the depth image and 20 | # filters out noise around the edges 21 | bilateral_kernel_size: 7 22 | bilateral_sigma_depth: 0.04 23 | bilateral_sigma_spatial: 4.5 24 | # These are parameters for the point-to-plane ICP. The 25 | # thresholds prevent ICP from converging to far away/inconsistent 26 | # data. 27 | icp_angle_thresh: 0.523599 28 | icp_dist_thresh: 0.1 29 | icp_truncate_depth_dist: 0 30 | # ICP gets performed over an image pyramid of depth. These are the 31 | # number of iterations performed at each pyramid size. 32 | icp_iter_num : [10, 5, 4, 0] 33 | # For raycasting display, this is a multiplier on how far the rays 34 | # march each iteration, (in multiples of voxels?) Smaller numbers 35 | # are more accurate but slower. 36 | raycast_step_factor: 0.05 37 | # This is the step size, in voxels, used to compute gradients 38 | # in the TSDF. Gradients are linearly interpolated between voxels. 39 | gradient_delta_factor: 0.5 40 | # The truncated signed distance field stores the dense model. 41 | # The maximum weight value of a voxel (integer). 
Higher values 42 | # cause the TSDF to take in more readings before converging, 43 | # which will result in a smoother reconstruction 44 | tsdf_max_weight: 64 45 | # Kinfu will only fuse data into the TSDF when the camera movement 46 | # exceeds this distance threshold. 47 | tsdf_min_camera_movement: 0 48 | # Distance away from surface (in meters) the TSDF will be computed 49 | # Higher values result in smoother reconstructions, but less sharp 50 | # corners and edges. Lower values result in more accurate 51 | # reconstructions, but too low and noise becomes a major issue. 52 | tsdf_trunc_dist: 0.04 53 | # The size of the TSDF volume in voxels 54 | volume_dims_x: 512 55 | volume_dims_y: 512 56 | volume_dims_z: 512 57 | volume_resolution: 0.005 58 | # The position of the topmost corner of the TSDF volume in meters 59 | volume_pos_x: 0.25 60 | volume_pos_y: 0.25 61 | volume_pos_z: -0.5 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /yak/launch/launch_xtion.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | use_icp: True 15 | use_pose_hints: False 16 | update_via_sensor_motion: True 17 | 18 | # The node reports the transform between the fixed frame and 19 | # a given camera frame. 20 | fixed_frame: "/map" 21 | camera_frame: "/camera_depth_optical_frame" 22 | 23 | # The bilateral kernel smooths the depth image and 24 | # filters out noise around the edges 25 | bilateral_kernel_size: 4 26 | bilateral_sigma_depth: 0.04 27 | bilateral_sigma_spatial: 4.5 28 | 29 | # These are parameters for the point-to-plane ICP. The 30 | # thresholds prevent ICP from converging to far away/inconsistent 31 | # data. 32 | icp_angle_thresh: 0.523599 33 | icp_dist_thresh: 0.1 34 | icp_truncate_depth_dist: 0 35 | 36 | # ICP gets performed over an image pyramid of depth. These are the 37 | # number of iterations performed at each pyramid size. 
38 | icp_iter_num : [10, 5, 4, 0] 39 | 40 | # For raycasting display, this is a multiplier on how far the rays 41 | # march each iteration, (in multiples of voxels?) Smaller numbers 42 | # are more accurate but slower. 43 | raycast_step_factor: 0.05 44 | 45 | # This is the step size, in voxels, used to compute gradients 46 | # in the TSDF. Gradients are linearly interpolated between voxels. 47 | gradient_delta_factor: 0.5 48 | 49 | # The truncated signed distance field stores the dense model. 50 | # The maximum weight value of a voxel (integer). Higher values 51 | # cause the TSDF to take in more readings before converging, 52 | # which will result in a smoother reconstruction 53 | tsdf_max_weight: 64 54 | 55 | # Kinfu will only fuse data into the TSDF when the camera movement 56 | # exceeds this distance threshold. 57 | tsdf_min_camera_movement: 0 58 | 59 | # Distance away from surface (in meters) the TSDF will be computed 60 | # Higher values result in smoother reconstructions, but less sharp 61 | # corners and edges. Lower values result in more accurate 62 | # reconstructions, but too low and noise becomes a major issue. 63 | tsdf_trunc_dist: 0.0025 64 | 65 | # The size of the TSDF volume in voxels 66 | volume_dims_x: 512 67 | volume_dims_y: 512 68 | volume_dims_z: 512 69 | 70 | volume_resolution: 0.001 71 | 72 | # The position of the topmost corner of the TSDF volume in meters 73 | volume_pos_x: 0.25 74 | volume_pos_y: 0.25 75 | volume_pos_z: -0.5 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | output_path: "/home/joe/meshes" 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /yak/launch/launch_xtion_big.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 8 | 9 | 10 | 12 | 13 | use_icp: True 14 | use_pose_hints: False 15 | update_via_sensor_motion: True 16 | 17 | # The node reports the transform between the fixed frame and 18 | # a given camera frame. 
19 | fixed_frame: "/map" 20 | camera_frame: "/camera_depth_optical_frame" 21 | 22 | # The bilateral kernel smooths the depth image and 23 | # filters out noise around the edges 24 | bilateral_kernel_size: 4 25 | bilateral_sigma_depth: 0.04 26 | bilateral_sigma_spatial: 4.5 27 | 28 | # These are parameters for the point-to-plane ICP. The 29 | # thresholds prevent ICP from converging to far away/inconsistent 30 | # data. 31 | icp_angle_thresh: 0.523599 32 | icp_dist_thresh: 0.1 33 | icp_truncate_depth_dist: 0 34 | 35 | # ICP gets performed over an image pyramid of depth. These are the 36 | # number of iterations performed at each pyramid size. 37 | icp_iter_num : [10, 5, 4, 0] 38 | 39 | # For raycasting display, this is a multiplier on how far the rays 40 | # march each iteration, (in multiples of voxels?) Smaller numbers 41 | # are more accurate but slower. 42 | raycast_step_factor: 0.05 43 | 44 | # This is the step size, in voxels, used to compute gradients 45 | # in the TSDF. Gradients are linearly interpolated between voxels. 46 | gradient_delta_factor: 0.5 47 | 48 | # The truncated signed distance field stores the dense model. 49 | # The maximum weight value of a voxel (integer). Higher values 50 | # cause the TSDF to take in more readings before converging, 51 | # which will result in a smoother reconstruction 52 | tsdf_max_weight: 64 53 | 54 | # Kinfu will only fuse data into the TSDF when the camera movement 55 | # exceeds this distance threshold. 56 | tsdf_min_camera_movement: 0 57 | 58 | # Distance away from surface (in meters) the TSDF will be computed 59 | # Higher values result in smoother reconstructions, but less sharp 60 | # corners and edges. Lower values result in more accurate 61 | # reconstructions, but too low and noise becomes a major issue. 62 | tsdf_trunc_dist: 0.04 63 | 64 | # The size of the TSDF volume in voxels 65 | volume_dims_x: 1024 66 | volume_dims_y: 1024 67 | volume_dims_z: 256 68 | 69 | # The size of the TSDF volume in meters. 
The size of a voxel is 70 | # volume_size / volume_dims 71 | 74 | 75 | volume_resolution: 0.0005 76 | 77 | # The position of the topmost corner of the TSDF volume in meters 78 | volume_pos_x: 0.25 79 | volume_pos_y: 0.25 80 | volume_pos_z: -0.5 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | output_path: "/home/aderic/meshes" 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /yak/launch/launch_xtion_robot.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 13 | 14 | 15 | 16 | 17 | 18 | 20 | 21 | use_icp: True 22 | use_pose_hints: True 23 | update_via_sensor_motion: False 24 | 25 | # The node reports the transform between the fixed frame and 26 | # a given camera frame. 27 | fixed_frame: "/map" 28 | camera_frame: "/camera_depth_optical_frame" 29 | 30 | # The bilateral kernel smooths the depth image and 31 | # filters out noise around the edges 32 | bilateral_kernel_size: 4 33 | bilateral_sigma_depth: 0.04 34 | bilateral_sigma_spatial: 4.5 35 | 36 | # These are parameters for the point-to-plane ICP. The 37 | # thresholds prevent ICP from converging to far away/inconsistent 38 | # data. 39 | icp_angle_thresh: 0.523599 40 | icp_dist_thresh: 0.1 41 | icp_truncate_depth_dist: 0 42 | 43 | # ICP gets performed over an image pyramid of depth. These are the 44 | # number of iterations performed at each pyramid size. 45 | icp_iter_num : [10, 5, 4, 0] 46 | 47 | # For raycasting display, this is a multiplier on how far the rays 48 | # march each iteration, (in multiples of voxels?) Smaller numbers 49 | # are more accurate but slower. 50 | raycast_step_factor: 0.05 51 | 52 | # This is the step size, in voxels, used to compute gradients 53 | # in the TSDF. Gradients are linearly interpolated between voxels. 54 | gradient_delta_factor: 0.5 55 | 56 | # The truncated signed distance field stores the dense model. 
57 | # The maximum weight value of a voxel (integer). Higher values 58 | # cause the TSDF to take in more readings before converging, 59 | # which will result in a smoother reconstruction 60 | tsdf_max_weight: 64 61 | 62 | # Kinfu will only fuse data into the TSDF when the camera movement 63 | # exceeds this distance threshold. 64 | tsdf_min_camera_movement: 0.0001 65 | 66 | 67 | # Distance away from surface (in meters) the TSDF will be computed 68 | # Higher values result in smoother reconstructions, but less sharp 69 | # corners and edges. Lower values result in more accurate 70 | # reconstructions, but too low and noise becomes a major issue. 71 | tsdf_trunc_dist: 0.005 72 | 73 | # The size of the TSDF volume in voxels 74 | 77 | volume_dims_x: 1568 78 | volume_dims_y: 768 79 | volume_dims_z: 384 80 | 81 | 82 | # The size of the TSDF volume in meters. The size of a voxel is 83 | # volume_size / volume_dims 84 | 85 | volume_resolution: 0.001 86 | 87 | # The position of the topmost corner of the TSDF volume in meters 88 | volume_pos_x: 0.25 89 | volume_pos_y: 0.25 90 | volume_pos_z: -0.5 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | output_path: "/home/joe/meshes" 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /yak/launch/start_data_record.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /yak/launch/start_dataset.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /yak/launch/start_my_dataset.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /yak/msg/SparseTSDF.msg: 
-------------------------------------------------------------------------------- 1 | Header header 2 | 3 | #Volume dimension, in meters 4 | float32 size_x 5 | float32 size_y 6 | float32 size_z 7 | 8 | #number of voxels in each dimension 9 | int32 num_voxels_x 10 | int32 num_voxels_y 11 | int32 num_voxels_z 12 | 13 | #Truncation distance, in meters 14 | float32 truncation_dist 15 | 16 | #Maximum tsdf weight 17 | int32 max_weight 18 | 19 | #Pose of the TSDF with respect to the camera origin 20 | geometry_msgs/Pose pose 21 | 22 | # Binary serialization of distances/weights. 23 | # Use a 3D implementation of compressed row storage to improve the packing efficiency of the data and let bigger TSDFs be serialized from the GPU. 24 | # For each voxel with nonzero weight. 25 | # The first 16 bits are a half-precision floating point value representing the TSDF. 26 | # The second 16 bits are an unsigned 16 bit weight value. 27 | uint32[] data 28 | 29 | # Row (X) coordinate of each voxel in data. 30 | uint16[] rows 31 | # Col (Y) coordinate of each contiguous series of row coordinates. 32 | uint16[] cols 33 | # Sheet (Z) coordinate of each contiguous series of col coordinates. 34 | uint16[] sheets 35 | -------------------------------------------------------------------------------- /yak/msg/TSDF.msg: -------------------------------------------------------------------------------- 1 | Header header 2 | 3 | #Volume dimension, in meters 4 | float32 size_x 5 | float32 size_y 6 | float32 size_z 7 | 8 | #number of voxels in each dimension 9 | int32 num_voxels_x 10 | int32 num_voxels_y 11 | int32 num_voxels_z 12 | 13 | #Truncation distance, in meters 14 | float32 truncation_dist 15 | 16 | #Maximum tsdf weight 17 | int32 max_weight 18 | 19 | #Pose of the TSDF with respect to the camera origin 20 | geometry_msgs/Pose pose 21 | 22 | #Binary serialization of distances/weights. 23 | # The first 16 bits are a half-precision floating point value representing the TSDF. 
The second 16 bits are 24 | # an unsigned 16 bit weight value. 25 | uint32[] data -------------------------------------------------------------------------------- /yak/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | yak 4 | 0.0.0 5 | A truncated signed distance field(TSDF) to mesh tool 6 | schornakj 7 | AustinDeric 8 | AustinDeric 9 | GPLv3 10 | 11 | catkin 12 | 13 | message_generation 14 | 15 | cv_bridge 16 | image_transport 17 | rosconsole 18 | roscpp 19 | sensor_msgs 20 | tf 21 | pcl_ros 22 | visualization_msgs 23 | tf_conversions 24 | interactive_markers 25 | 26 | message_runtime 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /yak/src/core.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/kinfu.hpp" 2 | #include "yak/kfusion/safe_call.hpp" 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | int kf::cuda::getCudaEnabledDeviceCount() 9 | { 10 | int count; 11 | cudaError_t error = cudaGetDeviceCount(&count); 12 | 13 | if (error == cudaErrorInsufficientDriver) 14 | return -1; 15 | 16 | if (error == cudaErrorNoDevice) 17 | return 0; 18 | 19 | cudaSafeCall(error); 20 | return count; 21 | } 22 | 23 | void kf::cuda::setDevice(int device) 24 | { 25 | cudaSafeCall(cudaSetDevice(device)); 26 | } 27 | 28 | std::string kf::cuda::getDeviceName(int device) 29 | { 30 | cudaDeviceProp prop; 31 | cudaSafeCall(cudaGetDeviceProperties(&prop, device)); 32 | 33 | return prop.name; 34 | } 35 | 36 | bool kf::cuda::checkIfPreFermiGPU(int device) 37 | { 38 | if (device < 0) 39 | cudaSafeCall(cudaGetDevice(&device)); 40 | 41 | cudaDeviceProp prop; 42 | cudaSafeCall(cudaGetDeviceProperties(&prop, device)); 43 | return prop.major < 2; // CC == 1.x 44 | } 45 | 46 | namespace 47 | { 48 | template inline void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device) 49 | { 50 | *attribute = 
T(); 51 | CUresult error = cuDeviceGetAttribute(attribute, device_attribute, device); 52 | if (CUDA_SUCCESS == error) 53 | return; 54 | 55 | printf("Driver API error = %04d\n", error); 56 | kfusion::cuda::error("driver API error", __FILE__, __LINE__); 57 | } 58 | 59 | inline int convertSMVer2Cores(int major, int minor) 60 | { 61 | // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM 62 | typedef struct 63 | { 64 | int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version 65 | int Cores; 66 | } SMtoCores; 67 | 68 | SMtoCores gpuArchCoresPerSM[] = { { 0x10, 8 }, { 0x11, 8 }, { 0x12, 8 }, { 0x13, 8 }, { 0x20, 32 }, { 0x21, 48 }, { 0x30, 192 }, { 0x35, 192 }, { 0x50, 128 }, { 0x52, 128 }, { -1, -1 } }; 69 | 70 | int index = 0; 71 | while (gpuArchCoresPerSM[index].SM != -1) 72 | { 73 | if (gpuArchCoresPerSM[index].SM == ((major << 4) + minor)) 74 | return gpuArchCoresPerSM[index].Cores; 75 | index++; 76 | } 77 | printf("\nCan't determine number of cores. Unknown SM version %d.%d!\n", major, minor); 78 | return 0; 79 | } 80 | } 81 | 82 | void kf::cuda::printCudaDeviceInfo(int device) 83 | { 84 | int count = getCudaEnabledDeviceCount(); 85 | bool valid = (device >= 0) && (device < count); 86 | 87 | int beg = valid ? device : 0; 88 | int end = valid ? 
device + 1 : count; 89 | 90 | printf("*** CUDA Device Query (Runtime API) version (CUDART static linking) *** \n\n"); 91 | printf("Device count: %d\n", count); 92 | 93 | int driverVersion = 0, runtimeVersion = 0; 94 | cudaSafeCall(cudaDriverGetVersion(&driverVersion)); 95 | cudaSafeCall(cudaRuntimeGetVersion(&runtimeVersion)); 96 | 97 | const char *computeMode[] = { "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)", "Prohibited (no host thread can use ::cudaSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)", "Unknown", NULL }; 98 | 99 | for (int dev = beg; dev < end; ++dev) 100 | { 101 | cudaDeviceProp prop; 102 | cudaSafeCall(cudaGetDeviceProperties(&prop, dev)); 103 | 104 | int sm_cores = convertSMVer2Cores(prop.major, prop.minor); 105 | 106 | printf("\nDevice %d: \"%s\"\n", dev, prop.name); 107 | printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, driverVersion % 100, runtimeVersion / 1000, runtimeVersion % 100); 108 | printf(" CUDA Capability Major/Minor version number: %d.%d\n", prop.major, prop.minor); 109 | printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float) prop.totalGlobalMem / 1048576.0f, (unsigned long long) prop.totalGlobalMem); 110 | printf(" (%2d) Multiprocessors x (%2d) CUDA Cores/MP: %d CUDA Cores\n", prop.multiProcessorCount, sm_cores, sm_cores * prop.multiProcessorCount); 111 | printf(" GPU Clock Speed: %.2f GHz\n", prop.clockRate * 1e-6f); 112 | 113 | #if (CUDART_VERSION >= 4000) 114 | // This is not available in the CUDA Runtime API, so we make the necessary calls the driver API to support this for output 115 | int memoryClock, memBusWidth, L2CacheSize; 116 | getCudaAttribute(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev); 117 | getCudaAttribute(&memBusWidth, 
CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev); 118 | getCudaAttribute(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev); 119 | 120 | printf(" Memory Clock rate: %.2f Mhz\n", memoryClock * 1e-3f); 121 | printf(" Memory Bus Width: %d-bit\n", memBusWidth); 122 | if (L2CacheSize) 123 | printf(" L2 Cache Size: %d bytes\n", L2CacheSize); 124 | 125 | printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n", prop.maxTexture1D, prop.maxTexture2D[0], prop.maxTexture2D[1], prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]); 126 | printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n", prop.maxTexture1DLayered[0], prop.maxTexture1DLayered[1], prop.maxTexture2DLayered[0], prop.maxTexture2DLayered[1], prop.maxTexture2DLayered[2]); 127 | #endif 128 | printf(" Total amount of constant memory: %u bytes\n", (int) prop.totalConstMem); 129 | printf(" Total amount of shared memory per block: %u bytes\n", (int) prop.sharedMemPerBlock); 130 | printf(" Total number of registers available per block: %d\n", prop.regsPerBlock); 131 | printf(" Warp size: %d\n", prop.warpSize); 132 | printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock); 133 | printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); 134 | printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); 135 | printf(" Maximum memory pitch: %u bytes\n", (int) prop.memPitch); 136 | printf(" Texture alignment: %u bytes\n", (int) prop.textureAlignment); 137 | 138 | #if CUDART_VERSION >= 4000 139 | printf(" Concurrent copy and execution: %s with %d copy engine(s)\n", (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount); 140 | #else 141 | printf(" Concurrent copy and execution: %s\n", prop.deviceOverlap ? 
"Yes" : "No"); 142 | #endif 143 | printf(" Run time limit on kernels: %s\n", prop.kernelExecTimeoutEnabled ? "Yes" : "No"); 144 | printf(" Integrated GPU sharing Host Memory: %s\n", prop.integrated ? "Yes" : "No"); 145 | printf(" Support host page-locked memory mapping: %s\n", prop.canMapHostMemory ? "Yes" : "No"); 146 | 147 | printf(" Concurrent kernel execution: %s\n", prop.concurrentKernels ? "Yes" : "No"); 148 | printf(" Alignment requirement for Surfaces: %s\n", prop.surfaceAlignment ? "Yes" : "No"); 149 | printf(" Device has ECC support enabled: %s\n", prop.ECCEnabled ? "Yes" : "No"); 150 | printf(" Device is using TCC driver mode: %s\n", prop.tccDriver ? "Yes" : "No"); 151 | #if CUDART_VERSION >= 4000 152 | printf(" Device supports Unified Addressing (UVA): %s\n", prop.unifiedAddressing ? "Yes" : "No"); 153 | printf(" Device PCI Bus ID / PCI location ID: %d / %d\n", prop.pciBusID, prop.pciDeviceID); 154 | #endif 155 | printf(" Compute Mode:\n"); 156 | printf(" %s \n", computeMode[prop.computeMode]); 157 | } 158 | 159 | printf("\n"); 160 | printf("deviceQuery, CUDA Driver = CUDART"); 161 | printf(", CUDA Driver Version = %d.%d", driverVersion / 1000, driverVersion % 100); 162 | printf(", CUDA Runtime Version = %d.%d", runtimeVersion / 1000, runtimeVersion % 100); 163 | printf(", NumDevs = %d\n\n", count); 164 | fflush(stdout); 165 | } 166 | 167 | void kf::cuda::printShortCudaDeviceInfo(int device) 168 | { 169 | int count = getCudaEnabledDeviceCount(); 170 | bool valid = (device >= 0) && (device < count); 171 | 172 | int beg = valid ? device : 0; 173 | int end = valid ? device + 1 : count; 174 | 175 | int driverVersion = 0, runtimeVersion = 0; 176 | cudaSafeCall(cudaDriverGetVersion(&driverVersion)); 177 | cudaSafeCall(cudaRuntimeGetVersion(&runtimeVersion)); 178 | 179 | for (int dev = beg; dev < end; ++dev) 180 | { 181 | cudaDeviceProp prop; 182 | cudaSafeCall(cudaGetDeviceProperties(&prop, dev)); 183 | 184 | const char *arch_str = prop.major < 2 ? 
" (pre-Fermi)" : ""; 185 | printf("Device %d: \"%s\" %.0fMb", dev, prop.name, (float) prop.totalGlobalMem / 1048576.0f); 186 | printf(", sm_%d%d%s, %d cores", prop.major, prop.minor, arch_str, convertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount); 187 | printf(", Driver/Runtime ver.%d.%d/%d.%d\n", driverVersion / 1000, driverVersion % 100, runtimeVersion / 1000, runtimeVersion % 100); 188 | } 189 | fflush(stdout); 190 | } 191 | 192 | kf::SampledScopeTime::SampledScopeTime(double& time_ms) : 193 | time_ms_(time_ms) 194 | { 195 | start = (double) cv::getTickCount(); 196 | } 197 | kf::SampledScopeTime::~SampledScopeTime() 198 | { 199 | static int i_ = 0; 200 | time_ms_ += getTime(); 201 | if (i_ % EACH == 0 && i_) 202 | { 203 | std::cout << "Average frame time = " << time_ms_ / EACH << "ms ( " << 1000.f * EACH / time_ms_ << "fps )" << std::endl; 204 | time_ms_ = 0.0; 205 | } 206 | ++i_; 207 | } 208 | 209 | double kf::SampledScopeTime::getTime() 210 | { 211 | return ((double) cv::getTickCount() - start) * 1000.0 / cv::getTickFrequency(); 212 | } 213 | 214 | kf::ScopeTime::ScopeTime(const char *name_) : 215 | name(name_) 216 | { 217 | start = (double) cv::getTickCount(); 218 | } 219 | kf::ScopeTime::~ScopeTime() 220 | { 221 | double time_ms = ((double) cv::getTickCount() - start) * 1000.0 / cv::getTickFrequency(); 222 | std::cout << "Time(" << name << ") = " << time_ms << "ms" << std::endl; 223 | } 224 | -------------------------------------------------------------------------------- /yak/src/device_memory.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/cuda/device_memory.hpp" 2 | #include "yak/kfusion/safe_call.hpp" 3 | #include 4 | #include 5 | #include 6 | 7 | void kfusion::cuda::error(const char *error_string, const char *file, const int line, const char *func) 8 | { 9 | std::cout << "KinFu2 error: " << error_string << "\t" << file << ":" << line << std::endl; 10 | exit(0); 11 | } 12 | 
13 | ////////////////////////// XADD /////////////////////////////// 14 | 15 | #ifdef __GNUC__ 16 | 17 | #if __GNUC__*10 + __GNUC_MINOR__ >= 42 18 | 19 | #if !defined WIN32 && (defined __i486__ || defined __i586__ || defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) 20 | #define CV_XADD __sync_fetch_and_add 21 | #else 22 | #include 23 | #define CV_XADD __gnu_cxx::__exchange_and_add 24 | #endif 25 | #else 26 | #include 27 | #if __GNUC__*10 + __GNUC_MINOR__ >= 34 28 | #define CV_XADD __gnu_cxx::__exchange_and_add 29 | #else 30 | #define CV_XADD __exchange_and_add 31 | #endif 32 | #endif 33 | 34 | #elif defined WIN32 || defined _WIN32 35 | #include 36 | #define CV_XADD(addr,delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta)) 37 | #else 38 | 39 | template static inline _Tp CV_XADD(_Tp* addr, _Tp delta) 40 | { 41 | int tmp = *addr; 42 | *addr += delta; 43 | return tmp; 44 | } 45 | 46 | #endif 47 | 48 | //////////////////////// DeviceArray ///////////////////////////// 49 | 50 | kfusion::cuda::DeviceMemory::DeviceMemory() : 51 | data_(0), sizeBytes_(0), refcount_(0) 52 | { 53 | } 54 | kfusion::cuda::DeviceMemory::DeviceMemory(void *ptr_arg, size_t sizeBytes_arg) : 55 | data_(ptr_arg), sizeBytes_(sizeBytes_arg), refcount_(0) 56 | { 57 | } 58 | kfusion::cuda::DeviceMemory::DeviceMemory(size_t sizeBtes_arg) : 59 | data_(0), sizeBytes_(0), refcount_(0) 60 | { 61 | create(sizeBtes_arg); 62 | } 63 | kfusion::cuda::DeviceMemory::~DeviceMemory() 64 | { 65 | release(); 66 | } 67 | 68 | kfusion::cuda::DeviceMemory::DeviceMemory(const DeviceMemory& other_arg) : 69 | data_(other_arg.data_), sizeBytes_(other_arg.sizeBytes_), refcount_(other_arg.refcount_) 70 | { 71 | if (refcount_) 72 | CV_XADD(refcount_, 1); 73 | } 74 | 75 | kfusion::cuda::DeviceMemory& kfusion::cuda::DeviceMemory::operator =(const kfusion::cuda::DeviceMemory& other_arg) 76 | { 77 | if (this != &other_arg) 78 | { 79 | if (other_arg.refcount_) 80 | CV_XADD(other_arg.refcount_, 1); 
81 | release(); 82 | 83 | data_ = other_arg.data_; 84 | sizeBytes_ = other_arg.sizeBytes_; 85 | refcount_ = other_arg.refcount_; 86 | } 87 | return *this; 88 | } 89 | 90 | void kfusion::cuda::DeviceMemory::create(size_t sizeBytes_arg) 91 | { 92 | if (sizeBytes_arg == sizeBytes_) 93 | return; 94 | 95 | if (sizeBytes_arg > 0) 96 | { 97 | if (data_) 98 | release(); 99 | 100 | sizeBytes_ = sizeBytes_arg; 101 | 102 | cudaSafeCall(cudaMalloc(&data_, sizeBytes_)); 103 | 104 | //refcount_ = (int*)cv::fastMalloc(sizeof(*refcount_)); 105 | refcount_ = new int; 106 | *refcount_ = 1; 107 | } 108 | } 109 | 110 | void kfusion::cuda::DeviceMemory::copyTo(DeviceMemory& other) const 111 | { 112 | if (empty()) 113 | other.release(); 114 | else 115 | { 116 | other.create(sizeBytes_); 117 | cudaSafeCall(cudaMemcpy(other.data_, data_, sizeBytes_, cudaMemcpyDeviceToDevice)); 118 | } 119 | } 120 | 121 | void kfusion::cuda::DeviceMemory::release() 122 | { 123 | if (refcount_ && CV_XADD(refcount_, -1) == 1) 124 | { 125 | //cv::fastFree(refcount); 126 | delete refcount_; 127 | cudaSafeCall(cudaFree(data_)); 128 | } 129 | data_ = 0; 130 | sizeBytes_ = 0; 131 | refcount_ = 0; 132 | } 133 | 134 | void kfusion::cuda::DeviceMemory::upload(const void *host_ptr_arg, size_t sizeBytes_arg) 135 | { 136 | create(sizeBytes_arg); 137 | cudaSafeCall(cudaMemcpy(data_, host_ptr_arg, sizeBytes_, cudaMemcpyHostToDevice)); 138 | } 139 | 140 | void kfusion::cuda::DeviceMemory::download(void *host_ptr_arg) const 141 | { 142 | cudaSafeCall(cudaMemcpy(host_ptr_arg, data_, sizeBytes_, cudaMemcpyDeviceToHost)); 143 | } 144 | 145 | void kfusion::cuda::DeviceMemory::swap(DeviceMemory& other_arg) 146 | { 147 | std::swap(data_, other_arg.data_); 148 | std::swap(sizeBytes_, other_arg.sizeBytes_); 149 | std::swap(refcount_, other_arg.refcount_); 150 | } 151 | 152 | bool kfusion::cuda::DeviceMemory::empty() const 153 | { 154 | return !data_; 155 | } 156 | size_t kfusion::cuda::DeviceMemory::sizeBytes() const 157 | { 158 | 
return sizeBytes_; 159 | } 160 | 161 | //////////////////////// DeviceArray2D ///////////////////////////// 162 | 163 | kfusion::cuda::DeviceMemory2D::DeviceMemory2D() : 164 | data_(0), step_(0), colsBytes_(0), rows_(0), refcount_(0) 165 | { 166 | } 167 | 168 | kfusion::cuda::DeviceMemory2D::DeviceMemory2D(int rows_arg, int colsBytes_arg) : 169 | data_(0), step_(0), colsBytes_(0), rows_(0), refcount_(0) 170 | { 171 | create(rows_arg, colsBytes_arg); 172 | } 173 | 174 | kfusion::cuda::DeviceMemory2D::DeviceMemory2D(int rows_arg, int colsBytes_arg, void *data_arg, size_t step_arg) : 175 | data_(data_arg), step_(step_arg), colsBytes_(colsBytes_arg), rows_(rows_arg), refcount_(0) 176 | { 177 | } 178 | 179 | kfusion::cuda::DeviceMemory2D::~DeviceMemory2D() 180 | { 181 | release(); 182 | } 183 | 184 | kfusion::cuda::DeviceMemory2D::DeviceMemory2D(const DeviceMemory2D& other_arg) : 185 | data_(other_arg.data_), step_(other_arg.step_), colsBytes_(other_arg.colsBytes_), rows_(other_arg.rows_), refcount_(other_arg.refcount_) 186 | { 187 | if (refcount_) 188 | CV_XADD(refcount_, 1); 189 | } 190 | 191 | kfusion::cuda::DeviceMemory2D& kfusion::cuda::DeviceMemory2D::operator =(const kfusion::cuda::DeviceMemory2D& other_arg) 192 | { 193 | if (this != &other_arg) 194 | { 195 | if (other_arg.refcount_) 196 | CV_XADD(other_arg.refcount_, 1); 197 | release(); 198 | 199 | colsBytes_ = other_arg.colsBytes_; 200 | rows_ = other_arg.rows_; 201 | data_ = other_arg.data_; 202 | step_ = other_arg.step_; 203 | 204 | refcount_ = other_arg.refcount_; 205 | } 206 | return *this; 207 | } 208 | 209 | void kfusion::cuda::DeviceMemory2D::create(int rows_arg, int colsBytes_arg) 210 | { 211 | if (colsBytes_ == colsBytes_arg && rows_ == rows_arg) 212 | return; 213 | 214 | if (rows_arg > 0 && colsBytes_arg > 0) 215 | { 216 | if (data_) 217 | release(); 218 | 219 | colsBytes_ = colsBytes_arg; 220 | rows_ = rows_arg; 221 | 222 | cudaSafeCall(cudaMallocPitch((void** )&data_, &step_, colsBytes_, rows_)); 
223 | 224 | //refcount = (int*)cv::fastMalloc(sizeof(*refcount)); 225 | refcount_ = new int; 226 | *refcount_ = 1; 227 | } 228 | } 229 | 230 | void kfusion::cuda::DeviceMemory2D::release() 231 | { 232 | if (refcount_ && CV_XADD(refcount_, -1) == 1) 233 | { 234 | //cv::fastFree(refcount); 235 | delete refcount_; 236 | cudaSafeCall(cudaFree(data_)); 237 | } 238 | 239 | colsBytes_ = 0; 240 | rows_ = 0; 241 | data_ = 0; 242 | step_ = 0; 243 | refcount_ = 0; 244 | } 245 | 246 | void kfusion::cuda::DeviceMemory2D::copyTo(DeviceMemory2D& other) const 247 | { 248 | if (empty()) 249 | other.release(); 250 | else 251 | { 252 | other.create(rows_, colsBytes_); 253 | cudaSafeCall(cudaMemcpy2D(other.data_, other.step_, data_, step_, colsBytes_, rows_, cudaMemcpyDeviceToDevice)); 254 | } 255 | } 256 | 257 | void kfusion::cuda::DeviceMemory2D::upload(const void *host_ptr_arg, size_t host_step_arg, int rows_arg, int colsBytes_arg) 258 | { 259 | create(rows_arg, colsBytes_arg); 260 | cudaSafeCall(cudaMemcpy2D(data_, step_, host_ptr_arg, host_step_arg, colsBytes_, rows_, cudaMemcpyHostToDevice)); 261 | } 262 | 263 | void kfusion::cuda::DeviceMemory2D::download(void *host_ptr_arg, size_t host_step_arg) const 264 | { 265 | cudaSafeCall(cudaMemcpy2D(host_ptr_arg, host_step_arg, data_, step_, colsBytes_, rows_, cudaMemcpyDeviceToHost)); 266 | } 267 | 268 | void kfusion::cuda::DeviceMemory2D::swap(DeviceMemory2D& other_arg) 269 | { 270 | std::swap(data_, other_arg.data_); 271 | std::swap(step_, other_arg.step_); 272 | 273 | std::swap(colsBytes_, other_arg.colsBytes_); 274 | std::swap(rows_, other_arg.rows_); 275 | std::swap(refcount_, other_arg.refcount_); 276 | } 277 | 278 | bool kfusion::cuda::DeviceMemory2D::empty() const 279 | { 280 | return !data_; 281 | } 282 | int kfusion::cuda::DeviceMemory2D::colsBytes() const 283 | { 284 | return colsBytes_; 285 | } 286 | int kfusion::cuda::DeviceMemory2D::rows() const 287 | { 288 | return rows_; 289 | } 290 | size_t 
kfusion::cuda::DeviceMemory2D::step() const 291 | { 292 | return step_; 293 | } 294 | -------------------------------------------------------------------------------- /yak/src/imgproc.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/precomp.hpp" 2 | 3 | void kfusion::cuda::depthBilateralFilter(const Depth& in, Depth& out, int kernel_size, float sigma_spatial, float sigma_depth) 4 | { 5 | out.create(in.rows(), in.cols()); 6 | device::bilateralFilter(in, out, kernel_size, sigma_spatial, sigma_depth); 7 | } 8 | 9 | void kfusion::cuda::depthTruncation(Depth& depth, float threshold) 10 | { 11 | device::truncateDepth(depth, threshold); 12 | } 13 | 14 | void kfusion::cuda::depthBuildPyramid(const Depth& depth, Depth& pyramid, float sigma_depth) 15 | { 16 | pyramid.create(depth.rows() / 2, depth.cols() / 2); 17 | device::depthPyr(depth, pyramid, sigma_depth); 18 | } 19 | 20 | void kfusion::cuda::waitAllDefaultStream() 21 | { 22 | cudaSafeCall(cudaDeviceSynchronize()); 23 | } 24 | 25 | void kfusion::cuda::computeNormalsAndMaskDepth(const Intr& intr, Depth& depth, Normals& normals) 26 | { 27 | normals.create(depth.rows(), depth.cols()); 28 | 29 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); 30 | 31 | device::Normals& n = (device::Normals&) normals; 32 | device::computeNormalsAndMaskDepth(reproj, depth, n); 33 | } 34 | 35 | void kfusion::cuda::computePointNormals(const Intr& intr, const Depth& depth, Cloud& points, Normals& normals) 36 | { 37 | points.create(depth.rows(), depth.cols()); 38 | normals.create(depth.rows(), depth.cols()); 39 | 40 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); 41 | 42 | device::Points& p = (device::Points&) points; 43 | device::Normals& n = (device::Normals&) normals; 44 | device::computePointNormals(reproj, depth, p, n); 45 | } 46 | 47 | void kfusion::cuda::computeDists(const Depth& depth, Dists& dists, const Intr& intr) 48 | { 49 | 
dists.create(depth.rows(), depth.cols()); 50 | device::compute_dists(depth, dists, make_float2(intr.fx, intr.fy), make_float2(intr.cx, intr.cy)); 51 | } 52 | 53 | void kfusion::cuda::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out) 54 | { 55 | depth_out.create(depth.rows() / 2, depth.cols() / 2); 56 | normals_out.create(normals.rows() / 2, normals.cols() / 2); 57 | 58 | device::Normals& nsrc = (device::Normals&) normals; 59 | device::Normals& ndst = (device::Normals&) normals_out; 60 | 61 | device::resizeDepthNormals(depth, nsrc, depth_out, ndst); 62 | } 63 | 64 | void kfusion::cuda::resizePointsNormals(const Cloud& points, const Normals& normals, Cloud& points_out, Normals& normals_out) 65 | { 66 | points_out.create(points.rows() / 2, points.cols() / 2); 67 | normals_out.create(normals.rows() / 2, normals.cols() / 2); 68 | 69 | device::Points& pi = (device::Points&) points; 70 | device::Normals& ni = (device::Normals&) normals; 71 | 72 | device::Points& po = (device::Points&) points_out; 73 | device::Normals& no = (device::Normals&) normals_out; 74 | 75 | device::resizePointsNormals(pi, ni, po, no); 76 | } 77 | 78 | void kfusion::cuda::renderImage(const Depth& depth, const Normals& normals, const Intr& intr, const Vec3f& light_pose, Image& image) 79 | { 80 | image.create(depth.rows(), depth.cols()); 81 | 82 | const device::Depth& d = (const device::Depth&) depth; 83 | const device::Normals& n = (const device::Normals&) normals; 84 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); /* FIX: was intr.fy — the 4th Reprojector argument is the principal point cy, not the focal length (cf. computeNormalsAndMaskDepth / computePointNormals above) */ 85 | device::Vec3f light = device_cast(light_pose); 86 | 87 | device::Image& i = (device::Image&) image; 88 | device::renderImage(d, n, reproj, light, i); 89 | waitAllDefaultStream(); 90 | } 91 | 92 | void kfusion::cuda::renderImage(const Cloud& points, const Normals& normals, const Intr& intr, const Vec3f& light_pose, Image& image) 93 | { 94 | image.create(points.rows(), points.cols()); 95 | 96 | const device::Points&
p = (const device::Points&) points; 97 | const device::Normals& n = (const device::Normals&) normals; 98 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); /* FIX: was intr.fy — cy (principal point y) must be passed as the 4th argument, matching the other Reprojector constructions in this file */ 99 | device::Vec3f light = device_cast(light_pose); 100 | 101 | device::Image& i = (device::Image&) image; 102 | device::renderImage(p, n, reproj, light, i); 103 | waitAllDefaultStream(); 104 | } 105 | 106 | void kfusion::cuda::renderTangentColors(const Normals& normals, Image& image) 107 | { 108 | image.create(normals.rows(), normals.cols()); 109 | const device::Normals& n = (const device::Normals&) normals; 110 | device::Image& i = (device::Image&) image; 111 | 112 | device::renderTangentColors(n, i); 113 | waitAllDefaultStream(); 114 | } 115 | -------------------------------------------------------------------------------- /yak/src/kinfu_node.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | using namespace kfusion; 13 | 14 | int main(int argc, char* argv[]) 15 | { 16 | ROS_INFO("starting kinfu node..."); 17 | int device = 0; 18 | cuda::setDevice(device); 19 | cuda::printShortCudaDeviceInfo(device); 20 | 21 | if (cuda::checkIfPreFermiGPU(device)) 22 | return std::cout << std::endl << "Kinfu is not supported for pre-Fermi GPU architectures, and not built for them by default. Exiting..."
<< std::endl, 1; 23 | 24 | ros::init(argc, argv, "yak"); 25 | 26 | ros::NodeHandle node("~"); 27 | RosRGBDCamera camera(node); 28 | camera.SubscribeDepth("/camera/depth/image_raw"); 29 | camera.SubscribeRGB("/camera/rgb/image_rect_color"); 30 | std::string fixedFrame; 31 | std::string cameraFrame; 32 | 33 | node.param("fixed_frame", fixedFrame, "/map"); 34 | node.param("camera_frame", cameraFrame, "/camera_depth_optical_frame"); 35 | ROS_INFO_STREAM("Fixed frame: " + fixedFrame + " Camera frame: " + cameraFrame); 36 | 37 | KinFuServer app(&camera, fixedFrame, cameraFrame); 38 | app.ExecuteBlocking(); 39 | 40 | return 0; 41 | } 42 | -------------------------------------------------------------------------------- /yak/src/precomp.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/precomp.hpp" 2 | #include "yak/kfusion/internal.hpp" 3 | 4 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 5 | /// Kinfu/types implementation 6 | 7 | kfusion::Intr::Intr() 8 | { 9 | } 10 | kfusion::Intr::Intr(float fx_, float fy_, float cx_, float cy_) : 11 | fx(fx_), fy(fy_), cx(cx_), cy(cy_) 12 | { 13 | } 14 | 15 | kfusion::Intr kfusion::Intr::operator()(int level_index) const 16 | { 17 | int div = 1 << level_index; 18 | return (Intr(fx / div, fy / div, cx / div, cy / div)); 19 | } 20 | 21 | std::ostream& operator <<(std::ostream& os, const kfusion::Intr& intr) 22 | { 23 | return os << "([f = " << intr.fx << ", " << intr.fy << "] [cp = " << intr.cx << ", " << intr.cy << "])"; 24 | } 25 | 26 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 27 | /// TsdfVolume host implementation 28 | 29 | kfusion::device::TsdfVolume::TsdfVolume(elem_type* _data, int3 _dims, float3 _voxel_size, float _trunc_dist, int _max_weight) : 30 | data(_data), dims(_dims), voxel_size(_voxel_size), trunc_dist(_trunc_dist), 
max_weight(_max_weight) 31 | { 32 | } 33 | 34 | //kfusion::device::TsdfVolume::elem_type* kfusionl::device::TsdfVolume::operator()(int x, int y, int z) 35 | //{ return data + x + y*dims.x + z*dims.y*dims.x; } 36 | // 37 | //const kfusion::device::TsdfVolume::elem_type* kfusionl::device::TsdfVolume::operator() (int x, int y, int z) const 38 | //{ return data + x + y*dims.x + z*dims.y*dims.x; } 39 | // 40 | //kfusion::device::TsdfVolume::elem_type* kfusionl::device::TsdfVolume::beg(int x, int y) const 41 | //{ return data + x + dims.x * y; } 42 | // 43 | //kfusion::device::TsdfVolume::elem_type* kfusionl::device::TsdfVolume::zstep(elem_type *const ptr) const 44 | //{ return data + dims.x * dims.y; } 45 | 46 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 47 | /// Projector host implementation 48 | 49 | kfusion::device::Projector::Projector(float fx, float fy, float cx, float cy) : 50 | f(make_float2(fx, fy)), c(make_float2(cx, cy)) 51 | { 52 | } 53 | 54 | //float2 kfusion::device::Projector::operator()(const float3& p) const 55 | //{ 56 | // float2 coo; 57 | // coo.x = p.x * f.x / p.z + c.x; 58 | // coo.y = p.y * f.y / p.z + c.y; 59 | // return coo; 60 | //} 61 | 62 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 63 | /// Reprojector host implementation 64 | 65 | kfusion::device::Reprojector::Reprojector(float fx, float fy, float cx, float cy) : 66 | finv(make_float2(1.f / fx, 1.f / fy)), c(make_float2(cx, cy)) 67 | { 68 | } 69 | 70 | //float3 kfusion::device::Reprojector::operator()(int u, int v, float z) const 71 | //{ 72 | // float x = z * (u - c.x) * finv.x; 73 | // float y = z * (v - c.y) * finv.y; 74 | // return make_float3(x, y, z); 75 | //} 76 | 77 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 78 | /// Host implementation of packing/unpacking tsdf volume 
element 79 | 80 | //ushort2 kfusion::device::pack_tsdf(float tsdf, int weight) { throw "Not implemented"; return ushort2(); } 81 | //float kfusion::device::unpack_tsdf(ushort2 value, int& weight) { throw "Not implemented"; return 0; } 82 | //float kfusion::device::unpack_tsdf(ushort2 value) { throw "Not implemented"; return 0; } 83 | 84 | -------------------------------------------------------------------------------- /yak/src/projective_icp.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/precomp.hpp" 2 | 3 | using namespace kfusion; 4 | using namespace std; 5 | using namespace kfusion::cuda; 6 | 7 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 8 | /// ComputeIcpHelper 9 | 10 | kfusion::device::ComputeIcpHelper::ComputeIcpHelper(float dist_thres, float angle_thres) 11 | { 12 | min_cosine = cos(angle_thres); 13 | dist2_thres = dist_thres * dist_thres; 14 | } 15 | 16 | void kfusion::device::ComputeIcpHelper::setLevelIntr(int level_index, float fx, float fy, float cx, float cy) 17 | { 18 | int div = 1 << level_index; 19 | f = make_float2(fx / div, fy / div); 20 | c = make_float2(cx / div, cy / div); 21 | finv = make_float2(1.f / f.x, 1.f / f.y); 22 | } 23 | 24 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 25 | /// ProjectiveICP::StreamHelper 26 | 27 | struct kfusion::cuda::ProjectiveICP::StreamHelper 28 | { 29 | typedef device::ComputeIcpHelper::PageLockHelper PageLockHelper; 30 | typedef cv::Matx66f Mat6f; 31 | typedef cv::Vec6f Vec6f; 32 | 33 | cudaStream_t stream; 34 | PageLockHelper locked_buffer; 35 | 36 | StreamHelper() 37 | { 38 | cudaSafeCall(cudaStreamCreate(&stream)); 39 | } 40 | ~StreamHelper() 41 | { 42 | cudaSafeCall(cudaStreamDestroy(stream)); 43 | } 44 | 45 | operator float*() 46 | { 47 | return locked_buffer.data; 48 | } 49 | operator cudaStream_t() 50 | 
{ 51 | return stream; 52 | } 53 | 54 | Mat6f get(Vec6f& b) 55 | { 56 | cudaSafeCall(cudaStreamSynchronize(stream)); 57 | 58 | Mat6f A; 59 | float *data_A = A.val; 60 | float *data_b = b.val; 61 | 62 | int shift = 0; 63 | for (int i = 0; i < 6; ++i) //rows 64 | for (int j = i; j < 7; ++j) // cols + b 65 | { 66 | float value = locked_buffer.data[shift++]; 67 | if (j == 6) // vector b 68 | data_b[i] = value; 69 | else 70 | data_A[j * 6 + i] = data_A[i * 6 + j] = value; 71 | } 72 | return A; 73 | } 74 | }; 75 | 76 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 77 | /// ProjectiveICP 78 | 79 | kfusion::cuda::ProjectiveICP::ProjectiveICP() : 80 | angle_thres_(deg2rad(20.f)), dist_thres_(0.1f) 81 | { 82 | const int iters[] = { 10, 5, 4, 0 }; 83 | std::vector vector_iters(iters, iters + 4); 84 | setIterationsNum(vector_iters); 85 | device::ComputeIcpHelper::allocate_buffer(buffer_); 86 | 87 | shelp_ = cv::Ptr(new StreamHelper()); 88 | } 89 | 90 | kfusion::cuda::ProjectiveICP::~ProjectiveICP() 91 | { 92 | } 93 | 94 | float kfusion::cuda::ProjectiveICP::getDistThreshold() const 95 | { 96 | return dist_thres_; 97 | } 98 | 99 | void kfusion::cuda::ProjectiveICP::setDistThreshold(float distance) 100 | { 101 | dist_thres_ = distance; 102 | } 103 | 104 | float kfusion::cuda::ProjectiveICP::getAngleThreshold() const 105 | { 106 | return angle_thres_; 107 | } 108 | 109 | void kfusion::cuda::ProjectiveICP::setAngleThreshold(float angle) 110 | { 111 | angle_thres_ = angle; 112 | } 113 | 114 | void kfusion::cuda::ProjectiveICP::setIterationsNum(const std::vector& iters) 115 | { 116 | if (iters.size() >= MAX_PYRAMID_LEVELS) 117 | iters_.assign(iters.begin(), iters.begin() + MAX_PYRAMID_LEVELS); 118 | else 119 | { 120 | iters_ = vector(MAX_PYRAMID_LEVELS, 0); 121 | copy(iters.begin(), iters.end(), iters_.begin()); 122 | } 123 | } 124 | 125 | int kfusion::cuda::ProjectiveICP::getUsedLevelsNum() const 126 | { 127 | int i 
= MAX_PYRAMID_LEVELS - 1; 128 | for (; i >= 0 && !iters_[i]; --i) 129 | ; 130 | return i + 1; 131 | } 132 | 133 | bool kfusion::cuda::ProjectiveICP::estimateTransform(Affine3f& /*affine*/, const Intr& /*intr*/, const Frame& /*curr*/, const Frame& /*prev*/) 134 | { 135 | // bool has_depth = !curr.depth_pyr.empty() && !prev.depth_pyr.empty(); 136 | // bool has_points = !curr.points_pyr.empty() && !prev.points_pyr.empty(); 137 | 138 | // if (has_depth) 139 | // return estimateTransform(affine, intr, curr.depth_pyr, curr.normals_pyr, prev.depth_pyr, prev.normals_pyr); 140 | // else if(has_points) 141 | // return estimateTransform(affine, intr, curr.points_pyr, curr.normals_pyr, prev.points_pyr, prev.normals_pyr); 142 | // else 143 | // CV_Assert(!"Wrong parameters passed to estimateTransform"); 144 | CV_Assert(!"Not implemented"); 145 | return false; 146 | } 147 | 148 | bool kfusion::cuda::ProjectiveICP::estimateTransform(Affine3f& affine, const Intr& intr, const DepthPyr& dcurr, const NormalsPyr ncurr, const DepthPyr dprev, const NormalsPyr nprev) 149 | { 150 | const int LEVELS = getUsedLevelsNum(); 151 | StreamHelper& sh = *shelp_; 152 | 153 | device::ComputeIcpHelper helper(dist_thres_, angle_thres_); 154 | // affine = Affine3f::Identity(); 155 | 156 | for (int level_index = LEVELS - 1; level_index >= 0; --level_index) 157 | { 158 | const device::Normals& n = (const device::Normals&) nprev[level_index]; 159 | 160 | helper.rows = (float) n.rows(); 161 | helper.cols = (float) n.cols(); 162 | helper.setLevelIntr(level_index, intr.fx, intr.fy, intr.cx, intr.cy); 163 | helper.dcurr = dcurr[level_index]; 164 | helper.ncurr = ncurr[level_index]; 165 | 166 | for (int iter = 0; iter < iters_[level_index]; ++iter) 167 | { 168 | helper.aff = device_cast(affine); 169 | helper(dprev[level_index], n, buffer_, sh, sh); 170 | 171 | StreamHelper::Vec6f b; 172 | StreamHelper::Mat6f A = sh.get(b); 173 | 174 | //checking nullspace 175 | double det = cv::determinant(A); 176 | 177 | if 
(fabs(det) < 1e-15 || std::isnan(det)) 178 | { 179 | if (std::isnan(det)) 180 | cout << "qnan" << endl; 181 | return false; 182 | } 183 | 184 | StreamHelper::Vec6f r; 185 | cv::solve(A, b, r, cv::DECOMP_SVD); 186 | Affine3f Tinc(Vec3f(r.val), Vec3f(r.val + 3)); 187 | affine = Tinc * affine; 188 | } 189 | } 190 | return true; 191 | } 192 | 193 | bool kfusion::cuda::ProjectiveICP::estimateTransform(const Affine3f& affine, Affine3f& correctedAffine, const Intr& intr, const PointsPyr& vcurr, const NormalsPyr ncurr, const PointsPyr vprev, const NormalsPyr nprev) 194 | { 195 | const int LEVELS = getUsedLevelsNum(); 196 | StreamHelper& sh = *shelp_; 197 | 198 | device::ComputeIcpHelper helper(dist_thres_, angle_thres_); 199 | 200 | correctedAffine = affine; 201 | 202 | // This is equivalent to assuming that the transform between the previous and current pose is identity, i.e. the poses are identical. 203 | // Changing this to the actual transform between the prev and TF-derived current pose should help the calculation of the new pose. 
204 | //affine = Affine3f::Identity(); 205 | 206 | for (int level_index = LEVELS - 1; level_index >= 0; --level_index) 207 | { 208 | const device::Normals& n = (const device::Normals&) nprev[level_index]; 209 | const device::Points& v = (const device::Points&) vprev[level_index]; 210 | 211 | helper.rows = (float) n.rows(); 212 | helper.cols = (float) n.cols(); 213 | helper.setLevelIntr(level_index, intr.fx, intr.fy, intr.cx, intr.cy); 214 | helper.vcurr = vcurr[level_index]; 215 | helper.ncurr = ncurr[level_index]; 216 | 217 | for (int iter = 0; iter < iters_[level_index]; ++iter) 218 | { 219 | helper.aff = device_cast(correctedAffine); 220 | helper(v, n, buffer_, sh, sh); 221 | 222 | StreamHelper::Vec6f b; 223 | StreamHelper::Mat6f A = sh.get(b); 224 | 225 | //checking nullspace 226 | double det = cv::determinant(A); 227 | 228 | if (fabs(det) < 1e-15 || std::isnan(det)) 229 | { 230 | if (std::isnan(det)) 231 | cout << "qnan" << endl; 232 | return false; 233 | } 234 | 235 | StreamHelper::Vec6f r; 236 | cv::solve(A, b, r, cv::DECOMP_SVD); 237 | 238 | Affine3f Tinc(Vec3f(r.val), Vec3f(r.val + 3)); 239 | correctedAffine = Tinc * correctedAffine; 240 | } 241 | } 242 | return true; 243 | } 244 | 245 | -------------------------------------------------------------------------------- /yak/src/ros/ros_rgbd_camera.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * ros_rgbd_camera.cpp 3 | * 4 | * Created on: Jun 2, 2015 5 | * Author: mklingen 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | namespace kfusion 12 | { 13 | 14 | RosRGBDCamera::RosRGBDCamera(const ros::NodeHandle& handle) : 15 | nodeHandle(handle), transport(handle), hasNewDepth(false), hasNewRGB(false), subscribedDepth(false), subscribedRGB(false) 16 | { 17 | 18 | } 19 | 20 | RosRGBDCamera::~RosRGBDCamera() 21 | { 22 | 23 | } 24 | 25 | kfusion::Intr RosRGBDCamera::GetDepthIntrinsics() 26 | { 27 | return kfusion::Intr(lastDepthInfo->K[0], lastDepthInfo->K[4], 
lastDepthInfo->K[2], lastDepthInfo->K[5]); 28 | } 29 | 30 | bool RosRGBDCamera::Grab(cv::Mat& depth, cv::Mat& image) 31 | { 32 | bool hasAny = hasNewDepth || hasNewRGB; 33 | 34 | if (hasNewDepth) 35 | { 36 | cv_bridge::CvImageConstPtr cvDepth = cv_bridge::toCvShare(lastDepthImage, ""); 37 | if (!cvDepth) 38 | { 39 | ROS_ERROR("Failed to convert depth image to OpenCV"); 40 | return false; 41 | } 42 | depth = cvDepth->image; 43 | hasNewDepth = false; 44 | } 45 | 46 | if (hasNewRGB) 47 | { 48 | cv_bridge::CvImageConstPtr cvColor = cv_bridge::toCvShare(lastRGBImage, "rgb8"); 49 | 50 | if (!cvColor) 51 | { 52 | ROS_ERROR("Failed to convert color image to OpenCV"); 53 | return false; 54 | } 55 | 56 | image = cvColor->image; 57 | hasNewRGB = false; /* FIX: was hasNewDepth — after consuming the color frame the RGB flag must be cleared, otherwise hasNewRGB stays true forever */ 58 | } 59 | 60 | return hasAny; 61 | } 62 | 63 | void RosRGBDCamera::SubscribeDepth(const std::string& topic) 64 | { 65 | depthSubscriber = transport.subscribeCamera(topic, 10, &RosRGBDCamera::DepthCallback, this); 66 | hasNewDepth = false; 67 | subscribedDepth = true; 68 | } 69 | 70 | void RosRGBDCamera::SubscribeRGB(const std::string& topic) 71 | { 72 | rgbSubscriber = transport.subscribeCamera(topic, 10, &RosRGBDCamera::RGBCallback, this); 73 | hasNewRGB = false; 74 | subscribedRGB = true; 75 | } 76 | 77 | void RosRGBDCamera::DepthCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo) 78 | { 79 | lastDepthImage = image; 80 | lastDepthInfo = cameraInfo; 81 | hasNewDepth = true; 82 | 83 | } 84 | 85 | void RosRGBDCamera::RGBCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo) 86 | { 87 | lastRGBImage = image; 88 | lastRGBInfo = cameraInfo; 89 | hasNewRGB = true; 90 | } 91 | 92 | } /* namespace kfusion */ 93 | -------------------------------------------------------------------------------- /yak/src/ros/ros_sim_camera.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * ros_sim_camera.cpp 3 
| * 4 | * Created on: Jun 13, 2017 5 | * Author: jschornak 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | namespace kfusion 12 | { 13 | 14 | RosSimCamera::RosSimCamera(const ros::NodeHandle& handle) : 15 | nodeHandle(handle), transport(handle), hasNewDepth(false), hasNewRGB(false), subscribedDepth(false), subscribedRGB(false) 16 | { 17 | 18 | } 19 | 20 | RosSimCamera::~RosSimCamera() 21 | { 22 | 23 | } 24 | 25 | kfusion::Intr RosSimCamera::GetDepthIntrinsics() 26 | { 27 | return kfusion::Intr(lastDepthInfo->K[0], lastDepthInfo->K[4], lastDepthInfo->K[2], lastDepthInfo->K[5]); 28 | } 29 | 30 | bool RosSimCamera::Grab(cv::Mat& depth, cv::Mat& image) 31 | { 32 | bool hasAny = hasNewDepth || hasNewRGB; 33 | 34 | if (hasNewDepth) 35 | { 36 | cv_bridge::CvImageConstPtr cvDepth = cv_bridge::toCvShare(lastDepthImage, ""); 37 | if (!cvDepth) 38 | { 39 | ROS_ERROR("Failed to convert depth image to OpenCV"); 40 | return false; 41 | } 42 | depth = cvDepth->image; 43 | hasNewDepth = false; 44 | } 45 | 46 | if (hasNewRGB) 47 | { 48 | cv_bridge::CvImageConstPtr cvColor = cv_bridge::toCvShare(lastRGBImage, "rgb8"); 49 | 50 | if (!cvColor) 51 | { 52 | ROS_ERROR("Failed to convert color image to OpenCV"); 53 | return false; 54 | } 55 | 56 | image = cvColor->image; 57 | hasNewDepth = false; 58 | } 59 | 60 | return hasAny; 61 | } 62 | 63 | void RosSimCamera::SubscribeDepth(const std::string& topic) 64 | { 65 | depthSubscriber = transport.subscribeCamera(topic, 10, &RosRGBDCamera::DepthCallback, this); 66 | hasNewDepth = false; 67 | subscribedDepth = true; 68 | } 69 | 70 | void RosSimCamera::SubscribeRGB(const std::string& topic) 71 | { 72 | rgbSubscriber = transport.subscribeCamera(topic, 10, &RosRGBDCamera::RGBCallback, this); 73 | hasNewRGB = false; 74 | subscribedRGB = true; 75 | } 76 | 77 | void RosSimCamera::DepthCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo) 78 | { 79 | lastDepthImage = image; 80 | lastDepthInfo = cameraInfo; 
81 | hasNewDepth = true; 82 | } 83 | 84 | void RosSimCamera::RGBCallback(const sensor_msgs::ImageConstPtr& image, const sensor_msgs::CameraInfoConstPtr& cameraInfo) 85 | { 86 | lastRGBImage = image; 87 | lastRGBInfo = cameraInfo; 88 | hasNewRGB = true; 89 | } 90 | 91 | } /* namespace kfusion */ 92 | -------------------------------------------------------------------------------- /yak/src/ros/volume_marker.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | void processFeedback( 7 | const visualization_msgs::InteractiveMarkerFeedbackConstPtr &feedback ) 8 | { 9 | // ROS_INFO_STREAM( feedback->marker_name << " is now at " << feedback->pose.position.x << ", " << feedback->pose.position.y << ", " << feedback->pose.position.z ); 10 | } 11 | 12 | int main(int argc, char** argv) { 13 | ros::init(argc, argv, "volume_marker"); 14 | ros::NodeHandle nh; 15 | 16 | // tf::TransformBroadcaster br; 17 | // tf::Transform transform; 18 | 19 | // create an interactive marker server on the topic namespace simple_marker 20 | interactive_markers::InteractiveMarkerServer server("volume_marker"); 21 | 22 | // create an interactive marker for our server 23 | visualization_msgs::InteractiveMarker int_marker; 24 | int_marker.header.frame_id = "base_link"; 25 | int_marker.header.stamp=ros::Time::now(); 26 | int_marker.name = "my_marker"; 27 | int_marker.description = "Voxel Volume Pose"; 28 | 29 | // create a grey box marker 30 | visualization_msgs::Marker box_marker; 31 | box_marker.type = visualization_msgs::Marker::CUBE; 32 | box_marker.scale.x = 0.45; 33 | box_marker.scale.y = 0.45; 34 | box_marker.scale.z = 0.45; 35 | box_marker.color.r = 0.5; 36 | box_marker.color.g = 0.5; 37 | box_marker.color.b = 0.5; 38 | box_marker.color.a = 0.0; 39 | 40 | // create a non-interactive control which contains the box 41 | visualization_msgs::InteractiveMarkerControl box_control; 42 | 
box_control.always_visible = true; 43 | box_control.markers.push_back( box_marker ); 44 | 45 | // add the control to the interactive marker 46 | int_marker.controls.push_back( box_control ); 47 | 48 | // create a control which will move the box 49 | // this control does not contain any markers, 50 | // which will cause RViz to insert two arrows 51 | visualization_msgs::InteractiveMarkerControl rotate_control; 52 | // rotate_control.name = "move_x"; 53 | // rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::MOVE_AXIS; 54 | // rotate_control.orientation_mode = visualization_msgs::InteractiveMarkerControl::FIXED; 55 | 56 | 57 | rotate_control.orientation.w = 1; 58 | rotate_control.orientation.x = 1; 59 | rotate_control.orientation.y = 0; 60 | rotate_control.orientation.z = 0; 61 | // rotate_control.name = "rotate_x"; 62 | // rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::ROTATE_AXIS; 63 | // int_marker.controls.push_back(rotate_control); 64 | 65 | rotate_control.name = "move_x"; 66 | rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::MOVE_AXIS; 67 | rotate_control.orientation_mode = visualization_msgs::InteractiveMarkerControl::FIXED; 68 | int_marker.controls.push_back(rotate_control); 69 | 70 | rotate_control.orientation.w = 1; 71 | rotate_control.orientation.x = 0; 72 | rotate_control.orientation.y = 1; 73 | rotate_control.orientation.z = 0; 74 | // rotate_control.name = "rotate_z"; 75 | // rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::ROTATE_AXIS; 76 | // int_marker.controls.push_back(rotate_control); 77 | 78 | rotate_control.name = "move_z"; 79 | rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::MOVE_AXIS; 80 | rotate_control.orientation_mode = visualization_msgs::InteractiveMarkerControl::FIXED; 81 | int_marker.controls.push_back(rotate_control); 82 | 83 | rotate_control.orientation.w = 1; 84 | 
rotate_control.orientation.x = 0; 85 | rotate_control.orientation.y = 0; 86 | rotate_control.orientation.z = 1; 87 | // rotate_control.name = "rotate_y"; 88 | // rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::ROTATE_AXIS; 89 | // int_marker.controls.push_back(rotate_control); 90 | 91 | rotate_control.name = "move_y"; 92 | rotate_control.interaction_mode = visualization_msgs::InteractiveMarkerControl::MOVE_AXIS; 93 | rotate_control.orientation_mode = visualization_msgs::InteractiveMarkerControl::FIXED; 94 | int_marker.controls.push_back(rotate_control); 95 | 96 | // add the control to the interactive marker 97 | // int_marker.controls.push_back(rotate_control); 98 | 99 | 100 | int_marker.scale = 0.5; 101 | 102 | // add the interactive marker to our collection & 103 | // tell the server to call processFeedback() when feedback arrives for it 104 | server.insert(int_marker, &processFeedback); 105 | 106 | // 'commit' changes and send to all clients 107 | server.applyChanges(); 108 | 109 | geometry_msgs::Pose defaultPose; 110 | defaultPose.position.x = 0.75; 111 | defaultPose.position.y = -0.3; 112 | defaultPose.position.z = -0.25; 113 | defaultPose.orientation.x = 0; 114 | defaultPose.orientation.y = 0; 115 | defaultPose.orientation.z = 0; 116 | defaultPose.orientation.w = 1; 117 | 118 | // ROS_INFO_STREAM("Pose: " << defaultPose); 119 | 120 | server.setPose("my_marker", defaultPose); 121 | 122 | // VolumePosePublisher vol(nh); 123 | 124 | // ros::Rate rate(10.0); 125 | 126 | // while (ros::ok()) { 127 | //// vol.Update(); 128 | // rate.sleep(); 129 | // } 130 | 131 | ros::AsyncSpinner spinner(4); // Use 4 threads 132 | spinner.start(); 133 | ros::waitForShutdown(); 134 | // return 0; 135 | } 136 | -------------------------------------------------------------------------------- /yak/src/ros/volume_tf_broadcaster.cpp: -------------------------------------------------------------------------------- 1 | //#include 2 | 3 | #include 4 | 
#include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | class VolumePosePublisher { 11 | public: 12 | VolumePosePublisher(ros::NodeHandle& nh) { 13 | // ROS_INFO("Started constructor"); 14 | lastOriginPose_ = tf::Transform(tf::Quaternion(0,0,0,1), tf::Vector3(0,0,0)); 15 | subscriber_ = nh.subscribe("/volume_marker/feedback", 1, &VolumePosePublisher::volumeTFCallback, this); 16 | 17 | box_line_pub_ = nh.advertise("volume_boundary", 10); 18 | 19 | 20 | 21 | // ROS_INFO("Finished constructor"); 22 | VolumePosePublisher::Update(); 23 | 24 | int dimsX, dimsY, dimsZ; 25 | double res; 26 | 27 | nh.getParam("/kinfu/volume_dims_x", dimsX); 28 | nh.getParam("/kinfu/volume_dims_y", dimsY); 29 | nh.getParam("/kinfu/volume_dims_z", dimsZ); 30 | nh.getParam("/kinfu/volume_resolution", res); 31 | // ROS_INFO_STREAM("dimsX loaded as " << dimsX); 32 | sizeX_ = dimsX*res; 33 | sizeY_ = dimsY*res; 34 | sizeZ_ = dimsZ*res; 35 | 36 | 37 | ROS_INFO_STREAM("Volume size is " << sizeX_ << " " << sizeY_ << " " << sizeZ_); 38 | } 39 | 40 | void volumeTFCallback(const visualization_msgs::InteractiveMarkerFeedbackConstPtr& feedback) 41 | { 42 | // Update TF transform with new volume pose 43 | tf::Quaternion orientation; 44 | tf::quaternionMsgToTF(feedback->pose.orientation, orientation); 45 | tf::Vector3 position; 46 | tf::pointMsgToTF(feedback->pose.position, position); 47 | lastOriginPose_ = tf::Transform(orientation, position); 48 | 49 | // VolumePosePublisher::Update(); 50 | 51 | ROS_INFO_STREAM( feedback->marker_name << " is now at " << feedback->pose.position.x << ", " << feedback->pose.position.y << ", " << feedback->pose.position.z ); 52 | } 53 | 54 | void Update() { 55 | 56 | tf::Transform currentOriginPose = lastOriginPose_; 57 | // Publish the most recent pose 58 | // ROS_INFO("Start update"); 59 | 60 | geometry_msgs::Point c1, c2, c3, c4, c5, c6, c7, c8; 61 | c1.x = 0; 62 | c1.y = 0; 63 | c1.z = 0; 64 | 65 | c2.x = sizeX_; 66 | c2.y = 0; 67 | c2.z = 0; 68 | 69 | c3.x = 
sizeX_; 70 | c3.y = sizeY_; 71 | c3.z = 0; 72 | 73 | c4.x = 0; 74 | c4.y = sizeY_; 75 | c4.z = 0; 76 | 77 | c5.x = 0; 78 | c5.y = 0; 79 | c5.z = sizeZ_; 80 | 81 | c6.x = sizeX_; 82 | c6.y = 0; 83 | c6.z = sizeZ_; 84 | 85 | c7.x = sizeX_; 86 | c7.y = sizeY_; 87 | c7.z = sizeZ_; 88 | 89 | c8.x = 0; 90 | c8.y = sizeY_; 91 | c8.z = sizeZ_; 92 | 93 | 94 | visualization_msgs::Marker box_line_list; 95 | 96 | box_line_list.header.frame_id = "/volume_pose"; 97 | box_line_list.scale.x = 0.002; 98 | box_line_list.action = visualization_msgs::Marker::ADD; 99 | box_line_list.pose.orientation.w = 1.0; 100 | box_line_list.type = visualization_msgs::Marker::LINE_LIST; 101 | box_line_list.color.r = 1.0; 102 | box_line_list.color.b = 1.0; 103 | box_line_list.color.a = 1.0; 104 | 105 | box_line_list.points.push_back(c1); 106 | box_line_list.points.push_back(c2); 107 | 108 | box_line_list.points.push_back(c2); 109 | box_line_list.points.push_back(c3); 110 | 111 | box_line_list.points.push_back(c3); 112 | box_line_list.points.push_back(c4); 113 | 114 | box_line_list.points.push_back(c4); 115 | box_line_list.points.push_back(c1); 116 | 117 | 118 | box_line_list.points.push_back(c5); 119 | box_line_list.points.push_back(c6); 120 | 121 | box_line_list.points.push_back(c6); 122 | box_line_list.points.push_back(c7); 123 | 124 | box_line_list.points.push_back(c7); 125 | box_line_list.points.push_back(c8); 126 | 127 | box_line_list.points.push_back(c8); 128 | box_line_list.points.push_back(c5); 129 | 130 | 131 | box_line_list.points.push_back(c1); 132 | box_line_list.points.push_back(c5); 133 | 134 | box_line_list.points.push_back(c2); 135 | box_line_list.points.push_back(c6); 136 | 137 | box_line_list.points.push_back(c3); 138 | box_line_list.points.push_back(c7); 139 | 140 | box_line_list.points.push_back(c4); 141 | box_line_list.points.push_back(c8); 142 | 143 | 144 | tf::StampedTransform transformStamped(currentOriginPose, ros::Time::now(), "base_link", "volume_pose"); 145 | 146 | 
broadcaster_.sendTransform(transformStamped); 147 | 148 | box_line_pub_.publish(box_line_list); 149 | 150 | 151 | // ROS_INFO("update"); 152 | } 153 | 154 | 155 | ros::Publisher box_line_pub_; 156 | 157 | double sizeX_; 158 | double sizeY_; 159 | double sizeZ_; 160 | 161 | ros::Subscriber subscriber_; 162 | tf::TransformBroadcaster broadcaster_; 163 | tf::Transform lastOriginPose_; 164 | }; 165 | 166 | //void processFeedback( 167 | // const visualization_msgs::InteractiveMarkerFeedbackConstPtr &feedback ) 168 | //{ 169 | // ROS_INFO_STREAM( feedback->marker_name << " is now at " << feedback->pose.position.x << ", " << feedback->pose.position.y << ", " << feedback->pose.position.z ); 170 | //} 171 | 172 | int main(int argc, char** argv) { 173 | ros::init(argc, argv, "volume_tf_broadcaster"); 174 | ros::NodeHandle nh; 175 | 176 | VolumePosePublisher vol(nh); 177 | // vol.subscriber_ = nh.subscribe("/volume_marker/feedback", 1000, &VolumePosePublisher::volumeTFCallback, &vol); 178 | 179 | ros::Rate rate(10.0); 180 | 181 | while (ros::ok()) { 182 | vol.Update(); 183 | rate.sleep(); 184 | ros::spinOnce(); 185 | } 186 | 187 | // ros::spin(); 188 | // return 0; 189 | } 190 | -------------------------------------------------------------------------------- /yak/src/tsdf_volume.cpp: -------------------------------------------------------------------------------- 1 | #include "yak/kfusion/precomp.hpp" 2 | #include 3 | using namespace kfusion; 4 | using namespace kfusion::cuda; 5 | 6 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// 7 | /// TsdfVolume::Entry 8 | 9 | float kfusion::cuda::TsdfVolume::Entry::half2float(half) 10 | { 11 | throw "Not implemented"; 12 | } 13 | 14 | kfusion::cuda::TsdfVolume::Entry::half kfusion::cuda::TsdfVolume::Entry::float2half(float value) 15 | { 16 | throw "Not implemented"; 17 | } 18 | 19 | 
//////////////////////////////////////////////////////////////////////////////////////////////////////////////// 20 | /// TsdfVolume 21 | 22 | kfusion::cuda::TsdfVolume::TsdfVolume(const Vec3i& dims) : 23 | data_(), trunc_dist_(0.03f), max_weight_(128), dims_(dims), size_(Vec3f::all(3.f)), pose_(Affine3f::Identity()), gradient_delta_factor_(0.75f), raycast_step_factor_(0.75f) 24 | { 25 | create(dims_); 26 | } 27 | 28 | kfusion::cuda::TsdfVolume::~TsdfVolume() 29 | { 30 | } 31 | 32 | void kfusion::cuda::TsdfVolume::create(const Vec3i& dims) 33 | { 34 | dims_ = dims; 35 | int voxels_number = dims_[0] * dims_[1] * dims_[2]; 36 | data_.create(voxels_number * sizeof(int)); 37 | setTruncDist(trunc_dist_); 38 | clear(); 39 | } 40 | 41 | Vec3i kfusion::cuda::TsdfVolume::getDims() const 42 | { 43 | return dims_; 44 | } 45 | 46 | Vec3f kfusion::cuda::TsdfVolume::getVoxelSize() const 47 | { 48 | return Vec3f(size_[0] / dims_[0], size_[1] / dims_[1], size_[2] / dims_[2]); 49 | } 50 | 51 | const CudaData kfusion::cuda::TsdfVolume::data() const 52 | { 53 | return data_; 54 | } 55 | CudaData kfusion::cuda::TsdfVolume::data() 56 | { 57 | return data_; 58 | } 59 | Vec3f kfusion::cuda::TsdfVolume::getSize() const 60 | { 61 | return size_; 62 | } 63 | 64 | void kfusion::cuda::TsdfVolume::setSize(const Vec3f& size) 65 | { 66 | size_ = size; 67 | setTruncDist(trunc_dist_); 68 | } 69 | 70 | float kfusion::cuda::TsdfVolume::getTruncDist() const 71 | { 72 | return trunc_dist_; 73 | } 74 | 75 | void kfusion::cuda::TsdfVolume::setTruncDist(float distance) 76 | { 77 | Vec3f vsz = getVoxelSize(); 78 | float max_coeff = std::max(std::max(vsz[0], vsz[1]), vsz[2]); 79 | trunc_dist_ = std::max(distance, 2.1f * max_coeff); 80 | } 81 | 82 | int kfusion::cuda::TsdfVolume::getMaxWeight() const 83 | { 84 | return max_weight_; 85 | } 86 | void kfusion::cuda::TsdfVolume::setMaxWeight(int weight) 87 | { 88 | max_weight_ = weight; 89 | } 90 | Affine3f kfusion::cuda::TsdfVolume::getPose() const 91 | { 92 | 
return pose_; 93 | } 94 | void kfusion::cuda::TsdfVolume::setPose(const Affine3f& pose) 95 | { 96 | pose_ = pose; 97 | } 98 | float kfusion::cuda::TsdfVolume::getRaycastStepFactor() const 99 | { 100 | return raycast_step_factor_; 101 | } 102 | void kfusion::cuda::TsdfVolume::setRaycastStepFactor(float factor) 103 | { 104 | raycast_step_factor_ = factor; 105 | } 106 | float kfusion::cuda::TsdfVolume::getGradientDeltaFactor() const 107 | { 108 | return gradient_delta_factor_; 109 | } 110 | void kfusion::cuda::TsdfVolume::setGradientDeltaFactor(float factor) 111 | { 112 | gradient_delta_factor_ = factor; 113 | } 114 | void kfusion::cuda::TsdfVolume::swap(CudaData& data) 115 | { 116 | data_.swap(data); 117 | } 118 | void kfusion::cuda::TsdfVolume::applyAffine(const Affine3f& affine) 119 | { 120 | pose_ = affine * pose_; 121 | } 122 | 123 | void kfusion::cuda::TsdfVolume::clear() 124 | { 125 | device::Vec3i dims = device_cast(dims_); 126 | device::Vec3f vsz = device_cast(getVoxelSize()); 127 | 128 | device::TsdfVolume volume(data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 129 | device::clear_volume(volume); 130 | } 131 | 132 | void kfusion::cuda::TsdfVolume::integrate(const Dists& dists, const Affine3f& camera_motion_tform, const Intr& intr) 133 | { 134 | // TODO: change this so instead of updating the pose estimate from measured sensor motion, vol2cam is set using the actual transform from the camera to the center of the volume 135 | // What does this even do, exactly? As written it's calculating the new transform from the frame of the volume to the frame of the camera after applying the motion of the most recent measurement. 
136 | Affine3f vol2cam = camera_motion_tform.inv() * pose_; 137 | device::Projector proj(intr.fx, intr.fy, intr.cx, intr.cy); 138 | 139 | device::Vec3i dims = device_cast(dims_); 140 | device::Vec3f vsz = device_cast(getVoxelSize()); 141 | device::Aff3f aff = device_cast(vol2cam); 142 | 143 | device::TsdfVolume volume(data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 144 | device::integrate(dists, volume, aff, proj); 145 | } 146 | 147 | void kfusion::cuda::TsdfVolume::raycast(const Affine3f& camera_pose, const Intr& intr, Depth& depth, Normals& normals) 148 | { 149 | DeviceArray2D& n = (DeviceArray2D&) normals; 150 | 151 | Affine3f cam2vol = pose_.inv() * camera_pose; 152 | 153 | device::Aff3f aff = device_cast(cam2vol); 154 | device::Mat3f Rinv = device_cast(cam2vol.rotation().inv(cv::DECOMP_SVD)); 155 | 156 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); 157 | 158 | device::Vec3i dims = device_cast(dims_); 159 | device::Vec3f vsz = device_cast(getVoxelSize()); 160 | 161 | device::TsdfVolume volume(data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 162 | device::raycast(volume, aff, Rinv, reproj, depth, n, raycast_step_factor_, gradient_delta_factor_); 163 | 164 | } 165 | 166 | void kfusion::cuda::TsdfVolume::raycast(const Affine3f& camera_pose, const Intr& intr, Cloud& points, Normals& normals) 167 | { 168 | device::Normals& n = (device::Normals&) normals; 169 | device::Points& p = (device::Points&) points; 170 | 171 | Affine3f cam2vol = pose_.inv() * camera_pose; 172 | 173 | device::Aff3f aff = device_cast(cam2vol); 174 | device::Mat3f Rinv = device_cast(cam2vol.rotation().inv(cv::DECOMP_SVD)); 175 | 176 | device::Reprojector reproj(intr.fx, intr.fy, intr.cx, intr.cy); 177 | 178 | device::Vec3i dims = device_cast(dims_); 179 | device::Vec3f vsz = device_cast(getVoxelSize()); 180 | 181 | device::TsdfVolume volume(data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 182 | device::raycast(volume, aff, Rinv, reproj, p, n, raycast_step_factor_, 
gradient_delta_factor_); 183 | } 184 | 185 | DeviceArray kfusion::cuda::TsdfVolume::fetchCloud(DeviceArray& cloud_buffer) const 186 | { 187 | enum 188 | { 189 | DEFAULT_CLOUD_BUFFER_SIZE = 10 * 1000 * 1000 190 | }; 191 | 192 | if (cloud_buffer.empty()) 193 | cloud_buffer.create(DEFAULT_CLOUD_BUFFER_SIZE); 194 | 195 | DeviceArray& b = (DeviceArray&) cloud_buffer; 196 | 197 | device::Vec3i dims = device_cast(dims_); 198 | device::Vec3f vsz = device_cast(getVoxelSize()); 199 | device::Aff3f aff = device_cast(pose_); 200 | 201 | device::TsdfVolume volume((ushort2*) data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 202 | size_t size = extractCloud(volume, aff, b); 203 | 204 | return DeviceArray((Point*) cloud_buffer.ptr(), size); 205 | } 206 | 207 | void kfusion::cuda::TsdfVolume::fetchNormals(const DeviceArray& cloud, DeviceArray& normals) const 208 | { 209 | normals.create(cloud.size()); 210 | DeviceArray& c = (DeviceArray&) cloud; 211 | 212 | device::Vec3i dims = device_cast(dims_); 213 | device::Vec3f vsz = device_cast(getVoxelSize()); 214 | device::Aff3f aff = device_cast(pose_); 215 | device::Mat3f Rinv = device_cast(pose_.rotation().inv(cv::DECOMP_SVD)); 216 | 217 | device::TsdfVolume volume((ushort2*) data_.ptr(), dims, vsz, trunc_dist_, max_weight_); 218 | device::extractNormals(volume, c, aff, Rinv, gradient_delta_factor_, (float4*) normals.ptr()); 219 | } 220 | -------------------------------------------------------------------------------- /yak/srv/GetMesh.srv: -------------------------------------------------------------------------------- 1 | #Request 2 | --- 3 | bool value 4 | 5 | #Respose 6 | -------------------------------------------------------------------------------- /yak/srv/GetSparseTSDF.srv: -------------------------------------------------------------------------------- 1 | #Request 2 | --- 3 | SparseTSDF tsdf 4 | #Respose 5 | -------------------------------------------------------------------------------- /yak/srv/GetTSDF.srv: 
-------------------------------------------------------------------------------- 1 | #Request 2 | --- 3 | TSDF tsdf 4 | #Respose -------------------------------------------------------------------------------- /yak/srv/ResetVolume.srv: -------------------------------------------------------------------------------- 1 | #Request 2 | --- 3 | bool value 4 | 5 | #Respose 6 | -------------------------------------------------------------------------------- /yak_meshing/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | 3 | project(yak_meshing) 4 | add_definitions(-std=c++11) 5 | 6 | find_package(catkin REQUIRED COMPONENTS 7 | message_generation 8 | rosconsole 9 | roscpp 10 | rospy 11 | std_msgs 12 | sensor_msgs 13 | tf 14 | pcl_ros 15 | visualization_msgs 16 | yak 17 | pcl_ros 18 | pcl_msgs 19 | ) 20 | 21 | find_library(OPENVDB_LIBRARY NAMES openvdb) 22 | find_library(TBB_LIBRARY NAMES tbb) 23 | find_library(HALF_LIBRARY NAMES Half) 24 | 25 | add_service_files( 26 | FILES 27 | GetMesh.srv 28 | ) 29 | 30 | generate_messages( 31 | DEPENDENCIES 32 | std_msgs 33 | geometry_msgs 34 | pcl_msgs 35 | ) 36 | catkin_package( 37 | INCLUDE_DIRS include 38 | CATKIN_DEPENDS 39 | message_runtime 40 | rosconsole 41 | roscpp 42 | rospy 43 | std_msgs 44 | sensor_msgs 45 | tf 46 | pcl_ros 47 | pcl_msgs 48 | visualization_msgs 49 | yak 50 | ) 51 | 52 | include_directories( 53 | include 54 | ${catkin_INCLUDE_DIRS} 55 | ) 56 | 57 | add_executable(${PROJECT_NAME}_node src/yak_meshing_node.cpp) 58 | add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 59 | 60 | target_link_libraries(${PROJECT_NAME}_node 61 | ${catkin_LIBRARIES} 62 | openvdb tbb Half 63 | ) 64 | -------------------------------------------------------------------------------- /yak_meshing/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 
| yak_meshing 4 | 0.0.0 5 | The yak_meshing package 6 | 7 | AustinDeric 8 | 9 | MIT 10 | 11 | catkin 12 | 13 | message_generation 14 | 15 | roscpp 16 | rospy 17 | std_msgs 18 | yak 19 | rosconsole 20 | sensor_msgs 21 | tf 22 | pcl_ros 23 | pcl_msgs 24 | visualization_msgs 25 | 26 | message_runtime 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /yak_meshing/src/yak_meshing_node.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | 18 | #include 19 | 20 | #include 21 | #include 22 | 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | class GenerateMesh 30 | { 31 | public: 32 | GenerateMesh(ros::NodeHandle& nh) 33 | { 34 | tsdf_client_ = nh.serviceClient("/kinfu/get_tsdf"); 35 | tsdf_sparse_client_ = nh.serviceClient("/kinfu/get_sparse_tsdf"); 36 | mesh_server_ = nh.advertiseService("get_mesh", &GenerateMesh::GetMesh, this); 37 | //grid_ = openvdb::FloatGrid::create(/*background value=*/1.0); 38 | if (!nh.param("/meshing/output_path", mesh_output_path_, "/home/joe/meshes")) { 39 | ROS_INFO("Couldn't load output path parameter. Using default path."); 40 | } 41 | 42 | } 43 | 44 | bool GetMesh(yak_meshing::GetMesh::Request& req, yak_meshing::GetMesh::Response &res) { 45 | ROS_INFO("Attempting to get sparse TSDF"); 46 | yak::GetSparseTSDF srv; 47 | if (!tsdf_sparse_client_.call(srv)) 48 | { 49 | ROS_ERROR("Couldn't get sparse TSDF"); 50 | return false; 51 | } 52 | 53 | int sizeX = srv.response.tsdf.size_x; 54 | 55 | ROS_INFO("Got sparse TSDF"); 56 | res.value = true; 57 | 58 | //TODO: Add mesh to service response. 
59 | 60 | float size_x = srv.response.tsdf.size_x; 61 | float size_y = srv.response.tsdf.size_y; 62 | float size_z = srv.response.tsdf.size_z; 63 | 64 | int num_voxels_x = srv.response.tsdf.num_voxels_x; 65 | int num_voxels_y = srv.response.tsdf.num_voxels_y; 66 | int num_voxels_z = srv.response.tsdf.num_voxels_z; 67 | 68 | int length = num_voxels_x*num_voxels_y*num_voxels_y; 69 | 70 | std::vector tsdf_data = srv.response.tsdf.data; 71 | std::vector row_data = srv.response.tsdf.rows; 72 | std::vector col_data = srv.response.tsdf.cols; 73 | std::vector sheet_data = srv.response.tsdf.sheets; 74 | 75 | ROS_INFO("Got volume info from TSDF"); 76 | ROS_INFO_STREAM("Max weight: " << srv.response.tsdf.max_weight); 77 | 78 | //GenerateMesh::MakePointCloud(cloud, tsdf_data, size_x, size_y, size_z, num_voxels_x, num_voxels_y, num_voxels_z); 79 | ROS_INFO("Building voxel volume from serialized sparse data..."); 80 | //GenerateMesh::MakeVoxelGrid(grid, tsdf_data, size_x, size_y, size_z, num_voxels_x, num_voxels_y, num_voxels_z); 81 | GenerateMesh::MakeVoxelGridFromSparse(grid_, tsdf_data, row_data, col_data, sheet_data, size_x, size_y, size_z, num_voxels_x, num_voxels_y, num_voxels_z); 82 | 83 | // ROS_INFO("Making an example sphere..."); 84 | // GenerateMesh::MakeSphereVoxelGrid(grid); 85 | ROS_INFO("Done building volume"); 86 | 87 | ROS_INFO("Meshing voxel volume..."); 88 | openvdb::tools::VolumeToMesh mesher; 89 | mesher.operator()( grid_.operator*() ); 90 | ROS_INFO("Done meshing volume"); 91 | std::string path = mesh_output_path_ + "/new_mesh.obj"; 92 | ROS_INFO_STREAM("Saving .obj to file at " << path << " ..."); 93 | WriteMesh(path.c_str(), mesher); 94 | //WriteMesh("/home/jschornak/meshes/new_mesh.obj", mesher); 95 | ROS_INFO("Done saving!"); 96 | 97 | // convert VDB mesh to pcl_msgs::PolygonMesh and return 98 | pcl::PolygonMesh out_mesh; 99 | ConvertMesh(mesher, out_mesh); 100 | pcl_conversions::fromPCL(out_mesh, res.mesh); 101 | 102 | return true; 103 | } 104 | 105 | 
106 | 107 | bool GetTSDFData(unsigned int input, half_float::half& voxelValue, uint16_t& voxelWeight) { 108 | std::memcpy(&voxelValue, &input, 2); 109 | std::memcpy(&voxelWeight, ((char*)(&input)) + 2, 2); 110 | return true; 111 | } 112 | 113 | bool MakeVoxelGrid(openvdb::FloatGrid::Ptr& grid, std::vector& data, float size_x, float size_y, float size_z, int res_x, int res_y, int res_z) { 114 | typedef typename openvdb::FloatGrid::ValueType ValueT; 115 | 116 | openvdb::FloatGrid::Accessor accessor = grid->getAccessor(); 117 | 118 | float voxel_dim_x = (float)size_x/(float)res_x; 119 | //float voxel_dim_y = (float)size_y/(float)res_y; 120 | //float voxel_dim_z = (float)size_z/(float)res_z; 121 | 122 | ROS_INFO_STREAM("Meters per voxel: " << voxel_dim_x); 123 | ROS_INFO_STREAM("Voxel count: " << data.size()); 124 | 125 | openvdb::Coord ijk; 126 | int &i = ijk[0], &j = ijk[1], &k = ijk[2]; 127 | 128 | for (i = 0; i < res_x; i++) { 129 | for (j = 0; j < res_y; j++) { 130 | for (k = 0; k < res_z; k++) { 131 | int currentData = data[res_y*res_z*k + res_y*j + i]; 132 | half_float::half currentValue; 133 | //float currentValue; 134 | 135 | uint16_t currentWeight; 136 | 137 | GetTSDFData(currentData, currentValue, currentWeight); 138 | ValueT val = ValueT(currentValue); 139 | // ROS_INFO_STREAM("Current weight: " << currentWeight); 140 | if (currentWeight > 0) { 141 | accessor.setValue(ijk, val); 142 | } 143 | } 144 | } 145 | } 146 | grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_dim_x)); 147 | } 148 | 149 | 150 | bool MakeVoxelGridFromSparse(openvdb::FloatGrid::Ptr& grid, std::vector& data, std::vector& rowIndex, std::vector& colIndex, std::vector& sheetIndex, float size_x, float size_y, float size_z, int res_x, int res_y, int res_z) { 151 | typedef typename openvdb::FloatGrid::ValueType ValueT; 152 | 153 | // Initialize grid with background value corresponding to "outside" estimated surface to clear existing data 154 | grid_ = 
openvdb::FloatGrid::create(/*background value=*/1.0); 155 | 156 | openvdb::FloatGrid::Accessor accessor = grid->getAccessor(); 157 | 158 | float voxel_dim_x = (float)size_x/(float)res_x; 159 | //float voxel_dim_y = (float)size_y/(float)res_y; 160 | //float voxel_dim_z = (float)size_z/(float)res_z; 161 | 162 | ROS_INFO_STREAM("Meters per voxel in sparse volume: " << voxel_dim_x); 163 | 164 | openvdb::Coord ijk; 165 | int &i = ijk[0], &j = ijk[1], &k = ijk[2]; 166 | 167 | ROS_INFO_STREAM("Nonzero voxel count: " << data.size()); 168 | ROS_INFO_STREAM("Row index count: " << rowIndex.size()); 169 | ROS_INFO_STREAM("Col index count: " << colIndex.size()); 170 | ROS_INFO_STREAM("Sheet index count: " << sheetIndex.size()); 171 | for (int a = 0; a < data.size()-1; a++) { 172 | i = rowIndex[a]; 173 | j = colIndex[a]; 174 | k = sheetIndex[a]; 175 | 176 | uint32_t currentData = data[a]; 177 | half_float::half currentValue; 178 | uint16_t currentWeight; 179 | GetTSDFData(currentData, currentValue, currentWeight); 180 | ValueT val = ValueT(currentValue); 181 | accessor.setValue(ijk, val); 182 | } 183 | ROS_INFO("Scaling voxel volume"); 184 | // accessor. 
185 | // grid-> 186 | openvdb::tools::signedFloodFill(grid->tree()); 187 | grid->setTransform(openvdb::math::Transform::createLinearTransform(/*voxel size=*/voxel_dim_x)); 188 | } 189 | 190 | 191 | 192 | bool MakeSphereVoxelGrid(openvdb::FloatGrid::Ptr& grid) { 193 | grid = openvdb::tools::createLevelSetSphere(1.0, openvdb::Vec3f(0,0,0), /*voxel size=*/0.5, /*width=*/4.0); 194 | } 195 | openvdb::FloatGrid::Ptr grid_; 196 | 197 | private: 198 | void WriteMesh(const char* filename, 199 | openvdb::tools::VolumeToMesh &mesh ){ 200 | 201 | std::ofstream file; 202 | file.open(filename); 203 | 204 | openvdb::tools::PointList *verts = &mesh.pointList(); 205 | openvdb::tools::PolygonPoolList *polys = &mesh.polygonPoolList(); 206 | 207 | for( size_t i = 0; i < mesh.pointListSize(); i++ ){ 208 | openvdb::Vec3s &v = (*verts)[i]; 209 | file << "v " << v[0] << " " << v[1] << " " << v[2] << std::endl; 210 | } 211 | 212 | for( size_t i = 0; i < mesh.polygonPoolListSize(); i++ ){ 213 | 214 | for( size_t ndx = 0; ndx < (*polys)[i].numTriangles(); ndx++ ){ 215 | openvdb::Vec3I *p = &((*polys)[i].triangle(ndx)); 216 | file << "f " << p->x()+1 << " " << p->y()+1 << " " << p->z()+1 << std::endl; 217 | } 218 | 219 | for( size_t ndx = 0; ndx < (*polys)[i].numQuads(); ndx++ ){ 220 | openvdb::Vec4I *p = &((*polys)[i].quad(ndx)); 221 | file << "f " << p->x()+1 << " " << p->y()+1 << " " << p->z()+1 << " " << p->w()+1 << std::endl; 222 | } 223 | } 224 | 225 | file.close(); 226 | } 227 | 228 | void ConvertMesh(openvdb::tools::VolumeToMesh &in_mesh, pcl::PolygonMesh& out_mesh) 229 | { 230 | openvdb::tools::PointList *verts = &in_mesh.pointList(); 231 | openvdb::tools::PolygonPoolList *polys = &in_mesh.polygonPoolList(); 232 | 233 | pcl::PointCloud out_cloud; 234 | for( size_t i = 0; i < in_mesh.pointListSize(); i++ ){ 235 | openvdb::Vec3s &v = (*verts)[i]; 236 | pcl::PointXYZ pt(v[0], v[1], v[2]); 237 | out_cloud.push_back(pt); 238 | } 239 | 240 | out_mesh.polygons.clear(); 241 | for( size_t i = 
0; i < in_mesh.polygonPoolListSize(); i++ ){ 242 | for( size_t ndx = 0; ndx < (*polys)[i].numTriangles(); ndx++ ){ 243 | pcl::Vertices v; 244 | openvdb::Vec3I *p = &((*polys)[i].triangle(ndx)); 245 | v.vertices.push_back(p->x()+1); 246 | v.vertices.push_back(p->y()+1); 247 | v.vertices.push_back(p->z()+1); 248 | out_mesh.polygons.push_back(v); 249 | } 250 | 251 | for( size_t ndx = 0; ndx < (*polys)[i].numQuads(); ndx++ ){ 252 | openvdb::Vec4I *p = &((*polys)[i].quad(ndx)); 253 | pcl::Vertices v; 254 | v.vertices.push_back(p->x()+1); 255 | v.vertices.push_back(p->y()+1); 256 | v.vertices.push_back(p->z()+1); 257 | v.vertices.push_back(p->w()+1); 258 | out_mesh.polygons.push_back(v); 259 | } 260 | } 261 | pcl::toPCLPointCloud2(out_cloud, out_mesh.cloud); 262 | 263 | } 264 | 265 | 266 | ros::ServiceClient tsdf_client_; 267 | ros::ServiceClient tsdf_sparse_client_; 268 | ros::ServiceServer mesh_server_; 269 | 270 | std::string mesh_output_path_; 271 | 272 | }; 273 | 274 | 275 | 276 | int main(int argc, char* argv[]) 277 | { 278 | openvdb::initialize(); 279 | ros::init(argc, argv, "yak_meshing_node"); 280 | ros::NodeHandle nh; 281 | 282 | GenerateMesh app(nh); 283 | 284 | ros::spin(); 285 | 286 | return 0; 287 | } 288 | -------------------------------------------------------------------------------- /yak_meshing/srv/GetMesh.srv: -------------------------------------------------------------------------------- 1 | #Request 2 | --- 3 | pcl_msgs/PolygonMesh mesh 4 | bool value 5 | 6 | #Respose 7 | --------------------------------------------------------------------------------