├── README.md ├── Supplementary_Material.pdf ├── camera_models_in ├── CMakeLists.txt ├── camera_calib_example │ ├── calibrationdata │ │ ├── left-0000.png │ │ ├── left-0001.png │ │ ├── left-0002.png │ │ ├── left-0003.png │ │ ├── left-0004.png │ │ ├── left-0005.png │ │ ├── left-0006.png │ │ ├── left-0007.png │ │ ├── left-0008.png │ │ ├── left-0009.png │ │ ├── left-0010.png │ │ ├── left-0011.png │ │ ├── left-0012.png │ │ ├── left-0013.png │ │ ├── left-0014.png │ │ ├── left-0015.png │ │ ├── left-0016.png │ │ ├── left-0017.png │ │ ├── left-0018.png │ │ ├── left-0019.png │ │ ├── left-0020.png │ │ ├── left-0021.png │ │ ├── left-0022.png │ │ ├── left-0023.png │ │ ├── left-0024.png │ │ ├── left-0025.png │ │ ├── left-0026.png │ │ ├── left-0027.png │ │ ├── left-0028.png │ │ ├── left-0029.png │ │ ├── left-0030.png │ │ ├── left-0031.png │ │ ├── left-0032.png │ │ ├── left-0033.png │ │ ├── left-0034.png │ │ ├── left-0035.png │ │ ├── left-0036.png │ │ ├── left-0037.png │ │ ├── left-0038.png │ │ ├── left-0039.png │ │ ├── left-0040.png │ │ ├── left-0041.png │ │ ├── left-0042.png │ │ ├── left-0043.png │ │ ├── left-0044.png │ │ ├── left-0045.png │ │ ├── left-0046.png │ │ ├── left-0047.png │ │ ├── left-0048.png │ │ ├── left-0049.png │ │ ├── left-0050.png │ │ ├── left-0051.png │ │ ├── left-0052.png │ │ ├── left-0053.png │ │ ├── left-0054.png │ │ ├── left-0055.png │ │ ├── left-0056.png │ │ ├── left-0057.png │ │ ├── left-0058.png │ │ ├── left-0059.png │ │ ├── left-0060.png │ │ ├── left-0061.png │ │ ├── left-0062.png │ │ ├── left-0063.png │ │ ├── left-0064.png │ │ ├── left-0065.png │ │ ├── left-0066.png │ │ ├── left-0067.png │ │ ├── left-0068.png │ │ ├── left-0069.png │ │ ├── left-0070.png │ │ ├── left-0071.png │ │ ├── left-0072.png │ │ ├── left-0073.png │ │ ├── left-0074.png │ │ ├── left-0075.png │ │ ├── left-0076.png │ │ ├── left-0077.png │ │ ├── left-0078.png │ │ ├── left-0079.png │ │ ├── left-0080.png │ │ ├── left-0081.png │ │ ├── left-0082.png │ │ ├── left-0083.png │ │ └── left-0084.png │ └── readme.txt ├── cmake │ └── FindEigen.cmake ├── include │ └── camodocal │ │ ├── calib │ │ └── CameraCalibration.h │ │ ├── camera_models │ │ ├── Camera.h │ │ ├── CameraFactory.h │ │ ├── CataCamera.h │ │ ├── CostFunctionFactory.h │ │ ├── EquidistantCamera.h │ │ ├── PinholeCamera.h │ │ ├── PinholeFullCamera.h │ │ └── ScaramuzzaCamera.h │ │ ├── chessboard │ │ ├── Chessboard.h │ │ ├── ChessboardCorner.h │ │ ├── ChessboardQuad.h │ │ └── Spline.h │ │ ├── gpl │ │ ├── EigenQuaternionParameterization.h │ │ ├── EigenUtils.h │ │ └── gpl.h │ │ └── sparse_graph │ │ └── Transform.h ├── package.xml ├── readme.md └── src │ ├── calib │ └── CameraCalibration.cc │ ├── camera_models │ ├── Camera.cc │ ├── CameraFactory.cc │ ├── CataCamera.cc │ ├── CostFunctionFactory.cc │ ├── EquidistantCamera.cc │ ├── PinholeCamera.cc │ ├── PinholeFullCamera.cc │ └── ScaramuzzaCamera.cc │ ├── chessboard │ └── Chessboard.cc │ ├── gpl │ ├── EigenQuaternionParameterization.cc │ └── gpl.cc │ ├── intrinsic_calib.cc │ └── sparse_graph │ └── Transform.cc ├── config ├── euroc.yaml ├── openloris_vio.yaml ├── rviz_config_euroc.rviz ├── rviz_config_init.rviz └── struct_core_v2.yaml ├── launch ├── svio_euroc_run.launch ├── svio_openloris_run.launch └── svio_vcu_run.launch ├── result ├── OpenLORIS-Scene │ ├── cafe-1.csv │ ├── cafe-2.csv │ ├── corridor-1.csv │ ├── corridor-2.csv │ ├── corridor-3.csv │ ├── corridor-4.csv │ ├── corridor-5.csv │ ├── home-1.csv │ ├── home-2.csv │ ├── home-3.csv │ ├── home-4.csv │ ├── home-5.csv │ ├── market-1.csv │ ├── market-2.csv │ ├── 
market-3.csv │ ├── office-1.csv │ ├── office-2.csv │ ├── office-3.csv │ ├── office-4.csv │ ├── office-5.csv │ ├── office-6.csv │ └── office-7.csv └── VCU_RVI │ ├── corridor1_tum.csv │ ├── corridor2_tum.csv │ ├── corridor3_tum.csv │ ├── corridor4_tum.csv │ ├── hall1_tum.csv │ ├── hall2_tum.csv │ ├── hall3_tum.csv │ ├── lab-dynamic1_tum.csv │ ├── lab-dynamic2_tum.csv │ ├── lab-dynamic3_tum.csv │ ├── lab-dynamic4_tum.csv │ ├── lab-dynamic5_tum.csv │ ├── lab-light1_tum.csv │ ├── lab-light2_tum.csv │ ├── lab-light3_tum.csv │ ├── lab-light4_tum.csv │ ├── lab-light5_tum.csv │ ├── lab-light6_tum.csv │ ├── lab-motion1_tum.csv │ ├── lab-motion2_tum.csv │ ├── lab-motion3_tum.csv │ ├── lab-motion4_tum.csv │ ├── lab-motion5_tum.csv │ └── lab-motion6_tum.csv └── svio ├── CMakeLists.txt ├── feature_tracking ├── feature_tracker.cpp └── feature_tracker.h ├── initialization ├── gmm_model.cpp ├── gmm_model.h ├── initial_aligment.cpp ├── initial_alignment.h ├── initial_ex_rotation.cpp ├── initial_ex_rotation.h ├── initial_sfm.cpp ├── initial_sfm.h ├── solve_5pts.cpp ├── solve_5pts.h ├── solve_opt.cpp ├── solve_opt.h ├── translate_factor.cpp └── translate_factor.h ├── line_tracking ├── line_tracker.cpp └── line_tracker.h ├── package.xml ├── plane_tools ├── PlaneExtractor.cpp ├── PlaneExtractor.h └── peac │ ├── AHCParamSet.hpp │ ├── AHCPlaneFitter.hpp │ ├── AHCPlaneSeg.hpp │ ├── AHCTypes.hpp │ ├── AHCUtils.hpp │ ├── DisjointSet.hpp │ └── eig33sym.hpp ├── state_estimation ├── depth_factor.cpp ├── depth_factor.h ├── feature_manager.cpp ├── feature_manager.h ├── frontend.cpp ├── frontend.h ├── imu_factor.h ├── integration_base.h ├── line_manager.cpp ├── line_manager.h ├── line_parameterization.cpp ├── line_parameterization.h ├── line_projection_factor.cpp ├── line_projection_factor.h ├── marginalization_factor.cpp ├── marginalization_factor.h ├── mns_alg.cpp ├── mns_alg.h ├── parameters.cpp ├── parameters.h ├── plane_factor.cpp ├── plane_factor.h ├── plane_manager.cpp ├── plane_manager.h ├── projection_quat.cpp ├── projection_quat.h ├── structure.h ├── svio.cpp ├── svio.h ├── svio_init.cpp ├── svio_stereo_node.cpp ├── svio_syn_node.cpp ├── vanish_point_factor.cpp └── vanish_point_factor.h └── utility ├── CameraPoseVisualization.cpp ├── CameraPoseVisualization.h ├── tic_toc.h ├── utility.cpp ├── utility.h ├── visualization.cpp └── visualization.h /README.md: -------------------------------------------------------------------------------- 1 | # SVIO 2 | ## An RGB-D Visual-Inertial Odometry Leveraging Structural Regularity 3 | 4 | **Related Paper** 5 | 6 | Pengfei Gu and Ziyang Meng, **"S-VIO: Exploiting Structural Constraints for RGB-D Visual Inertial Odometry"**, *IEEE Robotics and Automation Letters*, 2023. [pdf](https://ieeexplore.ieee.org/document/10107752) 7 | 8 | ## 1. Prerequisites 9 | ### 1.1 **Ubuntu** and **ROS** 10 | Tested on Ubuntu 18.04 and ROS Melodic. Follow [ROS Installation](http://wiki.ros.org/ROS/Installation). The following ROS packages are needed: 11 | ``` 12 | sudo apt-get install ros-YOUR_DISTRO-cv-bridge ros-YOUR_DISTRO-tf ros-YOUR_DISTRO-message-filters ros-YOUR_DISTRO-image-transport 13 | ``` 14 | 15 | ### 1.2 **Ceres Solver** 16 | Follow [Ceres Installation](http://ceres-solver.org/installation.html). 17 | 18 | ## 2.
Build SVIO on ROS 19 | Clone the repository and run catkin_make: 20 | ``` 21 | cd YOUR_PATH_TO_SVIO/SVIO/src 22 | git clone https://github.com/oranfire/SVIO.git 23 | cd ../ 24 | catkin_make 25 | ``` 26 | Before running SVIO, remember to source the setup script first: 27 | ``` 28 | source YOUR_PATH_TO_SVIO/SVIO/devel/setup.bash 29 | ``` 30 | 31 | ## 3. Usage 32 | ### 3.1 VCU-RVI handheld dataset 33 | Download [VCU-RVI Dataset](https://github.com/rising-turtle/VCU_RVI_Benchmark). We use the time-synchronized IMU measurements and RGB-D images for localization. Open two terminals, then launch the estimator in one and play the bag in the other. Take motion_1 for example: 34 | ``` 35 | roslaunch YOUR_PATH_TO_SVIO/SVIO/src/launch/svio_vcu_run.launch 36 | rosbag play YOUR_PATH_TO_BAG/lab-motion1.bag 37 | ``` 38 | It will automatically launch rviz for visualization. 39 | 40 | ### 3.2 OpenLORIS-Scene wheeled robot dataset 41 | Download [OpenLORIS-Scene Dataset](https://lifelong-robotic-vision.github.io/dataset/scene). Although it contains multiple sensors, we only use the RGB-D images and IMU measurements from the d400 depth camera. Open two terminals, then launch the estimator in one and play the bag in the other. Take home1-1 for example: 42 | ``` 43 | roslaunch YOUR_PATH_TO_SVIO/SVIO/src/launch/svio_openloris_run.launch 44 | rosbag play YOUR_PATH_TO_BAG/home1-1.bag 45 | ``` 46 | Note that the bag files provided by the OpenLORIS-Scene dataset record the acceleration and the angular velocity in two separate topics. Therefore, before playing the bag, merge these two topics into one using the script provided by [OpenLORIS-Scene Tools](https://github.com/lifelong-robotic-vision/openloris-scene-tools): 47 | ``` 48 | python merge_imu_topics.py YOUR_PATH_TO_BAG/home1-1.bag 49 | ``` 50 | 51 | ### 3.3 EuRoC MAV dataset 52 | As a bonus, SVIO also provides an interface for a stereo-inertial dataset, [EuRoC Dataset](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets), where the depth image is computed from the rectified stereo images using the SGM algorithm. Take mh01 for example: 53 | ``` 54 | roslaunch YOUR_PATH_TO_SVIO/SVIO/src/launch/svio_euroc_run.launch 55 | rosbag play YOUR_PATH_TO_BAG/mh01.bag 56 | ``` 57 | 58 | ## 4. Credits 59 | Many thanks to the authors of [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono), [DUI-VIO](https://github.com/rising-turtle/DUI_VIO) and [ManhattanSLAM](https://github.com/razayunus/ManhattanSLAM). Our system is built on the first two projects, and part of the code is borrowed from the third project. 60 | 61 | ## 5. Citation 62 | ``` 63 | @ARTICLE{10107752, 64 | author={Gu, Pengfei and Meng, Ziyang}, 65 | journal={IEEE Robotics and Automation Letters}, 66 | title={S-VIO: Exploiting Structural Constraints for RGB-D Visual Inertial Odometry}, 67 | year={2023}, 68 | volume={8}, 69 | number={6}, 70 | pages={3542-3549}, 71 | doi={10.1109/LRA.2023.3270033}} 72 | ``` 73 | 74 | ## 6. License 75 | The source code is released under the [GPLv3](http://www.gnu.org/licenses/) license.
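For readers who want to reproduce the stereo-to-depth step mentioned in Section 3.3, the sketch below shows one way to compute a metric depth image from a rectified EuRoC stereo pair with OpenCV's semi-global matching (StereoSGBM). This is a minimal illustration, not the code path used inside SVIO: the input file names, the SGBM parameters, and the fx/baseline values are placeholders and should be replaced with the values from the actual camera calibration (e.g. config/euroc.yaml).

```
// sgm_depth_example.cpp -- illustrative only; all numeric parameters are placeholders.
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Rectified grayscale stereo pair (already undistorted and row-aligned).
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);

    // Semi-global matching; numDisparities must be a multiple of 16.
    const int blockSize = 7;
    cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
        /*minDisparity=*/0, /*numDisparities=*/96, blockSize,
        /*P1=*/8 * blockSize * blockSize, /*P2=*/32 * blockSize * blockSize);

    cv::Mat disp16;                      // fixed-point disparity, scaled by 16
    sgbm->compute(left, right, disp16);

    cv::Mat disp;
    disp16.convertTo(disp, CV_32F, 1.0 / 16.0);

    // Depth [m] = fx [px] * baseline [m] / disparity [px].
    const float fx = 435.2f;             // placeholder: rectified focal length
    const float baseline = 0.11f;        // placeholder: true stereo baseline
    cv::Mat depth(disp.size(), CV_32F, cv::Scalar(0));
    for (int v = 0; v < disp.rows; ++v)
        for (int u = 0; u < disp.cols; ++u)
        {
            float d = disp.at<float>(v, u);
            if (d > 1.0f) depth.at<float>(v, u) = fx * baseline / d;
        }

    // Store depth as a 16-bit PNG in millimetres, a common RGB-D convention.
    cv::Mat depth_mm;
    depth.convertTo(depth_mm, CV_16U, 1000.0);
    cv::imwrite("depth.png", depth_mm);
    return 0;
}
```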
-------------------------------------------------------------------------------- /Supplementary_Material.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/Supplementary_Material.pdf -------------------------------------------------------------------------------- /camera_models_in/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(camera_models_in) 3 | 4 | set(CMAKE_BUILD_TYPE "Release") 5 | set(CMAKE_CXX_FLAGS "-std=c++11") 6 | set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC") 7 | 8 | find_package(catkin REQUIRED COMPONENTS 9 | roscpp 10 | std_msgs 11 | ) 12 | 13 | find_package(Boost REQUIRED COMPONENTS filesystem program_options system) 14 | include_directories(${Boost_INCLUDE_DIRS}) 15 | 16 | #set(OpenCV_DIR "/home/oranfire/WS/KU/opencv-3.2.0/opencv/build") 17 | find_package(OpenCV REQUIRED) 18 | 19 | # set(EIGEN_INCLUDE_DIR "/usr/local/include/eigen3") 20 | find_package(Ceres REQUIRED) 21 | include_directories(${CERES_INCLUDE_DIRS}) 22 | 23 | 24 | catkin_package( 25 | INCLUDE_DIRS include 26 | LIBRARIES camera_models_in 27 | CATKIN_DEPENDS roscpp std_msgs 28 | # DEPENDS system_lib 29 | ) 30 | 31 | include_directories( 32 | ${catkin_INCLUDE_DIRS} 33 | ) 34 | 35 | include_directories("include") 36 | 37 | add_executable(Calibrations_in 38 | src/intrinsic_calib.cc 39 | src/chessboard/Chessboard.cc 40 | src/calib/CameraCalibration.cc 41 | src/camera_models/Camera.cc 42 | src/camera_models/CameraFactory.cc 43 | src/camera_models/CostFunctionFactory.cc 44 | src/camera_models/PinholeCamera.cc 45 | src/camera_models/PinholeFullCamera.cc 46 | src/camera_models/CataCamera.cc 47 | src/camera_models/EquidistantCamera.cc 48 | src/camera_models/ScaramuzzaCamera.cc 49 | src/sparse_graph/Transform.cc 50 | src/gpl/gpl.cc 51 | src/gpl/EigenQuaternionParameterization.cc) 52 | 53 | add_library(camera_models_in 54 | src/chessboard/Chessboard.cc 55 | src/calib/CameraCalibration.cc 56 | src/camera_models/Camera.cc 57 | src/camera_models/CameraFactory.cc 58 | src/camera_models/CostFunctionFactory.cc 59 | src/camera_models/PinholeCamera.cc 60 | src/camera_models/PinholeFullCamera.cc 61 | src/camera_models/CataCamera.cc 62 | src/camera_models/EquidistantCamera.cc 63 | src/camera_models/ScaramuzzaCamera.cc 64 | src/sparse_graph/Transform.cc 65 | src/gpl/gpl.cc 66 | src/gpl/EigenQuaternionParameterization.cc) 67 | 68 | target_link_libraries(Calibrations_in ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES}) 69 | target_link_libraries(camera_models_in ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES}) 70 | -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0000.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0001.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0002.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0003.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0004.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0005.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0006.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0007.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0008.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0009.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0010.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0011.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0012.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0013.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0014.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0015.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0016.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0017.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0018.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0019.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0020.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0021.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0022.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0022.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0023.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0024.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0025.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0026.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0027.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0028.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0029.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0030.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0031.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0031.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0032.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0033.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0034.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0035.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0035.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0036.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0036.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0037.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0038.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0038.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0039.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0040.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0040.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0041.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0042.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0043.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0044.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0045.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0046.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0046.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0047.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0048.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0049.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0049.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0050.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0051.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0052.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0052.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0053.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0054.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0055.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0056.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0057.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0058.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0059.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0060.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0060.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0061.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0061.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0062.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0062.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0063.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0064.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0064.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0065.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0066.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0066.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0067.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0067.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0068.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0068.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0069.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0069.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0070.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0070.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0071.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0071.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0072.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0072.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0073.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0073.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0074.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0075.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0075.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0076.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0076.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0077.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0078.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0078.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0079.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0079.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0080.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0080.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0081.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0082.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0082.png 
-------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0083.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0083.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/calibrationdata/left-0084.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oranfire/SVIO/18cdfbcffecedaef593089e66db529becb750327/camera_models_in/camera_calib_example/calibrationdata/left-0084.png -------------------------------------------------------------------------------- /camera_models_in/camera_calib_example/readme.txt: -------------------------------------------------------------------------------- 1 | # help for checking input parameters. 2 | rosrun camera_models Calibrations --help 3 | 4 | # example pinhole model. 5 | rosrun camera_models Calibrations -w 12 -h 8 -s 80 -i calibrationdata --camera-model pinhole 6 | 7 | # example mei model. 8 | rosrun camera_models Calibrations -w 12 -h 8 -s 80 -i calibrationdata --camera-model mei 9 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/calib/CameraCalibration.h: -------------------------------------------------------------------------------- 1 | #ifndef CAMERACALIBRATION_H 2 | #define CAMERACALIBRATION_H 3 | 4 | #include 5 | 6 | #include "camodocal/camera_models/Camera.h" 7 | 8 | namespace camodocal 9 | { 10 | 11 | class CameraCalibration 12 | { 13 | public: 14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 15 | CameraCalibration(); 16 | 17 | CameraCalibration(Camera::ModelType modelType, 18 | const std::string& cameraName, 19 | const cv::Size& imageSize, 20 | const cv::Size& boardSize, 21 | float squareSize); 22 | 23 | void clear(void); 24 | 25 | void addChessboardData(const std::vector& corners); 26 | 27 | bool calibrate(void); 28 | 29 | int sampleCount(void) const; 30 | std::vector >& imagePoints(void); 31 | const std::vector >& imagePoints(void) const; 32 | std::vector >& scenePoints(void); 33 | const std::vector >& scenePoints(void) const; 34 | CameraPtr& camera(void); 35 | const CameraConstPtr camera(void) const; 36 | 37 | Eigen::Matrix2d& measurementCovariance(void); 38 | const Eigen::Matrix2d& measurementCovariance(void) const; 39 | 40 | cv::Mat& cameraPoses(void); 41 | const cv::Mat& cameraPoses(void) const; 42 | 43 | void drawResults(std::vector& images) const; 44 | 45 | void writeParams(const std::string& filename) const; 46 | 47 | bool writeChessboardData(const std::string& filename) const; 48 | bool readChessboardData(const std::string& filename); 49 | 50 | void setVerbose(bool verbose); 51 | 52 | private: 53 | bool calibrateHelper(CameraPtr& camera, 54 | std::vector& rvecs, std::vector& tvecs) const; 55 | 56 | void optimize(CameraPtr& camera, 57 | std::vector& rvecs, std::vector& tvecs) const; 58 | 59 | template 60 | void readData(std::ifstream& ifs, T& data) const; 61 | 62 | template 63 | void writeData(std::ofstream& ofs, T data) const; 64 | 65 | cv::Size m_boardSize; 66 | float m_squareSize; 67 | 68 | CameraPtr m_camera; 69 | cv::Mat m_cameraPoses; 70 | 71 | std::vector > m_imagePoints; 72 | std::vector > m_scenePoints; 73 | 74 | Eigen::Matrix2d m_measurementCovariance; 75 | 76 | bool 
m_verbose; 77 | }; 78 | 79 | } 80 | 81 | #endif 82 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/camera_models/Camera.h: -------------------------------------------------------------------------------- 1 | #ifndef CAMERA_H 2 | #define CAMERA_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace camodocal 10 | { 11 | 12 | class Camera 13 | { 14 | public: 15 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 16 | enum ModelType 17 | { 18 | KANNALA_BRANDT, 19 | MEI, 20 | PINHOLE, 21 | PINHOLE_FULL, 22 | SCARAMUZZA 23 | }; 24 | 25 | class Parameters 26 | { 27 | public: 28 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 29 | Parameters( ModelType modelType ); 30 | 31 | Parameters( ModelType modelType, const std::string& cameraName, int w, int h ); 32 | 33 | ModelType& modelType( void ); 34 | std::string& cameraName( void ); 35 | int& imageWidth( void ); 36 | int& imageHeight( void ); 37 | 38 | ModelType modelType( void ) const; 39 | const std::string& cameraName( void ) const; 40 | int imageWidth( void ) const; 41 | int imageHeight( void ) const; 42 | 43 | int nIntrinsics( void ) const; 44 | 45 | virtual bool readFromYamlFile( const std::string& filename ) = 0; 46 | virtual void writeToYamlFile( const std::string& filename ) const = 0; 47 | 48 | protected: 49 | ModelType m_modelType; 50 | int m_nIntrinsics; 51 | std::string m_cameraName; 52 | int m_imageWidth; 53 | int m_imageHeight; 54 | }; 55 | 56 | virtual ModelType modelType( void ) const = 0; 57 | virtual const std::string& cameraName( void ) const = 0; 58 | virtual int imageWidth( void ) const = 0; 59 | virtual int imageHeight( void ) const = 0; 60 | 61 | virtual cv::Mat& mask( void ); 62 | virtual const cv::Mat& mask( void ) const; 63 | 64 | virtual void estimateIntrinsics( const cv::Size& boardSize, 65 | const std::vector< std::vector< cv::Point3f > >& objectPoints, 66 | const std::vector< std::vector< cv::Point2f > >& imagePoints ) 67 | = 0; 68 | virtual void estimateExtrinsics( const std::vector< cv::Point3f >& objectPoints, 69 | const std::vector< cv::Point2f >& imagePoints, 70 | cv::Mat& rvec, 71 | cv::Mat& tvec ) const; 72 | 73 | // Lift points from the image plane to the sphere 74 | virtual void liftSphere( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0; 75 | //%output P 76 | 77 | // Lift points from the image plane to the projective space 78 | virtual void liftProjective( const Eigen::Vector2d& p, Eigen::Vector3d& P ) const = 0; 79 | //%output P 80 | 81 | // Projects 3D points to the image plane (Pi function) 82 | virtual void spaceToPlane( const Eigen::Vector3d& P, Eigen::Vector2d& p ) const = 0; 83 | //%output p 84 | 85 | // Projects 3D points to the image plane (Pi function) 86 | // and calculates jacobian 87 | // virtual void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 88 | // Eigen::Matrix& J) const = 0; 89 | //%output p 90 | //%output J 91 | 92 | virtual void undistToPlane( const Eigen::Vector2d& p_u, Eigen::Vector2d& p ) const = 0; 93 | //%output p 94 | 95 | // virtual void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) 96 | // const = 0; 97 | virtual cv::Mat initUndistortRectifyMap( cv::Mat& map1, 98 | cv::Mat& map2, 99 | float fx = -1.0f, 100 | float fy = -1.0f, 101 | cv::Size imageSize = cv::Size( 0, 0 ), 102 | float cx = -1.0f, 103 | float cy = -1.0f, 104 | cv::Mat rmat = cv::Mat::eye( 3, 3, CV_32F ) ) const = 0; 105 | 106 | virtual int parameterCount( void ) const = 0; 107 | 108 | virtual void readParameters( 
const std::vector< double >& parameters ) = 0; 109 | virtual void writeParameters( std::vector< double >& parameters ) const = 0; 110 | 111 | virtual void writeParametersToYamlFile( const std::string& filename ) const = 0; 112 | 113 | virtual std::string parametersToString( void ) const = 0; 114 | 115 | /** 116 | * \brief Calculates the reprojection distance between points 117 | * 118 | * \param P1 first 3D point coordinates 119 | * \param P2 second 3D point coordinates 120 | * \return euclidean distance in the plane 121 | */ 122 | double reprojectionDist( const Eigen::Vector3d& P1, const Eigen::Vector3d& P2 ) const; 123 | 124 | double reprojectionError( const std::vector< std::vector< cv::Point3f > >& objectPoints, 125 | const std::vector< std::vector< cv::Point2f > >& imagePoints, 126 | const std::vector< cv::Mat >& rvecs, 127 | const std::vector< cv::Mat >& tvecs, 128 | cv::OutputArray perViewErrors = cv::noArray( ) ) const; 129 | 130 | double reprojectionError( const Eigen::Vector3d& P, 131 | const Eigen::Quaterniond& camera_q, 132 | const Eigen::Vector3d& camera_t, 133 | const Eigen::Vector2d& observed_p ) const; 134 | 135 | void projectPoints( const std::vector< cv::Point3f >& objectPoints, 136 | const cv::Mat& rvec, 137 | const cv::Mat& tvec, 138 | std::vector< cv::Point2f >& imagePoints ) const; 139 | 140 | protected: 141 | cv::Mat m_mask; 142 | }; 143 | 144 | typedef boost::shared_ptr< Camera > CameraPtr; 145 | typedef boost::shared_ptr< const Camera > CameraConstPtr; 146 | } 147 | 148 | #endif 149 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/camera_models/CameraFactory.h: -------------------------------------------------------------------------------- 1 | #ifndef CAMERAFACTORY_H 2 | #define CAMERAFACTORY_H 3 | 4 | #include 5 | #include 6 | 7 | #include "camodocal/camera_models/Camera.h" 8 | 9 | namespace camodocal 10 | { 11 | 12 | class CameraFactory 13 | { 14 | public: 15 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 16 | CameraFactory(); 17 | 18 | static boost::shared_ptr instance(void); 19 | 20 | CameraPtr generateCamera(Camera::ModelType modelType, 21 | const std::string& cameraName, 22 | cv::Size imageSize) const; 23 | 24 | CameraPtr generateCameraFromYamlFile(const std::string& filename); 25 | 26 | private: 27 | static boost::shared_ptr m_instance; 28 | }; 29 | 30 | } 31 | 32 | #endif 33 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/camera_models/CostFunctionFactory.h: -------------------------------------------------------------------------------- 1 | #ifndef COSTFUNCTIONFACTORY_H 2 | #define COSTFUNCTIONFACTORY_H 3 | 4 | #include 5 | #include 6 | 7 | #include "camodocal/camera_models/Camera.h" 8 | 9 | namespace ceres 10 | { 11 | class CostFunction; 12 | } 13 | 14 | namespace camodocal 15 | { 16 | 17 | enum 18 | { 19 | CAMERA_INTRINSICS = 1 << 0, 20 | CAMERA_POSE = 1 << 1, 21 | POINT_3D = 1 << 2, 22 | ODOMETRY_INTRINSICS = 1 << 3, 23 | ODOMETRY_3D_POSE = 1 << 4, 24 | ODOMETRY_6D_POSE = 1 << 5, 25 | CAMERA_ODOMETRY_TRANSFORM = 1 << 6 26 | }; 27 | 28 | class CostFunctionFactory 29 | { 30 | public: 31 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 32 | CostFunctionFactory(); 33 | 34 | static boost::shared_ptr instance(void); 35 | 36 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 37 | const Eigen::Vector3d& observed_P, 38 | const Eigen::Vector2d& observed_p, 39 | int flags) const; 40 | 41 | ceres::CostFunction* 
generateCostFunction(const CameraConstPtr& camera, 42 | const Eigen::Vector3d& observed_P, 43 | const Eigen::Vector2d& observed_p, 44 | const Eigen::Matrix2d& sqrtPrecisionMat, 45 | int flags) const; 46 | 47 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 48 | const Eigen::Vector2d& observed_p, 49 | int flags, bool optimize_cam_odo_z = true) const; 50 | 51 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 52 | const Eigen::Vector2d& observed_p, 53 | const Eigen::Matrix2d& sqrtPrecisionMat, 54 | int flags, bool optimize_cam_odo_z = true) const; 55 | 56 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 57 | const Eigen::Vector3d& odo_pos, 58 | const Eigen::Vector3d& odo_att, 59 | const Eigen::Vector2d& observed_p, 60 | int flags, bool optimize_cam_odo_z = true) const; 61 | 62 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera, 63 | const Eigen::Quaterniond& cam_odo_q, 64 | const Eigen::Vector3d& cam_odo_t, 65 | const Eigen::Vector3d& odo_pos, 66 | const Eigen::Vector3d& odo_att, 67 | const Eigen::Vector2d& observed_p, 68 | int flags) const; 69 | 70 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& cameraLeft, 71 | const CameraConstPtr& cameraRight, 72 | const Eigen::Vector3d& observed_P, 73 | const Eigen::Vector2d& observed_p_left, 74 | const Eigen::Vector2d& observed_p_right) const; 75 | 76 | private: 77 | static boost::shared_ptr m_instance; 78 | }; 79 | 80 | } 81 | 82 | #endif 83 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/camera_models/PinholeCamera.h: -------------------------------------------------------------------------------- 1 | #ifndef PINHOLECAMERA_H 2 | #define PINHOLECAMERA_H 3 | 4 | #include 5 | #include 6 | 7 | #include "ceres/rotation.h" 8 | #include "Camera.h" 9 | 10 | namespace camodocal 11 | { 12 | 13 | class PinholeCamera: public Camera 14 | { 15 | public: 16 | class Parameters: public Camera::Parameters 17 | { 18 | public: 19 | Parameters(); 20 | Parameters(const std::string& cameraName, 21 | int w, int h, 22 | double k1, double k2, double p1, double p2, 23 | double fx, double fy, double cx, double cy); 24 | 25 | double& k1(void); 26 | double& k2(void); 27 | double& p1(void); 28 | double& p2(void); 29 | double& fx(void); 30 | double& fy(void); 31 | double& cx(void); 32 | double& cy(void); 33 | 34 | double xi(void) const; 35 | double k1(void) const; 36 | double k2(void) const; 37 | double p1(void) const; 38 | double p2(void) const; 39 | double fx(void) const; 40 | double fy(void) const; 41 | double cx(void) const; 42 | double cy(void) const; 43 | 44 | bool readFromYamlFile(const std::string& filename); 45 | void writeToYamlFile(const std::string& filename) const; 46 | 47 | Parameters& operator=(const Parameters& other); 48 | friend std::ostream& operator<< (std::ostream& out, const Parameters& params); 49 | 50 | private: 51 | double m_k1; 52 | double m_k2; 53 | double m_p1; 54 | double m_p2; 55 | double m_fx; 56 | double m_fy; 57 | double m_cx; 58 | double m_cy; 59 | }; 60 | 61 | PinholeCamera(); 62 | 63 | /** 64 | * \brief Constructor from the projection model parameters 65 | */ 66 | PinholeCamera(const std::string& cameraName, 67 | int imageWidth, int imageHeight, 68 | double k1, double k2, double p1, double p2, 69 | double fx, double fy, double cx, double cy); 70 | /** 71 | * \brief Constructor from the projection model parameters 72 | */ 73 | PinholeCamera(const Parameters& params); 
74 | 75 | Camera::ModelType modelType(void) const; 76 | const std::string& cameraName(void) const; 77 | int imageWidth(void) const; 78 | int imageHeight(void) const; 79 | 80 | void estimateIntrinsics(const cv::Size& boardSize, 81 | const std::vector< std::vector >& objectPoints, 82 | const std::vector< std::vector >& imagePoints); 83 | 84 | // Lift points from the image plane to the sphere 85 | virtual void liftSphere(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 86 | //%output P 87 | 88 | // Lift points from the image plane to the projective space 89 | void liftProjective(const Eigen::Vector2d& p, Eigen::Vector3d& P) const; 90 | //%output P 91 | 92 | // Projects 3D points to the image plane (Pi function) 93 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p) const; 94 | //%output p 95 | 96 | // Projects 3D points to the image plane (Pi function) 97 | // and calculates jacobian 98 | void spaceToPlane(const Eigen::Vector3d& P, Eigen::Vector2d& p, 99 | Eigen::Matrix& J) const; 100 | //%output p 101 | //%output J 102 | 103 | void undistToPlane(const Eigen::Vector2d& p_u, Eigen::Vector2d& p) const; 104 | //%output p 105 | 106 | template 107 | static void spaceToPlane(const T* const params, 108 | const T* const q, const T* const t, 109 | const Eigen::Matrix& P, 110 | Eigen::Matrix& p); 111 | 112 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u) const; 113 | void distortion(const Eigen::Vector2d& p_u, Eigen::Vector2d& d_u, 114 | Eigen::Matrix2d& J) const; 115 | 116 | void initUndistortMap(cv::Mat& map1, cv::Mat& map2, double fScale = 1.0) const; 117 | cv::Mat initUndistortRectifyMap(cv::Mat& map1, cv::Mat& map2, 118 | float fx = -1.0f, float fy = -1.0f, 119 | cv::Size imageSize = cv::Size(0, 0), 120 | float cx = -1.0f, float cy = -1.0f, 121 | cv::Mat rmat = cv::Mat::eye(3, 3, CV_32F)) const; 122 | 123 | int parameterCount(void) const; 124 | 125 | const Parameters& getParameters(void) const; 126 | void setParameters(const Parameters& parameters); 127 | 128 | void readParameters(const std::vector& parameterVec); 129 | void writeParameters(std::vector& parameterVec) const; 130 | 131 | void writeParametersToYamlFile(const std::string& filename) const; 132 | 133 | std::string parametersToString(void) const; 134 | 135 | private: 136 | Parameters mParameters; 137 | 138 | double m_inv_K11, m_inv_K13, m_inv_K22, m_inv_K23; 139 | bool m_noDistortion; 140 | }; 141 | 142 | typedef boost::shared_ptr PinholeCameraPtr; 143 | typedef boost::shared_ptr PinholeCameraConstPtr; 144 | 145 | template 146 | void 147 | PinholeCamera::spaceToPlane(const T* const params, 148 | const T* const q, const T* const t, 149 | const Eigen::Matrix& P, 150 | Eigen::Matrix& p) 151 | { 152 | T P_w[3]; 153 | P_w[0] = T(P(0)); 154 | P_w[1] = T(P(1)); 155 | P_w[2] = T(P(2)); 156 | 157 | // Convert quaternion from Eigen convention (x, y, z, w) 158 | // to Ceres convention (w, x, y, z) 159 | T q_ceres[4] = {q[3], q[0], q[1], q[2]}; 160 | 161 | T P_c[3]; 162 | ceres::QuaternionRotatePoint(q_ceres, P_w, P_c); 163 | 164 | P_c[0] += t[0]; 165 | P_c[1] += t[1]; 166 | P_c[2] += t[2]; 167 | 168 | // project 3D object point to the image plane 169 | T k1 = params[0]; 170 | T k2 = params[1]; 171 | T p1 = params[2]; 172 | T p2 = params[3]; 173 | T fx = params[4]; 174 | T fy = params[5]; 175 | T alpha = T(0); //cameraParams.alpha(); 176 | T cx = params[6]; 177 | T cy = params[7]; 178 | 179 | // Transform to model plane 180 | T u = P_c[0] / P_c[2]; 181 | T v = P_c[1] / P_c[2]; 182 | 183 | T rho_sqr = u * u + v * 
v; 184 | T L = T(1.0) + k1 * rho_sqr + k2 * rho_sqr * rho_sqr; 185 | T du = T(2.0) * p1 * u * v + p2 * (rho_sqr + T(2.0) * u * u); 186 | T dv = p1 * (rho_sqr + T(2.0) * v * v) + T(2.0) * p2 * u * v; 187 | 188 | u = L * u + du; 189 | v = L * v + dv; 190 | p(0) = fx * (u + alpha * v) + cx; 191 | p(1) = fy * v + cy; 192 | } 193 | 194 | } 195 | 196 | #endif 197 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/chessboard/Chessboard.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARD_H 2 | #define CHESSBOARD_H 3 | 4 | #include 5 | #include 6 | 7 | namespace camodocal 8 | { 9 | 10 | // forward declarations 11 | class ChessboardCorner; 12 | typedef boost::shared_ptr ChessboardCornerPtr; 13 | class ChessboardQuad; 14 | typedef boost::shared_ptr ChessboardQuadPtr; 15 | 16 | class Chessboard 17 | { 18 | public: 19 | Chessboard(cv::Size boardSize, cv::Mat& image); 20 | 21 | void findCorners(bool useOpenCV = false); 22 | const std::vector& getCorners(void) const; 23 | bool cornersFound(void) const; 24 | 25 | const cv::Mat& getImage(void) const; 26 | const cv::Mat& getSketch(void) const; 27 | 28 | private: 29 | bool findChessboardCorners(const cv::Mat& image, 30 | const cv::Size& patternSize, 31 | std::vector& corners, 32 | int flags, bool useOpenCV); 33 | 34 | bool findChessboardCornersImproved(const cv::Mat& image, 35 | const cv::Size& patternSize, 36 | std::vector& corners, 37 | int flags); 38 | 39 | void cleanFoundConnectedQuads(std::vector& quadGroup, cv::Size patternSize); 40 | 41 | void findConnectedQuads(std::vector& quads, 42 | std::vector& group, 43 | int group_idx, int dilation); 44 | 45 | // int checkQuadGroup(std::vector& quadGroup, 46 | // std::vector& outCorners, 47 | // cv::Size patternSize); 48 | 49 | void labelQuadGroup(std::vector& quad_group, 50 | cv::Size patternSize, bool firstRun); 51 | 52 | void findQuadNeighbors(std::vector& quads, int dilation); 53 | 54 | int augmentBestRun(std::vector& candidateQuads, int candidateDilation, 55 | std::vector& existingQuads, int existingDilation); 56 | 57 | void generateQuads(std::vector& quads, 58 | cv::Mat& image, int flags, 59 | int dilation, bool firstRun); 60 | 61 | bool checkQuadGroup(std::vector& quads, 62 | std::vector& corners, 63 | cv::Size patternSize); 64 | 65 | void getQuadrangleHypotheses(const std::vector< std::vector >& contours, 66 | std::vector< std::pair >& quads, 67 | int classId) const; 68 | 69 | bool checkChessboard(const cv::Mat& image, cv::Size patternSize) const; 70 | 71 | bool checkBoardMonotony(std::vector& corners, 72 | cv::Size patternSize); 73 | 74 | bool matchCorners(ChessboardQuadPtr& quad1, int corner1, 75 | ChessboardQuadPtr& quad2, int corner2) const; 76 | 77 | cv::Mat mImage; 78 | cv::Mat mSketch; 79 | std::vector mCorners; 80 | cv::Size mBoardSize; 81 | bool mCornersFound; 82 | }; 83 | 84 | } 85 | 86 | #endif 87 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/chessboard/ChessboardCorner.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARDCORNER_H 2 | #define CHESSBOARDCORNER_H 3 | 4 | #include 5 | #include 6 | 7 | namespace camodocal 8 | { 9 | 10 | class ChessboardCorner; 11 | typedef boost::shared_ptr ChessboardCornerPtr; 12 | 13 | class ChessboardCorner 14 | { 15 | public: 16 | ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {} 17 | 18 | 
float meanDist(int &n) const 19 | { 20 | float sum = 0; 21 | n = 0; 22 | for (int i = 0; i < 4; ++i) 23 | { 24 | if (neighbors[i].get()) 25 | { 26 | float dx = neighbors[i]->pt.x - pt.x; 27 | float dy = neighbors[i]->pt.y - pt.y; 28 | sum += sqrt(dx*dx + dy*dy); 29 | n++; 30 | } 31 | } 32 | return sum / std::max(n, 1); 33 | } 34 | 35 | cv::Point2f pt; // X and y coordinates 36 | int row; // Row and column of the corner 37 | int column; // in the found pattern 38 | bool needsNeighbor; // Does the corner require a neighbor? 39 | int count; // number of corner neighbors 40 | ChessboardCornerPtr neighbors[4]; // pointer to all corner neighbors 41 | }; 42 | 43 | } 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/chessboard/ChessboardQuad.h: -------------------------------------------------------------------------------- 1 | #ifndef CHESSBOARDQUAD_H 2 | #define CHESSBOARDQUAD_H 3 | 4 | #include 5 | 6 | #include "camodocal/chessboard/ChessboardCorner.h" 7 | 8 | namespace camodocal 9 | { 10 | 11 | class ChessboardQuad; 12 | typedef boost::shared_ptr ChessboardQuadPtr; 13 | 14 | class ChessboardQuad 15 | { 16 | public: 17 | ChessboardQuad() : count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {} 18 | 19 | int count; // Number of quad neighbors 20 | int group_idx; // Quad group ID 21 | float edge_len; // Smallest side length^2 22 | ChessboardCornerPtr corners[4]; // Coordinates of quad corners 23 | ChessboardQuadPtr neighbors[4]; // Pointers of quad neighbors 24 | bool labeled; // Has this corner been labeled? 25 | }; 26 | 27 | } 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/gpl/EigenQuaternionParameterization.h: -------------------------------------------------------------------------------- 1 | #ifndef EIGENQUATERNIONPARAMETERIZATION_H 2 | #define EIGENQUATERNIONPARAMETERIZATION_H 3 | 4 | #include "ceres/local_parameterization.h" 5 | 6 | namespace camodocal 7 | { 8 | 9 | class EigenQuaternionParameterization : public ceres::LocalParameterization 10 | { 11 | public: 12 | virtual ~EigenQuaternionParameterization() {} 13 | virtual bool Plus(const double* x, 14 | const double* delta, 15 | double* x_plus_delta) const; 16 | virtual bool ComputeJacobian(const double* x, 17 | double* jacobian) const; 18 | virtual int GlobalSize() const { return 4; } 19 | virtual int LocalSize() const { return 3; } 20 | 21 | private: 22 | template 23 | void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const; 24 | }; 25 | 26 | 27 | template 28 | void 29 | EigenQuaternionParameterization::EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const 30 | { 31 | zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1]; 32 | zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0]; 33 | zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3]; 34 | zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2]; 35 | } 36 | 37 | } 38 | 39 | #endif 40 | 41 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/gpl/gpl.h: -------------------------------------------------------------------------------- 1 | #ifndef GPL_H 2 | #define GPL_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace camodocal 9 | { 10 | 11 | template 12 | const T clamp(const T& v, const T& a, const T& b) 13 | { 14 | return std::min(b, std::max(a, v)); 15 | } 16 | 17 
| double hypot3(double x, double y, double z); 18 | float hypot3f(float x, float y, float z); 19 | 20 | template 21 | const T normalizeTheta(const T& theta) 22 | { 23 | T normTheta = theta; 24 | 25 | while (normTheta < - M_PI) 26 | { 27 | normTheta += 2.0 * M_PI; 28 | } 29 | while (normTheta > M_PI) 30 | { 31 | normTheta -= 2.0 * M_PI; 32 | } 33 | 34 | return normTheta; 35 | } 36 | 37 | double d2r(double deg); 38 | float d2r(float deg); 39 | double r2d(double rad); 40 | float r2d(float rad); 41 | 42 | double sinc(double theta); 43 | 44 | template 45 | const T square(const T& x) 46 | { 47 | return x * x; 48 | } 49 | 50 | template 51 | const T cube(const T& x) 52 | { 53 | return x * x * x; 54 | } 55 | 56 | template 57 | const T random(const T& a, const T& b) 58 | { 59 | return static_cast(rand()) / RAND_MAX * (b - a) + a; 60 | } 61 | 62 | template 63 | const T randomNormal(const T& sigma) 64 | { 65 | T x1, x2, w; 66 | 67 | do 68 | { 69 | x1 = 2.0 * random(0.0, 1.0) - 1.0; 70 | x2 = 2.0 * random(0.0, 1.0) - 1.0; 71 | w = x1 * x1 + x2 * x2; 72 | } 73 | while (w >= 1.0 || w == 0.0); 74 | 75 | w = sqrt((-2.0 * log(w)) / w); 76 | 77 | return x1 * w * sigma; 78 | } 79 | 80 | unsigned long long timeInMicroseconds(void); 81 | 82 | double timeInSeconds(void); 83 | 84 | void colorDepthImage(cv::Mat& imgDepth, 85 | cv::Mat& imgColoredDepth, 86 | float minRange, float maxRange); 87 | 88 | bool colormap(const std::string& name, unsigned char idx, 89 | float& r, float& g, float& b); 90 | 91 | std::vector bresLine(int x0, int y0, int x1, int y1); 92 | std::vector bresCircle(int x0, int y0, int r); 93 | 94 | void fitCircle(const std::vector& points, 95 | double& centerX, double& centerY, double& radius); 96 | 97 | std::vector intersectCircles(double x1, double y1, double r1, 98 | double x2, double y2, double r2); 99 | 100 | void LLtoUTM(double latitude, double longitude, 101 | double& utmNorthing, double& utmEasting, 102 | std::string& utmZone); 103 | void UTMtoLL(double utmNorthing, double utmEasting, 104 | const std::string& utmZone, 105 | double& latitude, double& longitude); 106 | 107 | long int timestampDiff(uint64_t t1, uint64_t t2); 108 | 109 | } 110 | 111 | #endif 112 | -------------------------------------------------------------------------------- /camera_models_in/include/camodocal/sparse_graph/Transform.h: -------------------------------------------------------------------------------- 1 | #ifndef TRANSFORM_H 2 | #define TRANSFORM_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace camodocal 9 | { 10 | 11 | class Transform 12 | { 13 | public: 14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 15 | 16 | Transform(); 17 | Transform(const Eigen::Matrix4d& H); 18 | 19 | Eigen::Quaterniond& rotation(void); 20 | const Eigen::Quaterniond& rotation(void) const; 21 | double* rotationData(void); 22 | const double* const rotationData(void) const; 23 | 24 | Eigen::Vector3d& translation(void); 25 | const Eigen::Vector3d& translation(void) const; 26 | double* translationData(void); 27 | const double* const translationData(void) const; 28 | 29 | Eigen::Matrix4d toMatrix(void) const; 30 | 31 | private: 32 | Eigen::Quaterniond m_q; 33 | Eigen::Vector3d m_t; 34 | }; 35 | 36 | } 37 | 38 | #endif 39 | -------------------------------------------------------------------------------- /camera_models_in/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | camera_models_in 4 | 0.0.0 5 | The camera_models package 6 | 7 | 8 | 9 | 10 | dvorak 11 | 12 | 13 | 14 | 15 | 16 | 
TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | catkin 43 | roscpp 44 | std_msgs 45 | roscpp 46 | std_msgs 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /camera_models_in/readme.md: -------------------------------------------------------------------------------- 1 | part of [camodocal](https://github.com/hengli/camodocal) 2 | 3 | [Google Ceres](http://ceres-solver.org) is needed. 4 | 5 | # Calibration: 6 | 7 | Use [intrinsic_calib.cc](https://github.com/dvorak0/camera_model/blob/master/src/intrinsic_calib.cc) to calibrate your camera. 8 | 9 | # Undistortion: 10 | 11 | See [Camera.h](https://github.com/dvorak0/camera_model/blob/master/include/camodocal/camera_models/Camera.h) for general interface: 12 | 13 | - liftProjective: Lift points from the image plane to the projective space. 14 | - spaceToPlane: Projects 3D points to the image plane (Pi function) 15 | 16 | -------------------------------------------------------------------------------- /camera_models_in/src/camera_models/Camera.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/Camera.h" 2 | #include "camodocal/camera_models/ScaramuzzaCamera.h" 3 | 4 | #include 5 | 6 | namespace camodocal 7 | { 8 | 9 | Camera::Parameters::Parameters(ModelType modelType) 10 | : m_modelType(modelType) 11 | , m_imageWidth(0) 12 | , m_imageHeight(0) 13 | { 14 | switch (modelType) 15 | { 16 | case KANNALA_BRANDT: 17 | m_nIntrinsics = 8; 18 | break; 19 | case PINHOLE: 20 | m_nIntrinsics = 8; 21 | break; 22 | case SCARAMUZZA: 23 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; 24 | break; 25 | case MEI: 26 | default: 27 | m_nIntrinsics = 9; 28 | } 29 | } 30 | 31 | Camera::Parameters::Parameters(ModelType modelType, 32 | const std::string& cameraName, 33 | int w, int h) 34 | : m_modelType(modelType) 35 | , m_cameraName(cameraName) 36 | , m_imageWidth(w) 37 | , m_imageHeight(h) 38 | { 39 | switch (modelType) 40 | { 41 | case KANNALA_BRANDT: 42 | m_nIntrinsics = 8; 43 | break; 44 | case PINHOLE: 45 | m_nIntrinsics = 8; 46 | break; 47 | case SCARAMUZZA: 48 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS; 49 | break; 50 | case MEI: 51 | default: 52 | m_nIntrinsics = 9; 53 | } 54 | } 55 | 56 | Camera::ModelType& 57 | Camera::Parameters::modelType(void) 58 | { 59 | return m_modelType; 60 | } 61 | 62 | std::string& 63 | Camera::Parameters::cameraName(void) 64 | { 65 | return m_cameraName; 66 | } 67 | 68 | int& 69 | Camera::Parameters::imageWidth(void) 70 | { 71 | return m_imageWidth; 72 | } 73 | 74 | int& 75 | Camera::Parameters::imageHeight(void) 76 | { 77 | return m_imageHeight; 78 | } 79 | 80 | Camera::ModelType 81 | Camera::Parameters::modelType(void) const 82 | { 83 | return m_modelType; 84 | } 85 | 86 | const std::string& 87 | Camera::Parameters::cameraName(void) const 88 | { 89 | return m_cameraName; 90 | } 91 | 92 | int 93 | Camera::Parameters::imageWidth(void) const 94 | { 95 | return m_imageWidth; 96 | } 97 | 98 | int 99 | Camera::Parameters::imageHeight(void) const 100 | { 101 | return m_imageHeight; 102 | } 103 | 104 | int 105 | Camera::Parameters::nIntrinsics(void) const 106 | { 107 | return m_nIntrinsics; 108 | } 109 | 110 | cv::Mat& 111 | Camera::mask(void) 112 | { 113 | return m_mask; 114 | } 115 | 116 | const cv::Mat& 117 | Camera::mask(void) const 118 | { 119 | return m_mask; 120 | } 121 | 122 | void 123 | 
Camera::estimateExtrinsics(const std::vector& objectPoints, 124 | const std::vector& imagePoints, 125 | cv::Mat& rvec, cv::Mat& tvec) const 126 | { 127 | std::vector Ms(imagePoints.size()); 128 | for (size_t i = 0; i < Ms.size(); ++i) 129 | { 130 | Eigen::Vector3d P; 131 | liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P); 132 | 133 | P /= P(2); 134 | 135 | Ms.at(i).x = P(0); 136 | Ms.at(i).y = P(1); 137 | } 138 | 139 | // assume unit focal length, zero principal point, and zero distortion 140 | cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec); 141 | } 142 | 143 | double 144 | Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const 145 | { 146 | Eigen::Vector2d p1, p2; 147 | 148 | spaceToPlane(P1, p1); 149 | spaceToPlane(P2, p2); 150 | 151 | return (p1 - p2).norm(); 152 | } 153 | 154 | double 155 | Camera::reprojectionError(const std::vector< std::vector >& objectPoints, 156 | const std::vector< std::vector >& imagePoints, 157 | const std::vector& rvecs, 158 | const std::vector& tvecs, 159 | cv::OutputArray _perViewErrors) const 160 | { 161 | int imageCount = objectPoints.size(); 162 | size_t pointsSoFar = 0; 163 | double totalErr = 0.0; 164 | 165 | bool computePerViewErrors = _perViewErrors.needed(); 166 | cv::Mat perViewErrors; 167 | if (computePerViewErrors) 168 | { 169 | _perViewErrors.create(imageCount, 1, CV_64F); 170 | perViewErrors = _perViewErrors.getMat(); 171 | } 172 | 173 | for (int i = 0; i < imageCount; ++i) 174 | { 175 | size_t pointCount = imagePoints.at(i).size(); 176 | 177 | pointsSoFar += pointCount; 178 | 179 | std::vector estImagePoints; 180 | projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i), 181 | estImagePoints); 182 | 183 | double err = 0.0; 184 | for (size_t j = 0; j < imagePoints.at(i).size(); ++j) 185 | { 186 | err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j)); 187 | } 188 | 189 | if (computePerViewErrors) 190 | { 191 | perViewErrors.at(i) = err / pointCount; 192 | } 193 | 194 | totalErr += err; 195 | } 196 | 197 | return totalErr / pointsSoFar; 198 | } 199 | 200 | double 201 | Camera::reprojectionError(const Eigen::Vector3d& P, 202 | const Eigen::Quaterniond& camera_q, 203 | const Eigen::Vector3d& camera_t, 204 | const Eigen::Vector2d& observed_p) const 205 | { 206 | Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t; 207 | 208 | Eigen::Vector2d p; 209 | spaceToPlane(P_cam, p); 210 | 211 | return (p - observed_p).norm(); 212 | } 213 | 214 | void 215 | Camera::projectPoints(const std::vector& objectPoints, 216 | const cv::Mat& rvec, 217 | const cv::Mat& tvec, 218 | std::vector& imagePoints) const 219 | { 220 | // project 3D object points to the image plane 221 | imagePoints.reserve(objectPoints.size()); 222 | 223 | //double 224 | cv::Mat R0; 225 | cv::Rodrigues(rvec, R0); 226 | 227 | Eigen::MatrixXd R(3,3); 228 | R << R0.at(0,0), R0.at(0,1), R0.at(0,2), 229 | R0.at(1,0), R0.at(1,1), R0.at(1,2), 230 | R0.at(2,0), R0.at(2,1), R0.at(2,2); 231 | 232 | Eigen::Vector3d t; 233 | t << tvec.at(0), tvec.at(1), tvec.at(2); 234 | 235 | for (size_t i = 0; i < objectPoints.size(); ++i) 236 | { 237 | const cv::Point3f& objectPoint = objectPoints.at(i); 238 | 239 | // Rotate and translate 240 | Eigen::Vector3d P; 241 | P << objectPoint.x, objectPoint.y, objectPoint.z; 242 | 243 | P = R * P + t; 244 | 245 | Eigen::Vector2d p; 246 | spaceToPlane(P, p); 247 | 248 | imagePoints.push_back(cv::Point2f(p(0), p(1))); 249 | } 250 | } 251 | 252 | } 253 | 
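The `Camera` interface summarized in `camera_models_in/readme.md` above (`liftProjective` to back-project a pixel into a viewing ray, `spaceToPlane` to project a 3D point back onto the image) can be driven through `CameraFactory::generateCameraFromYamlFile`, the same calls `Camera.cc` uses internally in `estimateExtrinsics` and `projectPoints`. Below is a minimal usage sketch; the calibration file name `camera.yaml` and the pixel coordinates are illustrative assumptions, not values from this repository:

```cpp
// Minimal sketch: load a camera model from a YAML calibration file and
// round-trip one pixel through liftProjective / spaceToPlane.
// "camera.yaml" and the pixel (320, 240) are illustrative values only.
#include <iostream>
#include <Eigen/Core>
#include "camodocal/camera_models/CameraFactory.h"

int main()
{
    camodocal::CameraPtr cam =
        camodocal::CameraFactory::instance()->generateCameraFromYamlFile("camera.yaml");
    if (!cam)
    {
        std::cerr << "Failed to load camera calibration." << std::endl;
        return 1;
    }

    // Lift a pixel to a viewing ray in the camera frame (undistorted coordinates).
    Eigen::Vector2d pixel(320.0, 240.0);
    Eigen::Vector3d ray;
    cam->liftProjective(pixel, ray);

    // Project the ray back onto the image plane; this should reproduce the pixel.
    Eigen::Vector2d reprojected;
    cam->spaceToPlane(ray, reprojected);

    std::cout << "ray: " << ray.transpose()
              << "  reprojected pixel: " << reprojected.transpose() << std::endl;
    return 0;
}
```

Since `liftProjective` returns a viewing ray rather than a metric 3D point, scaling `ray` by any positive depth before calling `spaceToPlane` yields the same reprojected pixel.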
-------------------------------------------------------------------------------- /camera_models_in/src/camera_models/CameraFactory.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/camera_models/CameraFactory.h" 2 | 3 | #include 4 | 5 | #include "camodocal/camera_models/CataCamera.h" 6 | #include "camodocal/camera_models/EquidistantCamera.h" 7 | #include "camodocal/camera_models/PinholeCamera.h" 8 | #include "camodocal/camera_models/PinholeFullCamera.h" 9 | #include "camodocal/camera_models/ScaramuzzaCamera.h" 10 | 11 | #include "ceres/ceres.h" 12 | 13 | namespace camodocal 14 | { 15 | 16 | boost::shared_ptr< CameraFactory > CameraFactory::m_instance; 17 | 18 | CameraFactory::CameraFactory( ) {} 19 | 20 | boost::shared_ptr< CameraFactory > 21 | CameraFactory::instance( void ) 22 | { 23 | if ( m_instance.get( ) == 0 ) 24 | { 25 | m_instance.reset( new CameraFactory ); 26 | } 27 | 28 | return m_instance; 29 | } 30 | 31 | CameraPtr 32 | CameraFactory::generateCamera( Camera::ModelType modelType, const std::string& cameraName, cv::Size imageSize ) const 33 | { 34 | switch ( modelType ) 35 | { 36 | case Camera::KANNALA_BRANDT: 37 | { 38 | EquidistantCameraPtr camera( new EquidistantCamera ); 39 | 40 | EquidistantCamera::Parameters params = camera->getParameters( ); 41 | params.cameraName( ) = cameraName; 42 | params.imageWidth( ) = imageSize.width; 43 | params.imageHeight( ) = imageSize.height; 44 | camera->setParameters( params ); 45 | return camera; 46 | } 47 | case Camera::PINHOLE: 48 | { 49 | PinholeCameraPtr camera( new PinholeCamera ); 50 | 51 | PinholeCamera::Parameters params = camera->getParameters( ); 52 | params.cameraName( ) = cameraName; 53 | params.imageWidth( ) = imageSize.width; 54 | params.imageHeight( ) = imageSize.height; 55 | camera->setParameters( params ); 56 | return camera; 57 | } 58 | case Camera::PINHOLE_FULL: 59 | { 60 | PinholeFullCameraPtr camera( new PinholeFullCamera ); 61 | 62 | PinholeFullCamera::Parameters params = camera->getParameters( ); 63 | params.cameraName( ) = cameraName; 64 | params.imageWidth( ) = imageSize.width; 65 | params.imageHeight( ) = imageSize.height; 66 | camera->setParameters( params ); 67 | return camera; 68 | } 69 | case Camera::SCARAMUZZA: 70 | { 71 | OCAMCameraPtr camera( new OCAMCamera ); 72 | 73 | OCAMCamera::Parameters params = camera->getParameters( ); 74 | params.cameraName( ) = cameraName; 75 | params.imageWidth( ) = imageSize.width; 76 | params.imageHeight( ) = imageSize.height; 77 | camera->setParameters( params ); 78 | return camera; 79 | } 80 | case Camera::MEI: 81 | default: 82 | { 83 | CataCameraPtr camera( new CataCamera ); 84 | 85 | CataCamera::Parameters params = camera->getParameters( ); 86 | params.cameraName( ) = cameraName; 87 | params.imageWidth( ) = imageSize.width; 88 | params.imageHeight( ) = imageSize.height; 89 | camera->setParameters( params ); 90 | return camera; 91 | } 92 | } 93 | } 94 | 95 | CameraPtr 96 | CameraFactory::generateCameraFromYamlFile( const std::string& filename ) 97 | { 98 | cv::FileStorage fs( filename, cv::FileStorage::READ ); 99 | 100 | if ( !fs.isOpened( ) ) 101 | { 102 | return CameraPtr( ); 103 | } 104 | 105 | Camera::ModelType modelType = Camera::MEI; 106 | if ( !fs["model_type"].isNone( ) ) 107 | { 108 | std::string sModelType; 109 | fs["model_type"] >> sModelType; 110 | 111 | if ( boost::iequals( sModelType, "kannala_brandt" ) ) 112 | { 113 | modelType = Camera::KANNALA_BRANDT; 114 | } 115 | else if ( boost::iequals( 
sModelType, "mei" ) ) 116 | { 117 | modelType = Camera::MEI; 118 | } 119 | else if ( boost::iequals( sModelType, "scaramuzza" ) ) 120 | { 121 | modelType = Camera::SCARAMUZZA; 122 | } 123 | else if ( boost::iequals( sModelType, "pinhole" ) ) 124 | { 125 | modelType = Camera::PINHOLE; 126 | } 127 | else if ( boost::iequals( sModelType, "PINHOLE_FULL" ) ) 128 | { 129 | modelType = Camera::PINHOLE_FULL; 130 | } 131 | else 132 | { 133 | std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl; 134 | return CameraPtr( ); 135 | } 136 | } 137 | 138 | switch ( modelType ) 139 | { 140 | case Camera::KANNALA_BRANDT: 141 | { 142 | EquidistantCameraPtr camera( new EquidistantCamera ); 143 | 144 | EquidistantCamera::Parameters params = camera->getParameters( ); 145 | params.readFromYamlFile( filename ); 146 | camera->setParameters( params ); 147 | return camera; 148 | } 149 | case Camera::PINHOLE: 150 | { 151 | PinholeCameraPtr camera( new PinholeCamera ); 152 | 153 | PinholeCamera::Parameters params = camera->getParameters( ); 154 | params.readFromYamlFile( filename ); 155 | camera->setParameters( params ); 156 | return camera; 157 | } 158 | case Camera::PINHOLE_FULL: 159 | { 160 | PinholeFullCameraPtr camera( new PinholeFullCamera ); 161 | 162 | PinholeFullCamera::Parameters params = camera->getParameters( ); 163 | params.readFromYamlFile( filename ); 164 | camera->setParameters( params ); 165 | return camera; 166 | } 167 | case Camera::SCARAMUZZA: 168 | { 169 | OCAMCameraPtr camera( new OCAMCamera ); 170 | 171 | OCAMCamera::Parameters params = camera->getParameters( ); 172 | params.readFromYamlFile( filename ); 173 | camera->setParameters( params ); 174 | return camera; 175 | } 176 | case Camera::MEI: 177 | default: 178 | { 179 | CataCameraPtr camera( new CataCamera ); 180 | 181 | CataCamera::Parameters params = camera->getParameters( ); 182 | params.readFromYamlFile( filename ); 183 | camera->setParameters( params ); 184 | return camera; 185 | } 186 | } 187 | 188 | return CameraPtr( ); 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /camera_models_in/src/gpl/EigenQuaternionParameterization.cc: -------------------------------------------------------------------------------- 1 | #include "camodocal/gpl/EigenQuaternionParameterization.h" 2 | 3 | #include 4 | 5 | namespace camodocal 6 | { 7 | 8 | bool 9 | EigenQuaternionParameterization::Plus(const double* x, 10 | const double* delta, 11 | double* x_plus_delta) const 12 | { 13 | const double norm_delta = 14 | sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]); 15 | if (norm_delta > 0.0) 16 | { 17 | const double sin_delta_by_delta = (sin(norm_delta) / norm_delta); 18 | double q_delta[4]; 19 | q_delta[0] = sin_delta_by_delta * delta[0]; 20 | q_delta[1] = sin_delta_by_delta * delta[1]; 21 | q_delta[2] = sin_delta_by_delta * delta[2]; 22 | q_delta[3] = cos(norm_delta); 23 | EigenQuaternionProduct(q_delta, x, x_plus_delta); 24 | } 25 | else 26 | { 27 | for (int i = 0; i < 4; ++i) 28 | { 29 | x_plus_delta[i] = x[i]; 30 | } 31 | } 32 | return true; 33 | } 34 | 35 | bool 36 | EigenQuaternionParameterization::ComputeJacobian(const double* x, 37 | double* jacobian) const 38 | { 39 | jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT 40 | jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT 41 | jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT 42 | jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // 
NOLINT 43 | return true; 44 | } 45 | 46 | } 47 | -------------------------------------------------------------------------------- /camera_models_in/src/sparse_graph/Transform.cc: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | namespace camodocal 4 | { 5 | 6 | Transform::Transform() 7 | { 8 | m_q.setIdentity(); 9 | m_t.setZero(); 10 | } 11 | 12 | Transform::Transform(const Eigen::Matrix4d& H) 13 | { 14 | m_q = Eigen::Quaterniond(H.block<3,3>(0,0)); 15 | m_t = H.block<3,1>(0,3); 16 | } 17 | 18 | Eigen::Quaterniond& 19 | Transform::rotation(void) 20 | { 21 | return m_q; 22 | } 23 | 24 | const Eigen::Quaterniond& 25 | Transform::rotation(void) const 26 | { 27 | return m_q; 28 | } 29 | 30 | double* 31 | Transform::rotationData(void) 32 | { 33 | return m_q.coeffs().data(); 34 | } 35 | 36 | const double* const 37 | Transform::rotationData(void) const 38 | { 39 | return m_q.coeffs().data(); 40 | } 41 | 42 | Eigen::Vector3d& 43 | Transform::translation(void) 44 | { 45 | return m_t; 46 | } 47 | 48 | const Eigen::Vector3d& 49 | Transform::translation(void) const 50 | { 51 | return m_t; 52 | } 53 | 54 | double* 55 | Transform::translationData(void) 56 | { 57 | return m_t.data(); 58 | } 59 | 60 | const double* const 61 | Transform::translationData(void) const 62 | { 63 | return m_t.data(); 64 | } 65 | 66 | Eigen::Matrix4d 67 | Transform::toMatrix(void) const 68 | { 69 | Eigen::Matrix4d H; 70 | H.setIdentity(); 71 | H.block<3,3>(0,0) = m_q.toRotationMatrix(); 72 | H.block<3,1>(0,3) = m_t; 73 | 74 | return H; 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /config/euroc.yaml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | model_type: PINHOLE 4 | camera_name: camera 5 | image_width: 752 6 | image_height: 480 7 | distortion_parameters: 8 | k1: 0 9 | k2: 0 10 | p1: 0 11 | p2: 0 12 | projection_parameters: 13 | fx: 435.2046959714599 14 | fy: 435.2046959714599 15 | cx: 367.4517211914062 16 | cy: 252.2008514404297 17 | 18 | #stereo rectify 19 | STEREO.bf: 47.90639384423901 # unit: meter, meaning: baseline times fx(unit: pixel) 20 | 21 | LEFT.height: 480 22 | LEFT.width: 752 23 | LEFT.D: !!opencv-matrix 24 | rows: 1 25 | cols: 5 26 | dt: d 27 | data: [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05, 0.0] 28 | LEFT.K: !!opencv-matrix 29 | rows: 3 30 | cols: 3 31 | dt: d 32 | data: [458.654, 0.0, 367.215, 0.0, 457.296, 248.375, 0.0, 0.0, 1.0] 33 | LEFT.R: !!opencv-matrix 34 | rows: 3 35 | cols: 3 36 | dt: d 37 | data: [0.999966347530033, -0.001422739138722922, 0.008079580483432283, 0.001365741834644127, 0.9999741760894847, 0.007055629199258132, -0.008089410156878961, -0.007044357138835809, 0.9999424675829176] 38 | LEFT.Rf: !!opencv-matrix 39 | rows: 3 40 | cols: 3 41 | dt: f 42 | data: [0.999966347530033, -0.001422739138722922, 0.008079580483432283, 0.001365741834644127, 0.9999741760894847, 0.007055629199258132, -0.008089410156878961, -0.007044357138835809, 0.9999424675829176] 43 | LEFT.P: !!opencv-matrix 44 | rows: 3 45 | cols: 4 46 | dt: d 47 | data: [435.2046959714599, 0, 367.4517211914062, 0, 0, 435.2046959714599, 252.2008514404297, 0, 0, 0, 1, 0] 48 | LEFT.fx: 435.2046959714599 49 | 50 | RIGHT.height: 480 51 | RIGHT.width: 752 52 | RIGHT.D: !!opencv-matrix 53 | rows: 1 54 | cols: 5 55 | dt: d 56 | data: [-0.28368365, 0.07451284, -0.00010473, -3.555907e-05, 0.0] 57 | RIGHT.K: !!opencv-matrix 58 | rows: 3 59 | cols: 3 60 | dt: d 61 | data: 
[457.587, 0.0, 379.999, 0.0, 456.134, 255.238, 0.0, 0.0, 1] 62 | RIGHT.R: !!opencv-matrix 63 | rows: 3 64 | cols: 3 65 | dt: d 66 | data: [0.9999633526194376, -0.003625811871560086, 0.007755443660172947, 0.003680398547259526, 0.9999684752771629, -0.007035845251224894, -0.007729688520722713, 0.007064130529506649, 0.999945173484644] 67 | RIGHT.P: !!opencv-matrix 68 | rows: 3 69 | cols: 4 70 | dt: d 71 | data: [435.2046959714599, 0, 367.4517211914062, -47.90639384423901, 0, 435.2046959714599, 252.2008514404297, 0, 0, 0, 1, 0] 72 | 73 | #common parameters 74 | imu_topic: "/imu0" 75 | left_image_topic: "/cam0/image_raw" 76 | right_image_topic: "/cam1/image_raw" 77 | output_path: "/home/oran/WS/WSa/SLAM/VCU-VIOs/SVIO/output" 78 | 79 | # Extrinsic parameter between IMU and Camera. 80 | estimate_extrinsic: 0 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it. 81 | # 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess. 82 | 83 | body_T_cam0: !!opencv-matrix # Timu2c_1 Tu2c 84 | rows: 4 85 | cols: 4 86 | dt: d 87 | data: [0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975, 88 | 0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768, 89 | -0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949, 90 | 0.0, 0.0, 0.0, 1.0] 91 | 92 | #Multiple thread support 93 | multiple_thread: 1 94 | 95 | #feature traker paprameters 96 | max_cnt: 150 # max feature number in feature tracking 97 | min_dist: 30 # min distance between two features 98 | freq: 10 # frequence (Hz) of publish tracking result. At least 10Hz for good estimation. If set 0, the frequence will be same as raw image 99 | F_threshold: 2.0 # 1.0 # ransac threshold (pixel) 100 | show_track: 1 # publish tracking image as topic 101 | flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy 102 | 103 | #optimization parameters 104 | max_solver_time: 0.04 # 0.04 # max solver itration time (ms), to guarantee real time 105 | max_num_iterations: 8 # max solver itrations, to guarantee real time 106 | keyframe_parallax: 10.0 # 5.0 # 10.0 # keyframe selection threshold (pixel) 107 | 108 | #imu parameters The more accurate parameters you provide, the better performance 109 | acc_n: 0.08 #2.0e-03 110 | gyr_n: 0.004 #1.7e-04 111 | acc_w: 0.00004 #3.e-03 112 | gyr_w: 2.0e-6 #1.9393e-05 113 | g_norm: 9.81 # gravity magnitude 114 | 115 | #loop closure 116 | loop_closure: 0 # 1 #if you want to use loop closure to minimize the drift, set loop_closure true and give your brief pattern file path and vocabulary file path accordingly; 117 | 118 | #unsynchronization parameters 119 | estimate_td: 0 # 1 # online estimate time offset between camera and imu 120 | td: 0.0 # initial value of time offset. unit: s. readed image clock + td = real image clock (IMU clock) 121 | 122 | #rolling shutter parameters 123 | rolling_shutter: 0 # 1 # 0: global shutter camera, 1: rolling shutter camera 124 | rolling_shutter_tr: 0.033 # unit: s. rolling shutter read out time per frame (from data sheet). 
125 | 126 | #loop closure parameters 127 | load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path' 128 | pose_graph_save_path: "/home/tony-ws1/output/pose_graph/" # save and load path 129 | save_image: 0 #1 130 | 131 | # Structural constraints 132 | horizontal_structural_th: 5.0 133 | line_horizontal_structural_th: 5.0 134 | vertical_structural_th: 3.0 135 | 136 | # Outlier threshold 137 | err_pt_line: 10 138 | err_normal_line: 10 139 | err_point: 20 140 | 141 | # Result format 142 | save_tum: 0 143 | 144 | # Drop frame 145 | drop_frame: 0 146 | 147 | # Min used num (line/point) 148 | min_used_num: 4 149 | -------------------------------------------------------------------------------- /config/openloris_vio.yaml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | #camera calibration 4 | model_type: PINHOLE 5 | camera_name: camera 6 | image_width: 848 7 | image_height: 480 8 | distortion_parameters: 9 | k1: 0.0 10 | k2: 0.0 11 | p1: 0.0 12 | p2: 0.0 13 | projection_parameters: 14 | fx: 611.4509887695312 15 | fy: 611.4857177734375 16 | cx: 433.2039794921875 17 | cy: 249.4730224609375 18 | 19 | # Topics 20 | imu_topic: "/d400/imu0" 21 | image_topic: "/d400/color/image_raw" 22 | dpt_img_topic: "/d400/aligned_depth_to_color/image_raw" 23 | output_path: "/home/oran/WS/Work/SLAM/SVIO/output" 24 | 25 | # Extrinsic parameter between IMU and Camera. 26 | estimate_extrinsic: 0 # 0 Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam; don't change them. 27 | # 1 Have an initial guess about the extrinsic parameters. We will optimize around your initial guess. 28 | # 2 Don't know anything about the extrinsic parameters. You don't need to give R,T. We will try to calibrate them. Do some rotational movement at the beginning. 29 | 30 | body_T_cam0: !!opencv-matrix # Timu2c_1 Tu2c 31 | rows: 4 32 | cols: 4 33 | dt: d 34 | data: [0.999975572679493, 0.003849141066713, 0.005854714944742, 0.0203127935529, 35 | -0.003828680351062, 0.999986658473099, -0.003501944262433, -0.00510325236246, 36 | -0.005868115609379, 0.003479442469180, 0.999976848846595, -0.0112013882026, 37 | 0, 0, 0, 1] 38 | 39 | # Multiple thread support 40 | multiple_thread: 1 41 | 42 | # Feature tracker parameters 43 | max_cnt: 130 # max feature number in feature tracking. It is suggested to raise this in VO mode. 44 | min_dist: 30 # min distance between two features 45 | freq: 10 # frequency (Hz) at which the tracking result is published. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image rate 46 | F_threshold: 2.0 # RANSAC threshold (pixel) 47 | show_track: 1 # publish tracking image as topic 48 | flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy 49 | 50 | # IMU acc. multiply 51 | acc_mult: 1 52 | 53 | # Optimization parameters 54 | max_solver_time: 0.04 # max solver iteration time (s), to guarantee real-time performance 55 | max_num_iterations: 8 # max solver iterations, to guarantee real-time performance 56 | keyframe_parallax: 10.0 # keyframe selection threshold (pixel) 57 | 58 | # IMU parameters. The more accurate the parameters you provide, the better the performance. 59 | acc_n: 0.1 # accelerometer measurement noise standard deviation. 60 | gyr_n: 0.01 # gyroscope measurement noise standard deviation. 61 | acc_w: 0.0002 # accelerometer bias random walk noise standard deviation. 62 | gyr_w: 2.0e-5 # gyroscope bias random walk noise standard deviation.
63 | g_norm: 9.805 # gravity magnitude 64 | 65 | # Loop closure 66 | loop_closure: 0 #if you want to use loop closure to minimize the drift, set loop_closure true and give your brief pattern file path and vocabulary file path accordingly; 67 | load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path' 68 | pose_graph_save_path: "" # save and load path 69 | save_image: 0 70 | 71 | # Unsynchronization parameters 72 | estimate_td: 1 # online estimate time offset between camera and imu 73 | td: 0.000 # initial value of time offset. unit: s. readed image clock + td = real image clock (IMU clock) 74 | 75 | # Rolling shutter parameters 76 | rolling_shutter: 1 # 0: global shutter camera, 1: rolling shutter camera 77 | rolling_shutter_tr: 0.033 # unit: s. rolling shutter read out time per frame (from data sheet) 78 | 79 | # RGBD camera Ideal Range 80 | depth_min_dist: 0.3 81 | depth_max_dist: 3 82 | 83 | # Structural constraints 84 | horizontal_structural_th: 3.0 85 | line_horizontal_structural_th: 3.0 86 | vertical_structural_th: 2.0 87 | 88 | # Outlier threshold 89 | err_pt_line: 10 90 | err_normal_line: 10 91 | err_point: 20 92 | err_plane_dist: 1.0 93 | 94 | # Result format 95 | save_tum: 1 96 | 97 | # Drop frame 98 | drop_frame: 0 99 | 100 | # Min used num (line/point) 101 | min_used_num: 4 102 | 103 | # ablation study 104 | structure_verify: 1 105 | structure: 1 106 | -------------------------------------------------------------------------------- /config/struct_core_v2.yaml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | model_type: PINHOLE 4 | camera_name: camera 5 | image_width: 640 6 | image_height: 480 7 | distortion_parameters: 8 | k1: 0 # -2.9839715720358556e-01 9 | k2: 0 # 9.2224519780237782e-02 10 | p1: 0 # -1.1990340691497372e-04 11 | p2: 0 # -7.4597868882045419e-05 12 | projection_parameters: 13 | fx: 4.59357e+02 14 | fy: 4.59764e+02 15 | cx: 3.32695e+02 16 | cy: 2.58998e+02 17 | 18 | # Topics 19 | imu_topic: "/imu" 20 | image_topic: "/cam0/color" 21 | dpt_img_topic: "/cam0/depth" 22 | output_path: "/home/oran/WS/Work/SLAM/SVIO/output" 23 | 24 | # Extrinsic parameter between IMU and Camera. 25 | estimate_extrinsic: 0 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it. 26 | # 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess. 27 | 28 | body_T_cam0: !!opencv-matrix # Timu2c_1 Tu2c 29 | rows: 4 30 | cols: 4 31 | dt: d 32 | data: [0.00193013, -0.999997, 0.00115338, -0.00817048, 33 | -0.999996, -0.0019327, -0.00223606, 0.015075, 34 | 0.00223829, -0.00114906, -0.999997, -0.0110795, 35 | 0, 0, 0, 1] 36 | 37 | # camera # IMU 38 | # /z | x 39 | # / | 40 | # /____x y____| 41 | # | / 42 | # | / 43 | # | y / z 44 | 45 | body_T_cam1: !!opencv-matrix # Timu2c_2, Tc1_2_c2 is a virtual transformation [I,t] t = [0.1, 0, 0], note "mbf" in estimator_dpt.cpp 46 | rows: 4 47 | cols: 4 48 | dt: d 49 | data: [0.00193013, -0.999997, 0.00115338, -0.007977467, 50 | -0.999996, -0.0019327, -0.00223606, -0.0849246, 51 | 0.00223829, -0.00114906, -0.999997, -0.010855671, 52 | 0, 0, 0, 1] 53 | 54 | 55 | # Multiple thread support 56 | multiple_thread: 1 57 | 58 | # Feature traker paprameters 59 | max_cnt: 150 # max feature number in feature tracking 60 | min_dist: 30 # min distance between two features 61 | freq: 10 # frequence (Hz) of publish tracking result. At least 10Hz for good estimation. 
If set to 0, the frequency will be the same as the raw image rate 62 | F_threshold: 2.0 # RANSAC threshold (pixel) 63 | show_track: 1 # publish tracking image as topic 64 | flow_back: 1 # perform forward and backward optical flow to improve feature tracking accuracy 65 | 66 | # IMU acc. multiply 67 | acc_mult: -9.8 68 | 69 | # Optimization parameters 70 | max_solver_time: 0.04 # max solver iteration time (s), to guarantee real-time performance 71 | max_num_iterations: 8 # max solver iterations, to guarantee real-time performance 72 | keyframe_parallax: 20.0 #10.0 # keyframe selection threshold (pixel) 73 | 74 | # IMU parameters. The more accurate the parameters you provide, the better the performance. 75 | acc_n: 0.1 # accelerometer measurement noise standard deviation. 76 | gyr_n: 0.01 # gyroscope measurement noise standard deviation. 77 | acc_w: 0.001 # accelerometer bias random walk noise standard deviation. 78 | gyr_w: 0.0001 # gyroscope bias random walk noise standard deviation. 79 | g_norm: 9.81 # gravity magnitude 80 | 81 | # Loop closure 82 | loop_closure: 0 # if you want to use loop closure to minimize drift, set loop_closure to 1 and provide your BRIEF pattern file path and vocabulary file path accordingly 83 | load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path' 84 | pose_graph_save_path: "" # save and load path 85 | save_image: 0 #1 86 | 87 | # Unsynchronization parameters 88 | estimate_td: 0 # online estimation of the time offset between camera and IMU 89 | td: 0.0 # initial value of time offset. unit: s. image timestamp as read + td = true image timestamp (in the IMU clock) 90 | 91 | # Rolling shutter parameters 92 | rolling_shutter: 0 # 0: global shutter camera, 1: rolling shutter camera 93 | rolling_shutter_tr: 0.033 # unit: s. rolling shutter readout time per frame (from data sheet).
94 | 95 | # RGBD camera Ideal Range 96 | depth_min_dist: 0.3 97 | depth_max_dist: 7 98 | 99 | # Structural constraints 100 | horizontal_structural_th: 5.0 101 | line_horizontal_structural_th: 5.0 102 | vertical_structural_th: 3.0 103 | 104 | # Outlier threshold 105 | err_pt_line: 10 106 | err_normal_line: 10 107 | err_point: 20 108 | err_plane_dist: 1.0 109 | 110 | # Result format 111 | save_tum: 0 112 | 113 | # Drop frame 114 | drop_frame: 0 115 | 116 | # Min used num (line/point) 117 | min_used_num: 4 118 | 119 | # ablation study 120 | structure_verify: 1 121 | disable_line: 0 122 | disable_plane: 0 123 | structure: 1 -------------------------------------------------------------------------------- /launch/svio_euroc_run.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /launch/svio_openloris_run.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /launch/svio_vcu_run.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svio/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(svio) 2 | 3 | cmake_minimum_required(VERSION 2.8.3) 4 | 5 | # SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -std=c++11 -w -g -O3 -fpermissive -mno-avx -mno-avx2") 6 | SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -w -g -O3 -fpermissive") 7 | SET(CMAKE_BUILD_TYPE "RelWithDebInfo") 8 | 9 | if(COMMAND cmake_policy) 10 | cmake_policy(SET CMP0003 NEW) 11 | endif(COMMAND cmake_policy) 12 | 13 | # Set the build type. 
Options are: 14 | # Coverage : w/ debug symbols, w/o optimization, w/ code-coverage 15 | # Debug : w/ debug symbols, w/o optimization 16 | # Release : w/o debug symbols, w/ optimization 17 | # RelWithDebInfo : w/ debug symbols, w/ optimization 18 | # MinSizeRel : w/o debug symbols, w/ optimization, stripped binaries 19 | #set(ROS_BUILD_TYPE RelWithDebInfo) 20 | 21 | # rosbuild_init() 22 | 23 | option(WITH_OPENGV "use opengv" OFF) #ON use opengv or not in hybridPnP 24 | 25 | #set the default path for built executables to the "bin" directory 26 | set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) 27 | #set the default path for built libraries to the "lib" directory 28 | set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) 29 | 30 | # set(OpenCV_DIR "/home/davidz/work/3rdlibs/OpenCV-2.4.10/build") 31 | # set(OpenCV_DIR "/home/oran/WS/WSa/KU/opencv-3.2.0/opencv/build") 32 | find_package(OpenCV REQUIRED) 33 | # message("OpenCV_INCLUDE_DIR: ${OpenCV_INCLUDE_DIRS}") 34 | # include_directories("/home/hzhang8/work/3rdlib/opencv-3.3.1/install/include") 35 | 36 | find_package(catkin REQUIRED COMPONENTS 37 | roscpp 38 | cv_bridge 39 | sensor_msgs 40 | std_msgs 41 | pcl_conversions 42 | pcl_ros 43 | message_filters 44 | image_transport 45 | camera_models_in # copied from VINS-Fusion 46 | tf 47 | ) 48 | 49 | catkin_package( 50 | # DEPENDS Eigen 51 | # CATKIN_DEPENDS roscpp std_msgs sensor_msgs cv_bridge 52 | # INCLUDE_DIRS include 53 | # LIBRARIES 54 | ) 55 | 56 | SET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) 57 | find_package(Eigen3 QUIET) 58 | include_directories(${EIGEN3_INCLUDE_DIR} "/usr/include/eigen3") 59 | include_directories("${PROJECT_SOURCE_DIR}/include") 60 | include_directories(${catkin_INCLUDE_DIRS}) 61 | 62 | find_package(Ceres REQUIRED) 63 | include_directories(${CERES_INCLUDE_DIRS}) 64 | 65 | 66 | if(WITH_OPENGV) 67 | add_definitions(-DUSE_OPENGV) 68 | #### opengv #### 69 | set(opengv_DIR "/home/davidz/work/3rdlib/opengv/build") 70 | MESSAGE(STATUS "USE_OPENGV ") 71 | find_package(opengv REQUIRED 72 | PATHS ${opengv_DIR}) 73 | include_directories("${opengv_DIR}/../include") 74 | endif() 75 | 76 | # svio library 77 | add_library(svio state_estimation/svio.cpp state_estimation/svio_init.cpp 78 | state_estimation/feature_manager.cpp state_estimation/marginalization_factor.cpp state_estimation/depth_factor.cpp 79 | state_estimation/projection_quat.cpp state_estimation/parameters.cpp state_estimation/plane_factor.cpp 80 | utility/visualization.cpp utility/utility.cpp utility/CameraPoseVisualization.cpp 81 | initialization/initial_aligment.cpp initialization/initial_ex_rotation.cpp 82 | initialization/initial_sfm.cpp initialization/solve_5pts.cpp 83 | initialization/solve_opt.cpp initialization/translate_factor.cpp 84 | initialization/gmm_model.cpp feature_tracking/feature_tracker.cpp 85 | plane_tools/PlaneExtractor.cpp state_estimation/plane_manager.cpp state_estimation/line_parameterization.cpp 86 | line_tracking/line_tracker.cpp state_estimation/line_manager.cpp state_estimation/line_projection_factor.cpp 87 | state_estimation/vanish_point_factor.cpp state_estimation/mns_alg.cpp state_estimation/frontend.cpp) 88 | 89 | if(WITH_OPENGV) 90 | target_link_libraries(svio ${CERES_LIBRARIES} ${catkin_LIBRARIES} ${OpenCV_LIBS} opengv) 91 | else() 92 | target_link_libraries(svio ${CERES_LIBRARIES} ${catkin_LIBRARIES} ${OpenCV_LIBS}) 93 | endif() 94 | 95 | add_executable(svio_syn_node state_estimation/svio_syn_node.cpp) 96 | target_link_libraries(svio_syn_node svio) 97 | 98 | 
add_executable(svio_stereo_node state_estimation/svio_stereo_node.cpp) 99 | target_link_libraries(svio_stereo_node svio) 100 | 101 | 102 | -------------------------------------------------------------------------------- /svio/feature_tracking/feature_tracker.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include "camodocal/camera_models/CameraFactory.h" 13 | #include "camodocal/camera_models/CataCamera.h" 14 | #include "camodocal/camera_models/PinholeCamera.h" 15 | #include "../state_estimation/parameters.h" 16 | #include "../utility/tic_toc.h" 17 | #include "../plane_tools/PlaneExtractor.h" 18 | 19 | using namespace std; 20 | using namespace camodocal; 21 | using namespace Eigen; 22 | 23 | class GMM_Model; 24 | 25 | class FeatureTracker 26 | { 27 | public: 28 | 29 | // initialization 30 | FeatureTracker(); 31 | ~FeatureTracker(); 32 | void readIntrinsicParameter(const string& calib_file); 33 | 34 | // high-level tools 35 | void setMask(); 36 | void addPoints(); 37 | void rejectWithF(); 38 | void undistortedPoints(); 39 | 40 | // tools 41 | bool inBorder(const cv::Point2f &pt); 42 | double distance(cv::Point2f &pt1, cv::Point2f &pt2); 43 | void reduceVector(vector &v, vector& status); 44 | void reduceVector(vector &v, vector& status); 45 | 46 | // API 47 | void trackImage(const cv::Mat &_img, double _cur_time); 48 | void removeOutliers(set &removePtsIds); 49 | void associateDepthSimple(const cv::Mat& dpt); 50 | void associateDepthGMM(const cv::Mat& dpt, bool use_sim=true); 51 | void associatePlane(const PlaneDetection& plane_detector); 52 | cv::Mat drawImTrack(const cv::Mat& color_img); 53 | 54 | int row, col, n_id; 55 | camodocal::CameraPtr camera; 56 | float depthMapFactor; 57 | GMM_Model* mp_gmm; 58 | 59 | cv::Mat mask; 60 | vector n_pts; 61 | 62 | cv::Mat prev_img, cur_img; 63 | double cur_time, prev_time; 64 | vector prev_pts, cur_pts; 65 | vector prev_un_pts, cur_un_pts; 66 | vector cur_pts_depth; 67 | vector cur_pts_plane_index; 68 | map cur_un_pts_map, prev_un_pts_map; 69 | vector pts_velocity; 70 | vector ids; 71 | vector track_cnt; 72 | }; 73 | -------------------------------------------------------------------------------- /svio/initialization/gmm_model.cpp: -------------------------------------------------------------------------------- 1 | #include "gmm_model.h" 2 | #include "../utility/utility.h" 3 | #include 4 | #include 5 | #include 6 | 7 | using namespace cv; 8 | 9 | GMM_Model::GMM_Model(){ 10 | 11 | } 12 | GMM_Model::~GMM_Model(){} 13 | 14 | double GMM_Model::loss_d(double d, double mu, double sigma_d, double local_sigma){ 15 | double scale = 1.5; // 1.2; //0.7; 16 | // return log(scale*SQ(d)/(SQ(sigma_d)*SQ(local_sigma))+1); 17 | return scale*log(SQ(d)/(SQ(sigma_d)*SQ(local_sigma))+1); // lambda = 1.2 18 | } 19 | 20 | double GMM_Model::loss_lambda(double delta_lambda, double lambda, double local_sigma){ 21 | double scale = 1e6; // 1e6; // 806800; //700000; // SQ(0.001); // SQ(0.01); 22 | // return scale*log(SQ(delta_lambda)*SQ(lambda)/(SQ(local_sigma)) + 1); 23 | return scale*log(SQ(delta_lambda)*SQ(lambda)/(SQ(local_sigma))+1); // 806800 806800 3.08269e+06 24 | // return log(700000*SQ(delta_lambda)*SQ(lambda)/(SQ(local_sigma))+1); // 806800 806800 3.08269e+06 25 | } 26 | 27 | void GMM_Model::mu_std(vector& vt, double& mu, double& std) 28 | { 29 | if(vt.size() <= 0){ 30 | mu = std = 0; 31 | return ; 
32 | } 33 | 34 | double sum = std::accumulate(std::begin(vt), std::end(vt), 0.0); 35 | double m = sum / vt.size(); 36 | 37 | double accum = 0.0; 38 | std::for_each (std::begin(vt), std::end(vt), [&](const double d) { 39 | accum += (d - m) * (d - m); 40 | }); 41 | 42 | double stdev = sqrt(accum / (vt.size()-1)); 43 | return ; 44 | } 45 | 46 | void GMM_Model::gmm_model_depth(int r, int c, const cv::Mat& dpt, double &mu_d, double &sig_d, int use_sim) 47 | { 48 | cv::Mat W = (Mat_(3,3) << 4, 1, 4, 1, 0, 1, 4, 1, 4); 49 | double d = dpt.at(r, c)*0.001; 50 | if(d < 0.5 || d > 7){ 51 | mu_d = 0; 52 | sig_d = cp_depth_cov.y(d); 53 | return ; 54 | } 55 | 56 | // compute local sigma_s 57 | int n_invalid = 1; 58 | double local_std = 1; 59 | vector vdpt; 60 | for(int i=0; i<5; i++) 61 | for(int j=0; j<5; j++){ 62 | int ur = r + i - 2; 63 | int uc = c + j - 2; 64 | if(ur < 0 || uc < 0 || ur >= dpt.rows || uc >= dpt.cols) 65 | continue; 66 | 67 | double mu_ij = dpt.at(ur, uc)*0.001; 68 | if(mu_ij < 0.5 || mu_ij>7){ 69 | n_invalid++; 70 | continue; 71 | } 72 | vdpt.push_back(mu_ij); 73 | } 74 | double local_mu; 75 | mu_std(vdpt, local_mu, local_std); 76 | local_std += 2*n_invalid; 77 | if(local_std <= 0) local_std = 1; 78 | 79 | // simple model's depth uncertainty 80 | double std_d = cp_depth_cov.y(d); 81 | 82 | // gmm compute sum_w 83 | double sW = 0; 84 | double mu_z = 0; 85 | double std_sq = 0; 86 | for(int i=0; i<3; i++) 87 | for(int j=0; j<3; j++){ 88 | int ur = r + i -1; 89 | int uc = c + j -1; 90 | if(ur < 0 || uc < 0 || ur >= dpt.rows || uc >= dpt.cols) 91 | continue; 92 | 93 | double mu_ij = dpt.at(ur, uc)*0.001; 94 | if(mu_ij < 0.5 || mu_ij > 7) 95 | continue; 96 | if(fabs(mu_ij - d) > 0.5) 97 | continue; 98 | double std_ij = cp_depth_cov.y(mu_ij); 99 | double w = -W.at(i,j)/2. - use_sim*loss_d(mu_ij - d, d, std_d, local_std); 100 | w = exp(w); 101 | 102 | mu_z += w*mu_ij; 103 | std_sq += w*(SQ(std_ij)+SQ(mu_ij)); 104 | // sW += W.at(i,j); 105 | sW += (w); 106 | } 107 | 108 | // output 109 | mu_d = mu_z/sW; 110 | sig_d = sqrt(std_sq/sW - SQ(mu_d)); 111 | 112 | 113 | // cout<<" origin depth: "<(3,3) << 4, 1, 4, 1, 0, 1, 4, 1, 4); 120 | double d = dpt.at(r, c)*0.001; 121 | if(d < 0.5 || d > 7){ 122 | mu_lambda = 0; 123 | sig_lambda = 1.; 124 | return ; 125 | } 126 | 127 | // compute local sigma_s, 128 | int n_invalid = 1; 129 | double local_std = 1; 130 | vector vdpt; 131 | for(int i=0; i<5; i++) 132 | for(int j=0; j<5; j++){ 133 | int ur = r + i - 2; 134 | int uc = c + j - 2; 135 | if(ur < 0 || uc < 0 || ur >= dpt.rows || uc >= dpt.cols) 136 | continue; 137 | 138 | double mu_ij = dpt.at(ur, uc)*0.001; 139 | if(mu_ij < 0.5 || mu_ij>7){ 140 | n_invalid++; 141 | continue; 142 | } 143 | vdpt.push_back(1./mu_ij); 144 | } 145 | double local_mu; 146 | mu_std(vdpt, local_mu, local_std); 147 | local_std += 2*n_invalid; 148 | if(local_std <= 0) local_std = 1; 149 | 150 | // simple model's depth uncertainty 151 | // double std_d = cp_depth_cov.y(d); 152 | 153 | double lambda = 1./d; 154 | 155 | // gmm compute sum_w 156 | double sW = 0; 157 | double mu_z = 0; 158 | double std_sq = 0; 159 | for(int i=0; i<3; i++) 160 | for(int j=0; j<3; j++){ 161 | int ur = r + i -1; 162 | int uc = c + j -1; 163 | if(ur < 0 || uc < 0 || ur >= dpt.rows || uc >= dpt.cols) 164 | continue; 165 | 166 | double mu_ij = dpt.at(ur, uc)*0.001; 167 | if(mu_ij < 0.5 || mu_ij > 7) 168 | continue; 169 | if(fabs(mu_ij - d) > 0.5) 170 | continue; 171 | double std_ij = 0.0005; 172 | double lambda_ij = 1./mu_ij; 173 | double w = -W.at(i,j)/2. 
- use_sim*loss_lambda(lambda_ij - lambda, lambda, local_std); 174 | w = exp(w); 175 | 176 | mu_z += w*lambda_ij; 177 | std_sq += w*(SQ(std_ij)+SQ(lambda_ij)); 178 | // sW += W.at(i,j); 179 | sW += (w); 180 | } 181 | 182 | // output 183 | mu_lambda = mu_z/sW; 184 | sig_lambda = 12*sqrt(std_sq/sW - SQ(mu_lambda)); // 12 185 | // sig_lambda = sqrt(std_sq/sW - SQ(mu_lambda)); 186 | return ; 187 | } 188 | -------------------------------------------------------------------------------- /svio/initialization/gmm_model.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "opencv/cv.h" 4 | #include 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | // depth_sigma = y(depth) 11 | struct poly{ 12 | poly(double para[3]){ 13 | a1 = para[0]; a2 = para[1]; a3 = para[2]; 14 | } 15 | poly(){ 16 | // depth variance of structure core at central point 17 | a1 = 0.00155816; a2 = -0.00362021; a3 = 0.00452812; 18 | } 19 | double y(double x){ 20 | if(x <= 0.75) 21 | return 0.0007; 22 | return (a1*x*x + a2*x+a3); 23 | } 24 | double a1,a2,a3; 25 | int r,c; 26 | }; 27 | 28 | class GMM_Model 29 | { 30 | public: 31 | GMM_Model(); 32 | ~GMM_Model(); 33 | 34 | double loss_d(double d, double mu, double sigma_d, double local_sigma); 35 | double loss_lambda(double delta_lambda, double lambda, double local_sigma); 36 | 37 | void mu_std(vector& vt, double& mu, double& std); 38 | 39 | void gmm_model_depth(int u, int v, const cv::Mat& dpt, double &mu_d, double &sig_d, int use_sim = 1); 40 | void gmm_model_inv_depth(int u, int v, const cv::Mat& dpt, double &mu_lambda, double &sig_lambda, int use_sim = 1); 41 | 42 | poly cp_depth_cov; 43 | 44 | }; -------------------------------------------------------------------------------- /svio/initialization/initial_alignment.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include "../state_estimation/imu_factor.h" 5 | #include "../utility/utility.h" 6 | #include 7 | #include 8 | #include "../state_estimation/feature_manager.h" 9 | 10 | using namespace Eigen; 11 | using namespace std; 12 | 13 | class ImageFrame 14 | { 15 | public: 16 | ImageFrame(){}; 17 | ImageFrame(const map>>>& _points, double _t):t{_t},is_key_frame{false} 18 | { 19 | points = _points; 20 | }; 21 | 22 | map> > > points; 23 | double t; 24 | Matrix3d R; 25 | Vector3d T; 26 | IntegrationBase *pre_integration; 27 | bool is_key_frame; 28 | }; 29 | 30 | bool VisualIMUAlignment(map &all_image_frame, Vector3d* Bgs, Vector3d &g, VectorXd &x); -------------------------------------------------------------------------------- /svio/initialization/initial_ex_rotation.cpp: -------------------------------------------------------------------------------- 1 | #include "initial_ex_rotation.h" 2 | 3 | InitialEXRotation::InitialEXRotation(){ 4 | frame_count = 0; 5 | Rc.push_back(Matrix3d::Identity()); 6 | Rc_g.push_back(Matrix3d::Identity()); 7 | Rimu.push_back(Matrix3d::Identity()); 8 | ric = Matrix3d::Identity(); 9 | } 10 | 11 | bool InitialEXRotation::CalibrationExRotation(vector> corres, Quaterniond delta_q_imu, Matrix3d &calib_ric_result) 12 | { 13 | frame_count++; 14 | Rc.push_back(solveRelativeR(corres)); 15 | Rimu.push_back(delta_q_imu.toRotationMatrix()); 16 | Rc_g.push_back(ric.inverse() * delta_q_imu * ric); 17 | 18 | Eigen::MatrixXd A(frame_count * 4, 4); 19 | A.setZero(); 20 | int sum_ok = 0; 21 | for (int i = 1; i <= frame_count; i++) 22 | { 23 | Quaterniond r1(Rc[i]); 
24 | Quaterniond r2(Rc_g[i]); 25 | 26 | double angular_distance = 180 / M_PI * r1.angularDistance(r2); 27 | ROS_DEBUG( 28 | "%d %f", i, angular_distance); 29 | 30 | double huber = angular_distance > 5.0 ? 5.0 / angular_distance : 1.0; 31 | ++sum_ok; 32 | Matrix4d L, R; 33 | 34 | double w = Quaterniond(Rc[i]).w(); 35 | Vector3d q = Quaterniond(Rc[i]).vec(); 36 | L.block<3, 3>(0, 0) = w * Matrix3d::Identity() + Utility::skewSymmetric(q); 37 | L.block<3, 1>(0, 3) = q; 38 | L.block<1, 3>(3, 0) = -q.transpose(); 39 | L(3, 3) = w; 40 | 41 | Quaterniond R_ij(Rimu[i]); 42 | w = R_ij.w(); 43 | q = R_ij.vec(); 44 | R.block<3, 3>(0, 0) = w * Matrix3d::Identity() - Utility::skewSymmetric(q); 45 | R.block<3, 1>(0, 3) = q; 46 | R.block<1, 3>(3, 0) = -q.transpose(); 47 | R(3, 3) = w; 48 | 49 | A.block<4, 4>((i - 1) * 4, 0) = huber * (L - R); 50 | } 51 | 52 | JacobiSVD svd(A, ComputeFullU | ComputeFullV); 53 | Matrix x = svd.matrixV().col(3); 54 | Quaterniond estimated_R(x); 55 | ric = estimated_R.toRotationMatrix().inverse(); 56 | //cout << svd.singularValues().transpose() << endl; 57 | //cout << ric << endl; 58 | Vector3d ric_cov; 59 | ric_cov = svd.singularValues().tail<3>(); 60 | if (frame_count >= WINDOW_SIZE && ric_cov(1) > 0.25) 61 | { 62 | calib_ric_result = ric; 63 | return true; 64 | } 65 | else 66 | return false; 67 | } 68 | 69 | Matrix3d InitialEXRotation::solveRelativeR(const vector> &corres) 70 | { 71 | if (corres.size() >= 9) 72 | { 73 | vector ll, rr; 74 | for (int i = 0; i < int(corres.size()); i++) 75 | { 76 | ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1))); 77 | rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1))); 78 | } 79 | cv::Mat E = cv::findFundamentalMat(ll, rr); 80 | cv::Mat_ R1, R2, t1, t2; 81 | decomposeE(E, R1, R2, t1, t2); 82 | 83 | if (determinant(R1) + 1.0 < 1e-09) 84 | { 85 | E = -E; 86 | decomposeE(E, R1, R2, t1, t2); 87 | } 88 | double ratio1 = max(testTriangulation(ll, rr, R1, t1), testTriangulation(ll, rr, R1, t2)); 89 | double ratio2 = max(testTriangulation(ll, rr, R2, t1), testTriangulation(ll, rr, R2, t2)); 90 | cv::Mat_ ans_R_cv = ratio1 > ratio2 ? 
R1 : R2; 91 | 92 | Matrix3d ans_R_eigen; 93 | for (int i = 0; i < 3; i++) 94 | for (int j = 0; j < 3; j++) 95 | ans_R_eigen(j, i) = ans_R_cv(i, j); 96 | return ans_R_eigen; 97 | } 98 | return Matrix3d::Identity(); 99 | } 100 | 101 | double InitialEXRotation::testTriangulation(const vector &l, 102 | const vector &r, 103 | cv::Mat_ R, cv::Mat_ t) 104 | { 105 | cv::Mat pointcloud; 106 | cv::Matx34f P = cv::Matx34f(1, 0, 0, 0, 107 | 0, 1, 0, 0, 108 | 0, 0, 1, 0); 109 | cv::Matx34f P1 = cv::Matx34f(R(0, 0), R(0, 1), R(0, 2), t(0), 110 | R(1, 0), R(1, 1), R(1, 2), t(1), 111 | R(2, 0), R(2, 1), R(2, 2), t(2)); 112 | cv::triangulatePoints(P, P1, l, r, pointcloud); 113 | int front_count = 0; 114 | for (int i = 0; i < pointcloud.cols; i++) 115 | { 116 | double normal_factor = pointcloud.col(i).at(3); 117 | 118 | cv::Mat_ p_3d_l = cv::Mat(P) * (pointcloud.col(i) / normal_factor); 119 | cv::Mat_ p_3d_r = cv::Mat(P1) * (pointcloud.col(i) / normal_factor); 120 | if (p_3d_l(2) > 0 && p_3d_r(2) > 0) 121 | front_count++; 122 | } 123 | ROS_DEBUG("MotionEstimator: %f", 1.0 * front_count / pointcloud.cols); 124 | return 1.0 * front_count / pointcloud.cols; 125 | } 126 | 127 | void InitialEXRotation::decomposeE(cv::Mat E, 128 | cv::Mat_ &R1, cv::Mat_ &R2, 129 | cv::Mat_ &t1, cv::Mat_ &t2) 130 | { 131 | cv::SVD svd(E, cv::SVD::MODIFY_A); 132 | cv::Matx33d W(0, -1, 0, 133 | 1, 0, 0, 134 | 0, 0, 1); 135 | cv::Matx33d Wt(0, 1, 0, 136 | -1, 0, 0, 137 | 0, 0, 1); 138 | R1 = svd.u * cv::Mat(W) * svd.vt; 139 | R2 = svd.u * cv::Mat(Wt) * svd.vt; 140 | t1 = svd.u.col(2); 141 | t2 = -svd.u.col(2); 142 | } 143 | -------------------------------------------------------------------------------- /svio/initialization/initial_ex_rotation.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include "../state_estimation/parameters.h" 5 | using namespace std; 6 | 7 | #include 8 | 9 | #include 10 | using namespace Eigen; 11 | #include 12 | 13 | /* This class help you to calibrate extrinsic rotation between imu and camera when your totally don't konw the extrinsic parameter */ 14 | class InitialEXRotation 15 | { 16 | public: 17 | InitialEXRotation(); 18 | bool CalibrationExRotation(vector> corres, Quaterniond delta_q_imu, Matrix3d &calib_ric_result); 19 | private: 20 | Matrix3d solveRelativeR(const vector> &corres); 21 | 22 | double testTriangulation(const vector &l, 23 | const vector &r, 24 | cv::Mat_ R, cv::Mat_ t); 25 | void decomposeE(cv::Mat E, 26 | cv::Mat_ &R1, cv::Mat_ &R2, 27 | cv::Mat_ &t1, cv::Mat_ &t2); 28 | int frame_count; 29 | 30 | vector< Matrix3d > Rc; 31 | vector< Matrix3d > Rimu; 32 | vector< Matrix3d > Rc_g; 33 | Matrix3d ric; 34 | }; 35 | 36 | 37 | -------------------------------------------------------------------------------- /svio/initialization/initial_sfm.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | using namespace Eigen; 12 | using namespace std; 13 | 14 | 15 | struct SFMFeature 16 | { 17 | bool state; 18 | int id; 19 | double position[3]; 20 | double depth; 21 | 22 | vector> observation; // observed feature locations at different frames 23 | vector> observation_depth; // observed depth measurement at different frames 24 | vector> observation_depth_std; // observed depth measurement standard deviation at different frames 25 | 26 | }; 27 | 28 | struct 
ReprojectionError3D 29 | { 30 | ReprojectionError3D(double observed_u, double observed_v) 31 | :observed_u(observed_u), observed_v(observed_v) 32 | {} 33 | 34 | template 35 | bool operator()(const T* const camera_R, const T* const camera_T, const T* point, T* residuals) const 36 | { 37 | T p[3]; 38 | ceres::QuaternionRotatePoint(camera_R, point, p); 39 | p[0] += camera_T[0]; p[1] += camera_T[1]; p[2] += camera_T[2]; 40 | T xp = p[0] / p[2]; 41 | T yp = p[1] / p[2]; 42 | residuals[0] = xp - T(observed_u); 43 | residuals[1] = yp - T(observed_v); 44 | return true; 45 | } 46 | 47 | static ceres::CostFunction* Create(const double observed_x, 48 | const double observed_y) 49 | { 50 | return (new ceres::AutoDiffCostFunction< 51 | ReprojectionError3D, 2, 4, 3, 3>( 52 | new ReprojectionError3D(observed_x,observed_y))); 53 | } 54 | 55 | double observed_u; 56 | double observed_v; 57 | }; 58 | 59 | class GlobalSFM 60 | { 61 | public: 62 | GlobalSFM(); 63 | bool construct(int frame_num, Quaterniond* q, Vector3d* T, int l, 64 | const Matrix3d relative_R, const Vector3d relative_T, 65 | vector &sfm_f, map &sfm_tracked_points); 66 | 67 | private: 68 | bool solveFrameByPnP(Matrix3d &R_initial, Vector3d &P_initial, int i, vector &sfm_f); 69 | 70 | void triangulatePoint(Eigen::Matrix &Pose0, Eigen::Matrix &Pose1, 71 | Vector2d &point0, Vector2d &point1, Vector3d &point_3d); 72 | void triangulateTwoFrames(int frame0, Eigen::Matrix &Pose0, 73 | int frame1, Eigen::Matrix &Pose1, 74 | vector &sfm_f); 75 | void triangulateTwoFramesWithDepth(int frame0, Eigen::Matrix &Pose0, 76 | int frame1, Eigen::Matrix &Pose1, 77 | vector &sfm_f); 78 | void triangulateTwoFramesWithDepthCov(int frame0, Matrix & Pose0, int frame1, 79 | Eigen::Matrix & Pose1, vector & sfm_f); 80 | 81 | int feature_num; 82 | }; -------------------------------------------------------------------------------- /svio/initialization/solve_5pts.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | using namespace std; 5 | 6 | #include 7 | #include 8 | using namespace Eigen; 9 | 10 | #include 11 | 12 | class MotionEstimator 13 | { 14 | public: 15 | 16 | bool solveRelativeRT(const vector> &corres, Matrix3d &R, Vector3d &T); 17 | 18 | bool solveRelativeHybrid(const vector> &corres, Matrix3d &Rotation, Vector3d &Translation, vector* pcov = NULL); 19 | 20 | private: 21 | double testTriangulation(const vector &l, 22 | const vector &r, 23 | cv::Mat_ R, cv::Mat_ t); 24 | void decomposeE(cv::Mat E, 25 | cv::Mat_ &R1, cv::Mat_ &R2, 26 | cv::Mat_ &t1, cv::Mat_ &t2); 27 | }; 28 | 29 | 30 | -------------------------------------------------------------------------------- /svio/initialization/solve_opt.cpp: -------------------------------------------------------------------------------- 1 | #include "solve_opt.h" 2 | #include "translate_factor.h" 3 | #include "../state_estimation/feature_manager.h" 4 | 5 | OptSolver::OptSolver(){} 6 | 7 | // notice here corres must be inliers 8 | bool OptSolver::solveHybrid(const vector> &corres, Matrix3d &Rji, Vector3d &tji, vector* pcov) 9 | { 10 | // first solve for initial translation 11 | solveTCeres(corres, Rji, tji, pcov); 12 | 13 | // TODO: remove outliers? 
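// Two-stage scheme: solveTCeres above keeps the supplied rotation Rji fixed and estimates
// only the translation, while solveCeres below refines rotation and translation jointly
// with a quaternion parameterization over the same correspondences.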
14 | 15 | // optimization the result 16 | solveCeres(corres, Rji, tji); 17 | return true; 18 | } 19 | 20 | bool OptSolver::solveTCeres(const vector> &corres, const Matrix3d &Rji, Vector3d &tji, vector* pcov) 21 | { 22 | para_T[0][0] = tji(0); 23 | para_T[0][1] = tji(1); 24 | para_T[0][2] = tji(2); 25 | 26 | ceres::Problem problem; 27 | ceres::LossFunction *loss_function; 28 | // loss_function = new ceres::CauchyLoss(1.0); 29 | loss_function = new ceres::HuberLoss(1.0); 30 | 31 | problem.AddParameterBlock(para_T[0], 3); 32 | 33 | Matrix3d cov_pti = Matrix3d::Identity(); 34 | 35 | double ceres_sum_err = 0; 36 | for(int i=0; i> &corres, Matrix3d &Rji, Vector3d &tji) 74 | { 75 | Quaterniond qji(Rji); 76 | 77 | m_tji[0][0] = tji.x(); 78 | m_tji[0][1] = tji.y(); 79 | m_tji[0][2] = tji.z(); 80 | 81 | m_qji[0][0] = qji.w(); 82 | m_qji[0][1] = qji.x(); 83 | m_qji[0][2] = qji.y(); 84 | m_qji[0][3] = qji.z(); 85 | 86 | 87 | //full BA 88 | ceres::Problem problem; 89 | ceres::LocalParameterization* local_parameterization = new ceres::QuaternionParameterization(); 90 | problem.AddParameterBlock(m_qji[0], 4, local_parameterization); 91 | problem.AddParameterBlock(m_tji[0], 3); 92 | 93 | int N = corres.size(); 94 | for(int i=0; i 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include "initial_sfm.h" 13 | using namespace Eigen; 14 | using namespace std; 15 | 16 | class OptSolver 17 | { 18 | public: 19 | OptSolver(); 20 | 21 | // optimize to solve [R, t] 22 | bool solveCeres(const vector> &corres, Matrix3d &Rji, Vector3d &tv); 23 | 24 | // entrance function, estimate [R,t] given intial R 25 | bool solveHybrid(const vector> &corres, Matrix3d &Rji, Vector3d &tv, vector* pcov=NULL); 26 | 27 | // only solve t in [R; t] 28 | bool solveTCeres(const vector> &corres, const Matrix3d &Rji, Vector3d &tji, vector* pcov=NULL); 29 | 30 | // camera pose 31 | double m_tji[1][3]; 32 | double m_qji[1][4]; 33 | 34 | double para_T[1][3]; // for translation 35 | double para_s[1][0]; // for scale 36 | 37 | double para_pt[1000][3]; 38 | 39 | int feature_num; 40 | }; -------------------------------------------------------------------------------- /svio/initialization/translate_factor.cpp: -------------------------------------------------------------------------------- 1 | #include "translate_factor.h" 2 | using namespace Eigen; 3 | 4 | SinglePtFactor::SinglePtFactor(const Eigen::Vector3d& pt, const Eigen::Matrix3d& C): 5 | pt_xi(pt) 6 | { 7 | Matrix3d INF = C.inverse(); 8 | LLT lltOfA(INF); // compute the Cholesky decomposition of A 9 | inf_pt_xi = lltOfA.matrixL(); // retrieve factor L in the decomposition 10 | // The previous two lines can also be written as "L = A.llt().matrixL()" 11 | } 12 | 13 | bool SinglePtFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const 14 | { 15 | Eigen::Vector3d pt(parameters[0][0], parameters[0][1], parameters[0][2]); 16 | Eigen::Map> residual(residuals); 17 | 18 | residual = pt - pt_xi; 19 | residual = inf_pt_xi * residual; 20 | 21 | if(jacobians){ 22 | if(jacobians[0]){ 23 | Eigen::Map> jacobian_t(jacobians[0]); 24 | jacobian_t = inf_pt_xi; 25 | } 26 | } 27 | return true; 28 | } 29 | 30 | 31 | TranslateWithPtFactor::TranslateWithPtFactor(const Eigen::Matrix3d& R, const Eigen::Vector2d& xnj, 32 | const Eigen::Matrix3d& cov_xi): 33 | pt_xnj(xnj), Rji(R), cov_pt_xi(cov_xi) 34 | {} 35 | 36 | bool TranslateWithPtFactor::Evaluate(double const *const *parameters, double *residuals, double 
**jacobians) const 37 | { 38 | Eigen::Vector3d T(parameters[0][0], parameters[0][1], parameters[0][2]); 39 | Eigen::Vector3d pt_xi(parameters[1][0], parameters[1][1], parameters[1][2]); 40 | Eigen::Map> residual(residuals); 41 | 42 | Eigen::Matrix R1 = Rji.row(0); 43 | Eigen::Matrix R2 = Rji.row(1); 44 | Eigen::Matrix R3 = Rji.row(2); 45 | 46 | Eigen::Matrix A; 47 | A << 1, 0, -pt_xnj(0), 48 | 0, 1, -pt_xnj(1); 49 | Eigen::Matrix b; 50 | Eigen::Matrix dr_dx = (R1 - pt_xnj(0)*R3); 51 | Eigen::Matrix dr_dy = (R2 - pt_xnj(1)*R3); 52 | b(0) = (pt_xnj(0)*R3 - R1)*pt_xi; 53 | b(1) = (pt_xnj(1)*R3 - R2)*pt_xi; 54 | 55 | // weight 56 | Eigen::Matrix2d W = Eigen::Matrix2d::Identity(); 57 | W(0,0) = 1./sqrt(dr_dx * cov_pt_xi * dr_dx.transpose()); 58 | W(1,1) = 1./sqrt(dr_dy * cov_pt_xi * dr_dy.transpose()); 59 | 60 | residual = A*T - b; 61 | residual = W * residual; 62 | 63 | if(jacobians){ 64 | if(jacobians[0]){ 65 | Eigen::Map> jacobian_t(jacobians[0]); 66 | 67 | jacobian_t = W*A; 68 | } 69 | if(jacobians[1]){ 70 | Eigen::Map> jacobian_t(jacobians[1]); 71 | 72 | jacobian_t.row(0) = dr_dx; 73 | jacobian_t.row(1) = dr_dy; 74 | jacobian_t = W * jacobian_t; 75 | } 76 | } 77 | 78 | return true; 79 | } 80 | 81 | TranslateFactor::TranslateFactor(const Eigen::Matrix3d& R, const Eigen::Vector3d& xi, const Eigen::Vector2d& xnj, 82 | const Eigen::Matrix3d& cov_xi): 83 | pt_xi(xi), pt_xnj(xnj), Rji(R), cov_pt_xi(cov_xi) 84 | {} 85 | 86 | bool TranslateFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const 87 | { 88 | Eigen::Vector3d T(parameters[0][0], parameters[0][1], parameters[0][2]); 89 | Eigen::Map> residual(residuals); 90 | 91 | Eigen::Matrix R1 = Rji.row(0); 92 | Eigen::Matrix R2 = Rji.row(1); 93 | Eigen::Matrix R3 = Rji.row(2); 94 | 95 | Eigen::Matrix A; 96 | A << 1, 0, -pt_xnj(0), 97 | 0, 1, -pt_xnj(1); 98 | Eigen::Matrix b; 99 | Eigen::Matrix dr_dx = (R1 - pt_xnj(0)*R3); 100 | Eigen::Matrix dr_dy = (R2 - pt_xnj(1)*R3); 101 | b(0) = (pt_xnj(0)*R3 - R1)*pt_xi; 102 | b(1) = (pt_xnj(1)*R3 - R2)*pt_xi; 103 | 104 | // weight 105 | Eigen::Matrix2d W = Eigen::Matrix2d::Identity(); 106 | W(0,0) = 1./sqrt(dr_dx * cov_pt_xi * dr_dx.transpose()); 107 | W(1,1) = 1./sqrt(dr_dy * cov_pt_xi * dr_dy.transpose()); 108 | 109 | residual = A*T - b; 110 | residual = W * residual; 111 | 112 | if(jacobians){ 113 | if(jacobians[0]){ 114 | Eigen::Map> jacobian_t(jacobians[0]); 115 | 116 | jacobian_t = W*A; 117 | 118 | } 119 | } 120 | 121 | return true; 122 | } 123 | 124 | 125 | TranslateScaleFactor::TranslateScaleFactor(const Eigen::Matrix3d& R, const Eigen::Vector3d& nt, const Eigen::Vector3d& xi, const Eigen::Vector2d& xnj, 126 | const Eigen::Matrix3d& cov_xi): 127 | pt_xi(xi), pt_xnj(xnj), Rji(R), ntji(nt), cov_pt_xi(cov_xi) 128 | {} 129 | 130 | bool TranslateScaleFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const 131 | { 132 | double scale = parameters[0][0]; 133 | Eigen::Vector3d T = scale * ntji; 134 | Eigen::Map> residual(residuals); 135 | 136 | Eigen::Matrix R1 = Rji.row(0); 137 | Eigen::Matrix R2 = Rji.row(1); 138 | Eigen::Matrix R3 = Rji.row(2); 139 | 140 | Eigen::Matrix A; 141 | A << 1, 0, -pt_xnj(0), 142 | 0, 1, -pt_xnj(1); 143 | Eigen::Matrix b; 144 | Eigen::Matrix dr_dx = (R1 - pt_xnj(0)*R3); 145 | Eigen::Matrix dr_dy = (R2 - pt_xnj(1)*R3); 146 | b(0) = (pt_xnj(0)*R3 - R1)*pt_xi; 147 | b(1) = (pt_xnj(1)*R3 - R2)*pt_xi; 148 | 149 | // weight 150 | Eigen::Matrix2d W = Eigen::Matrix2d::Identity(); 151 | W(0,0) = 1./sqrt(dr_dx * 
cov_pt_xi * dr_dx.transpose()); 152 | W(1,1) = 1./sqrt(dr_dy * cov_pt_xi * dr_dy.transpose()); 153 | 154 | residual = A*T - b; 155 | residual = W * residual; 156 | 157 | if(jacobians){ 158 | if(jacobians[0]){ 159 | Eigen::Map> jacobian_t(jacobians[0]); 160 | 161 | jacobian_t = W*A*ntji; 162 | 163 | } 164 | } 165 | 166 | return true; 167 | } -------------------------------------------------------------------------------- /svio/initialization/translate_factor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | 4 | #include 5 | #include 6 | 7 | 8 | class TranslateFactor : public ceres::SizedCostFunction<2, 3> 9 | { 10 | public: 11 | TranslateFactor(const Eigen::Matrix3d& R, const Eigen::Vector3d& xi, const Eigen::Vector2d& xnj, 12 | const Eigen::Matrix3d& cov_xi); 13 | virtual bool Evaluate(double const* const *params, double *residuals, double**jacobians) const; 14 | 15 | Eigen::Vector3d pt_xi; 16 | Eigen::Vector2d pt_xnj; 17 | Eigen::Matrix3d Rji; 18 | Eigen::Matrix3d cov_pt_xi; 19 | // Eigen::Matrix2d cov_pt_xnj; 20 | 21 | }; 22 | 23 | class TranslateScaleFactor : public ceres::SizedCostFunction<2, 1> 24 | { 25 | public: 26 | TranslateScaleFactor(const Eigen::Matrix3d& R, const Eigen::Vector3d& nt, const Eigen::Vector3d& xi, const Eigen::Vector2d& xnj, 27 | const Eigen::Matrix3d& cov_xi); 28 | virtual bool Evaluate(double const* const *params, double *residuals, double**jacobians) const; 29 | 30 | Eigen::Vector3d pt_xi; 31 | Eigen::Vector2d pt_xnj; 32 | Eigen::Matrix3d Rji; 33 | Eigen::Vector3d ntji; // normalized tji 34 | Eigen::Matrix3d cov_pt_xi; 35 | }; 36 | 37 | class TranslateWithPtFactor : public ceres::SizedCostFunction<2, 3, 3> 38 | { 39 | public: 40 | TranslateWithPtFactor(const Eigen::Matrix3d& R, const Eigen::Vector2d& xnj, 41 | const Eigen::Matrix3d& cov_xi); 42 | virtual bool Evaluate(double const* const *params, double *residuals, double**jacobians) const; 43 | 44 | Eigen::Vector2d pt_xnj; 45 | Eigen::Matrix3d Rji; 46 | Eigen::Matrix3d cov_pt_xi; 47 | }; 48 | 49 | class SinglePtFactor : public ceres::SizedCostFunction<3, 3> 50 | { 51 | public: 52 | SinglePtFactor(const Eigen::Vector3d& pt, const Eigen::Matrix3d& cov); 53 | 54 | virtual bool Evaluate(double const* const *params, double *residuals, double**jacobians) const; 55 | 56 | Eigen::Vector3d pt_xi; 57 | Eigen::Matrix3d inf_pt_xi; 58 | }; -------------------------------------------------------------------------------- /svio/line_tracking/line_tracker.h: -------------------------------------------------------------------------------- 1 | 2 | #pragma once 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include "camodocal/camera_models/CameraFactory.h" 18 | #include "camodocal/camera_models/CataCamera.h" 19 | #include "camodocal/camera_models/PinholeCamera.h" 20 | #include "../state_estimation/parameters.h" 21 | #include "../utility/tic_toc.h" 22 | #include "../plane_tools/PlaneExtractor.h" 23 | 24 | using namespace std; 25 | using namespace camodocal; 26 | using namespace Eigen; 27 | 28 | class PointsPerLine 29 | { 30 | public: 31 | PointsPerLine():detect_init(true),track_failed(false),plane_index(-1),track_cnt(0),track_failed_cnt(0),length(0.f){} 32 | 33 | int id; 34 | std::vector pt_index; 35 | 36 | std::vector segments; // (x1,y1,x2,y2) of line segments 37 | std::vector lens; // length of line segments 38 | 39 | std::vector cur_pts_per_line; 40 
| 41 | double cx, cy; // (x-cx)/dx = (y-cy)/dy 42 | Vector2d v; 43 | 44 | bool track_failed; 45 | int track_failed_cnt; 46 | bool detect_init; 47 | int track_cnt; 48 | float track_succ_ratio; 49 | 50 | float length; 51 | cv::Point2f s, e, un_s, un_e; 52 | 53 | int plane_index; 54 | }; 55 | 56 | class LineTracker 57 | { 58 | public: 59 | 60 | // initialization 61 | LineTracker(); 62 | void setParameters(const string &calib_file); 63 | 64 | // high-level tools 65 | void setMask(); 66 | void addPoints(); 67 | void rejectWithF(); 68 | void undistortedPoints(); 69 | 70 | // tools 71 | bool inBorder(const cv::Point2f &pt); 72 | void reduceVector(vector &v, vector& status); 73 | void reduceVector(vector &v, vector& status); 74 | void reduceVector(vector> &v, vector& status); 75 | void reduceVector(vector &v, vector& status); 76 | double RansacLinefit(std::vector& pts, Eigen::VectorXf& coefficient); 77 | 78 | // API 79 | void trackImage(const cv::Mat &_img); 80 | void associatePlane(const PlaneDetection& plane_detector); 81 | cv::Mat drawImTrack(const cv::Mat& color_img); 82 | void recordStats(); 83 | void printStats(); 84 | 85 | int row, col, n_id, min_klt_inliers, max_track_failed_cnt; 86 | float line_dist_th, merge_ang_th, line_ransac_inlier_rate_th, line_len_th, sample_delta, plane_cnt_th, length_decline_ratio; 87 | camodocal::CameraPtr camera; 88 | cv::Ptr fld; 89 | 90 | cv::Mat prev_img, cur_img; 91 | std::vector prev_pts, cur_pts; 92 | std::vector lines; 93 | 94 | std::vector> stats; 95 | }; 96 | -------------------------------------------------------------------------------- /svio/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | svio 4 | 0.1.0 5 | 6 | Depth Uncertainty Incorporated VIO 7 | 8 | David Zhang 9 | XXX 10 | LGPL 11 | 12 | catkin 13 | pcl_conversions 14 | tf 15 | pcl_conversions 16 | tf 17 | 18 | cv_bridge 19 | cv_bridge 20 | 21 | sensor_msgs 22 | sensor_msgs 23 | 24 | image_transport 25 | image_transport 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /svio/plane_tools/PlaneExtractor.h: -------------------------------------------------------------------------------- 1 | #ifndef PLANEEXTRACTOR_H 2 | #define PLANEEXTRACTOR_H 3 | 4 | #include 5 | #include "opencv2/opencv.hpp" 6 | #include 7 | #include 8 | #include 9 | #include "peac/AHCPlaneFitter.hpp" 10 | #include 11 | 12 | #include "camodocal/camera_models/CameraFactory.h" 13 | #include "camodocal/camera_models/CataCamera.h" 14 | #include "camodocal/camera_models/PinholeCamera.h" 15 | #include "../state_estimation/parameters.h" 16 | #include "../initialization/gmm_model.h" 17 | 18 | typedef Eigen::Vector3d VertexType; 19 | typedef cv::Vec3d VertexColour; 20 | typedef Eigen::Matrix3d VertexVariance; 21 | 22 | #ifdef __linux__ 23 | #define _isnan(x) isnan(x) 24 | #endif 25 | 26 | struct ImagePointCloud { 27 | std::vector vertices; // 3D vertices 28 | std::vector verticesColour; 29 | std::vector verticesVariance; 30 | int w, h; 31 | 32 | inline int width() const { return w; } 33 | 34 | inline int height() const { return h; } 35 | 36 | inline bool get(const int row, const int col, double &x, double &y, double &z) const { 37 | const int pixIdx = row * w + col; 38 | z = vertices[pixIdx][2]; 39 | // Remove points with 0 or invalid depth in case they are detected as a plane 40 | if (z == 0 || std::_isnan(z)) return false; 41 | x = vertices[pixIdx][0]; 42 | y = vertices[pixIdx][1]; 43 | return true; 44 | } 45 | }; 46 | 47 | class 
GMM_Model; 48 | 49 | class PlaneDetection { 50 | public: 51 | ImagePointCloud cloud; 52 | ahc::PlaneFitter plane_filter; 53 | std::vector> plane_vertices_; // vertex indices each plane contains 54 | cv::Mat seg_img_; // segmentation image 55 | cv::Mat color_img_; // input color image 56 | cv::Mat gmm_d; // depth covariance image 57 | int plane_num_; 58 | GMM_Model* mp_gmm; 59 | 60 | double fx, fy, cx, cy; 61 | float depthMapFactor; 62 | int row, col; 63 | 64 | std::map c_book; 65 | std::vector meas; 66 | 67 | public: 68 | PlaneDetection(); 69 | 70 | ~PlaneDetection(); 71 | 72 | void setParameters(const std::string &calib_file); 73 | 74 | void associateAllDepthSimple(const cv::Mat &dpt, int *n_valid=nullptr); 75 | void associateAllDepthGMM(const cv::Mat &dpt, int *n_valid, bool use_sim); 76 | 77 | bool readColorImage(cv::Mat RGBImg); 78 | bool readDepthImage(const cv::Mat depthImg, bool use_gmm=true); 79 | void runPlaneDetection(); 80 | void buildMeasurement(); 81 | void buildColorBook(); 82 | cv::Mat drawImDetect(const cv::Mat& color_img); 83 | }; 84 | 85 | 86 | #endif //PLANEEXTRACTOR_H -------------------------------------------------------------------------------- /svio/plane_tools/peac/AHCParamSet.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2014 Mitsubishi Electric Research Laboratories All 3 | // Rights Reserved. 4 | // 5 | // Permission to use, copy and modify this software and its 6 | // documentation without fee for educational, research and non-profit 7 | // purposes, is hereby granted, provided that the above copyright 8 | // notice, this paragraph, and the following three paragraphs appear 9 | // in all copies. 10 | // 11 | // To request permission to incorporate this software into commercial 12 | // products contact: Director; Mitsubishi Electric Research 13 | // Laboratories (MERL); 201 Broadway; Cambridge, MA 02139. 14 | // 15 | // IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, 16 | // INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING 17 | // LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS 18 | // DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF 19 | // SUCH DAMAGES. 20 | // 21 | // MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN 24 | // "AS IS" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 25 | // SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
26 | // 27 | #pragma once 28 | 29 | #include "math.h" 30 | 31 | namespace ahc { 32 | 33 | #define MACRO_DEG2RAD(d) ((d)*M_PI/180.0) 34 | #define MACRO_RAD2DEG(r) ((r)*180.0/M_PI) 35 | 36 | enum InitType { 37 | INIT_STRICT = 0, //no nan point is allowed in any valid init blocks 38 | INIT_LOOSE = 1 //at most half of a init block can be nan point 39 | }; 40 | 41 | /** 42 | * \brief ParamSet is a struct representing a set of parameters used in ahc::PlaneFitter 43 | */ 44 | struct ParamSet { 45 | // related to T_mse 46 | double depthSigma; //\sigma in the paper, unit: u^-1 mm^-1 47 | double stdTol_init; //\epsilon in the paper, used when init graph, unit: u mm 48 | double stdTol_merge; //\epsilon in the paper, used when merging nodes, unit: u mm 49 | 50 | // related to T_ang 51 | double z_near, z_far; //unit: u mm, closest/farthest z to be considered 52 | double angle_near, angle_far; //unit: rad, corresponding normal deviation angle 53 | double similarityTh_merge; //unit: none, 1 means the same, 0 means perpendicular 54 | double similarityTh_refine; //unit: none 55 | 56 | // related to T_dz 57 | double depthAlpha; //unit: none, corresponds to the 2*\alpha in the paper 58 | double depthChangeTol; //unit: u mm 59 | 60 | InitType initType; 61 | 62 | enum Phase { 63 | P_INIT = 0, 64 | P_MERGING = 1, 65 | P_REFINE = 2 66 | }; 67 | 68 | ParamSet() : depthSigma(1.6e-6), 69 | stdTol_init(5), stdTol_merge(8), 70 | z_near(500), z_far(4000), 71 | angle_near(MACRO_DEG2RAD(5.0)), angle_far(MACRO_DEG2RAD(5.0)), // original: angle_near(MACRO_DEG2RAD(15.0)) angle_far(MACRO_DEG2RAD(90.0)) oranfire 72 | similarityTh_merge(std::cos(MACRO_DEG2RAD(5.0))), // original: similarityTh_merge(std::cos(MACRO_DEG2RAD(60.0))) oranfire 73 | similarityTh_refine(std::cos(MACRO_DEG2RAD(30.0))), 74 | depthAlpha(0.04), depthChangeTol(0.02), 75 | initType(INIT_STRICT) {} 76 | 77 | /** 78 | * \brief Dynamic MSE threshold, depending on depth z 79 | * 80 | * \param [in] phase specify whether invoked in initGraph or when merging 81 | * \param [in] z current depth, unit: u mm 82 | * \return the MSE threshold at depth z, unit: u^2 mm^2 83 | * 84 | * \details Reference: 2012.Sensors.Khoshelham.Accuracy and Resolution of Kinect Depth Data for Indoor Mapping Applications 85 | */ 86 | inline double T_mse(const Phase phase, const double z = 0) const { 87 | //theoretical point-plane distance std = sigma * z * z 88 | //sigma corresponds to \sigma * (m/f/b) in the 2012.Khoshelham paper 89 | //we add a stdTol to move the theoretical curve up as tolerances 90 | switch (phase) { 91 | case P_INIT: 92 | return std::pow(depthSigma * z * z + stdTol_init, 2); 93 | case P_MERGING: 94 | case P_REFINE: 95 | default: 96 | return std::pow(depthSigma * z * z + stdTol_merge, 2); 97 | } 98 | } 99 | 100 | /** 101 | * \brief Dynamic normal deviation threshold, depending on depth z 102 | * 103 | * \param [in] phase specify whether invoked in initGraph or when merging 104 | * \param [in] z current depth (z>=0) 105 | * \return cos of the normal deviation threshold at depth z 106 | * 107 | * \details This is simply a linear mapping from depth to thresholding angle 108 | * and the threshold will be used to reject edge when initialize the graph; 109 | * this function corresponds to T_{ANG} in our paper 110 | */ 111 | inline double T_ang(const Phase phase, const double z = 0) const { 112 | switch (phase) { 113 | case P_INIT: {//linear maping z->thresholding angle, clipping z also 114 | double clipped_z = z; 115 | clipped_z = std::max(clipped_z, z_near); 116 | 
clipped_z = std::min(clipped_z, z_far); 117 | const double factor = (angle_far - angle_near) / (z_far - z_near); 118 | return std::cos(factor * clipped_z + angle_near - factor * z_near); 119 | } 120 | case P_MERGING: { 121 | return similarityTh_merge; 122 | } 123 | case P_REFINE: 124 | default: { 125 | return similarityTh_refine; 126 | } 127 | } 128 | } 129 | 130 | /** 131 | * \brief Dynamic threshold to test whether the two adjacent pixels are discontinuous in depth 132 | * 133 | * \param [in] z depth of the current pixel 134 | * \return the max depth change allowed at depth z to make the points connected in a single block 135 | * 136 | * \details This is modified from pcl's segmentation code as well as suggested in 2013.iros.Holzer 137 | * essentially returns factor*z+tolerance 138 | * (TODO: maybe change this to 3D-point distance threshold) 139 | */ 140 | inline double T_dz(const double z) const { 141 | return depthAlpha * fabs(z) + depthChangeTol; 142 | } 143 | };//ParamSet 144 | 145 | }//ahc -------------------------------------------------------------------------------- /svio/plane_tools/peac/AHCTypes.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2014 Mitsubishi Electric Research Laboratories All 3 | // Rights Reserved. 4 | // 5 | // Permission to use, copy and modify this software and its 6 | // documentation without fee for educational, research and non-profit 7 | // purposes, is hereby granted, provided that the above copyright 8 | // notice, this paragraph, and the following three paragraphs appear 9 | // in all copies. 10 | // 11 | // To request permission to incorporate this software into commercial 12 | // products contact: Director; Mitsubishi Electric Research 13 | // Laboratories (MERL); 201 Broadway; Cambridge, MA 02139. 14 | // 15 | // IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, 16 | // INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING 17 | // LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS 18 | // DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF 19 | // SUCH DAMAGES. 20 | // 21 | // MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN 24 | // "AS IS" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 25 | // SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 26 | // 27 | #pragma once 28 | 29 | #ifndef USE_BOOST_SHARED_PTR 30 | 31 | #include 32 | 33 | #else 34 | #include 35 | #endif 36 | 37 | namespace ahc { 38 | #ifndef USE_BOOST_SHARED_PTR 39 | using std::shared_ptr; 40 | #else 41 | using boost::shared_ptr; 42 | #endif 43 | } -------------------------------------------------------------------------------- /svio/plane_tools/peac/AHCUtils.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2014 Mitsubishi Electric Research Laboratories All 3 | // Rights Reserved. 4 | // 5 | // Permission to use, copy and modify this software and its 6 | // documentation without fee for educational, research and non-profit 7 | // purposes, is hereby granted, provided that the above copyright 8 | // notice, this paragraph, and the following three paragraphs appear 9 | // in all copies. 
10 | // 11 | // To request permission to incorporate this software into commercial 12 | // products contact: Director; Mitsubishi Electric Research 13 | // Laboratories (MERL); 201 Broadway; Cambridge, MA 02139. 14 | // 15 | // IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, 16 | // INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING 17 | // LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS 18 | // DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF 19 | // SUCH DAMAGES. 20 | // 21 | // MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN 24 | // "AS IS" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 25 | // SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 26 | // 27 | #pragma once 28 | 29 | #include 30 | #include "opencv2/opencv.hpp" 31 | 32 | namespace ahc { 33 | namespace utils { 34 | 35 | /** 36 | * \brief Generate pseudo-colors 37 | * 38 | * \param [in] ncolors number of colors to be generated (will be reset to 10 if ncolors<=0) 39 | * \return a vector of cv::Vec3b 40 | */ 41 | inline std::vector pseudocolor(int ncolors) { 42 | srand((unsigned int) time(0)); 43 | if (ncolors <= 0) ncolors = 10; 44 | std::vector ret(ncolors); 45 | for (int i = 0; i < ncolors; ++i) { 46 | cv::Vec3b &color = ret[i]; 47 | color[0] = rand() % 255; 48 | color[1] = rand() % 255; 49 | color[2] = rand() % 255; 50 | } 51 | return ret; 52 | } 53 | 54 | /** 55 | \brief helper class for measuring time elapse 56 | */ 57 | struct Timer { 58 | int scale; 59 | double startTick; 60 | 61 | /** 62 | constructor 63 | 64 | @param scale time scale, 1 means second, 1000 means milli-second, 65 | 1/60.0 means minutes, etc. 66 | */ 67 | Timer(int scale = 1) { 68 | this->scale = scale; 69 | startTick = 0; 70 | } 71 | 72 | /** 73 | start record time, similar to matlab function "tic"; 74 | 75 | @return the start tick 76 | */ 77 | inline double tic() { 78 | return startTick = (double) cv::getTickCount(); 79 | } 80 | 81 | /** 82 | return duration from last tic, in (second * scale), similar to matlab function "toc" 83 | 84 | @return duration from last tic, in (second * scale) 85 | */ 86 | inline double toc() { 87 | return ((double) cv::getTickCount() - startTick) / cv::getTickFrequency() * scale; 88 | } 89 | 90 | inline double toc(std::string tag) { 91 | double time = toc(); 92 | std::cout << tag << " " << time << std::endl; 93 | return time; 94 | } 95 | 96 | /** 97 | equivalent to { toc(); tic(); } 98 | 99 | @return duration from last tic, in (second * scale) 100 | */ 101 | inline double toctic() { 102 | double ret = ((double) cv::getTickCount() - startTick) / cv::getTickFrequency() * scale; 103 | tic(); 104 | return ret; 105 | } 106 | 107 | inline double toctic(std::string tag) { 108 | double time = toctic(); 109 | std::cout << tag << " " << time << std::endl; 110 | return time; 111 | } 112 | }; 113 | 114 | } //utils 115 | } //ach -------------------------------------------------------------------------------- /svio/plane_tools/peac/DisjointSet.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2014 Mitsubishi Electric Research Laboratories All 3 | // Rights Reserved. 
4 | // 5 | // Permission to use, copy and modify this software and its 6 | // documentation without fee for educational, research and non-profit 7 | // purposes, is hereby granted, provided that the above copyright 8 | // notice, this paragraph, and the following three paragraphs appear 9 | // in all copies. 10 | // 11 | // To request permission to incorporate this software into commercial 12 | // products contact: Director; Mitsubishi Electric Research 13 | // Laboratories (MERL); 201 Broadway; Cambridge, MA 02139. 14 | // 15 | // IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, 16 | // INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING 17 | // LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS 18 | // DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF 19 | // SUCH DAMAGES. 20 | // 21 | // MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN 24 | // "AS IS" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 25 | // SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 26 | // 27 | #pragma once 28 | 29 | #include 30 | 31 | class DisjointSet { 32 | private: 33 | std::vector parent_; 34 | std::vector size_; 35 | 36 | public: 37 | DisjointSet(const int n) { 38 | parent_.reserve(n); 39 | size_.reserve(n); 40 | 41 | for (int i = 0; i < n; ++i) { 42 | parent_.push_back(i); 43 | size_.push_back(1); 44 | } 45 | } 46 | 47 | ~DisjointSet() {} 48 | 49 | inline void remove(const int x) { 50 | if (parent_[x] != x) { 51 | --size_[Find(x)]; 52 | parent_[x] = x; 53 | size_[x] = 1; 54 | } 55 | } 56 | 57 | inline int getSetSize(const int x) { 58 | return size_[Find(x)]; 59 | } 60 | 61 | inline int Union(const int x, const int y) { 62 | const int xRoot = Find(x); 63 | const int yRoot = Find(y); 64 | 65 | if (xRoot == yRoot) 66 | return xRoot; 67 | 68 | const int xRootSize = size_[xRoot]; 69 | const int yRootSize = size_[yRoot]; 70 | 71 | if (xRootSize < yRootSize) { 72 | parent_[xRoot] = yRoot; 73 | size_[yRoot] += size_[xRoot]; 74 | return yRoot; 75 | } else { 76 | parent_[yRoot] = xRoot; 77 | size_[xRoot] += size_[yRoot]; 78 | return xRoot; 79 | } 80 | } 81 | 82 | inline int Find(const int x) { 83 | if (parent_[x] != x) 84 | parent_[x] = Find(parent_[x]); 85 | 86 | return parent_[x]; 87 | } 88 | 89 | private: 90 | DisjointSet(); 91 | 92 | DisjointSet(const DisjointSet &rhs); 93 | 94 | const DisjointSet &operator=(const DisjointSet &rhs); 95 | }; -------------------------------------------------------------------------------- /svio/plane_tools/peac/eig33sym.hpp: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright 2014 Mitsubishi Electric Research Laboratories All 3 | // Rights Reserved. 4 | // 5 | // Permission to use, copy and modify this software and its 6 | // documentation without fee for educational, research and non-profit 7 | // purposes, is hereby granted, provided that the above copyright 8 | // notice, this paragraph, and the following three paragraphs appear 9 | // in all copies. 10 | // 11 | // To request permission to incorporate this software into commercial 12 | // products contact: Director; Mitsubishi Electric Research 13 | // Laboratories (MERL); 201 Broadway; Cambridge, MA 02139. 
14 | // 15 | // IN NO EVENT SHALL MERL BE LIABLE TO ANY PARTY FOR DIRECT, 16 | // INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING 17 | // LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS 18 | // DOCUMENTATION, EVEN IF MERL HAS BEEN ADVISED OF THE POSSIBILITY OF 19 | // SUCH DAMAGES. 20 | // 21 | // MERL SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 | // FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN 24 | // "AS IS" BASIS, AND MERL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 25 | // SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 26 | // 27 | #pragma once 28 | // 29 | // Note: 30 | // If you use dsyevh3 library [http://www.mpi-hd.mpg.de/personalhomes/globes/3x3/], 31 | // this function can be accelerated by uncommenting the following line 32 | //#define USE_DSYEVH3 33 | // 34 | #ifdef USE_DSYEVH3 35 | #include "dsyevh3/dsyevh3.h" 36 | #else 37 | 38 | #include 39 | #include 40 | 41 | #endif 42 | 43 | #include 44 | #include 45 | 46 | namespace LA { 47 | //s[0]<=s[1]<=s[2], V[:][i] correspond to s[i] 48 | inline static bool eig33sym(double K[3][3], double s[3], double V[3][3]) { 49 | #ifdef USE_DSYEVH3 50 | double tmpV[3][3]; 51 | if(dsyevh3(K, tmpV, s)!=0) return false; 52 | 53 | int order[]={0,1,2}; 54 | for(int i=0; i<3; ++i) { 55 | for(int j=i+1; j<3; ++j) { 56 | if(s[i]>s[j]) { 57 | double tmp=s[i]; 58 | s[i]=s[j]; 59 | s[j]=tmp; 60 | int tmpor=order[i]; 61 | order[i]=order[j]; 62 | order[j]=tmpor; 63 | } 64 | } 65 | } 66 | V[0][0]=tmpV[0][order[0]]; V[0][1]=tmpV[0][order[1]]; V[0][2]=tmpV[0][order[2]]; 67 | V[1][0]=tmpV[1][order[0]]; V[1][1]=tmpV[1][order[1]]; V[1][2]=tmpV[1][order[2]]; 68 | V[2][0]=tmpV[2][order[0]]; V[2][1]=tmpV[2][order[1]]; V[2][2]=tmpV[2][order[2]]; 69 | #else 70 | //below we did not specify row major since it does not matter, K==K' 71 | Eigen::SelfAdjointEigenSolver es( 72 | Eigen::Map(K[0], 3, 3)); 73 | Eigen::Map(s, 3, 1) = es.eigenvalues(); 74 | //below we need to specify row major since V!=V' 75 | Eigen::Map>(V[0], 3, 3) = es.eigenvectors(); 76 | #endif 77 | return true; 78 | } 79 | }//end of namespace LA -------------------------------------------------------------------------------- /svio/state_estimation/depth_factor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "parameters.h" 6 | #include "../utility/utility.h" 7 | 8 | 9 | class SingleInvDepthFactor : public ceres::SizedCostFunction<1, 1> 10 | { 11 | public: 12 | SingleInvDepthFactor(double inv_i); 13 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 14 | 15 | void setSigma(double sig_lambda); 16 | double inv_depth_i; 17 | double sqrt_info; 18 | }; 19 | 20 | 21 | class ProjectionDepthFactor : public ceres::SizedCostFunction<3, 7, 7, 7, 1> 22 | { 23 | public: 24 | ProjectionDepthFactor(const Eigen::Vector3d& _pts_i, const Eigen::Vector3d& _pts_j, double inv_j); 25 | 26 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 27 | void check(double **parameters); 28 | 29 | void setSqrtCov(Eigen::Matrix3d& C); 30 | 31 | // double inv_depth_i; 32 | double inv_depth_j; 33 | Eigen::Vector3d pts_i; 34 | Eigen::Vector3d pts_j; 35 | // static double sqrt_info; 36 | Eigen::Matrix3d sqrt_info; 37 | }; 38 | 39 | 
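// Minimal usage sketch: how SingleInvDepthFactor might be attached to a ceres::Problem.
// The helper name and the numeric values are illustrative assumptions, not values taken
// from S-VIO; the parameter block has size 1, matching SizedCostFunction<1, 1> above,
// and <ceres/ceres.h> (stripped from the include list) is assumed to be available.
inline void example_add_inv_depth_prior(ceres::Problem &problem, double *inv_depth)
{
    // measured inverse depth (1/m) and its 1-sigma uncertainty, e.g. from a depth sensor model
    SingleInvDepthFactor *factor = new SingleInvDepthFactor(0.48);
    factor->setSigma(0.02);
    // robustify against gross depth outliers
    problem.AddResidualBlock(factor, new ceres::HuberLoss(1.0), inv_depth);
}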
-------------------------------------------------------------------------------- /svio/state_estimation/feature_manager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "parameters.h" 10 | #include "frontend.h" 11 | #include "plane_manager.h" 12 | 13 | using namespace std; 14 | 15 | #include 16 | using namespace Eigen; 17 | 18 | enum DPT_TYPE 19 | { 20 | NO_DEPTH =0, DEPTH_MES, DEPTH_TRI, INVALID 21 | } ; 22 | 23 | 24 | extern void sigma_pt3d(Eigen::Matrix3d& C, double u, double v, double z, double sig_z); 25 | 26 | class FeaturePerFrame 27 | { 28 | public: 29 | FeaturePerFrame(float xi, float yi){ 30 | pt.x() = xi; 31 | pt.y() = yi; 32 | pt.z() = 1.0; 33 | dpt = -1.0; 34 | lambda = 0; 35 | sig_d = sig_l = 0; 36 | } 37 | 38 | FeaturePerFrame(float xi, float yi, float di){ 39 | pt.x() = xi; 40 | pt.y() = yi; 41 | pt.z() = 1.0; 42 | dpt = di; 43 | 44 | if(di!=0) 45 | lambda = 1./dpt; 46 | else 47 | lambda = 0; 48 | sig_d = sig_l = 0; 49 | } 50 | 51 | void print(){ 52 | printf("pt: %f %f %f dpt: %lf \n", pt.x(), pt.y(), pt.z(), dpt); 53 | } 54 | 55 | void setUV(float u, float v){pt_2d.x() = u; pt_2d.y() = v;} 56 | void setInvDepth(float inv_di){lambda = inv_di; } 57 | void setDepth(float di){ dpt = di;} 58 | void setDepthSigma(double sig_d_){sig_d = sig_d_;} 59 | void setInvDepthSigma(double sig_l_){sig_l = sig_l_;} 60 | 61 | void setAllD(double d, double l, double sig_d_, double sig_l_){ 62 | dpt = d; lambda = l; sig_d = sig_d_; sig_l = sig_l_; 63 | } 64 | 65 | Vector2d pt_2d; // u and v in image coordinate 66 | Vector3d pt; 67 | // Vector2d uv; 68 | double dpt; // depth at this frame 69 | // int frame_id ; 70 | 71 | double lambda; 72 | double sig_d; 73 | double sig_l; 74 | 75 | }; 76 | 77 | 78 | class FeaturePerId 79 | { 80 | public: 81 | const int feature_id; 82 | int start_frame; 83 | int depth_shift; // relative to start frame 84 | vector feature_per_frame; 85 | int used_num; 86 | double estimated_depth; 87 | int solve_flag; // 0 no depth; 1 ready for optimization; 2 outlier; 88 | 89 | int cur_plane_id, comfirm_plane_id; 90 | bool is_plane; 91 | 92 | FeaturePerId(int _feature_id, int _start_frame) 93 | : feature_id(_feature_id), start_frame(_start_frame), 94 | used_num(0), estimated_depth(-1.0), solve_flag(0), depth_shift(-1), 95 | cur_plane_id(-1),comfirm_plane_id(-1), dpt_type(NO_DEPTH), is_plane(false) 96 | { 97 | } 98 | void setDepth(float depth){ 99 | estimated_depth = depth; 100 | solve_flag = 1; 101 | } 102 | 103 | double parallax_angle(Matrix3d Rs[], Matrix3d& , double *parallax = NULL); // compute the biggest parallax angle 104 | 105 | void setDepthType(DPT_TYPE t){dpt_type = t;} 106 | 107 | int endFrame(); 108 | 109 | void print(){ 110 | printf("feature id: %d start_frame: %d used_num: %d depth_shift %d estimated_depth: %lf\n", feature_id, start_frame, used_num, depth_shift, estimated_depth); 111 | 112 | // for(int i=0; i>>> &image, double td); 140 | vector> getCorresponding(int frame_count_l, int frame_count_r); 141 | vector> getCorrespondingWithDepth(int frame_count_l, int frame_count_r); 142 | pair>, vector> getCorrespondingWithDepthAndCov(int frame_count_l, int frame_count_r); 143 | 144 | bool addFeatureCheckParallaxSigma(int frame_count, const Meas &meas, PlaneManager& p_manager, Vector3d Ps[], Matrix3d Rs[], 145 | Vector3d tic[], Matrix3d ric[]); 146 | void addPointPlaneMeasurements(PlaneManager& p_manager); 147 | 148 | // 
point-on-plane 149 | void checkPlaneRelation(Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[], PlaneManager& p_manager); 150 | void assignPlanePointDepth(Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[], PlaneManager& p_manager); 151 | 152 | int getFeatureCount(); 153 | 154 | void identifyOutlier(Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 155 | void removeFailures(); 156 | void removeOutlier(set &outlierIndex); 157 | void triangulate(Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 158 | void triangulateWithDepth(Vector3d Ps[], Vector3d tic[], Matrix3d ric[]); 159 | void triangulateSimple(int frameCnt, Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 160 | void triangulatePoint(Eigen::Matrix &Pose0, Eigen::Matrix &Pose1, 161 | Eigen::Vector2d &point0, Eigen::Vector2d &point1, Eigen::Vector3d &point_3d); 162 | void removeFront(int frame_count); 163 | void removeFrontWithDepth(int frame_count); 164 | void removeBackShiftDepth(Eigen::Matrix3d marg_R, Eigen::Vector3d marg_P, Eigen::Matrix3d new_R, Eigen::Vector3d new_P); 165 | void removeBack(); 166 | 167 | double compensatedParallax2(const FeaturePerId& it_per_id, int frame_count); 168 | 169 | // void triangulate(int frame_count, Eigen::Matrix3d Ri, Eigen::Vector3d Pi, Eigen::Matrix3d Ric, Eigen::Vector3d Pic); 170 | 171 | void initFramePoseByPnP(int frameCnt, Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 172 | bool solvePoseByPnP(Eigen::Matrix3d &R_initial, Eigen::Vector3d &P_initial, 173 | vector &pts2D, vector &pts3D); 174 | VectorXd getDepthVector(); 175 | void clearDepth(const VectorXd &x); 176 | 177 | list feature; 178 | 179 | int last_track_num; 180 | double last_average_parallax; 181 | int new_feature_num; 182 | int long_track_num; 183 | 184 | const Matrix3d *Rs; 185 | Matrix3d K; 186 | Matrix3d ric[2]; 187 | 188 | // debug 189 | void printStatus(); 190 | 191 | public: 192 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 193 | }; -------------------------------------------------------------------------------- /svio/state_estimation/frontend.cpp: -------------------------------------------------------------------------------- 1 | #include "frontend.h" 2 | -------------------------------------------------------------------------------- /svio/state_estimation/frontend.h: -------------------------------------------------------------------------------- 1 | 2 | #pragma once 3 | 4 | #include "../plane_tools/PlaneExtractor.h" 5 | #include "../feature_tracking/feature_tracker.h" 6 | #include "../line_tracking/line_tracker.h" 7 | 8 | typedef struct pointMea 9 | { 10 | int id; 11 | float u_pt, v_pt, un_u_pt, un_v_pt; 12 | float velocity_u_pt, velocity_v_pt; 13 | float mu_d, mu_l,sig_d, sig_l; 14 | int plane_index; 15 | } pointMea; 16 | 17 | typedef struct lineMea 18 | { 19 | int id; 20 | float start_u_pt, start_v_pt, end_u_pt, end_v_pt; 21 | std::vector start_seg_pts, end_seg_pts; 22 | cv::Point2f start_pt, end_pt; 23 | float length; 24 | int plane_index; 25 | } lineMea; 26 | 27 | typedef struct planeMea 28 | { 29 | int id; 30 | int p_cnt; 31 | Eigen::Matrix4d mea; 32 | Eigen::Vector3d center; 33 | Eigen::Vector3d normal; 34 | std::vector point_cloud; 35 | std::vector dp_var; 36 | } planeMea; 37 | 38 | class Meas 39 | { 40 | public: 41 | cv::Point2f projectPtOnLine(cv::Point2f cv_p, cv::Point2f cv_s, cv::Point2f cv_e) 42 | { 43 | Vector2d s(cv_s.x, cv_s.y), e(cv_e.x, cv_e.y), p(cv_p.x, cv_p.y); 44 | Vector2d v = s-e; 45 | v.normalize(); 46 | Vector2d p_l = s - 
(s-p).dot(v)*v; 47 | cv::Point2f cv_pl(p_l.x(), p_l.y()); 48 | return cv_pl; 49 | } 50 | 51 | void FillMeas(FeatureTracker& feature_tracker, LineTracker& line_tracker, PlaneDetection& plane_detector) 52 | { 53 | for (int i = 0; i < feature_tracker.cur_pts.size(); i++) 54 | { 55 | pointMea tmp_pt; 56 | tmp_pt.id = feature_tracker.ids[i]; 57 | tmp_pt.mu_d = feature_tracker.cur_pts_depth[i][0]; 58 | tmp_pt.mu_l = feature_tracker.cur_pts_depth[i][1]; 59 | tmp_pt.sig_d = feature_tracker.cur_pts_depth[i][2]; 60 | tmp_pt.sig_l = feature_tracker.cur_pts_depth[i][3]; 61 | tmp_pt.plane_index = feature_tracker.cur_pts_plane_index[i]; 62 | tmp_pt.u_pt = feature_tracker.cur_pts[i].x; 63 | tmp_pt.v_pt = feature_tracker.cur_pts[i].y; 64 | tmp_pt.un_u_pt = feature_tracker.cur_un_pts[i].x; 65 | tmp_pt.un_v_pt = feature_tracker.cur_un_pts[i].y; 66 | tmp_pt.velocity_u_pt = feature_tracker.pts_velocity[i].x; 67 | tmp_pt.velocity_v_pt = feature_tracker.pts_velocity[i].y; 68 | point_meas.push_back(tmp_pt); 69 | } 70 | 71 | for (int i = 0; i < line_tracker.lines.size(); i++) 72 | { 73 | lineMea tmp_l; 74 | tmp_l.id = line_tracker.lines[i].id; 75 | tmp_l.plane_index = line_tracker.lines[i].plane_index; 76 | tmp_l.start_u_pt = line_tracker.lines[i].un_s.x; 77 | tmp_l.start_v_pt = line_tracker.lines[i].un_s.y; 78 | tmp_l.end_u_pt = line_tracker.lines[i].un_e.x; 79 | tmp_l.end_v_pt = line_tracker.lines[i].un_e.y; 80 | tmp_l.length = line_tracker.lines[i].length; 81 | tmp_l.start_pt = line_tracker.lines[i].s; 82 | tmp_l.end_pt = line_tracker.lines[i].e; 83 | for (int j = 0; j < line_tracker.lines[i].segments.size(); j++) 84 | { 85 | const auto& it_seg = line_tracker.lines[i].segments[j]; 86 | cv::Point2f seg_s(it_seg[0], it_seg[1]), seg_e(it_seg[2], it_seg[3]); 87 | tmp_l.start_seg_pts.push_back(projectPtOnLine(seg_s, line_tracker.lines[i].s, line_tracker.lines[i].e)); 88 | tmp_l.end_seg_pts.push_back(projectPtOnLine(seg_e, line_tracker.lines[i].s, line_tracker.lines[i].e)); 89 | } 90 | line_meas.push_back(tmp_l); 91 | } 92 | 93 | for (int i = 0; i < plane_detector.meas.size(); i++) 94 | { 95 | planeMea tmp_p; 96 | tmp_p.id = i; 97 | tmp_p.mea = plane_detector.meas[i]; 98 | auto extractedPlane = plane_detector.plane_filter.extractedPlanes[i]; 99 | tmp_p.normal = Eigen::Vector3d(extractedPlane->normal[0], extractedPlane->normal[1], extractedPlane->normal[2]); 100 | tmp_p.center = Eigen::Vector3d(extractedPlane->center[0], extractedPlane->center[1], extractedPlane->center[2]); 101 | tmp_p.p_cnt = plane_detector.plane_vertices_[i].size(); 102 | for (int j : plane_detector.plane_vertices_[i]) 103 | { 104 | tmp_p.point_cloud.push_back(plane_detector.cloud.vertices[j]); 105 | tmp_p.dp_var.push_back(plane_detector.cloud.verticesVariance[j](2,2)); 106 | } 107 | plane_meas.push_back(tmp_p); 108 | } 109 | } 110 | 111 | std::vector point_meas; 112 | std::vector line_meas; 113 | std::vector plane_meas; 114 | 115 | cv::Mat imColor; 116 | 117 | double time; 118 | }; 119 | 120 | -------------------------------------------------------------------------------- /svio/state_estimation/line_parameterization.cpp: -------------------------------------------------------------------------------- 1 | #include "line_parameterization.h" 2 | #include "../utility/utility.h" 3 | 4 | 5 | bool LineOrthParameterization::Plus(const double *x, const double *delta, double *x_plus_delta) const 6 | { 7 | // ref: 2001, Adrien Bartol,Peter Sturm ,Structure-From-Motion Using Lines: Representation, Triangulation and Bundle Adjustment 8 | 9 | // theta --> U, 
phi --> W 10 | Eigen::Map theta(x); 11 | double phi = *(x + 3); 12 | double s1 = sin(theta[0]); 13 | double c1 = cos(theta[0]); 14 | double s2 = sin(theta[1]); 15 | double c2 = cos(theta[1]); 16 | double s3 = sin(theta[2]); 17 | double c3 = cos(theta[2]); 18 | Eigen::Matrix3d R; 19 | R << 20 | c2 * c3, s1 * s2 * c3 - c1 * s3, c1 * s2 * c3 + s1 * s3, 21 | c2 * s3, s1 * s2 * s3 + c1 * c3, c1 * s2 * s3 - s1 * c3, 22 | -s2, s1 * c2, c1 * c2; 23 | double w1 = cos(phi); 24 | double w2 = sin(phi); 25 | 26 | // update 27 | Eigen::Map _delta_theta(delta); 28 | double _delta_phi = *(delta + 3); 29 | Eigen::Matrix3d Rz; 30 | Rz << cos(_delta_theta(2)), -sin(_delta_theta(2)), 0, 31 | sin(_delta_theta(2)), cos(_delta_theta(2)), 0, 32 | 0, 0, 1; 33 | 34 | Eigen::Matrix3d Ry; 35 | Ry << cos(_delta_theta(1)), 0., sin(_delta_theta(1)), 36 | 0., 1., 0., 37 | -sin(_delta_theta(1)), 0., cos(_delta_theta(1)); 38 | 39 | Eigen::Matrix3d Rx; 40 | Rx << 1., 0., 0., 41 | 0., cos(_delta_theta(0)), -sin(_delta_theta(0)), 42 | 0., sin(_delta_theta(0)), cos(_delta_theta(0)); 43 | R = R * Rx * Ry * Rz; 44 | 45 | Eigen::Matrix2d W; 46 | W << w1, -w2, w2, w1; 47 | Eigen::Matrix2d delta_W; 48 | delta_W << cos(_delta_phi), -sin(_delta_phi),sin(_delta_phi), cos(_delta_phi); 49 | W = W * delta_W; 50 | 51 | // U' -- > theta'. W' --> phi' 52 | Eigen::Map theta_pluse(x_plus_delta); // map the raw double pointer onto an Eigen vector 53 | double* phi_plus(x_plus_delta + 3); 54 | 55 | Eigen::Vector3d u1 = R.col(0); 56 | Eigen::Vector3d u2 = R.col(1); 57 | Eigen::Vector3d u3 = R.col(2); 58 | theta_pluse[0] = atan2( u2(2),u3(2) ); 59 | theta_pluse[1] = asin( -u1(2) ); 60 | theta_pluse[2] = atan2( u1(1),u1(0) ); 61 | 62 | *phi_plus = asin( W(1,0) ); 63 | 64 | ////////////////////////////////////////////////////////// 65 | /* 66 | // Alternative SO(3)-parameterization approach; it yields the same Jacobian as the form above, so the form above is sufficient. 67 | Eigen::Map theta(x); 68 | double phi = *(x + 3); 69 | double s1 = sin(theta[0]); 70 | double c1 = cos(theta[0]); 71 | double s2 = sin(theta[1]); 72 | double c2 = cos(theta[1]); 73 | double s3 = sin(theta[2]); 74 | double c3 = cos(theta[2]); 75 | Matrix3d R; 76 | R << 77 | c2 * c3, s1 * s2 * c3 - c1 * s3, c1 * s2 * c3 + s1 * s3, 78 | c2 * s3, s1 * s2 * s3 + c1 * c3, c1 * s2 * s3 - s1 * c3, 79 | -s2, s1 * c2, c1 * c2; 80 | 81 | Sophus::SO3 U = Sophus::SO3::exp(theta); 82 | Sophus::SO3 U1(R); 83 | 84 | std::cout << U.matrix() << "\n\n" <::exp(U1.log()).matrix() << "\n\n"; 87 | */ 88 | 89 | return true; 90 | } 91 | 92 | bool LineOrthParameterization::ComputeJacobian(const double *x, double *jacobian) const 93 | { 94 | Eigen::Map> j(jacobian); 95 | j.setIdentity(); 96 | 97 | return true; 98 | } 99 | -------------------------------------------------------------------------------- /svio/state_estimation/line_parameterization.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | class LineOrthParameterization : public ceres::LocalParameterization 7 | { 8 | virtual bool Plus(const double *x, const double *delta, double *x_plus_delta) const; 9 | virtual bool ComputeJacobian(const double *x, double *jacobian) const; 10 | virtual int GlobalSize() const { return 4; }; 11 | virtual int LocalSize() const { return 4; }; 12 | }; 13 | -------------------------------------------------------------------------------- /svio/state_estimation/line_projection_factor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include "../utility/utility.h" 7 |
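// The three factors declared below share one layout: a 2-dimensional line reprojection
// residual, two 7-dimensional pose blocks (position + quaternion), and a final block
// holding the line parameters -- 2 DoF for the structurally constrained vertical/Atlanta
// lines and 4 DoF for the orthonormal representation updated by LineOrthParameterization.
// In each factor, obs stores the two observed endpoints of the segment and sqrt_info
// weights the residual.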
8 | // Vertical Line Formulation 9 | class VerticalLineProjectionFactor : public ceres::SizedCostFunction<2, 7, 7, 2> 10 | { 11 | public: 12 | VerticalLineProjectionFactor(const Eigen::Vector3d &_pts_s, const Eigen::Vector3d &_pts_e); 13 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 14 | void check(double **parameters); 15 | 16 | Eigen::Matrix obs; 17 | static Eigen::Matrix2d sqrt_info; 18 | }; 19 | 20 | // Atlanta Line Formulation 21 | class AtlantaLineProjectionFactor : public ceres::SizedCostFunction<2, 7, 7, 2> 22 | { 23 | public: 24 | AtlantaLineProjectionFactor(const Eigen::Vector3d &_pts_s, const Eigen::Vector3d &_pts_e, bool _is_rot90, double _theta); 25 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 26 | void check(double **parameters); 27 | 28 | Eigen::Matrix obs; 29 | static Eigen::Matrix2d sqrt_info; 30 | double line_theta; 31 | }; 32 | 33 | // Line in Absolute Formulation 34 | class lineProjectionFactor : public ceres::SizedCostFunction<2, 7, 7, 4> 35 | { 36 | public: 37 | lineProjectionFactor(const Eigen::Vector3d &_pts_s, const Eigen::Vector3d &_pts_e); 38 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 39 | void check(double **parameters); 40 | 41 | Eigen::Matrix obs; 42 | static Eigen::Matrix2d sqrt_info; 43 | }; -------------------------------------------------------------------------------- /svio/state_estimation/marginalization_factor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "../utility/utility.h" 11 | #include "../utility/tic_toc.h" 12 | 13 | const int NUM_THREADS = 4; 14 | 15 | struct ResidualBlockInfo 16 | { 17 | ResidualBlockInfo(ceres::CostFunction *_cost_function, ceres::LossFunction *_loss_function, std::vector _parameter_blocks, std::vector _drop_set) 18 | : cost_function(_cost_function), loss_function(_loss_function), parameter_blocks(_parameter_blocks), drop_set(_drop_set) {} 19 | 20 | void Evaluate(); 21 | 22 | ceres::CostFunction *cost_function; 23 | ceres::LossFunction *loss_function; 24 | std::vector parameter_blocks; 25 | std::vector drop_set; 26 | 27 | double **raw_jacobians; 28 | std::vector> jacobians; 29 | Eigen::VectorXd residuals; 30 | 31 | int localSize(int size) 32 | { 33 | return size == 7 ? 
6 : size;
34 | }
35 | };
36 | 
37 | struct ThreadsStruct
38 | {
39 | std::vector<ResidualBlockInfo *> sub_factors;
40 | Eigen::MatrixXd A;
41 | Eigen::VectorXd b;
42 | std::unordered_map<long, int> parameter_block_size; //global size
43 | std::unordered_map<long, int> parameter_block_idx; //local size
44 | };
45 | 
46 | class MarginalizationInfo
47 | {
48 | public:
49 | MarginalizationInfo();
50 | ~MarginalizationInfo();
51 | int localSize(int size) const;
52 | int globalSize(int size) const;
53 | void addResidualBlockInfo(ResidualBlockInfo *residual_block_info);
54 | void preMarginalize();
55 | void marginalize();
56 | std::vector<double *> getParameterBlocks(std::unordered_map<long, double *> &addr_shift);
57 | 
58 | std::vector<ResidualBlockInfo *> factors;
59 | int m, n;
60 | std::unordered_map<long, int> parameter_block_size; //global size
61 | int sum_block_size;
62 | std::unordered_map<long, int> parameter_block_idx; //local size
63 | std::unordered_map<long, double *> parameter_block_data;
64 | 
65 | std::vector<int> keep_block_size; //global size
66 | std::vector<int> keep_block_idx; //local size
67 | std::vector<double *> keep_block_data;
68 | 
69 | Eigen::MatrixXd linearized_jacobians;
70 | Eigen::VectorXd linearized_residuals;
71 | const double eps = 1e-8;
72 | bool valid;
73 | 
74 | };
75 | 
76 | class MarginalizationFactor : public ceres::CostFunction
77 | {
78 | public:
79 | MarginalizationFactor(MarginalizationInfo* _marginalization_info);
80 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const;
81 | 
82 | MarginalizationInfo* marginalization_info;
83 | };
84 | 
-------------------------------------------------------------------------------- /svio/state_estimation/mns_alg.cpp: --------------------------------------------------------------------------------
1 | #include "mns_alg.h"
2 | 
3 | void reduceVector(vector<MnsEntry> &v, vector<bool>& status)
4 | {
5 | int j = 0;
6 | for (int i = 0; i < int(v.size()); i++)
7 | if (status[i])
8 | v[j++] = v[i];
9 | v.resize(j);
10 | }
11 | 
12 | int MnS_General(const std::vector<MnsEntry>& _mns_v, std::vector<MnsRet>& mns_ret, int min_inlier, bool use_weight)
13 | {
14 | std::vector<MnsEntry> mns_v = _mns_v;
15 | 
16 | while (mns_v.size() >= min_inlier)
17 | {
18 | MnsRet final_interval, tmp_interval;
19 | sort(mns_v.begin(), mns_v.end(), [&](const MnsEntry& a, const MnsEntry& b)
20 | {
21 | return a.val < b.val;
22 | });
23 | for (auto it = mns_v.begin(); it != mns_v.end(); it++)
24 | {
25 | if (it->is_min == true)
26 | {
27 | tmp_interval.min = it->val;
28 | tmp_interval.ids.insert(it->id);
29 | tmp_interval.indexs.insert(it->index);
30 | tmp_interval.weight += it->weight;
31 | }
32 | else
33 | {
34 | if ((use_weight == false && tmp_interval.ids.size() > final_interval.ids.size())
35 | || (use_weight == true && tmp_interval.weight > final_interval.weight))
36 | {
37 | tmp_interval.max = it->val;
38 | final_interval = tmp_interval;
39 | }
40 | tmp_interval.ids.erase(it->id);
41 | tmp_interval.indexs.erase(it->index);
42 | tmp_interval.weight -= it->weight;
43 | }
44 | }
45 | 
46 | if ((use_weight == false && final_interval.ids.size() >= min_inlier)
47 | || (use_weight == true && final_interval.weight > min_inlier))
48 | {
49 | mns_ret.push_back(final_interval);
50 | 
51 | vector<bool> status(mns_v.size(), true);
52 | for (int i = 0; i < mns_v.size(); i++)
53 | {
54 | if (final_interval.ids.find(mns_v[i].id) != final_interval.ids.end())
55 | status[i] = false;
56 | }
57 | reduceVector(mns_v, status);
58 | }
59 | else
60 | break;
61 | }
62 | 
63 | return mns_ret.size();
64 | }
-------------------------------------------------------------------------------- /svio/state_estimation/mns_alg.h:
-------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "parameters.h" 10 | 11 | using namespace std; 12 | 13 | class MnsEntry 14 | { 15 | public: 16 | double val; 17 | bool is_min; 18 | double weight; 19 | int id; 20 | int index; // position in vector 21 | 22 | MnsEntry(){} 23 | MnsEntry(double _val, bool _is_min, int _id, int _index=-1, double _weight=1):val(_val), 24 | is_min(_is_min),id(_id),index(_index),weight(_weight){} 25 | }; 26 | 27 | class MnsRet 28 | { 29 | public: 30 | double min; 31 | double max; 32 | set ids; 33 | set indexs; 34 | double weight; 35 | 36 | MnsRet():weight(0){} 37 | }; 38 | 39 | void reduceVector(vector &v, vector& status); 40 | 41 | int MnS_General(const std::vector& _mns_v, std::vector& mns_ret, int min_inlier=2, bool use_weight=false); -------------------------------------------------------------------------------- /svio/state_estimation/parameters.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include "../utility/utility.h" 7 | #include 8 | #include 9 | #include 10 | 11 | #define SQ(x) ((x)*(x)) 12 | 13 | // Macros 14 | 15 | // Macros for visualization 16 | #define VISIUALIZE_RAW_POINT_CLOUD 17 | 18 | // Paras for result saving 19 | extern bool SAVE_TUM; 20 | 21 | // Macros for plane association 22 | // #define MATCH_BY_FEATURE 23 | // #define MIN_FEATURE_MATCH_FOR_PLANE 10 24 | #define MATCH_BY_DIST 25 | #define MAX_DIST 0.05 26 | #define MAX_ANGLE 5 27 | 28 | // Paras for identifying structural elements 29 | extern double STRUCTURE_HORIZONTAL_ANG_DIFF; 30 | extern double LINE_STRUCTURE_HORIZONTAL_ANG_DIFF; 31 | extern double STRUCTURE_VERTICAL_ANG_DIFF; 32 | 33 | // Macros for deciding new atlanta axis 34 | #define ATLANTA_USE_WEIGHT 35 | #define MIN_ATLANTA_INLIER 50 36 | #define MAX_NO_OBSERVE_KEYFRAMES_PLANE 1000 37 | 38 | // Macros for verifying line/point-plane relation 39 | #define MIN_VERIFY_LINEPLANE_CNT 4 40 | #define ERR_PT_LINEONPLANE 2 41 | #define ERR_N_LINEONPLANE 1 42 | #define MIN_VERIFY_POINTPLANE_CNT 4 43 | #define ERR_POINTONPLANE 2 44 | 45 | // Macros for identifying line outlier 46 | #define REMOVE_LINE_OUTLIER 47 | extern double ERR_PT_LINE_REPRO; 48 | extern double ERR_N_LINE_REPRO; 49 | 50 | // Macros for identifying point outlier 51 | #define REMOVE_POINT_OUTLIER 52 | extern double ERR_POINT_REPRO; 53 | 54 | // Macros for identifying plane outlier 55 | // #define REINIT_PLANE_OUTLIER 56 | extern double ERR_PLANE_DIST; 57 | 58 | // Macros for factors used in optimization 59 | // #define LINE_YAW_OBSERVED 60 | #define USE_PLPP 61 | // #define USE_VVP 62 | 63 | // Macros for initialization with depth 64 | #define INITIALIZATION_WITH_DEPTH 65 | 66 | // Paras for frontend sampling 67 | extern int DROP_FRAME; 68 | 69 | const double FOCAL_LENGTH = 460.0; // 460.0; // when use downsampled input change it to 230 70 | const int WINDOW_SIZE = 10; 71 | const int NUM_OF_CAM = 1; //2; 72 | const int NUM_OF_FEAT = 1000; 73 | const int NUM_OF_LINE = 1000; 74 | const int NUM_OF_PLANE = 100; 75 | const int NUM_OF_HVP = 100; 76 | const double LOOP_INFO_VALUE = 50.0; 77 | const double RIG_LEN = 0.1; // stereo rig len , about 0.1 meters 78 | //#define DEPTH_PRIOR 79 | //#define GT 80 | // #define UNIT_SPHERE_ERROR 81 | 82 | extern double INIT_DEPTH; 83 | extern int ESTIMATE_EXTRINSIC; 84 | extern int MIN_USED_NUM; 
// features used times 85 | extern double MIN_PARALLAX; // minimal parallax 86 | 87 | extern double ACC_N, ACC_W; 88 | extern double GYR_N, GYR_W; 89 | 90 | extern std::vector RIC; 91 | extern std::vector TIC; 92 | extern Eigen::Vector3d G; 93 | 94 | extern double SOLVER_TIME; 95 | extern int NUM_ITERATIONS; 96 | extern std::string EX_CALIB_RESULT_PATH; 97 | extern std::string VINS_RESULT_PATH; 98 | extern std::string VINS_STATS_PATH; 99 | extern std::string VINS_FOLDER_PATH; 100 | 101 | extern int LOOP_CLOSURE; 102 | extern int MIN_LOOP_NUM; 103 | extern int IMAGE_ROW; 104 | extern int IMAGE_COL; 105 | extern double CX, CY, FX, FY; 106 | extern std::string PATTERN_FILE; 107 | extern std::string VOC_FILE; 108 | extern std::string CAM_NAMES; 109 | extern double PIX_SIGMA; 110 | 111 | extern int MAX_CNT; 112 | extern int MIN_DIST; 113 | extern double F_THRESHOLD; 114 | extern int SHOW_TRACK; 115 | extern int FLOW_BACK; 116 | 117 | extern double RGBD_DPT_MIN, RGBD_DPT_MAX; 118 | 119 | // for feature tracker 120 | // extern int EQUALIZE; 121 | // extern int FISHEYE; 122 | extern bool PUB_THIS_FRAME; 123 | extern double ACC_MULT; // normalize value 124 | 125 | extern bool USE_GMM; // whether use gmm to compute covariance 126 | extern bool USE_GMM_EXT; // whether extend gmm by introducing similarity 127 | extern int S_VERIFY; 128 | extern int ENABLE_STRUCTURE; 129 | 130 | void readParameters(ros::NodeHandle &n); 131 | 132 | enum SIZE_PARAMETERIZATION 133 | { 134 | SIZE_POSE = 7, 135 | SIZE_SPEEDBIAS = 9, 136 | SIZE_FEATURE = 1, 137 | SIZE_LINE = 4, 138 | SIZE_PLANE = 3, 139 | SIZE_HVP = 1 140 | }; 141 | 142 | enum StateOrder 143 | { 144 | O_P = 0, 145 | O_R = 3, 146 | O_V = 6, 147 | O_BA = 9, 148 | O_BG = 12 149 | }; 150 | 151 | enum NoiseOrder 152 | { 153 | O_AN = 0, 154 | O_GN = 3, 155 | O_AW = 6, 156 | O_GW = 9 157 | }; 158 | -------------------------------------------------------------------------------- /svio/state_estimation/plane_manager.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "parameters.h" 10 | #include "frontend.h" 11 | #include "../plane_tools/PlaneExtractor.h" 12 | #include "structure.h" 13 | #include "mns_alg.h" 14 | 15 | using namespace std; 16 | 17 | #include 18 | #include 19 | using namespace Eigen; 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | typedef pcl::PointCloud PointCloudT; 27 | typedef pcl::PointXYZ PointT; 28 | 29 | struct PlaneIdCnt 30 | { 31 | int plane_id; 32 | int f_cnt; 33 | }; 34 | 35 | class PlanePerFrame 36 | { 37 | public: 38 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 39 | 40 | int frame_id; 41 | 42 | vector xyz; 43 | vector xyz_cov; 44 | Matrix4d info; 45 | Matrix4d sqrt_info; 46 | Matrix4d mean_mea; 47 | Vector4d fitting_hesse; 48 | 49 | PlanePerFrame():frame_id(-1),info(Matrix4d::Zero()){} 50 | PlanePerFrame(int _frame_id):frame_id(_frame_id){} 51 | 52 | ~PlanePerFrame(){} 53 | 54 | void cholesky(); 55 | }; 56 | 57 | class PlanePerId 58 | { 59 | public: 60 | // id 61 | int plane_id; 62 | 63 | // geo. info 64 | Vector4d hesse; // [n,d], d>0, n: from plane to the camera 65 | double cp[3]; // used in opt. (non-strutural) 66 | double pro_d; // used in opt. (structural) 67 | 68 | // visualization 69 | Vector3d color; 70 | 71 | // depth mea. 72 | vector plane_ml; 73 | int start_frame_id; 74 | 75 | // on-plane points/lines 76 | set lines, points; 77 | 78 | // depth observation times. 
79 | int observe_times; 80 | int last_observe_times; 81 | 82 | // outlier 83 | bool is_outlier; 84 | 85 | // structural info. 86 | Strutural s_type; 87 | int axis_id; 88 | double theta; // horizontal theta with respect to Vector3d(1,0,0) 89 | 90 | PlanePerId(int _plane_id):plane_id(_plane_id),observe_times(0),last_observe_times(0),s_type(no),axis_id(-1), 91 | is_outlier(false),color(Vector3d(244,164,195)){} 92 | ~PlanePerId(){} 93 | 94 | void hesse2cp(); 95 | void cp2hesse(); 96 | }; 97 | 98 | class PastNormal 99 | { 100 | public: 101 | PastNormal(){} 102 | PastNormal(Vector3d _n, int _observe_times): normal(_n), last_observed_times(0), observe_times(_observe_times){} 103 | 104 | Vector3d normal; 105 | int last_observed_times, observe_times; 106 | }; 107 | 108 | class PlaneManager 109 | { 110 | public: 111 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 112 | 113 | // match info 114 | int match_pid[20]; 115 | 116 | // planes 117 | map plane_l; 118 | int plane_id; 119 | 120 | // map normal 121 | vector history_normal; 122 | 123 | // paras 124 | int match_by_area, match_by_feature, match_by_dist, no_match; 125 | 126 | PlaneManager():plane_id(0),match_by_area(0),match_by_dist(0),match_by_feature(0),no_match(0),match_pid({-1}){} 127 | ~PlaneManager(){} 128 | 129 | // establish new planes, assoicate with observed planes 130 | void addNewMeasurements(const Meas& meas, int _frame_id, Vector3d Ps[], Matrix3d Rs[], 131 | Vector3d tic[], Matrix3d ric[]); 132 | 133 | // identify structural planes 134 | void classifyStructuralPlanes(const AxisMap& a_map); 135 | 136 | // update frame id, remove old measurements, add plane into local history plane map 137 | void removeFront(int frame_count); 138 | void removeBack(int frame_cnt); 139 | void applyTransformation(Matrix3d old_R, Vector3d old_P, Matrix3d new_R, Vector3d new_P); 140 | 141 | // check outlier 142 | void findOutlier(Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 143 | 144 | // initialize geo. info 145 | void initialGeo(AxisMap& a_map, Vector3d Ps[], Matrix3d Rs[], Vector3d tic[], Matrix3d ric[]); 146 | 147 | // info. 
148 | void printStatus(); 149 | }; -------------------------------------------------------------------------------- /svio/state_estimation/projection_quat.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "../utility/utility.h" 6 | #include "../utility/tic_toc.h" 7 | 8 | namespace Eigen 9 | { 10 | typedef Eigen::Matrix Matrix32; 11 | typedef Eigen::Matrix Matrix62; 12 | } 13 | 14 | namespace QUATERNION_VIO 15 | { 16 | 17 | class Unit3 18 | { 19 | public: 20 | Unit3(); 21 | Unit3(Eigen::Vector3d& n); 22 | ~Unit3(); 23 | Eigen::Matrix32 getBasis(Eigen::Matrix62* H = NULL) ; 24 | Eigen::Vector3d p_; 25 | Eigen::Matrix32* B_; 26 | Eigen::Matrix62* H_B_; 27 | }; 28 | 29 | class ProjectionFactor : public ceres::SizedCostFunction<2, 7, 7, 7, 1> 30 | { 31 | public: 32 | ProjectionFactor(const Eigen::Vector3d &_pts_i, const Eigen::Vector3d &_pts_j); 33 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 34 | void check(double **parameters); 35 | 36 | Eigen::Vector3d pts_i, pts_j; 37 | Eigen::Matrix tangent_base; 38 | static Eigen::Matrix2d sqrt_info; 39 | }; 40 | 41 | class PoseLocalPrameterization : public ceres::LocalParameterization 42 | { 43 | virtual bool Plus(const double *x, const double *delta, double *x_plus_delta) const; 44 | virtual bool ComputeJacobian(const double *x, double *jacobian) const; 45 | virtual int GlobalSize() const { return 7; }; 46 | virtual int LocalSize() const { return 6; }; 47 | }; 48 | 49 | } 50 | -------------------------------------------------------------------------------- /svio/state_estimation/structure.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "parameters.h" 10 | 11 | using namespace std; 12 | 13 | #include 14 | #include 15 | using namespace Eigen; 16 | 17 | typedef enum Strutural{no, horizontal_1, horizontal_2, atlanta_1, atlanta_2, atlanta_3, atlanta_4, vertical} Strutural; 18 | 19 | class Axis 20 | { 21 | public: 22 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 23 | 24 | // id 25 | int id; 26 | 27 | // visualization 28 | Vector3d color_13, color_24; 29 | 30 | // geo. info 31 | Vector3d axis_1, axis_2, axis_3, axis_4; 32 | double theta; 33 | 34 | // obs. info 35 | int last_observe_cnt, observe_cnt; 36 | 37 | // state 38 | bool is_mature; 39 | 40 | // used in opt. 
41 | bool is_opting; 42 | 43 | // Axis(int _id, double _theta):last_observe_cnt(0),observe_cnt(1),id(_id),is_mature(false),theta(_theta){} 44 | Axis(int _id, double _theta):last_observe_cnt(0),observe_cnt(1),id(_id),is_mature(true),theta(_theta){} 45 | Axis(){} 46 | 47 | void setcolor() 48 | { 49 | switch(id%3) 50 | { 51 | case 0: 52 | color_13 = Eigen::Vector3d(255, 0, 0); 53 | color_24 = Eigen::Vector3d(0, 255, 0); 54 | break; 55 | case 1: 56 | color_13 = Eigen::Vector3d(255, 255, 0); 57 | color_24 = Eigen::Vector3d(255, 0, 255); 58 | break; 59 | case 2: 60 | color_13 = Eigen::Vector3d(252, 230, 202); 61 | color_24 = Eigen::Vector3d(160, 32, 240); 62 | break; 63 | } 64 | } 65 | 66 | void setdir() 67 | { 68 | axis_1 = Vector3d(cos(theta), sin(theta), 0); 69 | axis_2 = Vector3d(-sin(theta), cos(theta), 0); 70 | axis_3 = Vector3d(-cos(theta), -sin(theta), 0); 71 | axis_4 = Vector3d(sin(theta), -cos(theta), 0); 72 | } 73 | 74 | void updateObs() 75 | { 76 | if (last_observe_cnt != 0) 77 | { 78 | last_observe_cnt = 0; 79 | observe_cnt++; 80 | } 81 | } 82 | }; 83 | 84 | class AxisMap 85 | { 86 | public: 87 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 88 | 89 | map axis_l; 90 | int axis_id; 91 | Vector3d h_color; 92 | 93 | AxisMap():axis_id(0),h_color(Vector3d(0, 250, 250)){} 94 | 95 | void updateAxisMap() 96 | { 97 | for (auto it = axis_l.begin(); it != axis_l.end();) 98 | { 99 | auto& axis = it->second; 100 | if (S_VERIFY > 0 && axis.observe_cnt < 300 && axis.last_observe_cnt > 300) 101 | { 102 | axis_l.erase(it++); 103 | continue; 104 | } 105 | 106 | axis.last_observe_cnt++; 107 | if (axis.observe_cnt > 100) 108 | axis.is_mature = true; 109 | it++; 110 | } 111 | } 112 | 113 | void printStatus() 114 | { 115 | ROS_WARN("AxisMap: total %d axis detected", axis_l.size()); 116 | for (const auto& axis_pair : axis_l) 117 | { 118 | const auto& axis = axis_pair.second; 119 | ROS_WARN("axis: id(%d), theta(%f), mature(%s), observed(%d), last_observe(%d ago)", 120 | axis.id, axis.theta*180/M_PI, (axis.is_mature)?"yes":"no", axis.observe_cnt, axis.last_observe_cnt); 121 | } 122 | } 123 | }; -------------------------------------------------------------------------------- /svio/state_estimation/svio.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | // #include "../utility/pointDefinition.h" 17 | #include "imu_factor.h" 18 | #include "parameters.h" 19 | #include "tf/tf.h" 20 | #include "feature_manager.h" 21 | #include "plane_manager.h" 22 | #include "line_manager.h" 23 | #include "structure.h" 24 | #include "marginalization_factor.h" 25 | #include "../feature_tracking/feature_tracker.h" 26 | 27 | #include 28 | #include 29 | 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | 37 | #include "../initialization/solve_5pts.h" 38 | #include "../initialization/initial_sfm.h" 39 | #include "../initialization/initial_alignment.h" 40 | #include "../initialization/initial_ex_rotation.h" 41 | 42 | using namespace std; 43 | 44 | const static int NOT_INITIED = -100000; 45 | 46 | enum SolverFlag 47 | { 48 | INITIAL, 49 | NON_LINEAR, 50 | STAGE_1_INITIAL, 51 | STAGE_2_INITIAL 52 | }; 53 | 54 | enum MarginalizationFlag 55 | { 56 | MARGIN_OLD = 0, 57 | MARGIN_SECOND_NEW = 1 58 | }; 59 | 60 | class SVIO 61 | { 62 | public: 63 | SVIO(); 64 | virtual ~SVIO(); 65 | void 
clearState(); 66 | 67 | void processIMU(double dt, Eigen::Vector3d & linear_acceleration, Eigen::Vector3d& angular_velocity); 68 | virtual void solveOdometry(); 69 | virtual void slideWindow(); 70 | virtual void slideWindowNew(); 71 | virtual void slideWindowOld(); 72 | 73 | virtual void priorOptimize(bool add_plane_point=false, bool add_plane_line=false); 74 | virtual void afterOptimize(AxisMap& a_map, bool is_yaw_observed, bool add_plane_point=false, bool add_plane_line=false); 75 | 76 | void setParameter(const string &calib_file); 77 | void showStatus(); 78 | 79 | // initialization functions 80 | virtual void processImage_Init(const Meas& vision_meas); 81 | bool findNewAtlantaAxis(); 82 | 83 | bool initialStructure(); 84 | bool visualInitialAlign(); 85 | bool visualInitialAlignWithDepth(); 86 | bool relativePose(Matrix3d &relative_R, Vector3d &relative_T, int &l); 87 | 88 | // use hybrid pnp method 89 | bool relativePoseHybrid(Matrix3d &relative_R, Vector3d &relative_T, int &l); 90 | bool relativePoseHybridCov(Matrix3d &relative_R, Vector3d &relative_T, int &l); 91 | 92 | // initialization variables 93 | double initial_timestamp; 94 | MotionEstimator motion_estimator; 95 | Vector3d g; 96 | 97 | tf::Transform mInitCamPose; // Initial campose 98 | tf::Transform mLastPose; // camera pose for the mImgPTLast 99 | tf::Transform mCurrPose; // camera pose for the mImgPTCurr 100 | tf::Transform mCurrIMUPose; // current IMU pose 101 | tf::Transform mTIC; // transform from imu to camera 102 | 103 | Eigen::Vector3d mg; 104 | Eigen::Matrix3d ric[NUM_OF_CAM]; 105 | Eigen::Vector3d tic[NUM_OF_CAM]; 106 | 107 | double Headers[WINDOW_SIZE+1]; 108 | Eigen::Vector3d Ps[WINDOW_SIZE+1]; 109 | Eigen::Vector3d Vs[WINDOW_SIZE+1]; 110 | Eigen::Matrix3d Rs[WINDOW_SIZE+1]; 111 | Eigen::Vector3d Bas[WINDOW_SIZE+1]; 112 | Eigen::Vector3d Bgs[WINDOW_SIZE+1]; 113 | Eigen::Matrix3d R_imu; 114 | 115 | vector key_poses; 116 | 117 | Matrix3d back_R0, last_R, last_R0; 118 | Vector3d back_P0, last_P, last_P0; 119 | 120 | FeatureManager f_manager; 121 | PlaneManager p_manager; 122 | LineManager l_manager; 123 | AxisMap a_map; 124 | 125 | // for debug 126 | vector ceres_times, structural_times, marginalization_times; 127 | vector ceres_iterations; 128 | vector ceres_per_iteration_times; 129 | 130 | // for marginalization 131 | SolverFlag solver_flag; 132 | MarginalizationFlag marginalization_flag; 133 | MarginalizationInfo * last_marginalization_info; 134 | vector last_marginalization_parameter_blocks; 135 | 136 | // for initialization 137 | map all_image_frame; 138 | IntegrationBase *tmp_pre_integration; 139 | 140 | bool mbFirstIMU; 141 | Vector3d acc_0; 142 | Vector3d gyr_0; 143 | 144 | std::mutex m_process; 145 | std::mutex m_buf; 146 | queue> accBuf; 147 | queue> gyrBuf; 148 | queue > > > > > featureBuf; 149 | double prevTime, currTime; 150 | 151 | IntegrationBase * pre_integrations[WINDOW_SIZE+1]; 152 | vector dt_buf[WINDOW_SIZE+1]; 153 | vector linear_acceleration_buf[WINDOW_SIZE+1]; 154 | vector angular_velocity_buf[WINDOW_SIZE+1]; 155 | 156 | int frame_count; 157 | 158 | double para_Pose[WINDOW_SIZE+1][SIZE_POSE]; 159 | double para_SpeedBias[WINDOW_SIZE+1][SIZE_SPEEDBIAS]; 160 | double para_Feature[NUM_OF_FEAT][SIZE_FEATURE]; 161 | double para_Line[NUM_OF_LINE][SIZE_LINE]; 162 | double para_Plane[NUM_OF_PLANE][SIZE_PLANE]; 163 | double para_Ex_Pose[NUM_OF_CAM][SIZE_POSE]; 164 | double para_hvp[NUM_OF_HVP][SIZE_HVP]; 165 | double para_Td[1][1]; 166 | 167 | vector opt_dims, opt_var_cnts, opt_res_cnts; 168 | 169 | 
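// A minimal, hypothetical sketch of how the raw Ceres arrays above mirror the
// sliding-window state; the estimator's own copying is presumably handled in
// priorOptimize()/afterOptimize(), so the loop below is for illustration only:
//
//   for (int i = 0; i <= WINDOW_SIZE; i++)
//   {
//       para_Pose[i][0] = Ps[i].x();  para_Pose[i][1] = Ps[i].y();  para_Pose[i][2] = Ps[i].z();
//       Eigen::Quaterniond q(Rs[i]);              // orientation stored as (x, y, z, w)
//       para_Pose[i][3] = q.x();  para_Pose[i][4] = q.y();
//       para_Pose[i][5] = q.z();  para_Pose[i][6] = q.w();
//       Eigen::Map<Eigen::Vector3d>(para_SpeedBias[i])     = Vs[i];   // 0..2 velocity
//       Eigen::Map<Eigen::Vector3d>(para_SpeedBias[i] + 3) = Bas[i];  // 3..5 accelerometer bias
//       Eigen::Map<Eigen::Vector3d>(para_SpeedBias[i] + 6) = Bgs[i];  // 6..8 gyroscope bias
//   }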
nav_msgs::Path m_gt_path; // groundtruth path 170 | nav_msgs::Path m_est_path; // estimated path 171 | public: 172 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 173 | }; 174 | -------------------------------------------------------------------------------- /svio/state_estimation/vanish_point_factor.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include "../utility/utility.h" 6 | 7 | class ParaVPPoseFactor : public ceres::SizedCostFunction<3,7,7,1> 8 | { 9 | public: 10 | ParaVPPoseFactor(const Eigen::Matrix3d& _sqrt_mea); 11 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 12 | void check(double **parameters); // for debug oranfire 13 | 14 | Eigen::Matrix3d sqrt_mea; 15 | }; 16 | 17 | class VertVPPoseFactor : public ceres::SizedCostFunction<3,7,7,1> 18 | { 19 | public: 20 | VertVPPoseFactor(const Eigen::Matrix3d& _sqrt_mea); 21 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 22 | void check(double **parameters); // for debug oranfire 23 | 24 | Eigen::Matrix3d sqrt_mea; 25 | }; 26 | 27 | class VPPoseFactor : public ceres::SizedCostFunction<3,7,7> 28 | { 29 | public: 30 | VPPoseFactor(const Eigen::Matrix3d& _sqrt_mea, const Eigen::Vector3d& _global_dir); 31 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const; 32 | void check(double **parameters); // for debug oranfire 33 | 34 | Eigen::Matrix3d sqrt_mea; 35 | Eigen::Vector3d global_dir; 36 | }; -------------------------------------------------------------------------------- /svio/utility/CameraPoseVisualization.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | class CameraPoseVisualization { 11 | public: 12 | std::string m_marker_ns; 13 | 14 | CameraPoseVisualization(float r, float g, float b, float a); 15 | 16 | void setImageBoundaryColor(float r, float g, float b, float a=1.0); 17 | void setOpticalCenterConnectorColor(float r, float g, float b, float a=1.0); 18 | void setScale(double s); 19 | void setLineWidth(double width); 20 | 21 | void add_pose(const Eigen::Vector3d& p, const Eigen::Quaterniond& q); 22 | void reset(); 23 | 24 | void publish_by(ros::Publisher& pub, const std_msgs::Header& header); 25 | void add_edge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1); 26 | void add_loopedge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1); 27 | private: 28 | std::vector m_markers; 29 | std_msgs::ColorRGBA m_image_boundary_color; 30 | std_msgs::ColorRGBA m_optical_center_connector_color; 31 | double m_scale; 32 | double m_line_width; 33 | 34 | static const Eigen::Vector3d imlt; 35 | static const Eigen::Vector3d imlb; 36 | static const Eigen::Vector3d imrt; 37 | static const Eigen::Vector3d imrb; 38 | static const Eigen::Vector3d oc ; 39 | static const Eigen::Vector3d lt0 ; 40 | static const Eigen::Vector3d lt1 ; 41 | static const Eigen::Vector3d lt2 ; 42 | }; 43 | -------------------------------------------------------------------------------- /svio/utility/tic_toc.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | class TicToc 8 | { 9 | public: 10 | TicToc() 11 | { 12 | tic(); 13 | } 14 | 15 | void tic() 16 | { 17 | start = std::chrono::system_clock::now(); 18 | 
} 19 | 20 | double toc() 21 | { 22 | end = std::chrono::system_clock::now(); 23 | std::chrono::duration elapsed_seconds = end - start; 24 | return elapsed_seconds.count() * 1000; 25 | } 26 | 27 | private: 28 | std::chrono::time_point start, end; 29 | }; 30 | -------------------------------------------------------------------------------- /svio/utility/utility.cpp: -------------------------------------------------------------------------------- 1 | #include "utility.h" 2 | 3 | Eigen::Matrix3d Utility::g2R(const Eigen::Vector3d &g) 4 | { 5 | Eigen::Matrix3d R0; 6 | Eigen::Vector3d ng1 = g.normalized(); 7 | Eigen::Vector3d ng2{0, 0, 1.0}; 8 | R0 = Eigen::Quaterniond::FromTwoVectors(ng1, ng2).toRotationMatrix(); 9 | double yaw = Utility::R2ypr(R0).x(); 10 | R0 = Utility::ypr2R(Eigen::Vector3d{-yaw, 0, 0}) * R0; 11 | // R0 = Utility::ypr2R(Eigen::Vector3d{-90, 0, 0}) * R0; 12 | return R0; 13 | } 14 | -------------------------------------------------------------------------------- /svio/utility/visualization.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | // #include "CameraPoseVisualization.h" 20 | #include 21 | #include "../state_estimation/svio.h" 22 | #include "../state_estimation/parameters.h" 23 | #include "CameraPoseVisualization.h" 24 | #include 25 | 26 | extern ros::Publisher pub_odometry; 27 | extern ros::Publisher pub_path, pub_pose; 28 | extern ros::Publisher pub_cloud, pub_map; 29 | extern ros::Publisher pub_key_poses; 30 | extern ros::Publisher pub_ref_pose, pub_cur_pose; 31 | extern ros::Publisher pub_key; 32 | extern nav_msgs::Path path; 33 | extern ros::Publisher pub_pose_graph; 34 | extern int IMAGE_ROW, IMAGE_COL; 35 | 36 | void registerPub(ros::NodeHandle &n); 37 | 38 | void pubLatestOdometry(const Eigen::Vector3d &P, const Eigen::Quaterniond &Q, const Eigen::Vector3d &V, double t); 39 | 40 | void pubPointImage(const cv::Mat &imgTrack, const double t); 41 | 42 | void pubPlaneSegmentation(const cv::Mat &imgTrack, const double t); 43 | 44 | void pubLineImg(const cv::Mat &imgLine, const double t); 45 | 46 | void pubLineClusterImg(const cv::Mat &imgLineCluster, const double t); 47 | 48 | // void printStatistics(const SVIO &dvio, double t); 49 | 50 | void pubOdometry(const SVIO &dvio, const double t); 51 | 52 | void pubOldOdometry(const SVIO &dvio, const double t); 53 | 54 | void pubKeyPoses(const SVIO &dvio, const double t); 55 | 56 | void pubCameraPose(const SVIO &dvio, const double t); 57 | 58 | void pubPointCloud(const SVIO &dvio, const double t); 59 | 60 | void pubPlanePointCloud(const SVIO &dvio, const double t, bool pub_local = true, bool pub_history = false); 61 | 62 | void pubLineCloud(const SVIO &dvio, const double t); 63 | 64 | void pubTF(const SVIO &dvio, const double t); 65 | 66 | void pubKeyframe(const SVIO &dvio, const double t); --------------------------------------------------------------------------------
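For orientation, here is a minimal, hypothetical node skeleton showing how the interfaces declared above are meant to be wired together. The include paths and the node name are illustrative only; the actual entry points in this repository are svio_stereo_node.cpp and svio_syn_node.cpp, and all topic subscription and measurement-feeding code is omitted.

    #include <ros/ros.h>
    #include "state_estimation/svio.h"          // SVIO estimator (path illustrative)
    #include "state_estimation/parameters.h"    // readParameters(), CAM_NAMES, ...
    #include "utility/visualization.h"          // registerPub(), pubOdometry(), ...

    int main(int argc, char **argv)
    {
        ros::init(argc, argv, "svio_sketch");
        ros::NodeHandle n("~");

        readParameters(n);   // fill the extern configuration globals from the yaml/launch parameters
        registerPub(n);      // advertise the odometry / path / point-cloud publishers

        SVIO estimator;
        estimator.setParameter(CAM_NAMES);   // camera calibration file given in the config

        // ... subscribe to the IMU and RGB-D topics, call estimator.processIMU(...)
        // and estimator.processImage_Init(...) as measurements arrive, and publish
        // each optimized state with pubOdometry(estimator, t) / pubTF(estimator, t).

        ros::spin();
        return 0;
    }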