├── .gitignore
├── .idea
├── .gitignore
├── LVI-SAM-modified.iml
├── modules.xml
└── vcs.xml
├── README.md
├── images
├── 04.png
├── 05.png
├── 06.png
├── Screenshot from 2022-03-23 17-25-35.png
├── Screenshot from 2022-03-23 17-34-16.png
├── Screenshot from 2022-03-23 21-40-51.png
├── Screenshot from 2022-03-23 21-44-15.png
├── Screenshot from 2022-03-23 21-55-31.png
├── Screenshot from 2022-03-24 15-55-44.png
├── Screenshot from 2022-03-24 15-59-28.png
├── Screenshot from 2022-03-24 16-02-21.png
├── Screenshot from 2022-03-24 16-05-43.png
├── Screenshot from 2022-03-24 16-08-34.png
├── Screenshot from 2022-04-07 15-52-59.png
├── Screenshot from 2022-04-07 15-53-22.png
├── Screenshot from 2022-04-07 15-53-33.png
├── Screenshot from 2022-04-07 15-57-16.png
└── Screenshot from 2022-04-07 15-57-36.png
└── src
├── .vscode
├── c_cpp_properties.json
└── settings.json
├── CMakeLists.txt
├── LICENSE
├── README.md
├── config
├── brief_k10L6.bin
├── brief_pattern.yml
├── custom
│ ├── params_camera.yaml
│ └── params_lidar.yaml
├── extrinsic_parameter.csv
├── extrinsic_parameter_example.pdf
├── fisheye_mask_720x540.jpg
├── params_camera.yaml
└── params_lidar.yaml
├── doc
├── demo.gif
├── handheld-earth.png
├── jackal-earth.png
├── paper.pdf
└── sensor.jpeg
├── launch
├── include
│ ├── config
│ │ ├── robot.urdf.xacro
│ │ └── rviz.rviz
│ ├── module_robot_state_publisher.launch
│ ├── module_rviz.launch
│ ├── module_sam.launch
│ └── rosconsole
│ │ ├── rosconsole_error.conf
│ │ ├── rosconsole_info.conf
│ │ └── rosconsole_warn.conf
└── run.launch
├── msg
└── cloud_info.msg
├── package.xml
└── src
├── lidar_odometry
├── featureExtraction.cpp
├── imageProjection.cpp
├── imuPreintegration.cpp
├── mahonyMine.cpp
├── mahonyMine.h
├── mapOptmization.cpp
└── utility.h
└── visual_odometry
├── visual_estimator
├── estimator.cpp
├── estimator.h
├── estimator_node.cpp
├── factor
│ ├── imu_factor.h
│ ├── integration_base.h
│ ├── marginalization_factor.cpp
│ ├── marginalization_factor.h
│ ├── pose_local_parameterization.cpp
│ ├── pose_local_parameterization.h
│ ├── projection_factor.cpp
│ ├── projection_factor.h
│ ├── projection_td_factor.cpp
│ └── projection_td_factor.h
├── feature_manager.cpp
├── feature_manager.h
├── initial
│ ├── initial_aligment.cpp
│ ├── initial_alignment.h
│ ├── initial_ex_rotation.cpp
│ ├── initial_ex_rotation.h
│ ├── initial_sfm.cpp
│ ├── initial_sfm.h
│ ├── solve_5pts.cpp
│ └── solve_5pts.h
├── parameters.cpp
├── parameters.h
└── utility
│ ├── CameraPoseVisualization.cpp
│ ├── CameraPoseVisualization.h
│ ├── tic_toc.h
│ ├── utility.cpp
│ ├── utility.h
│ ├── visualization.cpp
│ └── visualization.h
├── visual_feature
├── camera_models
│ ├── Camera.cc
│ ├── Camera.h
│ ├── CameraFactory.cc
│ ├── CameraFactory.h
│ ├── CataCamera.cc
│ ├── CataCamera.h
│ ├── CostFunctionFactory.cc
│ ├── CostFunctionFactory.h
│ ├── EquidistantCamera.cc
│ ├── EquidistantCamera.h
│ ├── PinholeCamera.cc
│ ├── PinholeCamera.h
│ ├── ScaramuzzaCamera.cc
│ ├── ScaramuzzaCamera.h
│ ├── gpl.cc
│ └── gpl.h
├── feature_tracker.cpp
├── feature_tracker.h
├── feature_tracker_node.cpp
├── parameters.cpp
├── parameters.h
└── tic_toc.h
└── visual_loop
├── ThirdParty
├── DBoW
│ ├── BowVector.cpp
│ ├── BowVector.h
│ ├── DBoW2.h
│ ├── FBrief.cpp
│ ├── FBrief.h
│ ├── FClass.h
│ ├── FeatureVector.cpp
│ ├── FeatureVector.h
│ ├── QueryResults.cpp
│ ├── QueryResults.h
│ ├── ScoringObject.cpp
│ ├── ScoringObject.h
│ ├── TemplatedDatabase.h
│ └── TemplatedVocabulary.h
├── DUtils
│ ├── DException.h
│ ├── DUtils.h
│ ├── Random.cpp
│ ├── Random.h
│ ├── Timestamp.cpp
│ └── Timestamp.h
├── DVision
│ ├── BRIEF.cpp
│ ├── BRIEF.h
│ └── DVision.h
├── VocabularyBinary.cpp
└── VocabularyBinary.hpp
├── keyframe.cpp
├── keyframe.h
├── loop_detection.cpp
├── loop_detection.h
├── loop_detection_node.cpp
└── parameters.h
/.gitignore:
--------------------------------------------------------------------------------
1 | /src/.history/
2 | /src/.vscode/
3 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Datasource local storage ignored files
5 | /dataSources/
6 | /dataSources.local.xml
7 | # Editor-based HTTP Client requests
8 | /httpRequests/
9 |
--------------------------------------------------------------------------------
/.idea/LVI-SAM-modified.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LVI-SAM-MODIFIED
2 |
3 | This repository is a modified version of [LVI-SAM](https://github.com/TixiaoShan/LVI-SAM).
4 | ---
5 |
6 | ## Modification
7 |
8 | - Add a function to get extrinsic parameters. The original code assumes there is no translation between the sensors, and their parameters are hard-coded. Now both our datasets and the official LVI-SAM datasets work well.
9 | - Add "lidar to imu extrinsics" in params_camera.yaml.
10 | - Use [MahonyAHRS](https://github.com/PaulStoffregen/MahonyAHRS) to calculate the quaternion, so you don't need to prepare a 9-axis IMU.
11 | - Add a lidar ring calculation method; it works whether or not your lidar provides "ring" information.
12 | - Make some changes to the code for sensor alignment.
13 | - Fix depth association between camera and lidar (Default lidar orientation is x--->front,y--->left,z--->up).
14 | ---
15 |
16 | ## Notes
17 |
18 | - Most of the changes are marked by "# modified".
19 | - If you are using VSCode and "#include xxx" reports an error, press Ctrl+Shift+P, open "C/C++: Edit Configurations (UI)", and add the following paths to the Include path.
20 | /opt/ros/melodic/include/**
21 | /usr/include/**
22 | - Please make sure you have the same versions of the dependencies as [LVI-SAM](https://github.com/TixiaoShan/LVI-SAM). If you have problems installing or importing multiple versions of a dependency, you can refer to this [blog](https://blog.csdn.net/DumpDoctorWang/article/details/84587331).
23 | - You need to download and compile [yaml-cpp](https://github.com/jbeder/yaml-cpp).
24 | - You can use [kalibr](https://github.com/ethz-asl/kalibr) to get cam_to_imu, and [calibration_camera_lidar](https://github.com/XidianLemon/calibration_camera_lidar) to get cam_to_lidar; then lidar_to_imu = cam_to_lidar.transpose * cam_to_imu.
25 | - If you are looking up for Chinese annotation, please click this link [LVI-SAM_detailed_comments](https://github.com/electech6/LVI-SAM_detailed_comments).
26 | - You can see the difference between fixing the depth association or not in the following pictures. Feature points no longer easily go through walls, which is more reasonable.
27 | ---
28 |
29 | ## Results 1.1.0
30 | 
31 | 
32 | 
33 | ---
34 |
35 | ## Results 2.0.0 fix depth association (Indoor)
36 | 
37 | 
38 | 
39 | ---
40 |
41 | ## Results 2.0.0 fix depth association (Outdoor)
42 | 
43 | 
44 | 
45 | ---
46 |
47 | ## Acknowledgement
48 | - The original version is from [LVI-SAM](https://github.com/TixiaoShan/LVI-SAM).
49 | - Inspired by this work: [LVI_SAM_fixed](https://github.com/epicjung/LVI_SAM_fixed).
50 |
--------------------------------------------------------------------------------
/images/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/04.png
--------------------------------------------------------------------------------
/images/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/05.png
--------------------------------------------------------------------------------
/images/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/06.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-23 17-25-35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-23 17-25-35.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-23 17-34-16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-23 17-34-16.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-23 21-40-51.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-23 21-40-51.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-23 21-44-15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-23 21-44-15.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-23 21-55-31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-23 21-55-31.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-24 15-55-44.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-24 15-55-44.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-24 15-59-28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-24 15-59-28.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-24 16-02-21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-24 16-02-21.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-24 16-05-43.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-24 16-05-43.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-03-24 16-08-34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-03-24 16-08-34.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-04-07 15-52-59.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-04-07 15-52-59.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-04-07 15-53-22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-04-07 15-53-22.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-04-07 15-53-33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-04-07 15-53-33.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-04-07 15-57-16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-04-07 15-57-16.png
--------------------------------------------------------------------------------
/images/Screenshot from 2022-04-07 15-57-36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/images/Screenshot from 2022-04-07 15-57-36.png
--------------------------------------------------------------------------------
/src/.vscode/c_cpp_properties.json:
--------------------------------------------------------------------------------
1 | {
2 | "configurations": [
3 | {
4 | "browse": {
5 | "databaseFilename": "",
6 | "limitSymbolsToIncludedHeaders": true
7 | },
8 | "includePath": [
9 | "/opt/ros/kinetic/include/**",
10 | "/usr/include/**"
11 | ],
12 | "name": "ROS"
13 | }
14 | ],
15 | "version": 4
16 | }
--------------------------------------------------------------------------------
/src/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.autoComplete.extraPaths": [
3 | "/opt/ros/kinetic/lib/python2.7/dist-packages"
4 | ],
5 | "python.analysis.extraPaths": [
6 | "/opt/ros/kinetic/lib/python2.7/dist-packages"
7 | ]
8 | }
--------------------------------------------------------------------------------
/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(lvi_sam)
3 |
4 | ######################
5 | ### Cmake flags
6 | ######################
7 | set(CMAKE_BUILD_TYPE "Release")
8 | set(CMAKE_CXX_FLAGS "-std=c++11")
9 | set(CMAKE_CXX_FLAGS_RELEASE "-O3 -Wall -g -pthread")
10 |
11 | ######################
12 | ### Packages
13 | ######################
14 | find_package(catkin REQUIRED COMPONENTS
15 | tf
16 | roscpp
17 | rospy
18 | roslib
19 | # msg
20 | std_msgs
21 | sensor_msgs
22 | geometry_msgs
23 | nav_msgs
24 | # cv
25 | cv_bridge
26 | # pcl
27 | pcl_conversions
28 | # msg generation
29 | message_generation
30 | )
31 |
32 | find_package(OpenMP REQUIRED)
33 | find_package(PCL REQUIRED)
34 | find_package(OpenCV REQUIRED)
35 | find_package(Ceres REQUIRED)
36 | find_package(GTSAM REQUIRED QUIET)
37 | find_package(Boost REQUIRED COMPONENTS filesystem program_options system)
38 |
39 |
40 | ######################
41 | ### Message generation
42 | ######################
43 | add_message_files(
44 | DIRECTORY msg
45 | FILES
46 | cloud_info.msg
47 | )
48 |
49 | generate_messages(
50 | DEPENDENCIES
51 | geometry_msgs
52 | std_msgs
53 | nav_msgs
54 | sensor_msgs
55 | )
56 |
57 | ######################
58 | ### Catkin
59 | ######################
60 | catkin_package(
61 | DEPENDS PCL GTSAM
62 | )
63 |
64 | include_directories(
65 | ${catkin_INCLUDE_DIRS}
66 | ${PCL_INCLUDE_DIRS}
67 | ${OpenCV_INCLUDE_DIRS}
68 | ${CERES_INCLUDE_DIRS}
69 | ${Boost_INCLUDE_DIRS}
70 | ${GTSAM_INCLUDE_DIR}
71 | /usr/local/include/yaml-cpp
72 | )
73 |
74 | link_directories(
75 | ${PCL_LIBRARY_DIRS}
76 | ${OpenCV_LIBRARY_DIRS}
77 | ${GTSAM_LIBRARY_DIRS}
78 | /usr/local/lib
79 | )
80 |
81 | ######################
82 | ### visual odometry
83 | ######################
84 | file(GLOB visual_feature_files
85 | "src/visual_odometry/visual_feature/*.cpp"
86 | "src/visual_odometry/visual_feature/camera_models/*.cc"
87 | )
88 | file(GLOB visual_odometry_files
89 | "src/visual_odometry/visual_estimator/*.cpp"
90 | "src/visual_odometry/visual_estimator/factor/*.cpp"
91 | "src/visual_odometry/visual_estimator/initial/*.cpp"
92 | "src/visual_odometry/visual_estimator/utility/*.cpp"
93 | )
94 | file(GLOB visual_loop_files
95 | "src/visual_odometry/visual_loop/*.cpp"
96 | "src/visual_odometry/visual_loop/utility/*.cpp"
97 | "src/visual_odometry/visual_loop/ThirdParty/*.cpp"
98 | "src/visual_odometry/visual_loop/ThirdParty/DBoW/*.cpp"
99 | "src/visual_odometry/visual_loop/ThirdParty/DUtils/*.cpp"
100 | "src/visual_odometry/visual_loop/ThirdParty/DVision/*.cpp"
101 | "src/visual_odometry/visual_feature/camera_models/*.cc"
102 | )
103 | add_library(ahrs SHARED src/lidar_odometry/mahonyMine.cpp)
104 | # Visual Feature Tracker
105 | add_executable(${PROJECT_NAME}_visual_feature ${visual_feature_files})
106 | target_link_libraries(${PROJECT_NAME}_visual_feature ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
107 | # Visual Odometry
108 | add_executable(${PROJECT_NAME}_visual_odometry ${visual_odometry_files})
109 | target_link_libraries(${PROJECT_NAME}_visual_odometry ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
110 | # Visual Loop
111 | add_executable(${PROJECT_NAME}_visual_loop ${visual_loop_files})
112 | target_link_libraries(${PROJECT_NAME}_visual_loop ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
113 |
114 | ######################
115 | ### lidar odometry
116 | ######################
117 |
118 | # IMU Preintegration
119 | add_executable(${PROJECT_NAME}_imuPreintegration src/lidar_odometry/imuPreintegration.cpp)
120 | target_link_libraries(${PROJECT_NAME}_imuPreintegration ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} gtsam ahrs)
121 | # Range Image Projection
122 | add_executable(${PROJECT_NAME}_imageProjection src/lidar_odometry/imageProjection.cpp)
123 | add_dependencies(${PROJECT_NAME}_imageProjection ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)
124 | target_link_libraries(${PROJECT_NAME}_imageProjection ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ahrs)
125 | # Feature Association
126 | add_executable(${PROJECT_NAME}_featureExtraction src/lidar_odometry/featureExtraction.cpp)
127 | add_dependencies(${PROJECT_NAME}_featureExtraction ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)
128 | target_link_libraries(${PROJECT_NAME}_featureExtraction ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ahrs)
129 | # Mapping Optimization
130 | add_executable(${PROJECT_NAME}_mapOptmization src/lidar_odometry/mapOptmization.cpp)
131 | add_dependencies(${PROJECT_NAME}_mapOptmization ${catkin_EXPORTED_TARGETS} ${PROJECT_NAME}_generate_messages_cpp)
132 | target_compile_options(${PROJECT_NAME}_mapOptmization PRIVATE ${OpenMP_CXX_FLAGS})
133 | target_link_libraries(${PROJECT_NAME}_mapOptmization PRIVATE ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${OpenMP_CXX_FLAGS} gtsam ahrs)
134 |
--------------------------------------------------------------------------------
/src/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, Tixiao Shan
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/src/README.md:
--------------------------------------------------------------------------------
1 | # LVI-SAM
2 |
3 | This repository contains code for a lidar-visual-inertial odometry and mapping system, which combines the advantages of [LIO-SAM](https://github.com/TixiaoShan/LIO-SAM/tree/a246c960e3fca52b989abf888c8cf1fae25b7c25) and [Vins-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono) at a system level.
4 |
5 |
6 |
7 |
8 |
9 | ---
10 |
11 | ## Dependency
12 |
13 | - [ROS](http://wiki.ros.org/ROS/Installation) (Tested with kinetic and melodic)
14 | - [gtsam](https://github.com/borglab/gtsam/releases) (Georgia Tech Smoothing and Mapping library)
15 | ```
16 | wget -O ~/Downloads/gtsam.zip https://github.com/borglab/gtsam/archive/4.0.2.zip
17 | cd ~/Downloads/ && unzip gtsam.zip -d ~/Downloads/
18 | cd ~/Downloads/gtsam-4.0.2/
19 | mkdir build && cd build
20 | cmake -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF ..
21 | sudo make install -j4
22 | ```
23 | - [Ceres](https://github.com/ceres-solver/ceres-solver/releases) (C++ library for modeling and solving large, complicated optimization problems)
24 | ```
25 | sudo apt-get install -y libgoogle-glog-dev
26 | sudo apt-get install -y libatlas-base-dev
27 | wget -O ~/Downloads/ceres.zip https://github.com/ceres-solver/ceres-solver/archive/1.14.0.zip
28 | cd ~/Downloads/ && unzip ceres.zip -d ~/Downloads/
29 | cd ~/Downloads/ceres-solver-1.14.0
30 | mkdir ceres-bin && cd ceres-bin
31 | cmake ..
32 | sudo make install -j4
33 | ```
34 |
35 | ---
36 |
37 | ## Compile
38 |
39 | You can use the following commands to download and compile the package.
40 |
41 | ```
42 | cd ~/catkin_ws/src
43 | git clone https://github.com/TixiaoShan/LVI-SAM.git
44 | cd ..
45 | catkin_make
46 | ```
47 |
48 | ---
49 |
50 | ## Datasets
51 |
52 |
53 |
54 |
55 |
56 | The datasets used in the paper can be downloaded from Google Drive. The data-gathering sensor suite includes: Velodyne VLP-16 lidar, FLIR BFS-U3-04S2M-CS camera, MicroStrain 3DM-GX5-25 IMU, and Reach RS+ GPS.
57 |
58 | ```
59 | https://drive.google.com/drive/folders/1q2NZnsgNmezFemoxhHnrDnp1JV_bqrgV?usp=sharing
60 | ```
61 |
62 | **Note** that the images in the provided bag files are in compressed format. So a decompression command is added at the last line of ```launch/module_sam.launch```. If your own bag records the raw image data, please comment this line out.
63 |
64 |
65 |
66 |
67 |
68 |
69 | ---
70 |
71 | ## Run the package
72 |
73 | 1. Configure parameters:
74 |
75 | ```
76 | Configure sensor parameters in the .yaml files in the ```config``` folder.
77 | ```
78 |
79 | 2. Run the launch file:
80 | ```
81 | roslaunch lvi_sam run.launch
82 | ```
83 |
84 | 3. Play existing bag files:
85 | ```
86 | rosbag play handheld.bag
87 | ```
88 |
89 | ---
90 |
91 | ## TODO
92 |
93 | - [ ] Update graph optimization using all three factors in imuPreintegration.cpp, simplify mapOptimization.cpp, increase system stability
94 |
95 | ---
96 |
97 | ## Paper
98 |
99 | Thank you for citing our [paper](./doc/paper.pdf) if you use any of this code or datasets.
100 |
101 | ```
102 | @inproceedings{lvisam2021shan,
103 | title={LVI-SAM: Tightly-coupled Lidar-Visual-Inertial Odometry via Smoothing and Mapping},
104 | author={Shan, Tixiao and Englot, Brendan and Ratti, Carlo and Rus Daniela},
105 | booktitle={IEEE International Conference on Robotics and Automation (ICRA)},
106 | pages={to-be-added},
107 | year={2021},
108 | organization={IEEE}
109 | }
110 | ```
111 |
112 | ---
113 |
114 | ## Acknowledgement
115 |
116 | - The visual-inertial odometry module is adapted from [Vins-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono).
117 | - The lidar-inertial odometry module is adapted from [LIO-SAM](https://github.com/TixiaoShan/LIO-SAM/tree/a246c960e3fca52b989abf888c8cf1fae25b7c25).
118 |
--------------------------------------------------------------------------------
/src/config/brief_k10L6.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/config/brief_k10L6.bin
--------------------------------------------------------------------------------
/src/config/custom/params_camera.yaml:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 |
3 | # Project
4 | project_name: "lvi_sam"
5 |
6 | #common parameters
7 | imu_topic: "/camera/imu"
8 | image_topic: "/camera/infra1/image_rect_raw"
9 | point_cloud_topic: "lvi_sam/lidar/deskew/cloud_deskewed"
10 |
11 | # Lidar Params
12 | use_lidar: 1 # whether use depth info from lidar or not
13 | lidar_skip: 3 # skip this amount of scans
14 | align_camera_lidar_estimation: 1 # align camera and lidar estimation for visualization
15 |
16 | # lidar to camera extrinsic
17 | lidar_to_cam_tx: 0.0398946
18 | lidar_to_cam_ty: -0.111613
19 | lidar_to_cam_tz: -0.110938
20 | lidar_to_cam_rx: -1.58432
21 | lidar_to_cam_ry: -0.00517069
22 | lidar_to_cam_rz: 3.14111
23 |
24 | # imu to lidar extrinsic
25 | imu_to_lidar_tx: 0.0368063
26 | imu_to_lidar_ty: -0.126738
27 | imu_to_lidar_tz: -0.118555
28 | imu_to_lidar_rx: -1.58037
29 | imu_to_lidar_ry: 0.00435929
30 | imu_to_lidar_rz: 3.13845
31 | # camera model
32 | model_type: PINHOLE
33 | camera_name: camera
34 |
35 | image_width: 640
36 | image_height: 480
37 | distortion_parameters:
38 | k1: 0.0
39 | k2: 0.0
40 | p1: 0.0
41 | p2: 0.0
42 | projection_parameters:
43 | fx: 382.25116042947064
44 | fy: 378.96199224070352
45 | cx: 322.32833278836199
46 | cy: 243.69059923371992
47 |
48 |
49 | #imu parameters The more accurate parameters you provide, the worse performance
50 | acc_n: 2.8204438138836063e-02 # accelerometer measurement noise standard deviation.
51 | gyr_n: 2.4927177105924072e-03 # gyroscope measurement noise standard deviation.
52 | acc_w: 4.0739558582909998e-04 # accelerometer bias random walk noise standard deviation.
53 | gyr_w: 1.7029730578800357e-05 # gyroscope bias random walk noise standard deviation.
54 | g_norm: 9.81007 # gravity magnitude
55 | imu_hz: 400 # frequency of imu
56 |
57 | # Extrinsic parameter between IMU and Camera.
58 | estimate_extrinsic: 0 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
59 | # 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
60 | # 2 Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at beginning.
61 | #Rotation from camera frame to imu frame, imu^R_cam
62 | extrinsicRotation: !!opencv-matrix
63 | rows: 3
64 | cols: 3
65 | dt: d
66 | data: [ 0.99995824,0.003922,-0.00825459,
67 | -0.00388915,0.99998447,0.00399195,
68 | 0.00827012,-0.00395968,0.99995796]
69 | #Translation from camera frame to imu frame, imu^T_cam
70 | extrinsicTranslation: !!opencv-matrix
71 | rows: 3
72 | cols: 1
73 | dt: d
74 | data: [0.00304159, 0.00742751,0.01522853]
75 |
76 | #feature tracker parameters
77 | max_cnt: 150 # max feature number in feature tracking
78 | min_dist: 20 # min distance between two features
79 | freq: 20 # frequence (Hz) of publish tracking result. At least 10Hz for good estimation. If set 0, the frequence will be same as raw image
80 | F_threshold: 1.0 # ransac threshold (pixel)
81 | show_track: 1 # publish tracking image as topic
82 | equalize: 1 # if image is too dark or light, turn on equalize to find enough features
83 | fisheye: 0 # if using fisheye, turn it on. A circle mask will be loaded to remove noisy edge points
84 |
85 | #optimization parameters
86 | max_solver_time: 0.035 # max solver iteration time (ms), to guarantee real time
87 | max_num_iterations: 10 # max solver iterations, to guarantee real time
88 | keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
89 |
90 | #unsynchronization parameters
91 | estimate_td: 0 # online estimate time offset between camera and imu
92 | td: -0.004492987156480011 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
93 |
94 | #rolling shutter parameters
95 | rolling_shutter: 0 # 0: global shutter camera, 1: rolling shutter camera
96 | rolling_shutter_tr: 0 # unit: s. rolling shutter read out time per frame (from data sheet).
97 |
98 | #loop closure parameters
99 | loop_closure: 1 # start loop closure
100 | skip_time: 0.0
101 | skip_dist: 0.0
102 | debug_image: 0 # save raw image in loop detector for visualization purposes; you can disable this function by setting it to 0
103 | match_image_scale: 0.5
104 | vocabulary_file: "/config/brief_k10L6.bin"
105 | brief_pattern_file: "/config/brief_pattern.yml"
106 |
--------------------------------------------------------------------------------
/src/config/custom/params_lidar.yaml:
--------------------------------------------------------------------------------
1 | # project name
2 | PROJECT_NAME: "lvi_sam"
3 |
4 | lvi_sam:
5 |
6 | # Topics
7 | pointCloudTopic: "/lslidar_point_cloud" # Point cloud data
8 | imuTopic: "/camera/imu" # IMU data
9 |
10 | # Heading
11 | useImuHeadingInitialization: true # if using GPS data, set to "true"
12 |
13 | # Export settings
14 | savePCD: false # https://github.com/TixiaoShan/LIO-SAM/issues/3
15 | savePCDDirectory: "/software/LVI-SAM/picresults/" # in your home folder, starts and ends with "/". Warning: the code deletes "LOAM" folder then recreates it. See "mapOptimization" for implementation
16 |
17 | # Sensor Settings
18 | N_SCAN: 32 # number of lidar channel (i.e., 16, 32, 64, 128)
19 | Horizon_SCAN: 2000 # lidar horizontal resolution (Velodyne:1800, Ouster:512,1024,2048)
20 | ang_y: 1.0 #ang/N_SCAN在纵向激光头分布的角度/线数
21 | timeField: "time" # point timestamp field, Velodyne - "time", Ouster - "t"
22 | downsampleRate: 1 # default: 1. Downsample your data if too many points. i.e., 16 = 64 / 4, 16 = 16 / 1
23 |
24 | # IMU Settings
25 | imuAccNoise: 2.8204438138836063e-02
26 | imuGyrNoise: 2.4927177105924072e-03
27 | imuAccBiasN: 4.0739558582909998e-04
28 | imuGyrBiasN: 1.7029730578800357e-05
29 | imuGravity: 9.81007
30 | imuHz: 400
31 |
32 | # Extrinsics (IMU -> lidar)
33 | extrinsicTrans: [0.0435774,-0.0940634, -0.105495]
34 | extrinsicRot: [-0.999986,0.00438914,-0.00310146,0.0031433,0.00955755,-0.999949,-0.00435928,-0.999945,-0.00957121]
35 | extrinsicRPY: [-0.999986,0.00438914,-0.00310146,0.0031433,0.00955755,-0.999949,-0.00435928,-0.999945,-0.00957121]
36 |
37 | # LOAM feature threshold
38 | edgeThreshold: 1.0
39 | surfThreshold: 0.1
40 | edgeFeatureMinValidNum: 10
41 | surfFeatureMinValidNum: 100
42 |
43 | # voxel filter params
44 | odometrySurfLeafSize: 0.4 # default: 0.4
45 | mappingCornerLeafSize: 0.2 # default: 0.2
46 | mappingSurfLeafSize: 0.4 # default: 0.4
47 |
48 | # robot motion constraint (in case you are using a 2D robot)
49 | z_tollerance: 1000 # meters
50 | rotation_tollerance: 1000 # radians
51 |
52 | # CPU Params
53 | numberOfCores: 4 # number of cores for mapping optimization
54 | mappingProcessInterval: 0.15 # seconds, regulate mapping frequency
55 |
56 | # Surrounding map
57 | surroundingkeyframeAddingDistThreshold: 1.0 # meters, regulate keyframe adding threshold
58 | surroundingkeyframeAddingAngleThreshold: 0.2 # radians, regulate keyframe adding threshold
59 | surroundingKeyframeDensity: 2.0 # meters, downsample surrounding keyframe poses
60 | surroundingKeyframeSearchRadius: 50.0 # meters, within n meters scan-to-map optimization (when loop closure disabled)
61 |
62 | # Loop closure
63 | loopClosureEnableFlag: true
64 | surroundingKeyframeSize: 25 # submap size (when loop closure enabled)
65 | historyKeyframeSearchRadius: 20.0 # meters, key frame that is within n meters from current pose will be considered for loop closure
66 | historyKeyframeSearchTimeDiff: 30.0 # seconds, key frame that is n seconds older will be considered for loop closure
67 | historyKeyframeSearchNum: 25 # number of history key frames will be fused into a submap for loop closure
68 | historyKeyframeFitnessScore: 0.3 # icp threshold, the smaller the better alignment
69 |
70 | # Visualization
71 | globalMapVisualizationSearchRadius: 1000.0 # meters, global map visualization radius
72 | globalMapVisualizationPoseDensity: 10.0 # meters, global map visualization keyframe density
73 | globalMapVisualizationLeafSize: 1.0 # meters, global map visualization cloud density
74 |
--------------------------------------------------------------------------------
/src/config/extrinsic_parameter.csv:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 | ---
3 | extrinsicRotation: !!opencv-matrix
4 | rows: 3
5 | cols: 3
6 | dt: d
7 | data: [ 9.9967237146459742e-01, 3.5010791507287180e-03,
8 | 2.5355318478149098e-02, -3.7927849731910972e-03,
9 | 9.9992707266789804e-01, 1.1465780739019066e-02,
10 | -2.5313326776525688e-02, -1.1558191492982692e-02,
11 | 9.9961274686596324e-01 ]
12 | extrinsicTranslation: !!opencv-matrix
13 | rows: 3
14 | cols: 1
15 | dt: d
16 | data: [ -9.0253632250273802e-03, 8.0388486327263697e-02,
17 | 1.5270169087859092e-02 ]
18 |
--------------------------------------------------------------------------------
/src/config/extrinsic_parameter_example.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/config/extrinsic_parameter_example.pdf
--------------------------------------------------------------------------------
/src/config/fisheye_mask_720x540.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/config/fisheye_mask_720x540.jpg
--------------------------------------------------------------------------------
/src/config/params_camera.yaml:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 |
3 | # Project
4 | project_name: "lvi_sam"
5 |
6 | #common parameters
7 | imu_topic: "/imu_raw"
8 | image_topic: "/camera/image_raw"
9 | point_cloud_topic: "lvi_sam/lidar/deskew/cloud_deskewed"
10 |
11 | # Lidar Params
12 | use_lidar: 1 # whether use depth info from lidar or not
13 | lidar_skip: 3 # skip this amount of scans
14 | align_camera_lidar_estimation: 1 # align camera and lidar estimation for visualization
15 |
16 | # lidar to camera extrinsic
17 | lidar_to_cam_tx: -0.0199398
18 | lidar_to_cam_ty: -0.0336424
19 | lidar_to_cam_tz: -0.00642238
20 | lidar_to_cam_rx: -1.5708
21 | lidar_to_cam_ry: -1.5708
22 | lidar_to_cam_rz: 0.0
23 |
24 | # imu to lidar extrinsic
25 | imu_to_lidar_tx: 0.0
26 | imu_to_lidar_ty: 0.0
27 | imu_to_lidar_tz: 0.0
28 | imu_to_lidar_rx: 3.14159
29 | imu_to_lidar_ry: 0.0
30 | imu_to_lidar_rz: 3.14159
31 |
32 | # camera model
33 | model_type: MEI
34 | camera_name: camera
35 |
36 | # Mono camera config
37 | image_width: 720
38 | image_height: 540
39 | mirror_parameters:
40 | xi: 1.9926618269451453
41 | distortion_parameters:
42 | k1: -0.0399258932468764
43 | k2: 0.15160828121223818
44 | p1: 0.00017756967825777937
45 | p2: -0.0011531239076798612
46 | projection_parameters:
47 | gamma1: 669.8940458885896
48 | gamma2: 669.1450614220616
49 | u0: 377.9459252967363
50 | v0: 279.63655686698144
51 | fisheye_mask: "/config/fisheye_mask_720x540.jpg"
52 |
53 | #imu parameters The more accurate parameters you provide, the worse performance
54 | acc_n: 0.02 # accelerometer measurement noise standard deviation.
55 | gyr_n: 0.01 # gyroscope measurement noise standard deviation.
56 | acc_w: 0.002 # accelerometer bias random walk noise standard deviation.
57 | gyr_w: 4.0e-5 # gyroscope bias random walk noise standard deviation.
58 | g_norm: 9.805 #
59 |
60 | # Extrinsic parameter between IMU and Camera.
61 | estimate_extrinsic: 0 # 0 Have an accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam, don't change it.
62 | # 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
63 | # 2 Don't know anything about extrinsic parameters. You don't need to give R,T. We will try to calibrate it. Do some rotation movement at beginning.
64 | #Rotation from camera frame to imu frame, imu^R_cam
65 | extrinsicRotation: !!opencv-matrix
66 | rows: 3
67 | cols: 3
68 | dt: d
69 | data: [ 0, 0,-1,
70 | -1, 0, 0,
71 | 0, 1, 0]
72 |
73 | #Translation from camera frame to imu frame, imu^T_cam
74 | extrinsicTranslation: !!opencv-matrix
75 | rows: 3
76 | cols: 1
77 | dt: d
78 | data: [0.006422381632411965, 0.019939800449065116, 0.03364235163589248]
79 |
80 | #feature tracker parameters
81 | max_cnt: 150 # max feature number in feature tracking
82 | min_dist: 20 # min distance between two features
83 | freq: 20 # frequency (Hz) of publishing the tracking result. At least 10Hz for good estimation. If set to 0, the frequency will be the same as the raw image
84 | F_threshold: 1.0 # ransac threshold (pixel)
85 | show_track: 1 # publish tracking image as topic
86 | equalize: 1 # if the image is too dark or too bright, turn on equalization to find enough features
87 | fisheye: 1 # if using a fisheye lens, turn this on. A circular mask will be loaded to remove noisy edge points
88 |
89 | #optimization parameters
90 | max_solver_time: 0.035 # max solver iteration time (ms), to guarantee real time
91 | max_num_iterations: 10 # max solver iterations, to guarantee real time
92 | keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
93 |
94 | #unsynchronization parameters
95 | estimate_td: 0 # online estimate time offset between camera and imu
96 | td: 0 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
97 |
98 | #rolling shutter parameters
99 | rolling_shutter: 0 # 0: global shutter camera, 1: rolling shutter camera
100 | rolling_shutter_tr: 0 # unit: s. rolling shutter read out time per frame (from data sheet).
101 |
102 | #loop closure parameters
103 | loop_closure: 1 # start loop closure
104 | skip_time: 0.0
105 | skip_dist: 0.0
106 | debug_image: 0 # save raw image in loop detector for visualization purpose; you can disable this feature by setting 0
107 | match_image_scale: 0.5
108 | vocabulary_file: "/config/brief_k10L6.bin"
109 | brief_pattern_file: "/config/brief_pattern.yml"
--------------------------------------------------------------------------------
/src/config/params_lidar.yaml:
--------------------------------------------------------------------------------
1 | # project name
2 | PROJECT_NAME: "lvi_sam"
3 |
4 | lvi_sam:
5 |
6 | # Topics
7 | pointCloudTopic: "/points_raw" # Point cloud data
8 | imuTopic: "/imu_raw" # IMU data
9 |
10 | # Heading
11 | useImuHeadingInitialization: true # if using GPS data, set to "true"
12 |
13 | # Export settings
14 | savePCD: false # https://github.com/TixiaoShan/LIO-SAM/issues/3
15 | savePCDDirectory: "/Downloads/LOAM/" # in your home folder, starts and ends with "/". Warning: the code deletes "LOAM" folder then recreates it. See "mapOptimization" for implementation
16 |
17 | # Sensor Settings
18 | N_SCAN: 16 # number of lidar channel (i.e., 16, 32, 64, 128)
19 | Horizon_SCAN: 1800 # lidar horizontal resolution (Velodyne:1800, Ouster:512,1024,2048)
20 | timeField: "time" # point timestamp field, Velodyne - "time", Ouster - "t"
21 | downsampleRate: 1 # default: 1. Downsample your data if too many points. i.e., 16 = 64 / 4, 16 = 16 / 1
22 |
23 | # IMU Settings
24 | imuAccNoise: 3.9939570888238808e-03
25 | imuGyrNoise: 1.5636343949698187e-03
26 | imuAccBiasN: 6.4356659353532566e-05
27 | imuGyrBiasN: 3.5640318696367613e-05
28 | imuGravity: 9.80511
29 |
30 | # Extrinsics (IMU -> lidar)
31 | extrinsicTrans: [0.0, 0.0, 0.0]
32 | extrinsicRot: [-1, 0, 0, 0, 1, 0, 0, 0, -1]
33 | extrinsicRPY: [0, 1, 0, -1, 0, 0, 0, 0, 1]
34 |
35 | # LOAM feature threshold
36 | edgeThreshold: 1.0
37 | surfThreshold: 0.1
38 | edgeFeatureMinValidNum: 10
39 | surfFeatureMinValidNum: 100
40 |
41 | # voxel filter params
42 | odometrySurfLeafSize: 0.4 # default: 0.4
43 | mappingCornerLeafSize: 0.2 # default: 0.2
44 | mappingSurfLeafSize: 0.4 # default: 0.4
45 |
46 | # robot motion constraint (in case you are using a 2D robot)
47 | z_tollerance: 1000 # meters
48 | rotation_tollerance: 1000 # radians
49 |
50 | # CPU Params
51 | numberOfCores: 4 # number of cores for mapping optimization
52 | mappingProcessInterval: 0.15 # seconds, regulate mapping frequency
53 |
54 | # Surrounding map
55 | surroundingkeyframeAddingDistThreshold: 1.0 # meters, regulate keyframe adding threshold
56 | surroundingkeyframeAddingAngleThreshold: 0.2 # radians, regulate keyframe adding threshold
57 | surroundingKeyframeDensity: 2.0 # meters, downsample surrounding keyframe poses
58 | surroundingKeyframeSearchRadius: 50.0 # meters, within n meters scan-to-map optimization (when loop closure disabled)
59 |
60 | # Loop closure
61 | loopClosureEnableFlag: true
62 | surroundingKeyframeSize: 25 # submap size (when loop closure enabled)
63 | historyKeyframeSearchRadius: 20.0 # meters, key frame that is within n meters from current pose will be considered for loop closure
64 | historyKeyframeSearchTimeDiff: 30.0 # seconds, key frame that is n seconds older will be considered for loop closure
65 | historyKeyframeSearchNum: 25 # number of history key frames will be fused into a submap for loop closure
66 | historyKeyframeFitnessScore: 0.3 # icp threshold, the smaller the better alignment
67 |
68 | # Visualization
69 | globalMapVisualizationSearchRadius: 1000.0 # meters, global map visualization radius
70 | globalMapVisualizationPoseDensity: 10.0 # meters, global map visualization keyframe density
71 | globalMapVisualizationLeafSize: 1.0 # meters, global map visualization cloud density
--------------------------------------------------------------------------------
/src/doc/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/doc/demo.gif
--------------------------------------------------------------------------------
/src/doc/handheld-earth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/doc/handheld-earth.png
--------------------------------------------------------------------------------
/src/doc/jackal-earth.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/doc/jackal-earth.png
--------------------------------------------------------------------------------
/src/doc/paper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/doc/paper.pdf
--------------------------------------------------------------------------------
/src/doc/sensor.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/skyrim835/LVI-SAM-modified/4eabb2b79b0cf48e7e2eb779ac97ba1abae0296b/src/doc/sensor.jpeg
--------------------------------------------------------------------------------
/src/launch/include/config/robot.urdf.xacro:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/src/launch/include/module_robot_state_publisher.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/src/launch/include/module_rviz.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/src/launch/include/module_sam.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/src/launch/include/rosconsole/rosconsole_error.conf:
--------------------------------------------------------------------------------
1 | # Set the default ros output to warning and higher
2 | log4j.logger.ros = ERROR
--------------------------------------------------------------------------------
/src/launch/include/rosconsole/rosconsole_info.conf:
--------------------------------------------------------------------------------
1 | # Set the default ros output to warning and higher
2 | log4j.logger.ros = INFO
--------------------------------------------------------------------------------
/src/launch/include/rosconsole/rosconsole_warn.conf:
--------------------------------------------------------------------------------
1 | # Set the default ros output to warning and higher
2 | log4j.logger.ros = WARN
--------------------------------------------------------------------------------
/src/launch/run.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/src/msg/cloud_info.msg:
--------------------------------------------------------------------------------
1 | # Cloud Info
2 | Header header
3 |
4 | int32[] startRingIndex
5 | int32[] endRingIndex
6 |
7 | int32[] pointColInd # point column index in range image
8 | float32[] pointRange # point range
9 |
10 | int64 imuAvailable
11 | int64 odomAvailable
12 |
13 | # Attitude for lidar odometry initialization
14 | float32 imuRollInit
15 | float32 imuPitchInit
16 | float32 imuYawInit
17 |
18 | # Odometry
19 | float32 odomX
20 | float32 odomY
21 | float32 odomZ
22 | float32 odomRoll
23 | float32 odomPitch
24 | float32 odomYaw
25 |
26 | # Odometry reset ID
27 | int64 odomResetId
28 |
29 | # Point cloud messages
30 | sensor_msgs/PointCloud2 cloud_deskewed # original cloud deskewed
31 | sensor_msgs/PointCloud2 cloud_corner # extracted corner feature
32 | sensor_msgs/PointCloud2 cloud_surface # extracted surface feature
--------------------------------------------------------------------------------
/src/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | lvi_sam
4 | 0.0.0
5 | A package
6 | lalala
7 | Tixiao Shan
8 | BSD-3
9 |
10 |
11 | catkin
12 |
13 | roscpp
14 | roscpp
15 | rospy
16 | rospy
17 |
18 | tf
19 | tf
20 |
21 | cv_bridge
22 | cv_bridge
23 |
24 | pcl_conversions
25 | pcl_conversions
26 |
27 | std_msgs
28 | std_msgs
29 | sensor_msgs
30 | sensor_msgs
31 | geometry_msgs
32 | geometry_msgs
33 | nav_msgs
34 | nav_msgs
35 |
36 | message_generation
37 | message_generation
38 | message_runtime
39 | message_runtime
40 |
41 | GTSAM
42 | GTSAM
43 |
44 |
45 |
--------------------------------------------------------------------------------
/src/src/lidar_odometry/mahonyMine.h:
--------------------------------------------------------------------------------
1 | //=====================================================================================================
2 | // MahonyAHRS.h
3 | //=====================================================================================================
4 | //
5 | // Madgwick's implementation of Mayhony's AHRS algorithm.
6 | // See: http://www.x-io.co.uk/node/8#open_source_ahrs_and_imu_algorithms
7 | //
8 | // Date Author Notes
9 | // 29/09/2011 SOH Madgwick Initial release
10 | // 02/10/2011 SOH Madgwick Optimised for reduced CPU load
11 | //
12 | //=====================================================================================================
13 | #ifndef MahonyAHRS_h
14 | #define MahonyAHRS_h
15 |
16 | //----------------------------------------------------------------------------------------------------
17 | // Variable declaration
18 | class Mahony{
19 | private:
20 | float twoKp; // 2 * proportional gain (Kp)
21 | float twoKi; // 2 * integral gain (Ki)
22 | float q0, q1, q2, q3; // quaternion of sensor frame relative to auxiliary frame
23 | float integralFBx, integralFBy, integralFBz;
24 | public:
25 | Mahony();
26 | void MahonyAHRSupdate(float gx, float gy, float gz, float ax, float ay, float az, float mx, float my, float mz);
27 | void MahonyAHRSupdateIMU(float gx, float gy, float gz, float ax, float ay, float az);
28 | float invSqrt(float x);
29 | float getQuaternionX() {
30 | return q1;
31 | }
32 | float getQuaternionY() {
33 | return q2;
34 | }
35 | float getQuaternionZ() {
36 | return q3;
37 | }
38 | float getQuaternionW() {
39 | return q0;
40 | }
41 | };
42 |
43 | //---------------------------------------------------------------------------------------------------
44 | // Function declarations
45 |
46 |
47 |
48 | #endif
49 | //=====================================================================================================
50 | // End of file
51 | //=====================================================================================================
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/estimator.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include "parameters.h"
4 | #include "feature_manager.h"
5 | #include "utility/utility.h"
6 | #include "utility/tic_toc.h"
7 | #include "initial/solve_5pts.h"
8 | #include "initial/initial_sfm.h"
9 | #include "initial/initial_alignment.h"
10 | #include "initial/initial_ex_rotation.h"
11 | #include
12 | #include
13 |
14 | #include
15 | #include "factor/imu_factor.h"
16 | #include "factor/pose_local_parameterization.h"
17 | #include "factor/projection_factor.h"
18 | #include "factor/projection_td_factor.h"
19 | #include "factor/marginalization_factor.h"
20 | #include
21 | #include
22 | #include
23 | /**
24 | * @class Estimator 状态估计器
25 | * @Description IMU预积分,图像IMU融合的初始化和状态估计,重定位
26 | * detailed
27 | */
28 |
29 | class Estimator
30 | {
31 | public:
32 | Estimator();
33 |
34 | void setParameter();
35 |
36 | // interface
37 | void processIMU(double t, const Vector3d &linear_acceleration, const Vector3d &angular_velocity);
38 | void processImage(const map>>> &image,
39 | const vector &lidar_initialization_info,
40 | const std_msgs::Header &header);
41 |
42 | // internal
43 | void clearState();
44 | bool initialStructure();
45 | bool visualInitialAlign();
46 | // void visualInitialAlignWithDepth();
47 | bool relativePose(Matrix3d &relative_R, Vector3d &relative_T, int &l);
48 | void slideWindow();
49 | void solveOdometry();
50 | void slideWindowNew();
51 | void slideWindowOld();
52 | void optimization();
53 | void vector2double();
54 | void double2vector();
55 | bool failureDetection();
56 |
57 |
58 | enum SolverFlag
59 | {
60 | INITIAL, // 初始化
61 | NON_LINEAR // 非线性优化
62 | };
63 |
64 | enum MarginalizationFlag
65 | {
66 | MARGIN_OLD = 0, // 边缘化最老帧
67 | MARGIN_SECOND_NEW = 1 // 边缘化次新帧
68 | };
69 |
70 | SolverFlag solver_flag;
71 | MarginalizationFlag marginalization_flag;
72 | Vector3d g;
73 | MatrixXd Ap[2], backup_A;
74 | VectorXd bp[2], backup_b;
75 | // camera to imu
76 | Matrix3d ric[NUM_OF_CAM];
77 | Vector3d tic[NUM_OF_CAM];
78 | // PVQ
79 | Vector3d Ps[(WINDOW_SIZE + 1)];
80 | Vector3d Vs[(WINDOW_SIZE + 1)];
81 | Matrix3d Rs[(WINDOW_SIZE + 1)];
82 | Vector3d Bas[(WINDOW_SIZE + 1)]; //加速度的bias
83 | Vector3d Bgs[(WINDOW_SIZE + 1)]; //角速度的bias
84 | double td;
85 | // 滑动窗口
86 | Matrix3d back_R0, last_R, last_R0;
87 | Vector3d back_P0, last_P, last_P0;
88 | std_msgs::Header Headers[(WINDOW_SIZE + 1)];
89 | // 预积分
90 | IntegrationBase *pre_integrations[(WINDOW_SIZE + 1)];
91 | Vector3d acc_0, gyr_0;// 上一时刻的加速度a和角速度w
92 | // imu数据
93 | vector dt_buf[(WINDOW_SIZE + 1)];
94 | vector linear_acceleration_buf[(WINDOW_SIZE + 1)];
95 | vector angular_velocity_buf[(WINDOW_SIZE + 1)];
96 |
97 | int frame_count; // 滑窗内的关键帧个数
98 | int sum_of_outlier, sum_of_back, sum_of_front, sum_of_invalid;
99 |
100 | FeatureManager f_manager;
101 | MotionEstimator m_estimator;
102 | InitialEXRotation initial_ex_rotation;
103 |
104 | bool first_imu;
105 | bool is_valid, is_key;
106 | bool failure_occur;
107 |
108 | vector point_cloud;
109 | vector margin_cloud;
110 | vector key_poses;
111 | double initial_timestamp;
112 |
113 |
114 | double para_Pose[WINDOW_SIZE + 1][SIZE_POSE]; // P, Q组合成pose
115 | double para_SpeedBias[WINDOW_SIZE + 1][SIZE_SPEEDBIAS]; // V, Ba, Bg
116 | double para_Feature[NUM_OF_F][SIZE_FEATURE]; // 特征点的深度
117 | double para_Ex_Pose[NUM_OF_CAM][SIZE_POSE]; // camera to imu外参
118 | double para_Retrive_Pose[SIZE_POSE];
119 | double para_Td[1][1]; // 滑窗内第一个时刻相机到IMU时钟差
120 | double para_Tr[1][1];
121 |
122 | int loop_window_index;
123 |
124 | MarginalizationInfo *last_marginalization_info;
125 | vector last_marginalization_parameter_blocks;
126 |
127 | map all_image_frame; // 滑动窗口时间段内的所有frames
128 | IntegrationBase *tmp_pre_integration;
129 |
130 | int failureCount;
131 | };
132 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/marginalization_factor.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | #include "../utility/utility.h"
11 | #include "../utility/tic_toc.h"
12 |
13 | const int NUM_THREADS = 4;
14 |
15 | struct ResidualBlockInfo
16 | {
17 | ResidualBlockInfo(ceres::CostFunction *_cost_function, ceres::LossFunction *_loss_function, std::vector _parameter_blocks, std::vector _drop_set)
18 | : cost_function(_cost_function), loss_function(_loss_function), parameter_blocks(_parameter_blocks), drop_set(_drop_set) {}
19 |
20 | void Evaluate();
21 |
22 | ceres::CostFunction *cost_function;
23 | ceres::LossFunction *loss_function;
24 | std::vector parameter_blocks;
25 | std::vector drop_set;
26 |
27 | double **raw_jacobians;
28 | std::vector> jacobians;
29 | Eigen::VectorXd residuals;
30 |
31 | int localSize(int size)
32 | {
33 | return size == 7 ? 6 : size;
34 | }
35 | };
36 |
37 | struct ThreadsStruct
38 | {
39 | std::vector sub_factors;
40 | Eigen::MatrixXd A;
41 | Eigen::VectorXd b;
42 | std::unordered_map parameter_block_size; //global size
43 | std::unordered_map parameter_block_idx; //local size
44 | };
45 |
46 | class MarginalizationInfo
47 | {
48 | public:
49 | ~MarginalizationInfo();
50 | int localSize(int size) const;
51 | int globalSize(int size) const;
52 | void addResidualBlockInfo(ResidualBlockInfo *residual_block_info);
53 | void preMarginalize();
54 | void marginalize();
55 | std::vector getParameterBlocks(std::unordered_map &addr_shift);
56 |
57 | std::vector factors;
58 | int m, n;
59 | std::unordered_map parameter_block_size; //global size
60 | int sum_block_size;
61 | std::unordered_map parameter_block_idx; //local size
62 | std::unordered_map parameter_block_data;
63 |
64 | std::vector keep_block_size; //global size
65 | std::vector keep_block_idx; //local size
66 | std::vector keep_block_data;
67 |
68 | Eigen::MatrixXd linearized_jacobians;
69 | Eigen::VectorXd linearized_residuals;
70 | const double eps = 1e-8;
71 |
72 | };
73 |
74 | class MarginalizationFactor : public ceres::CostFunction
75 | {
76 | public:
77 | MarginalizationFactor(MarginalizationInfo* _marginalization_info);
78 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const;
79 |
80 | MarginalizationInfo* marginalization_info;
81 | };
82 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/pose_local_parameterization.cpp:
--------------------------------------------------------------------------------
1 | #include "pose_local_parameterization.h"
2 |
3 | bool PoseLocalParameterization::Plus(const double *x, const double *delta, double *x_plus_delta) const
4 | {
5 | Eigen::Map _p(x);
6 | Eigen::Map _q(x + 3);
7 |
8 | Eigen::Map dp(delta);
9 |
10 | Eigen::Quaterniond dq = Utility::deltaQ(Eigen::Map(delta + 3));
11 |
12 | Eigen::Map p(x_plus_delta);
13 | Eigen::Map q(x_plus_delta + 3);
14 |
15 | p = _p + dp;
16 | q = (_q * dq).normalized();
17 |
18 | return true;
19 | }
20 | bool PoseLocalParameterization::ComputeJacobian(const double *x, double *jacobian) const
21 | {
22 | Eigen::Map> j(jacobian);
23 | j.topRows<6>().setIdentity();
24 | j.bottomRows<1>().setZero();
25 |
26 | return true;
27 | }
28 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/pose_local_parameterization.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include "../utility/utility.h"
6 |
7 | class PoseLocalParameterization : public ceres::LocalParameterization
8 | {
9 | virtual bool Plus(const double *x, const double *delta, double *x_plus_delta) const;
10 | virtual bool ComputeJacobian(const double *x, double *jacobian) const;
11 | virtual int GlobalSize() const { return 7; };
12 | virtual int LocalSize() const { return 6; };
13 | };
14 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/projection_factor.cpp:
--------------------------------------------------------------------------------
1 | #include "projection_factor.h"
2 |
3 | Eigen::Matrix2d ProjectionFactor::sqrt_info;
4 | double ProjectionFactor::sum_t;
5 |
6 | ProjectionFactor::ProjectionFactor(const Eigen::Vector3d &_pts_i, const Eigen::Vector3d &_pts_j) : pts_i(_pts_i), pts_j(_pts_j)
7 | {
8 | #ifdef UNIT_SPHERE_ERROR
9 | Eigen::Vector3d b1, b2;
10 | Eigen::Vector3d a = pts_j.normalized();
11 | Eigen::Vector3d tmp(0, 0, 1);
12 | if(a == tmp)
13 | tmp << 1, 0, 0;
14 | b1 = (tmp - a * (a.transpose() * tmp)).normalized();
15 | b2 = a.cross(b1);
16 | tangent_base.block<1, 3>(0, 0) = b1.transpose();
17 | tangent_base.block<1, 3>(1, 0) = b2.transpose();
18 | #endif
19 | };
20 |
21 | bool ProjectionFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const
22 | {
23 | TicToc tic_toc;
24 | Eigen::Vector3d Pi(parameters[0][0], parameters[0][1], parameters[0][2]);
25 | Eigen::Quaterniond Qi(parameters[0][6], parameters[0][3], parameters[0][4], parameters[0][5]);
26 |
27 | Eigen::Vector3d Pj(parameters[1][0], parameters[1][1], parameters[1][2]);
28 | Eigen::Quaterniond Qj(parameters[1][6], parameters[1][3], parameters[1][4], parameters[1][5]);
29 |
30 | Eigen::Vector3d tic(parameters[2][0], parameters[2][1], parameters[2][2]);
31 | Eigen::Quaterniond qic(parameters[2][6], parameters[2][3], parameters[2][4], parameters[2][5]);
32 |
33 | //pts_i 是i时刻归一化相机坐标系下的3D坐标
34 | //第i帧相机坐标系下的的逆深度
35 | double inv_dep_i = parameters[3][0];
36 | //第i帧相机坐标系下的3D坐标
37 | Eigen::Vector3d pts_camera_i = pts_i / inv_dep_i;
38 | //第i帧IMU坐标系下的3D坐标
39 | Eigen::Vector3d pts_imu_i = qic * pts_camera_i + tic;
40 | //世界坐标系下的3D坐标
41 | Eigen::Vector3d pts_w = Qi * pts_imu_i + Pi;
42 | //第j帧imu坐标系下的3D坐标
43 | Eigen::Vector3d pts_imu_j = Qj.inverse() * (pts_w - Pj);
44 | //第j帧相机坐标系下的3D坐标
45 | Eigen::Vector3d pts_camera_j = qic.inverse() * (pts_imu_j - tic);
46 | Eigen::Map residual(residuals);
47 |
48 | #ifdef UNIT_SPHERE_ERROR
49 | residual = tangent_base * (pts_camera_j.normalized() - pts_j.normalized());
50 | #else
51 | double dep_j = pts_camera_j.z();
52 | residual = (pts_camera_j / dep_j).head<2>() - pts_j.head<2>();
53 | #endif
54 |
55 | residual = sqrt_info * residual;
56 |
57 | //reduce 表示残差residual对fci(pts_camera_j)的导数,同样根据不同的相机模型
58 | if (jacobians)
59 | {
60 | Eigen::Matrix3d Ri = Qi.toRotationMatrix();
61 | Eigen::Matrix3d Rj = Qj.toRotationMatrix();
62 | Eigen::Matrix3d ric = qic.toRotationMatrix();
63 | Eigen::Matrix reduce(2, 3);
64 | #ifdef UNIT_SPHERE_ERROR
65 | double norm = pts_camera_j.norm();
66 | Eigen::Matrix3d norm_jaco;
67 | double x1, x2, x3;
68 | x1 = pts_camera_j(0);
69 | x2 = pts_camera_j(1);
70 | x3 = pts_camera_j(2);
71 | norm_jaco << 1.0 / norm - x1 * x1 / pow(norm, 3), - x1 * x2 / pow(norm, 3), - x1 * x3 / pow(norm, 3),
72 | - x1 * x2 / pow(norm, 3), 1.0 / norm - x2 * x2 / pow(norm, 3), - x2 * x3 / pow(norm, 3),
73 | - x1 * x3 / pow(norm, 3), - x2 * x3 / pow(norm, 3), 1.0 / norm - x3 * x3 / pow(norm, 3);
74 | reduce = tangent_base * norm_jaco;
75 | #else
76 | reduce << 1. / dep_j, 0, -pts_camera_j(0) / (dep_j * dep_j),
77 | 0, 1. / dep_j, -pts_camera_j(1) / (dep_j * dep_j);
78 | #endif
79 | reduce = sqrt_info * reduce;
80 |
81 | // 残差项的Jacobian
82 | // 先求fci对各项的Jacobian,然后用链式法则乘起来
83 | // 对第i帧的位姿 pbi,qbi 2X7的矩阵 最后一项是0
84 | if (jacobians[0])
85 | {
86 | Eigen::Map> jacobian_pose_i(jacobians[0]);
87 |
88 | Eigen::Matrix jaco_i;
89 | jaco_i.leftCols<3>() = ric.transpose() * Rj.transpose();
90 | jaco_i.rightCols<3>() = ric.transpose() * Rj.transpose() * Ri * -Utility::skewSymmetric(pts_imu_i);
91 |
92 | jacobian_pose_i.leftCols<6>() = reduce * jaco_i;
93 | jacobian_pose_i.rightCols<1>().setZero();
94 | }
95 | // 对第j帧的位姿 pbj,qbj
96 | if (jacobians[1])
97 | {
98 | Eigen::Map> jacobian_pose_j(jacobians[1]);
99 |
100 | Eigen::Matrix jaco_j;
101 | jaco_j.leftCols<3>() = ric.transpose() * -Rj.transpose();
102 | jaco_j.rightCols<3>() = ric.transpose() * Utility::skewSymmetric(pts_imu_j);
103 |
104 | jacobian_pose_j.leftCols<6>() = reduce * jaco_j;
105 | jacobian_pose_j.rightCols<1>().setZero();
106 | }
107 | // 对相机到IMU的外参 pbc,qbc (qic,tic)
108 | if (jacobians[2])
109 | {
110 | Eigen::Map> jacobian_ex_pose(jacobians[2]);
111 | Eigen::Matrix jaco_ex;
112 | jaco_ex.leftCols<3>() = ric.transpose() * (Rj.transpose() * Ri - Eigen::Matrix3d::Identity());
113 | Eigen::Matrix3d tmp_r = ric.transpose() * Rj.transpose() * Ri * ric;
114 | jaco_ex.rightCols<3>() = -tmp_r * Utility::skewSymmetric(pts_camera_i) + Utility::skewSymmetric(tmp_r * pts_camera_i) +
115 | Utility::skewSymmetric(ric.transpose() * (Rj.transpose() * (Ri * tic + Pi - Pj) - tic));
116 | jacobian_ex_pose.leftCols<6>() = reduce * jaco_ex;
117 | jacobian_ex_pose.rightCols<1>().setZero();
118 | }
119 | // 对逆深度 \lambda (inv_dep_i)
120 | if (jacobians[3])
121 | {
122 | Eigen::Map jacobian_feature(jacobians[3]);
123 | #if 1
124 | jacobian_feature = reduce * ric.transpose() * Rj.transpose() * Ri * ric * pts_i * -1.0 / (inv_dep_i * inv_dep_i);
125 | #else
126 | jacobian_feature = reduce * ric.transpose() * Rj.transpose() * Ri * ric * pts_i;
127 | #endif
128 | }
129 | }
130 | sum_t += tic_toc.toc();
131 |
132 | return true;
133 | }
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/projection_factor.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include "../utility/utility.h"
7 | #include "../utility/tic_toc.h"
8 | #include "../parameters.h"
9 |
10 | class ProjectionFactor : public ceres::SizedCostFunction<2, 7, 7, 7, 1>
11 | {
12 | public:
13 | ProjectionFactor(const Eigen::Vector3d &_pts_i, const Eigen::Vector3d &_pts_j);
14 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const;
15 | void check(double **parameters);
16 |
17 | Eigen::Vector3d pts_i, pts_j;
18 | Eigen::Matrix tangent_base;
19 | static Eigen::Matrix2d sqrt_info;
20 | static double sum_t;
21 | };
22 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/projection_td_factor.cpp:
--------------------------------------------------------------------------------
#include "projection_td_factor.h"

// Shared square-root information matrix and accumulated timing for all factors.
Eigen::Matrix2d ProjectionTdFactor::sqrt_info;
double ProjectionTdFactor::sum_t;

// Constructor: stores the two normalized-plane observations together with the
// per-feature image velocities, per-observation time offsets and image rows
// needed to compensate camera-IMU time misalignment and rolling-shutter readout.
ProjectionTdFactor::ProjectionTdFactor(const Eigen::Vector3d &_pts_i, const Eigen::Vector3d &_pts_j,
                                       const Eigen::Vector2d &_velocity_i, const Eigen::Vector2d &_velocity_j,
                                       const double _td_i, const double _td_j, const double _row_i, const double _row_j) :
                                       pts_i(_pts_i), pts_j(_pts_j),
                                       td_i(_td_i), td_j(_td_j)
{
    // Promote the 2-D normalized-plane velocities to 3-D with z = 0 so they can
    // be subtracted directly from the homogeneous observation vectors.
    velocity_i.x() = _velocity_i.x();
    velocity_i.y() = _velocity_i.y();
    velocity_i.z() = 0;
    velocity_j.x() = _velocity_j.x();
    velocity_j.y() = _velocity_j.y();
    velocity_j.z() = 0;
    // Center the image row about the middle of the image; the rolling-shutter
    // delay is proportional to this signed offset (see TR / ROW * row in Evaluate).
    row_i = _row_i - ROW / 2;
    row_j = _row_j - ROW / 2;

#ifdef UNIT_SPHERE_ERROR
    // Build an orthonormal basis (b1, b2) of the plane tangent to the unit
    // sphere at the bearing of pts_j; the residual is expressed in this basis.
    Eigen::Vector3d b1, b2;
    Eigen::Vector3d a = pts_j.normalized();
    Eigen::Vector3d tmp(0, 0, 1);
    if(a == tmp)  // degenerate case: bearing exactly along +z, pick another seed
        tmp << 1, 0, 0;
    b1 = (tmp - a * (a.transpose() * tmp)).normalized();
    b2 = a.cross(b1);
    tangent_base.block<1, 3>(0, 0) = b1.transpose();
    tangent_base.block<1, 3>(1, 0) = b2.transpose();
#endif
};
33 |
34 | bool ProjectionTdFactor::Evaluate(double const *const *parameters, double *residuals, double **jacobians) const
35 | {
36 | TicToc tic_toc;
37 | Eigen::Vector3d Pi(parameters[0][0], parameters[0][1], parameters[0][2]);
38 | Eigen::Quaterniond Qi(parameters[0][6], parameters[0][3], parameters[0][4], parameters[0][5]);
39 |
40 | Eigen::Vector3d Pj(parameters[1][0], parameters[1][1], parameters[1][2]);
41 | Eigen::Quaterniond Qj(parameters[1][6], parameters[1][3], parameters[1][4], parameters[1][5]);
42 |
43 | Eigen::Vector3d tic(parameters[2][0], parameters[2][1], parameters[2][2]);
44 | Eigen::Quaterniond qic(parameters[2][6], parameters[2][3], parameters[2][4], parameters[2][5]);
45 |
46 | double inv_dep_i = parameters[3][0];
47 |
48 | double td = parameters[4][0];
49 |
50 | Eigen::Vector3d pts_i_td, pts_j_td;
51 | pts_i_td = pts_i - (td - td_i + TR / ROW * row_i) * velocity_i;
52 | pts_j_td = pts_j - (td - td_j + TR / ROW * row_j) * velocity_j;
53 | Eigen::Vector3d pts_camera_i = pts_i_td / inv_dep_i;
54 | Eigen::Vector3d pts_imu_i = qic * pts_camera_i + tic;
55 | Eigen::Vector3d pts_w = Qi * pts_imu_i + Pi;
56 | Eigen::Vector3d pts_imu_j = Qj.inverse() * (pts_w - Pj);
57 | Eigen::Vector3d pts_camera_j = qic.inverse() * (pts_imu_j - tic);
58 | Eigen::Map residual(residuals);
59 |
60 | #ifdef UNIT_SPHERE_ERROR
61 | residual = tangent_base * (pts_camera_j.normalized() - pts_j_td.normalized());
62 | #else
63 | double dep_j = pts_camera_j.z();
64 | residual = (pts_camera_j / dep_j).head<2>() - pts_j_td.head<2>();
65 | #endif
66 |
67 | residual = sqrt_info * residual;
68 |
69 | if (jacobians)
70 | {
71 | Eigen::Matrix3d Ri = Qi.toRotationMatrix();
72 | Eigen::Matrix3d Rj = Qj.toRotationMatrix();
73 | Eigen::Matrix3d ric = qic.toRotationMatrix();
74 | Eigen::Matrix reduce(2, 3);
75 | #ifdef UNIT_SPHERE_ERROR
76 | double norm = pts_camera_j.norm();
77 | Eigen::Matrix3d norm_jaco;
78 | double x1, x2, x3;
79 | x1 = pts_camera_j(0);
80 | x2 = pts_camera_j(1);
81 | x3 = pts_camera_j(2);
82 | norm_jaco << 1.0 / norm - x1 * x1 / pow(norm, 3), - x1 * x2 / pow(norm, 3), - x1 * x3 / pow(norm, 3),
83 | - x1 * x2 / pow(norm, 3), 1.0 / norm - x2 * x2 / pow(norm, 3), - x2 * x3 / pow(norm, 3),
84 | - x1 * x3 / pow(norm, 3), - x2 * x3 / pow(norm, 3), 1.0 / norm - x3 * x3 / pow(norm, 3);
85 | reduce = tangent_base * norm_jaco;
86 | #else
87 | reduce << 1. / dep_j, 0, -pts_camera_j(0) / (dep_j * dep_j),
88 | 0, 1. / dep_j, -pts_camera_j(1) / (dep_j * dep_j);
89 | #endif
90 | reduce = sqrt_info * reduce;
91 |
92 | if (jacobians[0])
93 | {
94 | Eigen::Map> jacobian_pose_i(jacobians[0]);
95 |
96 | Eigen::Matrix jaco_i;
97 | jaco_i.leftCols<3>() = ric.transpose() * Rj.transpose();
98 | jaco_i.rightCols<3>() = ric.transpose() * Rj.transpose() * Ri * -Utility::skewSymmetric(pts_imu_i);
99 |
100 | jacobian_pose_i.leftCols<6>() = reduce * jaco_i;
101 | jacobian_pose_i.rightCols<1>().setZero();
102 | }
103 |
104 | if (jacobians[1])
105 | {
106 | Eigen::Map> jacobian_pose_j(jacobians[1]);
107 |
108 | Eigen::Matrix jaco_j;
109 | jaco_j.leftCols<3>() = ric.transpose() * -Rj.transpose();
110 | jaco_j.rightCols<3>() = ric.transpose() * Utility::skewSymmetric(pts_imu_j);
111 |
112 | jacobian_pose_j.leftCols<6>() = reduce * jaco_j;
113 | jacobian_pose_j.rightCols<1>().setZero();
114 | }
115 | if (jacobians[2])
116 | {
117 | Eigen::Map> jacobian_ex_pose(jacobians[2]);
118 | Eigen::Matrix jaco_ex;
119 | jaco_ex.leftCols<3>() = ric.transpose() * (Rj.transpose() * Ri - Eigen::Matrix3d::Identity());
120 | Eigen::Matrix3d tmp_r = ric.transpose() * Rj.transpose() * Ri * ric;
121 | jaco_ex.rightCols<3>() = -tmp_r * Utility::skewSymmetric(pts_camera_i) + Utility::skewSymmetric(tmp_r * pts_camera_i) +
122 | Utility::skewSymmetric(ric.transpose() * (Rj.transpose() * (Ri * tic + Pi - Pj) - tic));
123 | jacobian_ex_pose.leftCols<6>() = reduce * jaco_ex;
124 | jacobian_ex_pose.rightCols<1>().setZero();
125 | }
126 | if (jacobians[3])
127 | {
128 | Eigen::Map jacobian_feature(jacobians[3]);
129 | jacobian_feature = reduce * ric.transpose() * Rj.transpose() * Ri * ric * pts_i_td * -1.0 / (inv_dep_i * inv_dep_i);
130 | }
131 | if (jacobians[4])
132 | {
133 | Eigen::Map jacobian_td(jacobians[4]);
134 | jacobian_td = reduce * ric.transpose() * Rj.transpose() * Ri * ric * velocity_i / inv_dep_i * -1.0 +
135 | sqrt_info * velocity_j.head(2);
136 | }
137 | }
138 | sum_t += tic_toc.toc();
139 |
140 | return true;
141 | }
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/factor/projection_td_factor.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include "../utility/utility.h"
7 | #include "../utility/tic_toc.h"
8 | #include "../parameters.h"
9 |
10 | //对imu-camera 时间戳不完全同步和 Rolling shutter 相机的支持
11 | class ProjectionTdFactor : public ceres::SizedCostFunction<2, 7, 7, 7, 1, 1>
12 | {
13 | public:
14 | ProjectionTdFactor(const Eigen::Vector3d &_pts_i, const Eigen::Vector3d &_pts_j,
15 | const Eigen::Vector2d &_velocity_i, const Eigen::Vector2d &_velocity_j,
16 | const double _td_i, const double _td_j, const double _row_i, const double _row_j);
17 | virtual bool Evaluate(double const *const *parameters, double *residuals, double **jacobians) const;
18 | void check(double **parameters);
19 |
20 | Eigen::Vector3d pts_i, pts_j;//角点在归一化平面的坐标
21 | Eigen::Vector3d velocity_i, velocity_j;//角点在归一化平面的速度
22 | double td_i, td_j;//处理IMU数据时用到的时间同步误差
23 | Eigen::Matrix tangent_base;
24 | double row_i, row_j;//角点图像坐标的纵坐标
25 | static Eigen::Matrix2d sqrt_info;
26 | static double sum_t;
27 | };
28 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/feature_manager.h:
--------------------------------------------------------------------------------
1 | #ifndef FEATURE_MANAGER_H
2 | #define FEATURE_MANAGER_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | using namespace std;
9 |
10 | #include
11 | using namespace Eigen;
12 |
13 | #include
14 | #include
15 |
16 | #include "parameters.h"
17 |
18 | class FeaturePerFrame
19 | {
20 | public:
21 | FeaturePerFrame(const Eigen::Matrix &_point, double td)
22 | {
23 | point.x() = _point(0);
24 | point.y() = _point(1);
25 | point.z() = _point(2);
26 | uv.x() = _point(3);
27 | uv.y() = _point(4);
28 | velocity.x() = _point(5);
29 | velocity.y() = _point(6);
30 | depth = _point(7);
31 | cur_td = td;
32 | }
33 | double cur_td;
34 | Vector3d point;
35 | Vector2d uv;
36 | Vector2d velocity;
37 | double z;
38 | bool is_used;
39 | double parallax;
40 | MatrixXd A;
41 | VectorXd b;
42 | double dep_gradient;
43 | double depth; // lidar depth, initialized with -1 from feature points in feature tracker node
44 | };
45 |
46 | class FeaturePerId
47 | {
48 | public:
49 | const int feature_id;
50 | int start_frame;
51 | vector feature_per_frame;
52 |
53 | int used_num;
54 | bool is_outlier;
55 | bool is_margin;
56 | double estimated_depth;
57 | bool lidar_depth_flag;
58 | int solve_flag; // 0 haven't solve yet; 1 solve succ; 2 solve fail;
59 |
60 | Vector3d gt_p;
61 |
62 | FeaturePerId(int _feature_id, int _start_frame, double _measured_depth)
63 | : feature_id(_feature_id), start_frame(_start_frame),
64 | used_num(0), estimated_depth(-1.0), lidar_depth_flag(false), solve_flag(0)
65 | {
66 | if (_measured_depth > 0)
67 | {
68 | estimated_depth = _measured_depth;
69 | lidar_depth_flag = true;
70 | }
71 | else
72 | {
73 | estimated_depth = -1;
74 | lidar_depth_flag = false;
75 | }
76 | }
77 |
78 | int endFrame();
79 | };
80 |
81 | class FeatureManager
82 | {
83 | public:
84 | FeatureManager(Matrix3d _Rs[]);
85 |
86 | void setRic(Matrix3d _ric[]);
87 |
88 | void clearState();
89 |
90 | int getFeatureCount();
91 |
92 | bool addFeatureCheckParallax(int frame_count, const map>>> &image, double td);
93 | void debugShow();
94 | vector> getCorresponding(int frame_count_l, int frame_count_r);
95 |
96 | //void updateDepth(const VectorXd &x);
97 | void setDepth(const VectorXd &x);
98 | void removeFailures();
99 | void clearDepth(const VectorXd &x);
100 | VectorXd getDepthVector();
101 | void triangulate(Vector3d Ps[], Vector3d tic[], Matrix3d ric[]);
102 | void removeBackShiftDepth(Eigen::Matrix3d marg_R, Eigen::Vector3d marg_P, Eigen::Matrix3d new_R, Eigen::Vector3d new_P);
103 | void removeBack();
104 | void removeFront(int frame_count);
105 | void removeOutlier();
106 | list feature;
107 | int last_track_num;
108 |
109 | private:
110 | double compensatedParallax2(const FeaturePerId &it_per_id, int frame_count);
111 | const Matrix3d *Rs;
112 | Matrix3d ric[NUM_OF_CAM];
113 | };
114 |
115 | #endif
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/initial/initial_ex_rotation.cpp:
--------------------------------------------------------------------------------
1 | #include "initial_ex_rotation.h"
2 |
3 | InitialEXRotation::InitialEXRotation(){
4 | frame_count = 0;
5 | Rc.push_back(Matrix3d::Identity());
6 | Rc_g.push_back(Matrix3d::Identity());
7 | Rimu.push_back(Matrix3d::Identity());
8 | ric = Matrix3d::Identity();
9 | }
10 |
11 | bool InitialEXRotation::CalibrationExRotation(vector> corres, Quaterniond delta_q_imu, Matrix3d &calib_ric_result)
12 | {
13 | frame_count++;
14 | Rc.push_back(solveRelativeR(corres));
15 | Rimu.push_back(delta_q_imu.toRotationMatrix());
16 | Rc_g.push_back(ric.inverse() * delta_q_imu * ric);
17 |
18 | Eigen::MatrixXd A(frame_count * 4, 4);
19 | A.setZero();
20 | int sum_ok = 0;
21 | for (int i = 1; i <= frame_count; i++)
22 | {
23 | Quaterniond r1(Rc[i]);
24 | Quaterniond r2(Rc_g[i]);
25 |
26 | double angular_distance = 180 / M_PI * r1.angularDistance(r2);
27 | ROS_DEBUG(
28 | "%d %f", i, angular_distance);
29 |
30 | double huber = angular_distance > 5.0 ? 5.0 / angular_distance : 1.0;
31 | ++sum_ok;
32 | Matrix4d L, R;
33 |
34 | double w = Quaterniond(Rc[i]).w();
35 | Vector3d q = Quaterniond(Rc[i]).vec();
36 | L.block<3, 3>(0, 0) = w * Matrix3d::Identity() + Utility::skewSymmetric(q);
37 | L.block<3, 1>(0, 3) = q;
38 | L.block<1, 3>(3, 0) = -q.transpose();
39 | L(3, 3) = w;
40 |
41 | Quaterniond R_ij(Rimu[i]);
42 | w = R_ij.w();
43 | q = R_ij.vec();
44 | R.block<3, 3>(0, 0) = w * Matrix3d::Identity() - Utility::skewSymmetric(q);
45 | R.block<3, 1>(0, 3) = q;
46 | R.block<1, 3>(3, 0) = -q.transpose();
47 | R(3, 3) = w;
48 |
49 | A.block<4, 4>((i - 1) * 4, 0) = huber * (L - R);
50 | }
51 |
52 | JacobiSVD svd(A, ComputeFullU | ComputeFullV);
53 | Matrix x = svd.matrixV().col(3);
54 | Quaterniond estimated_R(x);
55 | ric = estimated_R.toRotationMatrix().inverse();
56 | //cout << svd.singularValues().transpose() << endl;
57 | //cout << ric << endl;
58 | Vector3d ric_cov;
59 | ric_cov = svd.singularValues().tail<3>();
60 | if (frame_count >= WINDOW_SIZE && ric_cov(1) > 0.25)
61 | {
62 | calib_ric_result = ric;
63 | return true;
64 | }
65 | else
66 | return false;
67 | }
68 |
69 | Matrix3d InitialEXRotation::solveRelativeR(const vector> &corres)
70 | {
71 | if (corres.size() >= 9)
72 | {
73 | vector ll, rr;
74 | for (int i = 0; i < int(corres.size()); i++)
75 | {
76 | ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1)));
77 | rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1)));
78 | }
79 | cv::Mat E = cv::findFundamentalMat(ll, rr);
80 | cv::Mat_ R1, R2, t1, t2;
81 | decomposeE(E, R1, R2, t1, t2);
82 |
83 | if (determinant(R1) + 1.0 < 1e-09)
84 | {
85 | E = -E;
86 | decomposeE(E, R1, R2, t1, t2);
87 | }
88 | double ratio1 = max(testTriangulation(ll, rr, R1, t1), testTriangulation(ll, rr, R1, t2));
89 | double ratio2 = max(testTriangulation(ll, rr, R2, t1), testTriangulation(ll, rr, R2, t2));
90 | cv::Mat_ ans_R_cv = ratio1 > ratio2 ? R1 : R2;
91 |
92 | Matrix3d ans_R_eigen;
93 | for (int i = 0; i < 3; i++)
94 | for (int j = 0; j < 3; j++)
95 | ans_R_eigen(j, i) = ans_R_cv(i, j);
96 | return ans_R_eigen;
97 | }
98 | return Matrix3d::Identity();
99 | }
100 |
101 | double InitialEXRotation::testTriangulation(const vector &l,
102 | const vector &r,
103 | cv::Mat_ R, cv::Mat_ t)
104 | {
105 | cv::Mat pointcloud;
106 | cv::Matx34f P = cv::Matx34f(1, 0, 0, 0,
107 | 0, 1, 0, 0,
108 | 0, 0, 1, 0);
109 | cv::Matx34f P1 = cv::Matx34f(R(0, 0), R(0, 1), R(0, 2), t(0),
110 | R(1, 0), R(1, 1), R(1, 2), t(1),
111 | R(2, 0), R(2, 1), R(2, 2), t(2));
112 | cv::triangulatePoints(P, P1, l, r, pointcloud);
113 | int front_count = 0;
114 | for (int i = 0; i < pointcloud.cols; i++)
115 | {
116 | double normal_factor = pointcloud.col(i).at(3);
117 |
118 | cv::Mat_ p_3d_l = cv::Mat(P) * (pointcloud.col(i) / normal_factor);
119 | cv::Mat_ p_3d_r = cv::Mat(P1) * (pointcloud.col(i) / normal_factor);
120 | if (p_3d_l(2) > 0 && p_3d_r(2) > 0)
121 | front_count++;
122 | }
123 | ROS_DEBUG("MotionEstimator: %f", 1.0 * front_count / pointcloud.cols);
124 | return 1.0 * front_count / pointcloud.cols;
125 | }
126 |
127 | void InitialEXRotation::decomposeE(cv::Mat E,
128 | cv::Mat_ &R1, cv::Mat_ &R2,
129 | cv::Mat_ &t1, cv::Mat_ &t2)
130 | {
131 | cv::SVD svd(E, cv::SVD::MODIFY_A);
132 | cv::Matx33d W(0, -1, 0,
133 | 1, 0, 0,
134 | 0, 0, 1);
135 | cv::Matx33d Wt(0, 1, 0,
136 | -1, 0, 0,
137 | 0, 0, 1);
138 | R1 = svd.u * cv::Mat(W) * svd.vt;
139 | R2 = svd.u * cv::Mat(Wt) * svd.vt;
140 | t1 = svd.u.col(2);
141 | t2 = -svd.u.col(2);
142 | }
143 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/initial/initial_ex_rotation.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include "../parameters.h"
5 | using namespace std;
6 |
7 | #include
8 |
9 | #include
10 | using namespace Eigen;
11 | #include
12 |
13 | /* This class help you to calibrate extrinsic rotation between imu and camera when your totally don't konw the extrinsic parameter */
14 | class InitialEXRotation
15 | {
16 | public:
17 | InitialEXRotation();
18 | bool CalibrationExRotation(vector> corres, Quaterniond delta_q_imu, Matrix3d &calib_ric_result);
19 | private:
20 | Matrix3d solveRelativeR(const vector> &corres);
21 |
22 | double testTriangulation(const vector &l,
23 | const vector &r,
24 | cv::Mat_ R, cv::Mat_ t);
25 | void decomposeE(cv::Mat E,
26 | cv::Mat_ &R1, cv::Mat_ &R2,
27 | cv::Mat_ &t1, cv::Mat_ &t2);
28 | int frame_count;
29 |
30 | vector< Matrix3d > Rc;
31 | vector< Matrix3d > Rimu;
32 | vector< Matrix3d > Rc_g;
33 | Matrix3d ric;
34 | };
35 |
36 |
37 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/initial/initial_sfm.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | using namespace Eigen;
12 | using namespace std;
13 |
14 |
15 |
16 | struct SFMFeature
17 | {
18 | bool state;
19 | int id;
20 | vector> observation;
21 | double position[3];
22 | double depth;
23 | };
24 |
25 | struct ReprojectionError3D
26 | {
27 | ReprojectionError3D(double observed_u, double observed_v)
28 | :observed_u(observed_u), observed_v(observed_v)
29 | {}
30 |
31 | template
32 | bool operator()(const T* const camera_R, const T* const camera_T, const T* point, T* residuals) const
33 | {
34 | T p[3];
35 | ceres::QuaternionRotatePoint(camera_R, point, p);
36 | p[0] += camera_T[0]; p[1] += camera_T[1]; p[2] += camera_T[2];
37 | T xp = p[0] / p[2];
38 | T yp = p[1] / p[2];
39 | residuals[0] = xp - T(observed_u);
40 | residuals[1] = yp - T(observed_v);
41 | return true;
42 | }
43 |
44 | static ceres::CostFunction* Create(const double observed_x,
45 | const double observed_y)
46 | {
47 | return (new ceres::AutoDiffCostFunction<
48 | ReprojectionError3D, 2, 4, 3, 3>(
49 | new ReprojectionError3D(observed_x,observed_y)));
50 | }
51 |
52 | double observed_u;
53 | double observed_v;
54 | };
55 |
56 | class GlobalSFM
57 | {
58 | public:
59 | GlobalSFM();
60 | bool construct(int frame_num, Quaterniond* q, Vector3d* T, int l,
61 | const Matrix3d relative_R, const Vector3d relative_T,
62 | vector &sfm_f, map &sfm_tracked_points);
63 |
64 | private:
65 | bool solveFrameByPnP(Matrix3d &R_initial, Vector3d &P_initial, int i, vector &sfm_f);
66 |
67 | void triangulatePoint(Eigen::Matrix &Pose0, Eigen::Matrix &Pose1,
68 | Vector2d &point0, Vector2d &point1, Vector3d &point_3d);
69 | void triangulateTwoFrames(int frame0, Eigen::Matrix &Pose0,
70 | int frame1, Eigen::Matrix &Pose1,
71 | vector &sfm_f);
72 |
73 | int feature_num;
74 | };
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/initial/solve_5pts.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | using namespace std;
5 |
6 | #include
7 | //#include
8 | #include
9 | using namespace Eigen;
10 |
11 | #include
12 |
13 | class MotionEstimator
14 | {
15 | public:
16 |
17 | bool solveRelativeRT(const vector> &corres, Matrix3d &R, Vector3d &T);
18 |
19 | private:
20 | double testTriangulation(const vector &l,
21 | const vector &r,
22 | cv::Mat_ R, cv::Mat_ t);
23 | void decomposeE(cv::Mat E,
24 | cv::Mat_ &R1, cv::Mat_ &R2,
25 | cv::Mat_ &t1, cv::Mat_ &t2);
26 | };
27 |
28 |
29 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/parameters.cpp:
--------------------------------------------------------------------------------
1 | #include "parameters.h"
2 | #include "yaml-cpp/yaml.h"
3 | std::string PROJECT_NAME;
4 |
5 | double INIT_DEPTH;
6 | double MIN_PARALLAX; // 根据平均视差决定merge最老帧还是次新帧
7 | double ACC_N, ACC_W; // 加速度白噪声和随机游走
8 | double GYR_N, GYR_W; // 角速度白噪声和随机游走
9 | // camera to imu 外参
10 | std::vector RIC; // 旋转
11 | std::vector TIC; // 平移
12 |
13 | Eigen::Vector3d G{0.0, 0.0, 9.8};
14 | // std::string vio_trajectory_path;
15 | double BIAS_ACC_THRESHOLD;
16 | double BIAS_GYR_THRESHOLD;
17 | double SOLVER_TIME; // 非线性优化的时间限制
18 | int NUM_ITERATIONS;
19 | int ESTIMATE_EXTRINSIC; // 在线外参标定 (camera to imu)
20 | int ESTIMATE_TD; // 相机与imu时间戳同步
21 | int ROLLING_SHUTTER;
22 | std::string EX_CALIB_RESULT_PATH;
23 | std::string IMU_TOPIC;
24 | double ROW, COL;
25 | double TD, TR;
26 |
27 | // double Fx,Fy,Cx,Cy;
28 | int USE_LIDAR;
29 | int ALIGN_CAMERA_LIDAR_COORDINATE;
30 | /**
31 | * @brief modified
32 | * L_RX_I represents L^RX_I
33 | */
34 | double L_TX_I;
35 | double L_TY_I;
36 | double L_TZ_I;
37 | double L_RX_I;
38 | double L_RY_I;
39 | double L_RZ_I;
40 |
41 | int imu_Hz;
42 |
43 | double C_TX_L;
44 | double C_TY_L;
45 | double C_TZ_L;
46 | double C_RX_L;
47 | double C_RY_L;
48 | double C_RZ_L;
49 |
50 | double imuGravity;
51 | double imuAccNoise;
52 | double imuGyrNoise;
53 | double imuAccBiasN;
54 | double imuGyrBiasN;
55 | int imuHz;
56 |
57 | //从配置文件中读取参数
58 | void readParameters(ros::NodeHandle &n)
59 | {
60 | std::string config_file;
61 | n.getParam("vins_config_file", config_file);
62 | cv::FileStorage fsSettings(config_file, cv::FileStorage::READ);
63 | if(!fsSettings.isOpened())
64 | {
65 | std::cerr << "ERROR: Wrong path to settings" << std::endl;
66 | }
67 |
68 | fsSettings["project_name"] >> PROJECT_NAME;
69 | std::string pkg_path = ros::package::getPath(PROJECT_NAME);
70 |
71 | fsSettings["imu_topic"] >> IMU_TOPIC;
72 |
73 | fsSettings["use_lidar"] >> USE_LIDAR;
74 | fsSettings["align_camera_lidar_estimation"] >> ALIGN_CAMERA_LIDAR_COORDINATE;
75 |
76 | // fsSettings["vio_trajectory_path"]>>vio_trajectory_path;
77 |
78 | SOLVER_TIME = fsSettings["max_solver_time"];
79 | NUM_ITERATIONS = fsSettings["max_num_iterations"];
80 | MIN_PARALLAX = fsSettings["keyframe_parallax"];
81 | MIN_PARALLAX = MIN_PARALLAX / FOCAL_LENGTH;
82 |
83 | /**
84 | * @brief modified
85 | *
86 | */
87 | L_TX_I = fsSettings["imu_to_lidar_tx"];
88 | L_TY_I = fsSettings["imu_to_lidar_ty"];
89 | L_TZ_I = fsSettings["imu_to_lidar_tz"];
90 | L_RX_I = fsSettings["imu_to_lidar_rx"];
91 | L_RY_I = fsSettings["imu_to_lidar_ry"];
92 | L_RZ_I = fsSettings["imu_to_lidar_rz"];
93 |
94 | imu_Hz = fsSettings["imu_hz"];
95 |
96 | C_TX_L = fsSettings["lidar_to_cam_tx"];
97 | C_TY_L = fsSettings["lidar_to_cam_ty"];
98 | C_TZ_L = fsSettings["lidar_to_cam_tz"];
99 | C_RX_L = fsSettings["lidar_to_cam_rx"];
100 | C_RY_L = fsSettings["lidar_to_cam_ry"];
101 | C_RZ_L = fsSettings["lidar_to_cam_rz"];
102 |
103 | imuGravity = fsSettings["g_norm"];
104 | imuAccNoise = fsSettings["acc_n"];
105 | imuGyrNoise = fsSettings["gyr_n"];
106 | imuAccBiasN = fsSettings["acc_w"];
107 | imuGyrBiasN = fsSettings["gyr_w"];
108 | imuHz = fsSettings["imu_hz"];
109 |
110 |
111 | ACC_N = fsSettings["acc_n"];
112 | ACC_W = fsSettings["acc_w"];
113 | GYR_N = fsSettings["gyr_n"];
114 | GYR_W = fsSettings["gyr_w"];
115 | G.z() = fsSettings["g_norm"];
116 | ROW = fsSettings["image_height"];
117 | COL = fsSettings["image_width"];
118 |
119 | ROS_INFO("Image dimention: ROW: %f COL: %f ", ROW, COL);
120 |
121 | ESTIMATE_EXTRINSIC = fsSettings["estimate_extrinsic"];
122 | if (ESTIMATE_EXTRINSIC == 2)
123 | {
124 | ROS_INFO("have no prior about extrinsic param, calibrate extrinsic param");
125 | RIC.push_back(Eigen::Matrix3d::Identity());
126 | TIC.push_back(Eigen::Vector3d::Zero());
127 | EX_CALIB_RESULT_PATH = pkg_path + "/config/extrinsic_parameter.csv";
128 |
129 | }
130 | else
131 | {
132 | //优化外参
133 | if ( ESTIMATE_EXTRINSIC == 1)
134 | {
135 | ROS_INFO(" Optimize extrinsic param around initial guess!");
136 | EX_CALIB_RESULT_PATH = pkg_path + "/config/extrinsic_parameter.csv";
137 | }
138 | if (ESTIMATE_EXTRINSIC == 0)
139 | ROS_INFO(" Fix extrinsic param.");
140 |
141 | cv::Mat cv_R, cv_T;
142 | fsSettings["extrinsicRotation"] >> cv_R;
143 | fsSettings["extrinsicTranslation"] >> cv_T;
144 | Eigen::Matrix3d eigen_R;
145 | Eigen::Vector3d eigen_T;
146 | cv::cv2eigen(cv_R, eigen_R);
147 | cv::cv2eigen(cv_T, eigen_T);
148 | Eigen::Quaterniond Q(eigen_R);
149 | eigen_R = Q.normalized();
150 | RIC.push_back(eigen_R);
151 | TIC.push_back(eigen_T);
152 | ROS_INFO_STREAM("Extrinsic_R : " << std::endl << RIC[0]);
153 | ROS_INFO_STREAM("Extrinsic_T : " << std::endl << TIC[0].transpose());
154 |
155 | }
156 |
157 | INIT_DEPTH = 5.0;
158 | BIAS_ACC_THRESHOLD = 0.1;
159 | BIAS_GYR_THRESHOLD = 0.1;
160 |
161 | TD = fsSettings["td"];
162 | ESTIMATE_TD = fsSettings["estimate_td"];
163 | if (ESTIMATE_TD)
164 | ROS_INFO_STREAM("Unsynchronized sensors, online estimate time offset, initial td: " << TD);
165 | else
166 | ROS_INFO_STREAM("Synchronized sensors, fix time offset: " << TD);
167 |
168 | ROLLING_SHUTTER = fsSettings["rolling_shutter"];
169 | if (ROLLING_SHUTTER)
170 | {
171 | TR = fsSettings["rolling_shutter_tr"];
172 | ROS_INFO_STREAM("rolling shutter camera, read out time per line: " << TR);
173 | }
174 | else
175 | {
176 | TR = 0;
177 | }
178 |
179 | fsSettings.release();
180 | usleep(100);
181 | }
182 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/parameters.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include "utility/utility.h"
7 | #include
8 | #include
9 |
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include
17 | #include
18 |
19 | #include
20 | #include
21 | #include
22 | #include
23 | #include
24 | #include
25 | #include
26 | #include
27 | #include
28 | #include
29 | #include
30 | #include
31 |
32 | #include
33 | #include
34 | #include
35 | #include
36 |
37 | #include
38 | #include
39 | #include
40 | #include
41 | #include
42 | #include
43 | #include
44 | #include
45 | #include
46 | #include
47 | #include
48 | #include
49 | #include
50 | #include
51 | #include
52 | #include
53 | #include
54 |
55 | const int WINDOW_SIZE = 10;
56 |
57 |
58 | const double FOCAL_LENGTH = 460.0;
59 | const int NUM_OF_CAM = 1;
60 | const int NUM_OF_F = 1000;
61 | // #define UNIT_SPHERE_ERROR 1
62 |
63 | // extern std::string vio_trajectory_path;
64 | extern double INIT_DEPTH;
65 | extern double MIN_PARALLAX;
66 | extern int ESTIMATE_EXTRINSIC;
67 |
68 | extern double ACC_N, ACC_W;
69 | extern double GYR_N, GYR_W;
70 |
71 | extern std::vector RIC;
72 | extern std::vector TIC;
73 | extern Eigen::Vector3d G;
74 |
75 | extern double BIAS_ACC_THRESHOLD;
76 | extern double BIAS_GYR_THRESHOLD;
77 | extern double SOLVER_TIME;
78 | extern int NUM_ITERATIONS;
79 | extern std::string EX_CALIB_RESULT_PATH;
80 | extern std::string PROJECT_NAME;
81 | extern std::string IMU_TOPIC;
82 | extern double TD;
83 | extern double TR;
84 | extern int ESTIMATE_TD;
85 | extern int ROLLING_SHUTTER;
86 | extern double ROW, COL;
87 | // extern double Fx,Fy,Cx,Cy;
88 | extern int USE_LIDAR;
89 | extern int ALIGN_CAMERA_LIDAR_COORDINATE;
90 | /**
91 | * @brief modified
92 | * L_RX_I represents L^RX_I
93 | */
94 | extern double L_TX_I;
95 | extern double L_TY_I;
96 | extern double L_TZ_I;
97 | extern double L_RX_I;
98 | extern double L_RY_I;
99 | extern double L_RZ_I;
100 |
101 | extern int imu_Hz;
102 |
103 | extern double C_TX_L;
104 | extern double C_TY_L;
105 | extern double C_TZ_L;
106 | extern double C_RX_L;
107 | extern double C_RY_L;
108 | extern double C_RZ_L;
109 |
110 | extern double imuGravity;
111 | extern double imuAccNoise;
112 | extern double imuGyrNoise;
113 | extern double imuAccBiasN;
114 | extern double imuGyrBiasN;
115 | extern int imuHz;
116 |
117 | void readParameters(ros::NodeHandle &n);
118 |
119 | enum SIZE_PARAMETERIZATION
120 | {
121 | SIZE_POSE = 7, // P,Q组合成pose, 6自由度7DOF
122 | SIZE_SPEEDBIAS = 9, // V, Ba, Bg, 共9自由度
123 | SIZE_FEATURE = 1 // 深度
124 | };
125 |
126 | enum StateOrder
127 | {
128 | O_P = 0,
129 | O_R = 3,
130 | O_V = 6,
131 | O_BA = 9,
132 | O_BG = 12
133 | };
134 |
135 | enum NoiseOrder
136 | {
137 | O_AN = 0,
138 | O_GN = 3,
139 | O_AW = 6,
140 | O_GW = 9
141 | };
142 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/utility/CameraPoseVisualization.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | class CameraPoseVisualization {
11 | public:
12 | std::string m_marker_ns;
13 |
14 | CameraPoseVisualization(float r, float g, float b, float a);
15 |
16 | void setImageBoundaryColor(float r, float g, float b, float a=1.0);
17 | void setOpticalCenterConnectorColor(float r, float g, float b, float a=1.0);
18 | void setScale(double s);
19 | void setLineWidth(double width);
20 |
21 | void add_pose(const Eigen::Vector3d& p, const Eigen::Quaterniond& q);
22 | void reset();
23 |
24 | void publish_by(ros::Publisher& pub, const std_msgs::Header& header);
25 | void add_edge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1);
26 | void add_loopedge(const Eigen::Vector3d& p0, const Eigen::Vector3d& p1);
27 | private:
28 | std::vector m_markers;
29 | std_msgs::ColorRGBA m_image_boundary_color;
30 | std_msgs::ColorRGBA m_optical_center_connector_color;
31 | double m_scale;
32 | double m_line_width;
33 |
34 | static const Eigen::Vector3d imlt;
35 | static const Eigen::Vector3d imlb;
36 | static const Eigen::Vector3d imrt;
37 | static const Eigen::Vector3d imrb;
38 | static const Eigen::Vector3d oc ;
39 | static const Eigen::Vector3d lt0 ;
40 | static const Eigen::Vector3d lt1 ;
41 | static const Eigen::Vector3d lt2 ;
42 | };
43 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/utility/tic_toc.h:
--------------------------------------------------------------------------------
#pragma once

#include <ctime>
#include <cstdlib>
#include <chrono>

// Simple stopwatch: construction (or tic()) records a start time,
// toc() returns the elapsed time since the last tic() in milliseconds.
class TicToc
{
  public:
    TicToc()
    {
        tic();
    }

    // Restarts the stopwatch.
    void tic()
    {
        start = std::chrono::system_clock::now();
    }

    // Returns elapsed milliseconds since the last tic().
    double toc()
    {
        end = std::chrono::system_clock::now();
        std::chrono::duration<double> elapsed_seconds = end - start;
        return elapsed_seconds.count() * 1000;
    }

  private:
    std::chrono::time_point<std::chrono::system_clock> start, end;
};
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/utility/utility.cpp:
--------------------------------------------------------------------------------
1 | #include "utility.h"
2 |
3 | Eigen::Matrix3d Utility::g2R(const Eigen::Vector3d &g)
4 | {
5 | Eigen::Matrix3d R0;
6 | Eigen::Vector3d ng1 = g.normalized();
7 | Eigen::Vector3d ng2{0, 0, 1.0};
8 | R0 = Eigen::Quaterniond::FromTwoVectors(ng1, ng2).toRotationMatrix();
9 | double yaw = Utility::R2ypr(R0).x();
10 | R0 = Utility::ypr2R(Eigen::Vector3d{-yaw, 0, 0}) * R0;
11 | // R0 = Utility::ypr2R(Eigen::Vector3d{-90, 0, 0}) * R0;
12 | return R0;
13 | }
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_estimator/utility/visualization.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 | #include
14 | #include
15 | #include
16 | #include "CameraPoseVisualization.h"
17 | #include
18 | #include "../estimator.h"
19 | #include "../parameters.h"
20 | #include
21 |
22 | extern ros::Publisher pub_odometry;
23 | extern ros::Publisher pub_path, pub_pose;
24 | extern ros::Publisher pub_cloud, pub_map;
25 | extern ros::Publisher pub_key_poses;
26 | extern ros::Publisher pub_ref_pose, pub_cur_pose;
27 | extern ros::Publisher pub_key;
28 | extern nav_msgs::Path path;
29 | extern ros::Publisher pub_pose_graph;
30 | extern int IMAGE_ROW, IMAGE_COL;
31 |
32 |
33 | void registerPub(ros::NodeHandle &n);
34 |
35 | tf::Transform transformConversion(const tf::StampedTransform& t);
36 |
37 | void pubLatestOdometry(const Eigen::Vector3d &P, const Eigen::Quaterniond &Q, const Eigen::Vector3d &V, const std_msgs::Header &header, const int &failureId);
38 |
39 | void printStatistics(const Estimator &estimator, double t);
40 |
41 | void pubOdometry(const Estimator &estimator, const std_msgs::Header &header);
42 |
43 | void pubInitialGuess(const Estimator &estimator, const std_msgs::Header &header);
44 |
45 | void pubKeyPoses(const Estimator &estimator, const std_msgs::Header &header);
46 |
47 | void pubCameraPose(const Estimator &estimator, const std_msgs::Header &header);
48 |
49 | void pubPointCloud(const Estimator &estimator, const std_msgs::Header &header);
50 |
51 | void pubTF(const Estimator &estimator, const std_msgs::Header &header);
52 |
53 | void pubKeyframe(const Estimator &estimator);
54 |
55 | void pubRelocalization(const Estimator &estimator);
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_feature/camera_models/Camera.cc:
--------------------------------------------------------------------------------
1 | #include "Camera.h"
2 | #include "ScaramuzzaCamera.h"
3 |
4 | #include
5 |
6 | namespace camodocal
7 | {
8 |
9 | Camera::Parameters::Parameters(ModelType modelType)
10 | : m_modelType(modelType)
11 | , m_imageWidth(0)
12 | , m_imageHeight(0)
13 | {
14 | switch (modelType)
15 | {
16 | case KANNALA_BRANDT:
17 | m_nIntrinsics = 8;
18 | break;
19 | case PINHOLE:
20 | m_nIntrinsics = 8;
21 | break;
22 | case SCARAMUZZA:
23 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
24 | break;
25 | case MEI:
26 | default:
27 | m_nIntrinsics = 9;
28 | }
29 | }
30 |
31 | Camera::Parameters::Parameters(ModelType modelType,
32 | const std::string& cameraName,
33 | int w, int h)
34 | : m_modelType(modelType)
35 | , m_cameraName(cameraName)
36 | , m_imageWidth(w)
37 | , m_imageHeight(h)
38 | {
39 | switch (modelType)
40 | {
41 | case KANNALA_BRANDT:
42 | m_nIntrinsics = 8;
43 | break;
44 | case PINHOLE:
45 | m_nIntrinsics = 8;
46 | break;
47 | case SCARAMUZZA:
48 | m_nIntrinsics = SCARAMUZZA_CAMERA_NUM_PARAMS;
49 | break;
50 | case MEI:
51 | default:
52 | m_nIntrinsics = 9;
53 | }
54 | }
55 |
// ---- Camera::Parameters / Camera accessors ------------------------------
// Mutable overloads return non-const references so callers (e.g. parameter
// readers) can assign through them; const overloads are read-only.

// Camera model type (mutable).
Camera::ModelType&
Camera::Parameters::modelType(void)
{
    return m_modelType;
}

// Camera name (mutable).
std::string&
Camera::Parameters::cameraName(void)
{
    return m_cameraName;
}

// Image width in pixels (mutable).
int&
Camera::Parameters::imageWidth(void)
{
    return m_imageWidth;
}

// Image height in pixels (mutable).
int&
Camera::Parameters::imageHeight(void)
{
    return m_imageHeight;
}

// Camera model type (read-only).
Camera::ModelType
Camera::Parameters::modelType(void) const
{
    return m_modelType;
}

// Camera name (read-only).
const std::string&
Camera::Parameters::cameraName(void) const
{
    return m_cameraName;
}

// Image width in pixels (read-only).
int
Camera::Parameters::imageWidth(void) const
{
    return m_imageWidth;
}

// Image height in pixels (read-only).
int
Camera::Parameters::imageHeight(void) const
{
    return m_imageHeight;
}

// Number of intrinsic parameters for this model (set by the constructors).
int
Camera::Parameters::nIntrinsics(void) const
{
    return m_nIntrinsics;
}

// Optional feature mask used by callers; empty if unset (mutable).
cv::Mat&
Camera::mask(void)
{
    return m_mask;
}

// Optional feature mask (read-only).
const cv::Mat&
Camera::mask(void) const
{
    return m_mask;
}
121 |
122 | void
123 | Camera::estimateExtrinsics(const std::vector& objectPoints,
124 | const std::vector& imagePoints,
125 | cv::Mat& rvec, cv::Mat& tvec) const
126 | {
127 | std::vector Ms(imagePoints.size());
128 | for (size_t i = 0; i < Ms.size(); ++i)
129 | {
130 | Eigen::Vector3d P;
131 | liftProjective(Eigen::Vector2d(imagePoints.at(i).x, imagePoints.at(i).y), P);
132 |
133 | P /= P(2);
134 |
135 | Ms.at(i).x = P(0);
136 | Ms.at(i).y = P(1);
137 | }
138 |
139 | // assume unit focal length, zero principal point, and zero distortion
140 | cv::solvePnP(objectPoints, Ms, cv::Mat::eye(3, 3, CV_64F), cv::noArray(), rvec, tvec);
141 | }
142 |
143 | double
144 | Camera::reprojectionDist(const Eigen::Vector3d& P1, const Eigen::Vector3d& P2) const
145 | {
146 | Eigen::Vector2d p1, p2;
147 |
148 | spaceToPlane(P1, p1);
149 | spaceToPlane(P2, p2);
150 |
151 | return (p1 - p2).norm();
152 | }
153 |
154 | double
155 | Camera::reprojectionError(const std::vector< std::vector >& objectPoints,
156 | const std::vector< std::vector >& imagePoints,
157 | const std::vector& rvecs,
158 | const std::vector& tvecs,
159 | cv::OutputArray _perViewErrors) const
160 | {
161 | int imageCount = objectPoints.size();
162 | size_t pointsSoFar = 0;
163 | double totalErr = 0.0;
164 |
165 | bool computePerViewErrors = _perViewErrors.needed();
166 | cv::Mat perViewErrors;
167 | if (computePerViewErrors)
168 | {
169 | _perViewErrors.create(imageCount, 1, CV_64F);
170 | perViewErrors = _perViewErrors.getMat();
171 | }
172 |
173 | for (int i = 0; i < imageCount; ++i)
174 | {
175 | size_t pointCount = imagePoints.at(i).size();
176 |
177 | pointsSoFar += pointCount;
178 |
179 | std::vector estImagePoints;
180 | projectPoints(objectPoints.at(i), rvecs.at(i), tvecs.at(i),
181 | estImagePoints);
182 |
183 | double err = 0.0;
184 | for (size_t j = 0; j < imagePoints.at(i).size(); ++j)
185 | {
186 | err += cv::norm(imagePoints.at(i).at(j) - estImagePoints.at(j));
187 | }
188 |
189 | if (computePerViewErrors)
190 | {
191 | perViewErrors.at(i) = err / pointCount;
192 | }
193 |
194 | totalErr += err;
195 | }
196 |
197 | return totalErr / pointsSoFar;
198 | }
199 |
200 | double
201 | Camera::reprojectionError(const Eigen::Vector3d& P,
202 | const Eigen::Quaterniond& camera_q,
203 | const Eigen::Vector3d& camera_t,
204 | const Eigen::Vector2d& observed_p) const
205 | {
206 | Eigen::Vector3d P_cam = camera_q.toRotationMatrix() * P + camera_t;
207 |
208 | Eigen::Vector2d p;
209 | spaceToPlane(P_cam, p);
210 |
211 | return (p - observed_p).norm();
212 | }
213 |
214 | void
215 | Camera::projectPoints(const std::vector& objectPoints,
216 | const cv::Mat& rvec,
217 | const cv::Mat& tvec,
218 | std::vector& imagePoints) const
219 | {
220 | // project 3D object points to the image plane
221 | imagePoints.reserve(objectPoints.size());
222 |
223 | //double
224 | cv::Mat R0;
225 | cv::Rodrigues(rvec, R0);
226 |
227 | Eigen::MatrixXd R(3,3);
228 | R << R0.at(0,0), R0.at(0,1), R0.at(0,2),
229 | R0.at(1,0), R0.at(1,1), R0.at(1,2),
230 | R0.at(2,0), R0.at(2,1), R0.at(2,2);
231 |
232 | Eigen::Vector3d t;
233 | t << tvec.at(0), tvec.at(1), tvec.at(2);
234 |
235 | for (size_t i = 0; i < objectPoints.size(); ++i)
236 | {
237 | const cv::Point3f& objectPoint = objectPoints.at(i);
238 |
239 | // Rotate and translate
240 | Eigen::Vector3d P;
241 | P << objectPoint.x, objectPoint.y, objectPoint.z;
242 |
243 | P = R * P + t;
244 |
245 | Eigen::Vector2d p;
246 | spaceToPlane(P, p);
247 |
248 | imagePoints.push_back(cv::Point2f(p(0), p(1)));
249 | }
250 | }
251 |
252 | }
253 |
--------------------------------------------------------------------------------
/src/src/visual_odometry/visual_feature/camera_models/Camera.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERA_H
2 | #define CAMERA_H
3 |
4 | #include