├── .gitignore ├── CMakeLists.txt ├── README.md ├── app └── larvioMain.cpp ├── cmake ├── FindEigen3.cmake └── FindSuiteSparse.cmake ├── config ├── euroc.yaml └── mynteye.yaml ├── docker └── Dockerfile ├── include ├── Initializer │ ├── DynamicInitializer.h │ ├── FlexibleInitializer.h │ ├── ImuPreintegration.h │ ├── StaticInitializer.h │ ├── feature_manager.h │ ├── initial_alignment.h │ ├── initial_sfm.h │ └── solve_5pts.h ├── ORB │ └── ORBDescriptor.h ├── larvio │ ├── feature.hpp │ ├── feature_msg.h │ ├── image_processor.h │ ├── imu_state.h │ ├── larvio.h │ └── math_utils.hpp ├── sensors │ ├── ImageData.hpp │ └── ImuData.hpp ├── utils │ └── DataReader.hpp └── visualization │ └── visualize.hpp ├── licenses ├── LICENCE_VINS_MONO.txt ├── LICENSE_MSCKF_VIO.txt └── LICENSE_ORB_SLAM2.txt ├── results ├── TX2_result.png ├── comparison.jpg ├── euroc_x8.gif ├── tumvi_c_1.jpg └── uzhfpv_o_f_3.jpg ├── ros_wrapper └── src │ └── larvio │ ├── CMakeLists.txt │ ├── include │ ├── System.h │ └── System_nodelet.h │ ├── launch │ ├── euroc │ │ └── larvio_euroc.launch │ └── rviz │ │ ├── larvio.rviz │ │ └── larvio_rviz.launch │ ├── nodelets.xml │ ├── package.xml │ └── src │ ├── System.cpp │ └── System_nodelet.cpp ├── run.sh └── src ├── DynamicInitializer.cpp ├── FlexibleInitializer.cpp ├── ORBDescriptor.cpp ├── StaticInitializer.cpp ├── feature_manager.cpp ├── image_processor.cpp ├── initial_alignment.cpp ├── initial_sfm.cpp ├── larvio.cpp └── solve_5pts.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | **/build/ 2 | **/devel/ 3 | bin/ 4 | .vscode/ 5 | *.*~ 6 | .DS_Store 7 | ros_wrapper/src/CMakeLists.txt 8 | ros_wrapper/.catkin_workspace -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8) 2 | 3 | set(CMAKE_BUILD_TYPE "Release") 4 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") 5 | 6 | project(larvio) 7 | 8 | set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) 9 | #set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) 10 | 11 | find_package(OpenCV REQUIRED) 12 | find_package(Boost REQUIRED) 13 | find_package(Ceres REQUIRED) 14 | find_package(Eigen3 REQUIRED) 15 | find_package(SuiteSparse REQUIRED) 16 | find_package(Pangolin REQUIRED) 17 | 18 | include_directories( 19 | ${PROJECT_SOURCE_DIR}/include 20 | ${OpenCV_INCLUDE_DIRS} 21 | ${SUITESPARSE_INCLUDE_DIRS} 22 | ${CERES_INCLUDE_DIRS} 23 | ${Boost_INCLUDE_DIR} 24 | ${EIGEN3_INCLUDE_DIR} 25 | ${Pangolin_INCLUDE_DIRS} 26 | ) 27 | 28 | # Image processor 29 | add_library(image_processor 30 | ${PROJECT_SOURCE_DIR}/src/image_processor.cpp 31 | ${PROJECT_SOURCE_DIR}/src/ORBDescriptor.cpp 32 | ) 33 | target_link_libraries(image_processor 34 | ${OpenCV_LIBRARIES} 35 | ) 36 | 37 | # Initializer 38 | # -- Static Initializer 39 | add_library(staticInitializer 40 | ${PROJECT_SOURCE_DIR}/src/StaticInitializer.cpp 41 | ) 42 | target_link_libraries(staticInitializer 43 | ${SUITESPARSE_LIBRARIES} 44 | ) 45 | # -- Dynamic Initializer 46 | add_library(dynamicInitializer 47 | ${PROJECT_SOURCE_DIR}/src/StaticInitializer.cpp 48 | ${PROJECT_SOURCE_DIR}/src/DynamicInitializer.cpp 49 | ${PROJECT_SOURCE_DIR}/src/feature_manager.cpp 50 | ${PROJECT_SOURCE_DIR}/src/initial_alignment.cpp 51 | ${PROJECT_SOURCE_DIR}/src/initial_sfm.cpp 52 | ${PROJECT_SOURCE_DIR}/src/solve_5pts.cpp 53 | ) 54 | target_link_libraries(dynamicInitializer 55 | ${SUITESPARSE_LIBRARIES} 56 | 
${CERES_LIBRARIES} 57 | ${OpenCV_LIBRARIES} 58 | ) 59 | # -- Flexible Initializer 60 | add_library(flexibleInitializer 61 | ${PROJECT_SOURCE_DIR}/src/FlexibleInitializer.cpp 62 | ) 63 | target_link_libraries(flexibleInitializer 64 | staticInitializer 65 | dynamicInitializer 66 | ) 67 | 68 | # Estimator 69 | add_library(estimator 70 | ${PROJECT_SOURCE_DIR}/src/larvio.cpp 71 | ) 72 | target_link_libraries(estimator 73 | flexibleInitializer 74 | ${SUITESPARSE_LIBRARIES} 75 | ${OpenCV_LIBRARIES} 76 | ) 77 | 78 | # App 79 | add_executable(larvio app/larvioMain.cpp) 80 | target_link_libraries(larvio 81 | image_processor 82 | estimator 83 | ${OpenCV_LIBS} 84 | ${Pangolin_LIBRARIES} 85 | ) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LARVIO 2 | LARVIO is short for Lightweight, Accurate and Robust monocular Visual Inertial Odometry, and is based on a hybrid EKF VIO. Its key feature is that features with long track lengths are augmented into the MSCKF filter state via 1D IDP, which provides accurate positioning results. 3 | 4 | The core algorithm of LARVIO depends on `Eigen`, `Boost`, `Suitesparse`, `Ceres` and `OpenCV`, which makes the algorithm highly portable. 5 | 6 | A single-thread toyish example as well as a ROS nodelet package for LARVIO is provided in this repo. 7 | 8 | Notice that the Hamilton quaternion convention is used in LARVIO, which is a little different from the JPL quaternion convention used in the traditional MSCKF community. The filter formulation is therefore derived from scratch. Please check our [CJA2020](https://www.sciencedirect.com/science/article/pii/S1000936120301722) and [Sensors2019](https://www.mdpi.com/1424-8220/19/8/1941/htm) papers for details. 9 | 10 |
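For readers who are not familiar with the two conventions, a minimal illustration of the difference is sketched below; the notation is ours and is not taken from the papers:
```latex
% Hamilton convention (used in LARVIO):           i j =  k
% JPL convention (common in MSCKF literature):    i j = -k
% The quaternion product therefore differs between the two, and with it
% the composition order, the error-state definition and the filter
% Jacobians, hence the re-derivation of the formulation.
% Note that the initializers in this repo store the identity quaternion
% scalar-last, i.e. orientation = (0, 0, 0, 1).
```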
11 | ## Results 12 | #### 1) Demo on EuRoC 13 | ![LARVIO on EuRoC](https://github.com/PetWorm/LARVIO/blob/master/results/euroc_x8.gif) 14 | 15 | #### 2) Trajectory RMSEs 16 | These are the results of an earlier version of LARVIO. Due to later changes, the current repo might not reproduce the exact results below. 17 | 18 | RMSEs on the EuRoC dataset are listed. 19 | 20 | The evaluations below were done using [PetWorm/sim3_evaluate_tool](https://github.com/PetWorm/sim3_evaluate_tool). 21 | 22 | In the newest update, the online imu-cam extrinsic and timestamp error calibration of VINS-MONO were turned on to explore its full potential. With this setup, however, the V102 sequence would fail; the VINS-MONO result for V102 below was therefore obtained without online calibration. 23 | 24 | The results of our algorithm have been repeatable in every run on every computer I have tested so far. 25 | 26 | ![comparison](https://github.com/PetWorm/LARVIO/blob/master/results/comparison.jpg) 27 | 28 | #### 3) TUM-VI Dataset 29 | 30 | 31 | #### 4) UZH-FPV Dataset 32 | 33 | 34 | 35 | ## Cross Platform Performance 36 | This package has been successfully deployed on ARM (Jetson Nano and Jetson TX2, realtime without GPU refinement). The performance is comparable to the results on PCs. 37 | 38 | Below is an experiment result from our office. A TX2-based multi-thread CPU-only implementation without ROS was developed for it. We used the [MYNT-EYE-D](https://github.com/slightech/MYNT-EYE-D-SDK) camera SDK to collect monocular images and IMU data, and estimated the camera poses in realtime. We walked out of the office into the corridor or the neighboring room, and returned to the start point (in the white circle) several times. 39 | 40 | ![TX2 implementation](https://github.com/PetWorm/LARVIO/blob/master/results/TX2_result.png) 41 | 42 | 43 | ## Acknowledgement 44 | This repo is for academic use only. 45 | 46 | LARVIO was originally developed based on [MSCKF_VIO](https://github.com/KumarRobotics/msckf_vio). Tremendous changes have been made, including the interface, visualization, visual front-end and filter details. 47 | 48 | LARVIO also benefits from [VINS-MONO](https://github.com/HKUST-Aerial-Robotics/VINS-Mono) and [ORB_SLAM2](https://github.com/raulmur/ORB_SLAM2). 49 | 50 | We would like to thank the authors of the repos above for their great contributions. We have kept the copyright notices of these repos. 51 | 52 | 53 | ## Introduction 54 | LARVIO is an EKF-based monocular VIO. Loop closure is not applied in our algorithm. 55 | #### 1) Hybrid EKF with 1D IDP 56 | A hybrid EKF architecture is utilized, based on the work of [Mingyang Li](http://roboticsproceedings.org/rss08/p31.pdf). It augments features with long track lengths into the filter state. In LARVIO, a One-Dimensional Inverse Depth Parametrization is used to parametrize the augmented feature state, which differs from Li's original 3d solution. This improves the computational efficiency compared to the 3d solution. The positioning precision is also improved thanks to the complete constraints contributed by features with long track lengths. 57 | #### 2) Online calibration 58 | It is capable of online imu-cam extrinsic calibration, online timestamp error calibration and online imu intrinsic calibration. 59 | #### 3) Automatic initialization 60 | LARVIO can be automatically initialized in either static or dynamic scenarios. 61 | #### 4) Robust visual front-end 62 | We apply an ORB-descriptor-assisted optical flow tracking front-end to improve the feature tracking performance. 63 | #### 5) Closed-form ZUPT 64 | A closed-form ZUPT measurement update is proposed to cope with static scenes. 65 | 66 | 67 | ## Flexibility 68 | LARVIO is a flexible piece of software. 69 | 70 | Users can change the settings in the config file to run the VIO as an MSCKF-only, 3d hybrid or 1d hybrid solution. All the online calibration functions can also be turned on or off in each solution through the config file. 71 | 72 | 73 | ## Dependencies 74 | LARVIO depends on `Eigen`, `Boost`, `Suitesparse`, `Ceres` and `OpenCV` for the core algorithm. 75 | #### Toyish example 76 | The toyish example depends on `OpenCV` (4.1.2 on OSX and 3.4.6 on Ubuntu 16.04/18.04), and `Pangolin` is needed for visualization. Notice that an extra `gcc 7` installation is needed for Ubuntu 16.04. 77 | #### ROS nodelet 78 | The ROS nodelet package has been tested on `Kinetic` and `Melodic` for Ubuntu 16.04/18.04. The following ROS packages are needed: `tf`, `cv_bridge`, `message_filters` and `image_transport`. 79 | 80 | 81 | ## Usage 82 | This part shows how to run LARVIO on the [EuRoC dataset](https://projects.asl.ethz.ch/datasets/doku.php?id=kmavvisualinertialdatasets). 83 | #### Toyish example 84 | The toyish LARVIO example is a `CMake`-based project. After installing the dependencies, use the commands below to compile it: 85 | ``` 86 | cd LARVIO 87 | mkdir build 88 | cd build 89 | cmake -D CMAKE_BUILD_TYPE=Release .. 90 | make 91 | ``` 92 | An example is given in `LARVIO/run.sh` to show how to run it; a sketch of such an invocation is also shown below.
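Since the content of `run.sh` is not reproduced in this document, below is a rough sketch of what an EuRoC invocation of the built binary could look like, assembled from the usage string in `app/larvioMain.cpp`; the dataset location is a placeholder and the authoritative commands are the ones in `run.sh`:
```
# Arguments (see app/larvioMain.cpp):
#   1) IMU csv  2) image list csv  3) image folder  4) config file
cd LARVIO/build
./larvio \
    /PATH/TO/EuRoC/MH_01_easy/mav0/imu0/data.csv \
    /PATH/TO/EuRoC/MH_01_easy/mav0/cam0/data.csv \
    /PATH/TO/EuRoC/MH_01_easy/mav0/cam0/data \
    ../config/euroc.yaml
```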
93 | #### ROS nodelet 94 | A ROS nodelet package is provided in `LARVIO/ros_wrapper`. It has been tested on `Kinetic` and `Melodic`. Use the commands below to compile the nodelet: 95 | ``` 96 | cd YOUR_PATH/LARVIO/ros_wrapper 97 | catkin_make 98 | ``` 99 | After building it, launch LARVIO with: 100 | ``` 101 | . YOUR_PATH/LARVIO/ros_wrapper/devel/setup.bash 102 | roslaunch larvio larvio_euroc.launch 103 | ``` 104 | Optionally, open a new terminal and launch `rviz` for visualization: 105 | ``` 106 | . YOUR_PATH/LARVIO/ros_wrapper/devel/setup.bash 107 | roslaunch larvio larvio_rviz.launch 108 | ``` 109 | Open a new terminal to play the dataset: 110 | ``` 111 | rosbag play MH_01_easy.bag 112 | ``` 113 | 114 | 115 | ## Docker 116 | A `Dockerfile` is provided in `LARVIO/docker`. After building it, you need to load the dataset and modify `run.sh` in the container to run the toyish example, or use `roslaunch` to run the ROS package. Also, a GUI is needed on the host to display the Pangolin and rviz views. 117 | 118 | There is also a VNC docker image which is convenient for monitoring the rviz view. Click [petworm/vnc-larvio-playground](https://hub.docker.com/r/petworm/vnc-larvio-playground) to directly pull this image, or build it from source with [PetWorm/docker-larvio-playground](https://github.com/PetWorm/docker-larvio-playground). 119 | 120 | 121 | ## Related Works 122 | Please cite our CJA paper if you use LARVIO in your research: 123 | ```txt 124 | @article{qiu2020lightweight, 125 | title={Lightweight hybrid Visual-Inertial Odometry with Closed-Form Zero Velocity Update}, 126 | author={Qiu, Xiaochen and Zhang, Hai and Fu, Wenxing}, 127 | journal={Chinese Journal of Aeronautics}, 128 | year={2020}, 129 | publisher={Elsevier} 130 | } 131 | ``` 132 | 133 | An earlier work illustrating some parts of LARVIO is listed below: 134 | ```txt 135 | @article{qiu2019monocular, 136 | title={Monocular Visual-Inertial Odometry with an Unbiased Linear System Model and Robust Feature Tracking Front-End}, 137 | author={Qiu, Xiaochen and Zhang, Hai and Fu, Wenxing and Zhao, Chenxu and Jin, Yanqiong}, 138 | journal={Sensors}, 139 | volume={19}, 140 | number={8}, 141 | pages={1941}, 142 | year={2019}, 143 | publisher={Multidisciplinary Digital Publishing Institute} 144 | } 145 | ``` -------------------------------------------------------------------------------- /app/larvioMain.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * @Description: Main function to boost LARVIO. 3 | * @Author: Xiaochen Qiu 4 | */ 5 | 6 | 7 | #include 8 | 9 | #include "utils/DataReader.hpp" 10 | 11 | #include "opencv2/core.hpp" 12 | #include "opencv2/highgui.hpp" 13 | 14 | #include "sensors/ImageData.hpp" 15 | 16 | #include "larvio/image_processor.h" 17 | #include "larvio/larvio.h" 18 | 19 | #include "Eigen/Dense" 20 | 21 | #include "visualization/visualize.hpp" 22 | 23 | using namespace std; 24 | 25 | 26 | int main(int argc, char **argv) { 27 | if(argc != 5) { 28 | cerr << endl << "Usage: ./larvio path_to_imu/data.csv path_to_cam0/data.csv path_to_cam0/data config_file_path" << endl; 29 | return 1; 30 | } 31 | 32 | // Read sensors 33 | vector allImuData; 34 | vector allImgInfo; 35 | larvio::loadImuFile(argv[1], allImuData); 36 | larvio::loadImageList(argv[2], allImgInfo); 37 | 38 | // Path of config file 39 | std::string config_file(argv[4]); 40 | 41 | // Initialize image processer. 42 | larvio::ImageProcessorPtr ImgProcesser; 43 | ImgProcesser.reset(new larvio::ImageProcessor(config_file)); 44 | if (!ImgProcesser->initialize()) { 45 | cerr << "Image Processer initialization failed!"
<< endl; 46 | return 1; 47 | } 48 | 49 | // Initialize estimator. 50 | larvio::LarVioPtr Estimator; 51 | Estimator.reset(new larvio::LarVio(config_file)); 52 | if (!Estimator->initialize()) { 53 | cerr << "Estimator initialization failed!" << endl; 54 | return 1; 55 | } 56 | 57 | // Initialize visualizer 58 | pangolin::CreateWindowAndBind("LARVIO Viewer",1024,768); 59 | glEnable(GL_DEPTH_TEST); // 3D Mouse handler requires depth testing to be enabled 60 | glEnable(GL_BLEND); // Issue specific OpenGl we might need 61 | glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); 62 | pangolin::CreatePanel("menu").SetBounds(0.0,1.0,0.0,pangolin::Attach::Pix(175)); 63 | pangolin::Var menuFollowCamera("menu.Follow Camera",true,true); 64 | pangolin::Var menuShowActivePoints("menu.Show Active Points",true,true); 65 | pangolin::Var menuShowStablePoints("menu.Show Stable Points",true,true); 66 | pangolin::Var menuShowSlideWindow("menu.Show Slide Window",true,true); 67 | pangolin::OpenGlRenderState s_cam( // Define Camera Render Object (for view / scene browsing) 68 | pangolin::ProjectionMatrix(1024,768,500,500,512,389,0.1,1000), 69 | pangolin::ModelViewLookAt(0,3.5,9, 0,0,0, 0,-1,0) 70 | ); 71 | pangolin::View& d_cam = pangolin::CreateDisplay() // Add named OpenGL viewport to window and provide 3D Handler 72 | .SetBounds(0.0, 1.0, pangolin::Attach::Pix(175), 1.0, -1024.0f/768.0f) 73 | .SetHandler(new pangolin::Handler3D(s_cam)); 74 | pangolin::View& d_image = pangolin::CreateDisplay() // Add named OpenGL viewport to image and provide 3D Handler 75 | .SetBounds(pangolin::Attach::Pix(0),1.0f,pangolin::Attach::Pix(175),1/3.5f,752.0/480) 76 | .SetLock(pangolin::LockLeft, pangolin::LockBottom); 77 | bool bFollow = true; 78 | pangolin::OpenGlMatrix Tbw_pgl; 79 | std::vector vPoses; 80 | std::vector vPoses_SW; 81 | std::map mActiveMapPoints; 82 | std::map mStableMapPoints; 83 | 84 | // Main loop 85 | int k = 0; 86 | std::vector imu_msg_buffer; 87 | for(size_t j=0; jtimeStampToSec = allImgInfo[j].timeStampToSec; 95 | imgPtr->image = cv::imread(fullPath,0); 96 | 97 | // get imus 98 | while (allImuData[k].timeStampToSec-imgPtr->timeStampToSec < 0.05 && kprocessImage(imgPtr, imu_msg_buffer, features); // process image 108 | int64 end_time_fe = cv::getTickCount(); 109 | double processing_time_fe = (end_time_fe - start_time_fe)/cv::getTickFrequency(); 110 | double processing_time_be; 111 | bool bPubOdo = false; 112 | if (bProcess) { 113 | int64 start_time_be = cv::getTickCount(); 114 | bPubOdo = Estimator->processFeatures(features, imu_msg_buffer); // state estimation 115 | int64 end_time_be = cv::getTickCount(); 116 | processing_time_be = (end_time_be - start_time_be)/cv::getTickFrequency(); 117 | } 118 | if (bPubOdo) { // Visualization 119 | // draw 3D 120 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 121 | larvio::visualize::GetCurrentOpenGLPoseMatrix(Tbw_pgl, Estimator->getTbw()); 122 | if(menuFollowCamera && bFollow) { 123 | s_cam.Follow(Tbw_pgl); 124 | } 125 | else if(menuFollowCamera && !bFollow) { 126 | s_cam.SetModelViewMatrix(pangolin::ModelViewLookAt(0,3.5,9, 0,0,0, 0,-1,0)); 127 | s_cam.Follow(Tbw_pgl); 128 | bFollow = true; 129 | } 130 | else if(!menuFollowCamera && bFollow) { 131 | bFollow = false; 132 | } 133 | d_cam.Activate(s_cam); 134 | glClearColor(0.0f,0.0f,0.0f,1.0f); 135 | larvio::visualize::DrawCurrentPose(Tbw_pgl); 136 | larvio::visualize::DrawKeyFrames(vPoses); 137 | if (menuShowSlideWindow) { 138 | vector swPoses; 139 | Estimator->getSwPoses(swPoses); 140 | 
larvio::visualize::GetSwOpenGLPoseMatrices(vPoses_SW, swPoses); 141 | larvio::visualize::DrawSlideWindow(vPoses_SW); 142 | } 143 | if (menuShowActivePoints) { 144 | Estimator->getActiveeMapPointPositions(mActiveMapPoints); 145 | larvio::visualize::DrawActiveMapPoints(mActiveMapPoints); 146 | mActiveMapPoints.clear(); 147 | } 148 | if (menuShowStablePoints) { 149 | Estimator->getStableMapPointPositions(mStableMapPoints); 150 | larvio::visualize::DrawStableMapPoints(mStableMapPoints); 151 | } 152 | 153 | // draw tracking image 154 | cv::Mat img4Show; 155 | ImgProcesser->getVisualImg().copyTo(img4Show); 156 | stringstream ss; 157 | ss << "fps: " << 1.0/(processing_time_fe+processing_time_be) << " HZ"; 158 | string msg = ss.str(); 159 | int baseLine=0; 160 | cv::Size textSize = cv::getTextSize(msg, 0, 0.01, 10, &baseLine); 161 | cv::Point textOrigin(2*baseLine,textSize.height+4*baseLine); 162 | putText(img4Show, msg, textOrigin, 1, 2, cv::Scalar(0,165,255), 2); 163 | pangolin::GlTexture imageTexture(img4Show.cols,img4Show.rows);; 164 | imageTexture.Upload(img4Show.data,GL_BGR,GL_UNSIGNED_BYTE); 165 | d_image.Activate(); 166 | glColor3f(1.0,1.0,1.0); 167 | imageTexture.RenderToViewportFlipY(); 168 | 169 | pangolin::FinishFrame(); 170 | 171 | vPoses.push_back(Tbw_pgl); 172 | } 173 | } 174 | 175 | cout << "Totally " << mStableMapPoints.size() << " SLAM points" << endl; 176 | 177 | // keep the final scene in pangolin 178 | while (!pangolin::ShouldQuit()) { 179 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 180 | if(menuFollowCamera && bFollow) { 181 | s_cam.Follow(Tbw_pgl); 182 | } 183 | else if(menuFollowCamera && !bFollow) { 184 | s_cam.SetModelViewMatrix(pangolin::ModelViewLookAt(0,3.5,9, 0,0,0, 0,-1,0)); 185 | s_cam.Follow(Tbw_pgl); 186 | bFollow = true; 187 | } 188 | else if(!menuFollowCamera && bFollow) { 189 | bFollow = false; 190 | } 191 | d_cam.Activate(s_cam); 192 | glClearColor(0.0f,0.0f,0.0f,1.0f); 193 | larvio::visualize::DrawCurrentPose(Tbw_pgl); 194 | larvio::visualize::DrawKeyFrames(vPoses); 195 | if (menuShowSlideWindow) { 196 | larvio::visualize::DrawSlideWindow(vPoses_SW); 197 | } 198 | if (menuShowStablePoints) { 199 | larvio::visualize::DrawStableMapPoints(mStableMapPoints); 200 | } 201 | pangolin::FinishFrame(); 202 | } 203 | 204 | return 0; 205 | } -------------------------------------------------------------------------------- /cmake/FindEigen3.cmake: -------------------------------------------------------------------------------- 1 | # - Try to find Eigen3 lib 2 | # 3 | # This module supports requiring a minimum version, e.g. you can do 4 | # find_package(Eigen3 3.1.2) 5 | # to require version 3.1.2 or newer of Eigen3. 6 | # 7 | # Once done this will define 8 | # 9 | # EIGEN3_FOUND - system has eigen lib with correct version 10 | # EIGEN3_INCLUDE_DIR - the eigen include directory 11 | # EIGEN3_VERSION - eigen version 12 | 13 | # Copyright (c) 2006, 2007 Montel Laurent, 14 | # Copyright (c) 2008, 2009 Gael Guennebaud, 15 | # Copyright (c) 2009 Benoit Jacob 16 | # Redistribution and use is allowed according to the terms of the 2-clause BSD license. 
17 | 18 | if(NOT Eigen3_FIND_VERSION) 19 | if(NOT Eigen3_FIND_VERSION_MAJOR) 20 | set(Eigen3_FIND_VERSION_MAJOR 2) 21 | endif(NOT Eigen3_FIND_VERSION_MAJOR) 22 | if(NOT Eigen3_FIND_VERSION_MINOR) 23 | set(Eigen3_FIND_VERSION_MINOR 91) 24 | endif(NOT Eigen3_FIND_VERSION_MINOR) 25 | if(NOT Eigen3_FIND_VERSION_PATCH) 26 | set(Eigen3_FIND_VERSION_PATCH 0) 27 | endif(NOT Eigen3_FIND_VERSION_PATCH) 28 | 29 | set(Eigen3_FIND_VERSION "${Eigen3_FIND_VERSION_MAJOR}.${Eigen3_FIND_VERSION_MINOR}.${Eigen3_FIND_VERSION_PATCH}") 30 | endif(NOT Eigen3_FIND_VERSION) 31 | 32 | macro(_eigen3_check_version) 33 | file(READ "${EIGEN3_INCLUDE_DIR}/Eigen/src/Core/util/Macros.h" _eigen3_version_header) 34 | 35 | string(REGEX MATCH "define[ \t]+EIGEN_WORLD_VERSION[ \t]+([0-9]+)" _eigen3_world_version_match "${_eigen3_version_header}") 36 | set(EIGEN3_WORLD_VERSION "${CMAKE_MATCH_1}") 37 | string(REGEX MATCH "define[ \t]+EIGEN_MAJOR_VERSION[ \t]+([0-9]+)" _eigen3_major_version_match "${_eigen3_version_header}") 38 | set(EIGEN3_MAJOR_VERSION "${CMAKE_MATCH_1}") 39 | string(REGEX MATCH "define[ \t]+EIGEN_MINOR_VERSION[ \t]+([0-9]+)" _eigen3_minor_version_match "${_eigen3_version_header}") 40 | set(EIGEN3_MINOR_VERSION "${CMAKE_MATCH_1}") 41 | 42 | set(EIGEN3_VERSION ${EIGEN3_WORLD_VERSION}.${EIGEN3_MAJOR_VERSION}.${EIGEN3_MINOR_VERSION}) 43 | if(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) 44 | set(EIGEN3_VERSION_OK FALSE) 45 | else(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) 46 | set(EIGEN3_VERSION_OK TRUE) 47 | endif(${EIGEN3_VERSION} VERSION_LESS ${Eigen3_FIND_VERSION}) 48 | 49 | if(NOT EIGEN3_VERSION_OK) 50 | 51 | message(STATUS "Eigen3 version ${EIGEN3_VERSION} found in ${EIGEN3_INCLUDE_DIR}, " 52 | "but at least version ${Eigen3_FIND_VERSION} is required") 53 | endif(NOT EIGEN3_VERSION_OK) 54 | endmacro(_eigen3_check_version) 55 | 56 | if (EIGEN3_INCLUDE_DIR) 57 | 58 | # in cache already 59 | _eigen3_check_version() 60 | set(EIGEN3_FOUND ${EIGEN3_VERSION_OK}) 61 | 62 | else (EIGEN3_INCLUDE_DIR) 63 | 64 | # specific additional paths for some OS 65 | if (WIN32) 66 | set(EIGEN_ADDITIONAL_SEARCH_PATHS ${EIGEN_ADDITIONAL_SEARCH_PATHS} "C:/Program Files/Eigen/include" "C:/Program Files (x86)/Eigen/include") 67 | endif(WIN32) 68 | 69 | find_path(EIGEN3_INCLUDE_DIR NAMES signature_of_eigen3_matrix_library 70 | PATHS 71 | ${CMAKE_INSTALL_PREFIX}/include 72 | ${EIGEN_ADDITIONAL_SEARCH_PATHS} 73 | ${KDE4_INCLUDE_DIR} 74 | PATH_SUFFIXES eigen3 eigen 75 | ) 76 | 77 | if(EIGEN3_INCLUDE_DIR) 78 | _eigen3_check_version() 79 | endif(EIGEN3_INCLUDE_DIR) 80 | 81 | include(FindPackageHandleStandardArgs) 82 | find_package_handle_standard_args(Eigen3 DEFAULT_MSG EIGEN3_INCLUDE_DIR EIGEN3_VERSION_OK) 83 | 84 | mark_as_advanced(EIGEN3_INCLUDE_DIR) 85 | 86 | endif(EIGEN3_INCLUDE_DIR) 87 | 88 | -------------------------------------------------------------------------------- /config/euroc.yaml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | # output directory 4 | output_dir: "/root/larvio_results/" 5 | 6 | # switches 7 | if_FEJ: 1 # 0(false) or 1(true) 8 | estimate_extrin: 1 # 0(false) or 1(true) 9 | estimate_td: 1 # 0(false) or 1(true) 10 | calib_imu_instrinsic: 0 # 0(false) or 1(true) 11 | 12 | # camera instrinsic 13 | camera_model: "pinhole" # only support "pinhole" 14 | distortion_model: "radtan" # only support "radtan" and "equidistant" 15 | resolution_width: 752 16 | resolution_height: 480 17 | intrinsics: 18 | fx: 458.654 19 | fy: 457.296 20 | cx: 367.215 
21 | cy: 248.375 22 | distortion_coeffs: 23 | k1: -0.28340811 24 | k2: 0.07395907 25 | p1: 0.00019359 26 | p2: 1.76187114e-05 27 | 28 | # imu-camera extrinsic, including spacial and temporal parameters 29 | T_cam_imu: !!opencv-matrix 30 | rows: 4 31 | cols: 4 32 | dt: d 33 | data: 34 | [0.014865542981794, 0.999557249008346, -0.025774436697440, 0.065222909535531, 35 | -0.999880929698575, 0.014967213324719, 0.003756188357967, -0.020706385492719, 36 | 0.004140296794224, 0.025715529947966, 0.999660727177902, -0.008054602460030, 37 | 0, 0, 0, 1.000000000000000] 38 | td: 0.0 39 | 40 | # TODO: if calibrate camera instrinsic online 41 | 42 | # visual front-end parameters 43 | pyramid_levels: 2 44 | patch_size: 21 45 | fast_threshold: 30 46 | max_iteration: 30 47 | track_precision: 0.01 48 | ransac_threshold: 1 49 | max_features_num: 200 50 | min_distance: 20 51 | flag_equalize: 1 # 0(false) or 1(true) 52 | pub_frequency: 10 53 | 54 | # window size 55 | sw_size: 20 56 | 57 | # online reset thresholds 58 | position_std_threshold: 8.0 59 | rotation_threshold: 0.2618 60 | translation_threshold: 0.4 61 | tracking_rate_threshold: 0.5 62 | 63 | # feature triangulation parameters 64 | least_observation_number: 3 65 | max_track_len: 6 66 | feature_translation_threshold: -1.0 67 | 68 | # imu and camera measurement noise parameters 69 | noise_gyro: 0.004 70 | noise_acc: 0.08 71 | noise_gyro_bias: 2e-6 72 | noise_acc_bias: 4e-5 73 | noise_feature: 0.008 74 | 75 | # filter intial covariance 76 | initial_covariance_orientation: 4e-4 77 | initial_covariance_velocity: 0.25 78 | initial_covariance_position: 1.0 79 | initial_covariance_gyro_bias: 4e-4 80 | initial_covariance_acc_bias: 0.01 81 | initial_covariance_extrin_rot: 3.0462e-8 82 | initial_covariance_extrin_trans: 9e-8 83 | 84 | # fej settings 85 | reset_fej_threshold: 10.11 86 | 87 | # zupt settings 88 | if_ZUPT_valid: 1 # 0(false) or 1(true) 89 | zupt_max_feature_dis: 2e-3 90 | zupt_noise_v: 1e-2 # std 91 | zupt_noise_p: 1e-2 92 | zupt_noise_q: 3.4e-2 93 | 94 | # static initialization setting 95 | static_duration: 1.0 96 | 97 | # measurement rate 98 | imu_rate: 200 99 | img_rate: 20 100 | 101 | # augmented feature state settings 102 | max_features_in_one_grid: 1 # pure msckf if set to 0 103 | aug_grid_rows: 5 104 | aug_grid_cols: 6 105 | feature_idp_dim: 1 # 1 or 3 106 | 107 | # if apply Schmidt filter 108 | use_schmidt: 0 # 0(false) or 1(true) 109 | -------------------------------------------------------------------------------- /config/mynteye.yaml: -------------------------------------------------------------------------------- 1 | %YAML:1.0 2 | 3 | # output directory 4 | output_dir: "/Users/aresqiu/workSpace/larvio_results/" 5 | 6 | # switches 7 | if_FEJ: 1 # 0(false) or 1(true) 8 | estimate_extrin: 1 # 0(false) or 1(true) 9 | estimate_td: 1 # 0(false) or 1(true) 10 | calib_imu_instrinsic: 0 # 0(false) or 1(true) 11 | 12 | # camera instrinsic 13 | camera_model: "pinhole" # only support "pinhole" 14 | distortion_model: "equidistant" # only support "radtan" and "equidistant" 15 | resolution_width: 1280 16 | resolution_height: 720 17 | intrinsics: 18 | fx: 712.6506687297613 19 | fy: 713.3381366201213 20 | cx: 639.2566285415327 21 | cy: 377.90328510285144 22 | distortion_coeffs: 23 | k1: -0.015661749636940888 24 | k2: 0.0028974710951617955 25 | p1: 0.0034539528765559204 26 | p2: -0.006466223707507623 27 | 28 | # imu-camera extrinsic, including spacial and temporal parameters 29 | T_cam_imu: !!opencv-matrix 30 | rows: 4 31 | cols: 4 32 | dt: d 33 | data: 
34 | [0.9999851351447288, -0.0010261392271367887, 0.005355046952563305, 0.04918070909625808, 35 | -0.0010359006126353073, -0.999997806554969, 0.001820383248210167, 0.003166959323659525, 36 | 0.0053531672399027685, -0.001825903484895496, -0.9999840047105576, -0.01496732752058007, 37 | 0, 0, 0, 1.000000000000000] 38 | td: 0.0 39 | 40 | # TODO: if calibrate camera instrinsic online 41 | 42 | # visual front-end parameters 43 | pyramid_levels: 2 44 | patch_size: 21 45 | fast_threshold: 30 46 | max_iteration: 30 47 | track_precision: 0.01 48 | ransac_threshold: 1 49 | max_features_num: 300 50 | min_distance: 15 51 | flag_equalize: 1 # 0(false) or 1(true) 52 | pub_frequency: 20 53 | 54 | # window size 55 | sw_size: 20 56 | 57 | # online reset thresholds 58 | position_std_threshold: 8.0 59 | rotation_threshold: 0.2618 60 | translation_threshold: 0.4 61 | tracking_rate_threshold: 0.5 62 | 63 | # feature triangulation parameters 64 | least_observation_number: 3 65 | max_track_len: 6 66 | feature_translation_threshold: -1.0 67 | 68 | # imu and camera measurement noise parameters 69 | noise_gyro: 0.01 70 | noise_acc: 0.1 71 | noise_gyro_bias: 2e-5 72 | noise_acc_bias: 4e-4 73 | noise_feature: 0.008 74 | 75 | # filter intial covariance 76 | initial_covariance_orientation: 4e-4 77 | initial_covariance_velocity: 0.25 78 | initial_covariance_position: 0.0 79 | initial_covariance_gyro_bias: 4e-4 80 | initial_covariance_acc_bias: 0.01 81 | initial_covariance_extrin_rot: 3.0462e-8 82 | initial_covariance_extrin_trans: 9e-8 83 | 84 | # fej settings 85 | reset_fej_threshold: 10.11 86 | 87 | # zupt settings 88 | if_ZUPT_valid: 1 # 0(false) or 1(true) 89 | zupt_max_feature_dis: 2e-3 90 | zupt_noise_v: 1e-2 # std 91 | zupt_noise_p: 1e-2 92 | zupt_noise_q: 3.4e-2 93 | 94 | # static initialization setting 95 | static_duration: 1.0 96 | 97 | # measurement rate 98 | imu_rate: 200 99 | img_rate: 30 100 | 101 | # augmented feature state settings 102 | max_features_in_one_grid: 1 # pure msckf if set to 0 103 | aug_grid_rows: 5 104 | aug_grid_cols: 7 105 | feature_idp_dim: 1 # 1 or 3 106 | 107 | # if apply Schmidt filter 108 | use_schmidt: 0 # 0(false) or 1(true) -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ros:melodic-perception 2 | MAINTAINER QIU Xiaochen 3 | 4 | ENV CERES_VERSION="1.14.0" 5 | ENV JOBS_NUM="4" 6 | 7 | RUN apt-get -y update && apt-get install -y \ 8 | libboost-all-dev \ 9 | cmake git \ 10 | libgoogle-glog-dev \ 11 | libeigen3-dev \ 12 | libsuitesparse-dev \ 13 | libgl1-mesa-dev \ 14 | libglew-dev \ 15 | libxkbcommon-x11-dev \ 16 | ros-${ROS_DISTRO}-tf-conversions \ 17 | ros-${ROS_DISTRO}-rviz && \ 18 | rm -rf /var/lib/apt/lists/* 19 | 20 | RUN cd /root && \ 21 | git clone https://github.com/ceres-solver/ceres-solver.git && \ 22 | cd ceres-solver && git checkout tags/${CERES_VERSION} && \ 23 | mkdir build && cd build && \ 24 | cmake .. && make -j${JOBS_NUM} install && \ 25 | cd /root && \ 26 | git clone https://github.com/stevenlovegrove/Pangolin.git && \ 27 | cd Pangolin && mkdir build && cd build && \ 28 | cmake .. && make -j${JOBS_NUM} install && \ 29 | rm -rf /root/* 30 | 31 | RUN cd /root && \ 32 | git clone https://github.com/PetWorm/LARVIO.git && \ 33 | cd LARVIO && mkdir build && cd build && \ 34 | cmake -D CMAKE_BUILD_TYPE=Release .. && make -j${JOBS_NUM} && \ 35 | cd ../ros_wrapper && . 
/opt/ros/${ROS_DISTRO}/setup.sh && \ 36 | catkin_make -j${JOBS_NUM} -------------------------------------------------------------------------------- /include/Initializer/DynamicInitializer.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A vio initializer that utlize dynamic imu and img data to initialize. 4 | // The core method comes from VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono) 5 | // 6 | 7 | #ifndef DYNAMIC_INITIALIZER_H 8 | #define DYNAMIC_INITIALIZER_H 9 | 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | 20 | #include 21 | 22 | #include "Initializer/feature_manager.h" 23 | #include "Initializer/initial_alignment.h" 24 | #include "Initializer/initial_sfm.h" 25 | #include "Initializer/solve_5pts.h" 26 | 27 | #include "larvio/imu_state.h" 28 | 29 | #include 30 | 31 | #include "sensors/ImuData.hpp" 32 | 33 | using namespace std; 34 | 35 | namespace larvio { 36 | 37 | class DynamicInitializer 38 | { 39 | public: 40 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 41 | 42 | // Constructor. 43 | DynamicInitializer() = delete; 44 | DynamicInitializer(const double& td_, const Eigen::Matrix3d& Ma_, 45 | const Eigen::Matrix3d& Tg_, const Eigen::Matrix3d& As_, 46 | const double& acc_n_, const double& acc_w_, const double& gyr_n_, 47 | const double& gyr_w_, const Eigen::Matrix3d& R_c2b, 48 | const Eigen::Vector3d& t_bc_b, const double& imu_img_timeTh_) : 49 | td(td_), bInit(false), state_time(0.0), curr_time(-1), 50 | first_imu(false), frame_count(0), acc_n(acc_n_), acc_w(acc_w_), 51 | gyr_n(gyr_n_), gyr_w(gyr_w_), initial_timestamp(0.0), 52 | RIC(R_c2b), TIC(t_bc_b), imu_img_timeTh(imu_img_timeTh_), 53 | lower_time_bound(0.0) { 54 | 55 | Ma = Ma_; 56 | Tg = Tg_; 57 | As = As_; 58 | gyro_bias = Eigen::Vector3d::Zero(); 59 | acc_bias = Eigen::Vector3d::Zero(); 60 | position = Eigen::Vector3d::Zero(); 61 | velocity = Eigen::Vector3d::Zero(); 62 | orientation = Eigen::Vector4d(0.0, 0.0, 0.0, 1.0); 63 | 64 | for (int i = 0; i < WINDOW_SIZE + 1; i++) 65 | { 66 | Rs[i].setIdentity(); 67 | Ps[i].setZero(); 68 | Vs[i].setZero(); 69 | Bas[i].setZero(); 70 | Bgs[i].setZero(); 71 | dt_buf[i].clear(); 72 | linear_acceleration_buf[i].clear(); 73 | angular_velocity_buf[i].clear(); 74 | } 75 | 76 | g = Eigen::Vector3d::Zero(); 77 | 78 | // Initialize feature manager 79 | f_manager.clearState(); 80 | // f_manager.init(Rs); 81 | f_manager.setRic(R_c2b); 82 | 83 | Times.resize(WINDOW_SIZE + 1); 84 | } 85 | 86 | // Destructor. 87 | ~DynamicInitializer(){}; 88 | 89 | // Interface for trying to initialize. 90 | bool tryDynInit(const std::vector& imu_msg_buffer, 91 | MonoCameraMeasurementPtr img_msg); 92 | 93 | // Assign the initial state if initialized successfully. 94 | void assignInitialState(std::vector& imu_msg_buffer, 95 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, IMUState& imu_state); 96 | 97 | // If initialized. 98 | bool ifInitialized() { 99 | return bInit; 100 | } 101 | 102 | private: 103 | 104 | // Time lower bound for used imu data. 105 | double lower_time_bound; 106 | 107 | // Threshold for deciding which imu is at the same time with a img frame 108 | double imu_img_timeTh; 109 | 110 | // Flag indicating if initialized. 111 | bool bInit; 112 | 113 | // Error bewteen timestamp of imu and img. 114 | double td; 115 | 116 | // Error between img and its nearest imu msg. 
117 | double ddt; 118 | 119 | // Relative rotation between camera and imu. 120 | Eigen::Matrix3d RIC; 121 | Eigen::Vector3d TIC; 122 | 123 | // Imu instrinsic. 124 | Eigen::Matrix3d Ma; 125 | Eigen::Matrix3d Tg; 126 | Eigen::Matrix3d As; 127 | 128 | // Initialize results. 129 | double state_time; 130 | Eigen::Vector3d gyro_bias; 131 | Eigen::Vector3d acc_bias; 132 | Eigen::Vector4d orientation; 133 | Eigen::Vector3d position; 134 | Eigen::Vector3d velocity; 135 | 136 | // Save the last imu data that have been processed. 137 | Eigen::Vector3d last_acc; 138 | Eigen::Vector3d last_gyro; 139 | 140 | // Flag for declare the first imu data. 141 | bool first_imu; 142 | 143 | // Imu data for initialize every imu preintegration base. 144 | Vector3d acc_0, gyr_0; 145 | 146 | // Frame counter in sliding window. 147 | int frame_count; 148 | 149 | // Current imu time. 150 | double curr_time; 151 | 152 | // Imu noise param. 153 | double acc_n, acc_w; 154 | double gyr_n, gyr_w; 155 | 156 | // IMU preintegration between keyframes. 157 | boost::shared_ptr pre_integrations[(WINDOW_SIZE + 1)]; 158 | 159 | // Temporal buff for imu preintegration between ordinary frames. 160 | boost::shared_ptr tmp_pre_integration; 161 | 162 | // Store the information of ordinary frames 163 | map all_image_frame; 164 | 165 | // Bias of gyro and accelerometer of imu corresponding to every keyframe. 166 | Eigen::Vector3d Bas[(WINDOW_SIZE + 1)]; 167 | Eigen::Vector3d Bgs[(WINDOW_SIZE + 1)]; 168 | 169 | // Every member of this vector store the dt between every adjacent imu 170 | // between two keyframes in sliding window. 171 | vector dt_buf[(WINDOW_SIZE + 1)]; 172 | 173 | // Every member of this two vectors store all imu measurements 174 | // between two keyframes in sliding window. 175 | vector linear_acceleration_buf[(WINDOW_SIZE + 1)]; 176 | vector angular_velocity_buf[(WINDOW_SIZE + 1)]; 177 | 178 | // Gravity under reference camera frame. 179 | Eigen::Vector3d g; 180 | 181 | // Feature manager. 182 | FeatureManager f_manager; 183 | 184 | // State of body frame under reference frame. 185 | Eigen::Matrix3d Rs[(WINDOW_SIZE + 1)]; 186 | Eigen::Vector3d Ps[(WINDOW_SIZE + 1)]; 187 | Eigen::Vector3d Vs[(WINDOW_SIZE + 1)]; 188 | 189 | // Flags for marginalization. 190 | enum MarginalizationFlag 191 | { 192 | MARGIN_OLD = 0, 193 | MARGIN_SECOND_NEW = 1 194 | }; 195 | MarginalizationFlag marginalization_flag; 196 | 197 | // Timestamps of sliding window. 198 | vector Times; 199 | 200 | // Initial timestamp 201 | double initial_timestamp; 202 | 203 | // For solving relative motion. 204 | MotionEstimator m_estimator; 205 | 206 | private: 207 | 208 | // Process every imu frame before the img. 209 | void processIMU(const ImuData& imu_msg); 210 | 211 | // Process img frame. 212 | void processImage(MonoCameraMeasurementPtr img_msg); 213 | 214 | // Check if the condition is fit to conduct the vio initialization, and conduct it while suitable. 215 | bool initialStructure(); 216 | 217 | // Try to recover relative pose. 218 | bool relativePose(Matrix3d &relative_R, Vector3d &relative_T, int &l); 219 | 220 | // Align the visual sfm with imu preintegration. 221 | bool visualInitialAlign(); 222 | 223 | // Slide the window. 
224 | void slideWindow(); 225 | }; 226 | 227 | } 228 | 229 | #endif //DYNAMIC_INITIALIZER_H 230 | -------------------------------------------------------------------------------- /include/Initializer/FlexibleInitializer.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A flexible initializer that can automatically initialize in case of static or dynamic scene. 4 | // 5 | 6 | #ifndef FLEXIBLE_INITIALIZER_H 7 | #define FLEXIBLE_INITIALIZER_H 8 | 9 | #include 10 | #include 11 | 12 | #include "Initializer/StaticInitializer.h" 13 | #include "Initializer/DynamicInitializer.h" 14 | 15 | #include "sensors/ImuData.hpp" 16 | 17 | using namespace std; 18 | 19 | namespace larvio { 20 | 21 | class FlexibleInitializer 22 | { 23 | public: 24 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 25 | 26 | // Constructor 27 | FlexibleInitializer() = delete; 28 | FlexibleInitializer(const double& max_feature_dis_, const int& static_Num_, 29 | const double& td_, const Eigen::Matrix3d& Ma_, 30 | const Eigen::Matrix3d& Tg_, const Eigen::Matrix3d& As_, 31 | const double& acc_n_, const double& acc_w_, const double& gyr_n_, 32 | const double& gyr_w_, const Eigen::Matrix3d& R_c2b, 33 | const Eigen::Vector3d& t_bc_b, const double& imu_img_timeTh_) { 34 | 35 | staticInitPtr.reset(new StaticInitializer( 36 | max_feature_dis_, static_Num_, td_, Ma_, Tg_, As_)); 37 | 38 | dynamicInitPtr.reset(new DynamicInitializer( 39 | td_, Ma_, Tg_, As_, acc_n_, acc_w_, gyr_n_, 40 | gyr_w_, R_c2b, t_bc_b, imu_img_timeTh_)); 41 | } 42 | 43 | // Destructor 44 | ~FlexibleInitializer(){} 45 | 46 | // Interface for trying to initialize 47 | bool tryIncInit(std::vector& imu_msg_buffer, 48 | MonoCameraMeasurementPtr img_msg, 49 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, 50 | IMUState& imu_state); 51 | 52 | 53 | private: 54 | 55 | // Inclinometer-initializer 56 | boost::shared_ptr staticInitPtr; 57 | // Dynamic initializer 58 | boost::shared_ptr dynamicInitPtr; 59 | 60 | 61 | }; 62 | 63 | } 64 | 65 | 66 | #endif //FLEXIBLE_INITIALIZER_H 67 | -------------------------------------------------------------------------------- /include/Initializer/ImuPreintegration.h: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-14. 3 | // Methods for IMU preintegration. 
4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono) 5 | // 6 | 7 | #ifndef IMU_PREINTEGRATION_H 8 | #define IMU_PREINTEGRATION_H 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | 18 | using namespace Eigen; 19 | 20 | namespace larvio { 21 | 22 | static Vector3d Gravity = Vector3d(0, 0, 9.81); 23 | 24 | enum StateOrder 25 | { 26 | O_P = 0, 27 | O_R = 3, 28 | O_V = 6, 29 | O_BA = 9, 30 | O_BG = 12 31 | }; 32 | 33 | class IntegrationBase 34 | { 35 | public: 36 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 37 | IntegrationBase() = delete; 38 | IntegrationBase(const Eigen::Vector3d &_acc_0, const Eigen::Vector3d &_gyr_0, 39 | const Eigen::Vector3d &_linearized_ba, const Eigen::Vector3d &_linearized_bg, 40 | const double& acc_n, const double& acc_w, const double& gyr_n, 41 | const double& gyr_w) 42 | : acc_0{_acc_0}, gyr_0{_gyr_0}, linearized_acc{_acc_0}, linearized_gyr{_gyr_0}, 43 | linearized_ba{_linearized_ba}, linearized_bg{_linearized_bg}, 44 | jacobian{Eigen::Matrix::Identity()}, covariance{Eigen::Matrix::Zero()}, 45 | sum_dt{0.0}, delta_p{Eigen::Vector3d::Zero()}, delta_q{Eigen::Quaterniond::Identity()}, delta_v{Eigen::Vector3d::Zero()}, 46 | ACC_N{acc_n}, ACC_W{acc_w}, GYR_N{gyr_n}, GYR_W{gyr_w} 47 | { 48 | noise = Eigen::Matrix::Zero(); 49 | noise.block<3, 3>(0, 0) = (ACC_N * ACC_N) * Eigen::Matrix3d::Identity(); 50 | noise.block<3, 3>(3, 3) = (GYR_N * GYR_N) * Eigen::Matrix3d::Identity(); 51 | noise.block<3, 3>(6, 6) = (ACC_N * ACC_N) * Eigen::Matrix3d::Identity(); 52 | noise.block<3, 3>(9, 9) = (GYR_N * GYR_N) * Eigen::Matrix3d::Identity(); 53 | noise.block<3, 3>(12, 12) = (ACC_W * ACC_W) * Eigen::Matrix3d::Identity(); 54 | noise.block<3, 3>(15, 15) = (GYR_W * GYR_W) * Eigen::Matrix3d::Identity(); 55 | } 56 | 57 | void push_back(double dt, const Eigen::Vector3d &acc, const Eigen::Vector3d &gyr) 58 | { 59 | dt_buf.push_back(dt); 60 | acc_buf.push_back(acc); 61 | gyr_buf.push_back(gyr); 62 | propagate(dt, acc, gyr); 63 | } 64 | 65 | void repropagate(const Eigen::Vector3d &_linearized_ba, const Eigen::Vector3d &_linearized_bg) 66 | { 67 | sum_dt = 0.0; 68 | acc_0 = linearized_acc; 69 | gyr_0 = linearized_gyr; 70 | delta_p.setZero(); 71 | delta_q.setIdentity(); 72 | delta_v.setZero(); 73 | linearized_ba = _linearized_ba; 74 | linearized_bg = _linearized_bg; 75 | jacobian.setIdentity(); 76 | covariance.setZero(); 77 | for (int i = 0; i < static_cast(dt_buf.size()); i++) 78 | propagate(dt_buf[i], acc_buf[i], gyr_buf[i]); 79 | } 80 | 81 | void midPointIntegration(double _dt, 82 | const Eigen::Vector3d &_acc_0, const Eigen::Vector3d &_gyr_0, 83 | const Eigen::Vector3d &_acc_1, const Eigen::Vector3d &_gyr_1, 84 | const Eigen::Vector3d &delta_p, const Eigen::Quaterniond &delta_q, const Eigen::Vector3d &delta_v, 85 | const Eigen::Vector3d &linearized_ba, const Eigen::Vector3d &linearized_bg, 86 | Eigen::Vector3d &result_delta_p, Eigen::Quaterniond &result_delta_q, Eigen::Vector3d &result_delta_v, 87 | Eigen::Vector3d &result_linearized_ba, Eigen::Vector3d &result_linearized_bg, bool update_jacobian) 88 | { 89 | //printf("midpoint integration"); 90 | Vector3d un_acc_0 = delta_q * (_acc_0 - linearized_ba); 91 | Vector3d un_gyr = 0.5 * (_gyr_0 + _gyr_1) - linearized_bg; 92 | result_delta_q = delta_q * Quaterniond(1, un_gyr(0) * _dt / 2, un_gyr(1) * _dt / 2, un_gyr(2) * _dt / 2); 93 | Vector3d un_acc_1 = result_delta_q * (_acc_1 - linearized_ba); 94 | Vector3d un_acc = 0.5 * (un_acc_0 + un_acc_1); 95 | result_delta_p = delta_p 
+ delta_v * _dt + 0.5 * un_acc * _dt * _dt; 96 | result_delta_v = delta_v + un_acc * _dt; 97 | result_linearized_ba = linearized_ba; 98 | result_linearized_bg = linearized_bg; 99 | 100 | if(update_jacobian) 101 | { 102 | Vector3d w_x = 0.5 * (_gyr_0 + _gyr_1) - linearized_bg; 103 | Vector3d a_0_x = _acc_0 - linearized_ba; 104 | Vector3d a_1_x = _acc_1 - linearized_ba; 105 | Matrix3d R_w_x, R_a_0_x, R_a_1_x; 106 | 107 | R_w_x<<0, -w_x(2), w_x(1), 108 | w_x(2), 0, -w_x(0), 109 | -w_x(1), w_x(0), 0; 110 | R_a_0_x<<0, -a_0_x(2), a_0_x(1), 111 | a_0_x(2), 0, -a_0_x(0), 112 | -a_0_x(1), a_0_x(0), 0; 113 | R_a_1_x<<0, -a_1_x(2), a_1_x(1), 114 | a_1_x(2), 0, -a_1_x(0), 115 | -a_1_x(1), a_1_x(0), 0; 116 | 117 | MatrixXd F = MatrixXd::Zero(15, 15); 118 | F.block<3, 3>(0, 0) = Matrix3d::Identity(); 119 | F.block<3, 3>(0, 3) = -0.25 * delta_q.toRotationMatrix() * R_a_0_x * _dt * _dt + 120 | -0.25 * result_delta_q.toRotationMatrix() * R_a_1_x * (Matrix3d::Identity() - R_w_x * _dt) * _dt * _dt; 121 | F.block<3, 3>(0, 6) = MatrixXd::Identity(3,3) * _dt; 122 | F.block<3, 3>(0, 9) = -0.25 * (delta_q.toRotationMatrix() + result_delta_q.toRotationMatrix()) * _dt * _dt; 123 | F.block<3, 3>(0, 12) = -0.25 * result_delta_q.toRotationMatrix() * R_a_1_x * _dt * _dt * -_dt; 124 | F.block<3, 3>(3, 3) = Matrix3d::Identity() - R_w_x * _dt; 125 | F.block<3, 3>(3, 12) = -1.0 * MatrixXd::Identity(3,3) * _dt; 126 | F.block<3, 3>(6, 3) = -0.5 * delta_q.toRotationMatrix() * R_a_0_x * _dt + 127 | -0.5 * result_delta_q.toRotationMatrix() * R_a_1_x * (Matrix3d::Identity() - R_w_x * _dt) * _dt; 128 | F.block<3, 3>(6, 6) = Matrix3d::Identity(); 129 | F.block<3, 3>(6, 9) = -0.5 * (delta_q.toRotationMatrix() + result_delta_q.toRotationMatrix()) * _dt; 130 | F.block<3, 3>(6, 12) = -0.5 * result_delta_q.toRotationMatrix() * R_a_1_x * _dt * -_dt; 131 | F.block<3, 3>(9, 9) = Matrix3d::Identity(); 132 | F.block<3, 3>(12, 12) = Matrix3d::Identity(); 133 | //cout<<"A"<(0, 0) = 0.25 * delta_q.toRotationMatrix() * _dt * _dt; 137 | V.block<3, 3>(0, 3) = 0.25 * -result_delta_q.toRotationMatrix() * R_a_1_x * _dt * _dt * 0.5 * _dt; 138 | V.block<3, 3>(0, 6) = 0.25 * result_delta_q.toRotationMatrix() * _dt * _dt; 139 | V.block<3, 3>(0, 9) = V.block<3, 3>(0, 3); 140 | V.block<3, 3>(3, 3) = 0.5 * MatrixXd::Identity(3,3) * _dt; 141 | V.block<3, 3>(3, 9) = 0.5 * MatrixXd::Identity(3,3) * _dt; 142 | V.block<3, 3>(6, 0) = 0.5 * delta_q.toRotationMatrix() * _dt; 143 | V.block<3, 3>(6, 3) = 0.5 * -result_delta_q.toRotationMatrix() * R_a_1_x * _dt * 0.5 * _dt; 144 | V.block<3, 3>(6, 6) = 0.5 * result_delta_q.toRotationMatrix() * _dt; 145 | V.block<3, 3>(6, 9) = V.block<3, 3>(6, 3); 146 | V.block<3, 3>(9, 12) = MatrixXd::Identity(3,3) * _dt; 147 | V.block<3, 3>(12, 15) = MatrixXd::Identity(3,3) * _dt; 148 | 149 | //step_jacobian = F; 150 | //step_V = V; 151 | jacobian = F * jacobian; 152 | covariance = F * covariance * F.transpose() + V * noise * V.transpose(); 153 | } 154 | 155 | } 156 | 157 | void propagate(double _dt, const Eigen::Vector3d &_acc_1, const Eigen::Vector3d &_gyr_1) 158 | { 159 | dt = _dt; 160 | acc_1 = _acc_1; 161 | gyr_1 = _gyr_1; 162 | Vector3d result_delta_p; 163 | Quaterniond result_delta_q; 164 | Vector3d result_delta_v; 165 | Vector3d result_linearized_ba; 166 | Vector3d result_linearized_bg; 167 | 168 | midPointIntegration(_dt, acc_0, gyr_0, _acc_1, _gyr_1, delta_p, delta_q, delta_v, 169 | linearized_ba, linearized_bg, 170 | result_delta_p, result_delta_q, result_delta_v, 171 | result_linearized_ba, result_linearized_bg, 1); 172 
| 173 | //checkJacobian(_dt, acc_0, gyr_0, acc_1, gyr_1, delta_p, delta_q, delta_v, 174 | // linearized_ba, linearized_bg); 175 | delta_p = result_delta_p; 176 | delta_q = result_delta_q; 177 | delta_v = result_delta_v; 178 | linearized_ba = result_linearized_ba; 179 | linearized_bg = result_linearized_bg; 180 | delta_q.normalize(); 181 | sum_dt += dt; 182 | acc_0 = acc_1; 183 | gyr_0 = gyr_1; 184 | 185 | } 186 | 187 | Eigen::Matrix evaluate(const Eigen::Vector3d &Pi, const Eigen::Quaterniond &Qi, const Eigen::Vector3d &Vi, const Eigen::Vector3d &Bai, const Eigen::Vector3d &Bgi, 188 | const Eigen::Vector3d &Pj, const Eigen::Quaterniond &Qj, const Eigen::Vector3d &Vj, const Eigen::Vector3d &Baj, const Eigen::Vector3d &Bgj) 189 | { 190 | Eigen::Matrix residuals; 191 | 192 | Eigen::Matrix3d dp_dba = jacobian.block<3, 3>(O_P, O_BA); 193 | Eigen::Matrix3d dp_dbg = jacobian.block<3, 3>(O_P, O_BG); 194 | 195 | Eigen::Matrix3d dq_dbg = jacobian.block<3, 3>(O_R, O_BG); 196 | 197 | Eigen::Matrix3d dv_dba = jacobian.block<3, 3>(O_V, O_BA); 198 | Eigen::Matrix3d dv_dbg = jacobian.block<3, 3>(O_V, O_BG); 199 | 200 | Eigen::Vector3d dba = Bai - linearized_ba; 201 | Eigen::Vector3d dbg = Bgi - linearized_bg; 202 | 203 | Eigen::Quaterniond corrected_delta_q = delta_q * getSmallAngleQuaternion(dq_dbg * dbg); 204 | Eigen::Vector3d corrected_delta_v = delta_v + dv_dba * dba + dv_dbg * dbg; 205 | Eigen::Vector3d corrected_delta_p = delta_p + dp_dba * dba + dp_dbg * dbg; 206 | 207 | residuals.block<3, 1>(O_P, 0) = Qi.inverse() * (0.5 * Gravity * sum_dt * sum_dt + Pj - Pi - Vi * sum_dt) - corrected_delta_p; 208 | residuals.block<3, 1>(O_R, 0) = 2 * (corrected_delta_q.inverse() * (Qi.inverse() * Qj)).vec(); 209 | residuals.block<3, 1>(O_V, 0) = Qi.inverse() * (Gravity * sum_dt + Vj - Vi) - corrected_delta_v; 210 | residuals.block<3, 1>(O_BA, 0) = Baj - Bai; 211 | residuals.block<3, 1>(O_BG, 0) = Bgj - Bgi; 212 | return residuals; 213 | } 214 | 215 | double dt; 216 | Eigen::Vector3d acc_0, gyr_0; 217 | Eigen::Vector3d acc_1, gyr_1; 218 | 219 | const Eigen::Vector3d linearized_acc, linearized_gyr; 220 | Eigen::Vector3d linearized_ba, linearized_bg; 221 | 222 | Eigen::Matrix jacobian, covariance; 223 | Eigen::Matrix step_jacobian; 224 | Eigen::Matrix step_V; 225 | Eigen::Matrix noise; 226 | 227 | double sum_dt; 228 | Eigen::Vector3d delta_p; 229 | Eigen::Quaterniond delta_q; 230 | Eigen::Vector3d delta_v; 231 | 232 | std::vector dt_buf; 233 | std::vector acc_buf; 234 | std::vector gyr_buf; 235 | 236 | // Imu noise param 237 | double ACC_N, ACC_W; 238 | double GYR_N, GYR_W; 239 | }; 240 | 241 | } 242 | 243 | 244 | #endif //IMU_PREINTEGRATION_H 245 | -------------------------------------------------------------------------------- /include/Initializer/StaticInitializer.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A inclinometer-initializer utilizing the static scene. 
4 | // 5 | 6 | #ifndef STATIC_INITIALIZER_H 7 | #define STATIC_INITIALIZER_H 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | 19 | #include "larvio/imu_state.h" 20 | 21 | #include "sensors/ImuData.hpp" 22 | 23 | using namespace std; 24 | 25 | namespace larvio { 26 | 27 | class StaticInitializer 28 | { 29 | public: 30 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 31 | 32 | // Constructor 33 | StaticInitializer() = delete; 34 | StaticInitializer(const double& max_feature_dis_, const int& static_Num_, const double& td_, 35 | const Eigen::Matrix3d& Ma_, const Eigen::Matrix3d& Tg_, const Eigen::Matrix3d& As_) : 36 | max_feature_dis(max_feature_dis_), static_Num(static_Num_), td(td_), 37 | bInit(false), state_time(0.0), lower_time_bound(0.0) { 38 | staticImgCounter = 0; 39 | init_features.clear(); 40 | Ma = Ma_; 41 | Tg = Tg_; 42 | As = As_; 43 | gyro_bias = Eigen::Vector3d::Zero(); 44 | acc_bias = Eigen::Vector3d::Zero(); 45 | position = Eigen::Vector3d::Zero(); 46 | velocity = Eigen::Vector3d::Zero(); 47 | orientation = Eigen::Vector4d(0.0, 0.0, 0.0, 1.0); 48 | } 49 | 50 | // Destructor 51 | ~StaticInitializer(){} 52 | 53 | // Interface for trying to initialize 54 | bool tryIncInit(const std::vector& imu_msg_buffer, 55 | MonoCameraMeasurementPtr img_msg); 56 | 57 | // Assign the initial state if initialized successfully 58 | void assignInitialState(std::vector& imu_msg_buffer, 59 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, IMUState& imu_state); 60 | 61 | // If initialized 62 | bool ifInitialized() { 63 | return bInit; 64 | } 65 | 66 | private: 67 | 68 | // Time lower bound for used imu data. 69 | double lower_time_bound; 70 | 71 | // Maximum feature distance allowed bewteen static images 72 | double max_feature_dis; 73 | 74 | // Number of consecutive image for trigger static initializer 75 | unsigned int static_Num; 76 | 77 | // Defined type for initialization 78 | typedef std::map, 79 | Eigen::aligned_allocator< 80 | std::pair > > InitFeatures; 81 | InitFeatures init_features; 82 | 83 | // Counter for static images that will be used in inclinometer-initializer 84 | unsigned int staticImgCounter; 85 | 86 | // Error bewteen timestamp of imu and img 87 | double td; 88 | 89 | // Imu instrinsic 90 | Eigen::Matrix3d Ma; 91 | Eigen::Matrix3d Tg; 92 | Eigen::Matrix3d As; 93 | 94 | // Initialize results 95 | double state_time; 96 | Eigen::Vector3d gyro_bias; 97 | Eigen::Vector3d acc_bias; 98 | Eigen::Vector4d orientation; 99 | Eigen::Vector3d position; 100 | Eigen::Vector3d velocity; 101 | 102 | // Flag indicating if initialized 103 | bool bInit; 104 | 105 | // initialize rotation and gyro bias by static imu datas 106 | void initializeGravityAndBias(const double& time_bound, 107 | const std::vector& imu_msg_buffer); 108 | }; 109 | 110 | } // namespace larvio 111 | 112 | 113 | #endif //STATIC_INITIALIZER_H 114 | -------------------------------------------------------------------------------- /include/Initializer/feature_manager.h: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-15. 3 | // Feature manager. 4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 
5 | // 6 | 7 | #ifndef FEATURE_MANAGER_H 8 | #define FEATURE_MANAGER_H 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | using namespace std; 15 | 16 | #include 17 | using namespace Eigen; 18 | 19 | #include 20 | 21 | 22 | namespace larvio { 23 | 24 | const int WINDOW_SIZE = 10; 25 | const double MIN_PARALLAX = 10/460; 26 | 27 | class FeaturePerFrame 28 | { 29 | public: 30 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 31 | 32 | FeaturePerFrame(const MonoFeatureMeasurement& _point, double td) 33 | { 34 | point.x() = _point.u+_point.u_vel*td; 35 | point.y() = _point.v+_point.v_vel*td; 36 | point.z() = 1; 37 | velocity.x() = _point.u_vel; 38 | velocity.y() = _point.v_vel; 39 | cur_td = td; 40 | } 41 | double cur_td; 42 | Vector3d point; 43 | Vector2d velocity; 44 | double z; 45 | bool is_used; 46 | double parallax; 47 | MatrixXd A; 48 | VectorXd b; 49 | double dep_gradient; 50 | }; 51 | 52 | class FeaturePerId 53 | { 54 | public: 55 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 56 | 57 | const int feature_id; 58 | int start_frame; 59 | vector feature_per_frame; 60 | 61 | int used_num; 62 | bool is_outlier; 63 | bool is_margin; 64 | double estimated_depth; 65 | int solve_flag; // 0 haven't solve yet; 1 solve succ; 2 solve fail; 66 | 67 | Vector3d gt_p; 68 | 69 | FeaturePerId(int _feature_id, int _start_frame) 70 | : feature_id(_feature_id), start_frame(_start_frame), 71 | used_num(0), estimated_depth(-1.0), solve_flag(0) 72 | { 73 | } 74 | 75 | int endFrame(); 76 | }; 77 | 78 | class FeatureManager 79 | { 80 | public: 81 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 82 | 83 | FeatureManager(){}; 84 | 85 | void setRic(const Matrix3d& _ric); 86 | 87 | void clearState(); 88 | 89 | int getFeatureCount(); 90 | 91 | bool addFeatureCheckParallax(int frame_count, MonoCameraMeasurementPtr image, double td); 92 | vector> getCorresponding(int frame_count_l, int frame_count_r); 93 | 94 | //void updateDepth(const VectorXd &x); 95 | void setDepth(const VectorXd &x); 96 | void removeFailures(); 97 | void clearDepth(const VectorXd &x); 98 | VectorXd getDepthVector(); 99 | void removeBack(); 100 | void removeFront(int frame_count); 101 | void removeOutlier(); 102 | list feature; 103 | int last_track_num; 104 | 105 | private: 106 | double compensatedParallax2(const FeaturePerId &it_per_id, int frame_count); 107 | Matrix3d ric; 108 | }; 109 | 110 | } 111 | 112 | #endif 113 | -------------------------------------------------------------------------------- /include/Initializer/initial_alignment.h: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-16. 3 | // Type and methods for initial alignment. 4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 
5 | // 6 | 7 | #ifndef INITIAL_ALIGNMENT_H 8 | #define INITIAL_ALIGNMENT_H 9 | 10 | // #pragma once 11 | #include 12 | #include 13 | #include "Initializer/ImuPreintegration.h" 14 | #include "Initializer/feature_manager.h" 15 | #include 16 | 17 | #include 18 | 19 | #include 20 | 21 | using namespace Eigen; 22 | using namespace std; 23 | 24 | namespace larvio { 25 | 26 | class ImageFrame 27 | { 28 | public: 29 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 30 | 31 | ImageFrame(){}; 32 | ImageFrame(MonoCameraMeasurementPtr _points, const double& td):is_key_frame{false} 33 | { 34 | t = _points->timeStampToSec+td; 35 | for (const auto& pt : _points->features) 36 | { 37 | double x = pt.u+pt.u_vel*td; 38 | double y = pt.v+pt.v_vel*td; 39 | double z = 1; 40 | double p_u = pt.u; 41 | double p_v = pt.v; 42 | double velocity_x = pt.u_vel; 43 | double velocity_y = pt.v_vel; 44 | Eigen::Matrix xyz_uv_velocity; 45 | xyz_uv_velocity << x, y, z, p_u, p_v, velocity_x, velocity_y; 46 | points[pt.id] = xyz_uv_velocity; 47 | } 48 | }; 49 | map> points; 50 | double t; 51 | Matrix3d R; 52 | Vector3d T; 53 | boost::shared_ptr pre_integration; 54 | bool is_key_frame; 55 | }; 56 | 57 | bool VisualIMUAlignment(map &all_image_frame, Vector3d* Bgs, Vector3d &g, VectorXd &x, const Vector3d& TIC); 58 | 59 | } 60 | 61 | 62 | #endif //INITIAL_ALIGNMENT_H 63 | -------------------------------------------------------------------------------- /include/Initializer/initial_sfm.h: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-16. 3 | // Type and methods for initial sfm. 4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 5 | // 6 | 7 | #ifndef INITIAL_SFM_H 8 | #define INITIAL_SFM_H 9 | 10 | // #pragma once 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | using namespace Eigen; 22 | using namespace std; 23 | 24 | namespace larvio { 25 | 26 | struct SFMFeature 27 | { 28 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 29 | 30 | bool state; 31 | int id; 32 | vector> observation; 33 | double position[3]; 34 | double depth; 35 | }; 36 | 37 | struct ReprojectionError3D 38 | { 39 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 40 | 41 | ReprojectionError3D(double observed_u, double observed_v) 42 | :observed_u(observed_u), observed_v(observed_v) 43 | {} 44 | 45 | template 46 | bool operator()(const T* const camera_R, const T* const camera_T, const T* point, T* residuals) const 47 | { 48 | T p[3]; 49 | ceres::QuaternionRotatePoint(camera_R, point, p); 50 | p[0] += camera_T[0]; p[1] += camera_T[1]; p[2] += camera_T[2]; 51 | T xp = p[0] / p[2]; 52 | T yp = p[1] / p[2]; 53 | residuals[0] = xp - T(observed_u); 54 | residuals[1] = yp - T(observed_v); 55 | return true; 56 | } 57 | 58 | static ceres::CostFunction* Create(const double observed_x, 59 | const double observed_y) 60 | { 61 | return (new ceres::AutoDiffCostFunction< 62 | ReprojectionError3D, 2, 4, 3, 3>( 63 | new ReprojectionError3D(observed_x,observed_y))); 64 | } 65 | 66 | double observed_u; 67 | double observed_v; 68 | }; 69 | 70 | class GlobalSFM 71 | { 72 | public: 73 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 74 | 75 | GlobalSFM(); 76 | bool construct(int frame_num, Quaterniond* q, Vector3d* T, int l, 77 | const Matrix3d relative_R, const Vector3d relative_T, 78 | vector &sfm_f, map &sfm_tracked_points); 79 | 80 | private: 81 | bool solveFrameByPnP(Matrix3d &R_initial, Vector3d &P_initial, int i, vector &sfm_f); 82 | 83 
| void triangulatePoint(Eigen::Matrix &Pose0, Eigen::Matrix &Pose1, 84 | Vector2d &point0, Vector2d &point1, Vector3d &point_3d); 85 | void triangulateTwoFrames(int frame0, Eigen::Matrix &Pose0, 86 | int frame1, Eigen::Matrix &Pose1, 87 | vector &sfm_f); 88 | 89 | int feature_num; 90 | }; 91 | 92 | } 93 | 94 | 95 | #endif //INITIAL_SFM_H -------------------------------------------------------------------------------- /include/Initializer/solve_5pts.h: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-16. 3 | // Solving relative motion. 4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 5 | // 6 | 7 | #ifndef SOLVE_5PTS_H 8 | #define SOLVE_5PTS_H 9 | 10 | // #pragma once 11 | 12 | #include 13 | 14 | using namespace std; 15 | 16 | #include 17 | #include 18 | 19 | using namespace Eigen; 20 | 21 | 22 | namespace larvio { 23 | 24 | class MotionEstimator 25 | { 26 | public: 27 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 28 | 29 | bool solveRelativeRT(const vector> &corres, Matrix3d &R, Vector3d &T); 30 | }; 31 | 32 | } 33 | 34 | 35 | #endif //SOLVE_5PTS_H -------------------------------------------------------------------------------- /include/ORB/ORBDescriptor.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 18-8-27. 3 | // Functions for calculate ORB descriptor of an arbitrary image point. 4 | // Stems from ORB_SLAM2: ORBextractor.cc and opencv/orb.cpp 5 | // 6 | 7 | #ifndef LKTTRACKER_ORBDESCRIPTOR_H 8 | #define LKTTRACKER_ORBDESCRIPTOR_H 9 | 10 | // #include 11 | #include 12 | #include 13 | 14 | using namespace std; 15 | 16 | namespace larvio { 17 | 18 | class ORBdescriptor // this class is created by QXC based on ORB_SLAM2::ORBextractor 19 | { 20 | public: 21 | 22 | ORBdescriptor(const cv::Mat& _image, float _scaleFactor, int _nlevels, 23 | int _edgeThreshold=31, int _patchSize=31); 24 | 25 | ~ORBdescriptor(){} 26 | 27 | // modified based on ORBextractor::computeDescriptors by QXC 28 | // @INPUTs: 29 | // @pts: cv::Point object of key points in source image, i.e. image at level 0 30 | // @levels: level of @image in pyramid 31 | // @OUTPUTs 32 | // @descriptors: each row store the descriptor of corresponding key point 33 | // @BRIEF: 34 | // This function calculate the descriptors of keypoints of same level. 35 | // Should be called after calling function initializeLayerAndPyramid(). 
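MotionEstimator::solveRelativeRT above only declares the interface; the usual OpenCV route for this job is a RANSAC essential-matrix fit followed by the cheirality check in recoverPose. The sketch below assumes the correspondences are already in normalized coordinates (identity intrinsics), which is how the rest of LARVIO passes features around; it is not necessarily the exact code in solve_5pts.cpp:

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

// Sketch: recover R, t (up to scale) from normalized-coordinate correspondences.
bool relativeRT(const std::vector<cv::Point2f>& pts0,
                const std::vector<cv::Point2f>& pts1,
                cv::Mat& R, cv::Mat& t) {
    if (pts0.size() < 15) return false;   // too few correspondences to be reliable
    cv::Mat inlier_mask;
    cv::Mat E = cv::findEssentialMat(pts0, pts1,
                                     1.0 /*focal*/, cv::Point2d(0, 0) /*pp*/,
                                     cv::RANSAC, 0.99, 1e-3 /*threshold*/, inlier_mask);
    int inliers = cv::recoverPose(E, pts0, pts1, R, t,
                                  1.0, cv::Point2d(0, 0), inlier_mask);
    return inliers > 12;                  // illustrative acceptance threshold
}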
36 | bool computeDescriptors(const vector& pts, 37 | const vector& levels, 38 | cv::Mat& descriptors); 39 | 40 | // copy from ORBmatcher.cc, below is original comment 41 | // Bit set count operation from 42 | // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 43 | static int computeDescriptorDistance(const cv::Mat &a, const cv::Mat &b) 44 | { 45 | const int *pa = a.ptr(); 46 | const int *pb = b.ptr(); 47 | 48 | int dist=0; 49 | 50 | for(int i=0; i<8; i++, pa++, pb++) 51 | { 52 | unsigned int v = *pa ^ *pb; 53 | v = v - ((v >> 1) & 0x55555555); 54 | v = (v & 0x33333333) + ((v >> 2) & 0x33333333); 55 | dist += (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; 56 | } 57 | 58 | return dist; 59 | } 60 | 61 | static const float factorPI; 62 | 63 | static const int TH_HIGH; // threshold for worst descriptor distance in all levels 64 | static const int TH_LOW; // threshold for best descriptor distance in all levels 65 | 66 | private: 67 | 68 | static const int HARRIS_BLOCK_SIZE; 69 | 70 | int patchSize; 71 | int halfPatchSize; 72 | int edgeThreshold; 73 | 74 | double scaleFactor; 75 | int nlevels; 76 | 77 | vector pattern; 78 | vector umax; 79 | 80 | vector mvScaleFactor; 81 | vector mvInvScaleFactor; 82 | vector mvLayerInfo; 83 | vector mvLayerScale; 84 | cv::Mat mImagePyramid; 85 | cv::Mat mBluredImagePyramid; 86 | 87 | private: 88 | // compute descriptor of a key point and save it into @desc 89 | void computeOrbDescriptor(const cv::KeyPoint& kpt, 90 | uchar* desc); 91 | 92 | // copy from opencv/orb.cpp 93 | void makeRandomPattern(int patchSize, cv::Point* pattern_, int npoints); 94 | 95 | // initialize layer information and image of pyramid 96 | void initializeLayerAndPyramid(const cv::Mat& image); 97 | 98 | public: 99 | // calculate Angle for an ordinary point 100 | float IC_Angle(const int& levels, const cv::Point2f& pt); 101 | }; 102 | 103 | } 104 | 105 | 106 | #endif //LKTTRACKER_ORBDESCRIPTOR_H -------------------------------------------------------------------------------- /include/larvio/feature_msg.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-21. 3 | // Managing the image processer and the estimator. 4 | // 5 | 6 | #ifndef FEATURE_MSG_H 7 | #define FEATURE_MSG_H 8 | 9 | #include 10 | #include 11 | 12 | namespace larvio { 13 | 14 | // Measurement for one features 15 | class MonoFeatureMeasurement { 16 | public: 17 | MonoFeatureMeasurement() 18 | : id(0) 19 | , u(0.0) 20 | , v(0.0) 21 | , u_init(0.0) 22 | , v_init(0.0) 23 | , u_vel(0.0) 24 | , v_vel(0.0) 25 | , u_init_vel(0.0) 26 | , v_init_vel(0.0) { 27 | } 28 | 29 | // id 30 | unsigned long long int id; 31 | // Normalized feature coordinates (with identity intrinsic matrix) 32 | double u; // horizontal coordinate 33 | double v; // vertical coordinate 34 | // Normalized feature coordinates (with identity intrinsic matrix) in initial frame of this feature 35 | //# (meaningful if this is the first msg of this feature id) 36 | double u_init; 37 | double v_init; 38 | // Velocity of current normalized feature coordinate 39 | double u_vel; 40 | double v_vel; 41 | // Velocity of initial normalized feature coordinate 42 | double u_init_vel; 43 | double v_init_vel; 44 | }; 45 | 46 | // Measurements for features in one camera img 47 | class MonoCameraMeasurement { 48 | public: 49 | double timeStampToSec; 50 | // All features on the current image, 51 | // including tracked ones and newly detected ones. 
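computeDescriptorDistance above is the 256-bit ORB Hamming distance computed with the parallel bit-count trick, so it expects two 1x32 CV_8U descriptor rows read as eight 32-bit words. A small, hypothetical usage sketch with the TH_LOW threshold declared in this class — the real matching logic lives in image_processor.cpp:

#include <opencv2/core.hpp>
#include "ORB/ORBDescriptor.h"

// Keep a tracked feature only if its descriptor in the current image is
// still close (in Hamming distance) to the descriptor stored previously.
bool descriptorStillMatches(const cv::Mat& prev_desc, const cv::Mat& curr_desc) {
    int dist = larvio::ORBdescriptor::computeDescriptorDistance(prev_desc, curr_desc);
    return dist <= larvio::ORBdescriptor::TH_LOW;
}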
52 | std::vector features; 53 | }; 54 | 55 | // typedef boost::shared_ptr MonoCameraMeasurementPtr; 56 | typedef MonoCameraMeasurement* MonoCameraMeasurementPtr; 57 | 58 | } 59 | 60 | #endif //FEATURE_MSG_H -------------------------------------------------------------------------------- /include/larvio/image_processor.h: -------------------------------------------------------------------------------- 1 | /* 2 | * COPYRIGHT AND PERMISSION NOTICE 3 | * Penn Software MSCKF_VIO 4 | * Copyright (C) 2017 The Trustees of the University of Pennsylvania 5 | * All rights reserved. 6 | */ 7 | 8 | // The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/) 9 | // Tremendous changes have been made to use it in LARVIO 10 | 11 | 12 | #ifndef IMAGE_PROCESSOR_H 13 | #define IMAGE_PROCESSOR_H 14 | 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | #include 24 | 25 | #include 26 | 27 | #include "sensors/ImuData.hpp" 28 | #include "sensors/ImageData.hpp" 29 | 30 | namespace larvio { 31 | 32 | /* 33 | * @brief ImageProcessor Detects and tracks features 34 | * in image sequences. 35 | */ 36 | class ImageProcessor { 37 | public: 38 | // Constructor 39 | ImageProcessor(std::string& config_file_); 40 | // Disable copy and assign constructors. 41 | ImageProcessor(const ImageProcessor&) = delete; 42 | ImageProcessor operator=(const ImageProcessor&) = delete; 43 | 44 | // Destructor 45 | ~ImageProcessor(); 46 | 47 | // Initialize the object. 48 | bool initialize(); 49 | 50 | /* 51 | * @brief processImage 52 | * Processing function for the monocular images. 53 | * @param image msg. 54 | * @param imu msg buffer. 55 | * @return true if have feature msg. 56 | */ 57 | bool processImage( 58 | const ImageDataPtr& msg, 59 | const std::vector& imu_msg_buffer, 60 | MonoCameraMeasurementPtr features); 61 | 62 | // Get publish image. 63 | cv::Mat getVisualImg() { 64 | return visual_img; 65 | } 66 | 67 | typedef boost::shared_ptr Ptr; 68 | typedef boost::shared_ptr ConstPtr; 69 | 70 | private: 71 | 72 | /* 73 | * @brief ProcessorConfig Configuration parameters for 74 | * feature detection and tracking. 75 | */ 76 | struct ProcessorConfig { 77 | int pyramid_levels; 78 | int patch_size; 79 | int fast_threshold; 80 | int max_iteration; 81 | double track_precision; 82 | double ransac_threshold; 83 | 84 | int max_features_num; 85 | int min_distance; 86 | bool flag_equalize; 87 | 88 | int img_rate; 89 | int pub_frequency; 90 | }; 91 | 92 | /* 93 | * @brief FeatureIDType An alias for unsigned long long int. 94 | */ 95 | typedef unsigned long long int FeatureIDType; 96 | 97 | /* 98 | * @brief loadParameters 99 | * Load parameters from the parameter server. 100 | */ 101 | bool loadParameters(); 102 | 103 | /* 104 | * @brief integrateImuData Integrates the IMU gyro readings 105 | * between the two consecutive images, which is used for 106 | * both tracking prediction and 2-point RANSAC. 107 | * @param imu msg buffer 108 | * @return cam_R_p2c: a rotation matrix which takes a vector 109 | * from previous cam0 frame to current cam0 frame. 110 | */ 111 | void integrateImuData(cv::Matx33f& cam_R_p2c, 112 | const std::vector& imu_msg_buffer); 113 | 114 | /* 115 | * @brief predictFeatureTracking Compensates the rotation 116 | * between consecutive camera frames so that feature 117 | * tracking would be more robust and fast. 118 | * @param input_pts: features in the previous image to be tracked. 
119 | * @param R_p_c: a rotation matrix takes a vector in the previous 120 | * camera frame to the current camera frame. 121 | * @param intrinsics: intrinsic matrix of the camera. 122 | * @return compensated_pts: predicted locations of the features 123 | * in the current image based on the provided rotation. 124 | * 125 | * Note that the input and output points are of pixel coordinates. 126 | */ 127 | void predictFeatureTracking( 128 | const std::vector& input_pts, 129 | const cv::Matx33f& R_p_c, 130 | const cv::Vec4d& intrinsics, 131 | std::vector& compenstated_pts); 132 | 133 | /* 134 | * @brief initializeFirstFrame 135 | * Initialize the image processing sequence, which is 136 | * bascially detect new features on the first image. 137 | */ 138 | bool initializeFirstFrame(); 139 | 140 | /* 141 | * @brief initializeFirstFeatures 142 | * Initialize the image processing sequence, which is 143 | * bascially detect new features on the first set of 144 | * stereo images. 145 | * @param imu msg buffer 146 | */ 147 | bool initializeFirstFeatures( 148 | const std::vector& imu_msg_buffer); 149 | 150 | /* 151 | * @brief findNewFeaturesToBeTracked 152 | * Find new features in current image to be tracked, 153 | * until being tracked successfully in next image, 154 | * features found in this function would not be valid 155 | * features. 156 | */ 157 | void findNewFeaturesToBeTracked(); 158 | 159 | /* 160 | * @brief trackFeatures 161 | * Tracker features on the newly received image. 162 | */ 163 | void trackFeatures(); 164 | 165 | /* 166 | * @brief trackNewFeatures 167 | * Track new features extracted in last image. 168 | */ 169 | void trackNewFeatures(); 170 | 171 | /* 172 | * @brief publish 173 | * Publish the features on the current image including 174 | * both the tracked and newly detected ones. 175 | */ 176 | void publish(); 177 | 178 | /* 179 | * @brief createImagePyramids 180 | * Create image pyramids used for klt tracking. 181 | */ 182 | void createImagePyramids(); 183 | 184 | /* 185 | * @brief undistortPoints Undistort points based on camera 186 | * calibration intrinsics and distort model. 187 | * @param pts_in: input distorted points. 188 | * @param intrinsics: intrinsics of the camera. 189 | * @param distortion_model: distortion model of the camera. 190 | * @param distortion_coeffs: distortion coefficients. 191 | * @param rectification_matrix: matrix to rectify undistorted points. 192 | * @param new_intrinsics: intrinsics on new camera for 193 | * undistorted points to projected into. 194 | * @return pts_out: undistorted points. 195 | */ 196 | void undistortPoints( 197 | const std::vector& pts_in, 198 | const cv::Vec4d& intrinsics, 199 | const std::string& distortion_model, 200 | const cv::Vec4d& distortion_coeffs, 201 | std::vector& pts_out, 202 | const cv::Matx33d &rectification_matrix = cv::Matx33d::eye(), 203 | const cv::Vec4d &new_intrinsics = cv::Vec4d(1,1,0,0)); 204 | 205 | /* 206 | * @brief removeUnmarkedElements Remove the unmarked elements 207 | * within a vector. 208 | * @param raw_vec: vector with outliers. 209 | * @param markers: 0 will represent a outlier, 1 will be an inlier. 210 | * @return refined_vec: a vector without outliers. 211 | * 212 | * Note that the order of the inliers in the raw_vec is perserved 213 | * in the refined_vec. 
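predictFeatureTracking above seeds the KLT search with an IMU-predicted location so that tracking stays robust under fast rotation. The usual way to do this — and what the sketch below assumes, without claiming it is the exact code in image_processor.cpp — is to warp each previous pixel through the infinite homography K·R_p_c·K⁻¹:

#include <vector>
#include <opencv2/core.hpp>

// Sketch: predict current-frame pixel locations from previous-frame pixels
// using only the integrated gyro rotation R_p_c (previous -> current camera).
void predictWithRotation(const std::vector<cv::Point2f>& prev_pts,
                         const cv::Matx33f& R_p_c,
                         const cv::Vec4d& intrinsics,   // fx, fy, cx, cy
                         std::vector<cv::Point2f>& predicted_pts) {
    const float fx = intrinsics[0], fy = intrinsics[1];
    const float cx = intrinsics[2], cy = intrinsics[3];
    cv::Matx33f K(fx, 0.0f, cx,
                  0.0f, fy, cy,
                  0.0f, 0.0f, 1.0f);
    cv::Matx33f H = K * R_p_c * K.inv();                // infinite homography
    predicted_pts.resize(prev_pts.size());
    for (size_t i = 0; i < prev_pts.size(); ++i) {
        cv::Vec3f p(prev_pts[i].x, prev_pts[i].y, 1.0f);
        cv::Vec3f q = H * p;
        predicted_pts[i] = cv::Point2f(q[0] / q[2], q[1] / q[2]);
    }
}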
214 | */ 215 | template 216 | void removeUnmarkedElements( 217 | const std::vector& raw_vec, 218 | const std::vector& markers, 219 | std::vector& refined_vec) { 220 | if (raw_vec.size() != markers.size()) { 221 | for (int i = 0; i < raw_vec.size(); ++i) 222 | refined_vec.push_back(raw_vec[i]); 223 | return; 224 | } 225 | for (int i = 0; i < markers.size(); ++i) { 226 | if (markers[i] == 0) continue; 227 | refined_vec.push_back(raw_vec[i]); 228 | } 229 | return; 230 | } 231 | 232 | /* 233 | * @brief rescalePoints Rescale image coordinate of pixels to gain 234 | * numerical stability. 235 | * @param pts1: an array of image coordinate of some pixels, 236 | * they will be rescaled. 237 | * @param pts2: corresponding image coordinate of some pixels 238 | * in another image as pts2, they will be rescaled. 239 | * @return scaling_factor: scaling factor of the rescaling process. 240 | */ 241 | void rescalePoints( 242 | std::vector& pts1, 243 | std::vector& pts2, 244 | float& scaling_factor); 245 | 246 | /* 247 | * @brief getFeatureMsg Get processed feature msg. 248 | * @return pointer for processed features 249 | */ 250 | void getFeatureMsg(MonoCameraMeasurementPtr features); 251 | 252 | // Enum type for image state. 253 | enum eImageState { 254 | FIRST_IMAGE = 1, 255 | SECOND_IMAGE = 2, 256 | OTHER_IMAGES = 3 257 | }; 258 | 259 | // Indicate if this is the first or second image message. 260 | eImageState image_state; 261 | 262 | // ID for the next new feature. 263 | FeatureIDType next_feature_id; 264 | 265 | // Feature detector 266 | ProcessorConfig processor_config; 267 | 268 | // Camera calibration parameters 269 | std::string cam_distortion_model; 270 | cv::Vec2i cam_resolution; 271 | cv::Vec4d cam_intrinsics; 272 | cv::Vec4d cam_distortion_coeffs; 273 | 274 | // Take a vector from cam frame to the IMU frame. 275 | cv::Matx33d R_cam_imu; 276 | cv::Vec3d t_cam_imu; 277 | 278 | // Take a vector from prev cam frame to curr cam frame 279 | cv::Matx33f R_Prev2Curr; 280 | 281 | // Previous and current images 282 | ImageDataPtr prev_img_ptr; 283 | ImageDataPtr curr_img_ptr; 284 | 285 | // Pyramids for previous and current image 286 | std::vector prev_pyramid_; 287 | std::vector curr_pyramid_; 288 | 289 | // Number of features after each outlier removal step. 
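removeUnmarkedElements above is the inlier-compaction helper run after every outlier-rejection step. A short usage sketch in the style it is meant for (member-function context assumed; the marker vector is the 0/1 status output of cv::calcOpticalFlowPyrLK or a RANSAC routine, and the element types follow the member declarations further below):

// Sketch, inside an ImageProcessor member function:
std::vector<unsigned char> track_status;           // 0 = lost/outlier, 1 = tracked
// ... filled by cv::calcOpticalFlowPyrLK or two-point RANSAC ...
std::vector<cv::Point2f>   surviving_prev, surviving_curr;
std::vector<FeatureIDType> surviving_ids;
removeUnmarkedElements(prev_pts_, track_status, surviving_prev);
removeUnmarkedElements(curr_pts_, track_status, surviving_curr);
removeUnmarkedElements(pts_ids_,  track_status, surviving_ids);
// The three outputs stay index-aligned because the element order is preserved.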
290 | int before_tracking; 291 | int after_tracking; 292 | int after_ransac; 293 | 294 | // Config file path 295 | std::string config_file; 296 | 297 | // Image for visualization 298 | cv::Mat visual_img; 299 | 300 | // Points for tracking, added by QXC 301 | std::vector new_pts_; 302 | std::vector prev_pts_; 303 | std::vector curr_pts_; 304 | std::vector pts_ids_; 305 | std::vector pts_lifetime_; 306 | std::vector init_pts_; 307 | 308 | // Time of last published image 309 | double last_pub_time; 310 | double curr_img_time; 311 | double prev_img_time; 312 | 313 | // Publish counter 314 | long pub_counter; 315 | 316 | // debug log: 317 | std::string output_dir; 318 | 319 | // ORB descriptor pointer, added by QXC 320 | boost::shared_ptr prevORBDescriptor_ptr; 321 | boost::shared_ptr currORBDescriptor_ptr; 322 | std::vector vOrbDescriptors; 323 | 324 | // flag for first useful image msg 325 | bool bFirstImg; 326 | }; 327 | 328 | typedef ImageProcessor::Ptr ImageProcessorPtr; 329 | typedef ImageProcessor::ConstPtr ImageProcessorConstPtr; 330 | 331 | } // end namespace larvio 332 | 333 | #endif 334 | -------------------------------------------------------------------------------- /include/larvio/imu_state.h: -------------------------------------------------------------------------------- 1 | /* 2 | * COPYRIGHT AND PERMISSION NOTICE 3 | * Penn Software MSCKF_VIO 4 | * Copyright (C) 2017 The Trustees of the University of Pennsylvania 5 | * All rights reserved. 6 | */ 7 | 8 | // The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/) 9 | // Some changes have been made to use it in LARVIO 10 | 11 | 12 | #ifndef IMU_STATE_H 13 | #define IMU_STATE_H 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | 20 | #define GRAVITY_ACCELERATION 9.81 21 | 22 | namespace larvio { 23 | 24 | typedef long long int FeatureIDType; 25 | 26 | /* 27 | * @brief IMUState State for IMU 28 | */ 29 | struct IMUState { 30 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 31 | typedef long long int StateIDType; 32 | 33 | // An unique identifier for the IMU state. 34 | StateIDType id; 35 | 36 | // id for next IMU state 37 | static StateIDType next_id; 38 | 39 | // Time when the state is recorded 40 | double time; 41 | 42 | // Time interval to the nearest image 43 | double dt; 44 | 45 | // Orientation 46 | // Take a quaternion from the world frame to 47 | // the IMU (body) frame. 48 | Eigen::Vector4d orientation; 49 | 50 | // Position of the IMU (body) frame 51 | // in the world frame. 52 | Eigen::Vector3d position; 53 | 54 | // Velocity of the IMU (body) frame 55 | // in the world frame. 56 | Eigen::Vector3d velocity; 57 | 58 | // Bias for measured angular velocity 59 | // and acceleration. 60 | Eigen::Vector3d gyro_bias; 61 | Eigen::Vector3d acc_bias; 62 | 63 | // Transformation between the IMU and the 64 | // left camera (cam0) 65 | Eigen::Matrix3d R_imu_cam0; 66 | Eigen::Vector3d t_cam0_imu; 67 | 68 | // Gravity vector in the world frame 69 | static Eigen::Vector3d gravity; 70 | 71 | // Transformation offset from the IMU frame to 72 | // the body frame. The transformation takes a 73 | // vector from the IMU frame to the body frame. 74 | // The z axis of the body frame should point upwards. 75 | // Normally, this transform should be identity. 
76 | static Eigen::Isometry3d T_imu_body; 77 | 78 | IMUState(): id(0), time(0), 79 | orientation(Eigen::Vector4d(0, 0, 0, 1)), 80 | position(Eigen::Vector3d::Zero()), 81 | velocity(Eigen::Vector3d::Zero()), 82 | gyro_bias(Eigen::Vector3d::Zero()), 83 | acc_bias(Eigen::Vector3d::Zero()) {} 84 | 85 | IMUState(const StateIDType& new_id): id(new_id), time(0), 86 | orientation(Eigen::Vector4d(0, 0, 0, 1)), 87 | position(Eigen::Vector3d::Zero()), 88 | velocity(Eigen::Vector3d::Zero()), 89 | gyro_bias(Eigen::Vector3d::Zero()), 90 | acc_bias(Eigen::Vector3d::Zero()) {} 91 | 92 | }; 93 | 94 | typedef IMUState::StateIDType StateIDType; 95 | 96 | /* 97 | * @brief IMUState_aug Augmented state for IMU, including only orientation and position 98 | */ 99 | struct IMUState_Aug { 100 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 101 | 102 | // An unique identifier for the IMU state. 103 | StateIDType id; 104 | 105 | // Time when the state is recorded 106 | double time; 107 | 108 | // Time interval to the nearest image 109 | double dt; 110 | 111 | // Orientation 112 | // Take a quaternion from the world frame to 113 | // the IMU (body) frame. 114 | Eigen::Vector4d orientation; 115 | 116 | // Position of the IMU (body) frame 117 | // in the world frame. 118 | Eigen::Vector3d position; 119 | 120 | // First estimated Jacobian of position; 121 | Eigen::Vector3d position_FEJ; 122 | 123 | // Transformation between the IMU and the 124 | // left camera (cam0) 125 | Eigen::Matrix3d R_imu_cam0; 126 | Eigen::Vector3d t_cam0_imu; 127 | 128 | // Corresponding camera pose 129 | Eigen::Vector4d orientation_cam; 130 | Eigen::Vector3d position_cam; 131 | 132 | // Store the feature id whose anchor frame is 133 | // corresponding to this imu state. 134 | std::vector FeatureIDs; 135 | 136 | IMUState_Aug(): id(0), time(0), 137 | orientation(Eigen::Vector4d(0, 0, 0, 1)), 138 | position(Eigen::Vector3d::Zero()) {} 139 | 140 | IMUState_Aug(const StateIDType& new_id): id(new_id), time(0), 141 | orientation(Eigen::Vector4d(0, 0, 0, 1)), 142 | position(Eigen::Vector3d::Zero()) {} 143 | }; 144 | 145 | // for augmented imu states, added by QXC 146 | typedef std::map, 147 | Eigen::aligned_allocator< 148 | std::pair > > IMUStateServer; 149 | 150 | } // namespace larvio 151 | 152 | #endif // IMU_STATE_H 153 | -------------------------------------------------------------------------------- /include/larvio/larvio.h: -------------------------------------------------------------------------------- 1 | /* 2 | * COPYRIGHT AND PERMISSION NOTICE 3 | * Penn Software MSCKF_VIO 4 | * Copyright (C) 2017 The Trustees of the University of Pennsylvania 5 | * All rights reserved. 
6 | */ 7 | 8 | // The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/) 9 | // Tremendous changes have been made to use it in LARVIO 10 | 11 | #ifndef LARVIO_H 12 | #define LARVIO_H 13 | 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include "imu_state.h" 23 | #include "feature.hpp" 24 | #include 25 | 26 | #include 27 | #include 28 | 29 | #include 30 | 31 | #include "Initializer/FlexibleInitializer.h" 32 | 33 | #include "sensors/ImuData.hpp" 34 | 35 | namespace larvio { 36 | 37 | class LarVio { 38 | public: 39 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 40 | 41 | // Constructor 42 | LarVio(std::string& config_file_); 43 | // Disable copy and assign constructor 44 | LarVio(const LarVio&) = delete; 45 | LarVio operator=(const LarVio&) = delete; 46 | 47 | // Destructor 48 | ~LarVio(); 49 | 50 | /* 51 | * @brief initialize Initialize the VIO. 52 | */ 53 | bool initialize(); 54 | 55 | /* 56 | * @brief reset Resets the VIO to initial status. 57 | */ 58 | void reset(); 59 | 60 | /* 61 | * @brief processFeatures 62 | * Filtering by imu and feature measurements. 63 | * @param msg Mono feature measurements. 64 | * @param imu_msg_buffer Imu msg buffer. 65 | * @return true if publish msgs 66 | */ 67 | bool processFeatures(MonoCameraMeasurementPtr msg, 68 | std::vector& imu_msg_buffer); 69 | 70 | // Get pose 71 | Eigen::Isometry3d getTbw(); 72 | 73 | // Get velocity 74 | Eigen::Vector3d getVel(); 75 | 76 | // Get pose covariance 77 | Eigen::Matrix getPpose(); 78 | 79 | // Get velocity covariance 80 | Eigen::Matrix3d getPvel(); 81 | 82 | // Get poses of augmented IMU states 83 | void getSwPoses(vector& swPoses); 84 | 85 | // Get position of map points 86 | void getStableMapPointPositions(std::map& mMapPoints); 87 | void getActiveeMapPointPositions(std::map& mMapPoints); 88 | 89 | typedef boost::shared_ptr Ptr; 90 | typedef boost::shared_ptr ConstPtr; 91 | 92 | private: 93 | /* 94 | * @brief StateServer Store one IMU states and several 95 | * augmented IMU and mappoint states for constructing measurement 96 | * model. 97 | * and FEJ added by QXC 98 | */ 99 | struct StateServer { 100 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 101 | 102 | IMUState imu_state; 103 | 104 | // first estimated imu state for calculate Jacobian 105 | IMUState imu_state_FEJ_now; 106 | IMUState imu_state_FEJ_old; 107 | 108 | // last imu state 109 | IMUState imu_state_old; 110 | 111 | // augmented imu states and first estimated linearized point 112 | IMUStateServer imu_states_augment; 113 | 114 | // timestamp compensation: t_imu = t_cam + td, timestamp of camera has to add @td to synchronize with clock of imu 115 | double td; 116 | 117 | // augmmented feature states 118 | std::vector feature_states; 119 | 120 | // State covariance matrix 121 | Eigen::MatrixXd state_cov; 122 | Eigen::Matrix continuous_noise_cov; 123 | 124 | // Imu instrinsics 125 | Eigen::Matrix3d Ma; 126 | Eigen::Matrix3d Tg; 127 | Eigen::Matrix3d As; 128 | // subset vector for imu instrinsics matrice 129 | Eigen::Vector3d M1; 130 | Eigen::Vector3d M2; 131 | Eigen::Vector3d T1; 132 | Eigen::Vector3d T2; 133 | Eigen::Vector3d T3; 134 | Eigen::Vector3d A1; 135 | Eigen::Vector3d A2; 136 | Eigen::Vector3d A3; 137 | 138 | // nuisance imu states 139 | std::vector nui_ids; 140 | IMUStateServer nui_imu_states; 141 | std::map> nui_features; 142 | }; 143 | 144 | 145 | /* 146 | * @brief loadParameters 147 | * Load parameters from the parameter server. 
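The public interface above (initialize, processFeatures, getTbw, ...) pairs with ImageProcessor::processImage shown earlier. A condensed, hypothetical driver loop, assuming the stripped buffer types are std::vector<ImuData>, that ImageDataPtr wraps ImgData, and that a configuration path plus time-sorted IMU/image data are supplied by the caller — the real wiring lives in app/larvioMain.cpp and the ROS nodelet:

#include <string>
#include <vector>
#include <boost/make_shared.hpp>
#include "larvio/image_processor.h"
#include "larvio/larvio.h"

// Hypothetical single-threaded pipeline: front end tracks features,
// back end runs the hybrid MSCKF/EKF-SLAM filter.
void runPipeline(std::string config,
                 const std::vector<larvio::ImgData>& images,
                 std::vector<larvio::ImuData>& imu_buffer) {
    larvio::ImageProcessor frontend(config);
    larvio::LarVio backend(config);
    if (!frontend.initialize() || !backend.initialize()) return;

    for (const auto& img : images) {
        larvio::ImageDataPtr msg = boost::make_shared<larvio::ImgData>(img);
        larvio::MonoCameraMeasurement features;
        if (!frontend.processImage(msg, imu_buffer, &features)) continue;
        if (backend.processFeatures(&features, imu_buffer)) {
            Eigen::Isometry3d T_bw = backend.getTbw();   // current body pose
            (void)T_bw;                                  // publish / visualize here
        }
    }
}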
148 | */ 149 | bool loadParameters(); 150 | 151 | // Filter related functions 152 | // Propogate the state 153 | void batchImuProcessing(const double& time_bound, 154 | std::vector& imu_msg_buffer); 155 | void processModel(const double& time, 156 | const Eigen::Vector3d& m_gyro, 157 | const Eigen::Vector3d& m_acc); 158 | void predictNewState(const double& dt, 159 | const Eigen::Vector3d& gyro, 160 | const Eigen::Vector3d& acc); 161 | // void predictNewState_(const double& dt, 162 | // const Eigen::Vector3d& gyro, const Eigen::Vector3d& acc, 163 | // const Eigen::Vector3d& gyro_old, const Eigen::Vector3d& acc_old); 164 | 165 | // Measurement update 166 | void stateAugmentation(); 167 | void addFeatureObservations(MonoCameraMeasurementPtr msg); 168 | // This function is used to compute the measurement Jacobian 169 | // for a single feature observed at a single camera frame. 170 | void measurementJacobian_msckf(const StateIDType& state_id, 171 | const FeatureIDType& feature_id, 172 | Eigen::Matrix& H_x, 173 | Eigen::Matrix& H_e, 174 | Eigen::Matrix& H_f, 175 | Eigen::Vector2d& r); 176 | // This function computes the Jacobian of all measurements viewed 177 | // in the given camera states of this feature. 178 | void featureJacobian_msckf(const FeatureIDType& feature_id, 179 | const std::vector& state_ids, 180 | Eigen::MatrixXd& H_x, Eigen::VectorXd& r); 181 | void measurementUpdate_msckf(const Eigen::MatrixXd& H, 182 | const Eigen::VectorXd& r); 183 | bool gatingTest(const Eigen::MatrixXd& H, 184 | const Eigen::VectorXd&r, const int& dof); 185 | void removeLostFeatures(); 186 | void findRedundantImuStates( 187 | std::vector& rm_state_ids); 188 | void pruneImuStateBuffer(); 189 | 190 | // Chi squared test table. 191 | std::map chi_squared_test_table; 192 | 193 | // State vector 194 | StateServer state_server; 195 | // Maximum number of sliding window states 196 | int sw_size; 197 | 198 | // Features used 199 | MapServer map_server; 200 | 201 | // Features pass through the gating test, 202 | // and are truely used in measurement update. 203 | // For visualization only. 204 | MapServer publish_features; 205 | 206 | // Lost SLAM features. 207 | // For visualization only. 208 | MapServer lost_slam_features; 209 | 210 | // Active SLAM features. 211 | // For visualization only. 212 | MapServer active_slam_features; 213 | 214 | // Indicate if the gravity vector is set. 215 | bool is_gravity_set; 216 | 217 | // Indicate if the received image is the first one. The 218 | // system will start after receiving the first image. 219 | bool is_first_img; 220 | 221 | // Sensors measurement noise parameters 222 | double imu_gyro_noise; 223 | double imu_acc_noise; 224 | double imu_gyro_bias_noise; 225 | double imu_acc_bias_noise; 226 | double feature_observation_noise; 227 | 228 | // The position uncertainty threshold is used to determine 229 | // when to reset the system online. Otherwise, the ever- 230 | // increaseing uncertainty will make the estimation unstable. 231 | // Note this online reset will be some dead-reckoning. 232 | // Set this threshold to nonpositive to disable online reset. 
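gatingTest and chi_squared_test_table above implement the standard Mahalanobis-distance check that every candidate measurement block must pass before it is allowed to update the filter. A hedged sketch of how such a table is commonly precomputed with Boost.Math and how the test is evaluated — the confidence level and noise handling in larvio.cpp may differ:

#include <map>
#include <Eigen/Dense>
#include <boost/math/distributions/chi_squared.hpp>

// Precompute chi-square thresholds (95% confidence, illustrative) for 1..100 DoF.
std::map<int, double> buildChiSquaredTable() {
    std::map<int, double> table;
    for (int dof = 1; dof <= 100; ++dof) {
        boost::math::chi_squared dist(dof);
        table[dof] = boost::math::quantile(dist, 0.95);
    }
    return table;
}

// Accept (H, r) only if gamma = r^T (H P H^T + sigma^2 I)^{-1} r is below the
// tabulated threshold for the given degrees of freedom.
bool passGatingTest(const Eigen::MatrixXd& H, const Eigen::VectorXd& r,
                    const Eigen::MatrixXd& P, double obs_noise_var,
                    const std::map<int, double>& table, int dof) {
    Eigen::MatrixXd S = H * P * H.transpose()
        + obs_noise_var * Eigen::MatrixXd::Identity(H.rows(), H.rows());
    double gamma = r.transpose() * S.ldlt().solve(r);
    return gamma < table.at(dof);
}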
233 | double position_std_threshold; 234 | 235 | // Tracking rate 236 | double tracking_rate; 237 | 238 | // Threshold for determine keyframes 239 | double translation_threshold; 240 | double rotation_threshold; 241 | double tracking_rate_threshold; 242 | 243 | // Maximum tracking length for a feature, added by QXC 244 | int max_track_len; 245 | 246 | // FEJ reset time interval threshold 247 | double reset_fej_threshold; 248 | 249 | // Config file path 250 | std::string config_file; 251 | 252 | // Rate of features published by front-end. This variable is 253 | // used to determine the timing threshold of 254 | // each iteration of the filter. 255 | // And decide how many imgs to be used for inclinometer-initializer. 256 | double features_rate; 257 | 258 | // Rate of imu msg. 259 | double imu_rate; 260 | // Threshold for deciding which imu is at the same time with a img frame 261 | double imu_img_timeTh; 262 | 263 | // QXC: Last m_gyro and m_acc 264 | Eigen::Vector3d m_gyro_old; 265 | Eigen::Vector3d m_acc_old; 266 | 267 | // Take off stamp, added by QXC 268 | double take_off_stamp; 269 | 270 | // receive config flag for if applicate FEJ, added by QXC 271 | bool if_FEJ_config; 272 | 273 | // If applicate FEJ, added by QXC 274 | bool if_FEJ; 275 | 276 | // least observation number for a valid feature, added by QXC 277 | int least_Obs_Num; 278 | 279 | // time of last update, added by QXC 280 | double last_update_time; 281 | 282 | // time of last ZUPT 283 | double last_ZUPT_time; 284 | 285 | // reset First Estimate Point to current estimate if last update is far away, added by QXC 286 | void resetFejPoint(); 287 | 288 | // store the distances of matched feature coordinates of current and previous image, for ZUPT(zero velocity update), added by QXC 289 | std::list coarse_feature_dis; 290 | 291 | // ZUPT relevant, added by QXC 292 | bool if_ZUPT_valid; // use ZUPT in MSCKF 293 | bool if_ZUPT; // if ZUPT applied in current turn 294 | bool checkZUPT(); // check if need ZUPT 295 | double zupt_max_feature_dis; // threshold for velocity detection 296 | void measurementUpdate_ZUPT_vpq(); // measurement function for ZUPT, with measurement of velocity, relative position and relative quaterinion 297 | double zupt_noise_v; // diagonal element of zupt_v noise corviance 298 | double zupt_noise_p; // diagonal element of zupt_p noise corviance 299 | double zupt_noise_q; // diagonal element of zupt_q noise corviance 300 | Eigen::Vector3d meanSForce; // store the mean of accelerator between two frames 301 | 302 | // debug log: filtering 303 | std::ofstream fImuState; 304 | std::ofstream fTakeOffStamp; 305 | 306 | // debug log: the path for output files 307 | std::string output_dir; 308 | 309 | // flag for first useful image features 310 | bool bFirstFeatures; 311 | 312 | // Flexible initializer 313 | float Static_Duration; // static scene duration for inclinometer-initializer to utlize, in seconds. 314 | int Static_Num; // static img mumber for inclinometer-initializer to utlize, calculate according to @Static_Duration and @features_rate 315 | boost::shared_ptr flexInitPtr; 316 | 317 | // input for initialize td 318 | double td_input; 319 | 320 | // if estimate td 321 | bool estimate_td; 322 | 323 | // if estimate extrinsic 324 | bool estimate_extrin; 325 | 326 | // This function is used to compute the EKF-SLAM measurement Jacobian 327 | // for a single feature observed at a single camera frame. 328 | // 3d idp version. 
329 | void measurementJacobian_ekf_3didp(const StateIDType& state_id, 330 | const FeatureIDType& feature_id, 331 | Eigen::Matrix& H_f, 332 | Eigen::Matrix& H_a, 333 | Eigen::Matrix& H_x, 334 | Eigen::Matrix& H_e, 335 | Eigen::Vector2d& r); 336 | // This function is used to compute the EKF-SLAM measurement Jacobian 337 | // for a single feature observed at a single camera frame. 338 | // 1d idp version. 339 | void measurementJacobian_ekf_1didp(const StateIDType& state_id, 340 | const FeatureIDType& feature_id, 341 | Eigen::Matrix& H_f, 342 | Eigen::Matrix& H_a, 343 | Eigen::Matrix& H_x, 344 | Eigen::Matrix& H_e, 345 | Eigen::Vector2d& r); 346 | // This function computes the Jacobian of newly added EKF-SLAM features 347 | void featureJacobian_ekf_new(const FeatureIDType& feature_id, 348 | const std::vector& state_ids, 349 | Eigen::MatrixXd& H_x, Eigen::VectorXd& r); 350 | // This function computes the Jacobian of EKF-SLAM features 351 | void featureJacobian_ekf(const FeatureIDType& feature_id, 352 | Eigen::MatrixXd& H_x, Eigen::Vector2d& r); 353 | // Measurement update function for the hybrid VIO 354 | void measurementUpdate_hybrid( 355 | const Eigen::MatrixXd& H_ekf_new, const Eigen::VectorXd& r_ekf_new, 356 | const Eigen::MatrixXd& H_ekf, const Eigen::VectorXd& r_ekf, 357 | const Eigen::MatrixXd& H_msckf, const Eigen::VectorXd& r_msckf); 358 | // Update feature covariance if its anchor is to be removed 359 | // 3d idp version 360 | void updateFeatureCov_3didp(const FeatureIDType& feature_id, 361 | const StateIDType& old_state_id, const StateIDType& new_state_id); 362 | // Update feature covariance if its anchor is to be removed 363 | // 1d idp version 364 | void updateFeatureCov_1didp(const FeatureIDType& feature_id, 365 | const StateIDType& old_state_id, const StateIDType& new_state_id); 366 | // Update the covariance matrix when EKF features are lost 367 | void rmLostFeaturesCov(const std::vector& lost_ids); 368 | // Maximum number of ekf features in a single grid 369 | int max_features; 370 | 371 | // For feature point distribution, we need to know the camera intrinsics and image resolution 372 | std::vector cam_resolution; 373 | std::vector cam_intrinsics; 374 | // Numbers of rows and cols used to make grids for feature point distribution. 375 | // If one of these numbers is zero, then apply the pure msckf. 376 | // Now max_features will be decided by grid_rows*grid_cols.
377 | int grid_rows, grid_cols; 378 | // Boundary of feature measurement coordinate 379 | double x_min, y_min, x_max, y_max; 380 | // Width and height of each grid 381 | double grid_height, grid_width; 382 | // Map to store the id of current features in state according to their code 383 | std::map> grid_map; 384 | // Update imformation of grid map 385 | void updateGridMap(); 386 | // Delete redundant features in grid map 387 | static bool compareByObsNum( 388 | const std::pair& lhs, const std::pair& rhs) { 389 | return lhs.second < rhs.second; 390 | } 391 | void delRedundantFeatures(); 392 | 393 | // Parameters to control if using "1d ipd" or "3d idp for augmented features 394 | int feature_idp_dim; 395 | 396 | // Function to select the new anchor frame 397 | StateIDType getNewAnchorId(Feature& feature, const std::vector& rmIDs); 398 | 399 | // if calibrate imu instrinsic online 400 | bool calib_imu; 401 | 402 | // Dimension of legacy error state 403 | int LEG_DIM; 404 | 405 | // Function to calculate error state transition equation 406 | void calPhi(Eigen::MatrixXd& Phi, const double& dtime, 407 | const Eigen::Vector3d& f, const Eigen::Vector3d& w, 408 | const Eigen::Vector3d& acc, const Eigen::Vector3d& gyro, 409 | const Eigen::Vector3d& f_old, const Eigen::Vector3d& w_old, 410 | const Eigen::Vector3d& acc_old, const Eigen::Vector3d& gyro_old); 411 | 412 | // Function to update IMU instrinsic matrices by their sections 413 | void updateImuMx(); 414 | 415 | // If apply Schmidt EKF 416 | bool use_schmidt; 417 | // Remove useless nuisance state (ones with no EKF features are anchored at) 418 | void rmUselessNuisanceState(); 419 | }; 420 | 421 | typedef LarVio::Ptr LarVioPtr; 422 | typedef LarVio::ConstPtr LarVioConstPtr; 423 | 424 | } // namespace larvio 425 | 426 | #endif 427 | -------------------------------------------------------------------------------- /include/larvio/math_utils.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * COPYRIGHT AND PERMISSION NOTICE 3 | * Penn Software MSCKF_VIO 4 | * Copyright (C) 2017 The Trustees of the University of Pennsylvania 5 | * All rights reserved. 6 | */ 7 | 8 | // The original file belongs to MSCKF_VIO (https://github.com/KumarRobotics/msckf_vio/) 9 | // Some changes have been made to use it in LARVIO 10 | 11 | #ifndef MATH_UTILS_HPP 12 | #define MATH_UTILS_HPP 13 | 14 | #include 15 | #include 16 | 17 | namespace larvio { 18 | 19 | /* 20 | * @brief Create a skew-symmetric matrix from a 3-element vector. 21 | * @note Performs the operation: 22 | * w -> [ 0 -w3 w2] 23 | * [ w3 0 -w1] 24 | * [-w2 w1 0] 25 | */ 26 | inline Eigen::Matrix3d skewSymmetric(const Eigen::Vector3d& w) { 27 | Eigen::Matrix3d w_hat; 28 | w_hat(0, 0) = 0; 29 | w_hat(0, 1) = -w(2); 30 | w_hat(0, 2) = w(1); 31 | w_hat(1, 0) = w(2); 32 | w_hat(1, 1) = 0; 33 | w_hat(1, 2) = -w(0); 34 | w_hat(2, 0) = -w(1); 35 | w_hat(2, 1) = w(0); 36 | w_hat(2, 2) = 0; 37 | return w_hat; 38 | } 39 | 40 | /* 41 | * @brief Normalize the given quaternion to unit quaternion. 42 | */ 43 | inline void quaternionNormalize(Eigen::Vector4d& q) { 44 | double norm = q.norm(); 45 | q = q / norm; 46 | return; 47 | } 48 | 49 | /* 50 | * @brief Perform q1 * q2. 
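skewSymmetric above builds the hat operator [w]_x, i.e. the matrix form of the cross product that the error-state Jacobians in larvio.cpp rely on. A tiny sanity sketch (illustrative):

#include <cassert>
#include <Eigen/Dense>
#include "larvio/math_utils.hpp"

void checkHatOperator() {
    Eigen::Vector3d a(0.1, -2.0, 0.7), b(1.5, 0.3, -0.4);
    // Matrix form of the cross product: [a]_x b == a x b ...
    Eigen::Vector3d lhs = larvio::skewSymmetric(a) * b;
    Eigen::Vector3d rhs = a.cross(b);
    assert((lhs - rhs).norm() < 1e-12);
    // ... and antisymmetry: [a]_x^T = -[a]_x.
    assert((larvio::skewSymmetric(a).transpose() + larvio::skewSymmetric(a)).norm() == 0.0);
}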
51 | * 52 | * Format of q1 and q2 is as [x,y,z,w] 53 | */ 54 | inline Eigen::Vector4d quaternionMultiplication( 55 | const Eigen::Vector4d& q1, 56 | const Eigen::Vector4d& q2) { 57 | Eigen::Matrix4d L; 58 | 59 | // QXC: JPL 60 | // L(0, 0) = q1(3); L(0, 1) = q1(2); L(0, 2) = -q1(1); L(0, 3) = q1(0); 61 | // L(1, 0) = -q1(2); L(1, 1) = q1(3); L(1, 2) = q1(0); L(1, 3) = q1(1); 62 | // L(2, 0) = q1(1); L(2, 1) = -q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2); 63 | // L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3); 64 | 65 | // QXC: Hamilton 66 | L(0, 0) = q1(3); L(0, 1) = -q1(2); L(0, 2) = q1(1); L(0, 3) = q1(0); 67 | L(1, 0) = q1(2); L(1, 1) = q1(3); L(1, 2) = -q1(0); L(1, 3) = q1(1); 68 | L(2, 0) = -q1(1); L(2, 1) = q1(0); L(2, 2) = q1(3); L(2, 3) = q1(2); 69 | L(3, 0) = -q1(0); L(3, 1) = -q1(1); L(3, 2) = -q1(2); L(3, 3) = q1(3); 70 | 71 | Eigen::Vector4d q = L * q2; 72 | quaternionNormalize(q); 73 | return q; 74 | } 75 | 76 | /* 77 | * @brief Convert the vector part of a quaternion to a 78 | * full quaternion. 79 | * @note This function is useful to convert delta quaternion 80 | * which is usually a 3x1 vector to a full quaternion. 81 | * For more details, check Section 3.2 "Kalman Filter Update" in 82 | * "Indirect Kalman Filter for 3D Attitude Estimation: 83 | * A Tutorial for quaternion Algebra". 84 | */ 85 | inline Eigen::Vector4d smallAngleQuaternion( 86 | const Eigen::Vector3d& dtheta) { 87 | 88 | Eigen::Vector3d dq = dtheta / 2.0; 89 | Eigen::Vector4d q; 90 | double dq_square_norm = dq.squaredNorm(); 91 | 92 | if (dq_square_norm <= 1) { 93 | q.head<3>() = dq; 94 | q(3) = std::sqrt(1-dq_square_norm); 95 | } else { 96 | q.head<3>() = dq; 97 | q(3) = 1; 98 | q = q / std::sqrt(1+dq_square_norm); 99 | } 100 | 101 | return q; 102 | } 103 | 104 | /* 105 | * @brief Convert the vector part of a quaternion to a 106 | * full quaternion. 107 | * @note This function is useful to convert delta quaternion 108 | * which is usually a 3x1 vector to a full quaternion. 109 | * For more details, check Section 3.2 "Kalman Filter Update" in 110 | * "Indirect Kalman Filter for 3D Attitude Estimation: 111 | * A Tutorial for quaternion Algebra". 112 | */ 113 | inline Eigen::Quaterniond getSmallAngleQuaternion( 114 | const Eigen::Vector3d& dtheta) { 115 | 116 | Eigen::Vector3d dq = dtheta / 2.0; 117 | Eigen::Quaterniond q; 118 | double dq_square_norm = dq.squaredNorm(); 119 | 120 | if (dq_square_norm <= 1) { 121 | q.x() = dq(0); 122 | q.y() = dq(1); 123 | q.z() = dq(2); 124 | q.w() = std::sqrt(1-dq_square_norm); 125 | } else { 126 | q.x() = dq(0); 127 | q.y() = dq(1); 128 | q.z() = dq(2); 129 | q.w() = 1; 130 | q.normalize(); 131 | } 132 | 133 | return q; 134 | } 135 | 136 | /* 137 | * @brief Convert a quaternion to the corresponding rotation matrix 138 | * @note Pay attention to the convention used. The function follows the 139 | * conversion in "Indirect Kalman Filter for 3D Attitude Estimation: 140 | * A Tutorial for Quaternion Algebra", Equation (78). 
141 | * 142 | * The input quaternion should be in the form 143 | * [q1, q2, q3, q4(scalar)]^T 144 | */ 145 | inline Eigen::Matrix3d quaternionToRotation( 146 | const Eigen::Vector4d& q) { 147 | // QXC: Hamilton 148 | const double& qw = q(3); 149 | const double& qx = q(0); 150 | const double& qy = q(1); 151 | const double& qz = q(2); 152 | Eigen::Matrix3d R; 153 | R(0, 0) = 1-2*(qy*qy+qz*qz); R(0, 1) = 2*(qx*qy-qw*qz); R(0, 2) = 2*(qx*qz+qw*qy); 154 | R(1, 0) = 2*(qx*qy+qw*qz); R(1, 1) = 1-2*(qx*qx+qz*qz); R(1, 2) = 2*(qy*qz-qw*qx); 155 | R(2, 0) = 2*(qx*qz-qw*qy); R(2, 1) = 2*(qy*qz+qw*qx); R(2, 2) = 1-2*(qx*qx+qy*qy); 156 | 157 | return R; 158 | } 159 | 160 | /* 161 | * @brief Convert a rotation matrix to a quaternion. 162 | * @note Pay attention to the convention used. The function follows the 163 | * conversion in "Indirect Kalman Filter for 3D Attitude Estimation: 164 | * A Tutorial for Quaternion Algebra", Equation (78). 165 | * 166 | * The input quaternion should be in the form 167 | * [q1, q2, q3, q4(scalar)]^T 168 | */ 169 | inline Eigen::Vector4d rotationToQuaternion( 170 | const Eigen::Matrix3d& R) { 171 | Eigen::Vector4d score; 172 | score(0) = R(0, 0); 173 | score(1) = R(1, 1); 174 | score(2) = R(2, 2); 175 | score(3) = R.trace(); 176 | 177 | int max_row = 0, max_col = 0; 178 | score.maxCoeff(&max_row, &max_col); 179 | 180 | Eigen::Vector4d q = Eigen::Vector4d::Zero(); 181 | 182 | // // QXC: JPL 183 | // if (max_row == 0) { 184 | // q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0; 185 | // q(1) = (R(0, 1)+R(1, 0)) / (4*q(0)); 186 | // q(2) = (R(0, 2)+R(2, 0)) / (4*q(0)); 187 | // q(3) = (R(1, 2)-R(2, 1)) / (4*q(0)); 188 | // } else if (max_row == 1) { 189 | // q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0; 190 | // q(0) = (R(0, 1)+R(1, 0)) / (4*q(1)); 191 | // q(2) = (R(1, 2)+R(2, 1)) / (4*q(1)); 192 | // q(3) = (R(2, 0)-R(0, 2)) / (4*q(1)); 193 | // } else if (max_row == 2) { 194 | // q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0; 195 | // q(0) = (R(0, 2)+R(2, 0)) / (4*q(2)); 196 | // q(1) = (R(1, 2)+R(2, 1)) / (4*q(2)); 197 | // q(3) = (R(0, 1)-R(1, 0)) / (4*q(2)); 198 | // } else { 199 | // q(3) = std::sqrt(1+R.trace()) / 2.0; 200 | // q(0) = (R(1, 2)-R(2, 1)) / (4*q(3)); 201 | // q(1) = (R(2, 0)-R(0, 2)) / (4*q(3)); 202 | // q(2) = (R(0, 1)-R(1, 0)) / (4*q(3)); 203 | // } 204 | 205 | // QXC: Hamilton 206 | if (max_row == 0) { 207 | q(0) = std::sqrt(1+2*R(0, 0)-R.trace()) / 2.0; 208 | q(1) = (R(0, 1)+R(1, 0)) / (4*q(0)); 209 | q(2) = (R(0, 2)+R(2, 0)) / (4*q(0)); 210 | q(3) = (R(2, 1)-R(1, 2)) / (4*q(0)); 211 | } else if (max_row == 1) { 212 | q(1) = std::sqrt(1+2*R(1, 1)-R.trace()) / 2.0; 213 | q(0) = (R(0, 1)+R(1, 0)) / (4*q(1)); 214 | q(2) = (R(1, 2)+R(2, 1)) / (4*q(1)); 215 | q(3) = (R(0, 2)-R(2, 0)) / (4*q(1)); 216 | } else if (max_row == 2) { 217 | q(2) = std::sqrt(1+2*R(2, 2)-R.trace()) / 2.0; 218 | q(0) = (R(0, 2)+R(2, 0)) / (4*q(2)); 219 | q(1) = (R(1, 2)+R(2, 1)) / (4*q(2)); 220 | q(3) = (R(1, 0)-R(0, 1)) / (4*q(2)); 221 | } else { 222 | q(3) = std::sqrt(1+R.trace()) / 2.0; 223 | q(0) = (R(2, 1)-R(1, 2)) / (4*q(3)); 224 | q(1) = (R(0, 2)-R(2, 0)) / (4*q(3)); 225 | q(2) = (R(1, 0)-R(0, 1)) / (4*q(3)); 226 | } 227 | 228 | if (q(3) < 0) q = -q; 229 | quaternionNormalize(q); 230 | return q; 231 | } 232 | 233 | } // end namespace larvio 234 | 235 | #endif // MATH_UTILS_HPP 236 | -------------------------------------------------------------------------------- /include/sensors/ImageData.hpp: -------------------------------------------------------------------------------- 1 
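The quaternion helpers above keep the [x, y, z, w] storage order with the Hamilton product, so quaternionToRotation agrees with Eigen::Quaterniond built from the same coefficients. A short consistency sketch (illustrative):

#include <cassert>
#include <Eigen/Dense>
#include "larvio/math_utils.hpp"

void checkQuaternionConventions() {
    Eigen::Vector4d q(0.1, -0.2, 0.3, 0.9);            // stored as [x, y, z, w]
    larvio::quaternionNormalize(q);

    // Rotation matrix must match Eigen's Hamilton-convention quaternion.
    Eigen::Quaterniond q_eig(q(3), q(0), q(1), q(2));   // ctor takes w, x, y, z
    Eigen::Matrix3d R = larvio::quaternionToRotation(q);
    assert((R - q_eig.toRotationMatrix()).norm() < 1e-12);

    // Round trip back to vector form (rotationToQuaternion enforces w >= 0).
    Eigen::Vector4d q_back = larvio::rotationToQuaternion(R);
    assert((q_back - q).norm() < 1e-9);
}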
| /* 2 | * @Descripttion: Type of image sensor data 3 | * @Author: Xiaochen Qiu 4 | */ 5 | 6 | 7 | #ifndef IMAGE_DATA_HPP 8 | #define IMAGE_DATA_HPP 9 | 10 | 11 | #include "opencv2/core.hpp" 12 | #include "boost/shared_ptr.hpp" 13 | 14 | namespace larvio { 15 | 16 | struct ImgData { 17 | double timeStampToSec; 18 | cv::Mat image; 19 | }; 20 | 21 | typedef boost::shared_ptr ImageDataPtr; 22 | 23 | } 24 | 25 | 26 | #endif // IMAGE_DATA_HPP -------------------------------------------------------------------------------- /include/sensors/ImuData.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * @Descripttion: Types of IMU sensor data. 3 | * @Author: Xiaochen Qiu 4 | */ 5 | 6 | 7 | #ifndef IMU_DATA_HPP 8 | #define IMU_DATA_HPP 9 | 10 | 11 | #include "Eigen/Core" 12 | #include "Eigen/Dense" 13 | 14 | namespace larvio { 15 | 16 | struct ImuData { 17 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 18 | 19 | ImuData (double t, double wx, double wy, double wz, 20 | double ax, double ay, double az) { 21 | timeStampToSec = t; 22 | angular_velocity[0] = wx; 23 | angular_velocity[1] = wy; 24 | angular_velocity[2] = wz; 25 | linear_acceleration[0] = ax; 26 | linear_acceleration[1] = ay; 27 | linear_acceleration[2] = az; 28 | } 29 | 30 | ImuData (double t, const Eigen::Vector3d& omg, const Eigen::Vector3d& acc) { 31 | timeStampToSec = t; 32 | angular_velocity = omg; 33 | linear_acceleration = acc; 34 | } 35 | 36 | double timeStampToSec; 37 | Eigen::Vector3d angular_velocity; 38 | Eigen::Vector3d linear_acceleration; 39 | }; 40 | 41 | } 42 | 43 | 44 | #endif // IMU_DATA_HPP -------------------------------------------------------------------------------- /include/utils/DataReader.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * @Descripttion: This header include functions and types for reading IMU and image data, and methods to manipulate. 3 | * @Author: Xiaochen Qiu 4 | */ 5 | 6 | 7 | #ifndef DATA_READER_H 8 | #define DATA_READER_H 9 | 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | #include "sensors/ImuData.hpp" 16 | 17 | using namespace std; 18 | 19 | namespace larvio { 20 | 21 | struct ImgInfo { 22 | double timeStampToSec; 23 | string imgName; 24 | }; 25 | 26 | /** 27 | * @description: Read data.csv containing image file names 28 | * @param imagePath Path of data.csv 29 | * @return: iListData Cotaining image informations. 30 | */ 31 | void loadImageList(char* imagePath, vector &iListData) { 32 | ifstream inf; 33 | inf.open(imagePath, ifstream::in); 34 | const int cnt = 2; 35 | string line; 36 | int j = 0; 37 | size_t comma = 0; 38 | size_t comma2 = 0; 39 | ImgInfo temp; 40 | 41 | getline(inf,line); 42 | while (!inf.eof()) { 43 | getline(inf,line); 44 | 45 | comma = line.find(',',0); 46 | temp.timeStampToSec = 1e-9*atol(line.substr(0,comma).c_str()); 47 | 48 | while (comma < line.size() && j != cnt-1) { 49 | comma2 = line.find(',',comma + 1); 50 | temp.imgName = line.substr(comma + 1,comma2-comma-1).c_str(); 51 | ++j; 52 | comma = comma2; 53 | } 54 | 55 | iListData.push_back(temp); 56 | j = 0; 57 | } 58 | 59 | inf.close(); 60 | } 61 | 62 | /** 63 | * @description: Read data.csv containing IMU data 64 | * @param imuPath Path of data.csv 65 | * @return: vimuData Cotaining IMU informations. 
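loadImageList above parses an EuRoC-style data.csv in which each row is `timestamp_in_nanoseconds,filename`; the stamp is scaled by 1e-9 into seconds. A small usage sketch — the dataset path is hypothetical and the stripped vector argument is assumed to be std::vector<ImgInfo>:

#include <vector>
#include "utils/DataReader.hpp"

void readEurocImageIndex() {
    // Rows in data.csv look like:  1403636579763555584,1403636579763555584.png
    char img_csv[] = "/data/EuRoC/MH_01_easy/mav0/cam0/data.csv";  // hypothetical path
    std::vector<larvio::ImgInfo> img_list;
    larvio::loadImageList(img_csv, img_list);
    // img_list[i].timeStampToSec is in seconds, img_list[i].imgName is the file name.
}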
66 | */ 67 | // QXC:读取imu的data.csv文件 68 | void loadImuFile(char* imuPath, vector &vimuData) { 69 | ifstream inf; 70 | inf.open(imuPath, ifstream::in); 71 | const int cnt = 7; 72 | string line; 73 | int j = 0; 74 | size_t comma = 0; 75 | size_t comma2 = 0; 76 | char imuTime[14] = {0}; 77 | double acc[3] = {0.0}; 78 | double grad[3] = {0.0}; 79 | double imuTimeStamp = 0; 80 | 81 | getline(inf,line); 82 | while (!inf.eof()) { 83 | getline(inf,line); 84 | 85 | comma = line.find(',',0); 86 | string temp = line.substr(0,comma); 87 | imuTimeStamp = 1e-9*atol(line.substr(0,comma).c_str()); 88 | 89 | while (comma < line.size() && j != cnt-1) { 90 | comma2 = line.find(',',comma + 1); 91 | switch(j) { 92 | case 0: 93 | grad[0] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 94 | break; 95 | case 1: 96 | grad[1] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 97 | break; 98 | case 2: 99 | grad[2] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 100 | break; 101 | case 3: 102 | acc[0] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 103 | break; 104 | case 4: 105 | acc[1] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 106 | break; 107 | case 5: 108 | acc[2] = atof(line.substr(comma + 1,comma2-comma-1).c_str()); 109 | break; 110 | } 111 | ++j; 112 | comma = comma2; 113 | } 114 | ImuData tempImu(imuTimeStamp, grad[0], grad[1], grad[2], acc[0], acc[1], acc[2]); 115 | vimuData.push_back(tempImu); 116 | j = 0; 117 | } 118 | 119 | inf.close(); 120 | } 121 | 122 | 123 | bool findFirstAlign(const vector &vImu, const vector &vImg, pair &ImgImuAlign) { 124 | double imuTime0 = vImu[0].timeStampToSec; 125 | double imgTime0 = vImg[0].timeStampToSec; 126 | 127 | if(imuTime0>imgTime0) { 128 | for(size_t i=1; i 10 | #include 11 | #include 12 | 13 | #include "larvio/imu_state.h" 14 | 15 | 16 | namespace larvio { 17 | 18 | namespace visualize { 19 | 20 | 21 | void DrawCurrentPose(pangolin::OpenGlMatrix &Twc) { 22 | const float &w = 0.08; 23 | const float h = w*0.75; 24 | const float z = w*0.6; 25 | 26 | glPushMatrix(); 27 | 28 | #ifdef HAVE_GLES 29 | glMultMatrixf(Twc.m); 30 | #else 31 | glMultMatrixd(Twc.m); 32 | #endif 33 | 34 | glLineWidth(3); 35 | glColor3f(0.0f,0.0f,1.0f); 36 | glBegin(GL_LINES); 37 | glVertex3f(0,0,0); 38 | glVertex3f(w,h,z); 39 | glVertex3f(0,0,0); 40 | glVertex3f(w,-h,z); 41 | glVertex3f(0,0,0); 42 | glVertex3f(-w,-h,z); 43 | glVertex3f(0,0,0); 44 | glVertex3f(-w,h,z); 45 | 46 | glVertex3f(w,h,z); 47 | glVertex3f(w,-h,z); 48 | 49 | glVertex3f(-w,h,z); 50 | glVertex3f(-w,-h,z); 51 | 52 | glVertex3f(-w,h,z); 53 | glVertex3f(w,h,z); 54 | 55 | glVertex3f(-w,-h,z); 56 | glVertex3f(w,-h,z); 57 | glEnd(); 58 | 59 | glPopMatrix(); 60 | } 61 | 62 | 63 | void DrawKeyFrames(const std::vector& poses) { 64 | const float &w = 0.05; 65 | const float h = w*0.75; 66 | const float z = w*0.6; 67 | 68 | for(auto pose : poses) { 69 | glPushMatrix(); 70 | 71 | #ifdef HAVE_GLES 72 | glMultMatrixf(pose.m); 73 | #else 74 | glMultMatrixd(pose.m); 75 | #endif 76 | 77 | glLineWidth(1); 78 | glColor3f(1.0f,1.0f,0.0f); 79 | glBegin(GL_LINES); 80 | glVertex3f(0,0,0); 81 | glVertex3f(w,h,z); 82 | glVertex3f(0,0,0); 83 | glVertex3f(w,-h,z); 84 | glVertex3f(0,0,0); 85 | glVertex3f(-w,-h,z); 86 | glVertex3f(0,0,0); 87 | glVertex3f(-w,h,z); 88 | 89 | glVertex3f(w,h,z); 90 | glVertex3f(w,-h,z); 91 | 92 | glVertex3f(-w,h,z); 93 | glVertex3f(-w,-h,z); 94 | 95 | glVertex3f(-w,h,z); 96 | glVertex3f(w,h,z); 97 | 98 | glVertex3f(-w,-h,z); 99 | glVertex3f(w,-h,z); 100 | glEnd(); 101 | 102 | glPopMatrix(); 
103 | } 104 | } 105 | 106 | 107 | void GetCurrentOpenGLPoseMatrix(pangolin::OpenGlMatrix &M, const Eigen::Isometry3d &Tbw) { 108 | Eigen::Matrix3d R = Tbw.rotation(); 109 | Eigen::Vector3d t = Tbw.translation(); 110 | 111 | M.m[0] = R(0,0); 112 | M.m[1] = R(1,0); 113 | M.m[2] = R(2,0); 114 | M.m[3] = 0.0; 115 | 116 | M.m[4] = R(0,1); 117 | M.m[5] = R(1,1); 118 | M.m[6] = R(2,1); 119 | M.m[7] = 0.0; 120 | 121 | M.m[8] = R(0,2); 122 | M.m[9] = R(1,2); 123 | M.m[10] = R(2,2); 124 | M.m[11] = 0.0; 125 | 126 | M.m[12] = t(0); 127 | M.m[13] = t(1); 128 | M.m[14] = t(2); 129 | M.m[15] = 1.0; 130 | } 131 | 132 | 133 | void GetSwOpenGLPoseMatrices(std::vector& vPoses_SW, const std::vector& swPoses) { 134 | vPoses_SW.clear(); 135 | for (auto pose : swPoses) { 136 | pangolin::OpenGlMatrix M; 137 | GetCurrentOpenGLPoseMatrix(M, pose); 138 | vPoses_SW.push_back(M); 139 | } 140 | } 141 | 142 | 143 | void DrawSlideWindow(const std::vector& vPoses_SW) { 144 | const float &w = 0.05; 145 | const float h = w*0.75; 146 | const float z = w*0.6; 147 | 148 | for (auto pose : vPoses_SW) { 149 | glPushMatrix(); 150 | 151 | #ifdef HAVE_GLES 152 | glMultMatrixf(pose.m); 153 | #else 154 | glMultMatrixd(pose.m); 155 | #endif 156 | 157 | glLineWidth(1); 158 | glColor3f(1.0f,0.0f,0.0f); 159 | glBegin(GL_LINES); 160 | glVertex3f(0,0,0); 161 | glVertex3f(w,h,z); 162 | glVertex3f(0,0,0); 163 | glVertex3f(w,-h,z); 164 | glVertex3f(0,0,0); 165 | glVertex3f(-w,-h,z); 166 | glVertex3f(0,0,0); 167 | glVertex3f(-w,h,z); 168 | 169 | glVertex3f(w,h,z); 170 | glVertex3f(w,-h,z); 171 | 172 | glVertex3f(-w,h,z); 173 | glVertex3f(-w,-h,z); 174 | 175 | glVertex3f(-w,h,z); 176 | glVertex3f(w,h,z); 177 | 178 | glVertex3f(-w,-h,z); 179 | glVertex3f(w,-h,z); 180 | glEnd(); 181 | 182 | glPopMatrix(); 183 | } 184 | } 185 | 186 | 187 | void DrawActiveMapPoints(const std::map& mMapPoints) { 188 | if (mMapPoints.empty()) 189 | return; 190 | 191 | glPointSize(10); 192 | glBegin(GL_POINTS); 193 | glColor3f(1.0,0.0,0.0); 194 | 195 | for (auto pt : mMapPoints) 196 | glVertex3f((pt.second)(0),(pt.second)(1),(pt.second)(2)); 197 | 198 | glEnd(); 199 | } 200 | 201 | 202 | void DrawStableMapPoints(const std::map& mMapPoints) { 203 | if (mMapPoints.empty()) 204 | return; 205 | 206 | glPointSize(3); 207 | glBegin(GL_POINTS); 208 | glColor3f(1.0,1.0,1.0); 209 | 210 | for (auto pt : mMapPoints) 211 | glVertex3f((pt.second)(0),(pt.second)(1),(pt.second)(2)); 212 | 213 | glEnd(); 214 | } 215 | 216 | 217 | }; 218 | 219 | }; 220 | 221 | #endif -------------------------------------------------------------------------------- /licenses/LICENSE_MSCKF_VIO.txt: -------------------------------------------------------------------------------- 1 | COPYRIGHT AND PERMISSION NOTICE 2 | Penn Software MSCKF_VIO 3 | Copyright (C) 2017 The Trustees of the University of Pennsylvania 4 | All rights reserved. 5 | 6 | The Trustees of the University of Pennsylvania (“Penn”) and Ke Sun, Kartik Mohta, the developer (“Developer”) of Penn Software MSCKF_VIO (“Software”) give recipient (“Recipient”) and Recipient’s Institution (“Institution”) permission to use, copy, and modify the software in source and binary forms, with or without modification for non-profit research purposes only provided that the following conditions are met: 7 | 8 | 1) All copies of Software in binary form and/or source code, related documentation and/or other materials provided with the Software must reproduce and retain the above copyright notice, this list of conditions and the following disclaimer. 
9 | 10 | 11 | 2) Recipient shall have the right to create modifications of the Software (“Modifications”) for their internal research and academic purposes only. 12 | 13 | 14 | 3) All copies of Modifications in binary form and/or source code and related documentation must reproduce and retain the above copyright notice, this list of conditions and the following disclaimer. 15 | 16 | 17 | 4) Recipient and Institution shall not distribute Software or Modifications to any third parties without the prior written approval of Penn. 18 | 19 | 20 | 5) Recipient will provide the Developer with feedback on the use of the Software and Modifications, if any, in their research. The Developers and Penn are permitted to use any information Recipient provides in making changes to the Software. All feedback, bug reports and technical questions shall be sent to: 21 | sunke@seas.upenn.edu 22 | kmohta@seas.upenn.edu 23 | 24 | 25 | 6) Recipient acknowledges that the Developers, Penn and its licensees may develop modifications to Software that may be substantially similar to Recipient’s modifications of Software, and that the Developers, Penn and its licensees shall not be constrained in any way by Recipient in Penn’s or its licensees’ use or management of such modifications. Recipient acknowledges the right of the Developers and Penn to prepare and publish modifications to Software that may be substantially similar or functionally equivalent to your modifications and improvements, and if Recipient or Institution obtains patent protection for any modification or improvement to Software, Recipient and Institution agree not to allege or enjoin infringement of their patent by the Developers, Penn or any of Penn’s licensees obtaining modifications or improvements to Software from the Penn or the Developers. 26 | 27 | 28 | 7) Recipient and Developer will acknowledge in their respective publications the contributions made to each other’s research involving or based on the Software. The current citations for Software are: 29 | 30 | 31 | 8) Any party desiring a license to use the Software and/or Modifications for commercial purposes shall contact The Penn Center for Innovation at 215-898-9591. 32 | 33 | 34 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS, CONTRIBUTORS, AND THE TRUSTEES OF THE UNIVERSITY OF PENNSYLVANIA "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER, CONTRIBUTORS OR THE TRUSTEES OF THE UNIVERSITY OF PENNSYLVANIA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 | 36 | 37 | -------------------------------------------------------------------------------- /licenses/LICENSE_ORB_SLAM2.txt: -------------------------------------------------------------------------------- 1 | ORB-SLAM2 is released under a GPLv3 license (see License-gpl.txt). 2 | Please see Dependencies.md for a list of all included code and library dependencies which are not property of the authors of ORB-SLAM2. 
3 | 4 | For a closed-source version of ORB-SLAM2 for commercial purposes, please contact the authors. 5 | 6 | If you use ORB-SLAM in an academic work, please cite the most relevant publication associated by visiting: 7 | https://github.com/raulmur/ORB_SLAM2 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /results/TX2_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PetWorm/LARVIO/6de23309f13a708be32ca22fbadde874f8e54905/results/TX2_result.png -------------------------------------------------------------------------------- /results/comparison.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PetWorm/LARVIO/6de23309f13a708be32ca22fbadde874f8e54905/results/comparison.jpg -------------------------------------------------------------------------------- /results/euroc_x8.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PetWorm/LARVIO/6de23309f13a708be32ca22fbadde874f8e54905/results/euroc_x8.gif -------------------------------------------------------------------------------- /results/tumvi_c_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PetWorm/LARVIO/6de23309f13a708be32ca22fbadde874f8e54905/results/tumvi_c_1.jpg -------------------------------------------------------------------------------- /results/uzhfpv_o_f_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PetWorm/LARVIO/6de23309f13a708be32ca22fbadde874f8e54905/results/uzhfpv_o_f_3.jpg -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | project(larvio) 3 | 4 | set(CMAKE_BUILD_TYPE "Release") 5 | add_compile_options(-std=c++11) 6 | 7 | # Modify cmake module path if new .cmake files are required 8 | set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_LIST_DIR}/../../../cmake") 9 | set(LARVIO_SRC "${CMAKE_CURRENT_LIST_DIR}/../../../src") 10 | set(LARVIO_INCLUDE "${CMAKE_CURRENT_LIST_DIR}/../../../include") 11 | 12 | find_package(catkin REQUIRED COMPONENTS 13 | roscpp 14 | std_msgs 15 | tf 16 | nav_msgs 17 | sensor_msgs 18 | geometry_msgs 19 | eigen_conversions 20 | tf_conversions 21 | nodelet 22 | image_transport 23 | cv_bridge 24 | message_filters 25 | pcl_conversions 26 | pcl_ros 27 | std_srvs 28 | ) 29 | 30 | ## System dependencies are found with CMake's conventions 31 | find_package(Boost REQUIRED) 32 | find_package(Eigen3 REQUIRED) 33 | find_package(OpenCV REQUIRED) 34 | find_package(SuiteSparse REQUIRED) 35 | find_package(Ceres REQUIRED) 36 | 37 | ################################### 38 | ## catkin specific configuration ## 39 | ################################### 40 | catkin_package( 41 | INCLUDE_DIRS include ${LARVIO_INCLUDE} 42 | LIBRARIES system_manager estimator image_processor 43 | CATKIN_DEPENDS 44 | roscpp std_msgs tf nav_msgs sensor_msgs geometry_msgs 45 | eigen_conversions tf_conversions message_runtime 46 | image_transport cv_bridge message_filters pcl_conversions 47 | pcl_ros std_srvs 48 | DEPENDS Boost EIGEN3 OpenCV SUITESPARSE 49 | ) 50 | 51 | ########### 52 | ## Build ## 53 | ########### 54 | 55 | 
include_directories( 56 | include 57 | ${LARVIO_INCLUDE} 58 | ${catkin_INCLUDE_DIRS} 59 | ${EIGEN3_INCLUDE_DIR} 60 | ${Boost_INCLUDE_DIR} 61 | ${OpenCV_INCLUDE_DIRS} 62 | ${SUITESPARSE_INCLUDE_DIRS} 63 | ${CERES_INCLUDE_DIRS} 64 | ) 65 | 66 | # Image processor 67 | add_library(image_processor 68 | ${LARVIO_SRC}/image_processor.cpp 69 | ${LARVIO_SRC}/ORBDescriptor.cpp 70 | ) 71 | target_link_libraries(image_processor 72 | ${OpenCV_LIBRARIES} 73 | ) 74 | 75 | # Initializer 76 | # -- Static Initializer 77 | add_library(staticInitializer 78 | ${LARVIO_SRC}/StaticInitializer.cpp 79 | ) 80 | target_link_libraries(staticInitializer 81 | ${SUITESPARSE_LIBRARIES} 82 | ) 83 | # -- Dynamic Initializer 84 | add_library(dynamicInitializer 85 | ${LARVIO_SRC}/StaticInitializer.cpp 86 | ${LARVIO_SRC}/DynamicInitializer.cpp 87 | ${LARVIO_SRC}/feature_manager.cpp 88 | ${LARVIO_SRC}/initial_alignment.cpp 89 | ${LARVIO_SRC}/initial_sfm.cpp 90 | ${LARVIO_SRC}/solve_5pts.cpp 91 | ) 92 | target_link_libraries(dynamicInitializer 93 | ${SUITESPARSE_LIBRARIES} 94 | ${CERES_LIBRARIES} 95 | ${OpenCV_LIBRARIES} 96 | ) 97 | # -- Flexible Initializer 98 | add_library(flexibleInitializer 99 | ${LARVIO_SRC}/FlexibleInitializer.cpp 100 | ) 101 | target_link_libraries(flexibleInitializer 102 | staticInitializer 103 | dynamicInitializer 104 | ) 105 | 106 | # Estimator 107 | add_library(estimator 108 | ${LARVIO_SRC}/larvio.cpp 109 | ) 110 | target_link_libraries(estimator 111 | flexibleInitializer 112 | ${SUITESPARSE_LIBRARIES} 113 | ${OpenCV_LIBRARIES} 114 | ) 115 | 116 | # System Manger 117 | add_library(system_manager 118 | src/System.cpp 119 | ${LARVIO_SRC}/larvio.cpp 120 | ${LARVIO_SRC}/image_processor.cpp 121 | ) 122 | add_dependencies(system_manager 123 | ${${PROJECT_NAME}_EXPORTED_TARGETS} 124 | ${catkin_EXPORTED_TARGETS} 125 | ) 126 | target_link_libraries(system_manager 127 | image_processor estimator 128 | ${catkin_LIBRARIES} 129 | ) 130 | 131 | # System Manager nodelet 132 | add_library(system_manager_nodelet 133 | src/System_nodelet.cpp 134 | ) 135 | add_dependencies(system_manager_nodelet 136 | ${${PROJECT_NAME}_EXPORTED_TARGETS} 137 | ${catkin_EXPORTED_TARGETS} 138 | ) 139 | target_link_libraries(system_manager_nodelet 140 | system_manager 141 | ${catkin_LIBRARIES} 142 | ) 143 | 144 | ############# 145 | ## Install ## 146 | ############# 147 | 148 | install(TARGETS 149 | image_processor estimator system_manager system_manager_nodelet 150 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 151 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 152 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 153 | ) 154 | 155 | install(DIRECTORY include/${PROJECT_NAME}/ 156 | DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 157 | PATTERN "*_nodelet.h" EXCLUDE 158 | ) 159 | 160 | install(FILES nodelets.xml 161 | DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 162 | ) 163 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/include/System.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-21. 3 | // Managing the image processer and the estimator. 
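// This header declares the System manager: ROS subscribers for the IMU and
// monocular image topics, publishers for odometry, path, feature point clouds
// and a visualization image, and the buffers used to synchronize the two
// sensor streams before they are handed to the image processor and estimator.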
4 | // 5 | 6 | #ifndef SYSTEM_H 7 | #define SYSTEM_H 8 | 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | 23 | #include "sensors/ImuData.hpp" 24 | #include "sensors/ImageData.hpp" 25 | 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | namespace larvio { 32 | 33 | /* 34 | * @brief Manager of the system. 35 | */ 36 | class System { 37 | public: 38 | // Constructor 39 | System(ros::NodeHandle& n); 40 | // Disable copy and assign constructors. 41 | System(const ImageProcessor&) = delete; 42 | System operator=(const System&) = delete; 43 | 44 | // Destructor. 45 | ~System(); 46 | 47 | // Initialize the object. 48 | bool initialize(); 49 | 50 | typedef boost::shared_ptr Ptr; 51 | typedef boost::shared_ptr ConstPtr; 52 | 53 | private: 54 | 55 | // Ros node handle. 56 | ros::NodeHandle nh; 57 | 58 | // Subscribers. 59 | ros::Subscriber img_sub; 60 | ros::Subscriber imu_sub; 61 | 62 | // Publishers. 63 | image_transport::Publisher vis_img_pub; 64 | ros::Publisher odom_pub; 65 | ros::Publisher stable_feature_pub; 66 | ros::Publisher active_feature_pub; 67 | ros::Publisher path_pub; 68 | 69 | // Msgs to be published. 70 | std::vector header_buffer; // buffer for heads of msgs to be published 71 | 72 | // Msgs to be published. 73 | nav_msgs::Odometry odom_msg; 74 | pcl::PointCloud::Ptr stable_feature_msg_ptr; 75 | pcl::PointCloud::Ptr active_feature_msg_ptr; 76 | nav_msgs::Path path_msg; 77 | 78 | // Frame id 79 | std::string fixed_frame_id; 80 | std::string child_frame_id; 81 | 82 | // Pointer for image processer. 83 | ImageProcessorPtr ImgProcesser; 84 | 85 | // Pointer for estimator. 86 | LarVioPtr Estimator; 87 | 88 | // Directory for config file. 89 | std::string config_file; 90 | 91 | // Imu and image msg synchronized threshold. 92 | double imu_img_timeTh; 93 | 94 | // IMU message buffer. 95 | std::vector imu_msg_buffer; 96 | 97 | // Img message buffer. 98 | std::vector img_msg_buffer; 99 | 100 | /* 101 | * @brief loadParameters 102 | * Load parameters from the parameter server. 103 | */ 104 | bool loadParameters(); 105 | 106 | /* 107 | * @brief createRosIO 108 | * Create ros publisher and subscirbers. 109 | */ 110 | bool createRosIO(); 111 | 112 | /* 113 | * @brief imageCallback 114 | * Callback function for the monocular images. 115 | * @param image msg. 116 | */ 117 | void imageCallback(const sensor_msgs::ImageConstPtr& msg); 118 | 119 | /* 120 | * @brief imuCallback 121 | * Callback function for the imu message. 122 | * @param msg IMU msg. 123 | */ 124 | void imuCallback(const sensor_msgs::ImuConstPtr& msg); 125 | 126 | /* 127 | * @brief publish Publish the results of VIO. 128 | * @param time The time stamp of output msgs. 129 | */ 130 | void publishVIO(const ros::Time& time); 131 | }; 132 | 133 | typedef System::Ptr SystemPtr; 134 | typedef System::ConstPtr SystemConstPtr; 135 | 136 | } // end namespace larvio 137 | 138 | 139 | #endif //SYSTEM_H 140 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/include/System_nodelet.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-21. 3 | // Nodelet for system manager. 
4 | // 5 | 6 | #ifndef SYSTEM_NODELET_H 7 | #define SYSTEM_NODELET_H 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | namespace larvio { 14 | class SystemNodelet : public nodelet::Nodelet { 15 | public: 16 | SystemNodelet() { return; } 17 | ~SystemNodelet() { 18 | // debug log 19 | std::cout << "in ~SystemNodelet()" << std::endl; 20 | return; } 21 | 22 | private: 23 | virtual void onInit(); 24 | SystemPtr system_ptr; 25 | }; 26 | } // end namespace larvio 27 | 28 | #endif //SYSTEM_NODELET_H 29 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/launch/euroc/larvio_euroc.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/launch/rviz/larvio.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 78 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Odometry1/Shape1 8 | - /Odometry1/Covariance1 9 | Splitter Ratio: 0.5 10 | Tree Height: 109 11 | - Class: rviz/Selection 12 | Name: Selection 13 | - Class: rviz/Tool Properties 14 | Expanded: 15 | - /2D Pose Estimate1 16 | - /2D Nav Goal1 17 | - /Publish Point1 18 | Name: Tool Properties 19 | Splitter Ratio: 0.588679016 20 | - Class: rviz/Views 21 | Expanded: 22 | - /Current View1 23 | Name: Views 24 | Splitter Ratio: 0.5 25 | - Class: rviz/Time 26 | Experimental: false 27 | Name: Time 28 | SyncMode: 0 29 | SyncSource: Image 30 | Visualization Manager: 31 | Class: "" 32 | Displays: 33 | - Alpha: 0.5 34 | Cell Size: 1 35 | Class: rviz/Grid 36 | Color: 160; 160; 164 37 | Enabled: true 38 | Line Style: 39 | Line Width: 0.0299999993 40 | Value: Lines 41 | Name: Grid 42 | Normal Cell Count: 0 43 | Offset: 44 | X: 0 45 | Y: 0 46 | Z: 0 47 | Plane: XY 48 | Plane Cell Count: 10 49 | Reference Frame: 50 | Value: true 51 | - Angle Tolerance: 0.100000001 52 | Class: rviz/Odometry 53 | Covariance: 54 | Orientation: 55 | Alpha: 0.5 56 | Color: 255; 255; 127 57 | Color Style: Unique 58 | Frame: Local 59 | Offset: 1 60 | Scale: 1 61 | Value: true 62 | Position: 63 | Alpha: 0.300000012 64 | Color: 204; 51; 204 65 | Scale: 1 66 | Value: true 67 | Value: false 68 | Enabled: true 69 | Keep: 1 70 | Name: Current Pose 71 | Position Tolerance: 0.100000001 72 | Shape: 73 | Alpha: 1 74 | Axes Length: 0.5 75 | Axes Radius: 0.100000001 76 | Color: 0; 255; 0 77 | Head Length: 0.0500000007 78 | Head Radius: 0.0500000007 79 | Shaft Length: 0.0500000007 80 | Shaft Radius: 0.0500000007 81 | Value: Axes 82 | Topic: /qxc_robot/system/odom 83 | Unreliable: true 84 | Value: true 85 | - Alpha: 1 86 | Buffer Length: 1 87 | Class: rviz/Path 88 | Color: 255; 255; 0 89 | Enabled: true 90 | Head Diameter: 0.300000012 91 | Head Length: 0.200000003 92 | Length: 0.300000012 93 | Line Style: Billboards 94 | Line Width: 0.0799999982 95 | Name: Trajectory 96 | Offset: 97 | X: 0 98 | Y: 0 99 | Z: 0 100 | Pose Color: 255; 85; 255 101 | Pose Style: None 102 | Radius: 0.0299999993 103 | Shaft Diameter: 0.100000001 104 | Shaft Length: 0.100000001 105 | Topic: /qxc_robot/system/path 106 | Unreliable: false 107 | Value: true 108 | - Class: rviz/Axes 109 | Enabled: false 110 | Length: 2 111 | Name: Axes 112 | Radius: 0.100000001 113 | Reference Frame: 114 | Value: false 115 | - Alpha: 1 116 | 
Autocompute Intensity Bounds: true 117 | Autocompute Value Bounds: 118 | Max Value: 10 119 | Min Value: -10 120 | Value: true 121 | Axis: Z 122 | Channel Name: x 123 | Class: rviz/PointCloud2 124 | Color: 255; 255; 255 125 | Color Transformer: Intensity 126 | Enabled: true 127 | Invert Rainbow: false 128 | Max Color: 255; 255; 255 129 | Max Intensity: 4096 130 | Min Color: 255; 255; 255 131 | Min Intensity: 0 132 | Name: Map Points 133 | Position Transformer: XYZ 134 | Queue Size: 1 135 | Selectable: true 136 | Size (Pixels): 2 137 | Style: Points 138 | Topic: /qxc_robot/system/stable_feature_point_cloud 139 | Unreliable: false 140 | Use Fixed Frame: true 141 | Use rainbow: false 142 | Value: true 143 | - Alpha: 1 144 | Autocompute Intensity Bounds: true 145 | Autocompute Value Bounds: 146 | Max Value: 10 147 | Min Value: -10 148 | Value: true 149 | Axis: Z 150 | Channel Name: x 151 | Class: rviz/PointCloud2 152 | Color: 255; 0; 0 153 | Color Transformer: Intensity 154 | Enabled: true 155 | Invert Rainbow: false 156 | Max Color: 255; 255; 255 157 | Max Intensity: 4096 158 | Min Color: 255; 255; 255 159 | Min Intensity: 0 160 | Name: Active Points 161 | Position Transformer: XYZ 162 | Queue Size: 1 163 | Selectable: true 164 | Size (Pixels): 5 165 | Style: Points 166 | Topic: /qxc_robot/system/active_feature_point_cloud 167 | Unreliable: false 168 | Use Fixed Frame: true 169 | Use rainbow: true 170 | Value: true 171 | - Class: rviz/Image 172 | Enabled: true 173 | Image Topic: /qxc_robot/system/visualization_image 174 | Max Value: 1 175 | Median window: 5 176 | Min Value: 0 177 | Name: Feature Tracks 178 | Normalize Range: true 179 | Queue Size: 2 180 | Transport Hint: raw 181 | Unreliable: false 182 | Value: true 183 | Enabled: true 184 | Global Options: 185 | Background Color: 48; 48; 48 186 | Default Light: true 187 | Fixed Frame: world 188 | Frame Rate: 30 189 | Name: root 190 | Tools: 191 | - Class: rviz/Interact 192 | Hide Inactive Objects: true 193 | - Class: rviz/MoveCamera 194 | - Class: rviz/Select 195 | - Class: rviz/FocusCamera 196 | - Class: rviz/Measure 197 | - Class: rviz/SetInitialPose 198 | Topic: /initialpose 199 | - Class: rviz/SetGoal 200 | Topic: /move_base_simple/goal 201 | - Class: rviz/PublishPoint 202 | Single click: true 203 | Topic: /clicked_point 204 | Value: true 205 | Views: 206 | Current: 207 | Class: rviz/Orbit 208 | Distance: 18.2661743 209 | Enable Stereo Rendering: 210 | Stereo Eye Separation: 0.0599999987 211 | Stereo Focal Distance: 1 212 | Swap Stereo Eyes: false 213 | Value: false 214 | Focal Point: 215 | X: 0 216 | Y: 0 217 | Z: 0 218 | Focal Shape Fixed Size: true 219 | Focal Shape Size: 0.0500000007 220 | Invert Z Axis: false 221 | Name: Current View 222 | Near Clip Distance: 0.00999999978 223 | Pitch: 1.3697964 224 | Target Frame: 225 | Value: Orbit (rviz) 226 | Yaw: 6.23360825 227 | Saved: ~ 228 | Window Geometry: 229 | Displays: 230 | collapsed: false 231 | Height: 876 232 | Hide Left Dock: false 233 | Hide Right Dock: true 234 | Image: 235 | collapsed: false 236 | QMainWindow State: 
000000ff00000000fd0000000400000000000001ad000002e2fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006100fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730100000028000000fc000000d700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d006100670065010000012a000001190000001600fffffffb0000001200520061007700200049006d0061006700650100000249000000c10000001600ffffff000000010000010f000002c4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000028000002c4000000ad00fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000055f0000003efc0100000002fb0000000800540069006d006501000000000000055f0000030000fffffffb0000000800540069006d00650100000000000004500000000000000000000003ac000002e200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 237 | Raw Image: 238 | collapsed: false 239 | Selection: 240 | collapsed: false 241 | Time: 242 | collapsed: false 243 | Tool Properties: 244 | collapsed: false 245 | Views: 246 | collapsed: true 247 | Width: 1375 248 | X: 65 249 | Y: 24 250 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/launch/rviz/larvio_rviz.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/nodelets.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | System manager nodelet for organize the visual 7 | front-end and filter back-end. 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | larvio 5 | 0.0.1 6 | Lightweight Accurate and Robust VIO based on MSCKF 7 | 8 | PetWorm 9 | 10 | TODO 11 | 12 | PetWorm 13 | 14 | catkin 15 | 16 | roscpp 17 | std_msgs 18 | tf 19 | nav_msgs 20 | sensor_msgs 21 | geometry_msgs 22 | eigen_conversions 23 | tf_conversions 24 | nodelet 25 | image_transport 26 | cv_bridge 27 | message_filters 28 | pcl_conversions 29 | pcl_ros 30 | std_srvs 31 | message_runtime 32 | 33 | libpcl-all-dev 34 | libpcl-all 35 | suitesparse 36 | 37 | rosunit 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/src/System.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-21. 3 | // Managing the image processer and the estimator. 
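// The implementation below buffers the incoming IMU and image messages, runs
// the ImageProcessor front-end on each image, feeds the tracked features to
// the LarVio filter back-end, and publishes the resulting odometry, path and
// feature point clouds.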
4 | // 5 | 6 | #include 7 | 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | 14 | #include 15 | #include 16 | 17 | using namespace std; 18 | using namespace cv; 19 | using namespace Eigen; 20 | 21 | namespace larvio { 22 | 23 | 24 | System::System(ros::NodeHandle& n) : nh(n) {} 25 | 26 | 27 | System::~System() { 28 | // Clear buffer 29 | imu_msg_buffer.clear(); 30 | img_msg_buffer.clear(); 31 | } 32 | 33 | 34 | // Load parameters from launch file 35 | bool System::loadParameters() { 36 | // Configuration file path. 37 | nh.getParam("config_file", config_file); 38 | 39 | // Imu and img synchronized threshold. 40 | double imu_rate; 41 | nh.param("imu_rate", imu_rate, 200.0); 42 | imu_img_timeTh = 1/(2*imu_rate); 43 | 44 | return true; 45 | } 46 | 47 | 48 | // Subscribe image and imu msgs. 49 | bool System::createRosIO() { 50 | // Subscribe imu msg. 51 | imu_sub = nh.subscribe("imu", 5000, &System::imuCallback, this); 52 | 53 | // Subscribe image msg. 54 | img_sub = nh.subscribe("cam0_image", 50, &System::imageCallback, this); 55 | 56 | // Advertise processed image msg. 57 | image_transport::ImageTransport it(nh); 58 | vis_img_pub = it.advertise("visualization_image", 1); 59 | 60 | // Advertise odometry msg. 61 | odom_pub = nh.advertise("odom", 10); 62 | 63 | // Advertise point cloud msg. 64 | stable_feature_pub = nh.advertise( 65 | "stable_feature_point_cloud", 1); 66 | active_feature_pub = nh.advertise( 67 | "active_feature_point_cloud", 1); 68 | 69 | // Advertise path msg. 70 | path_pub = nh.advertise("path", 10); 71 | 72 | nh.param("fixed_frame_id", fixed_frame_id, "world"); 73 | nh.param("child_frame_id", child_frame_id, "robot"); 74 | 75 | stable_feature_msg_ptr.reset( 76 | new pcl::PointCloud()); 77 | stable_feature_msg_ptr->header.frame_id = fixed_frame_id; 78 | stable_feature_msg_ptr->height = 1; 79 | 80 | return true; 81 | } 82 | 83 | 84 | // Initializing the system. 85 | bool System::initialize() { 86 | // Load necessary parameters 87 | if (!loadParameters()) 88 | return false; 89 | ROS_INFO("System: Finish loading ROS parameters..."); 90 | 91 | // Set pointers of image processer and estimator. 92 | ImgProcesser.reset(new ImageProcessor(config_file)); 93 | Estimator.reset(new LarVio(config_file)); 94 | 95 | // Initialize image processer and estimator. 96 | if (!ImgProcesser->initialize()) { 97 | ROS_WARN("Image Processer initialization failed!"); 98 | return false; 99 | } 100 | if (!Estimator->initialize()) { 101 | ROS_WARN("Estimator initialization failed!"); 102 | return false; 103 | } 104 | 105 | // Try subscribing msgs 106 | if (!createRosIO()) 107 | return false; 108 | ROS_INFO("System Manager: Finish creating ROS IO..."); 109 | 110 | return true; 111 | } 112 | 113 | 114 | // Push imu msg into the buffer. 115 | void System::imuCallback(const sensor_msgs::ImuConstPtr& msg) { 116 | imu_msg_buffer.push_back(ImuData(msg->header.stamp.toSec(), 117 | msg->angular_velocity.x, msg->angular_velocity.y, msg->angular_velocity.z, 118 | msg->linear_acceleration.x, msg->linear_acceleration.y, msg->linear_acceleration.z)); 119 | } 120 | 121 | 122 | // Process the image and trigger the estimator. 123 | void System::imageCallback(const sensor_msgs::ImageConstPtr& msg) { 124 | // Do nothing if no imu msg is received. 
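// Otherwise the image is queued until IMU data reaching its timestamp (up to
// the tolerance imu_img_timeTh, i.e. half an IMU sampling period) is
// available; every queued image with sufficient IMU coverage is then
// processed in arrival order.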
125 | if (imu_msg_buffer.empty()) 126 | return; 127 | 128 | // test 129 | cv_bridge::CvImageConstPtr cvCPtr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::MONO8); 130 | larvio::ImageDataPtr msgPtr(new ImgData); 131 | msgPtr->timeStampToSec = cvCPtr->header.stamp.toSec(); 132 | msgPtr->image = cvCPtr->image.clone(); 133 | std_msgs::Header header = cvCPtr->header; 134 | 135 | // Decide if use img msg in buffer. 136 | bool bUseBuff = false; 137 | if (!imu_msg_buffer.empty() || 138 | (imu_msg_buffer.end()-1)->timeStampToSec-msgPtr->timeStampToSec<-imu_img_timeTh) { 139 | img_msg_buffer.push_back(msgPtr); 140 | header_buffer.push_back(header); 141 | } 142 | if (!img_msg_buffer.empty()) { 143 | if ((imu_msg_buffer.end()-1)->timeStampToSec-(*(img_msg_buffer.begin()))->timeStampToSec<-imu_img_timeTh) 144 | return; 145 | bUseBuff = true; 146 | } 147 | 148 | if (!bUseBuff) { 149 | MonoCameraMeasurementPtr features = new MonoCameraMeasurement; 150 | 151 | // Process image to get feature measurement. 152 | bool bProcess = ImgProcesser->processImage(msgPtr, imu_msg_buffer, features); 153 | 154 | // Filtering if get processed feature. 155 | bool bPubOdo = false; 156 | if (bProcess) { 157 | bPubOdo = Estimator->processFeatures(features, imu_msg_buffer); 158 | } 159 | 160 | // Publish msgs if necessary 161 | if (bProcess) { 162 | cv_bridge::CvImage _image(header, "bgr8", ImgProcesser->getVisualImg()); 163 | vis_img_pub.publish(_image.toImageMsg()); 164 | } 165 | if (bPubOdo) { 166 | publishVIO(header.stamp); 167 | } 168 | 169 | delete features; 170 | 171 | return; 172 | } else { 173 | // Loop for using all the img in the buffer that satisfy the condition. 174 | int counter = 0; 175 | for (int i = 0; i < img_msg_buffer.size(); ++i) { 176 | // Break the loop if imu data is not enough 177 | if ((imu_msg_buffer.end()-1)->timeStampToSec-img_msg_buffer[i]->timeStampToSec<-imu_img_timeTh) 178 | break; 179 | 180 | MonoCameraMeasurementPtr features = new MonoCameraMeasurement; 181 | 182 | // Process image to get feature measurement. 183 | bool bProcess = ImgProcesser->processImage(img_msg_buffer[i], imu_msg_buffer, features); 184 | 185 | // Filtering if get processed feature. 186 | bool bPubOdo = false; 187 | if (bProcess) { 188 | bPubOdo = Estimator->processFeatures(features, imu_msg_buffer); 189 | } 190 | 191 | // Publish msgs if necessary 192 | if (bProcess) { 193 | cv_bridge::CvImage _image(header_buffer[i], "bgr8", ImgProcesser->getVisualImg()); 194 | vis_img_pub.publish(_image.toImageMsg()); 195 | } 196 | if (bPubOdo) { 197 | publishVIO(header_buffer[i].stamp); 198 | } 199 | 200 | delete features; 201 | 202 | counter++; 203 | } 204 | img_msg_buffer.erase(img_msg_buffer.begin(), img_msg_buffer.begin()+counter); 205 | header_buffer.erase(header_buffer.begin(), header_buffer.begin()+counter); 206 | } 207 | } 208 | 209 | 210 | // Publish informations of VIO, including odometry, path, points cloud and whatever needed. 
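// The odometry message carries the body pose and velocity together with their
// covariance blocks from the filter, the path message accumulates poses over
// time, the stable map points are appended to a persistent cloud, and the
// active map points are rebuilt from scratch on every call.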
211 | void System::publishVIO(const ros::Time& time) { 212 | // construct odometry msg 213 | odom_msg.header.stamp = time; 214 | odom_msg.header.frame_id = fixed_frame_id; 215 | odom_msg.child_frame_id = child_frame_id; 216 | Eigen::Isometry3d T_b_w = Estimator->getTbw(); 217 | Eigen::Vector3d body_velocity = Estimator->getVel(); 218 | Matrix P_body_pose = Estimator->getPpose(); 219 | Matrix3d P_body_vel = Estimator->getPvel(); 220 | tf::poseEigenToMsg(T_b_w, odom_msg.pose.pose); 221 | tf::vectorEigenToMsg(body_velocity, odom_msg.twist.twist.linear); 222 | for (int i = 0; i < 6; ++i) 223 | for (int j = 0; j < 6; ++j) 224 | odom_msg.pose.covariance[6*i+j] = P_body_pose(i, j); 225 | for (int i = 0; i < 3; ++i) 226 | for (int j = 0; j < 3; ++j) 227 | odom_msg.twist.covariance[i*6+j] = P_body_vel(i, j); 228 | 229 | // construct path msg 230 | path_msg.header.stamp = time; 231 | path_msg.header.frame_id = fixed_frame_id; 232 | geometry_msgs::PoseStamped curr_path; 233 | curr_path.header.stamp = time; 234 | curr_path.header.frame_id = fixed_frame_id; 235 | tf::poseEigenToMsg(T_b_w, curr_path.pose); 236 | path_msg.poses.push_back(curr_path); 237 | 238 | // construct point cloud msg 239 | // Publish the 3D positions of the features. 240 | // Including stable and active ones. 241 | // --Stable features 242 | std::map StableMapPoints; 243 | Estimator->getStableMapPointPositions(StableMapPoints); 244 | for (const auto& item : StableMapPoints) { 245 | const auto& feature_position = item.second; 246 | stable_feature_msg_ptr->points.push_back(pcl::PointXYZ( 247 | feature_position(0), feature_position(1), feature_position(2))); 248 | } 249 | stable_feature_msg_ptr->width = stable_feature_msg_ptr->points.size(); 250 | // --Active features 251 | active_feature_msg_ptr.reset( 252 | new pcl::PointCloud()); 253 | active_feature_msg_ptr->header.frame_id = fixed_frame_id; 254 | active_feature_msg_ptr->height = 1; 255 | std::map ActiveMapPoints; 256 | Estimator->getActiveeMapPointPositions(ActiveMapPoints); 257 | for (const auto& item : ActiveMapPoints) { 258 | const auto& feature_position = item.second; 259 | active_feature_msg_ptr->points.push_back(pcl::PointXYZ( 260 | feature_position(0), feature_position(1), feature_position(2))); 261 | } 262 | active_feature_msg_ptr->width = active_feature_msg_ptr->points.size(); 263 | 264 | odom_pub.publish(odom_msg); 265 | stable_feature_pub.publish(stable_feature_msg_ptr); 266 | active_feature_pub.publish(active_feature_msg_ptr); 267 | path_pub.publish(path_msg); 268 | } 269 | 270 | } // end namespace larvio 271 | -------------------------------------------------------------------------------- /ros_wrapper/src/larvio/src/System_nodelet.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-21. 3 | // Nodelet for system manager. 
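// The nodelet is a thin wrapper that constructs a System on its private node
// handle and reports an error if the initialization fails.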
4 | // 5 | 6 | #include 7 | 8 | namespace larvio { 9 | 10 | void SystemNodelet::onInit() { 11 | system_ptr.reset(new System(getPrivateNodeHandle())); 12 | if (!system_ptr->initialize()) { 13 | ROS_ERROR("Cannot initialize System Manager..."); 14 | return; 15 | } 16 | return; 17 | } 18 | 19 | PLUGINLIB_EXPORT_CLASS(larvio::SystemNodelet, nodelet::Nodelet); 20 | 21 | } // end namespace larvio 22 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | ~/YOUR_PATH/LARVIO/build/larvio ~/YOUR_PATH_TO_DATASET/MH_01/mav0/imu0/data.csv ~/YOUR_PATH_TO_DATASET/MH_01/mav0/cam0/data.csv ~/YOUR_PATH_TO_DATASET/MH_01/mav0/cam0/data ~/YOUR_PATH/LARVIO/config/euroc.yaml -------------------------------------------------------------------------------- /src/DynamicInitializer.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A vio initializer that utlize dynamic imu and img data to initialize. 4 | // The core method comes from VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono) 5 | // 6 | 7 | #include "Initializer/DynamicInitializer.h" 8 | #include "larvio/math_utils.hpp" 9 | 10 | #include 11 | 12 | // debug log 13 | #include 14 | #include 15 | 16 | using namespace std; 17 | using namespace Eigen; 18 | 19 | namespace larvio { 20 | 21 | bool DynamicInitializer::tryDynInit(const std::vector& imu_msg_buffer, 22 | MonoCameraMeasurementPtr img_msg) { 23 | 24 | // Counter how many IMU msgs in the buffer are used. 25 | int used_imu_msg_cntr = 0; 26 | 27 | double time_bound = img_msg->timeStampToSec+td; 28 | 29 | for (const auto& imu_msg : imu_msg_buffer) { 30 | double imu_time = imu_msg.timeStampToSec; 31 | if ( imu_time <= lower_time_bound ) continue; 32 | if ( imu_time-time_bound > imu_img_timeTh ) break; // threshold is adjusted according to the imu frequency 33 | ddt = imu_time-time_bound; // ddt 34 | processIMU(imu_msg); 35 | ++used_imu_msg_cntr; 36 | } 37 | lower_time_bound = time_bound+imu_img_timeTh; 38 | 39 | processImage(img_msg); 40 | 41 | if (bInit) { 42 | printf("Dynamic initialization success !\n\n"); 43 | return true; 44 | } else 45 | return false; 46 | } 47 | 48 | 49 | void DynamicInitializer::processIMU(const ImuData& imu_msg) { 50 | 51 | Vector3d angular_velocity, linear_acceleration; 52 | linear_acceleration = Ma*imu_msg.linear_acceleration; 53 | angular_velocity = Tg*(imu_msg.angular_velocity-As*linear_acceleration); 54 | 55 | // qxc: into this if only at the first time 56 | if (!first_imu) { 57 | first_imu = true; 58 | acc_0 = linear_acceleration; 59 | gyr_0 = angular_velocity; 60 | curr_time = imu_msg.timeStampToSec; 61 | } 62 | 63 | double dt = imu_msg.timeStampToSec-curr_time; 64 | 65 | if (!pre_integrations[frame_count]) // qxc: into this if only at the first time 66 | { 67 | pre_integrations[frame_count].reset(new IntegrationBase{acc_0, gyr_0, Bas[frame_count], Bgs[frame_count], 68 | acc_n, acc_w, gyr_n, gyr_w}); 69 | } 70 | 71 | if (frame_count != 0) 72 | { 73 | pre_integrations[frame_count]->push_back(dt, linear_acceleration, angular_velocity); 74 | tmp_pre_integration->push_back(dt, linear_acceleration, angular_velocity); 75 | 76 | dt_buf[frame_count].push_back(dt); 77 | linear_acceleration_buf[frame_count].push_back(linear_acceleration); 78 | angular_velocity_buf[frame_count].push_back(angular_velocity); 79 | 80 | int j = frame_count; 81 | Vector3d un_acc_0 = Rs[j] * 
(acc_0 - Bas[j]) - g; 82 | Vector3d un_gyr = 0.5 * (gyr_0 + angular_velocity) - Bgs[j]; 83 | Rs[j] *= getSmallAngleQuaternion(un_gyr * dt).toRotationMatrix(); 84 | Vector3d un_acc_1 = Rs[j] * (linear_acceleration - Bas[j]) - g; 85 | Vector3d un_acc = 0.5 * (un_acc_0 + un_acc_1); 86 | Ps[j] += dt * Vs[j] + 0.5 * dt * dt * un_acc; 87 | Vs[j] += dt * un_acc; 88 | } 89 | 90 | acc_0 = linear_acceleration; 91 | gyr_0 = angular_velocity; 92 | curr_time = imu_msg.timeStampToSec; 93 | 94 | // QXC: update last m_gyro and last m_acc 95 | last_gyro = angular_velocity; 96 | last_acc = linear_acceleration; 97 | } 98 | 99 | 100 | void DynamicInitializer::processImage(MonoCameraMeasurementPtr img_msg) { 101 | 102 | if (f_manager.addFeatureCheckParallax(frame_count, img_msg, td+ddt)) 103 | marginalization_flag = MARGIN_OLD; 104 | else 105 | marginalization_flag = MARGIN_SECOND_NEW; 106 | 107 | Times[frame_count] = img_msg->timeStampToSec; 108 | 109 | ImageFrame imageframe(img_msg, td+ddt); 110 | imageframe.pre_integration = tmp_pre_integration; 111 | all_image_frame.insert(make_pair(img_msg->timeStampToSec+td, imageframe)); 112 | tmp_pre_integration.reset(new IntegrationBase{acc_0, gyr_0, Bas[frame_count], Bgs[frame_count], 113 | acc_n, acc_w, gyr_n, gyr_w}); 114 | 115 | if (frame_count == WINDOW_SIZE) { 116 | bool result = false; 117 | if((img_msg->timeStampToSec-initial_timestamp) > 0.1) { 118 | result = initialStructure(); 119 | initial_timestamp = img_msg->timeStampToSec; 120 | } 121 | if(result) 122 | bInit = true; 123 | else { 124 | slideWindow(); 125 | } 126 | } else 127 | frame_count++; 128 | } 129 | 130 | 131 | bool DynamicInitializer::initialStructure() { 132 | 133 | //check imu observibility 134 | { 135 | map::iterator frame_it; 136 | Vector3d sum_g; 137 | for (frame_it = all_image_frame.begin(), frame_it++; frame_it != all_image_frame.end(); frame_it++) 138 | { 139 | double dt = frame_it->second.pre_integration->sum_dt; 140 | Vector3d tmp_g = frame_it->second.pre_integration->delta_v / dt; 141 | sum_g += tmp_g; 142 | } 143 | Vector3d aver_g; 144 | aver_g = sum_g * 1.0 / ((int)all_image_frame.size() - 1); 145 | double var = 0; 146 | for (frame_it = all_image_frame.begin(), frame_it++; frame_it != all_image_frame.end(); frame_it++) 147 | { 148 | double dt = frame_it->second.pre_integration->sum_dt; 149 | Vector3d tmp_g = frame_it->second.pre_integration->delta_v / dt; 150 | var += (tmp_g - aver_g).transpose() * (tmp_g - aver_g); 151 | } 152 | var = sqrt(var / ((int)all_image_frame.size() - 1)); 153 | if(var < 0.25) 154 | { 155 | // printf("IMU excitation not enough!\n"); 156 | } 157 | } 158 | 159 | // global sfm 160 | Quaterniond Q[frame_count + 1]; 161 | Vector3d T[frame_count + 1]; 162 | map sfm_tracked_points; 163 | vector sfm_f; 164 | for (auto &it_per_id : f_manager.feature) 165 | { 166 | int imu_j = it_per_id.start_frame - 1; 167 | SFMFeature tmp_feature; 168 | tmp_feature.state = false; 169 | tmp_feature.id = it_per_id.feature_id; 170 | for (auto &it_per_frame : it_per_id.feature_per_frame) 171 | { 172 | imu_j++; 173 | Vector3d pts_j = it_per_frame.point; 174 | tmp_feature.observation.push_back(make_pair(imu_j, Eigen::Vector2d{pts_j.x(), pts_j.y()})); 175 | } 176 | sfm_f.push_back(tmp_feature); 177 | } 178 | Matrix3d relative_R; 179 | Vector3d relative_T; 180 | int l; 181 | if (!relativePose(relative_R, relative_T, l)) 182 | { 183 | // printf("Not enough features or parallax; Move device around\n"); 184 | return false; 185 | } 186 | GlobalSFM sfm; 187 | if(!sfm.construct(frame_count + 1, 
Q, T, l, 188 | relative_R, relative_T, 189 | sfm_f, sfm_tracked_points)) 190 | { 191 | // printf("global SFM failed!\n"); 192 | marginalization_flag = MARGIN_OLD; 193 | return false; 194 | } 195 | 196 | // solve pnp for all frame 197 | map::iterator frame_it; 198 | map::iterator it; 199 | frame_it = all_image_frame.begin(); 200 | for (int i = 0; frame_it != all_image_frame.end( ); frame_it++) 201 | { 202 | // provide initial guess 203 | cv::Mat r, rvec, t, D, tmp_r; 204 | if((frame_it->first) == Times[i]+td) { 205 | frame_it->second.is_key_frame = true; 206 | frame_it->second.R = Q[i].toRotationMatrix() * RIC.transpose(); 207 | frame_it->second.T = T[i]; 208 | i++; 209 | continue; 210 | } 211 | if((frame_it->first) > Times[i]+td) 212 | i++; 213 | 214 | Matrix3d R_inital = (Q[i].inverse()).toRotationMatrix(); 215 | Vector3d P_inital = - R_inital * T[i]; 216 | cv::eigen2cv(R_inital, tmp_r); 217 | cv::Rodrigues(tmp_r, rvec); 218 | cv::eigen2cv(P_inital, t); 219 | 220 | frame_it->second.is_key_frame = false; 221 | vector pts_3_vector; 222 | vector pts_2_vector; 223 | for (auto &id_pts : frame_it->second.points) 224 | { 225 | int feature_id = id_pts.first; 226 | auto i_p = id_pts.second; 227 | it = sfm_tracked_points.find(feature_id); 228 | if(it != sfm_tracked_points.end()) 229 | { 230 | Vector3d world_pts = it->second; 231 | cv::Point3f pts_3(world_pts(0), world_pts(1), world_pts(2)); 232 | pts_3_vector.push_back(pts_3); 233 | Vector2d img_pts = i_p.head<2>(); 234 | cv::Point2f pts_2(img_pts(0), img_pts(1)); 235 | pts_2_vector.push_back(pts_2); 236 | } 237 | } 238 | cv::Mat K = (cv::Mat_(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); 239 | if(pts_3_vector.size() < 6) 240 | { 241 | // printf("Not enough points for solve pnp !\n"); 242 | return false; 243 | } 244 | if (!cv::solvePnP(pts_3_vector, pts_2_vector, K, D, rvec, t, 1)) 245 | { 246 | // printf("solve pnp fail!\n"); 247 | return false; 248 | } 249 | cv::Rodrigues(rvec, r); 250 | MatrixXd R_pnp,tmp_R_pnp; 251 | cv::cv2eigen(r, tmp_R_pnp); 252 | R_pnp = tmp_R_pnp.transpose(); 253 | MatrixXd T_pnp; 254 | cv::cv2eigen(t, T_pnp); 255 | T_pnp = R_pnp * (-T_pnp); 256 | frame_it->second.R = R_pnp * RIC.transpose(); 257 | frame_it->second.T = T_pnp; 258 | } 259 | 260 | if (visualInitialAlign()) 261 | { 262 | return true; 263 | } 264 | else 265 | { 266 | // printf("misalign visual structure with IMU\n"); 267 | return false; 268 | } 269 | } 270 | 271 | 272 | bool DynamicInitializer::visualInitialAlign() { 273 | 274 | VectorXd x; 275 | // solve scale 276 | bool result = VisualIMUAlignment(all_image_frame, Bgs, g, x, TIC); 277 | if(!result) 278 | { 279 | // printf("solve g failed!\n"); 280 | return false; 281 | } 282 | 283 | // change state 284 | for (int i = 0; i <= frame_count; i++) 285 | { 286 | Matrix3d Ri = all_image_frame[Times[i]+td].R; 287 | Vector3d Pi = all_image_frame[Times[i]+td].T; 288 | Ps[i] = Pi; 289 | Rs[i] = Ri; 290 | all_image_frame[Times[i]+td].is_key_frame = true; 291 | } 292 | 293 | // update velocity: express it under reference camera frame 294 | int kv = -1; 295 | map::iterator frame_i; 296 | for (frame_i = all_image_frame.begin(); frame_i != all_image_frame.end(); frame_i++) 297 | { 298 | if(frame_i->second.is_key_frame) 299 | { 300 | kv++; 301 | Vs[kv] = frame_i->second.R * x.segment<3>(kv * 3); 302 | } 303 | } 304 | 305 | // Compute intial state 306 | // Initialize the initial orientation, so that the estimation 307 | // is consistent with the inertial frame. 
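// The gravity estimate g is expressed in the reference camera frame c0; it is
// rotated onto the world +z axis (i.e. onto -gravity_world) to obtain R_c0->w.
// The newest body rotation and velocity are then re-expressed in this
// gravity-aligned world frame, and the position is reset to the origin.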
308 | Vector3d gravity_c0(g); 309 | double gravity_norm = gravity_c0.norm(); 310 | Vector3d gravity_world(0.0, 0.0, -gravity_norm); 311 | 312 | // Set rotation 313 | Matrix3d R_c02w = Quaterniond::FromTwoVectors( 314 | gravity_c0, -gravity_world).toRotationMatrix(); 315 | Matrix3d R_bl2w = R_c02w*Rs[frame_count]; 316 | Quaterniond q0_w_i = Quaterniond(R_bl2w); 317 | orientation = q0_w_i.coeffs(); 318 | 319 | // Set other state and timestamp 320 | state_time = Times[frame_count]+td+ddt; 321 | position = Vector3d(0.0, 0.0, 0.0); 322 | velocity = R_c02w*Vs[frame_count]; 323 | acc_bias = Vector3d(0.0, 0.0, 0.0); 324 | gyro_bias = Bgs[frame_count]; 325 | 326 | return true; 327 | } 328 | 329 | 330 | bool DynamicInitializer::relativePose(Matrix3d &relative_R, Vector3d &relative_T, int &l) { 331 | 332 | // find previous frame which contians enough correspondance and parallex with newest frame 333 | for (int i = 0; i < WINDOW_SIZE; i++) 334 | { 335 | vector> corres; 336 | corres = f_manager.getCorresponding(i, WINDOW_SIZE); 337 | if (corres.size() > 20) 338 | { 339 | double sum_parallax = 0; 340 | double average_parallax; 341 | for (int j = 0; j < int(corres.size()); j++) 342 | { 343 | Vector2d pts_0(corres[j].first(0), corres[j].first(1)); 344 | Vector2d pts_1(corres[j].second(0), corres[j].second(1)); 345 | double parallax = (pts_0 - pts_1).norm(); 346 | sum_parallax = sum_parallax + parallax; 347 | } 348 | average_parallax = 1.0 * sum_parallax / int(corres.size()); 349 | if(average_parallax * 460 > 30 && m_estimator.solveRelativeRT(corres, relative_R, relative_T)) 350 | { 351 | l = i; 352 | // printf("average_parallax %f choose l %d and newest frame to triangulate the whole structure\n", average_parallax * 460, l); 353 | return true; 354 | } 355 | } 356 | } 357 | return false; 358 | } 359 | 360 | 361 | void DynamicInitializer::slideWindow() { 362 | 363 | if (marginalization_flag == MARGIN_OLD) 364 | { 365 | if (frame_count == WINDOW_SIZE) 366 | { 367 | for (int i = 0; i < WINDOW_SIZE; i++) 368 | { 369 | Rs[i].swap(Rs[i + 1]); 370 | 371 | pre_integrations[i].swap(pre_integrations[i + 1]); 372 | 373 | dt_buf[i].swap(dt_buf[i + 1]); 374 | linear_acceleration_buf[i].swap(linear_acceleration_buf[i + 1]); 375 | angular_velocity_buf[i].swap(angular_velocity_buf[i + 1]); 376 | 377 | Times[i] = Times[i + 1]; 378 | Ps[i].swap(Ps[i + 1]); 379 | Vs[i].swap(Vs[i + 1]); 380 | Bas[i].swap(Bas[i + 1]); 381 | Bgs[i].swap(Bgs[i + 1]); 382 | } 383 | Times[WINDOW_SIZE] = Times[WINDOW_SIZE - 1]; 384 | Ps[WINDOW_SIZE] = Ps[WINDOW_SIZE - 1]; 385 | Vs[WINDOW_SIZE] = Vs[WINDOW_SIZE - 1]; 386 | Rs[WINDOW_SIZE] = Rs[WINDOW_SIZE - 1]; 387 | Bas[WINDOW_SIZE] = Bas[WINDOW_SIZE - 1]; 388 | Bgs[WINDOW_SIZE] = Bgs[WINDOW_SIZE - 1]; 389 | 390 | pre_integrations[WINDOW_SIZE].reset(new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE], 391 | acc_n, acc_w, gyr_n, gyr_w}); 392 | 393 | dt_buf[WINDOW_SIZE].clear(); 394 | linear_acceleration_buf[WINDOW_SIZE].clear(); 395 | angular_velocity_buf[WINDOW_SIZE].clear(); 396 | 397 | double t_0 = Times[0]+td; 398 | map::iterator it_0; 399 | it_0 = all_image_frame.find(t_0); 400 | it_0->second.pre_integration.reset(); 401 | all_image_frame.erase(all_image_frame.begin(), it_0); 402 | 403 | f_manager.removeBack(); 404 | } 405 | } 406 | else 407 | { 408 | if (frame_count == WINDOW_SIZE) 409 | { 410 | for (unsigned int i = 0; i < dt_buf[frame_count].size(); i++) 411 | { 412 | double tmp_dt = dt_buf[frame_count][i]; 413 | Vector3d tmp_linear_acceleration = 
linear_acceleration_buf[frame_count][i]; 414 | Vector3d tmp_angular_velocity = angular_velocity_buf[frame_count][i]; 415 | 416 | pre_integrations[frame_count - 1]->push_back(tmp_dt, tmp_linear_acceleration, tmp_angular_velocity); 417 | 418 | dt_buf[frame_count - 1].push_back(tmp_dt); 419 | linear_acceleration_buf[frame_count - 1].push_back(tmp_linear_acceleration); 420 | angular_velocity_buf[frame_count - 1].push_back(tmp_angular_velocity); 421 | } 422 | 423 | Times[frame_count - 1] = Times[frame_count]; 424 | Ps[frame_count - 1] = Ps[frame_count]; 425 | Vs[frame_count - 1] = Vs[frame_count]; 426 | Rs[frame_count - 1] = Rs[frame_count]; 427 | Bas[frame_count - 1] = Bas[frame_count]; 428 | Bgs[frame_count - 1] = Bgs[frame_count]; 429 | 430 | pre_integrations[WINDOW_SIZE].reset(new IntegrationBase{acc_0, gyr_0, Bas[WINDOW_SIZE], Bgs[WINDOW_SIZE], 431 | acc_n, acc_w, gyr_n, gyr_w}); 432 | 433 | dt_buf[WINDOW_SIZE].clear(); 434 | linear_acceleration_buf[WINDOW_SIZE].clear(); 435 | angular_velocity_buf[WINDOW_SIZE].clear(); 436 | 437 | f_manager.removeFront(frame_count); 438 | } 439 | } 440 | } 441 | 442 | 443 | void DynamicInitializer::assignInitialState(std::vector& imu_msg_buffer, 444 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, IMUState& imu_state) { 445 | if (!bInit) { 446 | printf("Cannot assign initial state before initialization !!!\n"); 447 | return; 448 | } 449 | 450 | // Remove used imu data 451 | int usefulImuSize = 0; 452 | for (const auto& imu_msg : imu_msg_buffer) { 453 | double imu_time = imu_msg.timeStampToSec; 454 | if (imu_time > state_time) break; 455 | usefulImuSize++; 456 | } 457 | 458 | // Earse used imu data 459 | imu_msg_buffer.erase(imu_msg_buffer.begin(), 460 | imu_msg_buffer.begin()+usefulImuSize); 461 | 462 | // Initialize last m_gyro and last m_acc 463 | m_gyro_old = last_gyro; 464 | m_acc_old = last_acc; 465 | 466 | // Set initial state 467 | imu_state.time = state_time; 468 | imu_state.gyro_bias = gyro_bias; 469 | imu_state.acc_bias = acc_bias; 470 | imu_state.orientation = orientation; 471 | imu_state.position = position; 472 | imu_state.velocity = velocity; 473 | 474 | for (int i = 0; i < WINDOW_SIZE + 1; i++) { 475 | dt_buf[i].clear(); 476 | linear_acceleration_buf[i].clear(); 477 | angular_velocity_buf[i].clear(); 478 | } 479 | 480 | return; 481 | } 482 | 483 | 484 | } 485 | -------------------------------------------------------------------------------- /src/FlexibleInitializer.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A flexible initializer that can automatically initialize in case of static or dynamic scene. 
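// It first tries the static (inclinometer) initializer, which only requires a
// short stationary period, and falls back to the dynamic, VINS-Mono style
// initializer otherwise.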
4 | // 5 | 6 | #include "Initializer/FlexibleInitializer.h" 7 | 8 | namespace larvio { 9 | 10 | bool FlexibleInitializer::tryIncInit(std::vector& imu_msg_buffer, 11 | MonoCameraMeasurementPtr img_msg, 12 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, 13 | IMUState& imu_state) { 14 | 15 | if(staticInitPtr->tryIncInit(imu_msg_buffer, img_msg)) { 16 | staticInitPtr->assignInitialState(imu_msg_buffer, 17 | m_gyro_old, m_acc_old, imu_state); 18 | return true; 19 | } else if (dynamicInitPtr->tryDynInit(imu_msg_buffer, img_msg)) { 20 | dynamicInitPtr->assignInitialState(imu_msg_buffer, 21 | m_gyro_old, m_acc_old, imu_state); 22 | return true; 23 | } 24 | 25 | return false; 26 | } 27 | 28 | 29 | } -------------------------------------------------------------------------------- /src/StaticInitializer.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Created by xiaochen at 19-8-13. 3 | // A inclinometer-initializer utilizing the static scene. 4 | // 5 | 6 | #include "Initializer/StaticInitializer.h" 7 | 8 | using namespace std; 9 | using namespace Eigen; 10 | 11 | namespace larvio { 12 | 13 | bool StaticInitializer::tryIncInit(const std::vector& imu_msg_buffer, 14 | MonoCameraMeasurementPtr img_msg) { 15 | // return false if this is the 1st image for inclinometer-initializer 16 | if (0 == staticImgCounter) { 17 | staticImgCounter++; 18 | init_features.clear(); 19 | // add features to init_features 20 | for (const auto& feature : img_msg->features) 21 | init_features[feature.id] = Vector2d(feature.u, feature.v); 22 | // assign the lower time bound as the timestamp of first img 23 | lower_time_bound = img_msg->timeStampToSec+td; 24 | return false; 25 | } 26 | 27 | // calculate feature distance of matched features between prev and curr images 28 | InitFeatures curr_features; 29 | list feature_dis; 30 | for (const auto& feature : img_msg->features) { 31 | curr_features[feature.id] = Vector2d(feature.u, feature.v); 32 | if (init_features.find(feature.id) != init_features.end()) { 33 | Vector2d vec2d_c(feature.u, feature.v); 34 | Vector2d vec2d_p = init_features[feature.id]; 35 | feature_dis.push_back((vec2d_c-vec2d_p).norm()); 36 | } 37 | } 38 | // return false if number of matched features is small 39 | if (feature_dis.empty() 40 | || feature_dis.size()<20) { 41 | staticImgCounter = 0; 42 | return false; 43 | } 44 | // ignore outliers rudely 45 | feature_dis.sort(); 46 | auto itr = feature_dis.end(); 47 | for (int i = 0; i < 19; i++) 48 | itr--; 49 | double maxDis = *itr; 50 | // classified as static image if maxDis is smaller than threshold, otherwise reset image counter 51 | if (maxDis < max_feature_dis) { 52 | staticImgCounter++; 53 | init_features.swap(curr_features); 54 | if (staticImgCounter < static_Num) // return false if number of consecitive static images does not reach @static_Num 55 | return false; 56 | } else { 57 | // printf("inclinometer-initializer failed at No.%d static image.",staticImgCounter+1); 58 | staticImgCounter = 0; 59 | return false; 60 | } 61 | 62 | /* reach here means staticImgCounter is equal to static_Num */ 63 | 64 | // initialize rotation and gyro bias by imu data between the 1st and the No.static_Num image 65 | // set take_off_stamp as time of the No.static_Num image 66 | // set initial imu_state as the state of No.static_Num image 67 | // earse imu data with timestamp earlier than the No.static_Num image 68 | initializeGravityAndBias(img_msg->timeStampToSec+td, imu_msg_buffer); 69 | 70 | bInit = true; 71 | 
72 | return true; 73 | } 74 | 75 | 76 | void StaticInitializer::initializeGravityAndBias(const double& time_bound, 77 | const std::vector& imu_msg_buffer) { 78 | // Initialize gravity and gyro bias. 79 | Vector3d sum_angular_vel = Vector3d::Zero(); 80 | Vector3d sum_linear_acc = Vector3d::Zero(); 81 | 82 | int usefulImuSize = 0; 83 | double last_imu_time; 84 | for (const auto& imu_msg : imu_msg_buffer) { 85 | double imu_time = imu_msg.timeStampToSec; 86 | if (imu_time < lower_time_bound) continue; 87 | if (imu_time > time_bound) break; 88 | 89 | sum_angular_vel += Tg*(imu_msg.angular_velocity-As*Ma*imu_msg.linear_acceleration); 90 | sum_linear_acc += Ma*imu_msg.linear_acceleration; 91 | 92 | usefulImuSize++; 93 | 94 | last_imu_time = imu_time; 95 | } 96 | 97 | // Compute gyro bias. 98 | gyro_bias = sum_angular_vel / usefulImuSize; 99 | 100 | // This is the gravity in the IMU frame. 101 | Vector3d gravity_imu = 102 | sum_linear_acc / usefulImuSize; 103 | 104 | // Initialize the initial orientation, so that the estimation 105 | // is consistent with the inertial frame. 106 | double gravity_norm = gravity_imu.norm(); 107 | Vector3d gravity_world(0.0, 0.0, -gravity_norm); 108 | 109 | // Set rotation 110 | Quaterniond q0_w_i = Quaterniond::FromTwoVectors( 111 | gravity_imu, -gravity_world); 112 | orientation = q0_w_i.coeffs(); 113 | 114 | // Set other state and timestamp 115 | state_time = last_imu_time; 116 | position = Vector3d(0.0, 0.0, 0.0); 117 | velocity = Vector3d(0.0, 0.0, 0.0); 118 | acc_bias = Vector3d(0.0, 0.0, 0.0); 119 | 120 | printf("Inclinometer-initializer completed by using %d imu data !!!\n\n",usefulImuSize); 121 | 122 | return; 123 | } 124 | 125 | 126 | void StaticInitializer::assignInitialState(std::vector& imu_msg_buffer, 127 | Eigen::Vector3d& m_gyro_old, Eigen::Vector3d& m_acc_old, IMUState& imu_state) { 128 | if (!bInit) { 129 | printf("Cannot assign initial state before initialization !!!\n"); 130 | return; 131 | } 132 | 133 | // Remove used imu data 134 | int usefulImuSize = 0; 135 | for (const auto& imu_msg : imu_msg_buffer) { 136 | double imu_time = imu_msg.timeStampToSec; 137 | if (imu_time > state_time) break; 138 | usefulImuSize++; 139 | } 140 | if (usefulImuSize>=imu_msg_buffer.size()) 141 | usefulImuSize--; 142 | 143 | // Initialize last m_gyro and last m_acc 144 | const auto& imu_msg = imu_msg_buffer[usefulImuSize]; 145 | m_gyro_old = imu_msg.angular_velocity; 146 | m_acc_old = imu_msg.linear_acceleration; 147 | 148 | // Earse used imu data 149 | imu_msg_buffer.erase(imu_msg_buffer.begin(), 150 | imu_msg_buffer.begin()+usefulImuSize); 151 | 152 | // Set initial state 153 | imu_state.time = state_time; 154 | imu_state.gyro_bias = gyro_bias; 155 | imu_state.acc_bias = acc_bias; 156 | imu_state.orientation = orientation; 157 | imu_state.position = position; 158 | imu_state.velocity = velocity; 159 | 160 | return; 161 | } 162 | 163 | 164 | } -------------------------------------------------------------------------------- /src/feature_manager.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-15. 3 | // Feature manager. 4 | // The original file belong to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 
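// It maintains the per-feature observation lists across the sliding window and
// uses the average feature parallax between recent frames to decide whether
// the second newest frame is kept as a keyframe (marginalizing the oldest
// frame) or is itself marginalized.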
5 | // 6 | 7 | #include "Initializer/feature_manager.h" 8 | 9 | namespace larvio { 10 | 11 | 12 | int FeaturePerId::endFrame() 13 | { 14 | return start_frame + feature_per_frame.size() - 1; 15 | } 16 | 17 | 18 | void FeatureManager::setRic(const Matrix3d& _ric) 19 | { 20 | ric = _ric; 21 | } 22 | 23 | void FeatureManager::clearState() 24 | { 25 | feature.clear(); 26 | } 27 | 28 | 29 | int FeatureManager::getFeatureCount() 30 | { 31 | int cnt = 0; 32 | for (auto &it : feature) 33 | { 34 | it.used_num = it.feature_per_frame.size(); 35 | 36 | if (it.used_num >= 2 && it.start_frame < WINDOW_SIZE - 2) 37 | { 38 | cnt++; 39 | } 40 | } 41 | return cnt; 42 | } 43 | 44 | 45 | bool FeatureManager::addFeatureCheckParallax(int frame_count, MonoCameraMeasurementPtr image, double td) 46 | { 47 | // printf("input feature: %d", (int)image.size()); 48 | // printf("num of feature: %d", getFeatureCount()); 49 | double parallax_sum = 0; 50 | int parallax_num = 0; 51 | last_track_num = 0; 52 | for (const auto& id_pts : image->features) 53 | { 54 | FeaturePerFrame f_per_fra(id_pts, td); 55 | 56 | int feature_id = id_pts.id; 57 | auto it = find_if(feature.begin(), feature.end(), [feature_id](const FeaturePerId &it) 58 | { 59 | return it.feature_id == feature_id; 60 | }); 61 | 62 | if (it == feature.end()) 63 | { 64 | feature.push_back(FeaturePerId(feature_id, frame_count)); 65 | feature.back().feature_per_frame.push_back(f_per_fra); 66 | } 67 | else if (it->feature_id == feature_id) 68 | { 69 | it->feature_per_frame.push_back(f_per_fra); 70 | last_track_num++; 71 | } 72 | } 73 | 74 | if (frame_count < 2 || last_track_num < 20) 75 | return true; 76 | 77 | for (auto &it_per_id : feature) 78 | { 79 | if (it_per_id.start_frame <= frame_count - 2 && 80 | it_per_id.start_frame + int(it_per_id.feature_per_frame.size()) - 1 >= frame_count - 1) 81 | { 82 | parallax_sum += compensatedParallax2(it_per_id, frame_count); 83 | parallax_num++; 84 | } 85 | } 86 | 87 | if (parallax_num == 0) 88 | { 89 | return true; 90 | } 91 | else 92 | { 93 | // printf("parallax_sum: %lf, parallax_num: %d", parallax_sum, parallax_num); 94 | // printf("current parallax: %lf", parallax_sum / parallax_num * FOCAL_LENGTH); 95 | return parallax_sum / parallax_num >= MIN_PARALLAX; 96 | } 97 | } 98 | 99 | 100 | vector> FeatureManager::getCorresponding(int frame_count_l, int frame_count_r) 101 | { 102 | vector> corres; 103 | for (auto &it : feature) 104 | { 105 | if (it.start_frame <= frame_count_l && it.endFrame() >= frame_count_r) 106 | { 107 | Vector3d a = Vector3d::Zero(), b = Vector3d::Zero(); 108 | int idx_l = frame_count_l - it.start_frame; 109 | int idx_r = frame_count_r - it.start_frame; 110 | 111 | a = it.feature_per_frame[idx_l].point; 112 | 113 | b = it.feature_per_frame[idx_r].point; 114 | 115 | corres.push_back(make_pair(a, b)); 116 | } 117 | } 118 | return corres; 119 | } 120 | 121 | 122 | void FeatureManager::setDepth(const VectorXd &x) 123 | { 124 | int feature_index = -1; 125 | for (auto &it_per_id : feature) 126 | { 127 | it_per_id.used_num = it_per_id.feature_per_frame.size(); 128 | if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2)) 129 | continue; 130 | 131 | it_per_id.estimated_depth = 1.0 / x(++feature_index); 132 | //printf("feature id %d , start_frame %d, depth %f ", it_per_id->feature_id, it_per_id-> start_frame, it_per_id->estimated_depth); 133 | if (it_per_id.estimated_depth < 0) 134 | { 135 | it_per_id.solve_flag = 2; 136 | } 137 | else 138 | it_per_id.solve_flag = 1; 139 | } 140 | } 141 | 142 | 143 
| void FeatureManager::removeFailures() 144 | { 145 | for (auto it = feature.begin(), it_next = feature.begin(); 146 | it != feature.end(); it = it_next) 147 | { 148 | it_next++; 149 | if (it->solve_flag == 2) 150 | feature.erase(it); 151 | } 152 | } 153 | 154 | 155 | void FeatureManager::clearDepth(const VectorXd &x) 156 | { 157 | int feature_index = -1; 158 | for (auto &it_per_id : feature) 159 | { 160 | it_per_id.used_num = it_per_id.feature_per_frame.size(); 161 | if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2)) 162 | continue; 163 | it_per_id.estimated_depth = 1.0 / x(++feature_index); 164 | } 165 | } 166 | 167 | 168 | VectorXd FeatureManager::getDepthVector() 169 | { 170 | VectorXd dep_vec(getFeatureCount()); 171 | int feature_index = -1; 172 | for (auto &it_per_id : feature) 173 | { 174 | it_per_id.used_num = it_per_id.feature_per_frame.size(); 175 | if (!(it_per_id.used_num >= 2 && it_per_id.start_frame < WINDOW_SIZE - 2)) 176 | continue; 177 | #if 1 178 | dep_vec(++feature_index) = 1. / it_per_id.estimated_depth; 179 | #else 180 | dep_vec(++feature_index) = it_per_id->estimated_depth; 181 | #endif 182 | } 183 | return dep_vec; 184 | } 185 | 186 | 187 | void FeatureManager::removeOutlier() 188 | { 189 | int i = -1; 190 | for (auto it = feature.begin(), it_next = feature.begin(); 191 | it != feature.end(); it = it_next) 192 | { 193 | it_next++; 194 | i += it->used_num != 0; 195 | if (it->used_num != 0 && it->is_outlier == true) 196 | { 197 | feature.erase(it); 198 | } 199 | } 200 | } 201 | 202 | 203 | void FeatureManager::removeBack() 204 | { 205 | for (auto it = feature.begin(), it_next = feature.begin(); 206 | it != feature.end(); it = it_next) 207 | { 208 | it_next++; 209 | 210 | if (it->start_frame != 0) 211 | it->start_frame--; 212 | else 213 | { 214 | it->feature_per_frame.erase(it->feature_per_frame.begin()); 215 | if (it->feature_per_frame.size() == 0) 216 | feature.erase(it); 217 | } 218 | } 219 | } 220 | 221 | 222 | void FeatureManager::removeFront(int frame_count) 223 | { 224 | for (auto it = feature.begin(), it_next = feature.begin(); it != feature.end(); it = it_next) 225 | { 226 | it_next++; 227 | 228 | if (it->start_frame == frame_count) 229 | { 230 | it->start_frame--; 231 | } 232 | else 233 | { 234 | int j = WINDOW_SIZE - 1 - it->start_frame; 235 | if (it->endFrame() < frame_count - 1) 236 | continue; 237 | it->feature_per_frame.erase(it->feature_per_frame.begin() + j); 238 | if (it->feature_per_frame.size() == 0) 239 | feature.erase(it); 240 | } 241 | } 242 | } 243 | 244 | 245 | double FeatureManager::compensatedParallax2(const FeaturePerId &it_per_id, int frame_count) 246 | { 247 | //check the second last frame is keyframe or not 248 | //parallax betwwen seconde last frame and third last frame 249 | const FeaturePerFrame &frame_i = it_per_id.feature_per_frame[frame_count - 2 - it_per_id.start_frame]; 250 | const FeaturePerFrame &frame_j = it_per_id.feature_per_frame[frame_count - 1 - it_per_id.start_frame]; 251 | 252 | double ans = 0; 253 | Vector3d p_j = frame_j.point; 254 | 255 | double u_j = p_j(0); 256 | double v_j = p_j(1); 257 | 258 | Vector3d p_i = frame_i.point; 259 | Vector3d p_i_comp; 260 | 261 | //int r_i = frame_count - 2; 262 | //int r_j = frame_count - 1; 263 | //p_i_comp = ric[camera_id_j].transpose() * Rs[r_j].transpose() * Rs[r_i] * ric[camera_id_i] * p_i; 264 | p_i_comp = p_i; 265 | double dep_i = p_i(2); 266 | double u_i = p_i(0) / dep_i; 267 | double v_i = p_i(1) / dep_i; 268 | double du = u_i - u_j, dv = v_i - v_j; 
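// p_i_comp would hold the rotation-compensated observation (see the
// commented-out line above); since it is set equal to p_i here, the
// compensated deltas computed below coincide with the raw ones and the
// function effectively returns the norm of the feature displacement between
// the two frames.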
269 | 270 | double dep_i_comp = p_i_comp(2); 271 | double u_i_comp = p_i_comp(0) / dep_i_comp; 272 | double v_i_comp = p_i_comp(1) / dep_i_comp; 273 | double du_comp = u_i_comp - u_j, dv_comp = v_i_comp - v_j; 274 | 275 | ans = max(ans, sqrt(min(du * du + dv * dv, du_comp * du_comp + dv_comp * dv_comp))); 276 | 277 | return ans; 278 | } 279 | 280 | 281 | } -------------------------------------------------------------------------------- /src/initial_alignment.cpp: -------------------------------------------------------------------------------- 1 | #include "Initializer/initial_alignment.h" 2 | 3 | // debug log 4 | #include <iostream> 5 | #include <fstream> 6 | 7 | namespace larvio { 8 | 9 | 10 | void solveGyroscopeBias(map<double, ImageFrame> &all_image_frame, Vector3d* Bgs) 11 | { 12 | Matrix3d A; 13 | Vector3d b; 14 | Vector3d delta_bg; 15 | A.setZero(); 16 | b.setZero(); 17 | map<double, ImageFrame>::iterator frame_i; 18 | map<double, ImageFrame>::iterator frame_j; 19 | for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++) 20 | { 21 | frame_j = next(frame_i); 22 | MatrixXd tmp_A(3, 3); 23 | tmp_A.setZero(); 24 | VectorXd tmp_b(3); 25 | tmp_b.setZero(); 26 | Eigen::Quaterniond q_ij(frame_i->second.R.transpose() * frame_j->second.R); 27 | tmp_A = frame_j->second.pre_integration->jacobian.template block<3, 3>(O_R, O_BG); 28 | tmp_b = 2 * (frame_j->second.pre_integration->delta_q.inverse() * q_ij).vec(); 29 | A += tmp_A.transpose() * tmp_A; 30 | b += tmp_A.transpose() * tmp_b; 31 | } 32 | delta_bg = A.ldlt().solve(b); 33 | // printf("gyroscope bias initial calibration " << delta_bg.transpose()); 34 | 35 | for (int i = 0; i <= WINDOW_SIZE; i++) 36 | Bgs[i] += delta_bg; 37 | 38 | for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++) 39 | { 40 | frame_j = next(frame_i); 41 | frame_j->second.pre_integration->repropagate(Vector3d::Zero(), Bgs[0]); 42 | } 43 | } 44 | 45 | 46 | MatrixXd TangentBasis(Vector3d &g0) 47 | { 48 | Vector3d b, c; 49 | Vector3d a = g0.normalized(); 50 | Vector3d tmp(0, 0, 1); 51 | if(a == tmp) 52 | tmp << 1, 0, 0; 53 | b = (tmp - a * (a.transpose() * tmp)).normalized(); 54 | c = a.cross(b); 55 | MatrixXd bc(3, 2); 56 | bc.block<3, 1>(0, 0) = b; 57 | bc.block<3, 1>(0, 1) = c; 58 | return bc; 59 | } 60 | 61 | 62 | void RefineGravity(map<double, ImageFrame> &all_image_frame, Vector3d &g, VectorXd &x, const Vector3d& TIC) 63 | { 64 | Vector3d g0 = g.normalized() * Gravity.norm(); 65 | Vector3d lx, ly; 66 | //VectorXd x; 67 | int all_frame_count = all_image_frame.size(); 68 | int n_state = all_frame_count * 3 + 2 + 1; 69 | 70 | MatrixXd A{n_state, n_state}; 71 | A.setZero(); 72 | VectorXd b{n_state}; 73 | b.setZero(); 74 | 75 | map<double, ImageFrame>::iterator frame_i; 76 | map<double, ImageFrame>::iterator frame_j; 77 | for(int k = 0; k < 4; k++) 78 | { 79 | MatrixXd lxly(3, 2); 80 | lxly = TangentBasis(g0); 81 | int i = 0; 82 | for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++, i++) 83 | { 84 | frame_j = next(frame_i); 85 | 86 | MatrixXd tmp_A(6, 9); 87 | tmp_A.setZero(); 88 | VectorXd tmp_b(6); 89 | tmp_b.setZero(); 90 | 91 | double dt = frame_j->second.pre_integration->sum_dt; 92 | 93 | tmp_A.block<3, 3>(0, 0) = -dt * Matrix3d::Identity(); 94 | tmp_A.block<3, 2>(0, 6) = frame_i->second.R.transpose() * dt * dt / 2 * Matrix3d::Identity() * lxly; 95 | tmp_A.block<3, 1>(0, 8) = frame_i->second.R.transpose() * (frame_j->second.T - frame_i->second.T) / 100.0; 96 | tmp_b.block<3, 1>(0, 0) = frame_j->second.pre_integration->delta_p + frame_i->second.R.transpose() * frame_j->second.R * TIC - TIC -
frame_i->second.R.transpose() * dt * dt / 2 * g0; 97 | 98 | tmp_A.block<3, 3>(3, 0) = -Matrix3d::Identity(); 99 | tmp_A.block<3, 3>(3, 3) = frame_i->second.R.transpose() * frame_j->second.R; 100 | tmp_A.block<3, 2>(3, 6) = frame_i->second.R.transpose() * dt * Matrix3d::Identity() * lxly; 101 | tmp_b.block<3, 1>(3, 0) = frame_j->second.pre_integration->delta_v - frame_i->second.R.transpose() * dt * Matrix3d::Identity() * g0; 102 | 103 | Matrix<double, 6, 6> cov_inv = Matrix<double, 6, 6>::Zero(); 104 | //cov.block<6, 6>(0, 0) = IMU_cov[i + 1]; 105 | //MatrixXd cov_inv = cov.inverse(); 106 | cov_inv.setIdentity(); 107 | 108 | MatrixXd r_A = tmp_A.transpose() * cov_inv * tmp_A; 109 | VectorXd r_b = tmp_A.transpose() * cov_inv * tmp_b; 110 | 111 | A.block<6, 6>(i * 3, i * 3) += r_A.topLeftCorner<6, 6>(); 112 | b.segment<6>(i * 3) += r_b.head<6>(); 113 | 114 | A.bottomRightCorner<3, 3>() += r_A.bottomRightCorner<3, 3>(); 115 | b.tail<3>() += r_b.tail<3>(); 116 | 117 | A.block<6, 3>(i * 3, n_state - 3) += r_A.topRightCorner<6, 3>(); 118 | A.block<3, 6>(n_state - 3, i * 3) += r_A.bottomLeftCorner<3, 6>(); 119 | } 120 | A = A * 1000.0; 121 | b = b * 1000.0; 122 | x = A.ldlt().solve(b); 123 | VectorXd dg = x.segment<2>(n_state - 3); 124 | g0 = (g0 + lxly * dg).normalized() * Gravity.norm(); 125 | //double s = x(n_state - 1); 126 | } 127 | g = g0; 128 | } 129 | 130 | 131 | bool LinearAlignment(map<double, ImageFrame> &all_image_frame, Vector3d &g, VectorXd &x, const Vector3d& TIC) 132 | { 133 | int all_frame_count = all_image_frame.size(); 134 | int n_state = all_frame_count * 3 + 3 + 1; 135 | 136 | MatrixXd A{n_state, n_state}; 137 | A.setZero(); 138 | VectorXd b{n_state}; 139 | b.setZero(); 140 | 141 | map<double, ImageFrame>::iterator frame_i; 142 | map<double, ImageFrame>::iterator frame_j; 143 | int i = 0; 144 | for (frame_i = all_image_frame.begin(); next(frame_i) != all_image_frame.end(); frame_i++, i++) 145 | { 146 | frame_j = next(frame_i); 147 | 148 | MatrixXd tmp_A(6, 10); 149 | tmp_A.setZero(); 150 | VectorXd tmp_b(6); 151 | tmp_b.setZero(); 152 | 153 | double dt = frame_j->second.pre_integration->sum_dt; 154 | 155 | tmp_A.block<3, 3>(0, 0) = -dt * Matrix3d::Identity(); 156 | tmp_A.block<3, 3>(0, 6) = frame_i->second.R.transpose() * dt * dt / 2 * Matrix3d::Identity(); 157 | tmp_A.block<3, 1>(0, 9) = frame_i->second.R.transpose() * (frame_j->second.T - frame_i->second.T) / 100.0; 158 | tmp_b.block<3, 1>(0, 0) = frame_j->second.pre_integration->delta_p + frame_i->second.R.transpose() * frame_j->second.R * TIC - TIC; 159 | //cout << "delta_p " << frame_j->second.pre_integration->delta_p.transpose() << endl; 160 | tmp_A.block<3, 3>(3, 0) = -Matrix3d::Identity(); 161 | tmp_A.block<3, 3>(3, 3) = frame_i->second.R.transpose() * frame_j->second.R; 162 | tmp_A.block<3, 3>(3, 6) = frame_i->second.R.transpose() * dt * Matrix3d::Identity(); 163 | tmp_b.block<3, 1>(3, 0) = frame_j->second.pre_integration->delta_v; 164 | //cout << "delta_v " << frame_j->second.pre_integration->delta_v.transpose() << endl; 165 | 166 | Matrix<double, 6, 6> cov_inv = Matrix<double, 6, 6>::Zero(); 167 | //cov.block<6, 6>(0, 0) = IMU_cov[i + 1]; 168 | //MatrixXd cov_inv = cov.inverse(); 169 | cov_inv.setIdentity(); 170 | 171 | MatrixXd r_A = tmp_A.transpose() * cov_inv * tmp_A; 172 | VectorXd r_b = tmp_A.transpose() * cov_inv * tmp_b; 173 | 174 | A.block<6, 6>(i * 3, i * 3) += r_A.topLeftCorner<6, 6>(); 175 | b.segment<6>(i * 3) += r_b.head<6>(); 176 | 177 | A.bottomRightCorner<4, 4>() += r_A.bottomRightCorner<4, 4>(); 178 | b.tail<4>() += r_b.tail<4>(); 179 | 180 | A.block<6, 4>(i * 3, n_state - 4) += r_A.topRightCorner<6, 4>();
181 | A.block<4, 6>(n_state - 4, i * 3) += r_A.bottomLeftCorner<4, 6>(); 182 | } 183 | A = A * 1000.0; 184 | b = b * 1000.0; 185 | x = A.ldlt().solve(b); 186 | double s = x(n_state - 1) / 100.0; 187 | // printf("estimated scale: %f", s); 188 | g = x.segment<3>(n_state - 4); 189 | // printf(" result g " << g.norm() << " " << g.transpose()); 190 | if(fabs(g.norm() - Gravity.norm()) > 1.0 || s < 0) 191 | { 192 | return false; 193 | } 194 | 195 | RefineGravity(all_image_frame, g, x, TIC); 196 | s = (x.tail<1>())(0) / 100.0; 197 | (x.tail<1>())(0) = s; 198 | // printf(" refine " << g.norm() << " " << g.transpose()); 199 | if(s < 0.0 ) 200 | return false; 201 | else 202 | return true; 203 | } 204 | 205 | 206 | bool VisualIMUAlignment(map<double, ImageFrame> &all_image_frame, Vector3d* Bgs, Vector3d &g, VectorXd &x, const Vector3d& TIC) 207 | { 208 | solveGyroscopeBias(all_image_frame, Bgs); 209 | 210 | if(LinearAlignment(all_image_frame, g, x, TIC)) 211 | return true; 212 | else 213 | return false; 214 | } 215 | 216 | } -------------------------------------------------------------------------------- /src/initial_sfm.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // Added by xiaochen at 19-8-16. 3 | // Type and methods for initial sfm. 4 | // The original file belongs to VINS-MONO (https://github.com/HKUST-Aerial-Robotics/VINS-Mono). 5 | // 6 | 7 | #include "Initializer/initial_sfm.h" 8 | 9 | namespace larvio { 10 | 11 | GlobalSFM::GlobalSFM(){} 12 | 13 | 14 | void GlobalSFM::triangulatePoint(Eigen::Matrix<double, 3, 4> &Pose0, Eigen::Matrix<double, 3, 4> &Pose1, 15 | Vector2d &point0, Vector2d &point1, Vector3d &point_3d) 16 | { 17 | Matrix4d design_matrix = Matrix4d::Zero(); 18 | design_matrix.row(0) = point0[0] * Pose0.row(2) - Pose0.row(0); 19 | design_matrix.row(1) = point0[1] * Pose0.row(2) - Pose0.row(1); 20 | design_matrix.row(2) = point1[0] * Pose1.row(2) - Pose1.row(0); 21 | design_matrix.row(3) = point1[1] * Pose1.row(2) - Pose1.row(1); 22 | Vector4d triangulated_point; 23 | triangulated_point = 24 | design_matrix.jacobiSvd(Eigen::ComputeFullV).matrixV().rightCols<1>(); 25 | point_3d(0) = triangulated_point(0) / triangulated_point(3); 26 | point_3d(1) = triangulated_point(1) / triangulated_point(3); 27 | point_3d(2) = triangulated_point(2) / triangulated_point(3); 28 | } 29 | 30 | 31 | bool GlobalSFM::solveFrameByPnP(Matrix3d &R_initial, Vector3d &P_initial, int i, 32 | vector<SFMFeature> &sfm_f) 33 | { 34 | vector<cv::Point2f> pts_2_vector; 35 | vector<cv::Point3f> pts_3_vector; 36 | for (int j = 0; j < feature_num; j++) 37 | { 38 | if (sfm_f[j].state != true) 39 | continue; 40 | Vector2d point2d; 41 | for (int k = 0; k < (int)sfm_f[j].observation.size(); k++) 42 | { 43 | if (sfm_f[j].observation[k].first == i) 44 | { 45 | Vector2d img_pts = sfm_f[j].observation[k].second; 46 | cv::Point2f pts_2(img_pts(0), img_pts(1)); 47 | pts_2_vector.push_back(pts_2); // qxc: store the normalized observation 48 | cv::Point3f pts_3(sfm_f[j].position[0], sfm_f[j].position[1], sfm_f[j].position[2]); 49 | pts_3_vector.push_back(pts_3); // qxc: store the triangulated position 50 | break; 51 | } 52 | } 53 | } 54 | if (int(pts_2_vector.size()) < 15) 55 | { 56 | printf("unstable features tracking, please slowly move your device or just put it on the table!\n\n");
57 | if (int(pts_2_vector.size()) < 10) 58 | return false; 59 | } 60 | cv::Mat r, rvec, t, D, tmp_r; 61 | cv::eigen2cv(R_initial, tmp_r); 62 | cv::Rodrigues(tmp_r, rvec); 63 | cv::eigen2cv(P_initial, t); 64 | cv::Mat K = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); 65 | bool pnp_succ; 66 | pnp_succ = cv::solvePnP(pts_3_vector, pts_2_vector, K, D, rvec, t, 1); 67 | if(!pnp_succ) 68 | { 69 | return false; 70 | } 71 | cv::Rodrigues(rvec, r); 72 | //cout << "r " << endl << r << endl; 73 | MatrixXd R_pnp; 74 | cv::cv2eigen(r, R_pnp); 75 | MatrixXd T_pnp; 76 | cv::cv2eigen(t, T_pnp); 77 | R_initial = R_pnp; 78 | P_initial = T_pnp; 79 | return true; 80 | } 81 | 82 | 83 | void GlobalSFM::triangulateTwoFrames(int frame0, Eigen::Matrix<double, 3, 4> &Pose0, 84 | int frame1, Eigen::Matrix<double, 3, 4> &Pose1, 85 | vector<SFMFeature> &sfm_f) 86 | { 87 | assert(frame0 != frame1); 88 | for (int j = 0; j < feature_num; j++) 89 | { 90 | if (sfm_f[j].state == true) 91 | continue; 92 | bool has_0 = false, has_1 = false; 93 | Vector2d point0; 94 | Vector2d point1; 95 | for (int k = 0; k < (int)sfm_f[j].observation.size(); k++) 96 | { 97 | if (sfm_f[j].observation[k].first == frame0) 98 | { 99 | point0 = sfm_f[j].observation[k].second; 100 | has_0 = true; 101 | } 102 | if (sfm_f[j].observation[k].first == frame1) 103 | { 104 | point1 = sfm_f[j].observation[k].second; 105 | has_1 = true; 106 | } 107 | } 108 | if (has_0 && has_1) 109 | { 110 | Vector3d point_3d; 111 | triangulatePoint(Pose0, Pose1, point0, point1, point_3d); 112 | sfm_f[j].state = true; 113 | sfm_f[j].position[0] = point_3d(0); 114 | sfm_f[j].position[1] = point_3d(1); 115 | sfm_f[j].position[2] = point_3d(2); 116 | //cout << "trangulated : " << frame1 << " 3d point : " << j << " " << point_3d.transpose() << endl; 117 | } 118 | } 119 | } 120 | 121 | 122 | // original: 123 | // q w_R_cam t w_R_cam 124 | // c_rotation cam_R_w 125 | // c_translation cam_R_w 126 | // relative_q[i][j] j_q_i 127 | // relative_t[i][j] j_t_ji (j < i) 128 | bool GlobalSFM::construct(int frame_num, Quaterniond* q, Vector3d* T, int l, 129 | const Matrix3d relative_R, const Vector3d relative_T, 130 | vector<SFMFeature> &sfm_f, map<int, Vector3d> &sfm_tracked_points) 131 | { 132 | feature_num = sfm_f.size(); 133 | //cout << "set 0 and " << l << " as known " << endl; 134 | // have relative_r relative_t 135 | // initial two view 136 | q[l].w() = 1; 137 | q[l].x() = 0; 138 | q[l].y() = 0; 139 | q[l].z() = 0; 140 | T[l].setZero(); 141 | q[frame_num - 1] = q[l] * Quaterniond(relative_R); 142 | T[frame_num - 1] = relative_T; 143 | //cout << "init q_l " << q[l].w() << " " << q[l].vec().transpose() << endl; 144 | //cout << "init t_l " << T[l].transpose() << endl; 145 | 146 | //rotate to cam frame 147 | Matrix3d c_Rotation[frame_num]; 148 | Vector3d c_Translation[frame_num]; 149 | Quaterniond c_Quat[frame_num]; 150 | double c_rotation[frame_num][4]; 151 | double c_translation[frame_num][3]; 152 | Eigen::Matrix<double, 3, 4> Pose[frame_num]; 153 | 154 | c_Quat[l] = q[l].inverse(); 155 | c_Rotation[l] = c_Quat[l].toRotationMatrix(); 156 | c_Translation[l] = -1 * (c_Rotation[l] * T[l]); 157 | Pose[l].block<3, 3>(0, 0) = c_Rotation[l]; 158 | Pose[l].block<3, 1>(0, 3) = c_Translation[l]; 159 | 160 | c_Quat[frame_num - 1] = q[frame_num - 1].inverse(); 161 | c_Rotation[frame_num - 1] = c_Quat[frame_num - 1].toRotationMatrix(); 162 | c_Translation[frame_num - 1] = -1 * (c_Rotation[frame_num - 1] * T[frame_num - 1]); 163 | Pose[frame_num - 1].block<3, 3>(0, 0) = c_Rotation[frame_num - 1]; 164 | Pose[frame_num - 1].block<3, 1>(0, 3) =
c_Translation[frame_num - 1]; 165 | 166 | //1: trangulate between l ----- frame_num - 1 167 | //2: solve pnp l + 1; trangulate l + 1 ------- frame_num - 1; 168 | for (int i = l; i < frame_num - 1 ; i++) 169 | { 170 | // solve pnp 171 | if (i > l) 172 | { 173 | Matrix3d R_initial = c_Rotation[i - 1]; 174 | Vector3d P_initial = c_Translation[i - 1]; 175 | if(!solveFrameByPnP(R_initial, P_initial, i, sfm_f)) 176 | return false; 177 | c_Rotation[i] = R_initial; 178 | c_Translation[i] = P_initial; 179 | c_Quat[i] = c_Rotation[i]; 180 | Pose[i].block<3, 3>(0, 0) = c_Rotation[i]; 181 | Pose[i].block<3, 1>(0, 3) = c_Translation[i]; 182 | } 183 | 184 | // triangulate point based on the solve pnp result 185 | triangulateTwoFrames(i, Pose[i], frame_num - 1, Pose[frame_num - 1], sfm_f); 186 | } 187 | //3: triangulate l-----l+1 l+2 ... frame_num -2 188 | for (int i = l + 1; i < frame_num - 1; i++) 189 | triangulateTwoFrames(l, Pose[l], i, Pose[i], sfm_f); 190 | //4: solve pnp l-1; triangulate l-1 ----- l 191 | // l-2 l-2 ----- l 192 | for (int i = l - 1; i >= 0; i--) 193 | { 194 | //solve pnp 195 | Matrix3d R_initial = c_Rotation[i + 1]; 196 | Vector3d P_initial = c_Translation[i + 1]; 197 | if(!solveFrameByPnP(R_initial, P_initial, i, sfm_f)) 198 | return false; 199 | c_Rotation[i] = R_initial; 200 | c_Translation[i] = P_initial; 201 | c_Quat[i] = c_Rotation[i]; 202 | Pose[i].block<3, 3>(0, 0) = c_Rotation[i]; 203 | Pose[i].block<3, 1>(0, 3) = c_Translation[i]; 204 | //triangulate 205 | triangulateTwoFrames(i, Pose[i], l, Pose[l], sfm_f); 206 | } 207 | //5: triangulate all other points 208 | for (int j = 0; j < feature_num; j++) 209 | { 210 | if (sfm_f[j].state == true) 211 | continue; 212 | if ((int)sfm_f[j].observation.size() >= 2) 213 | { 214 | Vector2d point0, point1; 215 | int frame_0 = sfm_f[j].observation[0].first; 216 | point0 = sfm_f[j].observation[0].second; 217 | int frame_1 = sfm_f[j].observation.back().first; 218 | point1 = sfm_f[j].observation.back().second; 219 | Vector3d point_3d; 220 | triangulatePoint(Pose[frame_0], Pose[frame_1], point0, point1, point_3d); 221 | sfm_f[j].state = true; 222 | sfm_f[j].position[0] = point_3d(0); 223 | sfm_f[j].position[1] = point_3d(1); 224 | sfm_f[j].position[2] = point_3d(2); 225 | //cout << "trangulated : " << frame_0 << " " << frame_1 << " 3d point : " << j << " " << point_3d.transpose() << endl; 226 | } 227 | } 228 | 229 | /* 230 | for (int i = 0; i < frame_num; i++) 231 | { 232 | q[i] = c_Rotation[i].transpose(); 233 | cout << "solvePnP q" << " i " << i <<" " <(3, 3) << 0, 1, 0, -1, 0, 0, 0, 0, 1); 17 | W.convertTo(W, E.type()); 18 | 19 | Mat R1, R2, t; 20 | R1 = U * W * Vt; 21 | R2 = U * W.t() * Vt; 22 | t = U.col(2) * 1.0; 23 | 24 | R1.copyTo(_R1); 25 | R2.copyTo(_R2); 26 | t.copyTo(_t); 27 | } 28 | 29 | int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix, 30 | OutputArray _R, OutputArray _t, InputOutputArray _mask) 31 | { 32 | 33 | Mat points1, points2, cameraMatrix; 34 | _points1.getMat().convertTo(points1, CV_64F); 35 | _points2.getMat().convertTo(points2, CV_64F); 36 | _cameraMatrix.getMat().convertTo(cameraMatrix, CV_64F); 37 | 38 | int npoints = points1.checkVector(2); 39 | CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints && 40 | points1.type() == points2.type()); 41 | 42 | CV_Assert(cameraMatrix.rows == 3 && cameraMatrix.cols == 3 && cameraMatrix.channels() == 1); 43 | 44 | if (points1.channels() > 1) 45 | { 46 | points1 = points1.reshape(1, npoints); 47 | points2 = 
points2.reshape(1, npoints); 48 | } 49 | 50 | double fx = cameraMatrix.at<double>(0,0); 51 | double fy = cameraMatrix.at<double>(1,1); 52 | double cx = cameraMatrix.at<double>(0,2); 53 | double cy = cameraMatrix.at<double>(1,2); 54 | 55 | points1.col(0) = (points1.col(0) - cx) / fx; 56 | points2.col(0) = (points2.col(0) - cx) / fx; 57 | points1.col(1) = (points1.col(1) - cy) / fy; 58 | points2.col(1) = (points2.col(1) - cy) / fy; 59 | 60 | points1 = points1.t(); 61 | points2 = points2.t(); 62 | 63 | Mat R1, R2, t; 64 | decomposeEssentialMat(E, R1, R2, t); 65 | Mat P0 = Mat::eye(3, 4, R1.type()); 66 | Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type()); 67 | P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0; 68 | P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0; 69 | P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0; 70 | P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0; 71 | 72 | // Do the cheirality check. 73 | // Notice here a threshold dist is used to filter 74 | // out far away points (i.e. infinite points) since 75 | // their depth may vary between positive and negative. 76 | double dist = 50.0; 77 | Mat Q; 78 | triangulatePoints(P0, P1, points1, points2, Q); 79 | Mat mask1 = Q.row(2).mul(Q.row(3)) > 0; 80 | Q.row(0) /= Q.row(3); 81 | Q.row(1) /= Q.row(3); 82 | Q.row(2) /= Q.row(3); 83 | Q.row(3) /= Q.row(3); 84 | mask1 = (Q.row(2) < dist) & mask1; 85 | Q = P1 * Q; 86 | mask1 = (Q.row(2) > 0) & mask1; 87 | mask1 = (Q.row(2) < dist) & mask1; 88 | 89 | triangulatePoints(P0, P2, points1, points2, Q); 90 | Mat mask2 = Q.row(2).mul(Q.row(3)) > 0; 91 | Q.row(0) /= Q.row(3); 92 | Q.row(1) /= Q.row(3); 93 | Q.row(2) /= Q.row(3); 94 | Q.row(3) /= Q.row(3); 95 | mask2 = (Q.row(2) < dist) & mask2; 96 | Q = P2 * Q; 97 | mask2 = (Q.row(2) > 0) & mask2; 98 | mask2 = (Q.row(2) < dist) & mask2; 99 | 100 | triangulatePoints(P0, P3, points1, points2, Q); 101 | Mat mask3 = Q.row(2).mul(Q.row(3)) > 0; 102 | Q.row(0) /= Q.row(3); 103 | Q.row(1) /= Q.row(3); 104 | Q.row(2) /= Q.row(3); 105 | Q.row(3) /= Q.row(3); 106 | mask3 = (Q.row(2) < dist) & mask3; 107 | Q = P3 * Q; 108 | mask3 = (Q.row(2) > 0) & mask3; 109 | mask3 = (Q.row(2) < dist) & mask3; 110 | 111 | triangulatePoints(P0, P4, points1, points2, Q); 112 | Mat mask4 = Q.row(2).mul(Q.row(3)) > 0; 113 | Q.row(0) /= Q.row(3); 114 | Q.row(1) /= Q.row(3); 115 | Q.row(2) /= Q.row(3); 116 | Q.row(3) /= Q.row(3); 117 | mask4 = (Q.row(2) < dist) & mask4; 118 | Q = P4 * Q; 119 | mask4 = (Q.row(2) > 0) & mask4; 120 | mask4 = (Q.row(2) < dist) & mask4; 121 | 122 | mask1 = mask1.t(); 123 | mask2 = mask2.t(); 124 | mask3 = mask3.t(); 125 | mask4 = mask4.t(); 126 |
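// At this point mask1..mask4 record, for each of the four candidate decompositions
// (R1, t), (R2, t), (R1, -t) and (R2, -t), which correspondences triangulate in
// front of both cameras and closer than the dist threshold. The candidate with the
// largest count is selected below, and its mask is returned through _mask.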
127 | // If _mask is given, then use it to filter outliers. 128 | if (!_mask.empty()) 129 | { 130 | Mat mask = _mask.getMat(); 131 | CV_Assert(mask.size() == mask1.size()); 132 | bitwise_and(mask, mask1, mask1); 133 | bitwise_and(mask, mask2, mask2); 134 | bitwise_and(mask, mask3, mask3); 135 | bitwise_and(mask, mask4, mask4); 136 | } 137 | if (_mask.empty() && _mask.needed()) 138 | { 139 | _mask.create(mask1.size(), CV_8U); 140 | } 141 | 142 | CV_Assert(_R.needed() && _t.needed()); 143 | _R.create(3, 3, R1.type()); 144 | _t.create(3, 1, t.type()); 145 | 146 | int good1 = countNonZero(mask1); 147 | int good2 = countNonZero(mask2); 148 | int good3 = countNonZero(mask3); 149 | int good4 = countNonZero(mask4); 150 | 151 | if (good1 >= good2 && good1 >= good3 && good1 >= good4) 152 | { 153 | R1.copyTo(_R); 154 | t.copyTo(_t); 155 | if (_mask.needed()) mask1.copyTo(_mask); 156 | return good1; 157 | } 158 | else if (good2 >= good1 && good2 >= good3 && good2 >= good4) 159 | { 160 | R2.copyTo(_R); 161 | t.copyTo(_t); 162 | if (_mask.needed()) mask2.copyTo(_mask); 163 | return good2; 164 | } 165 | else if (good3 >= good1 && good3 >= good2 && good3 >= good4) 166 | { 167 | t = -t; 168 | R1.copyTo(_R); 169 | t.copyTo(_t); 170 | if (_mask.needed()) mask3.copyTo(_mask); 171 | return good3; 172 | } 173 | else 174 | { 175 | t = -t; 176 | R2.copyTo(_R); 177 | t.copyTo(_t); 178 | if (_mask.needed()) mask4.copyTo(_mask); 179 | return good4; 180 | } 181 | } 182 | 183 | int recoverPose( InputArray E, InputArray _points1, InputArray _points2, OutputArray _R, 184 | OutputArray _t, double focal, Point2d pp, InputOutputArray _mask) 185 | { 186 | Mat cameraMatrix = (Mat_<double>(3,3) << focal, 0, pp.x, 0, focal, pp.y, 0, 0, 1); 187 | return cv::recoverPose(E, _points1, _points2, cameraMatrix, _R, _t, _mask); 188 | } 189 | } 190 | 191 | 192 | namespace larvio { 193 | 194 | bool MotionEstimator::solveRelativeRT(const vector<pair<Vector3d, Vector3d>> &corres, Matrix3d &Rotation, Vector3d &Translation) 195 | { 196 | if (corres.size() >= 15) 197 | { 198 | vector<cv::Point2f> ll, rr; 199 | for (int i = 0; i < int(corres.size()); i++) 200 | { 201 | ll.push_back(cv::Point2f(corres[i].first(0), corres[i].first(1))); 202 | rr.push_back(cv::Point2f(corres[i].second(0), corres[i].second(1))); 203 | } 204 | cv::Mat mask; 205 | 206 | cv::Mat E = cv::findFundamentalMat(ll, rr, cv::FM_RANSAC, 0.3 / 460, 0.99, mask); 207 | cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); 208 | cv::Mat rot, trans; 209 | int inlier_cnt = cv::recoverPose(E, ll, rr, cameraMatrix, rot, trans, mask); 210 | //cout << "inlier_cnt " << inlier_cnt << endl; 211 | 212 | Eigen::Matrix3d R; 213 | Eigen::Vector3d T; 214 | for (int i = 0; i < 3; i++) 215 | { 216 | T(i) = trans.at<double>(i, 0); 217 | for (int j = 0; j < 3; j++) 218 | R(i, j) = rot.at<double>(i, j); 219 | } 220 | 221 | Rotation = R.transpose(); 222 | Translation = -R.transpose() * T; 223 | if(inlier_cnt > 12) 224 | return true; 225 | else 226 | return false; 227 | } 228 | return false; 229 | } 230 | 231 | 232 | } 233 | --------------------------------------------------------------------------------
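During dynamic initialization these pieces are typically driven together: FeatureManager::getCorresponding() supplies normalized-coordinate pairs between a candidate reference frame l and the newest frame, and MotionEstimator::solveRelativeRT() recovers the relative rotation and up-to-scale translation that seed GlobalSFM::construct(). A minimal sketch (the instance names f_manager and m_estimator, the loop bounds, and the 20-correspondence threshold are illustrative, not the exact LARVIO call site):

    Matrix3d relative_R;
    Vector3d relative_T;
    for (int l = 0; l < frame_count; ++l)
    {
        // correspondences are normalized-plane point pairs between frame l and the newest frame
        vector<pair<Vector3d, Vector3d>> corres = f_manager.getCorresponding(l, frame_count);
        if (corres.size() > 20 && m_estimator.solveRelativeRT(corres, relative_R, relative_T))
        {
            // frame l becomes the SfM reference; relative_R / relative_T relate it to the newest frame
            break;
        }
    }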