├── .gitignore
├── README.md
├── camera_model
│   ├── CMakeLists.txt
│   ├── cmake
│   │   └── FindEigen.cmake
│   ├── include
│   │   └── camodocal
│   │       ├── calib
│   │       │   └── CameraCalibration.h
│   │       ├── camera_models
│   │       │   ├── Camera.h
│   │       │   ├── CameraFactory.h
│   │       │   ├── CataCamera.h
│   │       │   ├── CostFunctionFactory.h
│   │       │   ├── EquidistantCamera.h
│   │       │   ├── PinholeCamera.h
│   │       │   └── ScaramuzzaCamera.h
│   │       ├── chessboard
│   │       │   ├── Chessboard.h
│   │       │   ├── ChessboardCorner.h
│   │       │   ├── ChessboardQuad.h
│   │       │   └── Spline.h
│   │       ├── gpl
│   │       │   ├── EigenQuaternionParameterization.h
│   │       │   ├── EigenUtils.h
│   │       │   └── gpl.h
│   │       └── sparse_graph
│   │           └── Transform.h
│   ├── instruction
│   ├── package.xml
│   ├── readme.md
│   └── src
│       ├── calib
│       │   └── CameraCalibration.cc
│       ├── camera_models
│       │   ├── Camera.cc
│       │   ├── CameraFactory.cc
│       │   ├── CataCamera.cc
│       │   ├── CostFunctionFactory.cc
│       │   ├── EquidistantCamera.cc
│       │   ├── PinholeCamera.cc
│       │   └── ScaramuzzaCamera.cc
│       ├── chessboard
│       │   └── Chessboard.cc
│       ├── gpl
│       │   ├── EigenQuaternionParameterization.cc
│       │   └── gpl.cc
│       ├── intrinsic_calib.cc
│       └── sparse_graph
│           └── Transform.cc
├── config
│   ├── campus.rviz
│   ├── fisheye_mask.jpg
│   ├── fisheye_mask_752x480.jpg
│   ├── indoor.rviz
│   ├── openloris
│   │   ├── openloris_vio.yaml
│   │   └── openloris_vio_atlas.yaml
│   ├── realsense
│   │   ├── vio.yaml
│   │   ├── vio_atlas.yaml
│   │   └── vio_campus.yaml
│   ├── tsinghua.rviz
│   ├── tum_rgbd
│   │   ├── tum_fr3.yaml
│   │   └── tum_fr3_atlas.yaml
│   └── video.rviz
├── doc
│   ├── INSTALL.md
│   ├── MULTIPLE_DEVICES.md
│   └── image
│       ├── framework.png
│       └── video_cover.png
├── output
│   └── .gitignore
├── pose_graph
│   ├── CMakeLists.txt
│   ├── cmake
│   │   └── FindEigen.cmake
│   ├── nodelet_plugin.xml
│   ├── package.xml
│   └── src
│       ├── ThirdParty
│       │   ├── DBoW
│       │   │   ├── BowVector.cpp
│       │   │   ├── BowVector.h
│       │   │   ├── DBoW2.h
│       │   │   ├── FBrief.cpp
│       │   │   ├── FBrief.h
│       │   │   ├── FClass.h
│       │   │   ├── FeatureVector.cpp
│       │   │   ├── FeatureVector.h
│       │   │   ├── QueryResults.cpp
│       │   │   ├── QueryResults.h
│       │   │   ├── ScoringObject.cpp
│       │   │   ├── ScoringObject.h
│       │   │   ├── TemplatedDatabase.h
│       │   │   └── TemplatedVocabulary.h
│       │   ├── DUtils
│       │   │   ├── DException.h
│       │   │   ├── DUtils.h
│       │   │   ├── Random.cpp
│       │   │   ├── Random.h
│       │   │   ├── Timestamp.cpp
│       │   │   └── Timestamp.h
│       │   ├── DVision
│       │   │   ├── BRIEF.cpp
│       │   │   ├── BRIEF.h
│       │   │   └── DVision.h
│       │   ├── VocabularyBinary.cpp
│       │   └── VocabularyBinary.hpp
│       ├── keyframe
│       │   ├── keyframe.cpp
│       │   └── keyframe.h
│       ├── pose_graph
│       │   ├── pose_graph.cpp
│       │   └── pose_graph.h
│       ├── pose_graph_nodelet.cpp
│       └── utility
│           ├── CameraPoseVisualization.cpp
│           ├── CameraPoseVisualization.h
│           ├── parameters.h
│           ├── tic_toc.h
│           ├── utility.cpp
│           └── utility.h
├── scripts
│   ├── download_hitsz.sh
│   ├── download_thusz.sh
│   ├── merge_imu_topics.py
│   ├── rosbag_chunk.py
│   └── rosbag_merge_chunk.py
├── support_files
│   ├── brief_k10L6.bin
│   └── brief_pattern.yml
└── vins_estimator
    ├── CMakeLists.txt
    ├── cmake
    │   └── FindEigen.cmake
    ├── launch
    │   ├── atlas200
    │   │   ├── compressed2img.launch
    │   │   └── img2compressed.launch
    │   ├── openloris
    │   │   ├── openloris_vio_atlas.launch
    │   │   ├── openloris_vio_pytorch.launch
    │   │   ├── openloris_vio_tensorrt.launch
    │   │   └── openloris_vo.launch
    │   ├── realsense
    │   │   ├── realsense_vio.launch
    │   │   ├── realsense_vio_atlas.launch
    │   │   ├── realsense_vio_campus.launch
    │   │   └── rs_camera.launch
    │   ├── tum_rgbd
    │   │   ├── tum_rgbd_atlas.launch
    │   │   ├── tum_rgbd_pytorch.launch
    │   │   └── tum_rgbd_tensorrt.launch
    │   ├── vins_rviz.launch
    │   └── yolo
    │       ├── atlas.launch
    │       ├── pytorch.launch
    │       └── tensorrt.launch
    ├── nodelet_description.xml
    ├── package.xml
    └── src
        ├── estimator
        │   ├── estimator.cpp
        │   └── estimator.h
        ├── estimator_nodelet.cpp
        ├── factor
        │   ├── imu_factor.h
        │   ├── integration_base.h
        │   ├── marginalization_factor.cpp
        │   ├── marginalization_factor.h
        │   ├── pose_local_parameterization.cpp
        │   ├── pose_local_parameterization.h
        │   ├── projection_factor.cpp
        │   ├── projection_factor.h
        │   ├── projection_td_factor.cpp
        │   └── projection_td_factor.h
        ├── feature_manager
        │   ├── feature_manager.cpp
        │   └── feature_manager.h
        ├── feature_tracker
        │   ├── ThreadPool.h
        │   ├── feature_tracker.cpp
        │   └── feature_tracker.h
        ├── initial
        │   ├── initial_aligment.cpp
        │   ├── initial_alignment.h
        │   ├── initial_ex_rotation.cpp
        │   ├── initial_ex_rotation.h
        │   ├── initial_sfm.cpp
        │   ├── initial_sfm.h
        │   ├── solve_5pts.cpp
        │   └── solve_5pts.h
        └── utility
            ├── CameraPoseVisualization.cpp
            ├── CameraPoseVisualization.h
            ├── parameters.cpp
            ├── parameters.h
            ├── tic_toc.h
            ├── utility.cpp
            ├── utility.h
            ├── visualization.cpp
            └── visualization.h
/.gitignore:
--------------------------------------------------------------------------------
1 | **/build
2 | **/devel
3 | **/.idea
4 | **/.vscode
5 | **/cmake-build-debug
6 | **/.cache
7 |
8 | !.gitignore
9 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Dynamic-VINS
2 |
3 | # RGB-D Inertial Odometry for a Resource-restricted Robot in Dynamic Environments
4 |
5 | ## 1. Introduction
6 | **Dynamic-VINS** is a real-time RGB-D Inertial Odometry system for resource-restricted robots in dynamic environments.
7 | - Dynamic feature recognition by object detection and depth information, with performance comparable to semantic segmentation.
8 | - Grid-based feature detection and efficient high-quality FAST feature extraction.
9 | - Competitive localization accuracy and robustness in dynamic environments are shown in a real-time application on resource-restricted platforms, such as [HUAWEI Atlas200 DK](https://e.huawei.com/en/products/cloud-computing-dc/atlas/atlas-200), [NVIDIA Jetson AGX Xavier](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-agx-xavier/).
10 |
11 | Authors: [Jianheng Liu](https://github.com/jianhengLiu), Xuanfu Li, [Yueqian Liu](https://github.com/ErcBunny), and [Haoyao Chen](https://github.com/HitszChen) from the [Networked RObotics and Systems Lab](http://www.nrs-lab.com), [HITSZ](https://www.hitsz.edu.cn/index.html)
12 |
13 | If you use Dynamic-VINS for your academic research, please cite the following paper [[pdf](https://arxiv.org/pdf/2304.10987.pdf)].
14 | ```
15 | @ARTICLE{9830851,
16 | author={Liu, Jianheng and Li, Xuanfu and Liu, Yueqian and Chen, Haoyao},
17 | journal={IEEE Robotics and Automation Letters},
18 | title={RGB-D Inertial Odometry for a Resource-Restricted Robot in Dynamic Environments},
19 | year={2022},
20 | volume={7},
21 | number={4},
22 | pages={9573-9580},
23 | doi={10.1109/LRA.2022.3191193}}
24 | ```
25 |
26 |
27 | ### 1.1. Framework
28 |
29 |
![Dynamic-VINS framework](./doc/image/framework.png)
30 |
31 | ### 1.2. Related Video:
32 |
33 |
38 |
39 |
40 | **Video links:** [Youtube](https://youtu.be/y0U1IVtFBwY) or [Bilibili](https://www.bilibili.com/video/BV1bF411t7mx).
41 |
42 |
43 | ## 2. Installation
44 | Tested on Ubuntu 18.04 and 20.04.
45 |
46 | Find how to install Dynamic-VINS and its dependencies here: **[Installation instructions](./doc/INSTALL.md)**.
47 |
48 | ## 3. Run datasets examples
49 |
50 | ### 3.1. OpenLORIS
51 | Download [OpenLORIS](https://shimo.im/docs/HhJj6XHYhdRQ6jjk/read) datasets.
52 |
53 | Take [OpenLORIS-cafe](https://drive.google.com/file/d/1lVhp6Z8GxXFCPd8bR9plHFoVZUadYiBZ/view) as an example.
54 |
55 | ```
56 | tar -xzvf cafe1-1_2-rosbag.tar
57 | cd cafe
58 | rosbag decompress cafe*
59 | python YOUR_PATH_TO_DYNAMIC_VINS/scripts/merge_imu_topics.py cafe1-1.bag cafe1-2.bag
60 | ```
61 |
62 | **NVIDIA devices (pytorch)**
63 |
64 | ```bash
65 | roslaunch vins_estimator openloris_vio_pytorch.launch
66 | roslaunch vins_estimator vins_rviz.launch # Visualization
67 | rosbag play YOUR_PATH_TO_DATASET/cafe.bag
68 | ```
69 |
70 | **NVIDIA devices (tensorrt)**
71 |
72 | ```bash
73 | roslaunch vins_estimator openloris_vio_tensorrt.launch
74 | roslaunch vins_estimator vins_rviz.launch # Visualization
75 | rosbag play YOUR_PATH_TO_DATASET/cafe.bag
76 | ```
77 |
78 | **HUAWEI Atlas200DK**
79 |
80 | ```bash
81 | roslaunch vins_estimator openloris_vio_atlas.launch
82 | ```
83 | Running Dynamic-VINS on the HUAWEI Atlas200DK requires a multi-device communication setup. For specific instructions, please refer to [MULTIPLE_DEVICES](./doc/MULTIPLE_DEVICES.md). Other kinds of edge devices can also follow this instruction.
84 |
85 | ### 3.2. HITSZ & THUSZ Datasets
86 |
87 | Please prepare enough space for the [datasets](https://furtive-lamprey-00b.notion.site/Dynamic-VINS-Datasets-4458bad4d1104697b00410936919327d).
88 | - HITSZ(41.0GB x 2)
89 | - THUSZ(51.3GB x 2)
90 |
91 | 1. Download the datasets via [Dynamic-VINS-Datasets](https://furtive-lamprey-00b.notion.site/Dynamic-VINS-Datasets-4458bad4d1104697b00410936919327d).
92 | 2. Run the following commands:
93 | ```bash
94 | # bash scripts/download_hitsz.sh # bash scripts/download_thusz.sh
95 | python3 scripts/rosbag_merge_chunk.py Datasets/hitsz_00.bag # python3 scripts/rosbag_merge_chunk.py Datasets/thusz_00.bag
96 | # rm ./Datasets/hitsz_*.bag # rm ./Datasets/thusz_*.bag
97 | roslaunch vins_estimator realsense_vio_campus.launch
98 | roslaunch vins_estimator vins_rviz.launch
99 | rosbag play Datasets/hitsz.bag # rosbag play thusz.bag
100 | ```
101 |
102 | ## 4. Configuration
103 |
104 | - For a detailed illustration of the configuration, please refer to the [realsense configuration](./config/realsense/vio.yaml).
105 | - A running procedure across two different platforms is illustrated in [Multiple Devices running_procedure](./doc/MULTIPLE_DEVICES.md).
106 | - For the HUAWEI Atlas200DK setup, please refer to [HUAWEI Atlas200DK Setup](https://support.huaweicloud.com/environment-deployment-Atlas200DK1012/atlased_04_0017.html).
107 |
108 |
109 | ## 5. Acknowledgments
110 | Dynamic-VINS is extended based on [VINS-Mono](https://github.com/HKUST-Aerial-Robotics/VINS-Mono), [VINS-RGBD](https://github.com/STAR-Center/VINS-RGBD), [yolov5](https://github.com/ultralytics/yolov5), [tensorrt_yolov5](https://github.com/wang-xinyu/tensorrtx/tree/master/yolov5), [ascend_yolo](https://gitee.com/ascend/samples/tree/master/cplusplus/level2_simple_inference/2_object_detection/YOLOV3_coco_detection_video).
111 |
--------------------------------------------------------------------------------
/camera_model/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.3)
2 | project(camera_model)
3 |
4 | set(CMAKE_BUILD_TYPE "Release")
5 | # https://blog.csdn.net/qq_24624539/article/details/111056791
6 | #set(CMAKE_CXX_FLAGS "-std=c++11")
7 | set(CMAKE_CXX_FLAGS "-std=c++14")
8 | set(CMAKE_CXX_FLAGS_RELEASE "-O3 -fPIC")
9 |
10 | find_package(catkin REQUIRED COMPONENTS
11 | roscpp
12 | std_msgs
13 | )
14 |
15 | find_package(Boost REQUIRED COMPONENTS filesystem program_options system)
16 | include_directories(${Boost_INCLUDE_DIRS})
17 |
18 | find_package(OpenCV REQUIRED)
19 |
20 | # set(EIGEN_INCLUDE_DIR "/usr/local/include/eigen3")
21 | find_package(Ceres REQUIRED)
22 | include_directories(${CERES_INCLUDE_DIRS})
23 |
24 |
25 | catkin_package(
26 | INCLUDE_DIRS include
27 | LIBRARIES camera_model
28 | CATKIN_DEPENDS roscpp std_msgs
29 | # DEPENDS system_lib
30 | )
31 |
32 | include_directories(
33 | ${catkin_INCLUDE_DIRS}
34 | )
35 |
36 | include_directories("include")
37 |
38 |
39 | add_executable(Calibration
40 | src/intrinsic_calib.cc
41 | src/chessboard/Chessboard.cc
42 | src/calib/CameraCalibration.cc
43 | src/camera_models/Camera.cc
44 | src/camera_models/CameraFactory.cc
45 | src/camera_models/CostFunctionFactory.cc
46 | src/camera_models/PinholeCamera.cc
47 | src/camera_models/CataCamera.cc
48 | src/camera_models/EquidistantCamera.cc
49 | src/camera_models/ScaramuzzaCamera.cc
50 | src/sparse_graph/Transform.cc
51 | src/gpl/gpl.cc
52 | src/gpl/EigenQuaternionParameterization.cc)
53 |
54 | add_library(camera_model
55 | src/chessboard/Chessboard.cc
56 | src/calib/CameraCalibration.cc
57 | src/camera_models/Camera.cc
58 | src/camera_models/CameraFactory.cc
59 | src/camera_models/CostFunctionFactory.cc
60 | src/camera_models/PinholeCamera.cc
61 | src/camera_models/CataCamera.cc
62 | src/camera_models/EquidistantCamera.cc
63 | src/camera_models/ScaramuzzaCamera.cc
64 | src/sparse_graph/Transform.cc
65 | src/gpl/gpl.cc
66 | src/gpl/EigenQuaternionParameterization.cc)
67 |
68 | target_link_libraries(Calibration ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
69 | target_link_libraries(camera_model ${Boost_LIBRARIES} ${OpenCV_LIBS} ${CERES_LIBRARIES})
70 |
--------------------------------------------------------------------------------
/camera_model/include/camodocal/calib/CameraCalibration.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERACALIBRATION_H
2 | #define CAMERACALIBRATION_H
3 |
4 | #include <opencv2/core/core.hpp>
5 |
6 | #include "camodocal/camera_models/Camera.h"
7 |
8 | namespace camodocal
9 | {
10 |
11 | class CameraCalibration
12 | {
13 | public:
14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW
15 | CameraCalibration();
16 |
17 | CameraCalibration(Camera::ModelType modelType,
18 | const std::string& cameraName,
19 | const cv::Size& imageSize,
20 | const cv::Size& boardSize,
21 | float squareSize);
22 |
23 | void clear(void);
24 |
25 | void addChessboardData(const std::vector<cv::Point2f>& corners);
26 |
27 | bool calibrate(void);
28 |
29 | int sampleCount(void) const;
30 | std::vector<std::vector<cv::Point2f> >& imagePoints(void);
31 | const std::vector<std::vector<cv::Point2f> >& imagePoints(void) const;
32 | std::vector<std::vector<cv::Point3f> >& scenePoints(void);
33 | const std::vector<std::vector<cv::Point3f> >& scenePoints(void) const;
34 | CameraPtr& camera(void);
35 | const CameraConstPtr camera(void) const;
36 |
37 | Eigen::Matrix2d& measurementCovariance(void);
38 | const Eigen::Matrix2d& measurementCovariance(void) const;
39 |
40 | cv::Mat& cameraPoses(void);
41 | const cv::Mat& cameraPoses(void) const;
42 |
43 | void drawResults(std::vector<cv::Mat>& images) const;
44 |
45 | void writeParams(const std::string& filename) const;
46 |
47 | bool writeChessboardData(const std::string& filename) const;
48 | bool readChessboardData(const std::string& filename);
49 |
50 | void setVerbose(bool verbose);
51 |
52 | private:
53 | bool calibrateHelper(CameraPtr& camera,
54 | std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const;
55 |
56 | void optimize(CameraPtr& camera,
57 | std::vector<cv::Mat>& rvecs, std::vector<cv::Mat>& tvecs) const;
58 |
59 | template<typename T>
60 | void readData(std::ifstream& ifs, T& data) const;
61 |
62 | template<typename T>
63 | void writeData(std::ofstream& ofs, T data) const;
64 |
65 | cv::Size m_boardSize;
66 | float m_squareSize;
67 |
68 | CameraPtr m_camera;
69 | cv::Mat m_cameraPoses;
70 |
71 | std::vector<std::vector<cv::Point2f> > m_imagePoints;
72 | std::vector<std::vector<cv::Point3f> > m_scenePoints;
73 |
74 | Eigen::Matrix2d m_measurementCovariance;
75 |
76 | bool m_verbose;
77 | };
78 |
79 | }
80 |
81 | #endif
82 |
--------------------------------------------------------------------------------
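As a quick orientation to the interface declared above, here is a minimal, hypothetical calibration sketch; the board size, square size, image size, and output filename are example values, and corner detection is assumed to come from the Chessboard class declared later in this package:

```cpp
#include <vector>
#include <opencv2/core/core.hpp>
#include "camodocal/calib/CameraCalibration.h"

int main()
{
    using namespace camodocal;

    // Example setup: 8x11 inner corners, 70 mm squares, 848x480 images.
    CameraCalibration calib(Camera::PINHOLE, "camera",
                            cv::Size(848, 480), cv::Size(8, 11), 70.0f);
    calib.setVerbose(true);

    // In a real run, 'corners' would be filled once per calibration image,
    // e.g. by Chessboard::getCorners().
    std::vector<cv::Point2f> corners;
    calib.addChessboardData(corners);

    if (calib.calibrate())
    {
        calib.writeParams("camera_calib.yaml"); // hypothetical output file
    }
    return 0;
}
```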
/camera_model/include/camodocal/camera_models/CameraFactory.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERAFACTORY_H
2 | #define CAMERAFACTORY_H
3 |
4 | #include <boost/shared_ptr.hpp>
5 | #include <opencv2/core/core.hpp>
6 |
7 | #include "camodocal/camera_models/Camera.h"
8 |
9 | namespace camodocal
10 | {
11 |
12 | class CameraFactory
13 | {
14 | public:
15 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW
16 | CameraFactory();
17 |
18 | static boost::shared_ptr<CameraFactory> instance(void);
19 |
20 | CameraPtr generateCamera(Camera::ModelType modelType,
21 | const std::string& cameraName,
22 | cv::Size imageSize) const;
23 |
24 | CameraPtr generateCameraFromYamlFile(const std::string& filename);
25 |
26 | private:
27 | static boost::shared_ptr<CameraFactory> m_instance;
28 | };
29 |
30 | }
31 |
32 | #endif
33 |
--------------------------------------------------------------------------------
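A short usage sketch for the factory declared above; the model type and image size are example values, not code from the project:

```cpp
#include <opencv2/core/core.hpp>
#include "camodocal/camera_models/CameraFactory.h"

int main()
{
    using namespace camodocal;

    // Ask the singleton factory for a pinhole camera of a given name and size.
    CameraPtr cam = CameraFactory::instance()->generateCamera(
        Camera::PINHOLE, "camera", cv::Size(848, 480));

    // 'cam' exposes the generic Camera interface (liftProjective, spaceToPlane, ...)
    // declared in camodocal/camera_models/Camera.h.
    return cam ? 0 : 1;
}
```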
/camera_model/include/camodocal/camera_models/CostFunctionFactory.h:
--------------------------------------------------------------------------------
1 | #ifndef COSTFUNCTIONFACTORY_H
2 | #define COSTFUNCTIONFACTORY_H
3 |
4 | #include <boost/shared_ptr.hpp>
5 | #include <opencv2/core/core.hpp>
6 |
7 | #include "camodocal/camera_models/Camera.h"
8 |
9 | namespace ceres
10 | {
11 | class CostFunction;
12 | }
13 |
14 | namespace camodocal
15 | {
16 |
17 | enum
18 | {
19 | CAMERA_INTRINSICS = 1 << 0,
20 | CAMERA_POSE = 1 << 1,
21 | POINT_3D = 1 << 2,
22 | ODOMETRY_INTRINSICS = 1 << 3,
23 | ODOMETRY_3D_POSE = 1 << 4,
24 | ODOMETRY_6D_POSE = 1 << 5,
25 | CAMERA_ODOMETRY_TRANSFORM = 1 << 6
26 | };
27 |
28 | class CostFunctionFactory
29 | {
30 | public:
31 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW
32 | CostFunctionFactory();
33 |
34 | static boost::shared_ptr<CostFunctionFactory> instance(void);
35 |
36 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
37 | const Eigen::Vector3d& observed_P,
38 | const Eigen::Vector2d& observed_p,
39 | int flags) const;
40 |
41 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
42 | const Eigen::Vector3d& observed_P,
43 | const Eigen::Vector2d& observed_p,
44 | const Eigen::Matrix2d& sqrtPrecisionMat,
45 | int flags) const;
46 |
47 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
48 | const Eigen::Vector2d& observed_p,
49 | int flags, bool optimize_cam_odo_z = true) const;
50 |
51 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
52 | const Eigen::Vector2d& observed_p,
53 | const Eigen::Matrix2d& sqrtPrecisionMat,
54 | int flags, bool optimize_cam_odo_z = true) const;
55 |
56 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
57 | const Eigen::Vector3d& odo_pos,
58 | const Eigen::Vector3d& odo_att,
59 | const Eigen::Vector2d& observed_p,
60 | int flags, bool optimize_cam_odo_z = true) const;
61 |
62 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& camera,
63 | const Eigen::Quaterniond& cam_odo_q,
64 | const Eigen::Vector3d& cam_odo_t,
65 | const Eigen::Vector3d& odo_pos,
66 | const Eigen::Vector3d& odo_att,
67 | const Eigen::Vector2d& observed_p,
68 | int flags) const;
69 |
70 | ceres::CostFunction* generateCostFunction(const CameraConstPtr& cameraLeft,
71 | const CameraConstPtr& cameraRight,
72 | const Eigen::Vector3d& observed_P,
73 | const Eigen::Vector2d& observed_p_left,
74 | const Eigen::Vector2d& observed_p_right) const;
75 |
76 | private:
77 | static boost::shared_ptr<CostFunctionFactory> m_instance;
78 | };
79 |
80 | }
81 |
82 | #endif
83 |
--------------------------------------------------------------------------------
/camera_model/include/camodocal/chessboard/Chessboard.h:
--------------------------------------------------------------------------------
1 | #ifndef CHESSBOARD_H
2 | #define CHESSBOARD_H
3 |
4 | #include <boost/shared_ptr.hpp>
5 | #include <opencv2/core/core.hpp>
6 |
7 | namespace camodocal
8 | {
9 |
10 | // forward declarations
11 | class ChessboardCorner;
12 | typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;
13 | class ChessboardQuad;
14 | typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;
15 |
16 | class Chessboard
17 | {
18 | public:
19 | Chessboard(cv::Size boardSize, cv::Mat& image);
20 |
21 | void findCorners(bool useOpenCV = false);
22 | const std::vector<cv::Point2f>& getCorners(void) const;
23 | bool cornersFound(void) const;
24 |
25 | const cv::Mat& getImage(void) const;
26 | const cv::Mat& getSketch(void) const;
27 |
28 | private:
29 | bool findChessboardCorners(const cv::Mat& image,
30 | const cv::Size& patternSize,
31 | std::vector<cv::Point2f>& corners,
32 | int flags, bool useOpenCV);
33 |
34 | bool findChessboardCornersImproved(const cv::Mat& image,
35 | const cv::Size& patternSize,
36 | std::vector<cv::Point2f>& corners,
37 | int flags);
38 |
39 | void cleanFoundConnectedQuads(std::vector<ChessboardQuadPtr>& quadGroup, cv::Size patternSize);
40 |
41 | void findConnectedQuads(std::vector<ChessboardQuadPtr>& quads,
42 | std::vector<ChessboardQuadPtr>& group,
43 | int group_idx, int dilation);
44 |
45 | // int checkQuadGroup(std::vector<ChessboardQuadPtr>& quadGroup,
46 | // std::vector<ChessboardCornerPtr>& outCorners,
47 | // cv::Size patternSize);
48 |
49 | void labelQuadGroup(std::vector<ChessboardQuadPtr>& quad_group,
50 | cv::Size patternSize, bool firstRun);
51 |
52 | void findQuadNeighbors(std::vector<ChessboardQuadPtr>& quads, int dilation);
53 |
54 | int augmentBestRun(std::vector<ChessboardQuadPtr>& candidateQuads, int candidateDilation,
55 | std::vector<ChessboardQuadPtr>& existingQuads, int existingDilation);
56 |
57 | void generateQuads(std::vector<ChessboardQuadPtr>& quads,
58 | cv::Mat& image, int flags,
59 | int dilation, bool firstRun);
60 |
61 | bool checkQuadGroup(std::vector<ChessboardQuadPtr>& quads,
62 | std::vector<ChessboardCornerPtr>& corners,
63 | cv::Size patternSize);
64 |
65 | void getQuadrangleHypotheses(const std::vector< std::vector<cv::Point> >& contours,
66 | std::vector< std::pair<float, int> >& quads,
67 | int classId) const;
68 |
69 | bool checkChessboard(const cv::Mat& image, cv::Size patternSize) const;
70 |
71 | bool checkBoardMonotony(std::vector<cv::Point2f>& corners,
72 | cv::Size patternSize);
73 |
74 | bool matchCorners(ChessboardQuadPtr& quad1, int corner1,
75 | ChessboardQuadPtr& quad2, int corner2) const;
76 |
77 | cv::Mat mImage;
78 | cv::Mat mSketch;
79 | std::vector<cv::Point2f> mCorners;
80 | cv::Size mBoardSize;
81 | bool mCornersFound;
82 | };
83 |
84 | }
85 |
86 | #endif
87 |
--------------------------------------------------------------------------------
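A hedged sketch of how the detector above is typically driven; the input image path and board size are hypothetical:

```cpp
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include "camodocal/chessboard/Chessboard.h"

int main()
{
    // Hypothetical input image of a chessboard with 8x11 inner corners.
    cv::Mat image = cv::imread("board.png", cv::IMREAD_GRAYSCALE);
    if (image.empty()) return 1;

    camodocal::Chessboard chessboard(cv::Size(8, 11), image);
    chessboard.findCorners(/*useOpenCV=*/false);

    if (chessboard.cornersFound())
    {
        const std::vector<cv::Point2f>& corners = chessboard.getCorners();
        // 'corners' can be handed to CameraCalibration::addChessboardData().
        (void)corners;
    }
    return 0;
}
```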
/camera_model/include/camodocal/chessboard/ChessboardCorner.h:
--------------------------------------------------------------------------------
1 | #ifndef CHESSBOARDCORNER_H
2 | #define CHESSBOARDCORNER_H
3 |
4 | #include <boost/shared_ptr.hpp>
5 | #include <opencv2/core/core.hpp>
6 |
7 | namespace camodocal
8 | {
9 |
10 | class ChessboardCorner;
11 | typedef boost::shared_ptr<ChessboardCorner> ChessboardCornerPtr;
12 |
13 | class ChessboardCorner
14 | {
15 | public:
16 | ChessboardCorner() : row(0), column(0), needsNeighbor(true), count(0) {}
17 |
18 | float meanDist(int &n) const
19 | {
20 | float sum = 0;
21 | n = 0;
22 | for (int i = 0; i < 4; ++i)
23 | {
24 | if (neighbors[i].get())
25 | {
26 | float dx = neighbors[i]->pt.x - pt.x;
27 | float dy = neighbors[i]->pt.y - pt.y;
28 | sum += sqrt(dx*dx + dy*dy);
29 | n++;
30 | }
31 | }
32 | return sum / std::max(n, 1);
33 | }
34 |
35 | cv::Point2f pt; // X and y coordinates
36 | int row; // Row and column of the corner
37 | int column; // in the found pattern
38 | bool needsNeighbor; // Does the corner require a neighbor?
39 | int count; // number of corner neighbors
40 | ChessboardCornerPtr neighbors[4]; // pointer to all corner neighbors
41 | };
42 |
43 | }
44 |
45 | #endif
46 |
--------------------------------------------------------------------------------
/camera_model/include/camodocal/chessboard/ChessboardQuad.h:
--------------------------------------------------------------------------------
1 | #ifndef CHESSBOARDQUAD_H
2 | #define CHESSBOARDQUAD_H
3 |
4 | #include <boost/shared_ptr.hpp>
5 |
6 | #include "camodocal/chessboard/ChessboardCorner.h"
7 |
8 | namespace camodocal
9 | {
10 |
11 | class ChessboardQuad;
12 | typedef boost::shared_ptr<ChessboardQuad> ChessboardQuadPtr;
13 |
14 | class ChessboardQuad
15 | {
16 | public:
17 | ChessboardQuad() : count(0), group_idx(-1), edge_len(FLT_MAX), labeled(false) {}
18 |
19 | int count; // Number of quad neighbors
20 | int group_idx; // Quad group ID
21 | float edge_len; // Smallest side length^2
22 | ChessboardCornerPtr corners[4]; // Coordinates of quad corners
23 | ChessboardQuadPtr neighbors[4]; // Pointers of quad neighbors
24 | bool labeled; // Has this corner been labeled?
25 | };
26 |
27 | }
28 |
29 | #endif
30 |
--------------------------------------------------------------------------------
/camera_model/include/camodocal/gpl/EigenQuaternionParameterization.h:
--------------------------------------------------------------------------------
1 | #ifndef EIGENQUATERNIONPARAMETERIZATION_H
2 | #define EIGENQUATERNIONPARAMETERIZATION_H
3 |
4 | #include "ceres/local_parameterization.h"
5 |
6 | namespace camodocal
7 | {
8 |
9 | class EigenQuaternionParameterization : public ceres::LocalParameterization
10 | {
11 | public:
12 | virtual ~EigenQuaternionParameterization() {}
13 | virtual bool Plus(const double* x,
14 | const double* delta,
15 | double* x_plus_delta) const;
16 | virtual bool ComputeJacobian(const double* x,
17 | double* jacobian) const;
18 | virtual int GlobalSize() const { return 4; }
19 | virtual int LocalSize() const { return 3; }
20 |
21 | private:
22 | template<typename T>
23 | void EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const;
24 | };
25 |
26 |
27 | template<typename T>
28 | void
29 | EigenQuaternionParameterization::EigenQuaternionProduct(const T z[4], const T w[4], T zw[4]) const
30 | {
31 | zw[0] = z[3] * w[0] + z[0] * w[3] + z[1] * w[2] - z[2] * w[1];
32 | zw[1] = z[3] * w[1] - z[0] * w[2] + z[1] * w[3] + z[2] * w[0];
33 | zw[2] = z[3] * w[2] + z[0] * w[1] - z[1] * w[0] + z[2] * w[3];
34 | zw[3] = z[3] * w[3] - z[0] * w[0] - z[1] * w[1] - z[2] * w[2];
35 | }
36 |
37 | }
38 |
39 | #endif
40 |
41 |
--------------------------------------------------------------------------------
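A minimal sketch of attaching this parameterization to a 4-element quaternion block in Ceres; the parameter block is hypothetical, and this uses the ceres::LocalParameterization API that the header targets (i.e. Ceres versions before the Manifold interface replaced it):

```cpp
#include <ceres/ceres.h>
#include "camodocal/gpl/EigenQuaternionParameterization.h"

int main()
{
    // Quaternion stored in Eigen coefficient order (x, y, z, w): identity rotation.
    double q[4] = {0.0, 0.0, 0.0, 1.0};

    ceres::Problem problem;
    problem.AddParameterBlock(q, 4, new camodocal::EigenQuaternionParameterization);

    // Residual blocks that depend on 'q' would be added here; the solver then
    // updates q with 3-DoF increments via Plus() while keeping it a unit quaternion.
    return 0;
}
```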
/camera_model/include/camodocal/gpl/gpl.h:
--------------------------------------------------------------------------------
1 | #ifndef GPL_H
2 | #define GPL_H
3 |
4 | #include <algorithm>
5 | #include <cmath>
6 | #include <opencv2/core/core.hpp>
7 |
8 | namespace camodocal
9 | {
10 |
11 | template<typename T>
12 | const T clamp(const T& v, const T& a, const T& b)
13 | {
14 | return std::min(b, std::max(a, v));
15 | }
16 |
17 | double hypot3(double x, double y, double z);
18 | float hypot3f(float x, float y, float z);
19 |
20 | template<typename T>
21 | const T normalizeTheta(const T& theta)
22 | {
23 | T normTheta = theta;
24 |
25 | while (normTheta < - M_PI)
26 | {
27 | normTheta += 2.0 * M_PI;
28 | }
29 | while (normTheta > M_PI)
30 | {
31 | normTheta -= 2.0 * M_PI;
32 | }
33 |
34 | return normTheta;
35 | }
36 |
37 | double d2r(double deg);
38 | float d2r(float deg);
39 | double r2d(double rad);
40 | float r2d(float rad);
41 |
42 | double sinc(double theta);
43 |
44 | template<typename T>
45 | const T square(const T& x)
46 | {
47 | return x * x;
48 | }
49 |
50 | template<typename T>
51 | const T cube(const T& x)
52 | {
53 | return x * x * x;
54 | }
55 |
56 | template<typename T>
57 | const T random(const T& a, const T& b)
58 | {
59 | return static_cast<double>(rand()) / RAND_MAX * (b - a) + a;
60 | }
61 |
62 | template<typename T>
63 | const T randomNormal(const T& sigma)
64 | {
65 | T x1, x2, w;
66 |
67 | do
68 | {
69 | x1 = 2.0 * random(0.0, 1.0) - 1.0;
70 | x2 = 2.0 * random(0.0, 1.0) - 1.0;
71 | w = x1 * x1 + x2 * x2;
72 | }
73 | while (w >= 1.0 || w == 0.0);
74 |
75 | w = sqrt((-2.0 * log(w)) / w);
76 |
77 | return x1 * w * sigma;
78 | }
79 |
80 | unsigned long long timeInMicroseconds(void);
81 |
82 | double timeInSeconds(void);
83 |
84 | void colorDepthImage(cv::Mat& imgDepth,
85 | cv::Mat& imgColoredDepth,
86 | float minRange, float maxRange);
87 |
88 | bool colormap(const std::string& name, unsigned char idx,
89 | float& r, float& g, float& b);
90 |
91 | std::vector<cv::Point2i> bresLine(int x0, int y0, int x1, int y1);
92 | std::vector<cv::Point2i> bresCircle(int x0, int y0, int r);
93 |
94 | void fitCircle(const std::vector<cv::Point2d>& points,
95 | double& centerX, double& centerY, double& radius);
96 |
97 | std::vector<cv::Point2d> intersectCircles(double x1, double y1, double r1,
98 | double x2, double y2, double r2);
99 |
100 | void LLtoUTM(double latitude, double longitude,
101 | double& utmNorthing, double& utmEasting,
102 | std::string& utmZone);
103 | void UTMtoLL(double utmNorthing, double utmEasting,
104 | const std::string& utmZone,
105 | double& latitude, double& longitude);
106 |
107 | long int timestampDiff(uint64_t t1, uint64_t t2);
108 |
109 | }
110 |
111 | #endif
112 |
--------------------------------------------------------------------------------
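A tiny sketch exercising a few of the header-only helpers above; the numeric values are only illustrative:

```cpp
#include <cmath>
#include <cstdio>
#include "camodocal/gpl/gpl.h"

int main()
{
    using namespace camodocal;

    double v     = clamp(3.7, 0.0, 1.0);              // clipped to 1.0
    double theta = normalizeTheta(2.0 * M_PI + 0.1);   // wrapped back to ~0.1 rad
    double s     = square(0.5);                        // 0.25
    double c     = cube(2.0);                          // 8.0

    std::printf("%f %f %f %f\n", v, theta, s, c);
    return 0;
}
```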
/camera_model/include/camodocal/sparse_graph/Transform.h:
--------------------------------------------------------------------------------
1 | #ifndef TRANSFORM_H
2 | #define TRANSFORM_H
3 |
4 | #include <cstdint>
5 | #include <Eigen/Dense>
6 | #include <Eigen/Geometry>
7 |
8 | namespace camodocal
9 | {
10 |
11 | class Transform
12 | {
13 | public:
14 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW
15 |
16 | Transform();
17 | Transform(const Eigen::Matrix4d& H);
18 |
19 | Eigen::Quaterniond& rotation(void);
20 | const Eigen::Quaterniond& rotation(void) const;
21 | double* rotationData(void);
22 | const double* const rotationData(void) const;
23 |
24 | Eigen::Vector3d& translation(void);
25 | const Eigen::Vector3d& translation(void) const;
26 | double* translationData(void);
27 | const double* const translationData(void) const;
28 |
29 | Eigen::Matrix4d toMatrix(void) const;
30 |
31 | private:
32 | Eigen::Quaterniond m_q;
33 | Eigen::Vector3d m_t;
34 | };
35 |
36 | }
37 |
38 | #endif
39 |
--------------------------------------------------------------------------------
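A small round-trip sketch for the Transform wrapper declared above; the rotation and translation values are examples:

```cpp
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include "camodocal/sparse_graph/Transform.h"

int main()
{
    // Build a homogeneous transform from an example rotation and translation.
    Eigen::Matrix4d H = Eigen::Matrix4d::Identity();
    H.block<3,3>(0,0) = Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitZ()).toRotationMatrix();
    H.block<3,1>(0,3) = Eigen::Vector3d(0.1, 0.2, 0.3);

    camodocal::Transform T(H);
    Eigen::Matrix4d H2 = T.toMatrix();  // reproduces H up to numerical noise

    // rotationData()/translationData() expose raw pointers, e.g. for Ceres parameter blocks.
    return (H - H2).norm() < 1e-9 ? 0 : 1;
}
```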
/camera_model/instruction:
--------------------------------------------------------------------------------
1 | rosrun camera_model Calibration -w 8 -h 11 -s 70 -i ~/bag/PX/calib/
2 |
--------------------------------------------------------------------------------
/camera_model/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>camera_model</name>
4 |   <version>0.0.0</version>
5 |   <description>The camera_model package</description>
6 |
7 |   <maintainer>dvorak</maintainer>
8 |   <license>TODO</license>
9 |
10 |   <buildtool_depend>catkin</buildtool_depend>
11 |   <build_depend>roscpp</build_depend>
12 |   <build_depend>std_msgs</build_depend>
13 |   <run_depend>roscpp</run_depend>
14 |   <run_depend>std_msgs</run_depend>
15 |
16 |   <export>
17 |   </export>
18 | </package>
--------------------------------------------------------------------------------
/camera_model/readme.md:
--------------------------------------------------------------------------------
1 | part of [camodocal](https://github.com/hengli/camodocal)
2 |
3 | [Google Ceres](http://ceres-solver.org) is needed.
4 |
5 | # Calibration:
6 |
7 | Use [intrinsic_calib.cc](https://github.com/dvorak0/camera_model/blob/master/src/intrinsic_calib.cc) to calibrate your camera.
8 |
9 | # Undistortion:
10 |
11 | See [Camera.h](https://github.com/dvorak0/camera_model/blob/master/include/camodocal/camera_models/Camera.h) for the general interface:
12 |
13 | - liftProjective: Lift points from the image plane to the projective space.
14 | - spaceToPlane: Projects 3D points to the image plane (Pi function)
15 |
16 |
--------------------------------------------------------------------------------
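As a rough illustration of the two interface calls listed above, a hedged sketch (the YAML path is hypothetical; the signatures follow the declarations in Camera.h):

```cpp
#include <Eigen/Dense>
#include "camodocal/camera_models/CameraFactory.h"

int main()
{
    using namespace camodocal;

    // Load any supported camera model from its YAML description (hypothetical path).
    CameraPtr cam = CameraFactory::instance()->generateCameraFromYamlFile("camera.yaml");
    if (!cam) return 1;

    Eigen::Vector2d pixel(320.0, 240.0);
    Eigen::Vector3d ray;
    cam->liftProjective(pixel, ray);       // image plane -> projective space (undistort)

    Eigen::Vector2d reprojected;
    cam->spaceToPlane(ray, reprojected);   // 3D point -> image plane (the Pi function)
    return 0;
}
```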
/camera_model/src/camera_models/CameraFactory.cc:
--------------------------------------------------------------------------------
1 | #include "camodocal/camera_models/CameraFactory.h"
2 |
3 | #include <boost/algorithm/string.hpp>
4 |
5 |
6 | #include "camodocal/camera_models/CataCamera.h"
7 | #include "camodocal/camera_models/EquidistantCamera.h"
8 | #include "camodocal/camera_models/PinholeCamera.h"
9 | #include "camodocal/camera_models/ScaramuzzaCamera.h"
10 |
11 | #include "ceres/ceres.h"
12 |
13 | namespace camodocal
14 | {
15 |
16 | boost::shared_ptr<CameraFactory> CameraFactory::m_instance;
17 |
18 | CameraFactory::CameraFactory()
19 | {
20 |
21 | }
22 |
23 | boost::shared_ptr<CameraFactory>
24 | CameraFactory::instance(void)
25 | {
26 | if (m_instance.get() == 0)
27 | {
28 | m_instance.reset(new CameraFactory);
29 | }
30 |
31 | return m_instance;
32 | }
33 |
34 | CameraPtr
35 | CameraFactory::generateCamera(Camera::ModelType modelType,
36 | const std::string& cameraName,
37 | cv::Size imageSize) const
38 | {
39 | switch (modelType)
40 | {
41 | case Camera::KANNALA_BRANDT:
42 | {
43 | EquidistantCameraPtr camera(new EquidistantCamera);
44 |
45 | EquidistantCamera::Parameters params = camera->getParameters();
46 | params.cameraName() = cameraName;
47 | params.imageWidth() = imageSize.width;
48 | params.imageHeight() = imageSize.height;
49 | camera->setParameters(params);
50 | return camera;
51 | }
52 | case Camera::PINHOLE:
53 | {
54 | PinholeCameraPtr camera(new PinholeCamera);
55 |
56 | PinholeCamera::Parameters params = camera->getParameters();
57 | params.cameraName() = cameraName;
58 | params.imageWidth() = imageSize.width;
59 | params.imageHeight() = imageSize.height;
60 | camera->setParameters(params);
61 | return camera;
62 | }
63 | case Camera::SCARAMUZZA:
64 | {
65 | OCAMCameraPtr camera(new OCAMCamera);
66 |
67 | OCAMCamera::Parameters params = camera->getParameters();
68 | params.cameraName() = cameraName;
69 | params.imageWidth() = imageSize.width;
70 | params.imageHeight() = imageSize.height;
71 | camera->setParameters(params);
72 | return camera;
73 | }
74 | case Camera::MEI:
75 | default:
76 | {
77 | CataCameraPtr camera(new CataCamera);
78 |
79 | CataCamera::Parameters params = camera->getParameters();
80 | params.cameraName() = cameraName;
81 | params.imageWidth() = imageSize.width;
82 | params.imageHeight() = imageSize.height;
83 | camera->setParameters(params);
84 | return camera;
85 | }
86 | }
87 | }
88 |
89 | CameraPtr
90 | CameraFactory::generateCameraFromYamlFile(const std::string& filename)
91 | {
92 | cv::FileStorage fs(filename, cv::FileStorage::READ);
93 |
94 | if (!fs.isOpened())
95 | {
96 | return CameraPtr();
97 | }
98 |
99 | Camera::ModelType modelType = Camera::MEI;
100 | if (!fs["model_type"].isNone())
101 | {
102 | std::string sModelType;
103 | fs["model_type"] >> sModelType;
104 |
105 | if (boost::iequals(sModelType, "kannala_brandt"))
106 | {
107 | modelType = Camera::KANNALA_BRANDT;
108 | }
109 | else if (boost::iequals(sModelType, "mei"))
110 | {
111 | modelType = Camera::MEI;
112 | }
113 | else if (boost::iequals(sModelType, "scaramuzza"))
114 | {
115 | modelType = Camera::SCARAMUZZA;
116 | }
117 | else if (boost::iequals(sModelType, "pinhole"))
118 | {
119 | modelType = Camera::PINHOLE;
120 | }
121 | else
122 | {
123 | std::cerr << "# ERROR: Unknown camera model: " << sModelType << std::endl;
124 | return CameraPtr();
125 | }
126 | }
127 |
128 | switch (modelType)
129 | {
130 | case Camera::KANNALA_BRANDT:
131 | {
132 | EquidistantCameraPtr camera(new EquidistantCamera);
133 |
134 | EquidistantCamera::Parameters params = camera->getParameters();
135 | params.readFromYamlFile(filename);
136 | camera->setParameters(params);
137 | return camera;
138 | }
139 | case Camera::PINHOLE:
140 | {
141 | PinholeCameraPtr camera(new PinholeCamera);
142 |
143 | PinholeCamera::Parameters params = camera->getParameters();
144 | params.readFromYamlFile(filename);
145 | camera->setParameters(params);
146 | return camera;
147 | }
148 | case Camera::SCARAMUZZA:
149 | {
150 | OCAMCameraPtr camera(new OCAMCamera);
151 |
152 | OCAMCamera::Parameters params = camera->getParameters();
153 | params.readFromYamlFile(filename);
154 | camera->setParameters(params);
155 | return camera;
156 | }
157 | case Camera::MEI:
158 | default:
159 | {
160 | CataCameraPtr camera(new CataCamera);
161 |
162 | CataCamera::Parameters params = camera->getParameters();
163 | params.readFromYamlFile(filename);
164 | camera->setParameters(params);
165 | return camera;
166 | }
167 | }
168 |
169 | return CameraPtr();
170 | }
171 |
172 | }
173 |
174 |
--------------------------------------------------------------------------------
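A hedged loading sketch against the factory implementation above; the config path is an example from this repository, and a complete file needs the full parameter block for the chosen model, as in config/openloris/openloris_vio.yaml:

```cpp
#include "camodocal/camera_models/CameraFactory.h"

int main()
{
    using namespace camodocal;

    // The factory dispatches on the "model_type" key (PINHOLE, MEI, KANNALA_BRANDT,
    // SCARAMUZZA, case-insensitive); when the key is missing it falls back to MEI.
    CameraPtr cam = CameraFactory::instance()->generateCameraFromYamlFile(
        "config/realsense/vio.yaml");

    if (!cam)
    {
        // An empty pointer is returned when the file cannot be opened
        // or the model name is unknown.
        return 1;
    }
    return 0;
}
```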
/camera_model/src/gpl/EigenQuaternionParameterization.cc:
--------------------------------------------------------------------------------
1 | #include "camodocal/gpl/EigenQuaternionParameterization.h"
2 |
3 | #include <cmath>
4 |
5 | namespace camodocal
6 | {
7 |
8 | bool
9 | EigenQuaternionParameterization::Plus(const double* x,
10 | const double* delta,
11 | double* x_plus_delta) const
12 | {
13 | const double norm_delta =
14 | sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
15 | if (norm_delta > 0.0)
16 | {
17 | const double sin_delta_by_delta = (sin(norm_delta) / norm_delta);
18 | double q_delta[4];
19 | q_delta[0] = sin_delta_by_delta * delta[0];
20 | q_delta[1] = sin_delta_by_delta * delta[1];
21 | q_delta[2] = sin_delta_by_delta * delta[2];
22 | q_delta[3] = cos(norm_delta);
23 | EigenQuaternionProduct(q_delta, x, x_plus_delta);
24 | }
25 | else
26 | {
27 | for (int i = 0; i < 4; ++i)
28 | {
29 | x_plus_delta[i] = x[i];
30 | }
31 | }
32 | return true;
33 | }
34 |
35 | bool
36 | EigenQuaternionParameterization::ComputeJacobian(const double* x,
37 | double* jacobian) const
38 | {
39 | jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT
40 | jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT
41 | jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT
42 | jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // NOLINT
43 | return true;
44 | }
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/camera_model/src/sparse_graph/Transform.cc:
--------------------------------------------------------------------------------
1 | #include <camodocal/sparse_graph/Transform.h>
2 |
3 | namespace camodocal
4 | {
5 |
6 | Transform::Transform()
7 | {
8 | m_q.setIdentity();
9 | m_t.setZero();
10 | }
11 |
12 | Transform::Transform(const Eigen::Matrix4d& H)
13 | {
14 | m_q = Eigen::Quaterniond(H.block<3,3>(0,0));
15 | m_t = H.block<3,1>(0,3);
16 | }
17 |
18 | Eigen::Quaterniond&
19 | Transform::rotation(void)
20 | {
21 | return m_q;
22 | }
23 |
24 | const Eigen::Quaterniond&
25 | Transform::rotation(void) const
26 | {
27 | return m_q;
28 | }
29 |
30 | double*
31 | Transform::rotationData(void)
32 | {
33 | return m_q.coeffs().data();
34 | }
35 |
36 | const double* const
37 | Transform::rotationData(void) const
38 | {
39 | return m_q.coeffs().data();
40 | }
41 |
42 | Eigen::Vector3d&
43 | Transform::translation(void)
44 | {
45 | return m_t;
46 | }
47 |
48 | const Eigen::Vector3d&
49 | Transform::translation(void) const
50 | {
51 | return m_t;
52 | }
53 |
54 | double*
55 | Transform::translationData(void)
56 | {
57 | return m_t.data();
58 | }
59 |
60 | const double* const
61 | Transform::translationData(void) const
62 | {
63 | return m_t.data();
64 | }
65 |
66 | Eigen::Matrix4d
67 | Transform::toMatrix(void) const
68 | {
69 | Eigen::Matrix4d H;
70 | H.setIdentity();
71 | H.block<3,3>(0,0) = m_q.toRotationMatrix();
72 | H.block<3,1>(0,3) = m_t;
73 |
74 | return H;
75 | }
76 |
77 | }
78 |
--------------------------------------------------------------------------------
/config/fisheye_mask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HITSZ-NRSL/Dynamic-VINS/b167a7aa1b3da1094b4d53f4d4844988197854b4/config/fisheye_mask.jpg
--------------------------------------------------------------------------------
/config/fisheye_mask_752x480.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HITSZ-NRSL/Dynamic-VINS/b167a7aa1b3da1094b4d53f4d4844988197854b4/config/fisheye_mask_752x480.jpg
--------------------------------------------------------------------------------
/config/openloris/openloris_vio.yaml:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 |
3 | num_threads: 0 # 0 Use the max number of threads of your device.
4 | # For some devices, like the HUAWEI Atlas200, the auto-detected max number of threads might not be equivalent to the usable number of threads. (Some cores/threads might be reserved by the system for other usage, e.g. the NPU.)
5 | # x Set a specific number of threads; it is proper that 1 < x < MAX_THREAD.
6 | # For now, this parameter only affects the grid-based detector running in parallel.
7 |
8 | #common parameters
9 | imu: 1
10 | static_init: 0
11 | imu_topic: "/d400/imu0"
12 | image_topic: "/d400/color/image_raw"
13 | depth_topic: "/d400/aligned_depth_to_color/image_raw"
14 | output_path: "/home/nrosliu/DynamicVINS-Atlas/src/Dynamic_VINS/output"
15 |
16 | #RGBD camera Ideal Range
17 | depth_min_dist: 0.3
18 | depth_max_dist: 3
19 |
20 | frontend_freq: 30 # It should be raised in VO mode (without IMU).
21 | keyframe_parallax: 10.0 # keyframe selection threshold (pixel); if the system fails frequently, please try to reduce the "keyframe_parallax"
22 | num_grid_rows: 7
23 | num_grid_cols: 8
24 |
25 | #unsynchronization parameters
26 | estimate_td: 1 ########## # online estimate time offset between camera and imu
27 | td: 0.000 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
28 |
29 | #camera calibration
30 | model_type: PINHOLE
31 | camera_name: camera
32 | image_width: 848
33 | image_height: 480
34 | #TODO modify distortion
35 |
36 | distortion_parameters:
37 | k1: 0.0
38 | k2: 0.0
39 | p1: 0.0
40 | p2: 0.0
41 | projection_parameters:
42 | fx: 611.4509887695312
43 | fy: 611.4857177734375
44 | cx: 433.2039794921875
45 | cy: 249.4730224609375
46 |
47 | # Extrinsic parameter between IMU and Camera.
48 | estimate_extrinsic: 0 # 0 Have accurate extrinsic parameters. We will trust the following imu^R_cam, imu^T_cam; don't change it.
49 | # 1 Have an initial guess about extrinsic parameters. We will optimize around your initial guess.
50 | # 2 Don't know anything about extrinsic parameters. You don't need to give R, T. We will try to calibrate it. Do some rotation movement at the beginning.
51 | #If you choose 0 or 1, you should write down the following matrix.
52 | #Rotation from camera frame to imu frame, imu^R_cam
53 | extrinsicRotation: !!opencv-matrix
54 | rows: 3
55 | cols: 3
56 | dt: d
57 | data: [ 0.999975572679493, 0.003849141066713, 0.005854714944742,
58 | -0.003828680351062, 0.999986658473099, -0.003501944262433,
59 | -0.005868115609379, 0.003479442469180, 0.999976848846595]
60 | #Translation from camera frame to imu frame, imu^T_cam
61 | extrinsicTranslation: !!opencv-matrix
62 | rows: 3
63 | cols: 1
64 | dt: d
65 | data: [0.0203127935529, -0.00510325236246, -0.0112013882026]
66 |
67 | #feature tracker parameters
68 | max_cnt: 130 # max feature number in feature tracking. It is suggested to be raised in VO mode.
69 | min_dist: 30 # min distance between two features
70 | freq: 10 # frequency (Hz) of publishing the tracking result. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
71 | F_threshold: 1.0 # ransac threshold (pixel)
72 | show_track: 1 # publish tracking image as topic
73 | equalize: 0 # if the image is too dark or too bright, turn on equalize to find enough features
74 | fisheye: 0 # if using a fisheye camera, turn this on. A circle mask will be loaded to remove noisy edge points
75 |
76 | #optimization parameters
77 | max_solver_time: 0.04 # max solver iteration time (s), to guarantee real time
78 | max_num_iterations: 8 # max solver iterations, to guarantee real time
79 |
80 | #imu parameters The more accurate parameters you provide, the better performance
81 | acc_n: 0.1 # accelerometer measurement noise standard deviation.
82 | gyr_n: 0.01 # gyroscope measurement noise standard deviation.
83 | acc_w: 0.0002 # accelerometer bias random walk noise standard deviation. #0.02
84 | gyr_w: 2.0e-5 # gyroscope bias random walk noise standard deviation. #4.0e-5
85 |
86 | g_norm: 9.805 # gravity magnitude
87 |
88 | #rolling shutter parameters
89 | rolling_shutter: 1 # 0: global shutter camera, 1: rolling shutter camera
90 | rolling_shutter_tr: 0.033 # unit: s. rolling shutter read out time per frame (from data sheet)
91 |
92 | #loop closure parameters
93 | loop_closure: 0 # start loop closure
94 | fast_relocalization: 0 # useful in real-time and large project
95 | load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
96 | pose_graph_save_path: "/home/nrosliu/DynamicVINS-Atlas/src/Dynamic_VINS/output/pose_graph" # save and load path
97 |
98 | #visualization parameters
99 | save_image: 0 # save images in the pose graph for visualization purposes; you can disable this function by setting 0
100 | visualize_imu_forward: 0 # output IMU forward propagation to achieve low-latency and high-frequency results
101 | visualize_camera_size: 0.4 # size of camera marker in RVIZ
102 |
103 | # For now, only animate objects are considered
104 | dynamic_label: ["person", "cat", "dog", "bicycle", "car","bus"]
105 |
106 |
--------------------------------------------------------------------------------
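The extrinsic entries above are stored as OpenCV matrices in the IMU-from-camera convention (imu^R_cam, imu^T_cam). A minimal sketch of reading them back into Eigen types, assuming the file path below; this is not the project's actual parameter-loading code:

```cpp
#include <Eigen/Dense>
#include <opencv2/core/core.hpp>
#include <opencv2/core/eigen.hpp>

int main()
{
    cv::FileStorage fs("config/openloris/openloris_vio.yaml", cv::FileStorage::READ);
    if (!fs.isOpened()) return 1;

    cv::Mat R_cv, t_cv;
    fs["extrinsicRotation"]    >> R_cv;   // 3x3, imu^R_cam
    fs["extrinsicTranslation"] >> t_cv;   // 3x1, imu^T_cam

    Eigen::Matrix3d R_ic;
    Eigen::Vector3d t_ic;
    cv::cv2eigen(R_cv, R_ic);
    cv::cv2eigen(t_cv, t_ic);

    // A point expressed in the camera frame maps to the IMU frame as
    // p_imu = R_ic * p_cam + t_ic.
    return 0;
}
```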
/config/openloris/openloris_vio_atlas.yaml:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 |
3 | num_threads: 6 # 0 Use the max number of threads of your device.
4 | # For some devices, like the HUAWEI Atlas200, the auto-detected max number of threads might not be equivalent to the usable number of threads. (Some cores/threads might be reserved by the system for other usage, e.g. the NPU.)
5 | # x Set a specific number of threads; it is proper that 1 < x < MAX_THREAD.
6 | # For now, this parameter only affects the grid-based detector running in parallel.
7 |
8 |
9 | #common parameters
10 | imu: 1
11 | static_init: 1
12 | imu_topic: "/d400/imu0"
13 | image_topic: "/decompressed_img"
14 | depth_topic: "/d400/aligned_depth_to_color/image_raw" # check that your depth image bandwidth is lower than 30 Mb/s; otherwise compressedDepth is suggested
15 | output_path: "/home/nrosliu/DynamicVINS-Atlas/src/Dynamic_VINS/output"
16 |
17 | # 0.3
--------------------------------------------------------------------------------
/pose_graph/nodelet_plugin.xml:
--------------------------------------------------------------------------------
2 |
4 |
5 | PoseGraphNodelet
6 |
7 |
8 |
--------------------------------------------------------------------------------
/pose_graph/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>pose_graph</name>
4 |   <version>0.0.0</version>
5 |   <description>pose_graph package</description>
6 |
7 |   <maintainer>tony-ws</maintainer>
8 |   <license>TODO</license>
9 |
10 |   <buildtool_depend>catkin</buildtool_depend>
11 |   <build_depend>roscpp</build_depend>
12 |   <run_depend>roscpp</run_depend>
13 |   <build_depend>camera_model</build_depend>
14 |   <run_depend>camera_model</run_depend>
15 |   <build_depend>nodelet</build_depend>
16 |   <run_depend>nodelet</run_depend>
17 |
18 |   <export>
19 |   </export>
20 | </package>
--------------------------------------------------------------------------------
/pose_graph/src/ThirdParty/DBoW/BowVector.cpp:
--------------------------------------------------------------------------------
1 | /**
2 | * File: BowVector.cpp
3 | * Date: March 2011
4 | * Author: Dorian Galvez-Lopez
5 | * Description: bag of words vector
6 | * License: see the LICENSE.txt file
7 | *
8 | */
9 |
10 | #include <iostream>
11 | #include <fstream>
12 | #include <vector>
13 | #include <algorithm>
14 | #include <cmath>
15 |
16 | #include "BowVector.h"
17 |
18 | namespace DBoW2 {
19 |
20 | // --------------------------------------------------------------------------
21 |
22 | BowVector::BowVector(void)
23 | {
24 | }
25 |
26 | // --------------------------------------------------------------------------
27 |
28 | BowVector::~BowVector(void)
29 | {
30 | }
31 |
32 | // --------------------------------------------------------------------------
33 |
34 | void BowVector::addWeight(WordId id, WordValue v)
35 | {
36 | BowVector::iterator vit = this->lower_bound(id);
37 |
38 | if(vit != this->end() && !(this->key_comp()(id, vit->first)))
39 | {
40 | vit->second += v;
41 | }
42 | else
43 | {
44 | this->insert(vit, BowVector::value_type(id, v));
45 | }
46 | }
47 |
48 | // --------------------------------------------------------------------------
49 |
50 | void BowVector::addIfNotExist(WordId id, WordValue v)
51 | {
52 | BowVector::iterator vit = this->lower_bound(id);
53 |
54 | if(vit == this->end() || (this->key_comp()(id, vit->first)))
55 | {
56 | this->insert(vit, BowVector::value_type(id, v));
57 | }
58 | }
59 |
60 | // --------------------------------------------------------------------------
61 |
62 | void BowVector::normalize(LNorm norm_type)
63 | {
64 | double norm = 0.0;
65 | BowVector::iterator it;
66 |
67 | if(norm_type == DBoW2::L1)
68 | {
69 | for(it = begin(); it != end(); ++it)
70 | norm += fabs(it->second);
71 | }
72 | else
73 | {
74 | for(it = begin(); it != end(); ++it)
75 | norm += it->second * it->second;
76 | norm = sqrt(norm);
77 | }
78 |
79 | if(norm > 0.0)
80 | {
81 | for(it = begin(); it != end(); ++it)
82 | it->second /= norm;
83 | }
84 | }
85 |
86 | // --------------------------------------------------------------------------
87 |
88 | std::ostream& operator<< (std::ostream &out, const BowVector &v)
89 | {
90 | BowVector::const_iterator vit;
91 | std::vector<unsigned int>::const_iterator iit;
92 | unsigned int i = 0;
93 | const unsigned int N = v.size();
94 | for(vit = v.begin(); vit != v.end(); ++vit, ++i)
95 | {
96 | out << "<" << vit->first << ", " << vit->second << ">";
97 |
98 | if(i < N-1) out << ", ";
99 | }
100 | return out;
101 | }
102 |
103 | // --------------------------------------------------------------------------
104 |
105 | void BowVector::saveM(const std::string &filename, size_t W) const
106 | {
107 | std::fstream f(filename.c_str(), std::ios::out);
108 |
109 | WordId last = 0;
110 | BowVector::const_iterator bit;
111 | for(bit = this->begin(); bit != this->end(); ++bit)
112 | {
113 | for(; last < bit->first; ++last)
114 | {
115 | f << "0 ";
116 | }
117 | f << bit->second << " ";
118 |
119 | last = bit->first + 1;
120 | }
121 | for(; last < (WordId)W; ++last)
122 | f << "0 ";
123 |
124 | f.close();
125 | }
126 |
127 | // --------------------------------------------------------------------------
128 |
129 | } // namespace DBoW2
130 |
131 |
--------------------------------------------------------------------------------
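A small sketch of the accumulation and normalization behaviour implemented above; the word ids and weights are made up:

```cpp
#include <iostream>
#include "BowVector.h"

int main()
{
    DBoW2::BowVector v;

    v.addWeight(3, 0.5);       // accumulates weights per word id
    v.addWeight(3, 0.25);      // id 3 now holds 0.75
    v.addIfNotExist(7, 1.0);   // inserted
    v.addIfNotExist(7, 9.0);   // ignored: id 7 already present

    v.normalize(DBoW2::L1);    // entries now sum to 1 (0.75/1.75 and 1.0/1.75)

    std::cout << v << std::endl;   // "<3, 0.428571>, <7, 0.571429>"
    return 0;
}
```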
/pose_graph/src/ThirdParty/DBoW/BowVector.h:
--------------------------------------------------------------------------------
1 | /**
2 | * File: BowVector.h
3 | * Date: March 2011
4 | * Author: Dorian Galvez-Lopez
5 | * Description: bag of words vector
6 | * License: see the LICENSE.txt file
7 | *
8 | */
9 |
10 | #ifndef __D_T_BOW_VECTOR__
11 | #define __D_T_BOW_VECTOR__
12 |
13 | #include <iostream>
14 | #include <map>