├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.md ├── _unit_test_slam_ransac.cpp ├── ba_stats.hpp ├── bow_index.cpp ├── bow_index.hpp ├── bundle_adjuster.cpp ├── bundle_adjuster.hpp ├── design-document.pdf ├── feature_detector.cpp ├── feature_detector.hpp ├── feature_search.cpp ├── feature_search.hpp ├── id.cpp ├── id.hpp ├── image_pyramid.cpp ├── image_pyramid.hpp ├── key_point.hpp ├── keyframe.cpp ├── keyframe.hpp ├── keyframe_matcher.cpp ├── keyframe_matcher.hpp ├── loop_closer.cpp ├── loop_closer.hpp ├── loop_closer_stats.hpp ├── loop_ransac.cpp ├── loop_ransac.hpp ├── map_point.cpp ├── map_point.hpp ├── mapdb.cpp ├── mapdb.hpp ├── mapper.cpp ├── mapper.hpp ├── mapper_helpers.cpp ├── mapper_helpers.hpp ├── opencv_viewer_data_publisher.cpp ├── opencv_viewer_data_publisher.hpp ├── openvslam ├── README.md ├── essential_solver.cc ├── essential_solver.h ├── match_angle_checker.h ├── match_base.h ├── orb_point_pairs.h ├── random_array.cc ├── random_array.h └── trigonometric.h ├── optimize_transform.cpp ├── optimize_transform.hpp ├── orb_extractor.cpp ├── orb_extractor.hpp ├── relocation.cpp ├── relocation.hpp ├── serialization.hpp ├── slam_implementation.cpp ├── slam_implementation.hpp ├── slam_viewer.cpp ├── slam_viewer.hpp ├── static_settings.cpp ├── static_settings.hpp ├── viewer_data_publisher.cpp └── viewer_data_publisher.hpp /.gitignore: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | *.d 3 | 4 | # Compiled Object files 5 | *.slo 6 | *.lo 7 | *.o 8 | *.obj 9 | 10 | # Precompiled Headers 11 | *.gch 12 | *.pch 13 | 14 | # Compiled Dynamic libraries 15 | *.so 16 | *.dylib 17 | *.dll 18 | 19 | # Fortran module files 20 | *.mod 21 | *.smod 22 | 23 | # Compiled Static libraries 24 | *.lai 25 | *.la 26 | *.a 27 | *.lib 28 | 29 | # Executables 30 | *.exe 31 | *.out 32 | *.app 33 | 34 | # Apple stuff 35 | .DS_Store 36 | -------------------------------------------------------------------------------- 
/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0) 2 | project(slam) 3 | 4 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O2") 5 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2") 6 | 7 | set(BUILD_VISUALIZATIONS OFF CACHE BOOL "Build visualizations") 8 | add_definitions(-DUSE_DBOW2) 9 | 10 | set(SLAM_SOURCE_FILES 11 | slam_implementation.cpp 12 | mapdb.cpp 13 | mapper.cpp 14 | mapper_helpers.cpp 15 | bundle_adjuster.cpp 16 | keyframe.cpp 17 | map_point.cpp 18 | loop_ransac.cpp 19 | feature_search.cpp 20 | keyframe_matcher.cpp 21 | bow_index.cpp 22 | loop_closer.cpp 23 | relocation.cpp 24 | viewer_data_publisher.cpp 25 | optimize_transform.cpp 26 | id.cpp 27 | static_settings.cpp 28 | orb_extractor.cpp 29 | feature_detector.cpp 30 | image_pyramid.cpp 31 | # remaining unmodified OpenVSLAM files 32 | openvslam/essential_solver.cc 33 | openvslam/random_array.cc) 34 | 35 | if (BUILD_VISUALIZATIONS) 36 | if (APPLE) 37 | find_package(GLEW 2.0 REQUIRED) 38 | list(APPEND EXTRA_INCLUDES ${GLEW_INCLUDE_DIR}) 39 | list(APPEND EXTRA_LIBS ${GLEW_LIBRARY}) 40 | endif() 41 | list(APPEND SLAM_SOURCE_FILES 42 | slam_viewer.cpp 43 | opencv_viewer_data_publisher.cpp) 44 | endif() 45 | 46 | add_library(slam ${SLAM_SOURCE_FILES}) 47 | 48 | # libmetis and suitesparseconfig have to be specified here due to linking 49 | # issues caused by dropping GPL components from g2o / OpenVSLAM. Could be 50 | # fixed properly later 51 | target_include_directories(slam PUBLIC "." 
${EXTRA_INCLUDES}) 52 | target_link_libraries(slam mobile-cv-suite::core mobile-cv-suite::slam parameters ${EXTRA_LIBS}) 53 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VIO-SLAM Module 2 | 3 | State-of-the-art SLAM methods are often difficult to grasp: their complexity, unexpectedly tight couplings, and leaky abstractions make it hard to identify the main parts of the pipeline. This repository contains a SLAM module implementation that tries to coherently include the relevant aspects of the SLAM pipeline. 4 | 5 | See the [design-document.pdf](design-document.pdf) for details. 6 | 7 | ## Contributors 8 | 9 | Authors and contributors at Aalto University: Otto Seiskari, Pekka Rantalankila, Arno Solin, Jerry Ylilammi, Juho Kannala, Johan Jern 10 | 11 | ## License 12 | 13 | This SLAM module is based on OpenVSLAM, which was initially claimed by its authors to be licensed under a BSD-style permissive license, but was later taken offline due to a possible GPL-licensing violation in connection with the ORB-SLAM codebase. Therefore, this module is not suitable for commercial use; see the file `openvslam/README.md` for more details.
14 | -------------------------------------------------------------------------------- /_unit_test_slam_ransac.cpp: -------------------------------------------------------------------------------- 1 | #include "catch2/catch.hpp" 2 | 3 | #ifdef USE_SLAM 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include "loop_ransac.hpp" 11 | 12 | 13 | TEST_CASE("computeSim3") { 14 | Eigen::Matrix3d pts1; 15 | pts1 << 3.40188, 2.9844, -1.64777, 16 | -1.05617, 4.11647, 2.6823, 17 | 2.83099, -3.02449, -2.22225; 18 | 19 | Eigen::Matrix3d rotZ = (Eigen::AngleAxisd(0.653 * M_PI, Eigen::Vector3d::UnitX()) 20 | * Eigen::AngleAxisd(-1.02 * M_PI, Eigen::Vector3d::UnitY()) 21 | * Eigen::AngleAxisd(0.13 * M_PI, Eigen::Vector3d::UnitZ())) 22 | .toRotationMatrix(); 23 | 24 | Eigen::Vector3d trans(3.13321, -1.05617, 2.83099); 25 | 26 | Eigen::Matrix3d pts2 = (rotZ * pts1).colwise() + trans; 27 | 28 | Eigen::Matrix3d pts3 = (rotZ.transpose() * pts2).colwise() - rotZ.transpose() * trans; 29 | CAPTURE(pts1); 30 | CAPTURE(pts2); 31 | CAPTURE(pts3); 32 | 33 | Eigen::Matrix3d rotRes; 34 | Eigen::Vector3d transRes; 35 | float scaleRes; 36 | slam::computeSim3(pts1, pts2, rotRes, transRes, scaleRes); 37 | 38 | CAPTURE(rotRes); 39 | CAPTURE(rotZ); 40 | double diff = (rotRes - rotZ).squaredNorm(); 41 | REQUIRE(diff < 0.001); 42 | 43 | CAPTURE(transRes); 44 | CAPTURE(trans); 45 | REQUIRE((transRes - trans).norm() < 0.001); 46 | 47 | CAPTURE(scaleRes); 48 | REQUIRE(scaleRes == 1); 49 | } 50 | 51 | TEST_CASE("computeRotZ") { 52 | Eigen::Matrix3d pts1; 53 | pts1 << 3.40188, 2.9844, -1.64777, 54 | -1.05617, 4.11647, 2.6823, 55 | 2.83099, -3.02449, -2.22225; 56 | 57 | double trueRad = 0.653 * M_PI; 58 | Eigen::Matrix3d rotZ = Eigen::AngleAxisd(trueRad, Eigen::Vector3d::UnitZ()).toRotationMatrix(); 59 | 60 | Eigen::Vector3d trans(3.13321, -1.05617, 2.83099); 61 | 62 | Eigen::Matrix3d pts2 = (rotZ * pts1).colwise() + trans; 63 | 64 | Eigen::Matrix3d pts3 = (rotZ.transpose() * pts2).colwise() - 
rotZ.transpose() * trans; 65 | CAPTURE(pts1); 66 | CAPTURE(pts2); 67 | CAPTURE(pts3); 68 | 69 | Eigen::Matrix3d rotRes; 70 | Eigen::Vector3d transRes; 71 | float scaleRes; 72 | slam::computeRotZ(pts1, pts2, rotRes, transRes, scaleRes); 73 | 74 | CAPTURE(rotRes); 75 | CAPTURE(rotZ); 76 | double diff = (rotRes - rotZ).squaredNorm(); 77 | REQUIRE(diff < 0.001); 78 | 79 | CAPTURE(transRes); 80 | CAPTURE(trans); 81 | REQUIRE((transRes - trans).norm() < 0.001); 82 | 83 | CAPTURE(scaleRes); 84 | REQUIRE(scaleRes == 1); 85 | } 86 | 87 | TEST_CASE("computeRotZ scale changed") { 88 | Eigen::Matrix3d pts1; 89 | pts1 << 3.40188, 2.9844, -1.64777, 90 | -1.05617, 4.11647, 2.6823, 91 | 2.83099, -3.02449, -2.22225; 92 | 93 | double trueRad = 0.653 * M_PI; 94 | Eigen::Matrix3d rotZ = Eigen::AngleAxisd(trueRad, Eigen::Vector3d::UnitZ()).toRotationMatrix(); 95 | 96 | Eigen::Vector3d trans(3.13321, -1.05617, 2.83099); 97 | 98 | float scale = 1.3211; 99 | 100 | Eigen::Matrix3d pts2 = (scale * rotZ * pts1).colwise() + trans; 101 | 102 | Eigen::Matrix3d pts3 = (rotZ.transpose() * pts2).colwise() - rotZ.transpose() * trans; 103 | CAPTURE(pts1); 104 | CAPTURE(pts2); 105 | CAPTURE(pts3); 106 | 107 | Eigen::Matrix3d rotRes; 108 | Eigen::Vector3d transRes; 109 | float scaleRes; 110 | slam::computeRotZ(pts1, pts2, rotRes, transRes, scaleRes); 111 | 112 | CAPTURE(rotRes); 113 | CAPTURE(rotZ); 114 | double diff = (rotRes - rotZ).squaredNorm(); 115 | REQUIRE(diff < 0.001); 116 | 117 | CAPTURE(transRes); 118 | CAPTURE(trans); 119 | REQUIRE((transRes - trans).norm() < 0.001); 120 | 121 | CAPTURE(scaleRes); 122 | REQUIRE(scaleRes == scale); 123 | } 124 | 125 | TEST_CASE("computeRotZ random cases") { 126 | // std::random_device rd; 127 | std::mt19937 rng(3249); 128 | std::uniform_real_distribution dist(-1, 1); 129 | 130 | for (int i = 0; i < 100; i++) { 131 | Eigen::Matrix3d pts1 = Eigen::Matrix3d::Random() * 5; 132 | 133 | double trueRad = dist(rng) * M_PI; 134 | Eigen::Matrix3d rotZ = 
Eigen::AngleAxisd(trueRad, Eigen::Vector3d::UnitZ()).toRotationMatrix(); 135 | 136 | Eigen::Vector3d trans = Eigen::Vector3d::Random() * 5; 137 | 138 | Eigen::Matrix3d pts2 = (rotZ * pts1).colwise() + trans; 139 | 140 | Eigen::Matrix3d rotRes; 141 | Eigen::Vector3d transRes; 142 | float scaleRes; 143 | slam::computeRotZ(pts1, pts2, rotRes, transRes, scaleRes); 144 | 145 | CAPTURE(rotRes); 146 | CAPTURE(rotZ); 147 | double diff = (rotRes - rotZ).squaredNorm(); 148 | REQUIRE(diff < 0.001); 149 | 150 | CAPTURE(transRes); 151 | CAPTURE(trans); 152 | REQUIRE((transRes - trans).norm() < 0.001); 153 | 154 | CAPTURE(scaleRes); 155 | REQUIRE(scaleRes == 1); 156 | } 157 | } 158 | 159 | TEST_CASE("computeRotZ random cases + noise") { 160 | // std::random_device rd; 161 | std::mt19937 rng(2432); 162 | std::uniform_real_distribution dist(-1, 1); 163 | 164 | for (int i = 0; i < 100; i++) { 165 | Eigen::Matrix3d pts1 = Eigen::Matrix3d::Random() * 5; 166 | 167 | double trueRad = dist(rng) * M_PI; 168 | Eigen::Matrix3d rotZ = Eigen::AngleAxisd( 169 | trueRad + dist(rng) * 0.05, 170 | Eigen::Vector3d::UnitZ() 171 | ).toRotationMatrix(); 172 | 173 | Eigen::Vector3d trans = Eigen::Vector3d::Random() * 5; 174 | 175 | Eigen::Matrix3d pts2 = (rotZ * pts1).colwise() + (trans + Eigen::Vector3d::Random() * 0.1); 176 | 177 | Eigen::Matrix3d rotRes; 178 | Eigen::Vector3d transRes; 179 | float scaleRes; 180 | slam::computeRotZ(pts1, pts2, rotRes, transRes, scaleRes); 181 | 182 | CAPTURE(rotRes); 183 | CAPTURE(rotZ); 184 | double diff = (rotRes - rotZ).squaredNorm(); 185 | REQUIRE(diff < 0.2); 186 | 187 | CAPTURE(transRes); 188 | CAPTURE(trans); 189 | REQUIRE((transRes - trans).norm() < 0.2); 190 | 191 | CAPTURE(scaleRes); 192 | REQUIRE(scaleRes == 1); 193 | } 194 | } 195 | 196 | #endif 197 | -------------------------------------------------------------------------------- /ba_stats.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_BA_STATS_H_ 
2 | #define SLAM_BA_STATS_H_ 3 | 4 | #include "../util/logging.hpp" 5 | 6 | namespace slam { 7 | namespace { 8 | 9 | class BaStats { 10 | public: 11 | enum class Ba { 12 | NONE, 13 | POSE, 14 | NEIGHBOR, 15 | LOCAL, 16 | GLOBAL, 17 | // Marker for iteration. 18 | LAST 19 | }; 20 | 21 | private: 22 | const int last = static_cast(Ba::LAST); 23 | bool enabled; 24 | 25 | std::map bas; 26 | std::map totalBas; 27 | 28 | public: 29 | BaStats(bool enabled) : 30 | enabled(enabled) 31 | { 32 | if (!enabled) return; 33 | for (int i = 0; i < last; ++i) { 34 | Ba l = static_cast(i); 35 | bas.emplace(l, 0); 36 | totalBas.emplace(l, 0); 37 | } 38 | } 39 | 40 | void update(Ba ba) { 41 | if (!enabled) return; 42 | ++bas.at(ba); 43 | } 44 | 45 | void finishFrame() { 46 | if (!enabled) return; 47 | 48 | int count = 0; 49 | for (int i = 0; i < last; ++i) { 50 | Ba t = static_cast(i); 51 | totalBas.at(t) += bas.at(t); 52 | count += bas.at(t); 53 | } 54 | if (count == 0) { 55 | bas.at(Ba::NONE) += 1; 56 | totalBas.at(Ba::NONE) += 1; 57 | } 58 | 59 | const char *names[6] = { 60 | "none ", 61 | "pose ", 62 | "neighbor ", 63 | "local ", 64 | "global ", 65 | "TOTAL ", 66 | }; 67 | log_info(""); 68 | log_info("TYPE \tNUM\tTOTAL"); 69 | int sum = 0; 70 | int totalSum = 0; 71 | for (int i = 0; i < last; ++i) { 72 | Ba t = static_cast(i); 73 | sum += bas.at(t); 74 | totalSum += totalBas.at(t); 75 | log_info("%s\t%d\t%d", names[i], bas.at(t), totalBas.at(t)); 76 | } 77 | log_info("%s\t%d\t%d", names[last], sum, totalSum); 78 | 79 | for (int i = 0; i < last; ++i) { 80 | Ba t = static_cast(i); 81 | bas.at(t) = 0; 82 | } 83 | } 84 | }; 85 | 86 | } // anonymous namespace 87 | } // namespace slam 88 | 89 | #endif // SLAM_BA_STATS_H_ 90 | -------------------------------------------------------------------------------- /bow_index.cpp: -------------------------------------------------------------------------------- 1 | #include "bow_index.hpp" 2 | 3 | #include "keyframe.hpp" 4 | #include "key_point.hpp" 
5 | #include "mapdb.hpp" 6 | #include "../util/util.hpp" 7 | #include "../util/logging.hpp" 8 | 9 | namespace slam { 10 | namespace { 11 | 12 | BowVocabulary createBowVocabulary(std::string vocabularyPath) { 13 | BowVocabulary bowVocabulary; 14 | assert(!vocabularyPath.empty()); 15 | const std::string inputSuffix = vocabularyPath.substr(util::removeFileSuffix(vocabularyPath).size()); 16 | if (inputSuffix == ".txt") { 17 | log_debug("Loading BoW vocabulary from TEXT file %s", vocabularyPath.c_str()); 18 | bowVocabulary.loadFromTextFile(vocabularyPath); 19 | } 20 | else { 21 | if (inputSuffix != ".dbow2") { 22 | log_warn("Expected extension `.txt` or `.dbow2` for vocabulary file."); 23 | } 24 | log_debug("Loading BoW vocabulary from BINARY file %s", vocabularyPath.c_str()); 25 | bowVocabulary.loadFromBinaryFile(vocabularyPath); 26 | } 27 | return bowVocabulary; 28 | } 29 | 30 | } // anonymous namespace 31 | 32 | BowIndex::BowIndex(const odometry::ParametersSlam ¶meters) : 33 | parameters(parameters), 34 | index(), 35 | bowVocabulary(createBowVocabulary(parameters.vocabularyPath)) 36 | { 37 | // log_debug("BoW tree node count %u, levels %u, branching factor %u.", 38 | // bowVocabulary.size(), 39 | // bowVocabulary.getDepthLevels(), 40 | // bowVocabulary.getBranchingFactor()); 41 | index.resize(bowVocabulary.size()); 42 | } 43 | 44 | void BowIndex::add(const Keyframe &keyframe, MapId mapId) { 45 | for (const auto& word : keyframe.shared->bowVec) { 46 | index[word.first].push_back(MapKf { mapId, keyframe.id }); 47 | } 48 | } 49 | 50 | void BowIndex::remove(MapKf mapKf) { 51 | for (auto &l : index) { 52 | for (auto it = l.begin(); it != l.end(); ) { 53 | if (*it == mapKf) it = l.erase(it); 54 | else it++; 55 | } 56 | } 57 | } 58 | 59 | void BowIndex::transform( 60 | const KeyPointVector &keypoints, 61 | DBoW2::BowVector &bowVector, 62 | DBoW2::FeatureVector &bowFeatureVector 63 | ) { 64 | // convert to descVector for DBoW2 65 | unsigned idx = 0; 66 | 
tmp.descVector.clear(); 67 | for (const auto &kp : keypoints) { 68 | const auto &desc = kp.descriptor; 69 | const unsigned sz = desc.size() * sizeof(desc[0]); 70 | if (idx >= tmp.cvMatStore.size()) { 71 | cv::Mat newMat(1, sz, CV_8U); 72 | tmp.cvMatStore.push_back(newMat); 73 | assert(tmp.cvMatStore.size() == idx + 1); 74 | } 75 | cv::Mat m = tmp.cvMatStore.at(idx); 76 | const auto *descAsUint8 = reinterpret_cast(desc.data()); 77 | for (unsigned i = 0; i < sz; ++i) m.data[i] = descAsUint8[i]; 78 | tmp.descVector.push_back(m); 79 | idx++; 80 | } 81 | 82 | // The vocabulary file we typically use has 6 levels with the branching factor of 10. 83 | // That means the number of words at the bottom level is roughly 10^6. By going 4 84 | // levels up, we thus reduce number of nodes to about 10^(6-4) = 100 (size() of 85 | // `bowFeatureVector`). 86 | const int levelsUp = 4; 87 | bowVocabulary.transform( 88 | tmp.descVector, 89 | bowVector, 90 | bowFeatureVector, 91 | levelsUp 92 | ); 93 | } 94 | 95 | std::vector BowIndex::getBowSimilar( 96 | const MapDB &mapDB, 97 | const Atlas &atlas, 98 | const Keyframe &kf 99 | ) { 100 | MapKf currentMapKf { MapId(CURRENT_MAP_ID), kf.id }; 101 | std::map wordsInCommon; 102 | 103 | const auto& bowVec = kf.shared->bowVec; 104 | 105 | for (const auto& pair : bowVec) { 106 | DBoW2::WordId wordId = pair.first; 107 | 108 | // If not in the BoW database, continue 109 | if (index.at(wordId).empty()) { 110 | continue; 111 | } 112 | // Get the keyframes which share the word (node ID) with the query. 
113 | const std::list &inNode = index.at(wordId); 114 | // For each keyframe, increase shared word number one by one 115 | for (MapKf mapKf : inNode) { 116 | if (mapKf == currentMapKf) { 117 | continue; 118 | } 119 | assert(getMapWithId(mapKf.mapId, mapDB, atlas).keyframes.count(mapKf.kfId)); 120 | 121 | // Initialize if not in num_common_words 122 | if (!wordsInCommon.count(mapKf)) { 123 | wordsInCommon[mapKf] = 0; 124 | } 125 | // Count up the number of words 126 | ++wordsInCommon.at(mapKf); 127 | } 128 | } 129 | 130 | if (wordsInCommon.empty()) { 131 | return {}; 132 | } 133 | 134 | // TODO When searching loop closure candidates, we want to use only the current map, 135 | // but now the code sets `maxInCommon` and `minScore` based on the best match 136 | // across all maps. 137 | unsigned int maxInCommon = 0; 138 | for (const auto &word : wordsInCommon) { 139 | if (word.second > maxInCommon) maxInCommon = word.second; 140 | } 141 | 142 | // Constrain search around the best word count match. 143 | const auto minInCommon = 144 | static_cast(parameters.bowMinInCommonRatio * static_cast(maxInCommon)); 145 | 146 | std::vector similar; 147 | for (const auto &word : wordsInCommon) { 148 | MapId mapId = word.first.mapId; 149 | KfId kfId = word.first.kfId; 150 | 151 | if (word.second > minInCommon) { 152 | const MapDB &m = getMapWithId(mapId, mapDB, atlas); 153 | float score = bowVocabulary.score(kf.shared->bowVec, m.keyframes.at(kfId)->shared->bowVec); 154 | similar.push_back(BowSimilar { 155 | .mapKf = MapKf { 156 | .mapId = mapId, 157 | .kfId = kfId, 158 | }, 159 | .score = score, 160 | }); 161 | } 162 | } 163 | 164 | if (similar.empty()) return {}; 165 | 166 | using vt = decltype(similar)::value_type; 167 | std::sort(similar.begin(), similar.end(), [](const vt &p1, const vt &p2) { return p1.score > p2.score ; }); 168 | 169 | // Return all keyframes with score close enough to the best score. 
170 | float minScore = similar[0].score * parameters.bowScoreRatio; 171 | auto cut = std::find_if(similar.begin(), similar.end(), [&minScore](const vt &p) { return p.score < minScore; }); 172 | if (cut != similar.end()) { 173 | similar.erase(cut, similar.end()); 174 | } 175 | return similar; 176 | } 177 | 178 | bool operator == (const MapKf &lhs, const MapKf &rhs) { 179 | return lhs.mapId == rhs.mapId && lhs.kfId == rhs.kfId; 180 | } 181 | 182 | // Needed for std::map. 183 | bool operator < (const MapKf &lhs, const MapKf &rhs) { 184 | if (lhs.mapId == rhs.mapId) { 185 | return lhs.kfId < rhs.kfId; 186 | } 187 | return lhs.mapId < rhs.mapId; 188 | } 189 | 190 | } // namespace slam 191 | -------------------------------------------------------------------------------- /bow_index.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_BOW_INDEX_HPP 2 | #define SLAM_BOW_INDEX_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include "id.hpp" 12 | #include "key_point.hpp" 13 | #include "../odometry/parameters.hpp" 14 | 15 | namespace slam { 16 | 17 | class Keyframe; 18 | class MapDB; 19 | 20 | using BowVocabulary = DBoW2::TemplatedVocabulary; 21 | using Atlas = std::vector; 22 | 23 | struct MapKf { 24 | MapId mapId; 25 | KfId kfId; 26 | }; 27 | 28 | bool operator == (const MapKf &lhs, const MapKf &rhs); 29 | bool operator < (const MapKf &lhs, const MapKf &rhs); 30 | 31 | struct BowSimilar { 32 | MapKf mapKf; 33 | float score; 34 | }; 35 | 36 | class BowIndex { 37 | public: 38 | BowIndex(const odometry::ParametersSlam ¶meter); 39 | 40 | void add(const Keyframe &keyframe, MapId mapId); 41 | 42 | void remove(MapKf mapKf); 43 | 44 | void transform( 45 | const KeyPointVector &keypoints, 46 | DBoW2::BowVector &bowVector, 47 | DBoW2::FeatureVector &bowFeatureVector 48 | ); 49 | 50 | // Get all keyframes similar to a query keyframe. 
51 | std::vector getBowSimilar(const MapDB &mapDB, const Atlas &atlas, const Keyframe &kf); 52 | 53 | private: 54 | const odometry::ParametersSlam ¶meters; 55 | 56 | // Called inverse index in the DBoW paper. 57 | std::vector> index; 58 | struct Workspace { 59 | std::vector descVector, cvMatStore; 60 | } tmp; 61 | 62 | 63 | BowVocabulary bowVocabulary; 64 | }; 65 | 66 | } // namespace slam 67 | 68 | #endif // SLAM_BOW_INDEX_HPP 69 | -------------------------------------------------------------------------------- /bundle_adjuster.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_BUNDLE_ADJUSTER_HPP 2 | #define SLAM_BUNDLE_ADJUSTER_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include "id.hpp" 8 | #include "static_settings.hpp" 9 | #include "ba_stats.hpp" 10 | 11 | namespace slam { 12 | 13 | class Keyframe; 14 | class MapDB; 15 | 16 | struct WorkspaceBA { 17 | std::set localMpIds; 18 | std::set> localKfIds; 19 | 20 | BaStats baStats; 21 | 22 | WorkspaceBA(bool enableBaStats) : 23 | baStats(enableBaStats) 24 | {} 25 | }; 26 | 27 | /** 28 | * Do a local bundle adjustment 29 | */ 30 | std::set localBundleAdjust( 31 | Keyframe &keyframe, 32 | WorkspaceBA &workspace, 33 | MapDB &mapDB, 34 | int problemMaxSize, 35 | const StaticSettings &settings 36 | ); 37 | 38 | /** 39 | * Do a local bundle adjustment for a single pose 40 | */ 41 | bool poseBundleAdjust( 42 | Keyframe &keyframe, 43 | MapDB &mapDB, 44 | const StaticSettings &settings 45 | ); 46 | 47 | void globalBundleAdjust( 48 | KfId currentKfId, 49 | MapDB &mapDB, 50 | const StaticSettings &settings 51 | ); 52 | 53 | } 54 | 55 | #endif 56 | -------------------------------------------------------------------------------- /design-document.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AaltoML/SLAM-module/87fbaab161f7652084300830fdf1bded3ed2c1ae/design-document.pdf 
-------------------------------------------------------------------------------- /feature_detector.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include "feature_detector.hpp" 9 | #include "static_settings.hpp" 10 | #include "image_pyramid.hpp" 11 | #include "../tracker/image.hpp" 12 | #include "../odometry/parameters.hpp" 13 | #include "../util/logging.hpp" 14 | #include "../tracker/feature_detector.hpp" 15 | 16 | namespace slam { 17 | namespace { 18 | class FeatureDetectorImplementation : public FeatureDetector { 19 | public: 20 | std::size_t detect(ImagePyramid &imagePyramid, std::vector &keypointsPerLevel) final { 21 | computeKeypoints(keypointsPerLevel, imagePyramid); 22 | 23 | unsigned int num_keypts = 0; 24 | for (unsigned int level = 0; level < parameters.orbScaleLevels; ++level) { 25 | num_keypts += keypointsPerLevel.at(level).size(); 26 | } 27 | return num_keypts; 28 | } 29 | 30 | FeatureDetectorImplementation(const StaticSettings &settings, tracker::Image &modelImage) : 31 | parameters(settings.parameters.slam), 32 | gpuProcessor(modelImage.getProcessor()), 33 | gpuImgFactory(modelImage.getImageFactory()), 34 | gpuOpFactory(modelImage.getOperationsFactory()) 35 | { 36 | for (auto maxKps : settings.maxNumberOfKeypointsPerLevel()) { 37 | // copy & change 38 | odometry::ParametersTracker params = settings.parameters.tracker; 39 | params.maxTracks = maxKps; 40 | const auto detector = settings.parameters.slam.slamFeatureDetector; 41 | if (!detector.empty()) params.featureDetector = detector; 42 | // subpixel adjustment would happen elsewhere, no need to disable 43 | featureDetectorParams.push_back(params); 44 | } 45 | 46 | cpuProcessor = accelerated::Processor::createInstant(); 47 | cpuImgFactory = accelerated::cpu::Image::createFactory(); 48 | cpuOpFactory = accelerated::cpu::operations::createFactory(*cpuProcessor); 49 | } 50 | 51 | 
private: 52 | const odometry::ParametersSlam ¶meters; 53 | accelerated::Processor &gpuProcessor; 54 | accelerated::Image::Factory &gpuImgFactory; 55 | accelerated::operations::StandardFactory &gpuOpFactory; 56 | 57 | std::unique_ptr cpuProcessor; 58 | std::unique_ptr cpuImgFactory; 59 | std::unique_ptr cpuOpFactory; 60 | 61 | std::vector> featureDetectors; 62 | std::vector featureDetectorParams; 63 | 64 | struct Workspace { 65 | std::vector> featurePointsPerLevel; 66 | } work; 67 | 68 | void computeKeypoints(std::vector& keypoints, ImagePyramid &pyramid) { 69 | keypoints.resize(parameters.orbScaleLevels); 70 | work.featurePointsPerLevel.resize(parameters.orbScaleLevels); 71 | 72 | const bool isGpu = pyramid.isGpu(); 73 | for (unsigned int level = 0; level < parameters.orbScaleLevels; ++level) { 74 | auto &lev = isGpu ? pyramid.getGpuLevel(level) : pyramid.getLevel(level); 75 | auto ¶ms = featureDetectorParams.at(level); 76 | 77 | if (level >= featureDetectors.size()) { 78 | // from our GFTT detector 79 | const auto minDim = std::min(lev.width, lev.height); 80 | const double su = minDim / 720.0 * 0.8; 81 | const int minDist = std::floor(params.gfttMinDistance * su + 0.5); 82 | params.gfttMinDistance = minDist; 83 | 84 | log_debug("Lazy-initializing %s feature detector for ORB level %d (%d x %d), max features: %d, minDist: %d", 85 | params.featureDetector.c_str(), int(level), lev.width, lev.height, params.maxTracks, minDist); 86 | 87 | // Note: with a GPU image pyramid, this generates dozens of small textures since each 88 | // detection level has its own set of workspace textures for the GPU feature detection. 89 | featureDetectors.push_back( 90 | tracker::FeatureDetector::build(lev.width, lev.height, 91 | isGpu ? gpuProcessor : *cpuProcessor, 92 | isGpu ? gpuImgFactory : *cpuImgFactory, 93 | isGpu ? 
gpuOpFactory : *cpuOpFactory, 94 | params)); 95 | } 96 | 97 | auto fut = featureDetectors.at(level)->detect(lev, 98 | work.featurePointsPerLevel.at(level), {}, params.gfttMinDistance); 99 | 100 | if (level == parameters.orbScaleLevels - 1) fut.wait(); 101 | } 102 | 103 | for (unsigned level = 0; level < parameters.orbScaleLevels; ++level) { 104 | // log_debug("detected %zu points at level %d", work.featurePoints.size(), level); 105 | 106 | constexpr unsigned MARGIN = StaticSettings::ORB_PATCH_RADIUS; 107 | constexpr int min_x = MARGIN; 108 | constexpr int min_y = MARGIN; 109 | const auto &lev = isGpu ? pyramid.getGpuLevel(level) : pyramid.getLevel(level); 110 | const int max_x = lev.width - MARGIN; 111 | const int max_y = lev.height - MARGIN; 112 | 113 | auto &keyptsAtLev = keypoints.at(level); 114 | const auto &points = work.featurePointsPerLevel.at(level); 115 | keyptsAtLev.clear(); 116 | keyptsAtLev.reserve(points.size()); 117 | for (const auto& keypt : points) { 118 | const int x = std::round(keypt.x); 119 | const int y = std::round(keypt.y); 120 | // TODO: could support these margins directly in the feature detector 121 | if (x < min_x || y < min_y || x >= max_x || y >= max_y) { 122 | continue; 123 | } 124 | keyptsAtLev.push_back({ 125 | .pt = { 126 | .x = keypt.x, 127 | .y = keypt.y 128 | }, 129 | .angle = 0, // computed elsewhere 130 | .octave = int(level), 131 | }); 132 | } 133 | } 134 | } 135 | }; 136 | } 137 | 138 | std::unique_ptr FeatureDetector::build(const StaticSettings &settings, tracker::Image &modelImage) { 139 | return std::unique_ptr(new FeatureDetectorImplementation(settings, modelImage)); 140 | } 141 | 142 | FeatureDetector::~FeatureDetector() = default; 143 | 144 | } 145 | -------------------------------------------------------------------------------- /feature_detector.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_MULTI_FAST_DETECTOR_HPP 2 | #define SLAM_MULTI_FAST_DETECTOR_HPP 3 | 4 | 
#include 5 | #include 6 | 7 | #include "key_point.hpp" 8 | 9 | namespace tracker { struct Image; } 10 | 11 | namespace slam { 12 | struct StaticSettings; 13 | struct ImagePyramid; 14 | 15 | struct FeatureDetector { 16 | static std::unique_ptr build(const StaticSettings &settings, tracker::Image &modelImage); 17 | virtual ~FeatureDetector(); 18 | 19 | /** @return the total number of detected keypoints */ 20 | virtual std::size_t detect( 21 | ImagePyramid &imagePyramid, 22 | std::vector< KeyPointVector > &keypointsPerLevel) = 0; 23 | 24 | }; 25 | } 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /feature_search.cpp: -------------------------------------------------------------------------------- 1 | #include "feature_search.hpp" 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | namespace slam { 8 | class FeatureSearchImplementation : public FeatureSearch { 9 | private: 10 | struct Node { 11 | float x; 12 | float y; 13 | std::size_t keypointIndex; 14 | }; 15 | 16 | // first implementation: keypoints in an array sorted by Y coordinate 17 | // (the Y size of the image is assumed to be larger than X size) 18 | std::vector indexByY; 19 | std::function comparator; 20 | 21 | public: 22 | FeatureSearchImplementation(const KeyPointVector &keypoints) { 23 | for (std::size_t i = 0; i < keypoints.size(); ++i) { 24 | const auto &kp = keypoints[i]; 25 | indexByY.push_back({ kp.pt.x, kp.pt.y, i }); 26 | } 27 | comparator = [](const Node &a, const Node &b) -> bool { 28 | return a.y < b.y; 29 | }; 30 | std::sort(indexByY.begin(), indexByY.end(), comparator); 31 | } 32 | 33 | void getFeaturesAround(float x, float y, float r, std::vector &output) const final { 34 | output.clear(); 35 | 36 | // find Y range begin using binary search 37 | const Node lb { x, y - r, 0 }; 38 | for (auto itr = std::lower_bound(indexByY.begin(), indexByY.end(), lb, comparator); 39 | itr != indexByY.end() && itr->y <= y + r; 40 | ++itr) 41 | { 42 | // 
pick points within correct X bounds 43 | const float dx = x - itr->x; 44 | const float dy = y - itr->y; 45 | if (dx*dx + dy*dy < r*r) 46 | output.push_back(itr->keypointIndex); 47 | } 48 | } 49 | }; 50 | 51 | 52 | std::unique_ptr FeatureSearch::create(const KeyPointVector &kps) { 53 | return std::unique_ptr(new FeatureSearchImplementation(kps)); 54 | } 55 | 56 | } 57 | -------------------------------------------------------------------------------- /feature_search.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_FEATURE_SEARCH_HPP 2 | #define SLAM_FEATURE_SEARCH_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include "key_point.hpp" 8 | 9 | namespace slam { 10 | 11 | /** 12 | * An acceleration data structure for 2D keypoint search. Once constructed 13 | * with a fixed set of 2D points, can find indices of these points within 14 | * a given radius from a given point fast. 15 | */ 16 | class FeatureSearch { 17 | public: 18 | static std::unique_ptr create(const KeyPointVector &keypoints); 19 | virtual ~FeatureSearch() = default; 20 | virtual void getFeaturesAround(float x, float y, float r, std::vector &output) const = 0; 21 | }; 22 | 23 | } 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /id.cpp: -------------------------------------------------------------------------------- 1 | #include "id.hpp" 2 | 3 | #include 4 | 5 | namespace slam { 6 | 7 | #define ID_OP_IMPL(ID, OP) bool operator OP (const ID &lhs, const ID &rhs) { return lhs.v OP rhs.v; } 8 | 9 | ID_OP_IMPL(KfId, ==) 10 | ID_OP_IMPL(KfId, !=) 11 | ID_OP_IMPL(KfId, <) 12 | ID_OP_IMPL(KfId, <=) 13 | ID_OP_IMPL(KfId, >) 14 | ID_OP_IMPL(KfId, >=) 15 | ID_OP_IMPL(MpId, ==) 16 | ID_OP_IMPL(MpId, !=) 17 | ID_OP_IMPL(MpId, <) 18 | ID_OP_IMPL(MpId, >) 19 | ID_OP_IMPL(KpId, <) 20 | ID_OP_IMPL(TrackId, <) 21 | ID_OP_IMPL(MapId, ==) 22 | ID_OP_IMPL(MapId, !=) 23 | ID_OP_IMPL(MapId, <) 24 | 25 | #undef ID_OP_IMPL 26 | 
// Packs the distinct SLAM id spaces (keyframes, map points, "custom"
// vertices) into a single non-overlapping integer range for g2o:
//   [0, mp0)        keyframe vertices (identity mapping)
//   [mp0, custom0)  map point vertices
//   [custom0, ...)  custom vertices
VertexIdConverter::VertexIdConverter(const std::pair<KfId, MpId> &maxIds) {
    // The arguments are maximum used ids, thus need to add one to get length
    // and avoid overlap with next type.
    mp0 = std::get<0>(maxIds).v + 1;
    custom0 = mp0 + std::get<1>(maxIds).v + 1;
}

// Keyframe id -> graph vertex id (identity; must fit in the keyframe range).
int VertexIdConverter::keyframe(KfId kfId) const {
    assert(kfId.v >= 0);
    assert(kfId.v < mp0);
    return kfId.v;
}

// Map point id -> graph vertex id (offset past the keyframe range).
int VertexIdConverter::mapPoint(MpId mpId) const {
    assert(mpId.v >= 0);
    assert(mp0 + mpId.v < custom0);
    return mp0 + mpId.v;
}

// Custom index -> graph vertex id (offset past both id ranges; unbounded).
int VertexIdConverter::custom(int i) const {
    assert(i >= 0);
    return custom0 + i;
}

// Inverse of keyframe(): graph vertex id -> keyframe id.
KfId VertexIdConverter::invKeyframe(int id) const {
    assert(id >= 0 && id < mp0);
    return KfId(id);
}

// Inverse of mapPoint(): graph vertex id -> map point id.
MpId VertexIdConverter::invMapPoint(int id) const {
    assert(id >= mp0 && id < custom0);
    return MpId(id - mp0);
}

// Inverse of custom(): graph vertex id -> custom index.
int VertexIdConverter::invCustom(int i) const {
    assert(i >= custom0);
    return i - custom0;
}
33 | struct IdEqual { 34 | bool operator() (const Id &lhs, const Id &rhs) const { 35 | return lhs.v == rhs.v; 36 | } 37 | }; 38 | 39 | template 40 | using id_unordered_map = std::unordered_map; 41 | 42 | template 43 | using id_unordered_set = std::unordered_set; 44 | */ 45 | 46 | // Keyframe id. 47 | struct KfId : Id { 48 | KfId() : Id() {} 49 | explicit KfId(int v) : Id(v) {} 50 | }; 51 | 52 | // Map point id. 53 | struct MpId : Id { 54 | MpId() : Id() {} 55 | explicit MpId(int v) : Id(v) {} 56 | }; 57 | 58 | // Keypoint id. 59 | struct KpId : Id { 60 | KpId() : Id() {} 61 | explicit KpId(int v) : Id(v) {} 62 | }; 63 | 64 | // Track id. 65 | struct TrackId : Id { 66 | TrackId() : Id() {} 67 | explicit TrackId(int v) : Id(v) {} 68 | }; 69 | 70 | // Map id. 71 | struct MapId : Id { 72 | MapId() : Id() {} 73 | explicit MapId(int v) : Id(v) {} 74 | }; 75 | 76 | const MapId CURRENT_MAP_ID = MapId(1000); 77 | 78 | #define ID_OP(ID, OP) bool operator OP (const ID &lhs, const ID &rhs); 79 | 80 | ID_OP(KfId, ==) 81 | ID_OP(KfId, !=) 82 | ID_OP(KfId, <) 83 | ID_OP(KfId, <=) 84 | ID_OP(KfId, >) 85 | ID_OP(KfId, >=) 86 | ID_OP(MpId, ==) 87 | ID_OP(MpId, !=) 88 | ID_OP(MpId, <) 89 | ID_OP(MpId, >) 90 | ID_OP(KpId, <) 91 | ID_OP(TrackId, <) 92 | ID_OP(MapId, ==) 93 | ID_OP(MapId, !=) 94 | ID_OP(MapId, <) 95 | 96 | #undef ID_OP 97 | 98 | // Helper for managing g2o ids in common BA tasks. 
99 | class VertexIdConverter { 100 | public: 101 | VertexIdConverter(const std::pair &maxIds); 102 | int keyframe(KfId kfId) const; 103 | int mapPoint(MpId mpId) const; 104 | int custom(int i) const; 105 | KfId invKeyframe(int id) const; 106 | MpId invMapPoint(int id) const; 107 | int invCustom(int i) const; 108 | private: 109 | int mp0; 110 | int custom0; 111 | }; 112 | 113 | } // namespace slam 114 | 115 | #endif // SLAM_SLAM_ID_HPP 116 | -------------------------------------------------------------------------------- /image_pyramid.cpp: -------------------------------------------------------------------------------- 1 | #include "image_pyramid.hpp" 2 | #include "static_settings.hpp" 3 | #include "../odometry/parameters.hpp" 4 | #include "../util/logging.hpp" 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | namespace slam { 11 | namespace { 12 | // note: as long as we compute anything from these on the CPU, it makes sense 13 | // to store at least a copy of the results on the CPU side 14 | struct CpuStoredImagePyramid : ImagePyramid { 15 | std::vector pyramid, blurredPyramid; 16 | std::vector> pyramidImgCpu, blurredPyramidImgCpu; 17 | 18 | CpuStoredImagePyramid(int levels) { 19 | pyramid.resize(levels); 20 | blurredPyramid.resize(levels); 21 | pyramidImgCpu.resize(levels); 22 | blurredPyramidImgCpu.resize(levels); 23 | } 24 | 25 | std::size_t numberOfLevels() const final { 26 | return pyramid.size(); 27 | } 28 | 29 | accelerated::Image &getLevel(std::size_t level) final { 30 | if (!pyramidImgCpu.at(level)) { 31 | auto &cvImg = pyramid.at(level); 32 | assert(!cvImg.empty()); 33 | pyramidImgCpu[level] = accelerated::opencv::ref(cvImg); 34 | } 35 | return *pyramidImgCpu.at(level); 36 | } 37 | 38 | accelerated::Image &getBlurredLevel(std::size_t level) final { 39 | if (!blurredPyramidImgCpu.at(level)) { 40 | auto &cvImg = blurredPyramid.at(level); 41 | assert(!cvImg.empty()); 42 | blurredPyramidImgCpu[level] = accelerated::opencv::ref(cvImg); 43 | } 44 | return 
*blurredPyramidImgCpu.at(level); 45 | } 46 | 47 | void debugVisualize(cv::Mat &target) final { 48 | cv::Mat lev0 = pyramid.at(0); 49 | cv::vconcat(lev0, lev0, target); 50 | cv::Mat targetOtherSide(target, cv::Rect(0, lev0.rows, lev0.cols, lev0.rows)); 51 | for (std::size_t i = 0; i < pyramid.size(); ++i) { 52 | auto &level = pyramid.at(i); 53 | level.copyTo(cv::Mat(target, cv::Rect(0, 0, level.cols, level.rows))); 54 | cv::Mat blurTarget(targetOtherSide, cv::Rect(0, 0, level.cols, level.rows)); 55 | blurredPyramid.at(i).copyTo(blurTarget); 56 | } 57 | } 58 | }; 59 | 60 | struct CpuImagePyramid : CpuStoredImagePyramid { 61 | const StaticSettings &settings; 62 | 63 | CpuImagePyramid(const StaticSettings &settings) : 64 | CpuStoredImagePyramid(settings.parameters.slam.orbScaleLevels), 65 | settings(settings) 66 | {} 67 | 68 | void update(tracker::Image &trackerImage) final { 69 | auto image = reinterpret_cast(trackerImage).getOpenCvMat(); 70 | const auto ¶meters = settings.parameters.slam; 71 | 72 | assert(pyramid.size() == parameters.orbScaleLevels); 73 | assert(blurredPyramid.size() == parameters.orbScaleLevels); 74 | 75 | image.copyTo(pyramid.at(0)); 76 | for (unsigned int level = 1; level < parameters.orbScaleLevels; ++level) { 77 | const double scale = settings.scaleFactors.at(level); 78 | const cv::Size size(std::round(image.cols * 1.0 / scale), std::round(image.rows * 1.0 / scale)); 79 | cv::resize(pyramid.at(level - 1), pyramid.at(level), size, 0, 0, cv::INTER_LINEAR); 80 | } 81 | 82 | for (unsigned level = 0; level < parameters.orbScaleLevels; ++level) { 83 | cv::Mat &blurredImage = blurredPyramid.at(level); 84 | cv::GaussianBlur(pyramid.at(level), blurredImage, cv::Size(7, 7), 2, 2, cv::BORDER_REFLECT_101); 85 | } 86 | } 87 | 88 | accelerated::Image &getGpuLevel(std::size_t level) final { 89 | assert(false && "not GPU"); 90 | return getLevel(level); 91 | } 92 | 93 | bool isGpu() const final { 94 | return false; 95 | } 96 | }; 97 | 98 | struct GpuImagePyramid 
/**
 * Build a normalized 1D Gaussian convolution kernel.
 *
 * @param width number of taps (kernel is centered at (width-1)/2)
 * @param stdev standard deviation of the Gaussian, in pixels
 * @return kernel weights that sum to 1 (brightness-preserving blur)
 */
static std::vector<double> gaussianKernel1D(int width, double stdev) {
    std::vector<double> kernel;
    kernel.reserve(width); // exactly `width` taps, allocate once
    double sum = 0.0;
    for (int i = 0; i < width; ++i) {
        double x = i - (width - 1) * 0.5; // signed distance from kernel center
        double v = std::exp(-0.5 * x * x / (stdev * stdev));
        sum += v;
        kernel.push_back(v);
    }
    // normalize so the weights sum to one
    for (auto &v : kernel) v /= sum;
    return kernel;
}

/**
 * The separable 7-tap, stdev 2 Gaussian used for the blurred pyramid levels
 * (matches the CPU path's cv::GaussianBlur(Size(7, 7), 2, 2)), as a 2D
 * kernel matrix for the convolution op factory.
 *
 * @param yDir if true, return a 7x1 column kernel (vertical pass),
 *             otherwise a 1x7 row kernel (horizontal pass)
 */
static std::vector<std::vector<double>> gaussianKernel(bool yDir) {
    constexpr double stdev = 2.0;
    const int width = 7;
    auto kernel1D = gaussianKernel1D(width, stdev);
    std::vector<std::vector<double>> kernel;
    if (yDir) {
        for (const auto &el : kernel1D) kernel.push_back({ el });
    } else {
        kernel.push_back(kernel1D);
    }
    return kernel;
}
pyramidGpu.push_back(imgFactory.create(w, h, 1, accImage.dataType)); 152 | blurredPyramidGpu.push_back(imgFactory.createLike(*pyramidGpu.back())); 153 | blurTmpPyramidGpu.push_back(imgFactory.createLike(*pyramidGpu.back())); 154 | } 155 | 156 | for (unsigned int level = 0; level < parameters.orbScaleLevels; ++level) { 157 | auto &cur = *pyramidGpu.at(level); 158 | pyramid.at(level) = accelerated::opencv::emptyLike(cur); 159 | blurredPyramid.at(level) = accelerated::opencv::emptyLike(cur); 160 | } 161 | 162 | resizeOp = opFactory.rescale() 163 | .setInterpolation(accelerated::Image::Interpolation::LINEAR) 164 | .build(accImage); 165 | 166 | const auto BORDER_TYPE = accelerated::Image::Border::MIRROR; 167 | blurX = opFactory.fixedConvolution2D(gaussianKernel(false)) 168 | .setBorder(BORDER_TYPE) 169 | .build(accImage); 170 | blurY = opFactory.fixedConvolution2D(gaussianKernel(true)) 171 | .setBorder(BORDER_TYPE) 172 | .build(accImage); 173 | } 174 | 175 | void update(tracker::Image &trackerImage) final { 176 | auto &accImage = trackerImage.getAccImage(); 177 | const int n = pyramid.size(); 178 | accelerated::operations::callUnary(resizeOp, accImage, *pyramidGpu.at(0)); 179 | accelerated::operations::callUnary(blurX, accImage, *blurTmpPyramidGpu.at(0)); 180 | accelerated::operations::callUnary(blurY, *blurTmpPyramidGpu.at(0), *blurredPyramidGpu.at(0)); 181 | accelerated::opencv::copy(accImage, pyramid.at(0)); 182 | accelerated::opencv::copy(*blurredPyramidGpu.at(0), blurredPyramid.at(0)); 183 | 184 | for (int level = 1; level < n; ++level) { 185 | auto &cur = *pyramidGpu.at(level); 186 | accelerated::operations::callUnary(resizeOp, *pyramidGpu.at(level - 1), cur); 187 | auto &curTmp = *blurTmpPyramidGpu.at(level); 188 | accelerated::operations::callUnary(blurX, cur, curTmp); 189 | auto &curBlur = *blurredPyramidGpu.at(level); 190 | accelerated::operations::callUnary(blurY, curTmp, curBlur); 191 | 192 | accelerated::opencv::copy(cur, pyramid.at(level)); 193 | auto fut 
= accelerated::opencv::copy(curBlur, blurredPyramid.at(level)); 194 | 195 | if (level == n - 1) fut.wait(); 196 | } 197 | } 198 | 199 | accelerated::Image &getGpuLevel(std::size_t level) final { 200 | return *pyramidGpu.at(level); 201 | } 202 | 203 | bool isGpu() const final { 204 | return true; 205 | } 206 | }; 207 | } 208 | 209 | std::unique_ptr ImagePyramid::build(const StaticSettings &s, tracker::Image &img) { 210 | auto &accImg = img.getAccImage(); 211 | if (accImg.storageType == accelerated::Image::StorageType::CPU || !s.parameters.slam.useGpuImagePyramid) { 212 | return std::unique_ptr(new CpuImagePyramid(s)); 213 | } else { 214 | return std::unique_ptr(new GpuImagePyramid(s, 215 | accImg, 216 | img.getImageFactory(), 217 | img.getOperationsFactory())); 218 | } 219 | } 220 | 221 | ImagePyramid::~ImagePyramid() = default; 222 | } 223 | -------------------------------------------------------------------------------- /image_pyramid.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_SLAM_IMAGE_PYRAMID_HPP 2 | #define SLAM_SLAM_IMAGE_PYRAMID_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | #include "../tracker/image.hpp" 11 | 12 | namespace cv { class Mat; } 13 | namespace slam { 14 | struct StaticSettings; 15 | 16 | struct ImagePyramid { 17 | static std::unique_ptr build(const StaticSettings &settings, tracker::Image &modelImage); 18 | virtual ~ImagePyramid(); 19 | 20 | virtual void update(tracker::Image &image) = 0; 21 | virtual std::size_t numberOfLevels() const = 0; 22 | virtual bool isGpu() const = 0; 23 | 24 | virtual accelerated::Image &getLevel(std::size_t level) = 0; // CPU 25 | virtual accelerated::Image &getBlurredLevel(std::size_t level) = 0; // CPU 26 | virtual accelerated::Image &getGpuLevel(std::size_t level) = 0; // GPU, not blurred 27 | 28 | // for debugging, render as OpenCV matrix 29 | virtual void debugVisualize(cv::Mat &target) = 0; 30 | }; 31 | } 32 | 33 | #endif 34 
| -------------------------------------------------------------------------------- /key_point.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_KEY_POINT_HPP 2 | #define SLAM_KEY_POINT_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include "../tracker/track.hpp" 9 | 10 | namespace slam { 11 | struct KeyPoint { 12 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 13 | 14 | tracker::Feature::Point pt; 15 | float angle; 16 | int octave; 17 | Eigen::Vector3d bearing; 18 | 19 | using Descriptor = std::array; 20 | Descriptor descriptor; 21 | 22 | template 23 | void serialize(Archive &ar) { 24 | ar(pt.x, pt.y, angle, octave, octave, bearing, descriptor); 25 | } 26 | }; 27 | 28 | using KeyPointVector = std::vector>; 29 | } 30 | 31 | #endif 32 | -------------------------------------------------------------------------------- /keyframe.cpp: -------------------------------------------------------------------------------- 1 | #include "keyframe.hpp" 2 | 3 | #include // gray frame conversion 4 | #include 5 | 6 | #include "../tracker/image.hpp" 7 | #include "../odometry/parameters.hpp" 8 | #include "../util/logging.hpp" 9 | #include "../util/string_utils.hpp" 10 | #include "map_point.hpp" 11 | #include "mapdb.hpp" 12 | #include "../api/slam.hpp" 13 | #include "orb_extractor.hpp" 14 | 15 | using Eigen::Matrix4d; 16 | using Eigen::Matrix3d; 17 | using Eigen::Vector3d; 18 | using Eigen::Vector2f; 19 | 20 | 21 | namespace slam { 22 | namespace { 23 | cv::Vec3b BLACK(0, 0, 0); 24 | std::vector keyPointTrackIds; 25 | 26 | cv::Vec3b getPixel(const cv::Mat &img, cv::Point2f pt); 27 | void setStereoPointCloud(const MapperInput &mapperInput, KeyframeShared &kfShared); 28 | 29 | bool convertKeypointToBearing(const tracker::Camera &camera, const KeyPoint& kp, Eigen::Vector3d &out) { 30 | return camera.pixelToRay(Eigen::Vector2d(kp.pt.x, kp.pt.y), out); 31 | } 32 | 33 | // shared code for detectFullFeatures & detectTrackerFeatures 34 | 
void processKeyPoints(Keyframe &kf, std::vector &colors, const MapperInput &mapperInput) { 35 | // populate colors for debugging and nice visualizations 36 | if (!mapperInput.colorFrame.empty()) { 37 | colors.reserve(kf.shared->keyPoints.size()); 38 | cv::Rect rect(cv::Point(), mapperInput.colorFrame.size()); 39 | for (const auto &kp : kf.shared->keyPoints) { 40 | cv::Point2f p(kp.pt.x, kp.pt.y); 41 | colors.push_back( 42 | rect.contains(p) 43 | ? getPixel(mapperInput.colorFrame, p) 44 | : BLACK 45 | ); 46 | } 47 | } 48 | 49 | kf.mapPoints.assign(kf.shared->keyPoints.size(), MpId(-1)); 50 | 51 | std::map trackIdToIndex; 52 | for (size_t idx = 0; idx < mapperInput.trackerFeatures.size(); ++idx) 53 | trackIdToIndex[TrackId(mapperInput.trackerFeatures.at(idx).id)] = idx; 54 | 55 | for (size_t kpIdx = 0; kpIdx < kf.shared->keyPoints.size(); ++kpIdx) { 56 | auto &kp = kf.shared->keyPoints.at(kpIdx); 57 | float depth = -1; 58 | if (kf.keyPointToTrackId.count(KpId(kpIdx))) { 59 | depth = mapperInput.trackerFeatures.at(trackIdToIndex.at(kf.keyPointToTrackId.at(KpId(kpIdx)))).depth; 60 | } 61 | if (depth < 0) { 62 | depth = mapperInput.frame->getDepth(Eigen::Vector2f(kp.pt.x, kp.pt.y)); 63 | } 64 | kf.keyPointDepth.push_back(depth); 65 | // Note: must have filtered out any possible invalid keypoints 66 | // before this operation 67 | assert(convertKeypointToBearing(*kf.shared->camera, kp, kp.bearing)); 68 | } 69 | } 70 | } // namespace 71 | 72 | // needs to be defined in a file that include tracker/image.hpp 73 | MapperInput::MapperInput() = default; 74 | MapperInput::~MapperInput() = default; 75 | 76 | // For cereal. 
77 | Keyframe::Keyframe() {} 78 | 79 | Keyframe::Keyframe(const MapperInput &mapperInput) : 80 | shared(std::make_shared()), 81 | id(KfId(mapperInput.poseTrail.at(0).frameNumber)), 82 | previousKfId(KfId(-1)), 83 | nextKfId(KfId(-1)), 84 | poseCW(Eigen::Matrix4d::Identity()), 85 | origPoseCW(mapperInput.poseTrail[0].pose), 86 | uncertainty(mapperInput.poseTrail[0].uncertainty), 87 | t(mapperInput.poseTrail[0].t), 88 | hasFullFeatures(false) 89 | { 90 | assert(id.v >= 0); 91 | shared->camera = mapperInput.frame->getCamera(); 92 | setStereoPointCloud(mapperInput, *shared); 93 | } 94 | 95 | void Keyframe::addFullFeatures( 96 | const MapperInput &mapperInput, 97 | OrbExtractor &orb) { 98 | hasFullFeatures = true; 99 | orb.detectAndExtract( 100 | *mapperInput.frame, 101 | *shared->camera, 102 | mapperInput.trackerFeatures, 103 | shared->keyPoints, 104 | keyPointTrackIds); 105 | 106 | for (unsigned i = 0; i < shared->keyPoints.size(); ++i) { 107 | int trackId = keyPointTrackIds.at(i); 108 | if (trackId >= 0) { 109 | keyPointToTrackId.emplace(KpId(i), trackId); 110 | } 111 | } 112 | 113 | processKeyPoints(*this, shared->colors, mapperInput); 114 | 115 | shared->featureSearch = FeatureSearch::create(shared->keyPoints); 116 | } 117 | 118 | void Keyframe::addTrackerFeatures(const MapperInput &mapperInput) { 119 | for (unsigned i = 0; i < mapperInput.trackerFeatures.size(); ++i) { 120 | const auto &track = mapperInput.trackerFeatures.at(i); 121 | const auto &pt = track.points[0]; 122 | if (!shared->camera->isValidPixel(pt.x, pt.y)) continue; 123 | shared->keyPoints.push_back({ 124 | .pt = pt, 125 | // should not be used 126 | .angle = 0, 127 | .octave = 0 128 | }); 129 | keyPointToTrackId.emplace(KpId(i), track.id); 130 | } 131 | 132 | processKeyPoints(*this, shared->colors, mapperInput); 133 | } 134 | 135 | Keyframe::Keyframe(const Keyframe &kf) : 136 | shared(kf.shared), 137 | id(kf.id), 138 | previousKfId(kf.previousKfId), 139 | nextKfId(kf.nextKfId), 140 | 
keyPointToTrackId(kf.keyPointToTrackId), 141 | mapPoints(kf.mapPoints), 142 | keyPointDepth(kf.keyPointDepth), 143 | poseCW(kf.poseCW), 144 | origPoseCW(kf.origPoseCW), 145 | uncertainty(kf.uncertainty), 146 | t(kf.t) 147 | {} 148 | 149 | std::unique_ptr KeyframeShared::clone() const 150 | { 151 | auto s = std::make_unique(); 152 | s->camera = camera; 153 | s->imgDbg = imgDbg; // cv::Mat is copied shallowly. 154 | s->stereoPointCloud = stereoPointCloud; // shallow copy 155 | s->stereoPointCloudColor = stereoPointCloudColor; 156 | 157 | // everything else should be empty if this method is used 158 | assert(keyPoints.empty()); 159 | assert(!featureSearch); 160 | 161 | return s; 162 | } 163 | 164 | float Keyframe::computeMedianDepth(const MapDB &mapDB, float defaultDepth) { 165 | std::vector depths; 166 | depths.reserve(shared->keyPoints.size()); 167 | const Vector3d &rotZRow = poseCW.block<1, 3>(2, 0); 168 | const float transZ = poseCW(2, 3); 169 | 170 | for (MpId mpId : mapPoints) { 171 | if (mpId.v == -1) { 172 | continue; 173 | } 174 | const MapPoint &mp = mapDB.mapPoints.at(mpId); 175 | if (mp.status != MapPointStatus::TRIANGULATED) { 176 | continue; 177 | } 178 | 179 | const float posCZ = rotZRow.dot(mp.position) + transZ; 180 | depths.push_back(posCZ); 181 | } 182 | 183 | if (depths.empty()) { 184 | return defaultDepth; 185 | } 186 | 187 | std::sort(depths.begin(), depths.end()); 188 | 189 | return depths.at((depths.size() - 1) / 2); 190 | } 191 | 192 | std::vector Keyframe::getNeighbors(const MapDB &mapDB, int minCovisibilities, bool triangulatedOnly) const { 193 | std::map covisibilities; 194 | 195 | // Previous and next KF are considered neighbors always. 
196 | if (previousKfId.v != -1) { 197 | covisibilities.emplace(previousKfId, minCovisibilities); 198 | } 199 | if (nextKfId.v != -1) { 200 | covisibilities.emplace(nextKfId, minCovisibilities); 201 | } 202 | 203 | for (MpId mpId : mapPoints) { 204 | if (mpId.v == -1) 205 | continue; 206 | 207 | const auto &mp = mapDB.mapPoints.at(mpId); 208 | if (triangulatedOnly && mp.status != MapPointStatus::TRIANGULATED) 209 | continue; 210 | 211 | for (const auto &kfIdKeypointId : mp.observations) { 212 | KfId kfId = kfIdKeypointId.first; 213 | if (covisibilities.count(kfId)) { 214 | covisibilities[kfId]++; 215 | } else { 216 | covisibilities[kfId] = 1; 217 | } 218 | } 219 | } 220 | 221 | std::vector res; 222 | for (const auto &kfIdObs : covisibilities) { 223 | KfId kfId = kfIdObs.first; 224 | int covis = kfIdObs.second; 225 | if (kfId != id && covis >= minCovisibilities) { 226 | res.push_back(kfIdObs.first); 227 | } 228 | } 229 | return res; 230 | } 231 | 232 | // Camera position computed from `poseCW`, in world coordinates. 
233 | Eigen::Vector3d Keyframe::cameraCenter() const { 234 | return worldToCameraMatrixCameraCenter(poseCW); 235 | } 236 | 237 | Eigen::Vector3d Keyframe::origPoseCameraCenter() const { 238 | return worldToCameraMatrixCameraCenter(origPoseCW); 239 | } 240 | 241 | void Keyframe::getFeaturesAround(const Eigen::Vector2f &point, float r, std::vector &indices) { 242 | assert(shared->featureSearch); 243 | shared->featureSearch->getFeaturesAround(point.x(), point.y(), r, indices); 244 | } 245 | 246 | // Check angle, scale 247 | bool Keyframe::isInFrustum(const MapPoint &mp, float viewAngleLimitCos) const { 248 | Eigen::Vector2f reprojectionDummy = Eigen::Vector2f::Zero(); 249 | if (!reproject(mp.position, reprojectionDummy)) 250 | return false; 251 | 252 | Eigen::Vector3f mpToKf = (cameraCenter() - mp.position).cast(); 253 | float dist = mpToKf.norm(); 254 | if (dist < mp.minViewingDistance || mp.maxViewingDistance < dist) 255 | return false; 256 | 257 | float viewingAngleCos = mpToKf.normalized().dot(mp.norm); 258 | if (viewingAngleCos < viewAngleLimitCos) 259 | return false; 260 | 261 | return true; 262 | } 263 | 264 | bool Keyframe::reproject(const Vector3d &pointW, Vector2f &reprojected) const { 265 | Matrix3d rotCW = poseCW.topLeftCorner<3,3>(); 266 | Vector3d transCW = poseCW.block<3,1>(0,3); 267 | float unused = 0; 268 | Eigen::Vector2d pointD = Eigen::Vector2d::Zero(); 269 | const bool visible = reprojectToImage(*shared->camera, rotCW, transCW, pointW, pointD, unused); 270 | reprojected << pointD.x(), pointD.y(); 271 | return visible; 272 | } 273 | 274 | void Keyframe::addObservation(MpId mapPointId, KpId keyPointId) { 275 | assert(mapPoints[keyPointId.v].v == -1); 276 | mapPoints[keyPointId.v] = mapPointId; 277 | } 278 | 279 | void Keyframe::eraseObservation(MpId mapPointId) { 280 | const auto it = std::find(mapPoints.begin(), mapPoints.end(), mapPointId); 281 | assert(it != mapPoints.end() && "MapPoint not observed in keyframe"); 282 | 283 | (*it).v = -1; 284 | 
KpId keyPointId(std::distance(mapPoints.begin(), it)); 285 | if (keyPointToTrackId.count(keyPointId)) { 286 | keyPointToTrackId.erase(keyPointId); 287 | } 288 | } 289 | 290 | template void vectorToStringstream(std::stringstream* ss, std::vector v) { 291 | for(size_t i = 0; i < v.size(); ++i) { 292 | if (i != 0) { 293 | *ss << ","; 294 | } 295 | *ss << v[i]; 296 | } 297 | } 298 | 299 | void keyPointVectorToStringstream(std::stringstream* ss, KeyPointVector v) { 300 | for(size_t i = 0; i < v.size(); ++i) { 301 | if (i != 0) { 302 | *ss << ","; 303 | } 304 | *ss << "(" << v[i].pt.x << "," << v[i].pt.y << ")"; 305 | } 306 | } 307 | 308 | std::string Keyframe::toString() { 309 | std::stringstream ss; 310 | ss << "keyframe(" << std::to_string(id.v) << ") ["; 311 | ss << "previousKfId=" << std::to_string(previousKfId.v); 312 | ss << "nextKfId=" << std::to_string(nextKfId.v); 313 | ss << ", points=" << std::to_string(shared->keyPoints.size()); 314 | ss << ", poseCW=" << util::eigenToString(poseCW); 315 | ss << ", origPoseCW=" << util::eigenToString(origPoseCW); 316 | ss << ", camera=" << shared->camera->serialize(); 317 | ss << ", keyPoints="; 318 | keyPointVectorToStringstream(&ss, shared->keyPoints); 319 | ss << ", bowFeatureVec="; 320 | for (auto feature : shared->bowFeatureVec) { 321 | ss << "(nodeId=" << std::to_string(feature.first) << ", "; 322 | vectorToStringstream(&ss, feature.second); 323 | ss << ")"; 324 | } 325 | ss << "]"; 326 | // TODO: Fields missing text representation 327 | // std::unordered_map keyPointToTrackId; 328 | // std::vector mapPoints; 329 | // DBoW2::BowVector bowVec; 330 | // DBoW2::FeatureVector bowFeatureVec; 331 | return ss.str(); 332 | } 333 | 334 | const cv::Vec3b &Keyframe::getKeyPointColor(KpId kpId) const { 335 | if (shared->colors.empty()) return BLACK; 336 | return shared->colors.at(kpId.v); 337 | } 338 | 339 | // implementations copied from openvslam/camera/perspective.cc 340 | bool reprojectToImage( 341 | const tracker::Camera 
&camera, 342 | const Eigen::Matrix3d& rot_cw, 343 | const Eigen::Vector3d& trans_cw, 344 | const Eigen::Vector3d& pos_w, 345 | Eigen::Vector2d& reproj, 346 | float& x_right) 347 | { 348 | const Eigen::Vector3d pos_c = rot_cw * pos_w + trans_cw; 349 | x_right = 0.0; 350 | if (!camera.rayToPixel(pos_c, reproj)) return false; 351 | if (!camera.isValidPixel(reproj)) return false; 352 | 353 | // TODO: stereo stuff, currently unused 354 | x_right = reproj(0); //reproj(0) - focal_x_baseline_ * z_inv; 355 | return true; 356 | } 357 | 358 | bool reprojectToBearing( 359 | const tracker::Camera &camera, 360 | const Eigen::Matrix3d& rot_cw, 361 | const Eigen::Vector3d& trans_cw, 362 | const Eigen::Vector3d& pos_w, 363 | Eigen::Vector3d& reproj) 364 | { 365 | // convert to camera-coordinates 366 | reproj = rot_cw * pos_w + trans_cw; 367 | 368 | Eigen::Vector2d pix; 369 | // check if the point is within camera FOV and reproject 370 | if (!camera.rayToPixel(rot_cw * pos_w + trans_cw, pix)) 371 | return false; 372 | 373 | // check if the point is visible 374 | if (!camera.isValidPixel(pix)) return false; 375 | 376 | return camera.pixelToRay(pix, reproj); 377 | } 378 | 379 | void asciiKeyframes(const std::function status, const MapDB &mapDB, int len) { 380 | if (mapDB.keyframes.empty()) return; 381 | KfId lastId = mapDB.keyframes.rbegin()->first; 382 | 383 | std::string kfStatus(len, ' '); 384 | int lastRev = 0; 385 | for (int ind = 0, rev = len - 1; lastId.v - ind >= 0 && rev >= 0; ++ind) { 386 | KfId kfId(lastId.v - ind); // TODO: broken 387 | // Do not draw empty space for non-existing keyframes. 388 | if (!mapDB.keyframes.count(kfId)) continue; 389 | 390 | kfStatus[rev] = status(kfId); 391 | lastRev = rev; 392 | rev--; 393 | } 394 | // Set first KF like this because the actual KfId(0) KF may be removed. 
395 | if (lastRev > 0 && kfStatus[lastRev] == ' ') kfStatus[lastRev] = '0'; 396 | std::cout << kfStatus << std::endl; 397 | } 398 | 399 | namespace { 400 | cv::Vec3b getPixel(const cv::Mat &img, cv::Point2f pt) { 401 | switch (img.channels()) { 402 | case 1: { 403 | const auto b = img.at(pt); 404 | return cv::Vec3b(b, b, b); 405 | } 406 | case 4: { // RGBA 407 | const auto c = img.at(pt); 408 | return cv::Vec3b(c[2], c[1], c[0]); // BGR -> RGB 409 | } 410 | case 3: { 411 | const auto c = img.at(pt); 412 | return cv::Vec3b(c[2], c[1], c[0]); // BGR -> RGB 413 | } 414 | default: 415 | assert(false && "invalid number of channels"); 416 | } 417 | return cv::Vec3b(0, 0, 0); 418 | } 419 | 420 | void setStereoPointCloud(const MapperInput &mapperInput, KeyframeShared &kfShared) { 421 | if (mapperInput.frame->hasStereoPointCloud()) { 422 | kfShared.stereoPointCloud = std::make_shared(); 423 | *kfShared.stereoPointCloud = mapperInput.frame->getStereoPointCloud(); 424 | 425 | const cv::Mat &img = mapperInput.colorFrame; 426 | if (!img.empty()) { 427 | kfShared.stereoPointCloudColor = std::make_shared>(); 428 | for (const Eigen::Vector3f &pCam : *kfShared.stereoPointCloud) { 429 | cv::Vec3b color = slam::BLACK; 430 | Eigen::Vector2d pix; 431 | if (kfShared.camera->rayToPixel(pCam.cast(), pix)) { 432 | int x = int(pix.x()), y = int(pix.y()); 433 | if (x >= 0 && y >= 0 && x < img.cols && y < img.rows) { 434 | color = getPixel(img, cv::Point2f(x, y)); 435 | } 436 | } 437 | kfShared.stereoPointCloudColor->push_back(color); 438 | } 439 | } 440 | } 441 | } 442 | } 443 | 444 | } // namespace slam 445 | -------------------------------------------------------------------------------- /keyframe.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_KEYFRAME_HPP 2 | #define SLAM_KEYFRAME_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 
"feature_search.hpp" 15 | #include "id.hpp" 16 | #include "bow_index.hpp" 17 | #include "../api/slam.hpp" 18 | #include "key_point.hpp" 19 | #include "../tracker/camera.hpp" 20 | 21 | namespace slam { 22 | 23 | inline Eigen::Vector3d worldToCameraMatrixCameraCenter(const Eigen::Matrix4d &poseCW) { 24 | return -poseCW.topLeftCorner<3, 3>().transpose() * poseCW.block<3, 1>(0, 3); 25 | } 26 | 27 | class MapPoint; 28 | class MapDB; 29 | struct OrbExtractor; 30 | 31 | struct MapperInput { 32 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 33 | 34 | std::shared_ptr frame; 35 | std::vector trackerFeatures; 36 | std::vector poseTrail; 37 | double t; 38 | 39 | // for debuggging & nicer visualization. Can be empty 40 | cv::Mat colorFrame; 41 | 42 | MapperInput(); 43 | ~MapperInput(); 44 | }; 45 | 46 | /** 47 | * Keyframe information that can be shared between copies, accessed from 48 | * multiple threads 49 | */ 50 | struct KeyframeShared { 51 | std::shared_ptr camera; 52 | KeyPointVector keyPoints; 53 | std::unique_ptr featureSearch; // For speeding up feature search. 54 | 55 | // debugging / visu 56 | cv::Mat imgDbg; 57 | std::vector colors; 58 | using StereoPointCloud = std::vector>; 59 | std::shared_ptr stereoPointCloud; 60 | std::shared_ptr> stereoPointCloudColor; 61 | 62 | // The bag-of-words representation of the image. 63 | // Size() of this map is about the same as number of keypoints, because typically 64 | // the vocabulary has much more words than there are keypoints (10^6 >> 10^4), so 65 | // most keypoints will be assigned to a unique word. 66 | // * std::map = std::map 67 | DBoW2::BowVector bowVec; 68 | 69 | // List of keypoint ids for each node on a specific level of the vocabulary tree. 70 | // The higher the level, the larger and fewer the nodes (buckets) become. 71 | // Typically the level is set so that there are about 100 nodes. 72 | // Called "direct index" in the DBoW paper. 73 | // * std::map>, where NodeId = unsigned int. 
74 | DBoW2::FeatureVector bowFeatureVec; 75 | 76 | // make a copy, assuming most of the fields have not been populated yet 77 | // (assert fails if this used too late) 78 | std::unique_ptr clone() const; 79 | 80 | template 81 | void save(Archive &ar) const { 82 | std::string cameraModel = camera->serialize(); 83 | ar( 84 | cameraModel, 85 | keyPoints, 86 | // featureSearch, // Computed after deserialization 87 | // imgDbg, // Not required 88 | colors, 89 | stereoPointCloud 90 | // bowVec, // Computed after deserialization. 91 | // bowFeatureVec, // Computed after deserialization. 92 | ); 93 | } 94 | 95 | template 96 | void load(Archive &ar) { 97 | std::string cameraModel; 98 | ar( 99 | cameraModel, 100 | keyPoints, 101 | colors, 102 | stereoPointCloud 103 | ); 104 | camera = tracker::Camera::deserialize(cameraModel); 105 | } 106 | }; 107 | 108 | class Keyframe { 109 | public: 110 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 111 | 112 | Keyframe(const MapperInput &mapperInput); 113 | 114 | Keyframe(const Keyframe &kf); // Copy constructor 115 | 116 | Keyframe(); 117 | 118 | bool hasFeatureDescriptors() const { return hasFullFeatures; } 119 | 120 | void addFullFeatures(const MapperInput &mapperInput, OrbExtractor &orb); 121 | void addTrackerFeatures(const MapperInput &mapperInput); 122 | 123 | void addObservation(MpId mapPointId, KpId keyPointId); 124 | 125 | void eraseObservation(MpId mapPointId); 126 | 127 | /** 128 | * Compute median depth of mapPoints observed in keyframe 129 | * 130 | * @return median depth, if no triangulated MapPoint%s are observed return #defaultDepth 131 | */ 132 | float computeMedianDepth(const MapDB &mapDB, float defaultDepth = 2.0); 133 | 134 | bool isInFrustum(const MapPoint &mp, float viewAngleLimitCos = 0.5) const; 135 | 136 | bool reproject(const Eigen::Vector3d &point, Eigen::Vector2f &reprojected) const; 137 | 138 | Eigen::Vector3d cameraCenter() const; 139 | Eigen::Vector3d origPoseCameraCenter() const; 140 | inline Eigen::Matrix3d 
cameraToWorldRotation() const { 141 | return poseCW.topLeftCorner<3, 3>().transpose(); 142 | } 143 | 144 | void getFeaturesAround(const Eigen::Vector2f &point, float r, std::vector &output); 145 | 146 | std::vector getNeighbors( 147 | const MapDB &mapDB, 148 | int minCovisibilities = 1, 149 | bool triangulatedOnly = false 150 | ) const; 151 | 152 | // for visualizations 153 | const cv::Vec3b &getKeyPointColor(KpId kp) const; 154 | 155 | // Immutable properties that are shared among all copies of the keyframe 156 | std::shared_ptr shared; 157 | 158 | KfId id; 159 | KfId previousKfId; 160 | KfId nextKfId; 161 | 162 | std::map keyPointToTrackId; 163 | 164 | // These are indexed by keypoint id value. 165 | std::vector mapPoints; 166 | std::vector keyPointDepth; 167 | 168 | // "CW" means world-to-camera, coming from notation like `p_C = T_CW * p_W`. 169 | // The main pose of the keyframe that is used for most computations and initializing 170 | // KF positions in non-linear optimization. 171 | Eigen::Matrix4d poseCW; 172 | 173 | // Pose computed by odometry with no (or minimal) influence from SLAM. It is used 174 | // mainly to setup priors between subsequent keyframes in non-linear optimization. 
175 | Eigen::Matrix4d origPoseCW; 176 | 177 | // Uncertainty matrix for position & rotation 178 | Eigen::Matrix uncertainty; 179 | 180 | double t; 181 | 182 | // True if ORB features have been computed, false if only 183 | // odometry-based info is present 184 | bool hasFullFeatures; 185 | 186 | // For debugging 187 | std::string toString(); 188 | 189 | template 190 | void serialize(Archive &ar) { 191 | ar( 192 | shared, 193 | id, 194 | previousKfId, 195 | nextKfId, 196 | keyPointToTrackId, 197 | mapPoints, 198 | keyPointDepth, 199 | poseCW, 200 | origPoseCW, 201 | uncertainty, 202 | t, 203 | hasFullFeatures 204 | ); 205 | } 206 | }; 207 | 208 | // OpenVSLAM helper functions 209 | bool reprojectToImage( 210 | const tracker::Camera &camera, 211 | const Eigen::Matrix3d& rot_cw, 212 | const Eigen::Vector3d& trans_cw, 213 | const Eigen::Vector3d& pos_w, 214 | Eigen::Vector2d& reproj, 215 | float& x_right); 216 | 217 | bool reprojectToBearing( 218 | const tracker::Camera &camera, 219 | const Eigen::Matrix3d& rot_cw, 220 | const Eigen::Vector3d& trans_cw, 221 | const Eigen::Vector3d& pos_w, 222 | Eigen::Vector3d& reproj); 223 | 224 | /** 225 | * An ASCII visualization of keyframe statuses. 226 | * 227 | * @param status Function that tells what character should be printed for given keyframe id. 228 | * @param mapDB 229 | * @param len Width of the visualization in terminal characters. 
230 | */ 231 | void asciiKeyframes(const std::function status, const MapDB &mapDB, int len); 232 | 233 | } // namespace slam 234 | #endif //SLAM_KEYFRAME_HPP 235 | -------------------------------------------------------------------------------- /keyframe_matcher.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_KEYFRAME_MATCHER_HPP 2 | #define SLAM_KEYFRAME_MATCHER_HPP 3 | 4 | #include "keyframe.hpp" 5 | #include "static_settings.hpp" 6 | #include "viewer_data_publisher.hpp" 7 | 8 | namespace slam { 9 | 10 | constexpr unsigned int HAMMING_DIST_THR_LOW = 50; 11 | constexpr unsigned int HAMMING_DIST_THR_HIGH = 100; 12 | constexpr unsigned int MAX_HAMMING_DIST = 256; 13 | 14 | /** 15 | * Compares descriptors of two keyframes, finding the best match for each feature 16 | * of the first keyframe in the second keyframe. 17 | * 18 | * The output is vector with the length of number of keypoints in `kf1`. Each element 19 | * is a keypoint index of `kf2`, or -1 if no match was found. 20 | * 21 | * Based on openvslam::match::bow_tree::match_frame_and_keyframe. 22 | * 23 | * Utilizes DBow index tree to only compare descriptors that are similar to each other. Should 24 | * be about the same functionality as cv::BFMatcher::knnMatch(....., 2) and selecting only 25 | * matches where `match1.distance < lowe_ratio * match2.distance` 26 | * 27 | * @param kf1 first keyframe 28 | * @param kf2 second keyframe 29 | * @param mapDB1 the map first keyframe is part of 30 | * @param mapDB2 the map second keyframe is part of 31 | * @param matchedMapPoints the output 32 | */ 33 | unsigned int matchForLoopClosures( 34 | const Keyframe &kf1, 35 | const Keyframe &kf2, 36 | const MapDB &mapDB1, 37 | const MapDB &mapDB2, 38 | std::vector &matchedMapPoints, 39 | const odometry::ParametersSlam ¶meters 40 | ); 41 | 42 | /** 43 | * Based on openvslam::match::robust::match_for_triangulation. 
44 | * 45 | * Match features not associated with `slam::MapPoint`s between two keyframes. Checks epipolar constraint 46 | * and possibly orientation of features. 47 | * 48 | * @param kf1 first keyframe 49 | * @param kf2 second keyframe 50 | * @param checkOrientation return only matches where the orientation of features matches 51 | * @return matches between keyframes 52 | */ 53 | std::vector> matchForTriangulationDBoW(Keyframe &kf1, Keyframe &kf2, const StaticSettings &settings); 54 | 55 | /** 56 | * For a (fresh) keyframe, projects the given map points and adds observations for close matches 57 | * for those keypoints that have no associated map points (MpId == -1). 58 | */ 59 | int searchByProjection( 60 | Keyframe &kf, 61 | const std::vector &mps, 62 | MapDB &mapDB, 63 | ViewerDataPublisher *dataPublisher, 64 | const float threshold, 65 | const StaticSettings &settings 66 | ); 67 | 68 | /** 69 | * Takes a collection of map points, projects them on the given keyframe and looks for close matches 70 | * to existing keypoints/map points and merges them. 71 | */ 72 | template 73 | unsigned int replaceDuplication( 74 | Keyframe &kf, 75 | const T &mapPoints, 76 | const float margin, 77 | MapDB &mapDB, 78 | const StaticSettings &settings); 79 | 80 | /** 81 | * Match MapPoints between #kf1 and #kf2 using the provided Sim3 transformation 82 | * 83 | * @param matches Vector with current matches. Updated with new matches found by projection. 
84 | */ 85 | void matchMapPointsSim3( 86 | Keyframe &kf1, 87 | Keyframe &kf2, 88 | const Eigen::Matrix4d &transform12, 89 | MapDB &mapDB, 90 | std::vector> &matches, 91 | const StaticSettings &settings); 92 | 93 | } // namespace slam 94 | 95 | #endif //SLAM_KEYFRAME_MATCHER_HPP 96 | -------------------------------------------------------------------------------- /loop_closer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_LOOP_CLOSER_HPP 2 | #define SLAM_LOOP_CLOSER_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "../commandline/command_queue.hpp" // TODO: bad!!! 10 | #include "id.hpp" 11 | #include "static_settings.hpp" 12 | 13 | namespace slam { 14 | 15 | class Keyframe; 16 | class MapDB; 17 | class BowIndex; 18 | class ViewerDataPublisher; 19 | 20 | using Atlas = std::vector; 21 | 22 | // Different stages of loop processing, used for visualizations. 23 | enum class LoopStage { 24 | BOW_MATCH, 25 | QUICK_TESTS, 26 | MAP_POINT_MATCHES, 27 | ACCEPTED, 28 | RELOCATION_MAP_POINT_MATCHES, 29 | RELOCATION_MAP_POINT_RANSAC, 30 | }; 31 | 32 | struct LoopClosureEdge { 33 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 34 | 35 | KfId kfId1; 36 | KfId kfId2; 37 | Eigen::Matrix4d poseDiff; 38 | 39 | template 40 | void serialize(Archive &ar) { 41 | ar(kfId1, kfId2, poseDiff); 42 | } 43 | }; 44 | 45 | class LoopCloser { 46 | public: 47 | static std::unique_ptr create( 48 | const StaticSettings &settings, 49 | BowIndex &bowIndex, 50 | MapDB &mapDB, 51 | const Atlas &atlas 52 | ); 53 | 54 | virtual ~LoopCloser() = default; 55 | 56 | virtual bool tryLoopClosure( 57 | Keyframe ¤tKf, 58 | const std::vector &adjacentKfIds 59 | ) = 0; 60 | 61 | virtual void setViewerDataPublisher(ViewerDataPublisher *dataPublisher) = 0; 62 | 63 | virtual void setCommandQueue(CommandQueue *commands) = 0; 64 | }; 65 | 66 | } // slam 67 | 68 | #endif // SLAM_LOOP_CLOSER_HPP 69 | 
-------------------------------------------------------------------------------- /loop_closer_stats.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_LOOP_CLOSER_STATS_H_ 2 | #define SLAM_LOOP_CLOSER_STATS_H_ 3 | 4 | namespace slam { 5 | namespace { 6 | 7 | class LoopCloserStats { 8 | public: 9 | enum class Loop { 10 | // Applied loop closures, up to 1 per frame. Computed automatically from OK updates. 11 | DONE, 12 | // All accepted loop closures. 13 | OK, 14 | // The failure types. 15 | TOO_CLOSE_TIME, 16 | UNNECESSARY_EARLY, 17 | EARLY_MAP_IGNORED, 18 | TOO_FEW_FEATURE_MATCHES, 19 | RANSAC_FAILED, 20 | UNNECESSARY, 21 | TOO_LARGE_POSITION_DRIFT, 22 | TOO_LARGE_ANGLE_DRIFT, 23 | UNKNOWN, 24 | // Marker for iteration. 25 | LAST 26 | }; 27 | 28 | private: 29 | const int last = static_cast(Loop::LAST); 30 | bool enabled; 31 | bool applyLoopClosures; 32 | int loopCount = 0; 33 | int totalLoopCount = 0; 34 | bool didLoopClosure = false; 35 | 36 | std::map loops; 37 | std::map totalLoops; 38 | 39 | public: 40 | LoopCloserStats(const odometry::ParametersSlam ¶meters) : 41 | enabled(parameters.printLoopCloserStats), 42 | applyLoopClosures(parameters.applyLoopClosures) 43 | { 44 | if (!enabled) return; 45 | for (int i = 0; i < last; ++i) { 46 | Loop l = static_cast(i); 47 | loops.emplace(l, 0); 48 | totalLoops.emplace(l, 0); 49 | } 50 | } 51 | 52 | void newLoop() { 53 | if (!enabled) return; 54 | ++loopCount; 55 | } 56 | 57 | void update(Loop loop) { 58 | if (!enabled) return; 59 | ++loops.at(loop); 60 | if (loop == Loop::OK) didLoopClosure = true; 61 | } 62 | 63 | void finishFrame() { 64 | if (!enabled) return; 65 | 66 | if (didLoopClosure && applyLoopClosures) { 67 | loops.at(Loop::DONE) += 1; 68 | } 69 | didLoopClosure = false; 70 | 71 | totalLoopCount += loopCount; 72 | int count = 0; 73 | for (int i = 0; i < last; ++i) { 74 | Loop t = static_cast(i); 75 | totalLoops.at(t) += loops.at(t); 76 | count += loops.at(t); 
77 | } 78 | // Should be zero, but won't be if the loop over loop closure candidates does `continue;` 79 | // without telling us about the loop. 80 | int unknown = loopCount - count > 0 ? loopCount - count : 0; 81 | loops.at(Loop::UNKNOWN) += unknown; 82 | totalLoops.at(Loop::UNKNOWN) += unknown; 83 | 84 | const char *names[12] = { 85 | "done ", 86 | "ok ", 87 | "too close time ", 88 | "unnecessary early ", 89 | "early map ignored ", 90 | "too few features ", 91 | "ransac failed ", 92 | "unnecessary ", 93 | "too large pos drift", 94 | "too large ang drift", 95 | "unknown ", 96 | "TOTAL ", 97 | }; 98 | log_info(""); 99 | log_info("TYPE \tNUM\tTOTAL"); 100 | int sum = 0; 101 | int totalSum = 0; 102 | for (int i = 0; i < last; ++i) { 103 | Loop t = static_cast(i); 104 | sum += loops.at(t); 105 | totalSum += totalLoops.at(t); 106 | log_info("%s\t%d\t%d", names[i], loops.at(t), totalLoops.at(t)); 107 | } 108 | log_info("%s\t%d\t%d", names[last], sum, totalSum); 109 | 110 | loopCount = 0; 111 | for (int i = 0; i < last; ++i) { 112 | Loop t = static_cast(i); 113 | loops.at(t) = 0; 114 | } 115 | } 116 | }; 117 | 118 | } // anonymous namespace 119 | } // namespace slam 120 | 121 | #endif // SLAM_LOOP_CLOSER_STATS_H_ 122 | -------------------------------------------------------------------------------- /loop_ransac.cpp: -------------------------------------------------------------------------------- 1 | #include "loop_ransac.hpp" 2 | #include "map_point.hpp" 3 | #include "openvslam/random_array.h" 4 | #include "../util/logging.hpp" 5 | 6 | namespace slam { 7 | 8 | LoopRansac::LoopRansac( 9 | const Keyframe &kf1, 10 | const Keyframe &kf2, 11 | const std::vector> &matches, 12 | const MapDB &mapDB1, 13 | const MapDB &mapDB2, 14 | const StaticSettings &settings 15 | ) : camera1(*kf1.shared->camera), camera2(*kf2.shared->camera), settings(settings) { 16 | 17 | const auto size = matches.size(); 18 | commonPtsInKeyframe1.reserve(size); 19 | commonPtsInKeyframe2.reserve(size); 20 | 
chiSqSigmaSq1.reserve(size); 21 | chiSqSigmaSq2.reserve(size); 22 | visibleSame1.reserve(size); 23 | visibleSame2.reserve(size); 24 | 25 | matchCount = matches.size(); 26 | 27 | // For p = 0.01 28 | constexpr float CHI_SQ_2D = 9.21034; 29 | 30 | for (const auto &match : matches) { 31 | const MapPoint &mp1 = mapDB1.mapPoints.at(match.first); 32 | const MapPoint &mp2 = mapDB2.mapPoints.at(match.second); 33 | 34 | commonPtsInKeyframe1.emplace_back(Eigen::Isometry3d(kf1.poseCW) * mp1.position); 35 | commonPtsInKeyframe2.emplace_back(Eigen::Isometry3d(kf2.poseCW) * mp2.position); 36 | 37 | int octave1 = kf1.shared->keyPoints.at(mp1.observations.at(kf1.id).v).octave; 38 | int octave2 = kf2.shared->keyPoints.at(mp2.observations.at(kf2.id).v).octave; 39 | chiSqSigmaSq1.push_back(CHI_SQ_2D * settings.levelSigmaSq.at(octave1)); 40 | chiSqSigmaSq2.push_back(CHI_SQ_2D * settings.levelSigmaSq.at(octave2)); 41 | } 42 | 43 | reprojected1 = reproject_to_same_image(commonPtsInKeyframe1, camera1, visibleSame1); 44 | reprojected2 = reproject_to_same_image(commonPtsInKeyframe2, camera2, visibleSame2); 45 | } 46 | 47 | void LoopRansac::ransacSolve(const unsigned int max_num_iter, DoF solveType) { 48 | solutionOk = false; 49 | bestInlierCount = 0; 50 | const unsigned minInlierCount = settings.parameters.slam.loopClosureRansacMinInliers; 51 | 52 | if (matchCount < 3 || matchCount < minInlierCount) { 53 | return; 54 | } 55 | 56 | // Variables used in RANSAC loop 57 | Eigen::Matrix3d R12Ransac; 58 | Eigen::Vector3d t12Ransac; 59 | float s12Ransac; 60 | Eigen::Matrix3d R21Ransac; 61 | Eigen::Vector3d t21Ransac; 62 | float s21Ransac; 63 | 64 | std::vector inliers; 65 | 66 | // RANSAC loop 67 | for (unsigned i = 0; i < max_num_iter; ++i) { 68 | 69 | // Randomly sample 3 points 70 | Eigen::Matrix3d pts_1, pts_2; 71 | const auto random_indices = openvslam::util::create_random_array(3, 0, static_cast(matchCount - 1)); 72 | for (unsigned int i = 0; i < 3; ++i) { 73 | pts_1.block(0, i, 3, 1) = 
commonPtsInKeyframe1.at(random_indices.at(i)); 74 | pts_2.block(0, i, 3, 1) = commonPtsInKeyframe2.at(random_indices.at(i)); 75 | } 76 | 77 | if (solveType == DoF::ZROT) { 78 | computeRotZ(pts_1, pts_2, R21Ransac, t21Ransac, s21Ransac); 79 | } else if (solveType == DoF::SIM3) { 80 | computeSim3(pts_1, pts_2, R21Ransac, t21Ransac, s21Ransac); 81 | } else { 82 | assert(false && "Unknown solveType"); 83 | } 84 | 85 | if (settings.parameters.slam.loopClosureRansacFixScale) 86 | s21Ransac = 1; 87 | 88 | s12Ransac = 1 / s21Ransac; 89 | 90 | R12Ransac = R21Ransac.transpose(); 91 | t12Ransac = -s12Ransac * R12Ransac * t21Ransac; 92 | 93 | 94 | unsigned int num_inliers = count_inliers(R12Ransac, t12Ransac, s12Ransac, 95 | R21Ransac, t21Ransac, s21Ransac, 96 | inliers); 97 | 98 | if (bestInlierCount < num_inliers) { 99 | bestInlierCount = num_inliers; 100 | bestR12 = R12Ransac; 101 | bestT12 = t12Ransac; 102 | bestScale12 = s12Ransac; 103 | bestInliers = inliers; 104 | } 105 | } 106 | 107 | if (bestInlierCount >= minInlierCount) { 108 | solutionOk = true; 109 | } 110 | } 111 | 112 | void computeSim3( 113 | const Eigen::Matrix3d &pts_1, 114 | const Eigen::Matrix3d &pts_2, 115 | Eigen::Matrix3d &rot_21, 116 | Eigen::Vector3d &trans_21, 117 | float &scale_21 118 | ) { 119 | // Based on "Closed-form solution of absolute orientation using unit quaternions" 120 | // http://people.csail.mit.edu/bkph/papers/Absolute_Orientation.pdf 121 | 122 | // 各点集合のcentroidを求める 123 | const Eigen::Vector3d centroid_1 = pts_1.rowwise().mean(); 124 | const Eigen::Vector3d centroid_2 = pts_2.rowwise().mean(); 125 | 126 | // 分布の中心をcentroidに動かす 127 | Eigen::Matrix3d ave_pts_1 = pts_1; 128 | ave_pts_1.colwise() -= centroid_1; 129 | Eigen::Matrix3d ave_pts_2 = pts_2; 130 | ave_pts_2.colwise() -= centroid_2; 131 | 132 | // 4.A Matrix of Sums of Products 133 | 134 | // 行列Mを求める 135 | const Eigen::Matrix3d M = ave_pts_1 * ave_pts_2.transpose(); 136 | 137 | // 行列Nを求める 138 | const double& Sxx = M(0, 0); 139 | 
const double& Syx = M(1, 0); 140 | const double& Szx = M(2, 0); 141 | const double& Sxy = M(0, 1); 142 | const double& Syy = M(1, 1); 143 | const double& Szy = M(2, 1); 144 | const double& Sxz = M(0, 2); 145 | const double& Syz = M(1, 2); 146 | const double& Szz = M(2, 2); 147 | Eigen::Matrix4d N; 148 | N << (Sxx + Syy + Szz), (Syz - Szy), (Szx - Sxz), (Sxy - Syx), 149 | (Syz - Szy), (Sxx - Syy - Szz), (Sxy + Syx), (Szx + Sxz), 150 | (Szx - Sxz), (Sxy + Syx), (-Sxx + Syy - Szz), (Syz + Szy), 151 | (Sxy - Syx), (Szx + Sxz), (Syz + Szy), (-Sxx - Syy + Szz); 152 | 153 | // 4.B Eigenvector Maximizes Matrix Product 154 | 155 | // Nを固有値分解する 156 | Eigen::EigenSolver eigensolver(N); 157 | 158 | // 最大固有値を探す 159 | const auto& eigenvalues = eigensolver.eigenvalues(); 160 | int max_idx = -1; 161 | double max_eigenvalue = -INFINITY; 162 | for (int idx = 0; idx < 4; ++idx) { 163 | if (max_eigenvalue <= eigenvalues(idx, 0).real()) { 164 | max_eigenvalue = eigenvalues(idx, 0).real(); 165 | max_idx = idx; 166 | } 167 | } 168 | const auto max_eigenvector = eigensolver.eigenvectors().col(max_idx); 169 | 170 | // 複素数なので実数のみ取り出す 171 | Eigen::Vector4d eigenvector; 172 | eigenvector << max_eigenvector(0, 0).real(), max_eigenvector(1, 0).real(), max_eigenvector(2, 0).real(), max_eigenvector(3, 0).real(); 173 | eigenvector.normalize(); 174 | 175 | // unit quaternionにする 176 | Eigen::Quaterniond q_rot_21(eigenvector(0), eigenvector(1), eigenvector(2), eigenvector(3)); 177 | 178 | // 回転行列に変換 179 | rot_21 = q_rot_21.normalized().toRotationMatrix(); 180 | 181 | // 2.D Finding the Scale 182 | 183 | // averaged points 1をpoints 2の座標系に変換(回転のみ) 184 | const Eigen::Matrix3d ave_pts_1_in_2 = rot_21 * ave_pts_1; 185 | 186 | // 分母 187 | const double denom = ave_pts_1.squaredNorm(); 188 | // 分子 189 | const double numer = ave_pts_2.cwiseProduct(ave_pts_1_in_2).sum(); 190 | // スケール 191 | scale_21 = numer / denom; 192 | 193 | // 2.C Centroids of the Sets of Measurements 194 | 195 | trans_21 = centroid_2 - 
scale_21 * rot_21 * centroid_1; 196 | } 197 | 198 | unsigned int LoopRansac::count_inliers(const Eigen::Matrix3d& rot_12, const Eigen::Vector3d& trans_12, const float scale_12, 199 | const Eigen::Matrix3d& rot_21, const Eigen::Vector3d& trans_21, const float scale_21, 200 | std::vector& inliers) { 201 | unsigned int num_inliers = 0; 202 | inliers.resize(matchCount, false); 203 | std::vector visible1; 204 | std::vector visible2; 205 | 206 | vecVector2d reprojected_1_in_cam_2 = 207 | reproject_to_other_image(commonPtsInKeyframe1, rot_21, trans_21, scale_21, camera2, visible1); 208 | 209 | vecVector2d reprojected_2_in_cam_1 = 210 | reproject_to_other_image(commonPtsInKeyframe2, rot_12, trans_12, scale_12, camera1, visible2); 211 | 212 | for (unsigned int i = 0; i < matchCount; ++i) { 213 | if (!visible1[i] || !visible2[i] || !visibleSame1[i] || !visibleSame2[i]) { 214 | continue; 215 | } 216 | const Eigen::Vector2d dist_in_2 = (reprojected_1_in_cam_2.at(i) - reprojected2.at(i)); 217 | const Eigen::Vector2d dist_in_1 = (reprojected_2_in_cam_1.at(i) - reprojected1.at(i)); 218 | 219 | const double error_in_2 = dist_in_2.dot(dist_in_2); 220 | const double error_in_1 = dist_in_1.dot(dist_in_1); 221 | 222 | if (error_in_2 < chiSqSigmaSq2.at(i) && error_in_1 < chiSqSigmaSq1.at(i)) { 223 | inliers.at(i) = true; 224 | ++num_inliers; 225 | } 226 | } 227 | 228 | return num_inliers; 229 | } 230 | 231 | vecVector2d 232 | LoopRansac::reproject_to_other_image( 233 | const vecVector3d &lm_coords_in_cam_1, 234 | const Eigen::Matrix3d &rot_21, 235 | const Eigen::Vector3d &trans_21, 236 | const float scale_21, 237 | const tracker::Camera &camera, 238 | std::vector &visible 239 | ) { 240 | visible.clear(); 241 | vecVector2d reprojected_in_cam_2; 242 | reprojected_in_cam_2.reserve(lm_coords_in_cam_1.size()); 243 | 244 | for (const auto &lm_coord_in_cam_1 : lm_coords_in_cam_1) { 245 | Eigen::Vector2d reproj_in_cam_2 = Eigen::Vector2d::Zero(); 246 | float x_right = 0.0; 247 | bool v = 
reprojectToImage(camera, scale_21 * rot_21, trans_21, lm_coord_in_cam_1, reproj_in_cam_2, x_right); 248 | 249 | visible.push_back(v); 250 | reprojected_in_cam_2.push_back(reproj_in_cam_2); 251 | } 252 | 253 | return reprojected_in_cam_2; 254 | } 255 | 256 | vecVector2d 257 | LoopRansac::reproject_to_same_image( 258 | const vecVector3d &lm_coords_in_cam, 259 | const tracker::Camera &camera, 260 | std::vector &visible 261 | ) { 262 | visible.clear(); 263 | vecVector2d reprojected; 264 | reprojected.reserve(lm_coords_in_cam.size()); 265 | 266 | for (const auto &lm_coord_in_cam : lm_coords_in_cam) { 267 | Eigen::Vector2d reproj = Eigen::Vector2d::Zero(); 268 | float x_right = 0.0; 269 | bool v = reprojectToImage(camera, Eigen::Matrix3d::Identity(), Eigen::Vector3d::Zero(), lm_coord_in_cam, reproj, x_right); 270 | 271 | visible.push_back(v); 272 | reprojected.push_back(reproj); 273 | } 274 | return reprojected; 275 | } 276 | 277 | void computeRotZ( 278 | const Eigen::Matrix3d &pts1, 279 | const Eigen::Matrix3d &pts2, 280 | Eigen::Matrix3d &r21, 281 | Eigen::Vector3d &t21, 282 | float &s21 283 | ) { 284 | // Based on 5.A "Coplanar points" in 285 | // http://people.csail.mit.edu/bkph/papers/Absolute_Orientation.pdf 286 | 287 | const Eigen::Vector3d centroid1 = pts1.rowwise().mean(); 288 | const Eigen::Vector3d centroid2 = pts2.rowwise().mean(); 289 | 290 | Eigen::Matrix3d centered1 = pts1.colwise() - centroid1; 291 | Eigen::Matrix3d centered2 = pts2.colwise() - centroid2; 292 | 293 | double C = (centered1.topRows<2>().cwiseProduct(centered2.topRows<2>())).sum(); 294 | 295 | double S = 0; 296 | for (int i = 0; i < 3; i++) { 297 | auto p1 = centered1.col(i).head<2>(); 298 | auto p2 = centered2.col(i).head<2>(); 299 | S += p1.x() * p2.y() - p1.y() * p2.x(); 300 | } 301 | 302 | double cosTheta = C / std::sqrt(C*C + S*S); 303 | double sinTheta = S / std::sqrt(C*C + S*S); 304 | 305 | r21.setZero(); 306 | r21(2,2) = 1; 307 | r21.topLeftCorner<2,2>() << cosTheta, -sinTheta, 
sinTheta, cosTheta; 308 | 309 | s21 = (centered2.cwiseProduct(r21 * centered1)).sum() / centered1.squaredNorm(); 310 | 311 | t21 = centroid2 - s21 * r21 * centroid1; 312 | } 313 | 314 | } // namespace slam 315 | -------------------------------------------------------------------------------- /loop_ransac.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_LOOPRANSAC_HPP 2 | #define SLAM_LOOPRANSAC_HPP 3 | 4 | #include "keyframe.hpp" 5 | #include "mapdb.hpp" 6 | #include "static_settings.hpp" 7 | 8 | #include "../odometry/util.hpp" 9 | 10 | namespace slam { 11 | 12 | class LoopRansac { 13 | public: 14 | LoopRansac( 15 | const Keyframe &kf1, 16 | const Keyframe &kf2, 17 | const std::vector> &matches, 18 | const MapDB &mapDB1, 19 | const MapDB &mapDB2, 20 | const StaticSettings &settings 21 | ); 22 | 23 | enum class DoF { 24 | SIM3, ZROT 25 | }; 26 | 27 | void ransacSolve(const unsigned int max_num_iter, DoF dof = DoF::ZROT); 28 | 29 | unsigned int count_inliers( 30 | const Eigen::Matrix3d &rot_12, 31 | const Eigen::Vector3d &trans_12, 32 | const float scale_12, 33 | const Eigen::Matrix3d &rot_21, 34 | const Eigen::Vector3d &trans_21, 35 | const float scale_21, 36 | std::vector &inliers 37 | ); 38 | 39 | vecVector2d reproject_to_same_image( 40 | const vecVector3d &lm_coords_in_cam, 41 | const tracker::Camera &camera, 42 | std::vector &visible 43 | ); 44 | 45 | vecVector2d reproject_to_other_image( 46 | const vecVector3d &lm_coords_in_cam_1, 47 | const Eigen::Matrix3d &rot_21, 48 | const Eigen::Vector3d &trans_21, 49 | const float scale_21, 50 | const tracker::Camera &camera, 51 | std::vector &visible 52 | ); 53 | 54 | const tracker::Camera &camera1; 55 | const tracker::Camera &camera2; 56 | 57 | // Local coordinates in kf1 and kf2 of matched mapPoints 58 | vecVector3d commonPtsInKeyframe1; 59 | vecVector3d commonPtsInKeyframe2; 60 | 61 | std::vector visibleSame1; 62 | std::vector visibleSame2; 63 | 64 | // Chi-square 
values with two degrees of freedom for reprojection errors 65 | std::vector chiSqSigmaSq1; 66 | std::vector chiSqSigmaSq2; 67 | 68 | unsigned int matchCount = 0; 69 | 70 | //! solution is valid or not 71 | bool solutionOk = false; 72 | //! most reliable rotation from keyframe 2 to keyframe 1 73 | Eigen::Matrix3d bestR12; 74 | //! most reliable translation from keyframe 2 to keyframe 1 75 | Eigen::Vector3d bestT12; 76 | //! most reliable scale from keyframe 2 to keyframe 1 77 | float bestScale12; 78 | std::vector bestInliers; 79 | unsigned int bestInlierCount; 80 | 81 | // Image coordinates of reporjected mapPoints 82 | vecVector2d reprojected1; 83 | vecVector2d reprojected2; 84 | 85 | const StaticSettings &settings; 86 | }; 87 | 88 | /** 89 | * Compute Sim3 transformation between two sets of points. 90 | * pts2 = s21 * r21 * pts1 + t21 91 | */ 92 | void computeSim3( 93 | const Eigen::Matrix3d &pts_1, 94 | const Eigen::Matrix3d &pts_2, 95 | Eigen::Matrix3d &rot_21, 96 | Eigen::Vector3d &trans_21, 97 | float &scale_21); 98 | 99 | /** 100 | * Compute transformation between two sets of points. 
101 | * Transformation limited to only: rotation around z-axis + translation 102 | * pts2 = r21 * pts1 + t21 103 | */ 104 | void computeRotZ( 105 | const Eigen::Matrix3d &pts1W, 106 | const Eigen::Matrix3d &pts2W, 107 | Eigen::Matrix3d &r21, 108 | Eigen::Vector3d &t21, 109 | float &s21); 110 | 111 | } // namespace slam 112 | 113 | #endif // SLAM_LOOPRANSAC_HPP 114 | -------------------------------------------------------------------------------- /map_point.cpp: -------------------------------------------------------------------------------- 1 | #include "map_point.hpp" 2 | #include "mapdb.hpp" 3 | #include "keyframe.hpp" 4 | 5 | #include "openvslam/match_base.h" 6 | 7 | using Eigen::Vector3d; 8 | 9 | namespace slam { 10 | 11 | MapPoint::MapPoint(MpId id, KfId keyframeId, KpId keyPointId) : 12 | id(id), referenceKeyframe(keyframeId) { 13 | assert(keyframeId.v != -1 && "Cannot create MapPoint without reference keyframe"); 14 | addObservation(keyframeId, keyPointId); 15 | position = Eigen::Vector3d::Zero(); 16 | norm = Eigen::Vector3f::Zero(); 17 | } 18 | 19 | // For cereal. 20 | MapPoint::MapPoint() {} 21 | 22 | MapPoint::MapPoint(const MapPoint mapPoint, const std::set &activeKeyframes) { 23 | id = mapPoint.id; 24 | trackId = mapPoint.trackId; 25 | status = mapPoint.status; 26 | position = mapPoint.position; 27 | norm = mapPoint.norm; 28 | minViewingDistance = mapPoint.minViewingDistance; 29 | maxViewingDistance = mapPoint.maxViewingDistance; 30 | descriptor = mapPoint.descriptor; 31 | 32 | std::copy_if(mapPoint.observations.begin(), mapPoint.observations.end(), std::inserter(observations, observations.begin()), 33 | [activeKeyframes](std::pair const& pair) { 34 | return activeKeyframes.count(pair.first); 35 | } 36 | ); 37 | 38 | if (activeKeyframes.count(mapPoint.referenceKeyframe)) { 39 | referenceKeyframe = mapPoint.referenceKeyframe; 40 | } else { 41 | referenceKeyframe = observations.begin()->first; // TODO: Just gets first observation, could do better here? 
42 | } 43 | } 44 | 45 | static KfId getFirstOrLastObservation(const MapPoint &mp, bool first = true) { 46 | assert(!mp.observations.empty() && "Every MapPoint should have at least one observation"); 47 | 48 | using P = decltype(mp.observations)::value_type; 49 | 50 | return std::min_element( 51 | mp.observations.begin(), 52 | mp.observations.end(), 53 | [first](const P &p1, const P &p2) { return (p1.first.v < p2.first.v) == first; } 54 | )->first; 55 | } 56 | 57 | KfId MapPoint::getFirstObservation() const { 58 | return getFirstOrLastObservation(*this, true); 59 | } 60 | 61 | KfId MapPoint::getLastObservation() const { 62 | return getFirstOrLastObservation(*this, false); 63 | } 64 | 65 | void MapPoint::addObservation(KfId keyframeId, KpId keyPointId) { 66 | assert(!observations.count(keyframeId)); 67 | observations.emplace(keyframeId, keyPointId); 68 | } 69 | 70 | void MapPoint::eraseObservation(KfId keyframeId) { 71 | assert(observations.count(keyframeId)); 72 | observations.erase(keyframeId); 73 | } 74 | 75 | void MapPoint::updateDescriptor(const MapDB &mapDB) { 76 | std::vector descriptors; 77 | descriptors.reserve(observations.size()); 78 | for (const auto& obs : observations) { 79 | const auto &kf = *mapDB.keyframes.at(obs.first); 80 | if (kf.hasFeatureDescriptors()) { 81 | const auto &kp = kf.shared->keyPoints.at(obs.second.v); 82 | descriptors.push_back(kp.descriptor); 83 | } 84 | } 85 | 86 | if (descriptors.empty()) return; 87 | 88 | // Get median of Hamming distance 89 | // Calculate all the Hamming distances between every pair of the features 90 | const auto num_descs = descriptors.size(); 91 | std::vector> hamm_dists(num_descs, std::vector(num_descs)); 92 | for (unsigned int i = 0; i < num_descs; ++i) { 93 | hamm_dists.at(i).at(i) = 0; 94 | for (unsigned int j = i + 1; j < num_descs; ++j) { 95 | const auto dist = openvslam::match::compute_descriptor_distance_32(descriptors.at(i).data(), descriptors.at(j).data()); 96 | hamm_dists.at(i).at(j) = dist; 97 | 
hamm_dists.at(j).at(i) = dist; 98 | } 99 | } 100 | 101 | // Get the nearest value to median 102 | unsigned int best_median_dist = openvslam::match::MAX_HAMMING_DIST; 103 | unsigned int best_idx = 0; 104 | for (unsigned idx = 0; idx < num_descs; ++idx) { 105 | std::vector partial_hamm_dists(hamm_dists.at(idx).begin(), hamm_dists.at(idx).begin() + num_descs); 106 | std::sort(partial_hamm_dists.begin(), partial_hamm_dists.end()); 107 | const auto median_dist = partial_hamm_dists.at(static_cast(0.5 * (num_descs - 1))); 108 | 109 | if (median_dist < best_median_dist) { 110 | best_median_dist = median_dist; 111 | best_idx = idx; 112 | } 113 | } 114 | 115 | descriptor = descriptors.at(best_idx); 116 | } 117 | 118 | void MapPoint::replaceWith(MapDB &mapDB, MapPoint &otherMp) { 119 | assert(this->id.v != -1); 120 | assert(mapDB.mapPoints.count(this->id)); 121 | assert(otherMp.id.v != -1); 122 | assert(mapDB.mapPoints.count(otherMp.id)); 123 | 124 | if (otherMp.id == this->id) { 125 | return; 126 | } 127 | 128 | if (trackId.v != -1) { 129 | if (otherMp.trackId.v == -1) { 130 | mapDB.trackIdToMapPoint.at(trackId) = otherMp.id; 131 | otherMp.trackId = trackId; 132 | } else { 133 | mapDB.trackIdToMapPoint.erase(trackId); 134 | } 135 | } 136 | 137 | for (const auto& kfIdKeypointId : observations) { 138 | KfId kfId = kfIdKeypointId.first; 139 | KpId keyPointId = kfIdKeypointId.second; 140 | Keyframe &kf = *mapDB.keyframes.at(kfId); 141 | 142 | if (kf.keyPointToTrackId.count(keyPointId)) { 143 | kf.keyPointToTrackId.erase(keyPointId); 144 | } 145 | 146 | if (!otherMp.observations.count(kfId)) { 147 | kf.mapPoints[keyPointId.v] = otherMp.id; 148 | otherMp.addObservation(kfId, keyPointId); 149 | } else { 150 | kf.mapPoints[keyPointId.v] = MpId(-1); 151 | } 152 | } 153 | 154 | status = MapPointStatus::BAD; 155 | mapDB.mapPoints.erase(this->id); 156 | } 157 | 158 | void MapPoint::updateDistanceAndNorm(const MapDB &mapDB, const StaticSettings &settings) { 159 | Vector3d normSum = 
Vector3d::Zero(); 160 | for (const auto &kfIdKp : observations) { 161 | const auto &kf = *mapDB.keyframes.at(kfIdKp.first); 162 | normSum += (kf.cameraCenter() - position).normalized(); 163 | } 164 | norm = normSum.cast() / observations.size(); 165 | 166 | const auto &firstKf = *mapDB.keyframes.at(getFirstObservation()); 167 | const float dist = (firstKf.cameraCenter() - position).norm(); 168 | const auto &kp = firstKf.shared->keyPoints[observations.at(firstKf.id).v]; 169 | 170 | maxViewingDistance = dist * settings.scaleFactors[kp.octave]; 171 | minViewingDistance = dist * settings.scaleFactors[kp.octave] / settings.scaleFactors.back(); 172 | } 173 | 174 | int MapPoint::predictScaleLevel(float dist, const StaticSettings &settings) const { 175 | const float ratio = maxViewingDistance / dist; 176 | 177 | int scale = std::ceil(std::log(ratio)/std::log(settings.parameters.slam.orbScaleFactor)); 178 | 179 | return std::min( 180 | std::max(scale, 0), 181 | static_cast(settings.scaleFactors.size() - 1) 182 | ); 183 | } 184 | 185 | } // namespace slam 186 | -------------------------------------------------------------------------------- /map_point.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_MAP_POINT_HPP 2 | #define SLAM_MAP_POINT_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "id.hpp" 13 | #include "key_point.hpp" 14 | #include "static_settings.hpp" 15 | 16 | namespace slam { 17 | 18 | class Keyframe; 19 | class MapDB; 20 | 21 | enum class MapPointStatus { TRIANGULATED, NOT_TRIANGULATED, UNSURE, BAD }; 22 | 23 | class MapPoint { 24 | public: 25 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 26 | 27 | MapPoint(MpId id, KfId keyframeId, KpId keyPointId); 28 | MapPoint(); 29 | MapPoint(const MapPoint mapPoint, const std::set &activeKeyframes); // Copy MapPoint and remove links to inactive keyframes 30 | 31 | KfId getFirstObservation() const; 32 | KfId 
getLastObservation() const; 33 | 34 | void addObservation(KfId keyframeId, KpId keyPointId); 35 | 36 | void eraseObservation(KfId keyframeId); 37 | 38 | /** 39 | * Update mapPoint descriptor to the featuredescriptor closest to the median 40 | * of all featuredescriptors. 41 | * 42 | * Implementation from `openvslam::landmark::compute_descriptor()` 43 | */ 44 | void updateDescriptor(const MapDB &mapDB); 45 | 46 | void updateDistanceAndNorm(const MapDB &mapDB, const StaticSettings &settings); 47 | 48 | void replaceWith(MapDB &mapDB, MapPoint &otherMp); 49 | 50 | int predictScaleLevel(float dist, const StaticSettings &settings) const; 51 | 52 | /** Unique ID of this map point */ 53 | MpId id; 54 | 55 | /** Which odometry track ID does this correspond to, -1 if none */ 56 | TrackId trackId = TrackId(-1); 57 | 58 | MapPointStatus status = MapPointStatus::NOT_TRIANGULATED; 59 | Eigen::Vector3d position; 60 | 61 | // Viewing direction 62 | Eigen::Vector3f norm; 63 | float minViewingDistance = 0; 64 | float maxViewingDistance = 30; 65 | 66 | KeyPoint::Descriptor descriptor; 67 | 68 | /** 69 | * Observations of this map_point (keyframeId, keyPoint Id in keyframe) 70 | */ 71 | std::map observations; 72 | 73 | KfId referenceKeyframe; 74 | 75 | cv::Vec3b color; // for visualization / debugging purposes 76 | 77 | template 78 | void serialize(Archive &ar) { 79 | ar( 80 | id, 81 | trackId, 82 | status, 83 | position, 84 | norm, 85 | minViewingDistance, 86 | maxViewingDistance, 87 | descriptor, 88 | observations, 89 | referenceKeyframe, 90 | color 91 | ); 92 | } 93 | }; 94 | 95 | } // namespace slam 96 | 97 | #endif //SLAM_MAP_POINT_HPP 98 | -------------------------------------------------------------------------------- /mapdb.cpp: -------------------------------------------------------------------------------- 1 | #include "mapdb.hpp" 2 | #include "../util/logging.hpp" 3 | #include "../odometry/util.hpp" 4 | 5 | namespace { 6 | Eigen::Matrix4d 
removeZAxisTiltComparedToReference(const Eigen::Matrix4d &poseCW, const Eigen::Matrix4d &refPose) { 7 | const Eigen::Matrix3d poseRot = refPose.topLeftCorner<3,3>(); 8 | return odometry::util::replacePoseOrientationKeepPosition( 9 | poseCW, 10 | poseRot * odometry::util::removeRotationMatrixZTilt( 11 | poseRot.inverse() * 12 | poseCW.topLeftCorner<3,3>())); 13 | } 14 | 15 | const slam::Pose *findInPoseTrail(const std::vector &poseTrail, slam::KfId targetKeyframeId) { 16 | for (const auto &pose : poseTrail) { 17 | if (targetKeyframeId == slam::KfId(pose.frameNumber)) { 18 | return &pose; 19 | } 20 | } 21 | return nullptr; 22 | } 23 | 24 | /*void debugPrintPoseTrail(const std::vector &poseTrail) { 25 | std::string poseTrailStr = ""; 26 | for (const auto &p : poseTrail) { 27 | poseTrailStr += std::to_string(p.frameNumber) + ", "; 28 | }; 29 | log_debug("current pose trail: %s", poseTrailStr.c_str()); 30 | }*/ 31 | } 32 | 33 | namespace slam { 34 | 35 | std::shared_ptr MapDB::insertNewKeyframeCandidate( 36 | std::unique_ptr keyframeUniq, 37 | bool keyframeDecision, 38 | const std::vector &poseTrail, 39 | const odometry::ParametersSlam ¶meters) 40 | { 41 | Eigen::Matrix4d pose; 42 | std::shared_ptr keyframe = std::move(keyframeUniq); 43 | Keyframe *previousKf = latestKeyframe(); 44 | // debugPrintPoseTrail(poseTrail); 45 | 46 | if (prevPoseKfId.v < 0) { 47 | pose = keyframe->origPoseCW; 48 | // Tip: Use this to change direction of SLAM map and debug transform issues. 
49 | // pose.topLeftCorner<3, 3>() = pose.topLeftCorner<3, 3>() * Eigen::AngleAxisd(0.5 * M_PI, Vector3d::UnitZ()); 50 | // pose.block<3, 1>(0, 3) << 4, 1, -2; 51 | } 52 | else { 53 | assert(previousKf); 54 | // this "two-step" mechanism may be required to handle loop closures correctly 55 | Eigen::Matrix4d refPose = prevPose; 56 | if (parameters.useVariableLengthDeltas) { 57 | refPose = prevPoseToPrevKeyframeDelta * previousKf->poseCW; 58 | } 59 | 60 | Eigen::Matrix4d poseTilted, refPrevPose; 61 | refPrevPose = prevInputPose; 62 | if (parameters.useOdometryPoseTrailDelta) { 63 | // note that this can fail if in certain cases with long 64 | // stationarity and no new keyframes unless special care is taken 65 | const auto *prevPoseInPoseTrail = findInPoseTrail(poseTrail, prevPoseKfId); 66 | if (prevPoseInPoseTrail == nullptr) { 67 | log_debug("keyframe %d not found in pose trail", prevPoseKfId.v); 68 | } else { 69 | assert(KfId(prevPoseInPoseTrail->frameNumber) != keyframe->id); 70 | refPrevPose = prevPoseInPoseTrail->pose; 71 | } 72 | } 73 | poseTilted = keyframe->origPoseCW * refPrevPose.inverse() * refPose; 74 | 75 | if (parameters.removeOdometryTransformZAxisTilt) { 76 | pose = removeZAxisTiltComparedToReference(poseTilted, keyframe->origPoseCW); 77 | } else { 78 | pose = poseTilted; 79 | } 80 | } 81 | 82 | keyframe->poseCW = pose; 83 | 84 | if (previousKf) { 85 | keyframe->previousKfId = previousKf->id; 86 | previousKf->nextKfId = keyframe->id; 87 | } 88 | 89 | lastKfCandidateId = keyframe->id; 90 | if (keyframeDecision) { 91 | lastKfId = keyframe->id; 92 | } 93 | 94 | keyframes[keyframe->id] = keyframe; 95 | return keyframe; 96 | } 97 | 98 | MapDB::MapDB(const MapDB &mapDB) { 99 | for (const auto &kfP : mapDB.keyframes) { 100 | keyframes.emplace(kfP.second->id, std::make_unique(*kfP.second)); 101 | } 102 | 103 | mapPoints = mapDB.mapPoints; 104 | trackIdToMapPoint = mapDB.trackIdToMapPoint; 105 | loopClosureEdges = mapDB.loopClosureEdges; 106 | 107 | prevPose 
= mapDB.prevPose; 108 | prevInputPose = mapDB.prevInputPose; 109 | discardedUncertainty = mapDB.discardedUncertainty; 110 | 111 | nextMp = mapDB.nextMp; 112 | lastKfCandidateId = mapDB.lastKfCandidateId; 113 | lastKfId = mapDB.lastKfId; 114 | } 115 | 116 | MapDB::MapDB(const MapDB &mapDB, const std::set &activeKeyframes) { 117 | std::set activeMapPoints; 118 | 119 | for (auto &kfId : activeKeyframes) { 120 | auto &origKf = mapDB.keyframes.at(kfId); 121 | auto kf = ( 122 | *keyframes.emplace(origKf->id, std::make_unique(*origKf)).first 123 | ).second.get(); 124 | if (kf->nextKfId.v >= 0 && !activeKeyframes.count(kf->nextKfId)) { 125 | kf->nextKfId = KfId(-1); 126 | } 127 | if (kf->previousKfId.v >= 0 && !activeKeyframes.count(kf->previousKfId)) { 128 | kf->previousKfId = KfId(-1); 129 | } 130 | for (auto mpId : kf->mapPoints) { 131 | if (mpId.v >= 0) { 132 | activeMapPoints.insert(mpId); 133 | } 134 | } 135 | } 136 | 137 | for (auto &mpId : activeMapPoints) { 138 | mapPoints.emplace(mpId, MapPoint(mapDB.mapPoints.at(mpId), activeKeyframes)); 139 | } 140 | 141 | std::copy_if(mapDB.trackIdToMapPoint.begin(), mapDB.trackIdToMapPoint.end(), std::inserter(trackIdToMapPoint, trackIdToMapPoint.begin()), 142 | [activeMapPoints](std::pair const& pair) { 143 | return activeMapPoints.count(pair.second); 144 | } 145 | ); 146 | 147 | // loopClosureEdges = mapDB.loopClosureEdges; // TODO: Required? 148 | // visualizedLoopClosureCandidates = mapDB.visualizedLoopClosureCandidates; // TODO: Required? 
149 | 150 | prevPose = mapDB.prevPose; 151 | prevInputPose = mapDB.prevInputPose; 152 | prevPoseToPrevKeyframeDelta = mapDB.prevPoseToPrevKeyframeDelta; 153 | discardedUncertainty = mapDB.discardedUncertainty; 154 | 155 | nextMp = mapDB.nextMp; 156 | prevPoseKfId = mapDB.prevPoseKfId; 157 | lastKfCandidateId = mapDB.lastKfCandidateId; 158 | lastKfId = mapDB.lastKfId; 159 | } 160 | 161 | std::map::iterator 162 | MapDB::removeMapPoint(const MapPoint &mapPoint) { 163 | for (const auto &it : mapPoint.observations) { 164 | keyframes.at(it.first)->eraseObservation(mapPoint.id); 165 | } 166 | 167 | if (mapPoint.trackId.v != -1) { 168 | assert(trackIdToMapPoint.at(mapPoint.trackId) == mapPoint.id); 169 | trackIdToMapPoint.erase(mapPoint.trackId); 170 | } 171 | 172 | // in the rare case the keyframe would be empty of observations, do nothing 173 | return mapPoints.erase(mapPoints.find(mapPoint.id)); 174 | } 175 | 176 | MpId MapDB::nextMpId() { 177 | nextMp++; 178 | return MpId(nextMp - 1); 179 | } 180 | 181 | std::pair MapDB::maxIds() const { 182 | KfId maxKfId(-1); 183 | MpId maxMpId(-1); 184 | for (const auto &it : keyframes) { 185 | if (it.first > maxKfId) maxKfId = it.first; 186 | } 187 | for (const auto &it : mapPoints) { 188 | if (it.first > maxMpId) maxMpId = it.first; 189 | } 190 | return { maxKfId, maxMpId }; 191 | } 192 | 193 | void MapDB::mergeMapPoints(MpId mpId1, MpId mpId2) { 194 | assert(mpId1 != mpId2); 195 | const MpId first = mpId1 < mpId2 ? mpId1 : mpId2; 196 | const MpId last = mpId1 < mpId2 ? 
mpId2 : mpId1; 197 | const auto &firstMpIt = mapPoints.find(first); 198 | const auto &lastMpIt = mapPoints.find(last); 199 | assert(firstMpIt != mapPoints.end()); 200 | assert(lastMpIt != mapPoints.end()); 201 | TrackId lastTrackId = lastMpIt->second.trackId; 202 | for (auto &it : keyframes) { 203 | Keyframe &keyframe = *it.second; 204 | assert(keyframe.mapPoints.size() == keyframe.shared->keyPoints.size()); 205 | for (size_t i = 0; i < keyframe.mapPoints.size(); ++i) { 206 | KpId kpId(i); 207 | if (keyframe.mapPoints[i] == last) { 208 | keyframe.mapPoints[i] = first; 209 | if (keyframe.keyPointToTrackId.count(kpId)) { 210 | keyframe.keyPointToTrackId.at(kpId) = firstMpIt->second.trackId; 211 | } 212 | firstMpIt->second.observations.at(keyframe.id) = kpId; 213 | break; 214 | } 215 | } 216 | } 217 | 218 | if (lastTrackId.v != -1) { 219 | trackIdToMapPoint.erase(trackIdToMapPoint.find(lastTrackId)); 220 | } 221 | mapPoints.erase(lastMpIt); 222 | } 223 | 224 | Eigen::Matrix4d MapDB::poseDifference(KfId kfId1, KfId kfId2) const { 225 | assert(kfId1 <= kfId2); 226 | const Keyframe &kf1 = *keyframes.at(kfId1); 227 | const Keyframe &kf2 = *keyframes.at(kfId2); 228 | return kf1.origPoseCW * kf2.origPoseCW.inverse(); 229 | } 230 | 231 | void MapDB::updatePrevPose( 232 | const Keyframe ¤tKeyframe, 233 | bool keyframeDecision, 234 | const std::vector &poseTrail, 235 | const odometry::Parameters ¶meters) 236 | { 237 | if (!keyframeDecision && parameters.slam.useVariableLengthDeltas && findInPoseTrail(poseTrail, prevPoseKfId) == nullptr) { 238 | log_debug("prevPoseKfId %d lost in pose trail: must update", prevPoseKfId.v); 239 | // TODO: could rather try to update to an older pose isn the trail 240 | // TODO: such odometry non-keyframes should also be removed from the map 241 | keyframeDecision = true; 242 | } 243 | 244 | if (!keyframeDecision && parameters.slam.useVariableLengthDeltas) { 245 | // TODO: this is overly complex 246 | const int nextKfCandidateAge = 
currentKeyframe.id.v - prevPoseKfId.v + parameters.slam.keyframeCandidateInterval; 247 | assert(nextKfCandidateAge > 0); 248 | // TODO: +1? (not so serious here) 249 | const int maxPoseTrailSize = parameters.odometry.cameraTrailLength - std::max(0, parameters.slam.delayIntervalMultiplier) * parameters.slam.keyframeCandidateInterval; 250 | // log_debug("next %d/%d", nextKfCandidateAge, maxPoseTrailSize); 251 | if (parameters.slam.useOdometryPoseTrailDelta && nextKfCandidateAge >= maxPoseTrailSize) { 252 | log_debug("storing prevPose of non-KF %d: max pose trail length will be reached", currentKeyframe.id.v); 253 | } else { 254 | // log_debug(" skipping non-keyframe %d as prev pose", currentKeyframe.id.v); 255 | return; 256 | } 257 | } 258 | 259 | // log_debug("storing keyframe %d as prev pose", currentKeyframe.id.v); 260 | prevPoseKfId = currentKeyframe.id; 261 | prevInputPose = currentKeyframe.origPoseCW; 262 | prevPose = currentKeyframe.poseCW; 263 | 264 | const auto *prevKf = latestKeyframe(); 265 | assert(prevKf); 266 | prevPoseToPrevKeyframeDelta = prevPose * prevKf->poseCW.inverse(); 267 | } 268 | 269 | const MapDB& getMapWithId(MapId mapId, const MapDB &mapDB, const Atlas &atlas) { 270 | if (mapId == CURRENT_MAP_ID) return mapDB; 271 | return atlas[mapId.v]; 272 | } 273 | 274 | } // namespace slam 275 | -------------------------------------------------------------------------------- /mapdb.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_MAPDB_HPP 2 | #define SLAM_MAPDB_HPP 3 | 4 | #include 5 | 6 | #include "id.hpp" 7 | #include "keyframe.hpp" 8 | #include "loop_closer.hpp" 9 | #include "map_point.hpp" 10 | #include "../api/slam_map_point_record.hpp" 11 | 12 | #include 13 | #include 14 | 15 | namespace slam { 16 | 17 | class MapDB { 18 | public: 19 | 20 | MapDB() : 21 | discardedUncertainty(Eigen::MatrixXd::Zero(3, 6)), 22 | prevPose(Eigen::Matrix4d::Identity()), 23 | 
prevInputPose(Eigen::Matrix4d::Identity()) {}; 24 | MapDB(const MapDB &mapDB); // Copy constructor 25 | MapDB(const MapDB &mapDB, const std::set &activeKeyframes); // Copy constructor that only copies given frames 26 | 27 | std::map> keyframes; 28 | std::map mapPoints; 29 | std::map trackIdToMapPoint; 30 | std::vector loopClosureEdges; 31 | 32 | Eigen::MatrixXd discardedUncertainty; 33 | 34 | double firstKfTimestamp = -1.0; 35 | 36 | std::shared_ptr insertNewKeyframeCandidate( 37 | std::unique_ptr keyframe, 38 | bool keyframeDecision, 39 | const std::vector &poseTrail, 40 | const odometry::ParametersSlam ¶meters); 41 | 42 | std::map::iterator removeMapPoint(const MapPoint &mapPoint); 43 | 44 | MpId nextMpId(); 45 | KfId lastKeyframeCandidateId() const { return lastKfCandidateId; } 46 | Keyframe *latestKeyframe() { 47 | if (lastKfId.v >= 0) { 48 | if (!keyframes.count(lastKfId)) return nullptr; 49 | return keyframes.at(lastKfId).get(); 50 | } 51 | return nullptr; 52 | } 53 | std::pair maxIds() const; 54 | 55 | void mergeMapPoints(MpId mpId1, MpId mpId2); 56 | 57 | Eigen::Matrix4d poseDifference(KfId kfId1, KfId kfId2) const; 58 | 59 | void updatePrevPose( 60 | const Keyframe ¤tKeyframe, 61 | bool keyframeDecision, 62 | const std::vector &poseTrail, 63 | const odometry::Parameters ¶meters); 64 | 65 | // Visualization stuff stored here for convenience. 66 | std::map loopStages; 67 | std::vector adjacentKfIds; 68 | std::map mapPointRecords; 69 | 70 | private: 71 | Eigen::Matrix4d prevPose; 72 | Eigen::Matrix4d prevInputPose; 73 | Eigen::Matrix4d prevPoseToPrevKeyframeDelta = Eigen::Matrix4d::Identity(); 74 | int nextMp = 0; 75 | // id of the frame corresponding to prevPose & prevInput pose. May no longer exist 76 | KfId prevPoseKfId = KfId(-1); 77 | // id of the last inserted, thing, which may no longer exist 78 | KfId lastKfCandidateId = KfId(-1); 79 | // lastest keyframe ID. 
Should exist 80 | KfId lastKfId = KfId(-1); 81 | 82 | public: 83 | template 84 | void serialize(Archive &ar) { 85 | ar( 86 | keyframes, 87 | mapPoints, 88 | trackIdToMapPoint, 89 | loopClosureEdges, 90 | prevPose, 91 | prevInputPose, 92 | discardedUncertainty, 93 | firstKfTimestamp, 94 | nextMp, 95 | lastKfCandidateId, 96 | lastKfId 97 | ); 98 | } 99 | }; 100 | 101 | using Atlas = std::vector; 102 | 103 | const MapDB& getMapWithId(MapId mapId, const MapDB &mapDB, const Atlas &atlas); 104 | 105 | } // namespace slam 106 | 107 | #endif // SLAM_MAPDB_HPP 108 | -------------------------------------------------------------------------------- /mapper.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_MAPPER_HPP 2 | #define SLAM_MAPPER_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include "../api/slam.hpp" 9 | #include "slam_implementation.hpp" 10 | #include "keyframe.hpp" 11 | #include "map_point.hpp" 12 | #include "id.hpp" 13 | 14 | namespace odometry { struct Parameters; } 15 | namespace slam { 16 | 17 | class Keyframe; 18 | class ViewerDataPublisher; 19 | 20 | class Mapper { 21 | public: 22 | static std::unique_ptr 23 | create(const odometry::Parameters ¶meters); 24 | 25 | virtual ~Mapper() = default; 26 | 27 | /** 28 | * @param mapperInput Data for constructing input keyframe 29 | * @param resultPose output: The SLAM-corrected pose 30 | * @param pointCloud output: The currently visible map points 31 | */ 32 | virtual void advance( 33 | std::shared_ptr mapperInput, 34 | Eigen::Matrix4d &resultPose, 35 | Slam::Result::PointCloud &pointCloud) = 0; 36 | 37 | virtual void connectDebugAPI(DebugAPI &debug) = 0; 38 | 39 | virtual bool end(const std::string &mapSavePath) = 0; 40 | }; 41 | 42 | } // namespace slam 43 | 44 | #endif //SLAM_MAPPER_HPP 45 | -------------------------------------------------------------------------------- /mapper_helpers.hpp: 
-------------------------------------------------------------------------------- 1 | #ifndef SLAM_MAPPER_HELPERS_HPP 2 | #define SLAM_MAPPER_HELPERS_HPP 3 | 4 | #include 5 | #include 6 | 7 | #include "bow_index.hpp" 8 | #include "../api/slam.hpp" 9 | #include "map_point.hpp" 10 | #include "bundle_adjuster.hpp" 11 | #include "../odometry/util.hpp" 12 | #include "../util/util.hpp" 13 | #include "viewer_data_publisher.hpp" 14 | #include "static_settings.hpp" 15 | 16 | class CommandQueue; 17 | 18 | namespace slam { 19 | 20 | class ViewerDataPublisher; 21 | class LoopCloser; 22 | struct MapperInput; 23 | struct OrbExtractor; 24 | 25 | bool makeKeyframeDecision( 26 | const Keyframe ¤tKeyframe, 27 | const Keyframe *previousKeyframe, 28 | const std::vector ¤tTracks, 29 | const odometry::ParametersSlam ¶meters 30 | ); 31 | 32 | void matchTrackedFeatures( 33 | Keyframe ¤tKeyframe, 34 | MapDB &mapDB, 35 | const StaticSettings &settings 36 | ); 37 | 38 | /** 39 | * Compute a set of spatially nearby keyframes. This function attempts to improve over 40 | * the "neighbors" concept from ORBSLAM/OpenVSLAM, which considers only visual "neighborness". 41 | * This tends to leave holes in the keyframe sequences, which in some situations (local BA) is 42 | * especially bad for our method that has strong inertial information about successive keyframes. 43 | * 44 | * @param currentKeyframe The keyframe to search around, won't be included in return vector. 45 | * @param minCovisibilities Number of shared map point observations required to include a keyframe from a different "island". 46 | * @param maxKeyframes Return up to this many keyframes, dropping more distant ones. 47 | * @param mapDB 48 | * @param settings 49 | * @param visualize Produce visualizations from this call. 50 | * @return Vector of keyframes, sorted by ascending distance, not including the argument keyframe. 
51 | */ 52 | std::vector computeAdjacentKeyframes( 53 | const Keyframe ¤tKeyframe, 54 | int minCovisibilities, 55 | int maxKeyframes, 56 | const MapDB &mapDB, 57 | const StaticSettings &settings, 58 | bool visualize = false 59 | ); 60 | 61 | void matchLocalMapPoints( 62 | Keyframe ¤tKeyframe, 63 | const std::vector &adjacentKfIds, 64 | MapDB &mapDB, 65 | const StaticSettings &settings, 66 | ViewerDataPublisher *dataPublisher 67 | ); 68 | 69 | void createNewMapPoints( 70 | Keyframe ¤tKeyframe, 71 | const std::vector &adjacentKfIds, 72 | MapDB &mapDB, 73 | const StaticSettings &settings, 74 | ViewerDataPublisher *dataPublisher 75 | ); 76 | 77 | void deduplicateMapPoints( 78 | Keyframe ¤tKeyframe, 79 | const std::vector &adjacentKfIds, 80 | MapDB &mapDB, 81 | const StaticSettings &settings 82 | ); 83 | 84 | void cullMapPoints( 85 | Keyframe ¤tKeyframe, 86 | MapDB &mapDB, 87 | const odometry::ParametersSlam ¶meters 88 | ); 89 | 90 | void cullKeyframes( 91 | const std::vector &adjacentKfIds, 92 | MapDB &mapDB, 93 | BowIndex &bowIndex, 94 | const odometry::ParametersSlam ¶meters 95 | ); 96 | 97 | void checkConsistency(const MapDB &mapDB); 98 | 99 | bool checkPositiveDepth( 100 | const Eigen::Vector3d &positionW, 101 | const Eigen::Matrix4d &poseCW 102 | ); 103 | 104 | bool checkTriangulationAngle(const vecVector3d &raysW, double minAngleDeg); 105 | 106 | int getFocalLength(const Keyframe &kf); 107 | 108 | bool checkReprojectionError( 109 | const Eigen::Vector3d& pos, 110 | const Keyframe &kf, 111 | const StaticSettings &settings, 112 | KpId kpId, 113 | float relativeReprojectionErrorThreshold 114 | ); 115 | 116 | void triangulateMapPoint( 117 | MapDB &mapDB, 118 | MapPoint &mapPoint, 119 | const StaticSettings &settings, 120 | TriangulationMethod method = TriangulationMethod::TME 121 | ); 122 | 123 | void triangulateMapPointFirstLastObs( 124 | MapDB &mapDB, 125 | MapPoint &mapPoint, 126 | const StaticSettings &settings 127 | ); 128 | 129 | void publishMapForViewer( 130 | 
ViewerDataPublisher &dataPublisher, 131 | const WorkspaceBA *workspaceBA, 132 | const MapDB &mapDB, 133 | const odometry::ParametersSlam ¶meters 134 | ); 135 | 136 | Eigen::MatrixXd odometryPriorStrengths( 137 | KfId kfId1, 138 | KfId kfId2, 139 | const odometry::ParametersSlam ¶meters, 140 | const slam::MapDB &mapDB 141 | ); 142 | 143 | MapDB loadMapDB( 144 | MapId mapId, 145 | BowIndex &bowIndex, 146 | const std::string &loadPath 147 | ); 148 | 149 | void addKeyframeFrontend( 150 | MapDB &mapDB, 151 | std::unique_ptr keyframePtr, 152 | bool keyFrameDecision, 153 | const MapperInput &mapperInput, 154 | const StaticSettings &settings, 155 | Eigen::Matrix4d &resultPose, 156 | Slam::Result::PointCloud *resultPointCloud 157 | ); 158 | 159 | KfId addKeyframeBackend( 160 | MapDB &mapDB, 161 | std::unique_ptr keyframePtr, 162 | bool keyFrameDecision, 163 | const MapperInput &mapperInput, 164 | const StaticSettings &settings, 165 | WorkspaceBA &workspaceBA, 166 | LoopCloser &loopCloser, 167 | OrbExtractor &orbExtractor, 168 | BowIndex &bowIndex, 169 | CommandQueue *commands, 170 | ViewerDataPublisher *dataPublisher, 171 | Eigen::Matrix4d &resultPose, 172 | Slam::Result::PointCloud *resultPointCloud); 173 | 174 | ViewerAtlasMap mapDBtoViewerAtlasMap(const MapDB &mapDB); 175 | 176 | } // namespace slam 177 | 178 | #endif //SLAM_MAPPER_HELPERS_HPP 179 | -------------------------------------------------------------------------------- /opencv_viewer_data_publisher.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "opencv_viewer_data_publisher.hpp" 3 | #include "../api/slam.hpp" 4 | #include "../slam/keyframe.hpp" 5 | #include "../slam/map_point.hpp" 6 | #include "../slam/mapdb.hpp" 7 | 8 | #include "../tracker/util.hpp" 9 | #include "../util/util.hpp" 10 | 11 | namespace slam { 12 | 13 | // local helpers 14 | namespace { 15 | 16 | const cv::Scalar triangulatedColor{0, 255, 255}; // yellow 17 | const cv::Scalar 
unsureColor{255,255,0}; // cyan 18 | const cv::Scalar trackedColor{255, 255, 255}; // white 19 | const cv::Scalar noMatchColor{255, 0, 0}; // blue 20 | const cv::Scalar orbColor{0, 128, 255}; // orange 21 | const cv::Scalar white{255, 255, 255}; 22 | 23 | void scaleImage(cv::Mat &m) { 24 | constexpr float maxLonger = 1960.0; 25 | float scale = maxLonger / static_cast(std::max(m.cols, m.rows)); 26 | if (scale < 1.0) { 27 | cv::resize(m, m, cv::Size(), scale, scale, cv::INTER_CUBIC); 28 | } 29 | } 30 | 31 | void drawText(cv::Mat &img, cv::Mat &imgWithText, std::string text) { 32 | cv::Size textSize = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 1.5, 1, 0); 33 | 34 | imgWithText.create(img.rows + textSize.height + 10, img.cols, img.type()); 35 | img.copyTo(imgWithText.rowRange(0, img.rows).colRange(0, img.cols)); 36 | imgWithText.rowRange(img.rows, imgWithText.rows) = cv::Scalar(0); 37 | 38 | cv::Point bottomLeft(5, imgWithText.rows - 5); 39 | cv::putText(imgWithText, text, bottomLeft, cv::FONT_HERSHEY_PLAIN, 1.5, white, 1, 8); 40 | } 41 | 42 | class PointRotator { 43 | public: 44 | PointRotator(double width, double height, int rotation) { 45 | switch (util::modulo(rotation, 4)) { 46 | case 0: 47 | R << 1, 0, 0, 1; 48 | t << 0, 0; 49 | break; 50 | case 1: 51 | R << 0, -1, 1, 0; 52 | t << height, 0; 53 | break; 54 | case 2: 55 | R << -1, 0, 0, -1; 56 | t << width, height; 57 | break; 58 | default: 59 | R << 0, 1, -1, 0; 60 | t << 0, width; 61 | break; 62 | } 63 | } 64 | 65 | Eigen::Vector2d rotate(const Eigen::Vector2d &p) { 66 | return R * p + t; 67 | } 68 | 69 | private: 70 | Eigen::Matrix2d R; 71 | Eigen::Vector2d t; 72 | }; 73 | 74 | void copyAsColor(const cv::Mat &frame, cv::Mat &target) { 75 | if (frame.channels() == 1) { 76 | cv::cvtColor(frame, target, cv::COLOR_GRAY2BGR); 77 | } else { 78 | frame.copyTo(target); 79 | } 80 | } 81 | 82 | void desaturate(cv::Mat &frame, cv::Mat work = {}) { 83 | // :) 84 | cv::cvtColor(frame, work, cv::COLOR_BGR2GRAY); 85 | 
cv::cvtColor(work, frame, cv::COLOR_GRAY2BGR); 86 | } 87 | 88 | } // namespace 89 | 90 | OpenCVViewerDataPublisher::OpenCVViewerDataPublisher(const cmd::ParametersSlam ¶meters) : 91 | ViewerDataPublisher(parameters) 92 | {} 93 | 94 | // * Orange dot: ORB keypoint with no associated map point. 95 | // * Blue dot: Associated map point, but it's not triangulated. 96 | // * Cyan square: "Unsure" or "bad" triangulation. 97 | // * Yellow square: Successful triangulation. 98 | // * White line: Difference between keypoint and map point projection. 99 | // * White circle: Reprojection failed. 100 | void OpenCVViewerDataPublisher::visualizeKeyframe( 101 | const MapDB &mapDB, 102 | const cv::Mat &frame, 103 | const Keyframe &kf 104 | ) { 105 | cv::Mat &img = tmpWindow; 106 | copyAsColor(frame, img); 107 | 108 | PointRotator r(img.cols, img.rows, rotation); 109 | tracker::util::rotateMatrixCW90(img, img, rotation); 110 | std::vector points; 111 | for (const auto &kp : kf.shared->keyPoints) { 112 | Eigen::Vector2d p = r.rotate(Eigen::Vector2d(kp.pt.x, kp.pt.y)); 113 | points.push_back(cv::Point2f(p(0), p(1))); 114 | } 115 | 116 | for (unsigned i = 0; i < kf.shared->keyPoints.size(); i++) { 117 | cv::Point2f center = points[i]; 118 | 119 | cv::circle(img, center, 2, orbColor, -1); 120 | 121 | const MpId mapPointId = kf.mapPoints[i]; 122 | if (mapPointId.v == -1) 123 | continue; 124 | 125 | auto &mapPoint = mapDB.mapPoints.at(mapPointId); 126 | if (mapPoint.status == MapPointStatus::NOT_TRIANGULATED) { 127 | cv::circle(img, center, 2, noMatchColor, -1); 128 | } 129 | else { 130 | float squareSize = 5; 131 | cv::Point2f pt_begin{center.x - squareSize, center.y - squareSize}; 132 | cv::Point2f pt_end{center.x + squareSize, center.y + squareSize}; 133 | 134 | cv::Scalar color = mapPoint.status == MapPointStatus::TRIANGULATED ? 
triangulatedColor : unsureColor; 135 | cv::rectangle(img, pt_begin, pt_end, color); 136 | cv::circle(img, center, 2, color, 1); 137 | 138 | Eigen::Vector2f reprojEigen; 139 | bool reprojectionOk = kf.reproject(mapPoint.position, reprojEigen); 140 | Eigen::Vector2d d = r.rotate(reprojEigen.cast()); 141 | 142 | cv::Point2f reprojection(d.x(), d.y()); 143 | cv::line(img, center, reprojection, trackedColor); 144 | cv::circle(img, reprojection, 2, trackedColor, 1); 145 | 146 | if (!reprojectionOk) { 147 | cv::circle(img, center, squareSize, trackedColor, 3); 148 | } 149 | } 150 | } 151 | 152 | std::stringstream ss; 153 | { 154 | std::lock_guard l(m); 155 | ss << "KFs: " << keyframes.size() << ", " 156 | << "MPs: " << mapPoints.size(); 157 | } 158 | 159 | drawText(img, tmpWindow2, ss.str()); 160 | 161 | scaleImage(tmpWindow2); 162 | 163 | std::lock_guard l(m); 164 | tmpWindow2.copyTo(keyframeWindow); 165 | } 166 | 167 | 168 | void OpenCVViewerDataPublisher::visualizeOrbs(const cv::Mat &frame, const Keyframe &kf) { 169 | cv::Mat &img = tmpWindow; 170 | copyAsColor(frame, img); 171 | desaturate(img, tmpWindow2); 172 | 173 | const cv::Scalar otherColor(0x50, 0x50, 0x50); 174 | const cv::Scalar mapPointColor(0, 0xa0, 0xa0); 175 | const cv::Scalar trackColor(0xff, 0, 0xff); 176 | const cv::Scalar trackMapPointColor(0x80, 0xff, 0xff); 177 | 178 | for (unsigned i = 0; i < kf.shared->keyPoints.size(); ++i) { 179 | const auto &kp = kf.shared->keyPoints.at(i); 180 | int radius = 2 + kp.octave*3; // TODO; 181 | 182 | cv::Scalar color; 183 | bool isTracked = kf.keyPointToTrackId.count(KpId(i)); 184 | bool hasMapPoint = kf.mapPoints.at(i).v >= 0; 185 | if (hasMapPoint && isTracked) { 186 | color = trackMapPointColor; 187 | } else if (hasMapPoint) { 188 | color = mapPointColor; 189 | } else if (isTracked) { 190 | color = trackColor; 191 | } else { 192 | color = otherColor; 193 | } 194 | cv::Point2f c(kp.pt.x, kp.pt.y); 195 | cv::circle(img, c, radius, color, 1); 196 | const double 
aRad = kp.angle / 180.0 * M_PI; 197 | cv::line(img, c, c + cv::Point2f(std::cos(aRad), std::sin(aRad))*radius, color, 1); 198 | } 199 | 200 | tracker::util::rotateMatrixCW90(img, tmpWindow2, rotation); 201 | scaleImage(tmpWindow2); 202 | 203 | std::lock_guard l(m); 204 | tmpWindow2.copyTo(otherWindow); 205 | } 206 | 207 | // * Orange dot: ORB keypoint for which no matching map point was found. 208 | // * Cyan circle: Projection of map point for which no matching ORB was found. 209 | // * White: ORB (dot) and its matching map point projection (circle). 210 | void OpenCVViewerDataPublisher::visualizeMapPointSearch( 211 | const cv::Mat &frame, 212 | const SearchedMapPointVector &searched, 213 | const Vector2dVector &mps, 214 | const Vector2dVector &kps 215 | ) { 216 | cv::Mat &img = tmpWindow; 217 | 218 | if (frame.channels() == 1) { 219 | cv::cvtColor(frame, img, cv::COLOR_GRAY2BGR); 220 | } else { 221 | frame.copyTo(img); 222 | } 223 | PointRotator r(img.cols, img.rows, rotation); 224 | tracker::util::rotateMatrixCW90(img, img, rotation); 225 | 226 | // Map points here contain also the matches, draw first 227 | // so they are covered by the match drawings. 
228 | for (const Eigen::Vector2d &mp : mps) { 229 | Eigen::Vector2d d = r.rotate(mp); 230 | cv::Point2f p(d.x(), d.y()); 231 | cv::circle(img, p, 4, unsureColor); 232 | } 233 | 234 | for (const Eigen::Vector2d &kp : kps) { 235 | Eigen::Vector2d d = r.rotate(kp); 236 | cv::Point2f p(d.x(), d.y()); 237 | cv::circle(img, p, 2, orbColor); 238 | } 239 | 240 | for (const SearchedMapPoint &s : searched) { 241 | Eigen::Vector2d d = r.rotate(s.kp); 242 | cv::Point2f p0(d.x(), d.y()); 243 | 244 | d = r.rotate(s.mp); 245 | cv::Point2f p1(d.x(), d.y()); 246 | 247 | cv::line(img, p0, p1, trackedColor); 248 | cv::circle(img, p0, 2, trackedColor); 249 | cv::circle(img, p1, 4, trackedColor); 250 | } 251 | 252 | char s[100]; 253 | double percent = 100.0 * static_cast(searched.size()) / static_cast(mps.size()); 254 | snprintf(s, 100, "MPs: %zu, matches: %zu (%.0f%%), ORBs: %zu", mps.size(), searched.size(), percent, kps.size()); 255 | drawText(img, tmpWindow2, s); 256 | 257 | scaleImage(tmpWindow2); 258 | 259 | std::lock_guard l(m); 260 | tmpWindow2.copyTo(searchWindow); 261 | } 262 | 263 | void OpenCVViewerDataPublisher::showMatches( 264 | const Keyframe &kf1, 265 | const Keyframe &kf2, 266 | const std::vector> &matches, 267 | MatchType matchType 268 | ) { 269 | std::vector dmatches; 270 | for (const auto &match : matches) { 271 | dmatches.emplace_back(match.first.v, match.second.v, 1); 272 | } 273 | 274 | PointRotator r(kf1.shared->imgDbg.cols, kf1.shared->imgDbg.rows, rotation); 275 | std::vector keyPoints1; 276 | std::vector keyPoints2; 277 | constexpr int DUMMY = 100; 278 | for (const auto &kp : kf1.shared->keyPoints) { 279 | Eigen::Vector2d p = r.rotate(Eigen::Vector2d(kp.pt.x, kp.pt.y)); 280 | cv::KeyPoint nkp(cv::Point2f(kp.pt.x, kp.pt.y), DUMMY); 281 | nkp.pt = cv::Point2f(p(0), p(1)); 282 | keyPoints1.push_back(nkp); 283 | } 284 | for (const auto &kp : kf2.shared->keyPoints) { 285 | Eigen::Vector2d p = r.rotate(Eigen::Vector2d(kp.pt.x, kp.pt.y)); 286 | cv::KeyPoint 
nkp(cv::Point2f(kp.pt.x, kp.pt.y), DUMMY); 287 | nkp.pt = cv::Point2f(p(0), p(1)); 288 | keyPoints2.push_back(nkp); 289 | } 290 | 291 | tracker::util::rotateMatrixCW90(kf1.shared->imgDbg, tmpWindow, rotation); 292 | tracker::util::rotateMatrixCW90(kf2.shared->imgDbg, tmpWindow2, rotation); 293 | 294 | cv::Mat &outImg = tmpWindow3; 295 | cv::drawMatches( 296 | tmpWindow, 297 | keyPoints1, 298 | tmpWindow2, 299 | keyPoints2, 300 | dmatches, 301 | outImg 302 | ); 303 | 304 | scaleImage(outImg); 305 | 306 | std::lock_guard l(m); 307 | 308 | if (matchType == MatchType::MAPPER) 309 | outImg.copyTo(matchWindow); 310 | else 311 | outImg.copyTo(matchWindowLoop); 312 | } 313 | 314 | void OpenCVViewerDataPublisher::visualizeOther(const cv::Mat &mat) { 315 | tracker::util::rotateMatrixCW90(mat, tmpWindow, rotation); 316 | scaleImage(tmpWindow); 317 | tmpWindow.copyTo(otherWindow); 318 | } 319 | 320 | std::map OpenCVViewerDataPublisher::pollVisualizations() { 321 | std::lock_guard l(m); 322 | std::map result; 323 | if (!matchWindow.empty()) { 324 | result["ORB matches"] = matchWindow.clone(); 325 | matchWindow.resize(0); // clear without reallocating 326 | } 327 | if (!matchWindowLoop.empty()) { 328 | result["Loop ORB matches"] = matchWindowLoop.clone(); 329 | matchWindowLoop.resize(0); // clear without reallocating 330 | } 331 | if (!keyframeWindow.empty()) { 332 | result["SLAM keyframe"] = keyframeWindow.clone(); 333 | keyframeWindow.resize(0); 334 | } 335 | if (!searchWindow.empty()) { 336 | result["Map point search"] = searchWindow.clone(); 337 | searchWindow.resize(0); 338 | } 339 | if (!otherWindow.empty()) { 340 | result["SLAM (other)"] = otherWindow.clone(); 341 | otherWindow.resize(0); 342 | } 343 | return result; 344 | } 345 | 346 | void OpenCVViewerDataPublisher::setFrameRotation(int rotation) { 347 | this->rotation = rotation; 348 | } 349 | 350 | } // namespace slam 351 | -------------------------------------------------------------------------------- 
/opencv_viewer_data_publisher.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_OPENCV_DATA_PUBLISHER_HPP 2 | #define SLAM_OPENCV_DATA_PUBLISHER_HPP 3 | 4 | #include "../slam/viewer_data_publisher.hpp" 5 | #include 6 | 7 | namespace slam { 8 | 9 | class OpenCVViewerDataPublisher : public ViewerDataPublisher { 10 | public: 11 | OpenCVViewerDataPublisher(const cmd::ParametersSlam ¶meters); 12 | 13 | void visualizeKeyframe( 14 | const MapDB &mapDB, 15 | const cv::Mat &frame, 16 | const Keyframe &kf 17 | ) final; 18 | 19 | void visualizeOrbs( 20 | const cv::Mat &frame, 21 | const Keyframe &kf 22 | ) final; 23 | 24 | void visualizeMapPointSearch( 25 | const cv::Mat &frame, 26 | const SearchedMapPointVector &searched, 27 | const Vector2dVector &mps, 28 | const Vector2dVector &kps 29 | ) final; 30 | 31 | void showMatches( 32 | const Keyframe &kf1, 33 | const Keyframe &kf2, 34 | const std::vector> &matches, 35 | MatchType matchType 36 | ) final; 37 | 38 | virtual void visualizeOther(const cv::Mat &mat) final; 39 | 40 | std::map pollVisualizations() final; 41 | void setFrameRotation(int rotation) final; 42 | 43 | private: 44 | cv::Mat tmpWindow, tmpWindow2, tmpWindow3; 45 | cv::Mat keyframeWindow, matchWindow, matchWindowLoop, searchWindow, otherWindow; 46 | // Number of CW 90 degree rotations for frame visualizations. 47 | int rotation = 0; 48 | }; 49 | } 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /openvslam/README.md: -------------------------------------------------------------------------------- 1 | The contents of this folder have been copied from OpenVSLAM with 2 | only trivial modifications like changed paths. Some of them, like other 3 | OpenVSLAM files, have been inherited from other projects such as OpenCV and 4 | have various copyright notices. 
5 | 6 | OpenVSLAM's code was originally licensed under a 2-Clause BSD License (included below), 7 | but due to the licensing incident (https://github.com/xdspacelab/openvslam/wiki/Termination-of-the-release), 8 | it may be that the license is not valid, and the code may actually be under GPL-v3 terms 9 | from ORB-SLAM2 (https://github.com/raulmur/ORB_SLAM2/blob/f2e6f51cdc8d067655d90a78c06261378e07e8f3/License-gpl.txt). 10 | No-one knows for sure. Use for commercial purposes is not recommended. 11 | 12 | ---- 13 | 14 | BSD 2-Clause License 15 | 16 | Copyright (c) 2019, 17 | National Institute of Advanced Industrial Science and Technology (AIST), 18 | All rights reserved. 19 | 20 | Redistribution and use in source and binary forms, with or without 21 | modification, are permitted provided that the following conditions are met: 22 | 23 | 1. Redistributions of source code must retain the above copyright notice, this 24 | list of conditions and the following disclaimer. 25 | 26 | 2. Redistributions in binary form must reproduce the above copyright notice, 27 | this list of conditions and the following disclaimer in the documentation 28 | and/or other materials provided with the distribution. 29 | 30 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 31 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 33 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 34 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 36 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 37 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 38 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 39 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 40 | -------------------------------------------------------------------------------- /openvslam/essential_solver.cc: -------------------------------------------------------------------------------- 1 | #include "essential_solver.h" 2 | #include "random_array.h" 3 | 4 | namespace openvslam { 5 | namespace solve { 6 | 7 | essential_solver::essential_solver(const eigen_alloc_vector& bearings_1, const eigen_alloc_vector& bearings_2, 8 | const std::vector>& matches_12) 9 | : bearings_1_(bearings_1), bearings_2_(bearings_2), matches_12_(matches_12) {} 10 | 11 | void essential_solver::find_via_ransac(const unsigned int max_num_iter, const bool recompute) { 12 | const auto num_matches = static_cast(matches_12_.size()); 13 | 14 | // 1. 
Prepare for RANSAC 15 | 16 | // minimum number of samples (= 8) 17 | constexpr unsigned int min_set_size = 8; 18 | if (num_matches < min_set_size) { 19 | solution_is_valid_ = false; 20 | return; 21 | } 22 | 23 | // RANSAC variables 24 | best_score_ = 0.0; 25 | is_inlier_match_ = std::vector(num_matches, false); 26 | 27 | // minimum set of keypoint matches 28 | eigen_alloc_vector min_set_bearings_1(min_set_size); 29 | eigen_alloc_vector min_set_bearings_2(min_set_size); 30 | 31 | // shared variables in RANSAC loop 32 | // essential matrix from shot 1 to shot 2 33 | Mat33_t E_21_in_sac; 34 | // inlier/outlier flags 35 | std::vector is_inlier_match_in_sac(num_matches, false); 36 | // score of essential matrix 37 | float score_in_sac; 38 | 39 | // 2. RANSAC loop 40 | 41 | for (unsigned int iter = 0; iter < max_num_iter; iter++) { 42 | // 2-1. Create a minimum set 43 | const auto indices = util::create_random_array(min_set_size, 0U, num_matches - 1); 44 | for (unsigned int i = 0; i < min_set_size; ++i) { 45 | const auto idx = indices.at(i); 46 | min_set_bearings_1.at(i) = bearings_1_.at(matches_12_.at(idx).first); 47 | min_set_bearings_2.at(i) = bearings_2_.at(matches_12_.at(idx).second); 48 | } 49 | 50 | // 2-2. Compute an essential matrix 51 | E_21_in_sac = compute_E_21(min_set_bearings_1, min_set_bearings_2); 52 | 53 | // 2-3. Check inliers and compute a score 54 | score_in_sac = check_inliers(E_21_in_sac, is_inlier_match_in_sac); 55 | 56 | // 2-4. Update the best model 57 | if (best_score_ < score_in_sac) { 58 | best_score_ = score_in_sac; 59 | best_E_21_ = E_21_in_sac; 60 | is_inlier_match_ = is_inlier_match_in_sac; 61 | } 62 | } 63 | 64 | const auto num_inliers = std::count(is_inlier_match_.begin(), is_inlier_match_.end(), true); 65 | solution_is_valid_ = (best_score_ > 0.0) && (num_inliers >= min_set_size); 66 | 67 | if (!recompute || !solution_is_valid_) { 68 | return; 69 | } 70 | 71 | // 3. 
Recompute an essential matrix only with the inlier matches 72 | 73 | eigen_alloc_vector inlier_bearing_1; 74 | eigen_alloc_vector inlier_bearing_2; 75 | inlier_bearing_1.reserve(matches_12_.size()); 76 | inlier_bearing_2.reserve(matches_12_.size()); 77 | for (unsigned int i = 0; i < matches_12_.size(); ++i) { 78 | if (is_inlier_match_.at(i)) { 79 | inlier_bearing_1.push_back(bearings_1_.at(matches_12_.at(i).first)); 80 | inlier_bearing_2.push_back(bearings_2_.at(matches_12_.at(i).second)); 81 | } 82 | } 83 | best_E_21_ = solve::essential_solver::compute_E_21(inlier_bearing_1, inlier_bearing_2); 84 | best_score_ = check_inliers(best_E_21_, is_inlier_match_); 85 | } 86 | 87 | Mat33_t essential_solver::compute_E_21(const eigen_alloc_vector& bearings_1, const eigen_alloc_vector& bearings_2) { 88 | assert(bearings_1.size() == bearings_2.size()); 89 | 90 | const auto num_points = bearings_1.size(); 91 | 92 | typedef Eigen::Matrix CoeffMatrix; 93 | CoeffMatrix A(num_points, 9); 94 | 95 | for (unsigned int i = 0; i < num_points; i++) { 96 | A.block<1, 3>(i, 0) = bearings_2.at(i)(0) * bearings_1.at(i); 97 | A.block<1, 3>(i, 3) = bearings_2.at(i)(1) * bearings_1.at(i); 98 | A.block<1, 3>(i, 6) = bearings_2.at(i)(2) * bearings_1.at(i); 99 | } 100 | 101 | const Eigen::JacobiSVD init_svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV); 102 | 103 | const Eigen::Matrix v = init_svd.matrixV().col(8); 104 | // need transpose() because elements are contained as col-major after it was constructed from a pointer 105 | const Mat33_t init_E_21 = Mat33_t(v.data()).transpose(); 106 | 107 | const Eigen::JacobiSVD svd(init_E_21, Eigen::ComputeFullU | Eigen::ComputeFullV); 108 | 109 | const Mat33_t& U = svd.matrixU(); 110 | Vec3_t lambda = svd.singularValues(); 111 | const Mat33_t& V = svd.matrixV(); 112 | 113 | lambda(2) = 0.0; 114 | 115 | const Mat33_t E_21 = U * lambda.asDiagonal() * V.transpose(); 116 | 117 | return E_21; 118 | } 119 | 120 | bool essential_solver::decompose(const Mat33_t& 
E_21, eigen_alloc_vector& init_rots, eigen_alloc_vector& init_transes) { 121 | // https://en.wikipedia.org/wiki/Essential_matrix#Determining_R_and_t_from_E 122 | 123 | const Eigen::JacobiSVD svd(E_21, Eigen::ComputeFullU | Eigen::ComputeFullV); 124 | 125 | Vec3_t trans = svd.matrixU().col(2); 126 | trans.normalize(); 127 | 128 | Mat33_t W = Mat33_t::Zero(); 129 | W(0, 1) = -1; 130 | W(1, 0) = 1; 131 | W(2, 2) = 1; 132 | 133 | Mat33_t rot_1 = svd.matrixU() * W * svd.matrixV().transpose(); 134 | if (rot_1.determinant() < 0) { 135 | rot_1 *= -1; 136 | } 137 | 138 | Mat33_t rot_2 = svd.matrixU() * W.transpose() * svd.matrixV().transpose(); 139 | if (rot_2.determinant() < 0) { 140 | rot_2 *= -1; 141 | } 142 | 143 | init_rots = {rot_1, rot_1, rot_2, rot_2}; 144 | init_transes = {trans, -trans, trans, -trans}; 145 | 146 | return true; 147 | } 148 | 149 | static Mat33_t to_skew_symmetric_mat(const Vec3_t& vec) { 150 | Mat33_t skew; 151 | skew << 0, -vec(2), vec(1), 152 | vec(2), 0, -vec(0), 153 | -vec(1), vec(0), 0; 154 | return skew; 155 | } 156 | 157 | Mat33_t essential_solver::create_E_21(const Mat33_t& rot_1w, const Vec3_t& trans_1w, const Mat33_t& rot_2w, const Vec3_t& trans_2w) { 158 | const Mat33_t rot_21 = rot_2w * rot_1w.transpose(); 159 | const Vec3_t trans_21 = -rot_21 * trans_1w + trans_2w; 160 | const Mat33_t trans_21_x = to_skew_symmetric_mat(trans_21); 161 | return trans_21_x * rot_21; 162 | } 163 | 164 | float essential_solver::check_inliers(const Mat33_t& E_21, std::vector& is_inlier_match) { 165 | const auto num_points = matches_12_.size(); 166 | 167 | is_inlier_match.resize(num_points); 168 | 169 | const Mat33_t E_12 = E_21.transpose(); 170 | 171 | float score = 0; 172 | 173 | // outlier threshold as cosine value between a bearing vector and a normal vector of the epipolar plane 174 | constexpr float residual_cos_thr = 0.01745240643; 175 | 176 | for (unsigned int i = 0; i < num_points; ++i) { 177 | const auto& bearing_1 = 
bearings_1_.at(matches_12_.at(i).first); 178 | const auto& bearing_2 = bearings_2_.at(matches_12_.at(i).second); 179 | 180 | // 1. Transform a point in shot 1 to the epipolar plane in shot 2, 181 | // then compute a transfer error (= dot product) 182 | 183 | const Vec3_t epiplane_in_2 = E_21 * bearing_1; 184 | const float residual_in_2 = std::abs(epiplane_in_2.dot(bearing_2) / epiplane_in_2.norm()); 185 | 186 | // if a match is inlier, accumulate the score 187 | if (residual_cos_thr < residual_in_2) { 188 | is_inlier_match.at(i) = false; 189 | continue; 190 | } 191 | else { 192 | is_inlier_match.at(i) = true; 193 | score += residual_in_2; 194 | } 195 | 196 | // 2. Transform a point in shot 2 to the epipolar plane in shot 1, 197 | // then compute a transfer error (= dot product) 198 | 199 | const Vec3_t epiplane_in_1 = E_12 * bearing_2; 200 | 201 | const float residual_in_1 = std::abs(epiplane_in_1.dot(bearing_1) / epiplane_in_1.norm()); 202 | 203 | // if a match is inlier, accumulate the score 204 | if (residual_cos_thr < residual_in_1) { 205 | is_inlier_match.at(i) = false; 206 | continue; 207 | } 208 | else { 209 | is_inlier_match.at(i) = true; 210 | score += residual_in_1; 211 | } 212 | } 213 | 214 | return score; 215 | } 216 | 217 | } // namespace solve 218 | } // namespace openvslam 219 | -------------------------------------------------------------------------------- /openvslam/essential_solver.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENVSLAM_SOLVE_ESSENTIAL_SOLVER_H 2 | #define OPENVSLAM_SOLVE_ESSENTIAL_SOLVER_H 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | namespace openvslam { 10 | 11 | // used to be in type.h 12 | using Mat33_t = Eigen::Matrix3d; 13 | using Vec3_t = Eigen::Vector3d; 14 | template 15 | using eigen_alloc_vector = std::vector>; 16 | 17 | namespace solve { 18 | 19 | class essential_solver { 20 | public: 21 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 22 | 23 | //! 
Constructor 24 | essential_solver(const eigen_alloc_vector& bearings_1, const eigen_alloc_vector& bearings_2, 25 | const std::vector>& matches_12); 26 | 27 | //! Destructor 28 | virtual ~essential_solver() = default; 29 | 30 | //! Find the most reliable essential matrix via RANSAC 31 | void find_via_ransac(const unsigned int max_num_iter, const bool recompute = true); 32 | 33 | //! Check if the solution is valid or not 34 | bool solution_is_valid() const { 35 | return solution_is_valid_; 36 | } 37 | 38 | //! Get the best score 39 | double get_best_score() const { 40 | return best_score_; 41 | } 42 | 43 | //! Get the most reliable essential matrix 44 | Mat33_t get_best_E_21() const { 45 | return best_E_21_; 46 | } 47 | 48 | //! Get the inlier matches 49 | std::vector get_inlier_matches() const { 50 | return is_inlier_match_; 51 | } 52 | 53 | //! Compute an essential matrix with 8-point algorithm 54 | static Mat33_t compute_E_21(const eigen_alloc_vector& bearings_1, const eigen_alloc_vector& bearings_2); 55 | 56 | //! Decompose an essential matrix to four pairs of rotation and translation 57 | static bool decompose(const Mat33_t& E_21, eigen_alloc_vector& init_rots, eigen_alloc_vector& init_transes); 58 | 59 | //! Create an essential matrix from camera poses 60 | static Mat33_t create_E_21(const Mat33_t& rot_1w, const Vec3_t& trans_1w, const Mat33_t& rot_2w, const Vec3_t& trans_2w); 61 | 62 | private: 63 | //! Check inliers of the epipolar constraint 64 | //! (Note: inlier flags are set to `inlier_match` and a score is returned) 65 | float check_inliers(const Mat33_t& E_21, std::vector& is_inlier_match); 66 | 67 | //! bearing vectors of shot 1 68 | const eigen_alloc_vector& bearings_1_; 69 | //! bearing vectors of shot 2 70 | const eigen_alloc_vector& bearings_2_; 71 | //! matched indices between shots 1 and 2 72 | const std::vector>& matches_12_; 73 | 74 | //! solution is valid or not 75 | bool solution_is_valid_ = false; 76 | //! 
best score of RANSAC 77 | double best_score_ = 0.0; 78 | //! most reliable essential matrix 79 | Mat33_t best_E_21_; 80 | //! inlier matches computed via RANSAC 81 | std::vector is_inlier_match_; 82 | }; 83 | 84 | } // namespace solve 85 | } // namespace openvslam 86 | 87 | #endif // OPENVSLAM_SOLVE_ESSENTIAL_SOLVER_H 88 | -------------------------------------------------------------------------------- /openvslam/match_angle_checker.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENVSLAM_MATCH_ANGLE_CHECKER_H 2 | #define OPENVSLAM_MATCH_ANGLE_CHECKER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | namespace openvslam { 12 | namespace match { 13 | 14 | template 15 | class angle_checker { 16 | public: 17 | /** 18 | * Constructor 19 | */ 20 | explicit angle_checker(const unsigned int histogram_length = 30, 21 | const unsigned int num_bins_thr = 3); 22 | 23 | /** 24 | * Destructor 25 | */ 26 | ~angle_checker() = default; 27 | 28 | /** 29 | * Append a delta angle to the histogram 30 | */ 31 | void append_delta_angle(float delta_angle, const T& match); 32 | 33 | /** 34 | * Get the valid matches sampled from the top-N of the histogram 35 | */ 36 | std::vector get_valid_matches() const; 37 | 38 | /** 39 | * Get the invalid matches sampled from the remainder of the top-N of the histogram 40 | */ 41 | std::vector get_invalid_matches() const; 42 | 43 | private: 44 | /** 45 | * Index sort with the size of each vector 46 | */ 47 | std::vector index_sort_by_size(const std::vector>& vec) const; 48 | 49 | //! histogram length 50 | const unsigned int histogram_length_; 51 | //! inverse of the histogram length 52 | const float inv_histogram_length_; 53 | //! histogram object which contain match information at the corresponding bin 54 | std::vector> angle_histogram_; 55 | 56 | //! 
threshold of the number of VALID bins 57 | const unsigned int num_bins_thr_; 58 | }; 59 | 60 | template 61 | angle_checker::angle_checker(const unsigned int histogram_length, const unsigned int num_bins_thr) 62 | : histogram_length_(histogram_length), inv_histogram_length_(1.0f / histogram_length), 63 | num_bins_thr_(num_bins_thr) { 64 | assert(num_bins_thr_ <= histogram_length_); 65 | angle_histogram_.resize(histogram_length_); 66 | for (auto& bin : angle_histogram_) { 67 | bin.reserve(300); 68 | } 69 | } 70 | 71 | template 72 | inline void angle_checker::append_delta_angle(float delta_angle, const T& match) { 73 | if (delta_angle < 0.0) { 74 | delta_angle += 360.0; 75 | } 76 | if (360.0 <= delta_angle) { 77 | delta_angle -= 360.0; 78 | } 79 | const auto bin = static_cast(cvRound(delta_angle * inv_histogram_length_)); 80 | 81 | assert(bin < histogram_length_); 82 | angle_histogram_.at(bin).push_back(match); 83 | } 84 | 85 | template 86 | std::vector angle_checker::get_valid_matches() const { 87 | std::vector valid_matches; 88 | valid_matches.reserve(300 * num_bins_thr_); 89 | 90 | const auto bins = index_sort_by_size(angle_histogram_); 91 | for (unsigned int bin = 0; bin < histogram_length_; ++bin) { 92 | const bool is_valid_match = std::any_of(bins.begin(), bins.begin() + num_bins_thr_, 93 | [bin](const unsigned int i) { return bin == i; }); 94 | if (is_valid_match) { 95 | const auto bin_begin_iter = angle_histogram_.at(bin).cbegin(); 96 | const auto bin_end_iter = angle_histogram_.at(bin).cend(); 97 | valid_matches.reserve(valid_matches.size() + std::distance(bin_begin_iter, bin_end_iter)); 98 | valid_matches.insert(valid_matches.end(), bin_begin_iter, bin_end_iter); 99 | } 100 | } 101 | 102 | return valid_matches; 103 | } 104 | 105 | template 106 | std::vector angle_checker::get_invalid_matches() const { 107 | std::vector invalid_matches; 108 | invalid_matches.reserve(300 * (histogram_length_ - num_bins_thr_)); 109 | 110 | const auto bins = 
index_sort_by_size(angle_histogram_); 111 | for (unsigned int bin = 0; bin < histogram_length_; ++bin) { 112 | const bool is_valid_match = std::any_of(bins.begin(), bins.begin() + num_bins_thr_, 113 | [bin](const unsigned int i) { return bin == i; }); 114 | if (!is_valid_match) { 115 | const auto bin_begin_iter = angle_histogram_.at(bin).cbegin(); 116 | const auto bin_end_iter = angle_histogram_.at(bin).cend(); 117 | invalid_matches.reserve(invalid_matches.size() + std::distance(bin_begin_iter, bin_end_iter)); 118 | invalid_matches.insert(invalid_matches.end(), bin_begin_iter, bin_end_iter); 119 | } 120 | } 121 | 122 | return invalid_matches; 123 | } 124 | 125 | template 126 | std::vector angle_checker::index_sort_by_size(const std::vector>& vec) const { 127 | std::vector indices(vec.size()); 128 | std::iota(indices.begin(), indices.end(), 0); 129 | std::sort(indices.begin(), indices.end(), 130 | [&vec](const unsigned int a, const unsigned int b) { 131 | return (vec.at(a).size() > vec.at(b).size()); 132 | }); 133 | return indices; 134 | } 135 | 136 | } // namespace match 137 | } // namespace openvslam 138 | 139 | #endif // OPENVSLAM_MATCH_ANGLE_CHECKER_H 140 | -------------------------------------------------------------------------------- /openvslam/match_base.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENVSLAM_MATCH_BASE_H 2 | #define OPENVSLAM_MATCH_BASE_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | namespace openvslam { 11 | namespace match { 12 | 13 | static constexpr unsigned int HAMMING_DIST_THR_LOW = 50; 14 | static constexpr unsigned int HAMMING_DIST_THR_HIGH = 100; 15 | static constexpr unsigned int MAX_HAMMING_DIST = 256; 16 | 17 | // Mostly from OpenVSLAM, but modified to use uint32_t arrays instead of cv::Mat 18 | inline unsigned int compute_descriptor_distance_32(const uint32_t *desc_1, const uint32_t *desc_2) { 19 | // 
http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 20 | 21 | constexpr uint32_t mask_1 = 0x55555555U; 22 | constexpr uint32_t mask_2 = 0x33333333U; 23 | constexpr uint32_t mask_3 = 0x0F0F0F0FU; 24 | constexpr uint32_t mask_4 = 0x01010101U; 25 | 26 | const auto* pa = desc_1; 27 | const auto* pb = desc_2; 28 | 29 | unsigned int dist = 0; 30 | 31 | for (unsigned int i = 0; i < 8; ++i, ++pa, ++pb) { 32 | auto v = *pa ^ *pb; 33 | v -= ((v >> 1) & mask_1); 34 | v = (v & mask_2) + ((v >> 2) & mask_2); 35 | dist += (((v + (v >> 4)) & mask_3) * mask_4) >> 24; 36 | } 37 | 38 | return dist; 39 | } 40 | 41 | class base { 42 | public: 43 | base(const float lowe_ratio, const bool check_orientation) 44 | : lowe_ratio_(lowe_ratio), check_orientation_(check_orientation) {} 45 | 46 | virtual ~base() = default; 47 | 48 | protected: 49 | const float lowe_ratio_; 50 | const bool check_orientation_; 51 | }; 52 | 53 | } // namespace match 54 | } // namespace openvslam 55 | 56 | #endif // OPENVSLAM_MATCH_BASE_H 57 | -------------------------------------------------------------------------------- /openvslam/random_array.cc: -------------------------------------------------------------------------------- 1 | #include "random_array.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | namespace openvslam { 10 | namespace util { 11 | 12 | // std::mt19937 create_random_engine() { 13 | // std::random_device random_device; 14 | // std::vector v(10); 15 | // std::generate(v.begin(), v.end(), std::ref(random_device)); 16 | // std::seed_seq seed(v.begin(), v.end()); 17 | // return std::mt19937(seed); 18 | // } 19 | 20 | thread_local std::mt19937 random_engine(94235682); // Fixed seed to get deterministic results 21 | 22 | template 23 | std::vector create_random_array(const size_t size, const T rand_min, const T rand_max) { 24 | assert(rand_min <= rand_max); 25 | assert(size <= static_cast(rand_max - rand_min + 1)); 26 | 27 | // メルセンヌ・ツイスタ作成 28 | // 
auto random_engine = create_random_engine(); 29 | std::uniform_int_distribution uniform_int_distribution(rand_min, rand_max); 30 | 31 | // sizeより少し大きくランダム数列(重複あり)を作成する 32 | const auto make_size = static_cast(size * 1.2); 33 | 34 | // vがsizeになるまで繰り返す 35 | std::vector v; 36 | v.reserve(size); 37 | while (v.size() != size) { 38 | // ランダム整数列を順に追加(重複がある可能性がある) 39 | while (v.size() < make_size) { 40 | v.push_back(uniform_int_distribution(random_engine)); 41 | } 42 | 43 | // ソートして重複を除く -> 重複が除かれた数列の末尾のイテレータがunique_endに入る 44 | std::sort(v.begin(), v.end()); 45 | auto unique_end = std::unique(v.begin(), v.end()); 46 | 47 | // vのサイズが大きすぎたら,sizeまでのイテレータに変えておく 48 | if (size < static_cast(std::distance(v.begin(), unique_end))) { 49 | unique_end = std::next(v.begin(), size); 50 | } 51 | 52 | // 重複部分から最後までを削除する 53 | v.erase(unique_end, v.end()); 54 | } 55 | 56 | // 昇順になっているのでシャッフル 57 | std::shuffle(v.begin(), v.end(), random_engine); 58 | 59 | return v; 60 | } 61 | 62 | // 明示的に実体化しておく 63 | template std::vector create_random_array(size_t, int, int); 64 | template std::vector create_random_array(size_t, unsigned int, unsigned int); 65 | 66 | } // namespace util 67 | } // namespace openvslam 68 | -------------------------------------------------------------------------------- /openvslam/random_array.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENVSLAM_UTIL_RANDOM_ARRAY_H 2 | #define OPENVSLAM_UTIL_RANDOM_ARRAY_H 3 | 4 | #include 5 | #include 6 | 7 | namespace openvslam { 8 | namespace util { 9 | 10 | std::mt19937 create_random_engine(); 11 | 12 | template 13 | std::vector create_random_array(const size_t size, const T rand_min, const T rand_max); 14 | 15 | } // namespace util 16 | } // namespace openvslam 17 | 18 | #endif // OPENVSLAM_UTIL_RANDOM_ARRAY_H 19 | -------------------------------------------------------------------------------- /openvslam/trigonometric.h: 
-------------------------------------------------------------------------------- 1 | #ifndef OPENVSLAM_UTIL_TRIGONOMETRIC_H 2 | #define OPENVSLAM_UTIL_TRIGONOMETRIC_H 3 | 4 | #include 5 | 6 | #include 7 | 8 | namespace openvslam { 9 | namespace util { 10 | 11 | static constexpr float _PI = 3.14159265358979f; 12 | static constexpr float _PI_2 = _PI / 2.0f; 13 | static constexpr float _TWO_PI = 2.0f * _PI; 14 | static constexpr float _INV_TWO_PI = 1.0f / _TWO_PI; 15 | static constexpr float _THREE_PI_2 = 3.0f * _PI_2; 16 | 17 | inline float _cos(float v) { 18 | constexpr float c1 = 0.99940307f; 19 | constexpr float c2 = -0.49558072f; 20 | constexpr float c3 = 0.03679168f; 21 | 22 | const float v2 = v * v; 23 | return c1 + v2 * (c2 + c3 * v2); 24 | } 25 | 26 | inline float cos(float v) { 27 | v = v - cvFloor(v * _INV_TWO_PI) * _TWO_PI; 28 | v = (0.0f < v) ? v : -v; 29 | 30 | if (v < _PI_2) { 31 | return _cos(v); 32 | } 33 | else if (v < _PI) { 34 | return -_cos(_PI - v); 35 | } 36 | else if (v < _THREE_PI_2) { 37 | return -_cos(v - _PI); 38 | } 39 | else { 40 | return _cos(_TWO_PI - v); 41 | } 42 | } 43 | 44 | inline float sin(float v) { 45 | return openvslam::util::cos(_PI_2 - v); 46 | } 47 | 48 | } // namespace util 49 | } // namespace openvslam 50 | 51 | #endif // OPENVSLAM_UTIL_TRIGONOMETRIC_H 52 | -------------------------------------------------------------------------------- /optimize_transform.cpp: -------------------------------------------------------------------------------- 1 | #include "optimize_transform.hpp" 2 | 3 | #include "keyframe.hpp" 4 | #include "map_point.hpp" 5 | #include "mapdb.hpp" 6 | #include "../util/logging.hpp" 7 | #include "../util/util.hpp" 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include "mapper_helpers.hpp" 22 | 23 | /* 24 | namespace { 25 | 26 | g2o::Sim3 se3ToSim3(const Eigen::Matrix4d &se3) { 27 | return g2o::Sim3( 
28 | se3.topLeftCorner<3,3>(), 29 | se3.block<3,1>(0,3), 30 | 1 31 | ); 32 | } 33 | 34 | Eigen::Matrix4d sim3ToSe3(const g2o::Sim3 &sim3) { 35 | Eigen::Matrix4d se3 = Eigen::Matrix4d::Identity(); 36 | se3.topLeftCorner<3,3>() = sim3.rotation().toRotationMatrix(); 37 | se3.block<3,1>(0, 3) = sim3.translation(); 38 | return se3; 39 | } 40 | 41 | g2o::SE3Quat sim3ToSE3Quat(const g2o::Sim3 &sim3) { 42 | return g2o::SE3Quat(sim3.rotation(), sim3.translation()); 43 | } 44 | 45 | } 46 | */ 47 | 48 | namespace slam { 49 | namespace { 50 | std::unique_ptr getMpVertex(const MapPoint &mp, int vertexId, const Keyframe &kf) { 51 | auto mpVertex = std::make_unique(); 52 | mpVertex->setEstimate(Eigen::Isometry3d(kf.poseCW) * mp.position); 53 | // TODO This should probably use map point ids, but now g2o it gives warnings 54 | // about same ids being inserted multiple times, which probably indicates 55 | // some bug in our logic. 56 | mpVertex->setId(vertexId); 57 | mpVertex->setFixed(true); 58 | 59 | return mpVertex; 60 | } 61 | } 62 | 63 | unsigned int OptimizeSim3Transform( 64 | const Keyframe &kf1, 65 | const Keyframe &kf2, 66 | const std::vector> &matches, 67 | MapDB &mapDB, 68 | g2o::Sim3 &transform12, 69 | const StaticSettings &settings) { 70 | 71 | const auto ¶meters = settings.parameters.slam; 72 | const double inlierThreshold = parameters.loopClosureInlierThreshold; 73 | const float deltaHuber = std::sqrt(inlierThreshold); 74 | const bool fixScale = parameters.loopClosureRansacFixScale; 75 | 76 | auto linearSolver = std::make_unique>(); 77 | auto blockSolver = std::make_unique<::g2o::BlockSolverX>(std::move(linearSolver)); 78 | auto algorithm = std::make_unique(std::move(blockSolver)); 79 | 80 | g2o::SparseOptimizer optimizer; 81 | optimizer.setAlgorithm(algorithm.release()); 82 | 83 | auto Sim3Vertex12 = std::make_unique(); 84 | Sim3Vertex12->setId(0); 85 | Sim3Vertex12->setEstimate(transform12); 86 | Sim3Vertex12->setFixed(false); 87 | Sim3Vertex12->_fix_scale = 
fixScale; 88 | 89 | Sim3Vertex12->_principle_point1[0] = 0; 90 | Sim3Vertex12->_principle_point1[1] = 0; 91 | Sim3Vertex12->_focal_length1[0] = 1; 92 | Sim3Vertex12->_focal_length1[1] = 1; 93 | 94 | Sim3Vertex12->_principle_point2[0] = 0; 95 | Sim3Vertex12->_principle_point2[1] = 0; 96 | Sim3Vertex12->_focal_length2[0] = 1; 97 | Sim3Vertex12->_focal_length2[1] = 1; 98 | optimizer.addVertex(Sim3Vertex12.release()); 99 | 100 | int idCounter = 1; 101 | for (const auto &match : matches) { 102 | const MapPoint &mp1 = mapDB.mapPoints.at(match.first); 103 | int vertexId1 = idCounter++; 104 | auto mpVertex1 = getMpVertex(mp1, vertexId1, kf1); 105 | 106 | const MapPoint &mp2 = mapDB.mapPoints.at(match.second); 107 | int vertexId2 = idCounter++; 108 | auto mpVertex2 = getMpVertex(mp2, vertexId2, kf2); 109 | 110 | optimizer.addVertex(mpVertex1.release()); 111 | optimizer.addVertex(mpVertex2.release()); 112 | 113 | // TODO: DRY 114 | int kpIdx1 = mp1.observations.at(kf1.id).v; 115 | const auto &keyPoint = kf1.shared->keyPoints.at(kpIdx1); 116 | Eigen::Vector2d obs1 = keyPoint.bearing.segment<2>(0) / keyPoint.bearing.z(); 117 | 118 | auto e12 = std::make_unique(); 119 | e12->setVertex(0, dynamic_cast(optimizer.vertex(vertexId2))); 120 | e12->setVertex(1, dynamic_cast(optimizer.vertex(0))); 121 | e12->setMeasurement(obs1); 122 | e12->setInformation(Eigen::Matrix2d::Identity()*settings.levelSigmaSq.at(keyPoint.octave)); 123 | 124 | auto rk1 = std::make_unique(); 125 | rk1->setDelta(deltaHuber); 126 | e12->setRobustKernel(rk1.release()); 127 | optimizer.addEdge(e12.release()); 128 | 129 | int kpIdx2 = mp2.observations.at(kf2.id).v; 130 | const auto &keyPoint2 = kf2.shared->keyPoints.at(kpIdx2); 131 | Eigen::Vector2d obs2 = keyPoint2.bearing.segment<2>(0) / keyPoint2.bearing.z(); 132 | 133 | auto e21 = std::make_unique(); 134 | e21->setVertex(0, dynamic_cast(optimizer.vertex(vertexId1))); 135 | e21->setVertex(1, dynamic_cast(optimizer.vertex(0))); 136 | e21->setMeasurement(obs2); 
137 | e21->setInformation(Eigen::Matrix2d::Identity()*settings.levelSigmaSq.at(keyPoint2.octave)); 138 | 139 | auto rk2 = std::make_unique(); 140 | rk2->setDelta(deltaHuber); 141 | e21->setRobustKernel(rk2.release()); 142 | optimizer.addEdge(e21.release()); 143 | } 144 | 145 | optimizer.initializeOptimization(); 146 | optimizer.optimize(20); 147 | 148 | // TODO(jhnj): Inlier check and reoptimize 149 | 150 | // Apply optimization results. 151 | transform12 = static_cast(optimizer.vertex(0))->estimate(); 152 | 153 | // Return all matches as inliers before inlier check (see TODO above) 154 | return matches.size(); 155 | } 156 | 157 | } // slam 158 | -------------------------------------------------------------------------------- /optimize_transform.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_OPTIMIZE_TRANFORM_HPP 2 | #define SLAM_OPTIMIZE_TRANFORM_HPP 3 | 4 | #include "keyframe.hpp" 5 | #include "map_point.hpp" 6 | #include "static_settings.hpp" 7 | 8 | #include 9 | 10 | namespace slam { 11 | 12 | class MapDB; 13 | 14 | unsigned int OptimizeSim3Transform( 15 | const Keyframe &kf1, 16 | const Keyframe &kf2, 17 | const std::vector> &matches, 18 | MapDB &mapDB, 19 | g2o::Sim3 &transform12, 20 | const StaticSettings &settings); 21 | 22 | } // namespace slam 23 | 24 | #endif // SLAM_OPTIMIZE_TRANFORM_HPP 25 | -------------------------------------------------------------------------------- /orb_extractor.cpp: -------------------------------------------------------------------------------- 1 | /******************************************************************************* 2 | 3 | License Agreement 4 | For Open Source Computer Vision Library 5 | (3-clause BSD License) 6 | 7 | Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
8 | 9 | Redistribution and use in source and binary forms, with or without modification, 10 | are permitted provided that the following conditions are met: 11 | 12 | * Redistributions of source code must retain the above copyright notice, 13 | this list of conditions and the following disclaimer. 14 | 15 | * Redistributions in binary form must reproduce the above copyright notice, 16 | this list of conditions and the following disclaimer in the documentation 17 | and/or other materials provided with the distribution. 18 | 19 | * Neither the names of the copyright holders nor the names of the contributors 20 | may be used to endorse or promote products derived from this software 21 | without specific prior written permission. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 27 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 30 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 31 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 | 34 | *******************************************************************************/ 35 | 36 | // This file is based on orb_extractor.cc and orb_extractor.h in OpenVSLAM 37 | // which is licensed under the above BSD License, apparently inherited from 38 | // OpenCV 39 | 40 | #include "orb_extractor.hpp" 41 | #include "feature_detector.hpp" 42 | #include "image_pyramid.hpp" 43 | #include "../odometry/parameters.hpp" 44 | #include "../tracker/camera.hpp" 45 | #include "../tracker/image.hpp" 46 | 47 | #include 48 | #include 49 | 50 | #include "openvslam/orb_point_pairs.h" 51 | #include "openvslam/trigonometric.h" 52 | 53 | #include 54 | #include 55 | 56 | #include 57 | 58 | #ifdef USE_SSE_ORB 59 | #ifdef _MSC_VER 60 | #include 61 | #else 62 | #include 63 | #endif 64 | #endif // USE_SSE_ORB 65 | 66 | namespace slam { 67 | namespace { 68 | using namespace openvslam; 69 | using namespace openvslam::feature; 70 | 71 | class OrbExtractorImplementation : public OrbExtractor { 72 | public: 73 | void detectAndExtract( 74 | tracker::Image &trackerImage, 75 | const tracker::Camera &camera, 76 | const std::vector &tracks, 77 | KeyPointVector &keypts, 78 | std::vector &keyptTrackIds) final 79 | { 80 | if (!imagePyramid) imagePyramid = ImagePyramid::build(settings, trackerImage); 81 | if (!featureDetector) featureDetector = FeatureDetector::build(settings, trackerImage); 82 | 83 | imagePyramid->update(trackerImage); 84 | 85 | // convert tracker keypoints & compute descriptors 86 | keypts.clear(); 87 | keyptTrackIds.clear(); 88 | 89 | for (const auto &track : tracks) { 90 | const auto &pt = track.points[0]; 91 | const unsigned level = parameters.orbLkTrackLevel; 92 | 93 | const float scale = scale_factors_.at(level); 94 | const cv::Mat levelImg = accelerated::opencv::ref(imagePyramid->getLevel(level)); 95 | const int x = cvRound(pt.x / scale); 96 | const int y = cvRound(pt.y / scale); 97 | const int margin = orb_patch_radius_; 98 | 99 | if (x >= margin && y >= margin && 100 | 
x < levelImg.cols - margin && y < levelImg.rows - margin && 101 | camera.isValidPixel(pt.x, pt.y)) 102 | { 103 | keypts.push_back({ 104 | .pt = { 105 | static_cast(x), 106 | static_cast(y) 107 | }, 108 | .angle = ic_angle(levelImg, x, y), 109 | .octave = int(level) 110 | }); 111 | const cv::Mat blurred_img = accelerated::opencv::ref(imagePyramid->getBlurredLevel(level)); 112 | 113 | assert(!blurred_img.empty()); 114 | assert(blurred_img.cols == levelImg.cols && blurred_img.rows == levelImg.rows); 115 | 116 | compute_orb_descriptor( 117 | x, y, keypts.back().angle, 118 | blurred_img, 119 | keypts.back().descriptor); 120 | 121 | keypts.back().pt = pt; // correct scale 122 | keyptTrackIds.push_back(track.id); 123 | } 124 | } 125 | 126 | // find other keypoints 127 | auto &all_keypts = work.keypoints; 128 | featureDetector->detect(*imagePyramid, all_keypts); 129 | const unsigned num_keypts = dropInvalidKeypoints(camera, all_keypts) + keypts.size(); 130 | 131 | // Compute orientations 132 | for (unsigned int level = 0; level < parameters.orbScaleLevels; ++level) { 133 | compute_orientation(accelerated::opencv::ref(imagePyramid->getLevel(level)), all_keypts.at(level)); 134 | } 135 | 136 | unsigned int offset = keypts.size(); // number of tracker keypoints 137 | if (num_keypts == 0) return; 138 | 139 | for (unsigned int level = 0; level < parameters.orbScaleLevels; ++level) { 140 | auto& keypts_at_level = all_keypts.at(level); 141 | const auto num_keypts_at_level = keypts_at_level.size(); 142 | 143 | if (num_keypts_at_level == 0) { 144 | continue; 145 | } 146 | 147 | const cv::Mat blurred_image = accelerated::opencv::ref(imagePyramid->getBlurredLevel(level)); 148 | assert(!blurred_image.empty()); 149 | compute_orb_descriptors(blurred_image, keypts_at_level, level); 150 | 151 | offset += num_keypts_at_level; 152 | 153 | const float scale_at_level = scale_factors_.at(level); 154 | for (const auto &kp : keypts_at_level) { 155 | keypts.push_back({ 156 | .pt = { kp.pt.x * 
scale_at_level, kp.pt.y * scale_at_level }, 157 | .angle = kp.angle, 158 | .octave = kp.octave, 159 | .descriptor = kp.descriptor 160 | }); 161 | keyptTrackIds.push_back(-1); 162 | } 163 | } 164 | } 165 | 166 | OrbExtractorImplementation(const StaticSettings &settings) : 167 | settings(settings), 168 | parameters(settings.parameters.slam) 169 | { 170 | // compute scale pyramid information 171 | scale_factors_ = settings.scaleFactors; 172 | 173 | // Preparate for computation of orientation 174 | u_max_.resize(fast_half_patch_size_ + 1); 175 | const unsigned int vmax = std::floor(fast_half_patch_size_ * std::sqrt(2.0) / 2 + 1); 176 | const unsigned int vmin = std::ceil(fast_half_patch_size_ * std::sqrt(2.0) / 2); 177 | for (unsigned int v = 0; v <= vmax; ++v) { 178 | u_max_.at(v) = std::round(std::sqrt(fast_half_patch_size_ * fast_half_patch_size_ - v * v)); 179 | } 180 | for (unsigned int v = fast_half_patch_size_, v0 = 0; vmin <= v; --v) { 181 | while (u_max_.at(v0) == u_max_.at(v0 + 1)) { 182 | ++v0; 183 | } 184 | u_max_.at(v) = v0; 185 | ++v0; 186 | } 187 | } 188 | 189 | void debugVisualize(const tracker::Image &trackerImage, cv::Mat &target, VisualizationMode mode) const final { 190 | assert(mode == VisualizationMode::IMAGE_PYRAMID); 191 | (void)trackerImage; 192 | assert(imagePyramid); 193 | imagePyramid->debugVisualize(target); 194 | } 195 | 196 | private: 197 | //! parameters for ORB extraction 198 | const StaticSettings &settings; 199 | const odometry::ParametersSlam ¶meters; 200 | 201 | std::unique_ptr featureDetector; 202 | 203 | std::unique_ptr imagePyramid; 204 | 205 | //! half size of FAST patch 206 | static constexpr int fast_half_patch_size_ = StaticSettings::ORB_FAST_PATCH_HALF_SIZE; 207 | 208 | //! size of maximum ORB patch radius 209 | static constexpr unsigned int orb_patch_radius_ = StaticSettings::ORB_PATCH_RADIUS; 210 | 211 | //! A list of the scale factor of each pyramid layer 212 | std::vector scale_factors_; 213 | 214 | //! 
    //! Per-row half-widths of the circular orientation patch: u_max_[v] is the
    //! maximum |u| such that (u, v) lies inside the patch circle (see ctor).
    std::vector<int> u_max_;

    //! Scratch buffers reused between frames to avoid reallocation.
    struct Workspace {
        std::vector<KeyPointVector> keypoints; // detected keypoints per pyramid level
    } work;

    /**
     * Remove keypoints whose full-resolution coordinates fall outside the
     * camera's valid pixel area. Modifies kpsPerLevel in place.
     * @return total number of keypoints remaining across all levels
     */
    unsigned dropInvalidKeypoints(
        const tracker::Camera &camera,
        std::vector<KeyPointVector> &kpsPerLevel) const
    {
        unsigned nLeft = 0;
        for (std::size_t i = 0; i < kpsPerLevel.size(); ++i) {
            float scale = scale_factors_.at(i);
            auto &kps = kpsPerLevel.at(i);
            kps.erase(std::remove_if(kps.begin(), kps.end(),
                [&camera, scale](const KeyPoint &kp) {
                    return !camera.isValidPixel(kp.pt.x * scale, kp.pt.y * scale);
                }
            ), kps.end());
            nLeft += kps.size();
        }
        return nLeft;
    }

    //! Compute the intensity-centroid orientation for each keypoint in place.
    void compute_orientation(const cv::Mat& image, KeyPointVector& keypts) const {
        for (auto& keypt : keypts) {
            keypt.angle = ic_angle(image, cvRound(keypt.pt.x), cvRound(keypt.pt.y));
        }
    }

    /**
     * "Intensity centroid" angle of the circular patch centered at (x, y):
     * the direction from the patch center to the centroid of pixel
     * intensities, from image moments m_01 and m_10.
     *
     * The whole patch must lie inside the image (asserted).
     * @return angle in DEGREES (cv::fastAtan2 convention), not radians
     */
    float ic_angle(const cv::Mat& image, int x, int y) const {
        int m_01 = 0, m_10 = 0;

        assert(x >= fast_half_patch_size_);
        assert(x < image.cols - fast_half_patch_size_);
        assert(y >= fast_half_patch_size_);
        assert(y < image.rows - fast_half_patch_size_);

        // note: .at(y,x) if using "matrix" indexing, but .at(cv::Point2f(x,y))
        const uchar* const center = &image.at<uchar>(y, x);

        // Center row (v = 0) contributes to m_10 only.
        for (int u = -fast_half_patch_size_; u <= fast_half_patch_size_; ++u) {
            m_10 += u * center[u];
        }

        // Process symmetric row pairs (+v, -v), clipped to the circle by u_max_.
        const auto step = static_cast<int>(image.step1());
        for (int v = 1; v <= fast_half_patch_size_; ++v) {
            // NOTE(review): v_sum is unsigned yet accumulates differences that
            // can be negative; the result in m_01 relies on modular wrap-around
            // (upstream OpenVSLAM uses a signed int here) — confirm intent.
            unsigned int v_sum = 0;
            const int d = u_max_.at(v);
            for (int u = -d; u <= d; ++u) {
                const int val_plus = center[u + v * step];
                const int val_minus = center[u - v * step];
                v_sum += (val_plus - val_minus);
                m_10 += u * (val_plus + val_minus);
            }
            m_01 += v * v_sum;
        }

        // unlike std::atan2, this returns the angle in DEGREES!!
        return cv::fastAtan2(m_01, m_10);
    }

    //! Set octave and compute the ORB descriptor for every keypoint of one level.
    void compute_orb_descriptors(const cv::Mat& image, KeyPointVector& keypts, int level) const {
        for (auto &kp : keypts) {
            kp.octave = level;
            compute_orb_descriptor(cvRound(kp.pt.x), cvRound(kp.pt.y), kp.angle, image, kp.descriptor);
        }
    }

    /**
     * Compute the 256-bit ORB descriptor at (x, y): intensity comparisons of
     * 256 precomputed point pairs, rotated by the keypoint orientation
     * ("steered BRIEF"). `image` is expected to be pre-blurred; the whole
     * patch must lie inside the image (asserted).
     * @param angleDeg keypoint orientation in degrees
     */
    void compute_orb_descriptor(int x, int y, float angleDeg, const cv::Mat& image, KeyPoint::Descriptor &descObj) const {
        auto *desc = reinterpret_cast<uchar*>(descObj.data());
        const float angle = angleDeg * M_PI / 180.0;
        const float cos_angle = util::cos(angle);
        const float sin_angle = util::sin(angle);

        // note: at(y,x) if using "matrix" indexing, but .at(cv::Point2f(x,y))
        const uchar* const center = &image.at<uchar>(y, x);
        const auto step = static_cast<int>(image.step);

        assert(x >= int(orb_patch_radius_));
        assert(x < int(image.cols - orb_patch_radius_));
        assert(y >= int(orb_patch_radius_));
        assert(y < int(image.rows - orb_patch_radius_));

#ifdef USE_SSE_ORB
#if !((defined _MSC_VER && defined _M_X64) \
    || (defined __GNUC__ && defined __x86_64__ && defined __SSE3__) \
    || CV_SSE3)
#error "The processor is not compatible with SSE. Please configure the CMake with -DUSE_SSE_ORB=OFF."
#endif

        // SSE variant: rotate two point pairs at once and compare intensities.
        const __m128 _trig1 = _mm_set_ps(cos_angle, sin_angle, cos_angle, sin_angle);
        const __m128 _trig2 = _mm_set_ps(-sin_angle, cos_angle, -sin_angle, cos_angle);
        __m128 _point_pairs;
        __m128 _mul1;
        __m128 _mul2;
        __m128 _vs;
        __m128i _vi;
        alignas(16) int32_t ii[4];

#define COMPARE_ORB_POINTS(shift) \
    (_point_pairs = _mm_load_ps(orb_point_pairs + shift), \
     _mul1 = _mm_mul_ps(_point_pairs, _trig1), \
     _mul2 = _mm_mul_ps(_point_pairs, _trig2), \
     _vs = _mm_hadd_ps(_mul1, _mul2), \
     _vi = _mm_cvtps_epi32(_vs), \
     _mm_store_si128(reinterpret_cast<__m128i*>(ii), _vi), \
     center[ii[0] * step + ii[2]] < center[ii[1] * step + ii[3]])

#else

// Rotate one point-pair offset by the keypoint angle and fetch that pixel.
#define GET_VALUE(shift) \
    (center[cvRound(*(orb_point_pairs + shift) * sin_angle + *(orb_point_pairs + shift + 1) * cos_angle) * step \
        + cvRound(*(orb_point_pairs + shift) * cos_angle - *(orb_point_pairs + shift + 1) * sin_angle)])

#define COMPARE_ORB_POINTS(shift) \
    (GET_VALUE(shift) < GET_VALUE(shift + 2))

#endif

        // interval: (X, Y) x 2 points x 8 pairs = 32
        static constexpr unsigned interval = 32;

        // Each iteration packs 8 pairwise comparisons into one descriptor byte.
        for (unsigned int i = 0; i < orb_point_pairs_size / interval; ++i) {
            int32_t val = COMPARE_ORB_POINTS(i * interval);
            val |= COMPARE_ORB_POINTS(i * interval + 4) << 1;
            val |= COMPARE_ORB_POINTS(i * interval + 8) << 2;
            val |= COMPARE_ORB_POINTS(i * interval + 12) << 3;
            val |= COMPARE_ORB_POINTS(i * interval + 16) << 4;
            val |= COMPARE_ORB_POINTS(i * interval + 20) << 5;
            val |= COMPARE_ORB_POINTS(i * interval + 24) << 6;
            val |= COMPARE_ORB_POINTS(i * interval + 28) << 7;
            desc[i] = static_cast<uchar>(val);
        }

#undef GET_VALUE
#undef COMPARE_ORB_POINTS
    }
};
} // anonymous namespace

/// Factory; the concrete implementation type is private to this file.
std::unique_ptr<OrbExtractor> OrbExtractor::build(const StaticSettings &settings) {
    return std::unique_ptr<OrbExtractor>(new OrbExtractorImplementation(settings));
}
} // namespace slam
/**
 * Try to relocalize keyframe `currentKfId` of `currentMapDB` against keyframe
 * `candidate` that belongs to (possibly another) map in `atlas`.
 *
 * NOTE(review): this currently only advances currentMapDB.loopStages for the
 * candidate; the RANSAC transform is computed but not applied anywhere yet —
 * presumably work in progress, confirm before relying on it.
 */
void tryRelocation(
    KfId currentKfId,
    MapKf candidate,
    MapDB &currentMapDB,
    const Atlas &atlas,
    const odometry::ParametersSlam &parameters,
    const StaticSettings &settings
) {
    const MapDB &candidateMapDB = atlas[candidate.mapId.v];

    const Keyframe &currentKf = *currentMapDB.keyframes.at(currentKfId);
    const Keyframe &candidateKf = *candidateMapDB.keyframes.at(candidate.kfId);

    // Descriptor matching between the two keyframes. matchedFeatureIds maps
    // keypoint index in currentKf -> keypoint index in candidateKf (< 0 if
    // unmatched).
    std::vector<int> matchedFeatureIds;
    matchForLoopClosures(
        currentKf,
        candidateKf,
        currentMapDB,
        candidateMapDB,
        matchedFeatureIds,
        parameters);

    // Lift feature-level matches to map-point-level matches: keep only pairs
    // where both keypoints have an associated map point (id != -1).
    std::vector<std::pair<MpId, MpId>> matches;
    for (unsigned i = 0; i < matchedFeatureIds.size(); ++i) {
        const int kfIdx2 = matchedFeatureIds.at(i);
        if (kfIdx2 >= 0) {
            const MpId mpId1 = currentKf.mapPoints.at(i);
            const MpId mpId2 = candidateKf.mapPoints.at(kfIdx2);
            if (mpId1.v != -1 && mpId2.v != -1) {
                matches.emplace_back(mpId1, mpId2);
            }
        }
    }

    if (matches.size() < parameters.minLoopClosureFeatureMatches) {
        return;
    }
    currentMapDB.loopStages[candidate] = LoopStage::RELOCATION_MAP_POINT_MATCHES;

    // Estimate the Sim3 transform between the keyframes with RANSAC.
    LoopRansac loopRansac(
        currentKf,
        candidateKf,
        matches,
        currentMapDB,
        candidateMapDB,
        settings
    );
    loopRansac.ransacSolve(parameters.loopClosureRansacIterations, LoopRansac::DoF::SIM3);
    if (!loopRansac.solutionOk) {
        return;
    }
    currentMapDB.loopStages[candidate] = LoopStage::RELOCATION_MAP_POINT_RANSAC;
}

} // namespace slam
/// Runs the SLAM mapper either synchronously on the caller's thread or on a
/// dedicated worker thread (parameters.slam.slamThread), feeding it queued
/// MapperInput tasks and fulfilling a promise per task.
///
/// NOTE(review): template arguments in this chunk were garbled by extraction;
/// the container/pointer value types below are reconstructed from usage in
/// this file — verify against the repository before relying on them.
class Worker {
public:
    using Result = Slam::Result;

    /// One queued unit of work. Exactly one of `mapperInput` / `end` is
    /// non-null, set by the matching wrap() overload below.
    struct Task {
        struct Mapper {
            EIGEN_MAKE_ALIGNED_OPERATOR_NEW
            std::shared_ptr<MapperInput> mapperInput;
            // using a pointer here is an optimization that primarily reduces
            // reallocation of PointCloud objects
            std::promise<Result> *result = nullptr;
        };

        struct End {
            std::promise<bool> result;
        };

        std::unique_ptr<Mapper> mapperInput;
        std::unique_ptr<End> end;

        // Wrap a mapper-input task into a generic Task.
        static std::unique_ptr<Task> wrap(std::unique_ptr<Mapper> mapperInput) {
            auto task = std::make_unique<Task>();
            task->mapperInput = std::move(mapperInput);
            return task;
        }

        // Wrap an end-of-stream task into a generic Task.
        static std::unique_ptr<Task> wrap(std::unique_ptr<End> mapperInput) {
            auto task = std::make_unique<Task>();
            task->end = std::move(mapperInput);
            return task;
        }
    };

    Worker(const odometry::Parameters &parameters) :
        mapper(Mapper::create(parameters)),
        resultStorage(100), // MAX_QUEUED_RESULTS = 100
        shouldQuit(false),
        mutex(),
        // Make sure thread is initialized last, because it can immediately call work() before this constructor finishes
        thread(parameters.slam.slamThread ?
            std::make_unique<std::thread>(&Worker::work, this) :
            nullptr)
    {}

    /// Signals the worker thread (if any) to quit, then joins it.
    ~Worker() {
        if (thread) {
            log_debug("signaling the SLAM thead to quit");
            {
                // shouldQuit is read by work() under the same mutex.
                std::unique_lock lock(mutex);
                shouldQuit = true;
            }

            log_debug("waiting for the SLAM thead to quit");
            thread->join();
        }
        log_debug("~Worker end");
    }

    /// Queue one frame of mapper input, or process it immediately when
    /// running without a dedicated SLAM thread. Returns a future fulfilled
    /// once process() has advanced the mapper with this input.
    std::future<Result> enqueueMapperInput(std::unique_ptr<Task::Mapper> task) {
        std::promise<Result> *result;
        auto wrapped = Task::wrap(std::move(task));
        if (!thread) {
            // Synchronous mode: the promise is fulfilled inside process(),
            // so the returned future is ready immediately.
            result = &nextResult();
            wrapped->mapperInput->result = result;
            process(*wrapped);
        } else {
            // nextResult() is called under the lock so the ring-buffer
            // counter stays consistent with the queue order.
            std::unique_lock lock(mutex);
            workQueue.push_back(std::move(wrapped));
            result = &nextResult();
            workQueue.back()->mapperInput->result = result;
        }
        return result->get_future();
    }

    /// Queue (or synchronously process) the final task; the returned future
    /// yields whether finalizing/saving the map succeeded.
    std::future<bool> end() {
        std::unique_ptr<Worker::Task::End> endTask(
            new Worker::Task::End {
                .result = std::promise<bool>(),
            });
        // Take the reference before ownership moves into the queue; the Task
        // object stays alive until process() fulfills the promise.
        std::promise<bool> &result = endTask->result;
        auto wrapped = Task::wrap(std::move(endTask));
        if (!thread) {
            process(*wrapped);
        } else {
            std::unique_lock lock(mutex);
            workQueue.push_back(std::move(wrapped));
        }
        return result.get_future();
    }

    void connectDebugAPI(DebugAPI &debug) {
        // NOTE: not thread-safe, but called in practice before anything
        // is going on in the worker thread
        mapper->connectDebugAPI(debug);
        mapSavePath = debug.mapSavePath;
    }

private:
    /// Hands out the next promise slot from the fixed-size ring buffer,
    /// resetting it first (a promise is single-use). Assumes no more than
    /// resultStorage.size() results are ever pending at once.
    std::promise<Result> &nextResult() {
        auto &result = resultStorage.at(resultCounter);
        result = {};
        resultCounter = (resultCounter + 1) % resultStorage.size();
        return result;
    }

    std::unique_ptr<Mapper> mapper;

    // TODO: not sure if this is really necessary
    std::vector<std::promise<Result>> resultStorage; // ring buffer of result promises
    int resultCounter = 0; // next free slot in resultStorage

    bool shouldQuit;  // guarded by `mutex`
    std::mutex mutex; // guards shouldQuit and workQueue
    std::deque<std::unique_ptr<Task>> workQueue; // guarded by `mutex`
    std::unique_ptr<std::thread> thread; // Can call work() before constructor finishes, at the very least keep it last in constructor.

    std::string mapSavePath;

    /// Worker-thread main loop: pops and processes tasks until shouldQuit.
    void work() {
        log_debug("starting SLAM thread");
        while (true) {
            {
                std::unique_lock lock(mutex);
                if (shouldQuit) return;
            }

            std::unique_ptr<Task> task;
            {
                std::unique_lock lock(mutex);
                if (!workQueue.empty()) {
                    task = std::move(workQueue.front());
                    workQueue.pop_front();
                }
            }

            if (task) {
                process(*task);
            }
            else {
                // TODO: bad, should properly wait for new elements
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
            }
        }
    }

    /// Executes a single task: advances the mapper on a Mapper task, or
    /// finalizes (and optionally saves) the map on an End task. Fulfills
    /// the task's promise in either case.
    void process(Task &typedTask) {
        if (typedTask.mapperInput) {
            Result result;
            Eigen::Matrix4d resultPoseMat;
            auto &task = *typedTask.mapperInput;

            // if (parameters.slam.useFullPoseTrail) {
            //     mapper->updatePoseTrail(*task.mapperInput);
            // }

            mapper->advance(task.mapperInput, resultPoseMat, result.pointCloud);

            result.poseMat = resultPoseMat;
            task.result->set_value(result);
        }
        if (typedTask.end) {
            const bool success = mapper->end(mapSavePath);
            typedTask.end->result.set_value(success);
            log_debug("end processed");
        }
    }
};
202 | 203 | std::future addFrame( 204 | std::shared_ptr frame, 205 | const std::vector &poseTrail, 206 | const std::vector &features, 207 | const cv::Mat &colorFrame 208 | ) final { 209 | auto mapperInput = std::unique_ptr(new MapperInput); 210 | mapperInput->frame = frame; 211 | mapperInput->trackerFeatures = features; 212 | mapperInput->poseTrail = poseTrail; 213 | mapperInput->t = poseTrail[0].t; 214 | mapperInput->colorFrame = colorFrame; 215 | 216 | return worker->enqueueMapperInput(std::unique_ptr( 217 | new Worker::Task::Mapper { 218 | .mapperInput = std::move(mapperInput) 219 | } 220 | )); 221 | } 222 | 223 | std::future end() final { 224 | log_debug("SlamImplementation::end"); 225 | return worker->end(); 226 | } 227 | }; 228 | } 229 | 230 | std::unique_ptr Slam::build(const odometry::Parameters ¶meters) { 231 | return std::make_unique(parameters); 232 | } 233 | 234 | } // namespace slam 235 | -------------------------------------------------------------------------------- /slam_implementation.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_SLAM_IMPLEMENTATION_HPP 2 | #define SLAM_SLAM_IMPLEMENTATION_HPP 3 | 4 | #include 5 | #include "../api/slam.hpp" 6 | #include "../api/slam_map_point_record.hpp" 7 | 8 | #include "../odometry/parameters.hpp" 9 | #include "../commandline/command_queue.hpp" 10 | 11 | namespace slam { 12 | 13 | class ViewerDataPublisher; 14 | 15 | struct DebugAPI { 16 | ViewerDataPublisher *dataPublisher = nullptr; 17 | CommandQueue *commandQueue = nullptr; 18 | std::string mapSavePath; 19 | std::function&)> endDebugCallback = nullptr; 20 | }; 21 | 22 | } // namespace slam 23 | 24 | #endif //SLAM_SLAM_IMPLEMENTATION_HPP 25 | -------------------------------------------------------------------------------- /slam_viewer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_VIEWER_HPP 2 | #define SLAM_VIEWER_HPP 3 | 4 | #include 
"opencv_viewer_data_publisher.hpp" 5 | #include "../commandline/draw_gl.hpp" 6 | #include "slam_implementation.hpp" 7 | #include "viewer_data_publisher.hpp" 8 | #include "../codegen/output/cmd_parameters.hpp" 9 | #include "../commandline/command_queue.hpp" 10 | 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | 17 | namespace slam { 18 | 19 | namespace viewer { 20 | 21 | struct AtlasControl { 22 | std::unique_ptr> angle; 23 | std::unique_ptr> x; 24 | std::unique_ptr> y; 25 | }; 26 | 27 | class Viewer { 28 | public: 29 | Viewer(const cmd::Parameters ¶meters, CommandQueue &commands); 30 | 31 | void setup(); 32 | void draw(); 33 | 34 | /** 35 | * Request to terminate the viewer 36 | * (NOTE: this function does not wait for terminate) 37 | */ 38 | void request_terminate(); 39 | 40 | ViewerDataPublisher &get_data_publisher() { return dataPublisher; } 41 | 42 | bool is_paused() const { return menu_paused_atomic.load(); } 43 | 44 | private: 45 | 46 | void create_menu_panel(); 47 | 48 | /** 49 | * Update SLAM atlas if it was changed. 50 | */ 51 | void getAtlas(); 52 | 53 | /** 54 | * Follow to the specified camera pose 55 | * @param gl_cam_pose_wc 56 | */ 57 | void follow_camera(const pangolin::OpenGlMatrix &gl_cam_pose_wc); 58 | 59 | void drawKeyframes(); 60 | 61 | void drawLoops(); 62 | 63 | void drawMapPoints(); 64 | 65 | OpenCVViewerDataPublisher dataPublisher; 66 | 67 | /** 68 | * Drawing displacement for atlas maps. 
69 | */ 70 | Eigen::Vector2f atlasOffset(size_t mapInd) const; 71 | 72 | cmd::ParametersViewer parameters; 73 | CommandQueue &commands; 74 | 75 | const float viewpoint_x_, viewpoint_y_, viewpoint_z_, viewpoint_f_; 76 | 77 | const float keyfrm_line_width_; 78 | const float graph_line_width_; 79 | const float point_size_; 80 | const float camera_size_; 81 | draw::Theme theme; 82 | int theme_ind; 83 | 84 | pangolin::View d_cam; 85 | pangolin::OpenGlMatrix gl_cam_pose_wc; 86 | 87 | // menu panel 88 | std::unique_ptr> menu_paused_; 89 | std::unique_ptr> menu_follow_camera_; 90 | std::unique_ptr> menu_grid_; 91 | std::unique_ptr> menu_show_keyfrms_; 92 | std::unique_ptr> menu_show_orig_poses_; 93 | std::unique_ptr> menu_show_mps_; 94 | std::unique_ptr> menu_show_stereo_pc_; 95 | std::unique_ptr> menu_show_local_map_; 96 | std::unique_ptr> menu_show_graph_; 97 | std::unique_ptr> menu_show_loops_; 98 | std::unique_ptr> menu_show_loop_cands_; 99 | std::unique_ptr> menu_normal_colors_; 100 | std::unique_ptr> menu_natural_colors_; 101 | std::unique_ptr> menu_mp_size_; 102 | std::unique_ptr> menu_mp_alpha_; 103 | std::unique_ptr> menu_map_scale_; 104 | std::unique_ptr> menu_change_theme_; 105 | 106 | std::atomic_bool menu_paused_atomic; 107 | 108 | // camera renderer 109 | std::unique_ptr s_cam_; 110 | 111 | // current state 112 | bool follow_camera_ = true; 113 | 114 | // viewer appearance 115 | const std::string map_viewer_name_{"Map Viewer"}; 116 | static constexpr float map_viewer_width_ = 1024; 117 | static constexpr float map_viewer_height_ = 768; 118 | 119 | // Data state which is not updated on every draw call. 120 | std::unique_ptr atlas; 121 | std::vector atlasControls; 122 | double atlasOffsetX; 123 | double atlasOffsetY; 124 | 125 | draw::Animation animation; 126 | std::set> animatedLoopStages; 127 | 128 | //----------------------------------------- 129 | // management for terminate process 130 | 131 | //! 
namespace slam {
namespace {
// Helpers from OpenVSLAM.

/// Cumulative scale factor per pyramid level:
/// factors[0] == 1.0 and factors[i] == scale_factor^i.
std::vector<float> calc_scale_factors(const unsigned int num_scale_levels, const float scale_factor) {
    std::vector<float> factors(num_scale_levels, 1.0);
    for (unsigned int lvl = 1; lvl < num_scale_levels; ++lvl) {
        factors.at(lvl) = scale_factor * factors.at(lvl - 1);
    }
    return factors;
}

/// Squared cumulative scale factor per level, i.e. the keypoint position
/// variance associated with each pyramid level.
std::vector<float> calc_level_sigma_sq(const unsigned int num_scale_levels, const float scale_factor) {
    std::vector<float> sigma_sq(num_scale_levels, 1.0);
    float cumulative = 1.0;
    for (unsigned int lvl = 1; lvl < num_scale_levels; ++lvl) {
        // Same accumulation order as upstream to keep float results identical.
        cumulative = scale_factor * cumulative;
        sigma_sq.at(lvl) = cumulative * cumulative;
    }
    return sigma_sq;
}

} // anonymous namespace
} // namespace slam
StaticSettings::maxNumberOfKeypointsPerLevel() const { 40 | const auto ¶meters = this->parameters.slam; 41 | std::vector num_keypts_per_level; 42 | 43 | // ---- copied from orb_extractor.cc in OpenVSLAM with minor modifications 44 | num_keypts_per_level.resize(parameters.orbScaleLevels); 45 | 46 | // compute the desired number of keypoints per scale 47 | double desired_num_keypts_per_scale 48 | = parameters.maxKeypoints * (1.0 - 1.0 / parameters.orbScaleFactor) 49 | / (1.0 - std::pow(1.0 / parameters.orbScaleFactor, static_cast(parameters.orbScaleLevels))); 50 | unsigned int total_num_keypts = 0; 51 | for (unsigned int level = 0; level < parameters.orbScaleLevels - 1; ++level) { 52 | num_keypts_per_level.at(level) = std::round(desired_num_keypts_per_scale); 53 | total_num_keypts += num_keypts_per_level.at(level); 54 | desired_num_keypts_per_scale *= 1.0 / parameters.orbScaleFactor; 55 | } 56 | num_keypts_per_level.at(parameters.orbScaleLevels - 1) = std::max(static_cast(parameters.maxKeypoints) - static_cast(total_num_keypts), 0); 57 | // ---- end OpenVSLAM code 58 | 59 | return num_keypts_per_level; 60 | } 61 | 62 | } 63 | -------------------------------------------------------------------------------- /static_settings.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_STATIC_SETTINGS_HPP 2 | #define SLAM_STATIC_SETTINGS_HPP 3 | 4 | #include 5 | 6 | namespace odometry { struct Parameters; } 7 | namespace slam { 8 | struct StaticSettings { 9 | const odometry::Parameters ¶meters; 10 | std::vector scaleFactors; 11 | std::vector levelSigmaSq; 12 | StaticSettings(const odometry::Parameters &p); 13 | 14 | static constexpr unsigned ORB_PATCH_RADIUS = 19; 15 | static constexpr unsigned ORB_FAST_PATCH_SIZE = 31; 16 | static constexpr unsigned ORB_FAST_PATCH_HALF_SIZE = ORB_FAST_PATCH_SIZE / 2; 17 | 18 | 19 | 20 | std::vector maxNumberOfKeypointsPerLevel() const; 21 | 22 | }; 23 | } 24 | 25 | #endif 26 | 
namespace slam {

// NOTE(review): some template arguments in this chunk were lost in
// extraction; container key/value types below are reconstructed from usage
// in the matching header — verify against the repository.

ViewerDataPublisher::ViewerDataPublisher(const cmd::ParametersSlam &parameters) :
    parameters(parameters)
{}

// All accessors below copy data under the mutex `m` so the GUI thread and
// the SLAM thread never observe a partially updated snapshot.

/// Returns a copy of all loop closures recorded so far.
std::vector<ViewerLoopClosure> ViewerDataPublisher::getLoopClosures() {
    std::lock_guard l(m);
    return loopClosures;
}

/// Records one loop closure for later visualization.
void ViewerDataPublisher::addLoopClosure(const ViewerLoopClosure &lc) {
    std::lock_guard l(m);
    loopClosures.push_back(lc);
}

/// Atomically replaces the published map snapshot (points, keyframes,
/// per-keyframe loop stages and map age) in one locked step.
void ViewerDataPublisher::setMap(
    const ViewerDataPublisher::MapPointVector &mapPoints,
    const ViewerDataPublisher::KeyframeVector &keyframes,
    const std::map<KfId, LoopStage> &loopStages,
    double age
) {
    std::lock_guard l(m);
    this->mapPoints = mapPoints;
    this->keyframes = keyframes;
    this->loopStages = loopStages;
    this->age = age;
}

/// Returns a copy of the current map points.
ViewerDataPublisher::MapPointVector ViewerDataPublisher::getMapPoints() {
    std::lock_guard l(m);
    return mapPoints;
}

/// Copies keyframes, loop stages and age into the output parameters under a
/// single lock so the three are mutually consistent.
void ViewerDataPublisher::getKeyframes(
    KeyframeVector &keyframes,
    std::map<KfId, LoopStage> &loopStages,
    double &age
) {
    std::lock_guard l(m);
    keyframes = this->keyframes;
    loopStages = this->loopStages;
    age = this->age;
}

/// Publishes a new atlas, replacing any previous not-yet-consumed one.
void ViewerDataPublisher::setAtlas(std::unique_ptr<ViewerAtlas> atlas) {
    std::lock_guard l(m);
    this->atlas = std::move(atlas);
}

/// Transfers ownership of the pending atlas to the caller; returns null if
/// no atlas has been set since the last take.
std::unique_ptr<ViewerAtlas> ViewerDataPublisher::takeAtlas() {
    std::lock_guard l(m);
    if (atlas) {
        // std::move leaves the member null, so the next call returns empty.
        return std::move(atlas);
    }
    return {};
}

const cmd::ParametersSlam &ViewerDataPublisher::getParameters() const {
    return parameters;
}

} // namespace slam
-------------------------------------------------------------------------------- /viewer_data_publisher.hpp: -------------------------------------------------------------------------------- 1 | #ifndef SLAM_DATA_PUBLISHER_HPP 2 | #define SLAM_DATA_PUBLISHER_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include "../codegen/output/cmd_parameters.hpp" 16 | #include "id.hpp" 17 | #include "bow_index.hpp" 18 | #include "loop_closer.hpp" 19 | 20 | namespace slam { 21 | 22 | class MapDB; 23 | class Keyframe; 24 | 25 | enum class MatchType { 26 | MAPPER, LOOP 27 | }; 28 | 29 | struct ViewerLoopClosure { 30 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 31 | 32 | // Camera-to-world. 33 | Eigen::Matrix4f currentPose; 34 | Eigen::Matrix4f candidatePose; 35 | Eigen::Matrix4f updatedPose; 36 | }; 37 | 38 | struct ViewerMapPoint { 39 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 40 | 41 | Eigen::Vector3f position; 42 | Eigen::Vector3f normal; 43 | Eigen::Vector3f color; 44 | int status; 45 | bool localMap; 46 | bool nowVisible; 47 | }; 48 | 49 | struct ViewerKeyframe { 50 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 51 | 52 | KfId id; 53 | bool localMap; 54 | bool current; 55 | Eigen::Matrix4f poseWC; 56 | Eigen::Matrix4f origPoseWC; 57 | std::vector neighbors; 58 | std::shared_ptr>> stereoPointCloud; 59 | std::shared_ptr> stereoPointCloudColor; 60 | }; 61 | 62 | struct ViewerAtlasMapPoint { 63 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 64 | 65 | Eigen::Vector3f position; 66 | }; 67 | 68 | struct ViewerAtlasKeyframe { 69 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 70 | 71 | KfId id; 72 | Eigen::Matrix4f poseWC; 73 | }; 74 | 75 | using ViewerAtlasMapPointVector = std::vector>; 76 | using ViewerAtlasKeyframeVector = std::vector>; 77 | struct ViewerAtlasMap { 78 | // EIGEN_MAKE_ALIGNED_OPERATOR_NEW 79 | 80 | ViewerAtlasMapPointVector mapPoints; 81 | ViewerAtlasKeyframeVector keyframes; 82 | }; 83 | 84 | using ViewerAtlas = std::vector; 85 
| 86 | struct SearchedMapPoint { 87 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW 88 | 89 | Eigen::Vector2d mp; 90 | Eigen::Vector2d kp; 91 | }; 92 | 93 | class ViewerDataPublisher { 94 | public: 95 | using MapPointVector = std::vector>; 96 | using KeyframeVector = std::vector>; 97 | using SearchedMapPointVector = std::vector>; 98 | using Vector2dVector = std::vector>; 99 | 100 | ViewerDataPublisher(const cmd::ParametersSlam ¶meters); 101 | 102 | std::vector getLoopClosures(); 103 | void addLoopClosure(const ViewerLoopClosure &lc); 104 | 105 | void setMap( 106 | const MapPointVector &mps, 107 | const KeyframeVector &kfs, 108 | const std::map &loopStages, 109 | double age 110 | ); 111 | 112 | MapPointVector getMapPoints(); 113 | void getKeyframes( 114 | KeyframeVector &keyframes, 115 | std::map &loopStages, 116 | double &age 117 | ); 118 | 119 | void setAtlas(std::unique_ptr atlas); 120 | std::unique_ptr takeAtlas(); 121 | 122 | const cmd::ParametersSlam &getParameters() const; 123 | 124 | // TODO: not the ideal design making this abstract, but the quickest fix 125 | // for separating OpenCV GUI code from cross-platform production code 126 | virtual void visualizeKeyframe( 127 | const MapDB &mapDB, 128 | const cv::Mat &grayFrame, 129 | const Keyframe &kf 130 | ) = 0; 131 | virtual void visualizeOrbs( 132 | const cv::Mat &frame, 133 | const Keyframe &kf 134 | ) = 0; 135 | virtual void visualizeMapPointSearch( 136 | const cv::Mat &frame, 137 | const SearchedMapPointVector &searched, 138 | const Vector2dVector &mps, 139 | const Vector2dVector &kps 140 | ) = 0; 141 | virtual void showMatches( 142 | const Keyframe &kf1, 143 | const Keyframe &kf2, 144 | const std::vector> &matches, 145 | MatchType matchType 146 | ) = 0; 147 | virtual void visualizeOther(const cv::Mat &mat) = 0; 148 | virtual std::map pollVisualizations() = 0; 149 | virtual void setFrameRotation(int rotation) = 0; 150 | 151 | protected: 152 | std::mutex m; 153 | std::vector loopClosures; 154 | MapPointVector 
mapPoints; 155 | KeyframeVector keyframes; 156 | std::map loopStages; 157 | double age = 0.0; 158 | std::unique_ptr atlas; 159 | cmd::ParametersSlam parameters; 160 | }; 161 | 162 | } // namespace slam 163 | 164 | #endif // SLAM_DATA_PUBLISHER_HPP 165 | --------------------------------------------------------------------------------