├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENCE ├── README.md ├── cfgs ├── eval │ ├── default.yaml │ ├── hypersim.yaml │ └── tnt.yaml ├── fitnmerge │ ├── default.yaml │ ├── eth3d.yaml │ ├── hypersim.yaml │ └── scannet.yaml ├── global_pl_association │ └── default.yaml ├── localization │ ├── 7scenes.yaml │ ├── cambridge.yaml │ ├── default.yaml │ └── inloc.yaml ├── refinement │ └── default.yaml └── triangulation │ ├── aachen.yaml │ ├── default.yaml │ ├── default_fast.yaml │ ├── dubrovnik6k.yaml │ ├── eth3d.yaml │ ├── hypersim.yaml │ ├── rome16k.yaml │ ├── scannet.yaml │ └── tnt.yaml ├── cmake └── CMakeHelper.cmake ├── docker ├── Dockerfile └── README.md ├── limap ├── CMakeLists.txt ├── __init__.py ├── _limap │ ├── CMakeLists.txt │ ├── bindings.cc │ └── helpers.h ├── base │ ├── CMakeLists.txt │ ├── __init__.py │ ├── align.py │ ├── bindings.cc │ ├── camera.cc │ ├── camera.h │ ├── camera_models.h │ ├── camera_view.cc │ ├── camera_view.h │ ├── depth_reader_base.py │ ├── functions.py │ ├── graph.cc │ ├── graph.h │ ├── image_collection.cc │ ├── image_collection.h │ ├── infinite_line.cc │ ├── infinite_line.h │ ├── line_dists.cc │ ├── line_dists.h │ ├── line_linker.cc │ ├── line_linker.h │ ├── linebase.cc │ ├── linebase.h │ ├── linetrack.cc │ ├── linetrack.h │ ├── p3d_reader_base.py │ ├── pointtrack.cc │ ├── pointtrack.h │ ├── transforms.cc │ ├── transforms.h │ └── unit_test.py ├── ceresbase │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── ceres_extensions.h │ ├── interpolation.h │ ├── line_dists.h │ ├── line_projection.h │ ├── line_transforms.h │ ├── loss_functions.h │ └── point_projection.h ├── estimators │ ├── CMakeLists.txt │ ├── __init__.py │ ├── absolute_pose │ │ ├── CMakeLists.txt │ │ ├── __init__.py │ │ ├── bindings.cc │ │ ├── hybrid_pose_estimator.cc │ │ ├── hybrid_pose_estimator.h │ │ ├── joint_pose_estimator.cc │ │ ├── joint_pose_estimator.h │ │ ├── pl_absolute_pose_hybrid_ransac.h │ │ ├── pl_absolute_pose_ransac.h │ │ └── 
pl_estimate_absolute_pose.py │ ├── bindings.cc │ └── extended_hybrid_ransac.h ├── evaluation │ ├── CMakeLists.txt │ ├── __init__.py │ ├── base_evaluator.cc │ ├── base_evaluator.h │ ├── bindings.cc │ ├── mesh_evaluator.cc │ ├── mesh_evaluator.h │ ├── point_cloud_evaluator.cc │ ├── point_cloud_evaluator.h │ ├── refline_evaluator.cc │ └── refline_evaluator.h ├── features │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── dense_sift.h │ ├── extract_line_patches.py │ ├── extractors.py │ ├── featuremap.cc │ ├── featuremap.h │ ├── featurepatch.cc │ ├── featurepatch.h │ ├── line_patch_extractor.cc │ ├── line_patch_extractor.h │ └── models │ │ ├── __init__.py │ │ ├── base_model.py │ │ ├── s2dnet.py │ │ └── vggnet.py ├── fitting │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── fitting.py │ ├── line3d_estimator.cc │ └── line3d_estimator.h ├── line2d │ ├── DeepLSD │ │ ├── __init__.py │ │ └── deeplsd.py │ ├── GlueStick │ │ ├── __init__.py │ │ ├── extractor.py │ │ └── matcher.py │ ├── HAWPv3 │ │ ├── __init__.py │ │ ├── hawp.py │ │ └── hawpv3.yaml │ ├── L2D2 │ │ ├── RAL_net_cov.py │ │ ├── __init__.py │ │ ├── extractor.py │ │ └── matcher.py │ ├── LBD │ │ ├── __init__.py │ │ ├── extractor.py │ │ └── matcher.py │ ├── LSD │ │ ├── __init__.py │ │ └── lsd.py │ ├── LineTR │ │ ├── __init__.py │ │ ├── extractor.py │ │ ├── line_attention.py │ │ ├── line_process.py │ │ ├── line_transformer.py │ │ ├── linetr_pipeline.py │ │ ├── matcher.py │ │ └── nn_matcher.py │ ├── SOLD2 │ │ ├── __init__.py │ │ ├── config │ │ │ └── export_line_features.yaml │ │ ├── experiment.py │ │ ├── misc │ │ │ ├── __init__.py │ │ │ ├── geometry_utils.py │ │ │ ├── train_utils.py │ │ │ └── visualize_util.py │ │ ├── model │ │ │ ├── __init__.py │ │ │ ├── line_detection.py │ │ │ ├── line_detector.py │ │ │ ├── line_matcher.py │ │ │ ├── line_matching.py │ │ │ ├── loss.py │ │ │ ├── lr_scheduler.py │ │ │ ├── metrics.py │ │ │ ├── model_util.py │ │ │ └── nets │ │ │ │ ├── __init__.py │ │ │ │ ├── 
backbone.py │ │ │ │ ├── descriptor_decoder.py │ │ │ │ ├── heatmap_decoder.py │ │ │ │ ├── junction_decoder.py │ │ │ │ └── lcnn_hourglass.py │ │ ├── sold2.py │ │ ├── sold2_wrapper.py │ │ └── train.py │ ├── TP_LSD │ │ ├── __init__.py │ │ └── tp_lsd.py │ ├── __init__.py │ ├── base_detector.py │ ├── base_matcher.py │ ├── endpoints │ │ ├── __init__.py │ │ ├── extractor.py │ │ └── matcher.py │ ├── line_utils │ │ ├── __init__.py │ │ └── merge_lines.py │ ├── register_detector.py │ └── register_matcher.py ├── merging │ ├── CMakeLists.txt │ ├── __init__.py │ ├── aggregator.cc │ ├── aggregator.h │ ├── bindings.cc │ ├── merging.cc │ ├── merging.h │ ├── merging.py │ ├── merging_utils.cc │ └── merging_utils.h ├── optimize │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── extract_heatmaps_sold2.py │ ├── extract_track_patches_s2dnet.py │ ├── functions.py │ ├── global_pl_association │ │ ├── CMakeLists.txt │ │ ├── __init__.py │ │ ├── bindings.cc │ │ ├── cost_functions.h │ │ ├── global_associator.cc │ │ ├── global_associator.h │ │ └── solve.py │ ├── hybrid_bundle_adjustment │ │ ├── CMakeLists.txt │ │ ├── __init__.py │ │ ├── bindings.cc │ │ ├── cost_functions.h │ │ ├── hybrid_bundle_adjustment.cc │ │ ├── hybrid_bundle_adjustment.h │ │ ├── hybrid_bundle_adjustment_config.h │ │ └── solve.py │ ├── line_localization │ │ ├── CMakeLists.txt │ │ ├── __init__.py │ │ ├── bindings.cc │ │ ├── cost_functions.h │ │ ├── functions.py │ │ ├── lineloc.cc │ │ ├── lineloc.h │ │ ├── lineloc_config.h │ │ └── solve.py │ └── line_refinement │ │ ├── CMakeLists.txt │ │ ├── __init__.py │ │ ├── bindings.cc │ │ ├── cost_functions.h │ │ ├── line_refinement.py │ │ ├── pixel_cost_functions.h │ │ ├── refine.cc │ │ ├── refine.h │ │ ├── refinement_config.h │ │ └── solve.py ├── point2d │ ├── __init__.py │ ├── superglue │ │ ├── __init__.py │ │ └── superglue.py │ └── superpoint │ │ ├── __init__.py │ │ ├── main.py │ │ └── superpoint.py ├── pointsfm │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── 
bundler_reader.py │ ├── colmap_reader.py │ ├── colmap_sfm.py │ ├── database.py │ ├── functions.py │ ├── model_converter.py │ ├── read_write_model.py │ ├── sfm_model.cc │ ├── sfm_model.h │ └── visualsfm_reader.py ├── runners │ ├── __init__.py │ ├── functions.py │ ├── functions_structures.py │ ├── line_fitnmerge.py │ ├── line_localization.py │ └── line_triangulation.py ├── solvers │ ├── CMakeLists.txt │ └── triangulation │ │ ├── CMakeLists.txt │ │ ├── triangulate_line_with_one_point.cc │ │ └── triangulate_line_with_one_point.h ├── structures │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── pl_bipartite.cc │ ├── pl_bipartite.h │ ├── pl_bipartite_base.cc │ ├── pl_bipartite_base.h │ ├── vpline_bipartite.cc │ └── vpline_bipartite.h ├── triangulation │ ├── CMakeLists.txt │ ├── __init__.py │ ├── base_line_triangulator.cc │ ├── base_line_triangulator.h │ ├── bindings.cc │ ├── functions.cc │ ├── functions.h │ ├── global_line_triangulator.cc │ ├── global_line_triangulator.h │ └── triangulation.py ├── undistortion │ ├── CMakeLists.txt │ ├── __init__.py │ ├── bindings.cc │ ├── undistort.cc │ ├── undistort.h │ └── undistort.py ├── util │ ├── CMakeLists.txt │ ├── __init__.py │ ├── config.py │ ├── evaluation.py │ ├── geometry.py │ ├── io.py │ ├── kd_tree.cpp │ ├── kd_tree.h │ ├── log_exceptions.h │ ├── nanoflann.hpp │ ├── simple_logger.cc │ ├── simple_logger.h │ └── types.h ├── visualize │ ├── __init__.py │ ├── trackvis │ │ ├── __init__.py │ │ ├── base.py │ │ ├── open3d.py │ │ ├── pyvista.py │ │ └── rerun.py │ ├── vis_bipartite.py │ ├── vis_lines.py │ ├── vis_matches.py │ └── vis_utils.py └── vplib │ ├── CMakeLists.txt │ ├── JLinkage │ ├── CMakeLists.txt │ ├── JLinkage.cc │ ├── JLinkage.h │ ├── JLinkage.py │ ├── __init__.py │ └── bindings.cc │ ├── __init__.py │ ├── base_vp_detector.cc │ ├── base_vp_detector.h │ ├── base_vp_detector.py │ ├── bindings.cc │ ├── global_vptrack_constructor.cc │ ├── global_vptrack_constructor.h │ ├── progressivex │ ├── __init__.py │ └── 
progressivex.py │ ├── register_vp_detector.py │ ├── vpbase.h │ ├── vptrack.cc │ └── vptrack.h ├── misc ├── install │ ├── lbd.md │ ├── poselib.md │ └── tp_lsd.md └── media │ ├── barn_lsd.gif │ └── supp_qualitative_5x3.png ├── requirements.txt ├── runners ├── 7scenes │ ├── localization.py │ └── utils.py ├── __init__.py ├── bundler_triangulation.py ├── cambridge │ ├── localization.py │ └── utils.py ├── colmap_triangulation.py ├── eth3d │ ├── ETH3D.py │ ├── fitnmerge.py │ ├── loader.py │ └── triangulation.py ├── hypersim │ ├── Hypersim.py │ ├── fitnmerge.py │ ├── loader.py │ ├── refine_sfm.py │ └── triangulation.py ├── inloc │ ├── localization.py │ └── utils.py ├── pointline_association.py ├── refinement.py ├── rome16k │ ├── Rome16K.py │ ├── statistics.py │ └── triangulation.py ├── scannet │ ├── ScanNet.py │ ├── fitnmerge.py │ ├── loader.py │ └── triangulation.py ├── tests │ ├── localization.py │ ├── localization_test_data_stairs_1.npy │ └── localization_test_data_stairs_2.npy └── visualsfm_triangulation.py ├── scripts ├── aachen_undistort.py ├── convert_model.py ├── eval_hypersim.py ├── eval_tnt.py ├── quickstart.sh ├── tnt_align.py └── tnt_colmap_runner.py ├── setup.py ├── third-party ├── CMakeLists.txt ├── half.h └── progressbar.hpp └── visualize_3d_lines.py /.gitignore: -------------------------------------------------------------------------------- 1 | /build/ 2 | /dist/ 3 | cmake-build-* 4 | *.egg-info/ 5 | 6 | *.npy 7 | *.png 8 | *.pyc 9 | *.tar 10 | *.obj 11 | *.pth 12 | *.gif 13 | *.mp4 14 | *.h5 15 | *.swp 16 | *.zip 17 | *.tar.gz 18 | *.th 19 | *.so 20 | 21 | experiments 22 | data 23 | results 24 | outputs 25 | tmp 26 | 27 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "third-party/pybind11"] 2 | path = third-party/pybind11 3 | url = git@github.com:pybind/pybind11.git 4 | [submodule 
"third-party/Hierarchical-Localization"] 5 | path = third-party/Hierarchical-Localization 6 | url = git@github.com:cvg/Hierarchical-Localization.git 7 | [submodule "third-party/JLinkage"] 8 | path = third-party/JLinkage 9 | url = git@github.com:B1ueber2y/JLinkage.git 10 | [submodule "third-party/libigl"] 11 | path = third-party/libigl 12 | url = git@github.com:B1ueber2y/libigl.git 13 | [submodule "third-party/RansacLib"] 14 | path = third-party/RansacLib 15 | url = git@github.com:B1ueber2y/RansacLib.git 16 | branch = header-only 17 | [submodule "third-party/HighFive"] 18 | path = third-party/HighFive 19 | url = git@github.com:B1ueber2y/HighFive.git 20 | [submodule "third-party/pytlsd"] 21 | path = third-party/pytlsd 22 | url = git@github.com:iago-suarez/pytlsd.git 23 | [submodule "third-party/pytlbd"] 24 | path = third-party/pytlbd 25 | url = git@github.com:iago-suarez/pytlbd.git 26 | [submodule "third-party/hawp"] 27 | path = third-party/hawp 28 | url = git@github.com:cherubicXN/hawp.git 29 | ignore = dirty 30 | [submodule "third-party/TP-LSD"] 31 | path = third-party/TP-LSD 32 | url = git@github.com:rpautrat/TP-LSD.git 33 | [submodule "third-party/DeepLSD"] 34 | path = third-party/DeepLSD 35 | url = git@github.com:cvg/DeepLSD.git 36 | [submodule "third-party/GlueStick"] 37 | path = third-party/GlueStick 38 | url = git@github.com:cvg/GlueStick.git 39 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, Shaohui Liu 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
29 | -------------------------------------------------------------------------------- /cfgs/eval/default.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | cfg_type: "evaluation" 5 | n_visible_views: 4 # for loading npy file and folder to linetracks 6 | visualize: False 7 | 8 | -------------------------------------------------------------------------------- /cfgs/eval/hypersim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "data" 5 | scene_id: "ai_001_001" 6 | cam_id: 0 7 | max_image_dim: 800 8 | input_n_views: 100 9 | input_stride: 1 10 | 11 | -------------------------------------------------------------------------------- /cfgs/eval/tnt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | n_neighbors: 20 5 | max_image_dim: -1 6 | line2d: 7 | detector: 8 | method: "lsd" 9 | triangulation: 10 | add_halfpix: true 11 | 12 | -------------------------------------------------------------------------------- /cfgs/fitnmerge/default.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | cfg_type: "fitnmerge" 5 | weight_path: "~/.limap/models" 6 | load_meta: False 7 | load_det: False 8 | load_fit: False 9 | use_tmp: False 10 | n_visible_views: 4 11 | n_neighbors: 100 12 | use_cuda: True 13 | visualize: True 14 | skip_exists: False 15 | output_dir: null 16 | output_folder: "fitnmerge_finaltracks" 17 | load_dir: null 18 | 19 | ############################## 20 | # sfm config 21 | sfm: 22 | colmap_output_path: "colmap_outputs" 23 | reuse: False 24 | min_triangulation_angle: 1.0 25 | neighbor_type: "dice" # ["overlap", "iou", "dice"] 26 | ranges: 27 | range_robust: 
[0.05, 0.95] 28 | k_stretch: 1.25 29 | hloc: 30 | descriptor: "superpoint_aachen" 31 | matcher: "NN-superpoint" 32 | 33 | ############################## 34 | # line detection config 35 | line2d: 36 | max_num_2d_segs: 3000 37 | do_merge_lines: False 38 | detector: 39 | method: "deeplsd" # ["lsd", "sold2", "hawpv3", "tp_lsd", "deeplsd"] 40 | skip_exists: False 41 | visualize: False 42 | save_l3dpp: False 43 | compute_descinfo: False 44 | var2d: # in pixels 45 | sold2: 5.0 46 | lsd: 2.0 47 | hawpv3: 5.0 48 | tp_lsd: 5.0 49 | 50 | ############################## 51 | # fitting config 52 | fitting: 53 | var2d: -1.0 # depends on the detector 54 | ransac_th: 0.75 55 | min_percentage_inliers: 0.9 56 | n_jobs: 4 57 | 58 | ############################## 59 | # merging config 60 | merging: 61 | var2d: -1.0 # depends on the detector 62 | linker3d: 63 | score_th: 0.5 64 | th_angle: 8.0 65 | th_overlap: 0.01 66 | th_smartoverlap: 0.1 67 | th_smartangle: 1.0 68 | th_perp: 0.75 69 | th_innerseg: 0.75 70 | linker2d: 71 | score_th: 0.5 72 | th_angle: 5.0 73 | th_perp: 2.0 # in pixels 74 | th_overlap: 0.05 75 | remerging: 76 | disable: False 77 | linker3d: 78 | score_th: 0.5 79 | th_angle: 5.0 80 | th_overlap: 0.001 81 | th_smartoverlap: 0.1 82 | th_smartangle: 1.0 83 | th_perp: 0.5 84 | th_innerseg: 0.5 85 | filtering2d: 86 | th_angular_2d: 8.0 87 | th_perp_2d: 5.0 # in pixels 88 | 89 | ############################## 90 | # geometric refinement config 91 | refinement: 92 | disable: True 93 | constant_intrinsics: True 94 | constant_pose: True 95 | constant_line: False 96 | min_num_images: 4 97 | use_geometric: True 98 | use_heatmap: False 99 | use_feature: False 100 | 101 | -------------------------------------------------------------------------------- /cfgs/fitnmerge/eth3d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "/local/home/shaoliu/data/ETH3D" 5 | reso_type: 
"dslr" # ["dslr", "lowres"] 6 | scene_id: "train/delivery_area" 7 | cam_id: 0 8 | max_image_dim: 756 9 | 10 | -------------------------------------------------------------------------------- /cfgs/fitnmerge/hypersim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "data" 5 | scene_id: "ai_001_001" 6 | cam_id: 0 7 | max_image_dim: 800 8 | input_n_views: 100 9 | input_stride: 1 10 | 11 | -------------------------------------------------------------------------------- /cfgs/fitnmerge/scannet.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "/local/home/shaoliu/data/ScanNet" 5 | scene_id: "scene0678_01" 6 | max_image_dim: 800 7 | stride: 5 8 | 9 | -------------------------------------------------------------------------------- /cfgs/global_pl_association/default.yaml: -------------------------------------------------------------------------------- 1 | global_pl_association: 2 | use_vp: True 3 | print_summary: True 4 | constant_intrinsics: True 5 | constant_pose: True 6 | constant_point: False 7 | constant_line: False 8 | constant_vp: False 9 | 10 | # vpdetection 11 | vpdet: 12 | method: "jlinkage" 13 | n_jobs: 1 14 | min_length: 20 15 | 16 | # geometric loss 17 | lw_point: 0.1 18 | geometric_alpha: 10.0 19 | 20 | # junction reassociation 21 | th_count_lineline: 3 22 | th_angle_lineline: 30.0 23 | 24 | # point-line association 25 | lw_pointline_association: 10.0 # the global loss weight 26 | th_pixel_sigma: 2.0 # in pixels 27 | th_weight_pointline: 3.0 28 | 29 | # vp-line association 30 | lw_vpline_association: 1.0 31 | th_count_vpline: 3 32 | 33 | # vp orthogonality 34 | lw_vp_orthogonality: 1.0 35 | th_angle_orthogonality: 87.0 36 | 37 | # vp collinearity 38 | lw_vp_collinearity: 0.0 39 | th_angle_collinearity: 1.0 40 | 41 | # hard 
association for output 42 | th_hard_pl_dist3d: 2.0 43 | 44 | -------------------------------------------------------------------------------- /cfgs/localization/7scenes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | max_image_dim: -1 5 | visualize: False 6 | load_undistort: True 7 | n_jobs: 1 8 | 9 | localization: 10 | 2d_matcher: "sold2" 11 | epipolar_filter: False 12 | IoU_threshold: 0.2 13 | reprojection_filter: null 14 | ransac: 15 | method: "hybrid" 16 | thres: 5 17 | thres_point: 5 18 | thres_line: 5 19 | optimize: 20 | normalize_weight: False # For Stairs: Set this to True, and loss_func to TrivialLoss for best performance 21 | loss_func: "HuberLoss" 22 | loss_func_args: [1.0] 23 | line_cost_func: "PerpendicularDist" 24 | 25 | line2d: 26 | detector: 27 | skip_exists: True 28 | method: "lsd" 29 | extractor: 30 | skip_exists: True 31 | method: "sold2" 32 | matcher: 33 | skip_exists: True 34 | method: "sold2" 35 | 36 | sfm: 37 | hloc: 38 | descriptor: "superpoint_inloc" 39 | matcher: "superglue" 40 | -------------------------------------------------------------------------------- /cfgs/localization/cambridge.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | max_image_dim: 1024 5 | visualize: False 6 | load_undistort: True 7 | n_jobs: 1 # for undistortion 8 | 9 | localization: 10 | 2d_matcher: "sold2" 11 | epipolar_filter: False 12 | IoU_threshold: 0.8 13 | reprojection_filter: null 14 | ransac: 15 | method: "hybrid" 16 | thres: 6 17 | thres_point: 6 18 | thres_line: 6 19 | optimize: 20 | normalize_weight: False 21 | loss_func: HuberLoss 22 | loss_func_args: [1.2] 23 | line_cost_func: "PerpendicularDist" 24 | 25 | line2d: 26 | detector: 27 | skip_exists: True 28 | method: "lsd" 29 | extractor: 30 | skip_exists: True 31 | 32 | var2d: 33 | lsd: 1.6 34 | 35 
| sfm: 36 | hloc: 37 | descriptor: "superpoint_aachen" 38 | matcher: "superglue" 39 | -------------------------------------------------------------------------------- /cfgs/localization/inloc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | max_image_dim: 1600 5 | visualize: False 6 | n_jobs: 1 7 | 8 | localization: 9 | 2d_matcher: "sold2" 10 | epipolar_filter: False 11 | IoU_threshold: 0.2 12 | reprojection_filter: null 13 | ransac: 14 | method: "hybrid" 15 | thres: 12 16 | thres_point: 12 17 | thres_line: 12 18 | line_cost_func: "PerpendicularDist" 19 | 20 | line2d: 21 | detector: 22 | skip_exists: True 23 | method: "lsd" 24 | extractor: 25 | method: "sold2" 26 | skip_exists: True 27 | matcher: 28 | method: "sold2" 29 | skip_exists: True 30 | superglue: 31 | weights: "outdoor" 32 | 33 | fitting: 34 | n_jobs: 4 35 | ransac_th: 0.6 36 | 37 | merging: 38 | do_merging: False -------------------------------------------------------------------------------- /cfgs/refinement/default.yaml: -------------------------------------------------------------------------------- 1 | refinement: 2 | dtype: "float16" 3 | min_num_images: 4 4 | print_summary: False 5 | constant_intrinsics: True 6 | constant_pose: True 7 | constant_line: False 8 | 9 | # geometric 10 | use_geometric: True 11 | 12 | # vp 13 | use_vp: False 14 | vp_multiplier: 0.1 15 | vpdet: 16 | method: "jlinkage" 17 | n_jobs: 8 18 | 19 | # heatmap 20 | use_heatmap: False 21 | sample_range_min: 0.05 22 | sample_range_max: 0.95 23 | n_samples_heatmap: 10 24 | heatmap_multiplier: 1.0 25 | 26 | # features 27 | use_feature: False 28 | channels: 128 29 | n_samples_feature: 100 30 | n_samples_feature_2d: 10 # for keyline adjustment 31 | use_ref_descriptor: False 32 | ref_multiplier: 5.0 33 | patch: 34 | k_stretch: 1.0 # by default we do not use k_stretch 35 | t_stretch: 10 # in pixels 36 | range_perp: 16 # in pixels 37 | 
fconsis_multiplier: 0.1 38 | 39 | -------------------------------------------------------------------------------- /cfgs/triangulation/aachen.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | n_neighbors: 20 5 | max_image_dim: -1 6 | 7 | -------------------------------------------------------------------------------- /cfgs/triangulation/default_fast.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################### 3 | # fast config 4 | base_config_file: "cfgs/triangulation/default.yaml" 5 | line2d: 6 | detector: 7 | method: "lsd" 8 | extractor: 9 | method: "superpoint_endpoints" 10 | matcher: 11 | method: "nn_endpoints" 12 | 13 | -------------------------------------------------------------------------------- /cfgs/triangulation/dubrovnik6k.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | n_neighbors: 20 5 | max_image_dim: 800 6 | triangulation: 7 | add_halfpix: True 8 | 9 | -------------------------------------------------------------------------------- /cfgs/triangulation/eth3d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "/local/home/shaoliu/data/ETH3D" 5 | reso_type: "lowres" # ["dslr", "lowres"] 6 | scene_id: "train/delivery_area" 7 | cam_id: 0 8 | max_image_dim: 756 9 | triangulation: 10 | add_halfpix: true 11 | -------------------------------------------------------------------------------- /cfgs/triangulation/hypersim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "data" 5 | scene_id: "ai_001_001" 6 | cam_id: 0 7 | max_image_dim: 800 8 | input_n_views: 100 
9 | input_stride: 1 10 | 11 | -------------------------------------------------------------------------------- /cfgs/triangulation/rome16k.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | n_neighbors: 20 5 | max_image_dim: 800 6 | use_descriptor: False 7 | triangulation: 8 | add_halfpix: True 9 | 10 | -------------------------------------------------------------------------------- /cfgs/triangulation/scannet.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | data_dir: "/local/home/shaoliu/data/ScanNet" 5 | scene_id: "scene0678_01" 6 | max_image_dim: 800 7 | stride: 5 8 | 9 | -------------------------------------------------------------------------------- /cfgs/triangulation/tnt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################## 3 | # global config 4 | n_neighbors: 20 5 | max_image_dim: -1 6 | triangulation: 7 | add_halfpix: true 8 | 9 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # LIMAP Docker Image Build from Dockerfile 2 | This document explains how to build docker image for LIMAP. This document assumes that readers' systems meet following requirements: 3 | - x86-64 (amd64) architecture 4 | - GPU that supports CUDA 11.5 5 | - Ubuntu 6 | 7 | ### Dependencies 8 | - [Docker](https://docs.docker.com/engine/install/) 9 | - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#setting-up-nvidia-container-toolkit) 10 | 11 | ### Github ssh setting 12 | You need to set up a ssh key for Github account to clone the LIMAP when building the image. 
Follow [this instruction](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) to generate a key, and then [this instruction](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account) to register the key to your Github account. 13 | 14 | ### Building Docker Image 15 | Download the attached Dockerfile and run the below command at where the Dockerfile is. 16 | ```bash 17 | docker build --build-arg SSH_PRIVATE_KEY="$(cat ~/.ssh/id_ed25519)" -t="3dv:latest" . 18 | ``` 19 | 20 | Run the built Docker image with the following command. 21 | ```bash 22 | docker run \ 23 | --rm \ 24 | -it \ 25 | --gpus all \ 26 | --shm-size 50G \ 27 | --device=/dev/dri:/dev/dri \ 28 | -v /tmp/.X11-unix:/tmp/.X11-unix \ 29 | -e DISPLAY=$DISPLAY \ 30 | -e QT_X11_NO_MITSHM=1 \ 31 | 3dv:latest \ 32 | bash 33 | ``` 34 | 35 | In case you want to run a GUI application on the container, you should allow X server connection from the host side: 36 | ```bash 37 | xhost +local:* 38 | ``` 39 | -------------------------------------------------------------------------------- /limap/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(base) 2 | add_subdirectory(ceresbase) 3 | add_subdirectory(solvers) 4 | add_subdirectory(vplib) 5 | add_subdirectory(util) 6 | add_subdirectory(pointsfm) 7 | add_subdirectory(triangulation) 8 | add_subdirectory(merging) 9 | add_subdirectory(undistortion) 10 | add_subdirectory(evaluation) 11 | add_subdirectory(fitting) 12 | add_subdirectory(estimators) 13 | add_subdirectory(optimize) 14 | add_subdirectory(structures) 15 | if(INTERPOLATION_ENABLED) 16 | add_subdirectory(features) 17 | endif() 18 | 19 | add_subdirectory(_limap) 20 | 21 | LIMAP_ADD_STATIC_LIBRARY(limap ${LIMAP_SOURCES}) 22 | target_link_libraries(limap 23 | ${LIMAP_INTERNAL_LIBRARIES} ${LIMAP_EXTERNAL_LIBRARIES}) 24 | 
25 | -------------------------------------------------------------------------------- /limap/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("build/limap/_limap") 3 | from _limap import * 4 | 5 | from . import base 6 | from . import point2d 7 | from . import line2d 8 | from . import vplib 9 | from . import pointsfm 10 | from . import undistortion 11 | 12 | from . import triangulation 13 | from . import merging 14 | from . import evaluation 15 | from . import fitting 16 | from . import util 17 | from . import visualize 18 | from . import structures 19 | 20 | from . import features 21 | from . import optimize 22 | 23 | from . import runners 24 | 25 | -------------------------------------------------------------------------------- /limap/_limap/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "pylimap") 2 | 3 | LIMAP_ADD_SOURCES( 4 | helpers.h 5 | ) 6 | 7 | LIMAP_ADD_PYMODULE(_limap 8 | bindings.cc) 9 | 10 | target_link_libraries(_limap PRIVATE 11 | ${LIMAP_INTERNAL_LIBRARIES} ${LIMAP_EXTERNAL_LIBRARIES}) 12 | 13 | 14 | -------------------------------------------------------------------------------- /limap/base/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "base") 2 | 3 | LIMAP_ADD_SOURCES( 4 | graph.h graph.cc 5 | camera.h camera.cc 6 | camera_models.h 7 | transforms.h transforms.cc 8 | camera_view.h camera_view.cc 9 | image_collection.h image_collection.cc 10 | 11 | pointtrack.h pointtrack.cc 12 | linebase.h linebase.cc 13 | linetrack.h linetrack.cc 14 | line_dists.h line_dists.cc 15 | line_linker.h line_linker.cc 16 | infinite_line.h infinite_line.cc 17 | 18 | bindings.cc 19 | ) 20 | 21 | -------------------------------------------------------------------------------- /limap/base/__init__.py: 
-------------------------------------------------------------------------------- 1 | from _limap._base import * 2 | from .functions import * 3 | from .align import * 4 | from .unit_test import * 5 | from .depth_reader_base import * 6 | from .p3d_reader_base import * 7 | 8 | -------------------------------------------------------------------------------- /limap/base/align.py: -------------------------------------------------------------------------------- 1 | import _limap._base as _base 2 | import numpy as np 3 | 4 | def umeyama_alignment(x, y, with_scale=True): 5 | """ 6 | Computes the least squares solution parameters of an Sim(m) matrix 7 | that minimizes the distance between a set of registered points. 8 | Umeyama, Shinji: Least-squares estimation of transformation parameters 9 | between two point patterns. IEEE PAMI, 1991 10 | :param x: mxn matrix of points, m = dimension, n = nr. of data points 11 | :param y: mxn matrix of points, m = dimension, n = nr. of data points 12 | :param with_scale: set to True to align also the scale (default: 1.0 scale) 13 | :return: r, t, c - rotation matrix, translation vector and scale factor 14 | """ 15 | if x.shape != y.shape: 16 | assert False, "x.shape not equal to y.shape" 17 | 18 | # m = dimension, n = nr. of data points 19 | m, n = x.shape 20 | 21 | # means, eq. 34 and 35 22 | mean_x = x.mean(axis=1) 23 | mean_y = y.mean(axis=1) 24 | 25 | # variance, eq. 36 26 | # "transpose" for column subtraction 27 | sigma_x = 1.0 / n * (np.linalg.norm(x - mean_x[:, np.newaxis])**2) 28 | 29 | # covariance matrix, eq. 38 30 | outer_sum = np.zeros((m, m)) 31 | for i in range(n): 32 | outer_sum += np.outer((y[:, i] - mean_y), (x[:, i] - mean_x)) 33 | cov_xy = np.multiply(1.0 / n, outer_sum) 34 | 35 | # SVD (text betw. eq. 38 and 39) 36 | u, d, v = np.linalg.svd(cov_xy) 37 | 38 | # S matrix, eq. 43 39 | s = np.eye(m) 40 | if np.linalg.det(u) * np.linalg.det(v) < 0.0: 41 | # Ensure a RHS coordinate system (Kabsch algorithm). 
def align_imagecols(imagecols_src, imagecols_dst):
    """Align two image collections by fitting a Sim(3) on their camera centers.

    Returns the fitted transform and the source collection with that
    transform applied.
    """
    # TODO: robust alignment
    # sanity checks: both collections must cover the same set of images
    assert imagecols_src.NumImages() == imagecols_dst.NumImages()
    assert np.all(imagecols_src.get_img_ids() == imagecols_dst.get_img_ids())

    # stack camera centers as 3xN and fit the similarity transform
    centers_src = np.asarray(imagecols_src.get_locations()).T
    centers_dst = np.asarray(imagecols_dst.get_locations()).T
    r, t, c = umeyama_alignment(centers_src, centers_dst, with_scale=True)
    transform = _base.SimilarityTransform3(r, t, c)
    return transform, imagecols_src.apply_similarity_transform(transform)
class BaseDepthReader():
    """Abstract reader for per-image depth maps.

    Subclasses implement `read` to load the depth map stored at `filename`.
    """

    def __init__(self, filename):
        # path to the depth file this reader is bound to
        self.filename = filename

    def read(self, filename):
        """Load and return the depth map at `filename` (subclass responsibility)."""
        raise NotImplementedError

    def read_depth(self, img_hw=None):
        """Read the depth map, optionally resized to (height, width) = img_hw."""
        depth = self.read(self.filename)
        if img_hw is None:
            return depth
        height, width = img_hw[0], img_hw[1]
        if depth.shape[0] != height or depth.shape[1] != width:
            # cv2.resize expects the target size as (width, height)
            depth = cv2.resize(depth, (width, height))
        return depth
def get_invert_idmap_from_linetracks(all_lines_2d, linetracks):
    """
    Build, per image, the inverse mapping from 2D line index to track id.

    :param all_lines_2d: dict mapping img_id -> list of 2D lines
    :param linetracks: list of tracks with parallel `image_id_list` /
                       `line_id_list` attributes
    :return: dict mapping img_id -> list where entry j is the track id owning
             2D line j of that image, or -1 if the line belongs to no track
    """
    # renamed local from `map` to avoid shadowing the builtin
    idmap = {}
    for img_id in all_lines_2d:
        # default: every 2D line starts unassigned
        idmap[img_id] = [-1] * len(all_lines_2d[img_id])
    for track_id, track in enumerate(linetracks):
        for img_id, line_id in zip(track.image_id_list, track.line_id_list):
            idmap[img_id][line_id] = track_id
    return idmap
class BaseP3DReader():
    """Abstract reader for 3D point files.

    Subclasses implement `read` to load the points stored at `filename`.
    """

    def __init__(self, filename):
        # path to the 3D point file this reader is bound to
        self.filename = filename

    def read(self, filename):
        """Load and return the 3D points at `filename` (subclass responsibility)."""
        raise NotImplementedError

    def read_p3ds(self):
        """Read the 3D points from the bound file."""
        return self.read(self.filename)
// Serialize the track to a Python dict; inverse of the PointTrack(py::dict)
// constructor below.
py::dict PointTrack::as_dict() const {
    py::dict output;
    output["p"] = p;                          // 3D point position
    output["image_id_list"] = image_id_list;  // images supporting this track
    output["p2d_id_list"] = p2d_id_list;      // per-image 2D point indices
    output["p2d_list"] = p2d_list;            // per-image 2D observations
    return output;
}

// Reconstruct a track from a dict produced by as_dict().
// NOTE(review): the std::vector element types were lost in this extract —
// confirm against base/pointtrack.h before relying on them.
PointTrack::PointTrack(py::dict dict) {
    ASSIGN_PYDICT_ITEM(dict, p, V3D)
    ASSIGN_PYDICT_ITEM(dict, image_id_list, std::vector)
    ASSIGN_PYDICT_ITEM(dict, p2d_id_list, std::vector)
    ASSIGN_PYDICT_ITEM(dict, p2d_list, std::vector)
}
// Apply a Sim(3) similarity transform to a camera pose; returns the pose of
// the same camera after the world has been transformed.
CameraPose pose_similarity_transform(const CameraPose& pose, const SimilarityTransform3& transform) {
    M3D new_R = pose.R() * transform.R().transpose();
    V3D new_T = transform.s() * pose.T() - new_R * transform.T();
    return CameraPose(new_R, new_T);
}

class SimilarityTransform3 {
    /*
     * Sim(3) similarity transform acting on points as:
     *   t_prime = R @ (s * t) + T
     * Rotation is stored as a quaternion (qvec), translation as tvec,
     * plus a uniform scale.
     */
public:
    SimilarityTransform3() {}
    SimilarityTransform3(V4D qqvec, V3D ttvec, double s = 1.0): qvec(qqvec), tvec(ttvec), scale(s) {}
    // Rotation-matrix constructor; converts R to a quaternion internally.
    SimilarityTransform3(M3D R, V3D T, double s = 1.0): tvec(T), scale(s) { qvec = colmap::RotationMatrixToQuaternion(R); }
    V4D qvec;      // rotation (quaternion)
    V3D tvec;      // translation
    double scale;  // uniform scale

    M3D R() const { return colmap::QuaternionToRotationMatrix(qvec); }
    V3D T() const { return tvec; }
    double s() const { return scale; }
};

CameraPose pose_similarity_transform(const CameraPose& pose, const SimilarityTransform3& transform);
import _limap._base as _base
import numpy as np

def unit_test_add_noise(imagecols):
    """Return a copy of `imagecols` with small Gaussian noise on each pose.

    Used to build a perturbed initialization for optimization unit tests.
    Quaternions get sigma 0.001 noise, translations sigma 0.01.
    """
    dict_imagecols = imagecols.as_dict()
    # # perturb cameras
    # m_cameras = dict_imagecols["cameras"]
    # for cam_id in imagecols.get_cam_ids():
    # n_params = len(m_cameras[cam_id]["params"])
    # m_cameras[cam_id]["params"] += np.random.normal(0, 10, n_params);
    # perturb poses
    m_images = dict_imagecols["images"]
    for img_id in imagecols.get_img_ids():
        m_images[img_id]["qvec"] += np.random.normal(0, 0.001, 4)
        m_images[img_id]["tvec"] += np.random.normal(0, 0.01, 3)
    return _base.ImageCollection(dict_imagecols)

def report_error(imagecols_pred, imagecols):
    """Print mean absolute intrinsics error and mean (R, T) pose error between
    a predicted image collection and the reference one."""
    # cameras: element-wise absolute error on intrinsic parameters
    camera_errors = []
    for cam_id in imagecols.get_cam_ids():
        error = np.array(imagecols_pred.cam(cam_id).params()) - np.array(imagecols.cam(cam_id).params())
        error = np.abs(error)
        camera_errors.append(error)
    print("camera_errors", np.array(camera_errors).mean(0))

    # images: Frobenius norm of the rotation difference, L2 norm of the
    # translation difference, averaged over all images
    pose_errors = []
    for img_id in imagecols.get_img_ids():
        R_error = imagecols_pred.camimage(img_id).R() - imagecols.camimage(img_id).R()
        R_error = np.sqrt(np.sum(R_error ** 2))
        T_error = imagecols_pred.camimage(img_id).T() - imagecols.camimage(img_id).T()
        T_error = np.sqrt(np.sum(T_error ** 2))
        pose_errors.append(np.array([R_error, T_error]))
    print("pose_error: (R, T)", np.array(pose_errors).mean(0))
// Trampoline class so a Python object can act as a ceres::IterationCallback:
// operator() dispatches to the Python-side __call__ override.
class PyIterationCallback: public ceres::IterationCallback {
public:
    using ceres::IterationCallback::IterationCallback;

    ceres::CallbackReturnType operator()(const ceres::IterationSummary& summary) override {
        PYBIND11_OVERRIDE_PURE_NAME(
            ceres::CallbackReturnType, // Return type (ret_type)
            ceres::IterationCallback,  // Parent class (cname)
            "__call__",                // Name of method in Python (name)
            operator(),                // Name of function in C++ (fn)
            summary
        );
    }
};

// Trampoline base for Python-defined Ceres loss functions.
// NOTE(review): Evaluate is a no-op here — out[3] is left untouched; confirm
// that concrete subclasses always override it before use.
class PyLossFunction : public ceres::LossFunction {
public:
    /* Inherit the constructors */
    using ceres::LossFunction::LossFunction;

    void Evaluate(double sq_norm, double out[3]) const override {}
};
// Convert the minimal Plücker line parameterization (unit quaternion uvec +
// 2-vector wvec of weights) into geometric Plücker coordinates:
// direction d and moment m.
// [LINK] https://faculty.sites.iastate.edu/jia/files/inline-files/plucker-coordinates.pdf
// [LINK] https://hal.archives-ouvertes.fr/hal-00092589/document
// Refer to "base/infinite_line.h"
// NOTE(review): the template parameter list was lost in this extract; the body
// is generic over the scalar type T as used by ceres autodiff — confirm
// against the original header.
template
void MinimalPluckerToPlucker(const T uvec[4], const T wvec[2], T d[3], T m[3]) {
    T rotmat[3 * 3];
    ceres::QuaternionToRotation(uvec, rotmat);
    T w1, w2;
    w1 = ceres::abs(wvec[0]);
    w2 = ceres::abs(wvec[1]);

    // direc = a = Q.col(0) * w1
    // b = Q.col(1) * w2
    d[0] = rotmat[0];
    d[1] = rotmat[3];
    d[2] = rotmat[6];
    // moment points along Q.col(1), scaled by w2/w1 (EPS guards division by zero)
    T b_norm = w2 / (w1 + EPS);
    m[0] = rotmat[1] * b_norm;
    m[1] = rotmat[4] * b_norm;
    m[2] = rotmat[7] * b_norm;
}
// Intersect two 2D lines given in homogeneous coordinates; writes the
// Euclidean intersection point to xy. Returns false when the intersection
// lies (near) at infinity, i.e. the lines are (near-)parallel.
// NOTE(review): template parameter lists were lost in this extract;
// these are generic over the scalar type T (ceres autodiff).
template
bool Ceres_IntersectLineCoordinates(const T coor1[3], const T coor2[3], T xy[2]) {
    // homogeneous intersection = cross product of the two line coordinates
    T p_homo[3];
    ceres::CrossProduct(coor1, coor2, p_homo);
    T norm = ceres::sqrt(p_homo[0] * p_homo[0] + p_homo[1] * p_homo[1] + p_homo[2] * p_homo[2]);
    p_homo[0] /= norm;
    p_homo[1] /= norm;
    p_homo[2] /= norm;
    T eps(EPS);
    if (ceres::abs(p_homo[2]) < eps)
        return false;  // point at infinity
    else {
        xy[0] = p_homo[0] / p_homo[2];
        xy[1] = p_homo[1] / p_homo[2];
    }
    return true;
}

// Pinhole back-projection: pixel (x, y) -> normalized camera coordinates
// (u, v). kvec is [fx, fy, cx, cy] (see base/camera_models.h).
template
void ImageToWorld(const T* kvec, const T x, const T y, T* u, T* v) {
    const T f1 = kvec[0];
    const T f2 = kvec[1];
    const T c1 = kvec[2];
    const T c2 = kvec[3];

    *u = (x - c1) / f1;
    *v = (y - c2) / f2;
}

// Pinhole projection: normalized camera coordinates (u, v) -> pixel (x, y).
template
void WorldToImage(const T* kvec, const T u, const T v, T* x, T* y) {
    const T f1 = kvec[0];
    const T f2 = kvec[1];
    const T c1 = kvec[2];
    const T c2 = kvec[3];

    *x = f1 * u + c1;
    *y = f2 * v + c2;
}
// Full projection: world point xyz -> pixel xy, given the pose (qvec, tvec)
// and pinhole intrinsics kvec = [fx, fy, cx, cy].
// NOTE(review): the template parameter list was lost in this extract.
template
inline void WorldToPixel(const T* kvec, const T* qvec, const T* tvec, const T* xyz, T* xy) {
    // camera-frame point = R(qvec) * xyz + tvec
    T projection[3];
    ceres::QuaternionRotatePoint(qvec, xyz, projection);
    projection[0] += tvec[0];
    projection[1] += tvec[1];
    projection[2] += tvec[2];

    // perspective division to normalized coordinates
    projection[0] /= projection[2]; // u
    projection[1] /= projection[2]; // v
    WorldToImage(kvec, projection[0], projection[1], &xy[0], &xy[1]);
}

// True iff value lies strictly inside the open interval (0, L).
template
inline bool IsInsideZeroL(const T& value, double L) {
    return (value > 0.0 && value < L);
}
// Python bindings for the point-line absolute pose estimators.
// NOTE(review): py::class_ template arguments were lost in this extract;
// each call registers the identically-named Options struct.
void bind_absolute_pose(py::module& m) {
    using namespace estimators::absolute_pose;

    // Options for the joint (single-RANSAC) point-line pose estimator.
    py::class_(m, "JointPoseEstimatorOptions")
        .def(py::init<>())
        .def_readwrite("ransac_options", &JointPoseEstimatorOptions::ransac_options)
        .def_readwrite("lineloc_config", &JointPoseEstimatorOptions::lineloc_config)
        .def_readwrite("cheirality_min_depth", &JointPoseEstimatorOptions::cheirality_min_depth)
        .def_readwrite("cheirality_overlap_pixels", &JointPoseEstimatorOptions::cheirality_overlap_pixels)
        .def_readwrite("sample_solver_first", &JointPoseEstimatorOptions::sample_solver_first)
        .def_readwrite("random", &JointPoseEstimatorOptions::random);

    // Options for the hybrid-RANSAC pose estimator.
    py::class_(m, "HybridPoseEstimatorOptions")
        .def(py::init<>())
        .def_readwrite("ransac_options", &HybridPoseEstimatorOptions::ransac_options)
        .def_readwrite("lineloc_config", &HybridPoseEstimatorOptions::lineloc_config)
        .def_readwrite("solver_flags", &HybridPoseEstimatorOptions::solver_flags)
        .def_readwrite("cheirality_min_depth", &HybridPoseEstimatorOptions::cheirality_min_depth)
        .def_readwrite("cheirality_overlap_pixels", &HybridPoseEstimatorOptions::cheirality_overlap_pixels)
        .def_readwrite("random", &HybridPoseEstimatorOptions::random);

    // Free functions: estimate a camera pose from 2D-3D point and line matches.
    m.def("EstimateAbsolutePose_PointLine", &EstimateAbsolutePose_PointLine,
          "l3ds"_a, "l3d_ids"_a, "l2ds"_a, "p3ds"_a, "p2ds"_a, "cam"_a, "options_"_a);
    m.def("EstimateAbsolutePose_PointLine_Hybrid", &EstimateAbsolutePose_PointLine_Hybrid,
          "l3ds"_a, "l3d_ids"_a, "l2ds"_a, "p3ds"_a, "p2ds"_a, "cam"_a, "options_"_a);
}

// LO-RANSAC options extended with a multiplier controlling the non-minimal
// sample size used in the local-optimization step.
class ExtendedHybridLORansacOptions : public ransac_lib::HybridLORansacOptions {
public:
    ExtendedHybridLORansacOptions()
        : non_min_sample_multiplier_(3) {}
    // We add this to do non minimal sampling in LO step in align with
    // the original definition of the LO step
    int non_min_sample_multiplier_;
};
// Abstract base for evaluating 3D lines against reference geometry (point
// cloud or mesh). Subclasses supply the point-to-reference distance; the
// line-level metrics are built on top of it (implementations in the .cc,
// not shown here — presumably by sampling n_samples points per line).
class BaseEvaluator {
public:
    // Distance from a single 3D point to the reference geometry.
    virtual double ComputeDistPoint(const V3D& point) = 0;
    // Distance metric for a whole line, using n_samples point samples.
    double ComputeDistLine(const Line3d& line, int n_samples=10);

    // compute inlier ratio (fraction of sampled points within threshold)
    double ComputeInlierRatio(const Line3d& line, double threshold, int n_samples=1000);

    // visualization: split lines into inlier / outlier sub-segments
    // with respect to the distance threshold
    std::vector ComputeInlierSegsOneLine(const Line3d& line, double threshold, int n_samples=1000);
    std::vector ComputeInlierSegs(const std::vector& lines, double threshold, int n_samples=1000);
    std::vector ComputeOutlierSegsOneLine(const Line3d& line, double threshold, int n_samples=1000);
    std::vector ComputeOutlierSegs(const std::vector& lines, double threshold, int n_samples=1000);
};
20 | else 21 | throw std::runtime_error("Not Implemented!!"); 22 | std::cout<<"read a new mesh: V.rows() = "< 9 | #include 10 | #include 11 | 12 | namespace limap { 13 | 14 | namespace evaluation { 15 | 16 | class MeshEvaluator: public BaseEvaluator { 17 | public: 18 | MeshEvaluator(): BaseEvaluator() {} 19 | MeshEvaluator(const std::string& filename, const double& mpau); 20 | 21 | // compute dist point 22 | double ComputeDistPoint(const V3D& point) override; 23 | 24 | private: 25 | Eigen::MatrixXd V_; 26 | Eigen::MatrixXi F_; 27 | igl::AABB tree_; 28 | }; 29 | 30 | } // namespace evaluation 31 | 32 | } // namespace limap 33 | 34 | #endif 35 | 36 | -------------------------------------------------------------------------------- /limap/evaluation/point_cloud_evaluator.cc: -------------------------------------------------------------------------------- 1 | #include "evaluation/point_cloud_evaluator.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace limap { 9 | 10 | namespace evaluation { 11 | 12 | double PointCloudEvaluator::ComputeDistPoint(const V3D& point) { 13 | V3D p = tree_.query_nearest(point); 14 | return (point - p).norm(); 15 | } 16 | 17 | std::vector PointCloudEvaluator::ComputeDistsforEachPoint(const std::vector& lines) const { 18 | size_t n_points = tree_.cloud.pts.size(); 19 | std::vector dists(n_points); 20 | 21 | progressbar bar(n_points); 22 | #pragma omp parallel for 23 | for (size_t i = 0; i < n_points; ++i) { 24 | bar.update(); 25 | V3D p = tree_.point(i); 26 | double min_dist = std::numeric_limits::max(); 27 | for (auto it = lines.begin(); it != lines.end(); ++it) { 28 | double dist = it->point_distance(p); 29 | if (dist < min_dist) 30 | min_dist = dist; 31 | } 32 | dists[i] = min_dist; 33 | } 34 | return dists; 35 | } 36 | 37 | std::vector PointCloudEvaluator::ComputeDistsforEachPoint_KDTree(const std::vector& lines) const { 38 | size_t n_points = tree_.cloud.pts.size(); 39 | std::vector dists(n_points); 40 | 41 | // 
sample points uniformly on all the lines and build a kd tree 42 | // TODO: sample by length 43 | const int n_samples = 1000; 44 | std::vector line_points; 45 | std::vector labels; 46 | for (size_t line_id = 0; line_id < lines.size(); ++line_id) { 47 | auto& line = lines[line_id]; 48 | double interval = line.length() / (n_samples - 1); 49 | for (size_t i = 0; i < n_samples; ++i) { 50 | V3D p = line.start + i * interval * (line.end - line.start); 51 | line_points.push_back(p); 52 | labels.push_back(line_id); 53 | } 54 | } 55 | KDTree line_tree; 56 | line_tree.initialize(line_points); 57 | 58 | progressbar bar(n_points); 59 | #pragma omp parallel for 60 | for (size_t i = 0; i < n_points; ++i) { 61 | bar.update(); 62 | V3D p = tree_.point(i); 63 | std::vector res; 64 | line_tree.query_knn(p, res, 1); 65 | int index = res[0]; 66 | int line_id = labels[index]; 67 | double min_dist = lines[line_id].point_distance(p); 68 | dists[i] = min_dist; 69 | } 70 | return dists; 71 | } 72 | 73 | } // namespace evaluation 74 | 75 | } // namespace limap 76 | 77 | -------------------------------------------------------------------------------- /limap/evaluation/point_cloud_evaluator.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_EVALUATION_POINT_CLOUD_EVALUATOR_H_ 2 | #define LIMAP_EVALUATION_POINT_CLOUD_EVALUATOR_H_ 3 | 4 | #include "base/linebase.h" 5 | #include "util/types.h" 6 | #include "util/kd_tree.h" 7 | #include "evaluation/base_evaluator.h" 8 | 9 | #include 10 | #include 11 | 12 | namespace limap { 13 | 14 | namespace evaluation { 15 | 16 | class PointCloudEvaluator: public BaseEvaluator { 17 | public: 18 | PointCloudEvaluator(): BaseEvaluator() {} 19 | PointCloudEvaluator(const std::vector& points): BaseEvaluator() { tree_.initialize(points, false); } 20 | PointCloudEvaluator(const Eigen::MatrixXd& points): BaseEvaluator() { tree_.initialize(points, false); } 21 | 22 | // build indexes 23 | void Build() { 
// Evaluates tested 3D lines against a set of reference 3D lines via recall
// at a distance threshold (implementations in the .cc, not shown here).
class RefLineEvaluator {
public:
    RefLineEvaluator() {}
    RefLineEvaluator(const std::vector& ref_lines): ref_lines_(ref_lines) {};

    // Total length of the reference lines.
    double SumLength() const;
    // Recall of the reference lines covered by `lines` within `threshold`.
    double ComputeRecallRef(const std::vector& lines, const double threshold, const int num_samples=1000) const;
    // Recall of the tested lines covered by the reference within `threshold`.
    double ComputeRecallTested(const std::vector& lines, const double threshold, const int num_samples=1000) const;

private:
    std::vector ref_lines_;
    // point-to-line(s) distance helpers used by the recall computations
    double DistPointLine(const V3D& point, const Line3d& line) const;
    double DistPointLines(const V3D& point, const std::vector& line) const;

    // shared implementation: length-based recall of ref_lines w.r.t. lines
    double ComputeRecallLength(const std::vector& ref_lines, const std::vector& lines, const double threshold, const int num_samples=1000) const;
};
-------------------------------------------------------------------------------- /limap/features/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "features") 2 | 3 | LIMAP_ADD_SOURCES( 4 | featuremap.h featuremap.cc 5 | featurepatch.h featurepatch.cc 6 | 7 | dense_sift.h 8 | line_patch_extractor.h line_patch_extractor.cc 9 | 10 | bindings.cc 11 | ) 12 | 13 | -------------------------------------------------------------------------------- /limap/features/__init__.py: -------------------------------------------------------------------------------- 1 | from _limap._features import * 2 | from .extractors import * 3 | from .extract_line_patches import * 4 | 5 | -------------------------------------------------------------------------------- /limap/features/dense_sift.h: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | 5 | namespace py = pybind11; 6 | 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "VLFeat/covdet.h" 15 | #include "VLFeat/sift.h" 16 | #include "VLFeat/dsift.h" 17 | #include 18 | 19 | #include 20 | 21 | #include "util/log_exceptions.h" 22 | #include "util/simple_logger.h" 23 | 24 | namespace limap { 25 | 26 | namespace features { 27 | 28 | py::array_t extract_dsift( 29 | const py::array_t image, 30 | const double step, 31 | const double bin_size 32 | ) { 33 | // Check that input is grayscale. 
from _limap import _features
import numpy as np

# numpy cast applied before serializing a patch, keyed by dtype string.
_CAST = {"float16": np.float16, "float32": np.float32}


def write_patch(fname, patch, dtype="float16"):
    """Serialize a PatchInfo_f object to an .npz file at ``fname``."""
    array = patch.array
    cast = _CAST.get(dtype)
    if cast is not None:
        array = array.astype(cast)
    with open(fname, 'wb') as f:
        np.savez(f, array=array, R=patch.R, tvec=patch.tvec, img_hw=patch.img_hw)


def load_patch(fname, dtype="float16"):
    """Read an .npz written by :func:`write_patch` and rebuild a PatchInfo_f object."""
    # "float16" -> PatchInfo_f16, "float32" -> PatchInfo_f32
    ctor = getattr(_features, "PatchInfo_f{0}".format(dtype[-2:]))
    with open(fname, 'rb') as f:
        data = np.load(f, allow_pickle=True)
        patch = ctor(data["array"], data["R"], data["tvec"], data["img_hw"])
    return patch


def _build_extractor(cfg, channels):
    # All LinePatchExtractor_f64_c{k} bindings share one options struct;
    # the binding class is selected by the feature channel count.
    options = _features.LinePatchExtractorOptions(cfg)
    name = "LinePatchExtractor_f64_c{0}".format(channels)
    return getattr(_features, name)(options)


def get_extractor(cfg, channels):
    """Instantiate the pybind LinePatchExtractor matching ``channels``."""
    return _build_extractor(cfg, channels)


def extract_line_patch_oneimage(cfg, track, img_id, camview, feature):
    '''
    Returns:
        _features.PatchInfo_fx
    '''
    extractor = _build_extractor(cfg, feature.shape[2])
    return extractor.ExtractOneImage(track, img_id, camview, feature)


def extract_line_patches(cfg, track, p_camviews, p_features):
    '''
    Returns:
        list of _features.PatchInfo_fx
    '''
    extractor = _build_extractor(cfg, p_features[0].shape[2])
    return extractor.Extract(track, p_camviews, p_features)
{ 15 | for (int col = 0; col < width; ++col) { 16 | data_[row * width + col] = DTYPE(array(row, col)); 17 | } 18 | } 19 | data_ptr_ = data_.data(); 20 | } 21 | 22 | template 23 | FeatureMap::FeatureMap(const py::array_t& pyarray, bool do_copy) { 24 | py::buffer_info buffer_info = pyarray.request(); 25 | 26 | data_ptr_ = static_cast(buffer_info.ptr); 27 | std::vector shape = buffer_info.shape; 28 | if (shape.size() != 2 && shape.size() != 3) { 29 | throw std::runtime_error("Unsupported shape!"); 30 | } 31 | height = shape[0]; 32 | width = shape[1]; 33 | if (shape.size() == 3) 34 | channels = shape[2]; 35 | else 36 | channels = 1; 37 | 38 | if (do_copy) { 39 | ssize_t size = Size(); 40 | THROW_CHECK_EQ(buffer_info.size, size); 41 | data_.assign(data_ptr_, data_ptr_ + size); 42 | data_ptr_ = &data_[0]; 43 | } 44 | } 45 | 46 | template class FeatureMap; 47 | template class FeatureMap; 48 | template class FeatureMap; 49 | 50 | } // namespace features 51 | 52 | } // namespace limap 53 | 54 | -------------------------------------------------------------------------------- /limap/features/featurepatch.cc: -------------------------------------------------------------------------------- 1 | #include "features/featurepatch.h" 2 | 3 | namespace limap { 4 | 5 | namespace features { 6 | 7 | template 8 | PatchInfo FeaturePatch::GetPatchInfo() const { 9 | size_t height, width, channels; 10 | height = FeatureMap::Height(); 11 | width = FeatureMap::Width(); 12 | channels = FeatureMap::Channels(); 13 | size_t size = FeatureMap::Size(); 14 | const DTYPE* data_ptr = FeatureMap::Data(); 15 | 16 | py::array_t pyarray = py::array_t(std::vector{height, width, channels}); 17 | py::buffer_info buffer_info = pyarray.request(); 18 | DTYPE* data_ptr_array = static_cast(buffer_info.ptr); 19 | memcpy(data_ptr_array, data_ptr, sizeof(DTYPE) * size); 20 | return PatchInfo(pyarray, R, tvec, img_hw); 21 | } 22 | 23 | template class FeaturePatch; 24 | template class FeaturePatch; 25 | template class 
FeaturePatch; 26 | 27 | } // namespace features 28 | 29 | } // namespace limap 30 | 31 | -------------------------------------------------------------------------------- /limap/features/line_patch_extractor.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_FEATURES_LINE_PATCH_EXTRACTOR_H_ 2 | #define LIMAP_FEATURES_LINE_PATCH_EXTRACTOR_H_ 3 | 4 | #include 5 | #include 6 | #include "_limap/helpers.h" 7 | 8 | #include "features/featurepatch.h" 9 | #include "base/camera_view.h" 10 | #include "base/linebase.h" 11 | #include "base/linetrack.h" 12 | #include "util/types.h" 13 | 14 | namespace py = pybind11; 15 | 16 | namespace limap { 17 | 18 | namespace features { 19 | 20 | class LinePatchExtractorOptions { 21 | public: 22 | LinePatchExtractorOptions() {} 23 | LinePatchExtractorOptions(py::dict dict): LinePatchExtractorOptions() { 24 | ASSIGN_PYDICT_ITEM(dict, k_stretch, double); 25 | ASSIGN_PYDICT_ITEM(dict, t_stretch, int); 26 | ASSIGN_PYDICT_ITEM(dict, range_perp, int); 27 | } 28 | // finallength = std::max(length * k_stretch, length + t_stretch) 29 | double k_stretch = 1.0; // by default we do not stretch lines 30 | int t_stretch = 10; // in pixels 31 | int range_perp = 20; // in pixels 32 | }; 33 | 34 | 35 | template 36 | class LinePatchExtractor { 37 | public: 38 | LinePatchExtractor() {} 39 | LinePatchExtractor(const LinePatchExtractorOptions& options): options_(options) {} 40 | 41 | PatchInfo ExtractLinePatch(const Line2d& line2d, 42 | const py::array_t& feature); 43 | std::vector> ExtractLinePatches(const std::vector& line2ds, 44 | const py::array_t& feature); 45 | 46 | Line2d GetLine2DRange(const LineTrack& track, 47 | const int image_id, 48 | const CameraView& view); 49 | 50 | PatchInfo ExtractOneImage(const LineTrack& track, 51 | const int image_id, 52 | const CameraView& view, 53 | const py::array_t& feature); 54 | 55 | void Extract(const LineTrack& track, 56 | const std::vector& p_views, 57 | const 
from limap.features.models.s2dnet import *

class VGGNet(BaseModel):
    """VGG16 backbone returning hypercolumn feature maps.

    Same contract as S2DNet but without learned adaptation layers: the
    forward pass returns the raw activations at the configured layers.
    """
    default_conf = {
        'hypercolumn_layers': ["conv1_2", "conv3_3"],# "conv5_3"],
        'checkpointing': None,
        'output_dim': 128,
        'pretrained': 'imagenet',
    }
    # ImageNet per-channel statistics used to whiten the input image.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    def _init(self, conf = default_conf):
        assert conf.pretrained in ['s2dnet', 'imagenet', None]

        # Map layer names (e.g. "conv3_3") to their position in vgg16.features.
        self.layer_to_index = dict(
            (name, idx) for idx, name in enumerate(vgg16_layers.keys()))
        self.hypercolumn_indices = [
            self.layer_to_index[name] for name in conf.hypercolumn_layers]

        # Keep only the VGG16 prefix needed to reach the deepest selected layer.
        depth = self.hypercolumn_indices[-1] + 1
        backbone = models.vgg16(pretrained=True)
        kept = list(backbone.features.children())[:depth]
        self.encoder = nn.ModuleList(kept)

        # Downsampling factor of each selected layer (doubles after each max-pool).
        self.scales = []
        pool_count = 0
        for pos, module in enumerate(kept):
            if isinstance(module, torch.nn.MaxPool2d):
                pool_count += 1
            if pos in self.hypercolumn_indices:
                self.scales.append(2**pool_count)

    def _forward(self, data):
        # The raw image tensor is passed directly here, not wrapped in a dict.
        image = data
        mean, std = image.new_tensor(self.mean), image.new_tensor(self.std)
        # Per-channel ImageNet normalization, broadcast over H and W.
        image = (image - mean[:, None, None]) / std[:, None, None]
        del mean, std

        feature_maps = []
        current = image
        begin = 0
        for idx in self.hypercolumn_indices:
            if self.conf.checkpointing:
                # Run the encoder in gradient-checkpointed chunks to save memory.
                bounds = list(range(begin, idx + 2, self.conf.checkpointing))
                if bounds[-1] != idx + 1:
                    bounds.append(idx + 1)
                for lo, hi in zip(bounds[:-1], bounds[1:]):
                    current = torch.utils.checkpoint.checkpoint(
                        nn.Sequential(*self.encoder[lo:hi]), current)
            else:
                for pos in range(begin, idx + 1):
                    current = self.encoder[pos](current)
            feature_maps.append(current)
            begin = idx + 1

        return {'feature_maps': feature_maps}

    def loss(self, pred, data):
        raise NotImplementedError

    def metrics(self, pred, data):
        raise NotImplementedError
18 | 19 | m.def("Fit3DPoints", &Fit3DPoints); 20 | } 21 | 22 | } // namespace limap 23 | 24 | -------------------------------------------------------------------------------- /limap/fitting/line3d_estimator.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_FITTING_LINE_ESTIMATOR_H_ 2 | #define LIMAP_FITTING_LINE_ESTIMATOR_H_ 3 | 4 | #include "_limap/helpers.h" 5 | #include "util/types.h" 6 | #include "base/linebase.h" 7 | #include "base/infinite_line.h" 8 | 9 | #include 10 | 11 | namespace limap { 12 | 13 | namespace fitting { 14 | 15 | // Implements a simple solver that estimates a 3D line from two data points. 16 | // Reference link: https://github.com/B1ueber2y/RansacLib/blob/master/examples/line_estimator.h 17 | class Line3dEstimator { 18 | public: 19 | Line3dEstimator(const Eigen::Matrix3Xd& data); 20 | 21 | inline int min_sample_size() const { return 2; } 22 | 23 | inline int non_minimal_sample_size() const { return 6; } 24 | 25 | inline int num_data() const { return num_data_; } 26 | 27 | int MinimalSolver(const std::vector& sample, 28 | std::vector* lines) const; 29 | 30 | // Returns 0 if no model could be estimated and 1 otherwise. 31 | // Implemented by a simple linear least squares solver. 32 | int NonMinimalSolver(const std::vector& sample, 33 | InfiniteLine3d* line) const; 34 | 35 | // Evaluates the line on the i-th data point. 36 | double EvaluateModelOnPoint(const InfiniteLine3d& line, int i) const; 37 | 38 | // Linear least squares solver. Calls NonMinimalSolver. 39 | inline void LeastSquares(const std::vector& sample, 40 | InfiniteLine3d* line) const { 41 | NonMinimalSolver(sample, line); 42 | } 43 | 44 | protected: 45 | // Matrix holding the 3D points through which the line is fitted. 
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from base_detector import BaseDetector, BaseDetectorOptions
import numpy as np
import torch

from deeplsd.models.deeplsd_inference import DeepLSD


class DeepLSDDetector(BaseDetector):
    """Line detector wrapping the pretrained DeepLSD network."""

    def __init__(self, options = BaseDetectorOptions()):
        super(DeepLSDDetector, self).__init__(options)

        # Inference configuration: raw (unmerged) segments with NFA-based
        # gradient filtering.
        conf = {
            'detect_lines': True,
            'line_detection_params': {
                'merge': False,
                'grad_nfa': True,
                'filtering': 'normal',
                'grad_thresh': 3,
            },
        }
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Resolve the checkpoint location, downloading it on first use.
        if self.weight_path is None:
            ckpt = os.path.join(os.path.dirname(__file__), 'deeplsd_md.tar')
        else:
            ckpt = os.path.join(self.weight_path, "line2d", "DeepLSD", 'deeplsd_md.tar')
        if not os.path.isfile(ckpt):
            self.download_model(ckpt)

        state = torch.load(ckpt, map_location='cpu')
        model = DeepLSD(conf).eval()
        model.load_state_dict(state['model'])
        self.net = model.to(self.device)

    def download_model(self, path):
        """Fetch the pretrained checkpoint into ``path`` using wget."""
        import subprocess
        target_dir = os.path.dirname(path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        link = "https://www.polybox.ethz.ch/index.php/s/XVb30sUyuJttFys/download"
        print("Downloading DeepLSD model...")
        subprocess.run(["wget", link, "-O", path], check=True)

    def get_module_name(self):
        return "deeplsd"

    def detect(self, camview):
        """Detect 2D segments; returns an (N, 5) array [x1 y1 x2 y2 score],
        where the score is the segment length in pixels."""
        img = camview.read_image(set_gray=True)
        img = torch.tensor(img[None, None], dtype=torch.float,
                           device=self.device) / 255
        with torch.no_grad():
            lines = self.net({'image': img})['lines'][0]

        # Append the length of each segment as its score.
        lengths = np.linalg.norm(lines[:, 0] - lines[:, 1], axis=1, keepdims=True)
        return np.concatenate([lines.reshape(-1, 4), lengths], axis=1)
loss_dis: 1.0 33 | loss_jloc: 8.0 34 | loss_joff: 0.25 35 | # loss_joff: 0.0 36 | loss_lineness: 1.0 37 | loss_md: 1.0 38 | loss_neg: 1.0 39 | loss_pos: 1.0 40 | loss_res: 1.0 41 | loss_heatmap: 1.0 42 | NAME: Hourglass 43 | OUT_FEATURE_CHANNELS: 256 44 | PARSING_HEAD: 45 | DIM_FC: 1024 46 | DIM_LOI: 128 47 | J2L_THRESHOLD: 10.0 48 | JMATCH_THRESHOLD: 1.5 49 | MATCHING_STRATEGY: junction 50 | MAX_DISTANCE: 5.0 51 | N_DYN_JUNC: 300 52 | N_DYN_NEGL: 40 53 | N_DYN_OTHR: 0 54 | N_DYN_OTHR2: 300 55 | N_DYN_POSL: 300 56 | N_OUT_JUNC: 250 57 | N_OUT_LINE: 2500 58 | N_PTS0: 32 59 | N_PTS1: 8 60 | N_STC_NEGL: 40 61 | N_STC_POSL: 300 62 | USE_RESIDUAL: 1 63 | RESNETS: 64 | BASENET: resnet50 65 | PRETRAIN: true 66 | SCALE: 1.0 67 | WEIGHTS: '' 68 | MODELING_PATH: ihawp-v2 69 | OUTPUT_DIR: output/ihawp 70 | SOLVER: 71 | AMSGRAD: true 72 | BACKBONE_LR_FACTOR: 1.0 73 | BASE_LR: 0.00004 74 | BIAS_LR_FACTOR: 1 75 | CHECKPOINT_PERIOD: 1 76 | GAMMA: 0.1 77 | IMS_PER_BATCH: 6 78 | MAX_EPOCH: 30 79 | MOMENTUM: 0.9 80 | OPTIMIZER: ADAM 81 | STEPS: 82 | - 25 83 | WEIGHT_DECAY: 0.0001 84 | WEIGHT_DECAY_BIAS: 0 85 | -------------------------------------------------------------------------------- /limap/line2d/L2D2/RAL_net_cov.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | import torch 3 | import torch.nn.init 4 | import torch.nn as nn 5 | 6 | 7 | class L2Norm(nn.Module): 8 | def __init__(self): 9 | super(L2Norm,self).__init__() 10 | self.eps = 1e-10 11 | def forward(self, x): 12 | norm = torch.sqrt(torch.sum(x * x, dim = 1) + self.eps) 13 | x= x / norm.unsqueeze(-1).expand_as(x) 14 | return x 15 | 16 | 17 | class L2Net(nn.Module): 18 | def __init__(self): 19 | super(L2Net, self).__init__() 20 | self.features = nn.Sequential( 21 | nn.Conv2d(1, 32, kernel_size=3, padding=1, bias = False), 22 | nn.BatchNorm2d(32, affine=False), 23 | nn.ReLU(), 24 | nn.Conv2d(32, 32, kernel_size=3, padding=1, bias = 
False), 25 | nn.BatchNorm2d(32, affine=False), 26 | nn.ReLU(), 27 | nn.Conv2d(32, 64, kernel_size=(4,3), stride=2, padding=1, bias = False),#3 28 | nn.BatchNorm2d(64, affine=False), 29 | nn.ReLU(), 30 | nn.Conv2d(64, 64, kernel_size=3, padding=1, bias = False), 31 | nn.BatchNorm2d(64, affine=False), 32 | nn.ReLU(), 33 | nn.Conv2d(64, 128, kernel_size=(4,3), stride=2,padding=1, bias = False),#3 34 | nn.BatchNorm2d(128, affine=False), 35 | nn.ReLU(), 36 | nn.Conv2d(128, 128, kernel_size=3, padding=1, bias = False), 37 | nn.BatchNorm2d(128, affine=False), 38 | nn.ReLU(), 39 | nn.Dropout(0.3), 40 | nn.Conv2d(128, 128, kernel_size=(12,8), bias = False),#8 41 | nn.BatchNorm2d(128, affine=False), 42 | 43 | ) 44 | self.features.apply(weights_init) 45 | return 46 | 47 | def input_norm(self,x): 48 | flat = x.view(x.size(0), -1) 49 | mp = torch.mean(flat, dim=1) 50 | sp = torch.std(flat, dim=1) + 1e-7 51 | return (x - mp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand_as(x)) / sp.detach().unsqueeze(-1).unsqueeze(-1).unsqueeze(1).expand_as(x) 52 | 53 | def forward(self, input): 54 | x_features = self.features(self.input_norm(input)) 55 | x = x_features.view(x_features.size(0), -1) 56 | 57 | return L2Norm()(x) 58 | 59 | def weights_init(m): 60 | if isinstance(m, nn.Conv2d): 61 | nn.init.orthogonal_(m.weight.data, gain=0.6) 62 | try: 63 | nn.init.constant_(m.bias.data, 0.01) 64 | 65 | except: 66 | pass 67 | return 68 | 69 | def get_net(): 70 | return L2Net() 71 | -------------------------------------------------------------------------------- /limap/line2d/L2D2/__init__.py: -------------------------------------------------------------------------------- 1 | from .extractor import L2D2Extractor 2 | from .matcher import L2D2Matcher 3 | -------------------------------------------------------------------------------- /limap/line2d/L2D2/matcher.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import numpy as np 3 | 
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from base_matcher import BaseMatcher, BaseMatcherOptions


class L2D2Matcher(BaseMatcher):
    """Matcher for L2D2 line descriptors based on descriptor similarity."""

    def __init__(self, extractor, options = BaseMatcherOptions()):
        super(L2D2Matcher, self).__init__(extractor, options)

    def get_module_name(self):
        return "l2d2"

    def check_compatibility(self, extractor):
        return extractor.get_module_name() == "l2d2"

    def match_pair(self, descinfo1, descinfo2):
        # topk == 0 selects strict mutual-nearest-neighbor matching.
        if self.topk == 0:
            return self.match_segs_with_descinfo(descinfo1, descinfo2)
        return self.match_segs_with_descinfo_topk(descinfo1, descinfo2,
                                                  topk=self.topk)

    def match_segs_with_descinfo(self, descinfo1, descinfo2):
        """Mutual nearest-neighbor matches as an (n_matches, 2) index array."""
        desc1 = descinfo1['line_descriptors']
        desc2 = descinfo2['line_descriptors']

        # Either image may contain no lines at all.
        if len(desc1) == 0 or len(desc2) == 0:
            return np.empty((0, 2))

        similarity = desc1 @ desc2.T
        best12 = np.argmax(similarity, axis=1)
        best21 = np.argmax(similarity, axis=0)
        rows = np.arange(len(desc1))
        mutual = best21[best12] == rows
        return np.stack([rows[mutual], best12[mutual]], 1)

    def match_segs_with_descinfo_topk(self, descinfo1, descinfo2, topk=10):
        """Top-k candidates per line of image 1, best first; shape (n1*topk, 2)."""
        desc1 = descinfo1['line_descriptors']
        desc2 = descinfo2['line_descriptors']

        if len(desc1) == 0 or len(desc2) == 0:
            return np.empty((0, 2))

        similarity = desc1 @ desc2.T
        # argsort is ascending: take the last k columns, then reverse so the
        # highest-similarity candidate comes first.
        ranked = np.flip(np.argsort(similarity, axis=1)[:, -topk:], axis=1)
        n_lines = len(ranked)
        return np.stack([np.arange(n_lines).repeat(topk),
                         ranked.flatten()], axis=1)
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from base_detector import BaseDetector, BaseDetectorOptions

import pytlsd
import numpy as np

class LSDDetector(BaseDetector):
    """Classical LSD line-segment detector, wrapping pytlsd."""

    def __init__(self, options = BaseDetectorOptions()):
        super(LSDDetector, self).__init__(options)

    def get_module_name(self):
        return "lsd"

    def detect(self, camview):
        """Run LSD on the image of ``camview`` and return the raw segments."""
        image = camview.read_image(set_gray=self.set_gray)
        return pytlsd.lsd(image)
LineTransformer({'weight_path': self.weight_path}).eval().to(self.device) 18 | 19 | def get_module_name(self): 20 | return "linetr" 21 | 22 | def check_compatibility(self, extractor): 23 | return extractor.get_module_name() == "linetr" 24 | 25 | def match_pair(self, descinfo1, descinfo2): 26 | if self.topk == 0: 27 | return self.match_segs_with_descinfo(descinfo1, descinfo2) 28 | else: 29 | return self.match_segs_with_descinfo_topk(descinfo1, descinfo2, 30 | topk=self.topk) 31 | 32 | def match_segs_with_descinfo(self, descinfo1, descinfo2): 33 | line_desc1 = descinfo1['line_descriptors'].T[None] 34 | line_desc2 = descinfo2['line_descriptors'].T[None] 35 | distance_sublines = get_dist_matrix(line_desc1, line_desc2) 36 | distance_matrix = self.linetr.subline2keyline( 37 | distance_sublines, descinfo1['mat_klines2sublines'], 38 | descinfo2['mat_klines2sublines'])[0] 39 | match_mat = nn_matcher_distmat( 40 | distance_matrix, self.linetr.config['nn_threshold'], 41 | is_mutual_NN=True)[0] 42 | matches = np.stack(np.where(match_mat > 0), axis=-1) 43 | return matches 44 | 45 | def match_segs_with_descinfo_topk(self, descinfo1, descinfo2, topk=10): 46 | line_desc1 = descinfo1['line_descriptors'].T[None] 47 | line_desc2 = descinfo2['line_descriptors'].T[None] 48 | distance_sublines = get_dist_matrix(line_desc1, line_desc2) 49 | distance_matrix = self.linetr.subline2keyline( 50 | distance_sublines, descinfo1['mat_klines2sublines'], 51 | descinfo2['mat_klines2sublines'])[0, 0] 52 | 53 | # For each line in img1, retrieve the topk matches in img2 54 | matches = np.argsort(distance_matrix, axis=1)[:, :topk] 55 | 56 | # Transform matches to [n_matches, 2] 57 | n_lines = matches.shape[0] 58 | matches_t = np.stack([np.arange(n_lines).repeat(topk), 59 | matches.flatten()], axis=1) 60 | return matches_t 61 | -------------------------------------------------------------------------------- /limap/line2d/LineTR/nn_matcher.py: 
import numpy as np

def nn_matcher_distmat(dist_mat, nn_thresh, is_mutual_NN = True):
    """Nearest-neighbor matching from a (1, n0, n1) distance matrix.

    Returns a (1, n0, n1) binary matrix with 1 at every accepted match
    (distance below ``nn_thresh``, optionally mutual).
    """
    n0, n1 = dist_mat.shape[1], dist_mat.shape[2]
    batch = 1
    match_mat = np.zeros((batch, n0, n1))
    if n0 == 0 or n1 == 0:
        return match_mat

    for b in np.arange(batch):
        dmat = dist_mat[b].clip(min=0)
        # Forward nearest neighbor for every row and its distance.
        nn_idx = np.argmin(dmat, axis=1)
        nn_scores = dmat[np.arange(n0), nn_idx]
        keep = nn_scores < nn_thresh
        if is_mutual_NN:
            # Only keep pairs that are each other's nearest neighbor.
            reverse = np.argmin(dmat, axis=0)
            keep = np.logical_and(keep, np.arange(len(nn_idx)) == reverse[nn_idx])
        rows = np.arange(n0)[keep]
        match_mat[b, rows, nn_idx[keep]] = 1

    return match_mat

def nn_matcher(desc0, desc1, nn_thresh=0.8, is_mutual_NN=True):
    """Nearest-neighbor matching between two (dim, n) descriptor matrices."""
    # Cosine similarity converted to squared Euclidean distance:
    # ||a - b||^2 = 2 - 2 a.b  (assumes columns are L2-normalized — confirm
    # against the descriptor extractor).
    similarity = desc0.T @ desc1
    dist_mat = (2.0 - 2.0 * similarity).clip(min=0)[None]

    mat_nn = nn_matcher_distmat(dist_mat, nn_thresh, is_mutual_NN)
    return mat_nn, dist_mat
model_cfg: 3 | ### [Model parameters] 4 | model_name: "lcnn_simple" 5 | model_architecture: "simple" 6 | # Backbone related config 7 | backbone: "lcnn" 8 | backbone_cfg: 9 | input_channel: 1 # Use RGB images or grayscale images. 10 | depth: 4 11 | num_stacks: 2 12 | num_blocks: 1 13 | num_classes: 5 14 | # Junction decoder related config 15 | junction_decoder: "superpoint_decoder" 16 | junc_decoder_cfg: 17 | # Heatmap decoder related config 18 | heatmap_decoder: "pixel_shuffle" 19 | heatmap_decoder_cfg: 20 | # Descriptor decoder related config 21 | descriptor_decoder: "superpoint_descriptor" 22 | descriptor_decoder_cfg: 23 | # Shared configurations 24 | grid_size: 8 25 | keep_border_valid: True 26 | # Threshold of junction detection 27 | detection_thresh: 0.0153846 # 1/65 28 | max_num_junctions: 300 29 | # Threshold of heatmap detection 30 | prob_thresh: 0.5 31 | 32 | ### [Loss parameters] 33 | weighting_policy: "dynamic" 34 | # [Heatmap loss] 35 | w_heatmap: 0. 36 | w_heatmap_class: 1 37 | heatmap_loss_func: "cross_entropy" 38 | heatmap_loss_cfg: 39 | policy: "dynamic" 40 | # [Junction loss] 41 | w_junc: 0. 42 | junction_loss_func: "superpoint" 43 | junction_loss_cfg: 44 | policy: "dynamic" 45 | # [Descriptor loss] 46 | w_desc: 0. 47 | descriptor_loss_func: "regular_sampling" 48 | descriptor_loss_cfg: 49 | dist_threshold: 8 50 | grid_size: 4 51 | margin: 1 52 | policy: "dynamic" 53 | 54 | ### [Multiscale option] 55 | multiscale_cfg: 56 | multiscale: False 57 | scales: [1., 2.] 58 | 59 | ### [Line detector config] 60 | line_detector_cfg: 61 | detect_thresh: 0.5 62 | num_samples: 64 63 | sampling_method: "local_max" 64 | inlier_thresh: 0.99 65 | use_candidate_suppression: True 66 | nms_dist_tolerance: 3. 
"""
Main file to launch training and testing experiments.
"""

import os
import argparse
import yaml
import numpy as np
import torch

# Pytorch configurations
torch.cuda.empty_cache()
torch.backends.cudnn.benchmark = True


def load_config(config_path):
    """ Load configurations from a given yaml file. """
    # Fail early when the path does not exist.
    if not os.path.exists(config_path):
        raise ValueError("[Error] The provided config path is not valid.")
    with open(config_path, "r") as f:
        return yaml.safe_load(f)


def update_config(path, model_cfg=None, dataset_cfg=None):
    """ Update configuration file from the resume path. """
    # Fall back to empty configs when none are provided.
    if model_cfg is None:
        model_cfg = {}
    if dataset_cfg is None:
        dataset_cfg = {}

    model_path = os.path.join(path, "model_cfg.yaml")
    dataset_path = os.path.join(path, "dataset_cfg.yaml")

    # Merge in the configs saved at the resume path (saved values win).
    with open(model_path, "r") as f:
        model_cfg_saved = yaml.safe_load(f)
    model_cfg.update(model_cfg_saved)
    with open(dataset_path, "r") as f:
        dataset_cfg_saved = yaml.safe_load(f)
    dataset_cfg.update(dataset_cfg_saved)

    # Rewrite the yaml files only when the merged configs differ from disk.
    if model_cfg != model_cfg_saved:
        with open(model_path, "w") as f:
            yaml.dump(model_cfg, f)
    if dataset_cfg != dataset_cfg_saved:
        with open(dataset_path, "w") as f:
            yaml.dump(dataset_cfg, f)

    return model_cfg, dataset_cfg


def record_config(model_cfg, dataset_cfg, output_path):
    """ Record model and dataset configs to the log path. """
    for fname, cfg in (("model_cfg.yaml", model_cfg),
                       ("dataset_cfg.yaml", dataset_cfg)):
        with open(os.path.join(output_path, fname), "w") as f:
            yaml.safe_dump(cfg, f)
3 | """ 4 | import os 5 | import numpy as np 6 | import torch 7 | 8 | 9 | ################# 10 | ## image utils ## 11 | ################# 12 | def convert_image(input_tensor, axis): 13 | """ Convert single channel images to 3-channel images. """ 14 | image_lst = [input_tensor for _ in range(3)] 15 | outputs = np.concatenate(image_lst, axis) 16 | return outputs 17 | 18 | 19 | ###################### 20 | ## checkpoint utils ## 21 | ###################### 22 | def get_latest_checkpoint(checkpoint_root, checkpoint_name, 23 | device=torch.device("cuda")): 24 | """ Get the latest checkpoint or by filename. """ 25 | # Load specific checkpoint 26 | if checkpoint_name is not None: 27 | checkpoint = torch.load( 28 | os.path.join(checkpoint_root, checkpoint_name), 29 | map_location=device) 30 | # Load the latest checkpoint 31 | else: 32 | lastest_checkpoint = sorted(os.listdir(os.path.join( 33 | checkpoint_root, "*.tar")))[-1] 34 | checkpoint = torch.load(os.path.join( 35 | checkpoint_root, lastest_checkpoint), map_location=device) 36 | return checkpoint 37 | 38 | 39 | def remove_old_checkpoints(checkpoint_root, max_ckpt=15): 40 | """ Remove the outdated checkpoints. """ 41 | # Get sorted list of checkpoints 42 | checkpoint_list = sorted( 43 | [_ for _ in os.listdir(os.path.join(checkpoint_root)) 44 | if _.endswith(".tar")]) 45 | 46 | # Get the checkpoints to be removed 47 | if len(checkpoint_list) > max_ckpt: 48 | remove_list = checkpoint_list[:-max_ckpt] 49 | for _ in remove_list: 50 | full_name = os.path.join(checkpoint_root, _) 51 | os.remove(full_name) 52 | print("[Debug] Remove outdated checkpoint %s" % (full_name)) 53 | 54 | 55 | ################ 56 | ## HDF5 utils ## 57 | ################ 58 | def parse_h5_data(h5_data): 59 | """ Parse h5 dataset. 
""" 60 | output_data = {} 61 | for key in h5_data.keys(): 62 | output_data[key] = np.array(h5_data[key]) 63 | 64 | return output_data 65 | -------------------------------------------------------------------------------- /limap/line2d/SOLD2/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rerun-io/limap/a4993794ce87c4f1290db8e5f428c43517257aaa/limap/line2d/SOLD2/model/__init__.py -------------------------------------------------------------------------------- /limap/line2d/SOLD2/model/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file implements different learning rate schedulers 3 | """ 4 | import torch 5 | 6 | 7 | def get_lr_scheduler(lr_decay, lr_decay_cfg, optimizer): 8 | """ Get the learning rate scheduler according to the config. """ 9 | # If no lr_decay is specified => return None 10 | if (lr_decay == False) or (lr_decay_cfg is None): 11 | schduler = None 12 | # Exponential decay 13 | elif (lr_decay == True) and (lr_decay_cfg["policy"] == "exp"): 14 | schduler = torch.optim.lr_scheduler.ExponentialLR( 15 | optimizer, 16 | gamma=lr_decay_cfg["gamma"] 17 | ) 18 | # Unknown policy 19 | else: 20 | raise ValueError("[Error] Unknow learning rate decay policy!") 21 | 22 | return schduler -------------------------------------------------------------------------------- /limap/line2d/SOLD2/model/nets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rerun-io/limap/a4993794ce87c4f1290db8e5f428c43517257aaa/limap/line2d/SOLD2/model/nets/__init__.py -------------------------------------------------------------------------------- /limap/line2d/SOLD2/model/nets/backbone.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .lcnn_hourglass import MultitaskHead, 
import torch
import torch.nn as nn

from .lcnn_hourglass import MultitaskHead, hg


class HourglassBackbone(nn.Module):
    """ Hourglass backbone. """
    # Wraps the LCNN stacked-hourglass network; `head` selects the
    # multi-task prediction head used at each stack output.
    def __init__(self, input_channel=1, depth=4, num_stacks=2,
                 num_blocks=1, num_classes=5):
        super(HourglassBackbone, self).__init__()
        self.head = MultitaskHead
        self.net = hg(**{
            "head": self.head,
            "depth": depth,
            "num_stacks": num_stacks,
            "num_blocks": num_blocks,
            "num_classes": num_classes,
            "input_channels": input_channel
        })

    def forward(self, input_images):
        # hg(...) returns multiple outputs; element [1] is the feature map
        # consumed by the decoders. NOTE(review): confirm against the
        # lcnn_hourglass implementation, which is not visible in this file.
        return self.net(input_images)[1]


class SuperpointBackbone(nn.Module):
    """ SuperPoint backbone. """
    # Four conv stages (64, 64, 128, 128 channels) separated by three
    # 2x2 max-poolings, so the output feature map is at 1/8 resolution.
    def __init__(self):
        super(SuperpointBackbone, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        c1, c2, c3, c4 = 64, 64, 128, 128
        # Shared Encoder.
        self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3,
                                      stride=1, padding=1)
        self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3,
                                      stride=1, padding=1)
        self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3,
                                      stride=1, padding=1)
        self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3,
                                      stride=1, padding=1)
        self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3,
                                      stride=1, padding=1)
        self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3,
                                      stride=1, padding=1)
        self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3,
                                      stride=1, padding=1)
        self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3,
                                      stride=1, padding=1)

    def forward(self, input_images):
        # Shared Encoder.
        x = self.relu(self.conv1a(input_images))
        x = self.relu(self.conv1b(x))
        x = self.pool(x)
        x = self.relu(self.conv2a(x))
        x = self.relu(self.conv2b(x))
        x = self.pool(x)
        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool(x)
        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))

        return x


# ------- limap/line2d/SOLD2/model/nets/descriptor_decoder.py -------
import torch
import torch.nn as nn


class SuperpointDescriptor(nn.Module):
    """ Descriptor decoder based on the SuperPoint architecture. """
    def __init__(self, input_feat_dim=128):
        super(SuperpointDescriptor, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        # 3x3 conv to 256 channels, then 1x1 projection to 128-D descriptors.
        self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=3,
                                      stride=1, padding=1)
        self.convPb = torch.nn.Conv2d(256, 128, kernel_size=1,
                                      stride=1, padding=0)

    def forward(self, input_features):
        feat = self.relu(self.convPa(input_features))
        semi = self.convPb(feat)

        return semi
""" 6 | def __init__(self, input_feat_dim=128, num_upsample=2, output_channel=2): 7 | super(PixelShuffleDecoder, self).__init__() 8 | # Get channel parameters 9 | self.channel_conf = self.get_channel_conf(num_upsample) 10 | 11 | # Define the pixel shuffle 12 | self.pixshuffle = nn.PixelShuffle(2) 13 | 14 | # Process the feature 15 | self.conv_block_lst = [] 16 | # The input block 17 | self.conv_block_lst.append( 18 | nn.Sequential( 19 | nn.Conv2d(input_feat_dim, self.channel_conf[0], 20 | kernel_size=3, stride=1, padding=1), 21 | nn.BatchNorm2d(self.channel_conf[0]), 22 | nn.ReLU(inplace=True) 23 | )) 24 | 25 | # Intermediate block 26 | for channel in self.channel_conf[1:-1]: 27 | self.conv_block_lst.append( 28 | nn.Sequential( 29 | nn.Conv2d(channel, channel, kernel_size=3, 30 | stride=1, padding=1), 31 | nn.BatchNorm2d(channel), 32 | nn.ReLU(inplace=True) 33 | )) 34 | 35 | # Output block 36 | self.conv_block_lst.append( 37 | nn.Conv2d(self.channel_conf[-1], output_channel, 38 | kernel_size=1, stride=1, padding=0) 39 | ) 40 | self.conv_block_lst = nn.ModuleList(self.conv_block_lst) 41 | 42 | # Get num of channels based on number of upsampling. 43 | def get_channel_conf(self, num_upsample): 44 | if num_upsample == 2: 45 | return [256, 64, 16] 46 | elif num_upsample == 3: 47 | return [256, 64, 16, 4] 48 | 49 | def forward(self, input_features): 50 | # Iterate til output block 51 | out = input_features 52 | for block in self.conv_block_lst[:-1]: 53 | out = block(out) 54 | out = self.pixshuffle(out) 55 | 56 | # Output layer 57 | out = self.conv_block_lst[-1](out) 58 | 59 | return out 60 | -------------------------------------------------------------------------------- /limap/line2d/SOLD2/model/nets/junction_decoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class SuperpointDecoder(nn.Module): 6 | """ Junction decoder based on the SuperPoint architecture. 
""" 7 | def __init__(self, input_feat_dim=128, backbone_name="lcnn"): 8 | super(SuperpointDecoder, self).__init__() 9 | self.relu = torch.nn.ReLU(inplace=True) 10 | # Perform strided convolution when using lcnn backbone. 11 | if backbone_name == "lcnn": 12 | self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=3, 13 | stride=2, padding=1) 14 | elif backbone_name == "superpoint": 15 | self.convPa = torch.nn.Conv2d(input_feat_dim, 256, kernel_size=3, 16 | stride=1, padding=1) 17 | else: 18 | raise ValueError("[Error] Unknown backbone option.") 19 | 20 | self.convPb = torch.nn.Conv2d(256, 65, kernel_size=1, 21 | stride=1, padding=0) 22 | 23 | def forward(self, input_features): 24 | feat = self.relu(self.convPa(input_features)) 25 | semi = self.convPb(feat) 26 | 27 | return semi -------------------------------------------------------------------------------- /limap/line2d/TP_LSD/__init__.py: -------------------------------------------------------------------------------- 1 | from .tp_lsd import TPLSDDetector 2 | -------------------------------------------------------------------------------- /limap/line2d/__init__.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.append(os.path.dirname(os.path.abspath(__file__))) 3 | 4 | from .register_detector import get_detector, get_extractor 5 | from .register_matcher import get_matcher 6 | 7 | # line utilization functions 8 | from .line_utils import * 9 | 10 | -------------------------------------------------------------------------------- /limap/line2d/endpoints/__init__.py: -------------------------------------------------------------------------------- 1 | from .extractor import SuperPointEndpointsExtractor 2 | from .matcher import NNEndpointsMatcher, SuperGlueEndpointsMatcher 3 | -------------------------------------------------------------------------------- /limap/line2d/line_utils/__init__.py: 
from .base_detector import BaseDetectorOptions

def get_detector(cfg_detector, max_num_2d_segs=3000,
                 do_merge_lines=False, visualize=False, weight_path=None):
    """Instantiate a 2D line detector from a config dict.

    Args:
        cfg_detector: dict with a "method" key naming the detector.
        max_num_2d_segs: keep at most this many 2D segments per image.
        do_merge_lines: merge nearby collinear detections.
        visualize: enable debug visualization.
        weight_path: optional root folder for model weights.

    Raises:
        NotImplementedError: for an unknown method name.
    """
    options = BaseDetectorOptions()
    options = options._replace(
        set_gray=True, max_num_2d_segs=max_num_2d_segs,
        do_merge_lines=do_merge_lines, visualize=visualize, weight_path=weight_path)

    # Detectors are imported lazily so unused backends need not be installed.
    method = cfg_detector["method"]
    if method == "lsd":
        from .LSD import LSDDetector
        return LSDDetector(options)
    elif method == "sold2":
        from .SOLD2 import SOLD2Detector
        return SOLD2Detector(options)
    elif method == "hawpv3":
        from .HAWPv3 import HAWPv3Detector
        return HAWPv3Detector(options)
    elif method == "tp_lsd":
        from .TP_LSD import TPLSDDetector
        return TPLSDDetector(options)
    elif method == "deeplsd":
        from .DeepLSD import DeepLSDDetector
        return DeepLSDDetector(options)
    else:
        # Name the offending method instead of a bare NotImplementedError.
        raise NotImplementedError(
            "Unknown detector method: {0}".format(method))

def get_extractor(cfg_extractor, weight_path=None):
    """Instantiate a line descriptor extractor from a config dict."""
    options = BaseDetectorOptions()
    options = options._replace(set_gray=True, weight_path=weight_path)

    method = cfg_extractor["method"]
    if method == "sold2":
        from .SOLD2 import SOLD2Detector
        return SOLD2Detector(options)
    elif method == "lbd":
        from .LBD import LBDExtractor
        return LBDExtractor(options)
    elif method == "linetr":
        from .LineTR import LineTRExtractor
        return LineTRExtractor(options)
    elif method == "l2d2":
        from .L2D2 import L2D2Extractor
        return L2D2Extractor(options)
    elif method == "superpoint_endpoints":
        from .endpoints import SuperPointEndpointsExtractor
        return SuperPointEndpointsExtractor(options)
    elif method == "wireframe":
        from .GlueStick import WireframeExtractor
        return WireframeExtractor(options)
    else:
        raise NotImplementedError(
            "Unknown extractor method: {0}".format(method))


# ---------------- limap/line2d/register_matcher.py ----------------
from .base_matcher import BaseMatcherOptions

def get_matcher(cfg_matcher, extractor, n_neighbors=20, weight_path=None):
    """Instantiate a line matcher bound to the given extractor."""
    options = BaseMatcherOptions()
    options = options._replace(
        n_neighbors=n_neighbors, topk=cfg_matcher["topk"],
        n_jobs=cfg_matcher["n_jobs"], weight_path=weight_path)

    method = cfg_matcher["method"]
    if method == "sold2":
        from .SOLD2 import SOLD2Matcher
        return SOLD2Matcher(extractor, options)
    elif method == "lbd":
        from .LBD import LBDMatcher
        return LBDMatcher(extractor, options)
    elif method == "linetr":
        from .LineTR import LineTRMatcher
        return LineTRMatcher(extractor, options)
    elif method == "l2d2":
        from .L2D2 import L2D2Matcher
        return L2D2Matcher(extractor, options)
    elif method == "nn_endpoints":
        from .endpoints import NNEndpointsMatcher
        return NNEndpointsMatcher(extractor, options)
    elif method == "superglue_endpoints":
        from .endpoints import SuperGlueEndpointsMatcher
        return SuperGlueEndpointsMatcher(
            extractor, options, weights=cfg_matcher["superglue"]["weights"])
    elif method == "gluestick":
        from .GlueStick import GlueStickMatcher
        return GlueStickMatcher(extractor, options)
    else:
        raise NotImplementedError(
            "Unknown matcher method: {0}".format(method))
4 | bindings.cc 5 | aggregator.h aggregator.cc 6 | merging_utils.h merging_utils.cc 7 | merging.h merging.cc 8 | ) 9 | 10 | -------------------------------------------------------------------------------- /limap/merging/__init__.py: -------------------------------------------------------------------------------- 1 | from _limap._merging import * 2 | from .merging import * 3 | 4 | -------------------------------------------------------------------------------- /limap/merging/aggregator.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_MERGING_AGGREGATOR_H_ 2 | #define LIMAP_MERGING_AGGREGATOR_H_ 3 | 4 | #include 5 | #include 6 | #include "_limap/helpers.h" 7 | 8 | namespace py = pybind11; 9 | 10 | #include "base/linebase.h" 11 | 12 | namespace limap { 13 | 14 | namespace merging { 15 | 16 | class Aggregator { 17 | public: 18 | Aggregator() {} 19 | static Line3d aggregate_line3d_list_takebest(const std::vector& lines, 20 | const std::vector& scores); 21 | 22 | static Line3d aggregate_line3d_list_takelongest(const std::vector& lines, 23 | const std::vector& scores); 24 | 25 | static Line3d aggregate_line3d_list(const std::vector& lines, 26 | const std::vector& scores, 27 | const int num_outliers = 2); 28 | }; 29 | 30 | } // namespace merging 31 | 32 | } // namespace limap 33 | 34 | #endif 35 | 36 | -------------------------------------------------------------------------------- /limap/merging/merging.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_MERGING_MERGING_H_ 2 | #define LIMAP_MERGING_MERGING_H_ 3 | 4 | #include 5 | #include 6 | #include "_limap/helpers.h" 7 | 8 | namespace py = pybind11; 9 | 10 | #include "base/graph.h" 11 | #include "base/linebase.h" 12 | #include "base/linetrack.h" 13 | #include "base/line_linker.h" 14 | #include "base/image_collection.h" 15 | 16 | #include 17 | 18 | namespace limap { 19 | 20 | namespace merging { 21 | 22 | // Modified 
from _limap import _merging as _mrg
from _limap import _base
import numpy as np

def merging(linker, all_2d_segs, imagecols, seg3d_list, neighbors, var2d=5.0):
    """Merge per-image fitted 3D segments into line tracks."""
    all_lines_2d = {}
    all_lines_3d = {}
    for img_id in imagecols.get_img_ids():
        view = imagecols.camview(img_id)
        lines2d = _base._GetLine2dVectorFromArray(all_2d_segs[img_id])
        lines3d = _base._GetLine3dVectorFromArray(seg3d_list[img_id])
        all_lines_2d[img_id] = lines2d
        # Attach 2D-uncertainty-derived variance to each 3D segment.
        all_lines_3d[img_id] = _mrg._SetUncertaintySegs3d(lines3d, view, var2d)
    graph = _base.Graph()
    linetracks = _mrg._MergeToLineTracks(graph, all_lines_2d, imagecols,
                                         all_lines_3d, neighbors, linker)
    return graph, linetracks

def remerge(linker3d, linetracks, num_outliers=2):
    """Iteratively remerge line tracks until the track count stops shrinking."""
    if len(linetracks) == 0:
        return linetracks
    new_linetracks = linetracks
    num_tracks = len(new_linetracks)
    # iterative remerging
    while True:
        new_linetracks = _mrg._RemergeLineTracks(
            new_linetracks, linker3d, num_outliers=num_outliers)
        if len(new_linetracks) == num_tracks:
            break
        num_tracks = len(new_linetracks)
    print("[LOG] tracks after iterative remerging: {0} / {1}".format(
        len(new_linetracks), len(linetracks)))
    return new_linetracks

def checktrackbyreprojection(track, imagecols, th_angular2d, th_perp2d):
    """Per-support reprojection check for a single track."""
    return _mrg._CheckReprojection(track, imagecols, th_angular2d, th_perp2d)

def filtertracksbyreprojection(linetracks, imagecols, th_angular2d, th_perp2d, num_outliers=2):
    """Drop supports that fail the reprojection check."""
    return _mrg._FilterSupportLines(linetracks, imagecols, th_angular2d,
                                    th_perp2d, num_outliers=num_outliers)

def checksensitivity(linetracks, imagecols, th_angular3d):
    """Per-support sensitivity check for the given tracks."""
    return _mrg._CheckSensitivity(linetracks, imagecols, th_angular3d)

def filtertracksbysensitivity(linetracks, imagecols, th_angular3d, min_num_supports):
    """Keep tracks with enough supports passing the sensitivity check."""
    return _mrg._FilterTracksBySensitivity(linetracks, imagecols,
                                           th_angular3d, min_num_supports)

def filtertracksbyoverlap(linetracks, imagecols, th_overlap, min_num_supports):
    """Keep tracks with enough supports passing the overlap check."""
    return _mrg._FilterTracksByOverlap(linetracks, imagecols,
                                       th_overlap, min_num_supports)
15 | 16 | namespace limap { 17 | 18 | namespace merging { 19 | 20 | std::vector SetUncertaintySegs3d(const std::vector& lines, const CameraView& view, const double var2d=5.0); 21 | 22 | void CheckReprojection(std::vector& results, 23 | const LineTrack& linetrack, 24 | const ImageCollection& imagecols, 25 | const double& th_angular2d, const double& th_perp2d); 26 | 27 | void FilterSupportingLines(std::vector& new_linetracks, 28 | const std::vector& linetracks, 29 | const ImageCollection& imagecols, 30 | const double& th_angular2d, const double& th_perp2d, 31 | const int num_outliers = 2); 32 | 33 | void CheckSensitivity(std::vector& results, 34 | const LineTrack& linetrack, 35 | const ImageCollection& imagecols, 36 | const double& th_angular3d); 37 | 38 | void FilterTracksBySensitivity(std::vector& new_linetracks, 39 | const std::vector& linetracks, 40 | const ImageCollection& imagecols, 41 | const double& th_angular3d, const int& min_support_ns); 42 | 43 | void FilterTracksByOverlap(std::vector& new_linetracks, 44 | const std::vector& linetracks, 45 | const ImageCollection& imagecols, 46 | const double& th_overlap, const int& min_support_ns); 47 | 48 | } // namespace merging 49 | 50 | } // namespace limap 51 | 52 | #endif 53 | 54 | -------------------------------------------------------------------------------- /limap/optimize/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "optimize") 2 | 3 | add_subdirectory(line_refinement) 4 | add_subdirectory(hybrid_bundle_adjustment) 5 | add_subdirectory(global_pl_association) 6 | add_subdirectory(line_localization) 7 | 8 | LIMAP_ADD_SOURCES( 9 | bindings.cc 10 | ) 11 | 12 | -------------------------------------------------------------------------------- /limap/optimize/__init__.py: -------------------------------------------------------------------------------- 1 | from _limap._optimize import * 2 | from .line_refinement import * 3 | from 
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import limap.line2d
import limap.base as _base
import limap.util.io as limapio


def extract_heatmaps_sold2(output_dir, imagecols, skip_exists=False):
    '''
    Extract sold2 heatmaps from _base.ImageCollection object
    '''
    # Build a SOLD2 detector and dump heatmaps for every image.
    detector_cfg = {"method": "sold2"}
    detector = limap.line2d.get_detector(detector_cfg)
    detector.extract_heatmaps_all_images(output_dir, imagecols,
                                         skip_exists=skip_exists)


def parse_config():
    import argparse
    arg_parser = argparse.ArgumentParser(description='extract sold2 heatmaps')
    arg_parser.add_argument('-i', '--input', type=str, required=True, help='imagecols.npy')
    arg_parser.add_argument('-o', '--output_dir', type=str, required=True, help='output folder')
    arg_parser.add_argument("--skip_exists", action="store_true", help="skip exists")
    return arg_parser.parse_args()


def main(args):
    if not args.input.endswith(".npy"):
        raise ValueError("input file should be with the .npy extension")
    imagecols = _base.ImageCollection(limapio.read_npy(args.input).item())
    extract_heatmaps_sold2(args.output_dir, imagecols, skip_exists=args.skip_exists)


if __name__ == '__main__':
    main(parse_config())


# -------------------- limap/optimize/functions.py --------------------
import os, sys
import numpy as np
from tqdm import tqdm

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

import limap.visualize as limapvis


def visualize_heatmap_intersections(prefix, imname_list, image_ids, p_heatmaps,
                                    ht_intersections, max_image_dim=None):
    """Write one annotated image and one annotated heatmap per image id."""
    import matplotlib.colors as colors
    import matplotlib.cm as cmx
    # NOTE(review): 'utils' and 'cv2' are not imported in this module and
    # would raise NameError at call time -- confirm the intended imports.
    norm = colors.Normalize(vmin=0, vmax=1)
    mapper = cmx.ScalarMappable(norm=norm, cmap="viridis")

    out_dir = os.path.dirname(prefix)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for img_id, heatmap, intersections in zip(image_ids, p_heatmaps, ht_intersections):
        imname = imname_list[img_id]

        # visualize image
        img = utils.read_image(imname, max_image_dim=max_image_dim, set_gray=False)
        img = limapvis.draw_points(img, intersections, (255, 0, 0), 2)
        cv2.imwrite(prefix + '_img{0}.png'.format(img_id), img)

        # visualize heatmap
        heatmap_img = (mapper.to_rgba(heatmap)[:, :, :3] * 255).astype(np.uint8)
        heatmap_img = limapvis.draw_points(heatmap_img, intersections, (255, 0, 0), 2)
        cv2.imwrite(prefix + '_heatmap{0}.png'.format(img_id), heatmap_img)


def visualize_fconsis_intersections(prefix, imname_list, image_ids, fc_intersections,
                                    max_image_dim=None, n_samples_vis=-1):
    """Write a patch mosaic around each feature-consistency intersection."""
    if n_samples_vis != -1:
        fc_intersections = fc_intersections[:n_samples_vis]
    out_dir = os.path.dirname(prefix)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for sample_id, intersections in enumerate(tqdm(fc_intersections)):
        patches = []
        for data in intersections:
            img_id, point = image_ids[data[0]], data[1]
            # NOTE(review): 'utils' and 'cv2' undefined here as well.
            img = utils.read_image(imname_list[img_id], max_image_dim=max_image_dim, set_gray=False)
            limapvis.draw_points(img, [point], (0, 0, 255), 1)
            patches.append(limapvis.crop_to_patch(img, point, patch_size=100))
        bigimg = limapvis.make_bigimage(patches, pad=20)
        cv2.imwrite(prefix + '_sample{0}.png'.format(sample_id), bigimg)


def unit_test_add_noise_to_track(track):
    """Return a copy of the track with its 3D line slightly perturbed.

    NOTE(review): '_base' is not imported in this module -- presumably
    'import limap.base as _base' is intended; confirm.
    """
    # for unit test
    tmptrack = _base.LineTrack(track)
    start = track.line.start + (np.random.rand(3) - 0.5) * 1e-1
    end = track.line.end + (np.random.rand(3) - 0.5) * 1e-1
    tmptrack.line = _base.Line3d(start, end)
    return tmptrack
from _limap import _base, _ceresbase, _optimize

def solve_global_pl_association(cfg, imagecols, bpt3d, all_bpt2ds):
    """Run the global point-line association optimizer.

    Builds a GlobalAssociator from `cfg`, feeds it the cameras and the
    2D/3D point-line bipartite structures, solves the (soft) association
    problem and returns the resulting 3D bipartite structure.
    """
    engine = _optimize.GlobalAssociator(_optimize.GlobalAssociatorConfig(cfg))
    engine.InitCameras(imagecols)
    engine.InitBipartite_PointLine(bpt3d, all_bpt2ds)

    # soft association
    engine.SetUp()
    engine.Solve()
    return engine.GetBipartite3d_PointLine()
// Hybrid bundle adjustment engine: jointly refines cameras, 3D points and
// 3D infinite lines with a ceres problem.
// NOTE(review): the template arguments of the std:: containers appear to have
// been stripped in this copy of the header (e.g. `std::map points_;`) —
// restore them from the original file before compiling.
class HybridBAEngine {
protected:
    HybridBAConfig config_;  // optimization options (see hybrid_bundle_adjustment_config.h)

    // minimal data
    ImageCollection imagecols_;  // cameras + poses being optimized
    std::map points_;            // 3D points (template args stripped in this copy)
    std::map lines_;             // 3D infinite lines (template args stripped)

    // tracks
    std::map point_tracks_;      // presumably keyed by track id — confirm in .cc
    std::map line_tracks_;

    // set up ceres problem
    void ParameterizeCameras();
    void ParameterizePoints();
    void ParameterizeLines();
    void AddPointGeometricResiduals(const int track_id);
    void AddLineGeometricResiduals(const int track_id);

public:
    HybridBAEngine() {}
    HybridBAEngine(const HybridBAConfig& cfg): config_(cfg) {}

    // Initialization: cameras, then point and/or line tracks, then SetUp + Solve.
    void InitImagecols(const ImageCollection& imagecols) { imagecols_ = imagecols; }
    void InitPointTracks(const std::vector& point_tracks);
    void InitPointTracks(const std::map& point_tracks);
    void InitLineTracks(const std::vector& line_tracks);
    void InitLineTracks(const std::map& line_tracks);
    void SetUp();
    bool Solve();

    // output accessors (values after optimization)
    ImageCollection GetOutputImagecols() const {return imagecols_; }
    std::map GetOutputPoints() const;
    std::map GetOutputPointTracks() const;
    // `num_outliers` semantics defined in the .cc — not visible here.
    std::map GetOutputLines(const int num_outliers) const;
    std::map GetOutputLineTracks(const int num_outliers) const;

    // ceres
    std::unique_ptr problem_;       // the ceres problem (template arg stripped)
    ceres::Solver::Summary summary_; // filled by Solve()
};
// Configuration for hybrid (point + line) bundle adjustment.
// Extends the line-refinement config with point-related options.
class HybridBAConfig: public line_refinement::RefinementConfig {
public:
    HybridBAConfig(): line_refinement::RefinementConfig() {
        InitConfig();
    }
    // Construct from a Python dict; keys absent from the dict keep the
    // defaults declared below.
    HybridBAConfig(py::dict dict): line_refinement::RefinementConfig(dict) {
        InitConfig();
        ASSIGN_PYDICT_ITEM(dict, constant_intrinsics, bool);
        ASSIGN_PYDICT_ITEM(dict, constant_pose, bool);
        ASSIGN_PYDICT_ITEM(dict, constant_point, bool);
        ASSIGN_PYDICT_ITEM(dict, constant_line, bool);
        ASSIGN_PYDICT_ITEM(dict, lw_point, double);
    }
    bool constant_intrinsics = false;  // keep camera intrinsics fixed
    bool constant_pose = false;        // keep camera poses fixed
    bool constant_point = false;       // keep 3D points fixed
    bool constant_line = false;        // keep 3D lines fixed

    // point geometric config
    // NOTE(review): template argument stripped in this copy — presumably
    // std::shared_ptr<ceres::LossFunction>; confirm against the original.
    std::shared_ptr point_geometric_loss_function;
    double lw_point = 0.1;  // weight of the point geometric residual term

    // functions
    // Convenience: freeze both intrinsics and poses.
    void set_constant_camera() {
        constant_intrinsics = true;
        constant_pose = true;
    }
private:
    void InitConfig() {
        // default: no robust loss on the point geometric term
        point_geometric_loss_function.reset(new ceres::TrivialLoss());
    }
};
from _limap import _base, _ceresbase, _optimize
import numpy as np

def _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=100):
    """Build a HybridBAEngine from `cfg` (a dict or a HybridBAConfig) and
    initialize it with the image collection."""
    ba_config = _optimize.HybridBAConfig(cfg) if type(cfg) == dict else cfg
    # silence ceres logging and cap the iteration count
    ba_config.solver_options.logging_type = _ceresbase.LoggingType.SILENT
    ba_config.solver_options.max_num_iterations = max_num_iterations
    engine = _optimize.HybridBAEngine(ba_config)
    engine.InitImagecols(imagecols)
    return engine

def _solve_bundle_adjustment(ba_engine):
    """Set up the ceres problem on `ba_engine` and run the solver."""
    ba_engine.SetUp()
    ba_engine.Solve()
    return ba_engine

def solve_point_bundle_adjustment(cfg, imagecols, pointtracks, max_num_iterations=100):
    """Bundle adjustment over point tracks only; returns the solved engine."""
    engine = _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=max_num_iterations)
    engine.InitPointTracks(pointtracks)
    return _solve_bundle_adjustment(engine)

def solve_line_bundle_adjustment(cfg, imagecols, linetracks, max_num_iterations=100):
    """Bundle adjustment over line tracks only; returns the solved engine."""
    engine = _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=max_num_iterations)
    engine.InitLineTracks(linetracks)
    return _solve_bundle_adjustment(engine)

def solve_hybrid_bundle_adjustment(cfg, imagecols, pointtracks, linetracks, max_num_iterations=100):
    """Joint bundle adjustment over both point and line tracks."""
    engine = _init_bundle_adjustment_engine(cfg, imagecols, max_num_iterations=max_num_iterations)
    engine.InitPointTracks(pointtracks)
    engine.InitLineTracks(linetracks)
    return _solve_bundle_adjustment(engine)
from _limap import _base, _ceresbase, _optimize
import numpy as np

def solve_line_refinement(cfg, track, p_camviews, p_vpresults=None, p_heatmaps=None, p_patches=None, p_features=None, dtype="float16"):
    '''
    p_patches: list of PatchInfo_f objects
    '''
    config = _optimize.RefinementConfig(cfg)
    config.solver_options.logging_type = _ceresbase.LoggingType.SILENT

    # tracks supported by too few images are not refined
    if track.count_images() < config.min_num_images:
        return None

    # infer the feature channel count from whichever data was provided
    if p_patches is not None:
        channels = p_patches[0].array.shape[2]
    elif p_features is not None:
        channels = p_features[0].shape[2]
    else:
        channels = 128

    # pick the templated engine class, e.g. "RefinementEngine_f16_c128"
    engine_cls = getattr(_optimize, "RefinementEngine_f{0}_c{1}".format(dtype[-2:], channels))
    engine = engine_cls(config)

    # initialize track and camera views
    engine.Initialize(track, p_camviews)

    # initialize the optional data interpolators
    if p_vpresults is not None:
        engine.InitializeVPs(p_vpresults)
    if p_heatmaps is not None:
        engine.InitializeHeatmaps(p_heatmaps)
    if p_patches is not None:
        engine.InitializeFeaturesAsPatches(p_patches)
    elif p_features is not None:
        engine.InitializeFeatures(p_features)

    # setup and solve
    engine.SetUp()
    engine.Solve()
    return engine
// Python bindings for the pointsfm module (SfmImage / SfmModel).
// NOTE(review): the template arguments of py::class_ appear to have been
// stripped in this copy of the file (presumably py::class_<colmap::mvs::Image>
// and py::class_<SfmModel>); restore them from the original before compiling.
void bind_pointsfm(py::module &m) {
    using namespace pointsfm;

    // bind the colmap mvs image
    py::class_(m, "SfmImage")
        .def(py::init<>())
        .def(py::init(&CreateSfmImage))
        .def("GetR", &colmap::mvs::Image::GetR)
        .def("GetT", &colmap::mvs::Image::GetT)
        .def("GetK", &colmap::mvs::Image::GetK)
        .def("GetP", &colmap::mvs::Image::GetP)
        .def("GetInvP", &colmap::mvs::Image::GetInvP);

    // bind the new sfm model.
    py::class_(m, "SfmModel")
        .def(py::init<>())
        .def("addImage", &SfmModel::addImage, py::arg("image"), py::arg("img_id") = -1)
        .def("addPoint", &SfmModel::addPoint)
        .def("ReadFromCOLMAP", &SfmModel::ReadFromCOLMAP)
        .def("GetImageNames", &SfmModel::GetImageNames)
        .def("ComputeNumPoints", &SfmModel::ComputeNumPoints)
        .def("ComputeSharedPoints", &SfmModel::ComputeSharedPoints)
        .def("GetMaxOverlapImages", &SfmModel::GetMaxOverlapImages)
        .def("GetMaxIoUImages", &SfmModel::GetMaxIoUImages)
        .def("GetMaxDiceCoeffImages", &SfmModel::GetMaxDiceCoeffImages)
        // NOTE(review): "ComputeNumPoints" is registered a second time here
        // (already bound above) — the duplicate is redundant; confirm and drop one.
        .def("ComputeNumPoints", &SfmModel::ComputeNumPoints)
        .def("ComputeRanges", &SfmModel::ComputeRanges);
}
GetMaxOverlapImages( 41 | const size_t num_images, const double min_triangulationo_angle) const; 42 | 43 | std::map> GetMaxIoUImages( 44 | const size_t num_images, const double min_triangulationo_angle) const; 45 | 46 | std::map> GetMaxDiceCoeffImages( 47 | const size_t num_images, const double min_triangulationo_angle) const; 48 | 49 | std::pair ComputeRanges(const std::pair& range_robust, const double& kstretch) const; 50 | 51 | private: 52 | std::pair get_robust_range(std::vector& data, const std::pair& range_robust, const double& kstretch) const; 53 | 54 | std::vector reg_image_ids; 55 | std::map> neighbors_vec_to_map(const std::vector>& neighbors) const; 56 | }; 57 | 58 | } // namespace pointsfm 59 | 60 | } // namespace limap 61 | 62 | #endif 63 | 64 | -------------------------------------------------------------------------------- /limap/runners/__init__.py: -------------------------------------------------------------------------------- 1 | from .functions import * 2 | from .functions_structures import * 3 | from .line_fitnmerge import * 4 | from .line_triangulation import * 5 | from .line_localization import * 6 | 7 | -------------------------------------------------------------------------------- /limap/solvers/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "solvers") 2 | 3 | add_subdirectory(triangulation) 4 | 5 | LIMAP_ADD_SOURCES() 6 | 7 | -------------------------------------------------------------------------------- /limap/solvers/triangulation/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "triangulation") 2 | 3 | LIMAP_ADD_SOURCES( 4 | triangulate_line_with_one_point.h triangulate_line_with_one_point.cc 5 | ) 6 | 7 | -------------------------------------------------------------------------------- /limap/solvers/triangulation/triangulate_line_with_one_point.h: 
-------------------------------------------------------------------------------- 1 | #ifndef LIMAP_SOLVERS_TRIANGULATION_TRIANGULATE_LINE_WITH_ONE_POINT_H_ 2 | #define LIMAP_SOLVERS_TRIANGULATION_TRIANGULATE_LINE_WITH_ONE_POINT_H_ 3 | 4 | #include 5 | 6 | namespace limap { 7 | 8 | namespace solvers { 9 | 10 | namespace triangulation { 11 | 12 | // Author: Viktor Larsson 13 | // The input is as follow 14 | // 15 | // Line (nx, ny, alpha) - The 3D plane (nx, ny, nz, alpha) we want to be close to (projected from the other image) 16 | // 17 | // Point (px, py) - This is the projection of the 3D point onto the plane (the one we should be co-linear with) 18 | // 19 | // Directions (p1x, p1y) and (p2x, p2y). These are the direction vectors of the end-points from the reference image 20 | // These should be normalized (I think, not sure if it matters) 21 | // The backprojected points are then (lambda1*p1x, lambda1*p1y) and (lambda2*p2x, lambda2*p2y). 22 | // 23 | // We are solving for lambda. We reduce to quartic poly in mu (which is a lagrange multiplier, and then backsubst. to get lambda). 24 | // Since there are up to 4 solutions for mu, we plug into the cost (distance from backproj. 
points to line, and choose the best one) 25 | // 26 | // Coordinate system is chosen such that (0,0) is the camera center of the reference view 27 | 28 | std::pair triangulate_line_with_one_point(const Eigen::Vector4d& plane, const Eigen::Vector2d& p, const Eigen::Vector2d& v1, const Eigen::Vector2d& v2); 29 | 30 | } // namespace triangulation 31 | 32 | } // namespace solvers 33 | 34 | } // namespace limap 35 | 36 | #endif 37 | 38 | -------------------------------------------------------------------------------- /limap/structures/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "structures") 2 | 3 | LIMAP_ADD_SOURCES( 4 | bindings.cc 5 | pl_bipartite_base.h pl_bipartite_base.cc 6 | pl_bipartite.h pl_bipartite.cc 7 | vpline_bipartite.h vpline_bipartite.cc 8 | ) 9 | 10 | -------------------------------------------------------------------------------- /limap/structures/__init__.py: -------------------------------------------------------------------------------- 1 | from _limap._structures import * 2 | -------------------------------------------------------------------------------- /limap/structures/pl_bipartite.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_STRUCTURES_PL_BIPARTITE_H 2 | #define LIMAP_STRUCTURES_PL_BIPARTITE_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | namespace py = pybind11; 9 | 10 | #include "structures/pl_bipartite_base.h" 11 | #include "util/types.h" 12 | #include "base/linebase.h" 13 | #include "base/pointtrack.h" 14 | #include "base/linetrack.h" 15 | 16 | namespace limap { 17 | 18 | namespace structures { 19 | 20 | typedef Junction Junction2d; 21 | typedef Junction Junction3d; 22 | 23 | struct PL_Bipartite2dConfig { 24 | PL_Bipartite2dConfig() {} 25 | PL_Bipartite2dConfig(py::dict dict) { 26 | ASSIGN_PYDICT_ITEM(dict, threshold_intersection, double) 27 | ASSIGN_PYDICT_ITEM(dict, threshold_merge_junctions, double) 28 
| ASSIGN_PYDICT_ITEM(dict, threshold_keypoints, double) 29 | } 30 | 31 | double threshold_keypoints = 2.0; // in pixels 32 | double threshold_intersection = 2.0; // in pixels 33 | double threshold_merge_junctions = 2.0; // in pixels 34 | }; 35 | 36 | class PL_Bipartite2d: public PL_Bipartite { 37 | public: 38 | PL_Bipartite2d() {} 39 | ~PL_Bipartite2d() {} 40 | PL_Bipartite2d(const PL_Bipartite2dConfig& config): config_(config) {} 41 | PL_Bipartite2d(const PL_Bipartite2d& obj): PL_Bipartite(obj) {} 42 | PL_Bipartite2d(py::dict dict); 43 | py::dict as_dict() const; 44 | 45 | void add_keypoint(const Point2d& p, int point_id = -1); // compute connection by point-line distance; 46 | void add_keypoints_with_point3D_ids(const std::vector& points, const std::vector& point3D_ids, const std::vector& ids = std::vector()); 47 | void compute_intersection(); // compute intersections 48 | void compute_intersection_with_points(const std::vector& points); // compute intersection and remove overlaps with input points 49 | 50 | private: 51 | PL_Bipartite2dConfig config_; 52 | std::pair intersect(const Line2d& l1, const Line2d& l2) const; 53 | Junction merge_junctions(const std::vector> juncs) const; 54 | }; 55 | 56 | class PL_Bipartite3d: public PL_Bipartite { 57 | public: 58 | PL_Bipartite3d() {} 59 | ~PL_Bipartite3d() {} 60 | PL_Bipartite3d(const PL_Bipartite3d& obj): PL_Bipartite(obj) {} 61 | PL_Bipartite3d(py::dict dict); 62 | py::dict as_dict() const; 63 | 64 | std::vector get_point_cloud() const; 65 | std::vector get_line_cloud() const; 66 | }; 67 | 68 | } // namespace structures 69 | 70 | } // namespace limap 71 | 72 | #endif 73 | 74 | -------------------------------------------------------------------------------- /limap/structures/vpline_bipartite.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_STRUCTURES_VPLINE_BIPARTITE_H 2 | #define LIMAP_STRUCTURES_VPLINE_BIPARTITE_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 
namespace py = pybind11; 9 | 10 | #include "structures/pl_bipartite_base.h" 11 | #include "base/linetrack.h" 12 | #include "vplib/vpbase.h" 13 | #include "vplib/vptrack.h" 14 | 15 | namespace limap { 16 | 17 | namespace structures { 18 | 19 | typedef Junction VP_Junction2d; 20 | typedef Junction VP_Junction3d; 21 | 22 | class VPLine_Bipartite2d: public PL_Bipartite { 23 | public: 24 | VPLine_Bipartite2d() {} 25 | ~VPLine_Bipartite2d() {} 26 | VPLine_Bipartite2d(const VPLine_Bipartite2d& obj): PL_Bipartite(obj) {} 27 | VPLine_Bipartite2d(py::dict dict); 28 | py::dict as_dict() const; 29 | }; 30 | 31 | class VPLine_Bipartite3d: public PL_Bipartite { 32 | public: 33 | VPLine_Bipartite3d() {} 34 | ~VPLine_Bipartite3d() {} 35 | VPLine_Bipartite3d(const VPLine_Bipartite3d& obj): PL_Bipartite(obj) {} 36 | VPLine_Bipartite3d(py::dict dict); 37 | py::dict as_dict() const; 38 | }; 39 | 40 | std::map GetAllBipartites_VPLine2d(const std::map>& all_2d_lines, const std::map& vpresults, const std::vector& vptracks); 41 | 42 | } // namespace structures 43 | 44 | } // namespace limap 45 | 46 | #endif 47 | 48 | -------------------------------------------------------------------------------- /limap/triangulation/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "triangulation") 2 | 3 | LIMAP_ADD_SOURCES( 4 | bindings.cc 5 | functions.h functions.cc 6 | base_line_triangulator.h base_line_triangulator.cc 7 | global_line_triangulator.h global_line_triangulator.cc 8 | ) 9 | 10 | -------------------------------------------------------------------------------- /limap/triangulation/__init__.py: -------------------------------------------------------------------------------- 1 | from _limap._triangulation import * 2 | from .triangulation import * 3 | -------------------------------------------------------------------------------- /limap/triangulation/triangulation.py: 
from _limap import _triangulation as _tri
from _limap import _base
import numpy as np

# Thin Python wrappers over the C++ bindings in `_limap._triangulation`.
# Every function below simply forwards its arguments to the binding of the
# same name; see the C++ side (limap/triangulation/) for the actual math
# and argument semantics.

def get_normal_direction(l, view):
    # forwards to C++ binding
    return _tri.get_normal_direction(l, view)

def get_direction_from_VP(vp, view):
    # forwards to C++ binding
    return _tri.get_direction_from_VP(vp, view)

def compute_essential_matrix(view1, view2):
    # forwards to C++ binding
    return _tri.compute_essential_matrix(view1, view2)

def compute_fundamental_matrix(view1, view2):
    # forwards to C++ binding
    return _tri.compute_fundamental_matrix(view1, view2)

def compute_epipolar_IoU(l1, view1, l2, view2):
    # forwards to C++ binding
    return _tri.compute_epipolar_IoU(l1, view1, l2, view2)

def point_triangulation(p1, view1, p2, view2):
    # forwards to C++ binding
    return _tri.point_triangulation(p1, view1, p2, view2)

def triangulate_endpoints(l1, view1, l2, view2):
    # forwards to C++ binding
    return _tri.triangulate_endpoints(l1, view1, l2, view2)

def triangulate(l1, view1, l2, view2):
    # forwards to C++ binding
    return _tri.triangulate(l1, view1, l2, view2)

def triangulate_with_one_point(l1, view1, l2, view2, p):
    # forwards to C++ binding
    return _tri.triangulate_with_one_point(l1, view1, l2, view2, p)

def triangulate_with_direction(l1, view1, l2, view2, direc):
    # forwards to C++ binding
    return _tri.triangulate_with_direction(l1, view1, l2, view2, direc)
/limap/undistortion/bindings.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include "_limap/helpers.h" 9 | 10 | #include 11 | #include "undistortion/undistort.h" 12 | 13 | namespace py = pybind11; 14 | 15 | namespace limap { 16 | 17 | void bind_undistortion(py::module &m) { 18 | using namespace undistortion; 19 | 20 | py::class_(m, "COLMAP_Bitmap") 21 | .def(py::init<>()) 22 | .def("Read", &colmap::Bitmap::Read, py::arg("imname"), py::arg("as_rgb") = true) 23 | .def("Write", &colmap::Bitmap::Write) 24 | .def("Width", &colmap::Bitmap::Width) 25 | .def("Height", &colmap::Bitmap::Height) 26 | .def("Channels", &colmap::Bitmap::Channels); 27 | 28 | m.def("_UndistortCamera", &UndistortCamera); 29 | m.def("_UndistortCameraView", &UndistortCameraView); 30 | m.def("_UndistortPoint", &UndistortPoint); 31 | m.def("_UndistortPoints", &UndistortPoints); 32 | } 33 | 34 | } // namespace limap 35 | 36 | -------------------------------------------------------------------------------- /limap/undistortion/undistort.cc: -------------------------------------------------------------------------------- 1 | #include "undistortion/undistort.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | namespace limap { 9 | 10 | namespace undistortion { 11 | 12 | Camera UndistortCamera(const std::string& imname_in, const Camera& camera, const std::string& imname_out) { 13 | colmap::Bitmap img, img_undistorted; 14 | img.Read(imname_in); 15 | colmap::Camera cam = camera; 16 | 17 | bool exif_autorotate = false; 18 | if (cam.Height() != img.Height() || cam.Width() != img.Width()) { 19 | if (cam.Width() != img.Height() || cam.Height() != img.Width()) 20 | throw std::runtime_error("Error! 
from _limap import _base, _undistortion
import os, sys
import cv2
import numpy as np
import copy

def UndistortImageCamera(camera, imname_in, imname_out):
    """Undistort the image at `imname_in`, writing the result to `imname_out`,
    and return the camera describing the output image."""
    if not camera.IsUndistorted():
        # distorted model: let the C++ backend undistort both image and camera
        return _undistortion._UndistortCamera(imname_in, camera, imname_out)

    # no distortion: the image passes through unchanged
    cv2.imwrite(imname_out, cv2.imread(imname_in))
    model_id = camera.model_id()
    if model_id in (0, 1):
        # models 0/1: return the camera as-is
        return camera
    # model 2 ("SIMPLE_RADIAL") collapses to "SIMPLE_PINHOLE";
    # anything else becomes plain "PINHOLE"
    target_model = "SIMPLE_PINHOLE" if model_id == 2 else "PINHOLE"
    return _base.Camera(target_model, camera.K(), cam_id=camera.cam_id(), hw=[camera.h(), camera.w()])

def UndistortPoints(points, distorted_camera, undistorted_camera):
    """Map 2D points from the distorted camera into the undistorted one."""
    return _undistortion._UndistortPoints(points, distorted_camera, undistorted_camera)
def compute_pose_err(pose, pose_gt):
    '''
    Inputs:
    - pose: _base.CameraPose
    - pose_gt: _base.CameraPose
    '''
    # translation error: distance between camera centers
    t_err = np.linalg.norm(pose.center() - pose_gt.center())
    # rotation error: angle between the two rotations, in degrees
    return t_err, compute_rot_err(pose.R(), pose_gt.R())

def eval_imagecols(imagecols, imagecols_gt):
    """Align `imagecols` to `imagecols_gt` and return per-image
    (translation, rotation) error lists over the shared image ids."""
    _, aligned = _base.align_imagecols(imagecols, imagecols_gt)
    shared_img_ids = list(set(imagecols.get_img_ids()) & set(imagecols_gt.get_img_ids()))
    # every image in `imagecols` must also exist in the ground truth
    assert len(shared_img_ids) == imagecols.NumImages()
    imagecols_gt = imagecols_gt.subset_by_image_ids(shared_img_ids)
    trans_errs, rot_errs = [], []
    for img_id in shared_img_ids:
        t_err, r_err = compute_pose_err(aligned.camimage(img_id).pose,
                                        imagecols_gt.camimage(img_id).pose)
        trans_errs.append(t_err)
        rot_errs.append(r_err)
    return trans_errs, rot_errs
def rotation_from_euler_angles(rot_x, rot_y, rot_z):
    """Build a 3x3 rotation matrix from Euler angles (in radians).

    The rotations are composed as R = R_z @ R_y @ R_x, i.e. the x-rotation
    is applied first, then y, then z (extrinsic x-y-z convention).
    """
    # Calculate rotation about x axis
    # (BUGFIX: this comment previously claimed "about y axis")
    R_x = np.array([[1., 0., 0.],
                    [0, np.cos(rot_x), -np.sin(rot_x)],
                    [0, np.sin(rot_x), np.cos(rot_x)]])
    # Calculate rotation about y axis
    R_y = np.array([[np.cos(rot_y), 0., np.sin(rot_y)],
                    [0., 1., 0.],
                    [-np.sin(rot_y), 0., np.cos(rot_y)]])
    # Calculate rotation about z axis
    R_z = np.array([[np.cos(rot_z), -np.sin(rot_z), 0., ],
                    [np.sin(rot_z), np.cos(rot_z), 0., ],
                    [0., 0., 1.]])
    return R_z @ R_y @ R_x

def rotation_from_quaternion(quad):
    """Convert a quaternion (w, x, y, z ordering) to a 3x3 rotation matrix.

    The quaternion is normalized before conversion.
    Raises:
    - ValueError: when the quaternion norm is too close to zero to be
      normalized robustly.
    """
    norm = np.linalg.norm(quad)
    if norm < 1e-10:
        raise ValueError("Error! the quaternion is not robust. quad.norm() = {0}".format(norm))
    quad = quad / norm
    qr, qi, qj, qk = quad[0], quad[1], quad[2], quad[3]
    # Standard quaternion-to-rotation-matrix formula.
    rot_mat = np.zeros((3, 3))
    rot_mat[0,0] = 1 - 2 * (qj ** 2 + qk ** 2)
    rot_mat[0,1] = 2 * (qi * qj - qk * qr)
    rot_mat[0,2] = 2 * (qi * qk + qj * qr)
    rot_mat[1,0] = 2 * (qi * qj + qk * qr)
    rot_mat[1,1] = 1 - 2 * (qi ** 2 + qk ** 2)
    rot_mat[1,2] = 2 * (qj * qk - qi * qr)
    rot_mat[2,0] = 2 * (qi * qk - qj * qr)
    rot_mat[2,1] = 2 * (qj * qk + qi * qr)
    rot_mat[2,2] = 1 - 2 * (qi ** 2 + qj ** 2)
    return rot_mat
class Open3DTrackVisualizer(BaseTrackVisualizer):
    """Line-track visualizer backed by the Open3D rendering engine."""

    def __init__(self, tracks):
        super(Open3DTrackVisualizer, self).__init__(tracks)

    def reset(self):
        """(Re)initialize and return the Open3D GUI application singleton."""
        app = o3d.visualization.gui.Application.instance
        app.initialize()
        return app

    def vis_all_lines(self, n_visible_views=4, width=2, scale=1.0, ranges=None):
        """Show all tracks supported by at least `n_visible_views` views.

        BUGFIX: `ranges` was referenced here without being defined,
        raising a NameError on every call. It is now an optional parameter
        (backward-compatible default None), matching vis_reconstruction.
        """
        lines = self.get_lines_n_visible_views(n_visible_views)
        vis = o3d.visualization.Visualizer()
        vis.create_window(height=1080, width=1920)
        line_set = open3d_get_line_set(lines, width=width, ranges=ranges, scale=scale)
        vis.add_geometry(line_set)
        vis.run()
        vis.destroy_window()

    def vis_reconstruction(self, imagecols, n_visible_views=4, width=2, ranges=None, scale=1.0, cam_scale=1.0):
        """Show the line tracks together with the cameras of `imagecols`."""
        lines = self.get_lines_n_visible_views(n_visible_views)
        lranges = compute_robust_range_lines(lines)
        # Scale the camera frusta relative to the robust extent of the lines.
        scale_cam_geometry = abs(lranges[1, :] - lranges[0, :]).max()

        vis = o3d.visualization.Visualizer()
        vis.create_window(height=1080, width=1920)
        line_set = open3d_get_line_set(lines, width=width, ranges=ranges, scale=scale)
        vis.add_geometry(line_set)
        camera_set = open3d_get_cameras(imagecols, ranges=ranges, scale_cam_geometry=scale_cam_geometry * cam_scale, scale=scale)
        vis.add_geometry(camera_set)
        vis.run()
        vis.destroy_window()
width=2): 27 | flags = [track.HasImage(img_id) for track in self.tracks] 28 | for track_id, line in enumerate(self.lines): 29 | if self.counts[track_id] < n_visible_views: 30 | continue 31 | color = "#ff0000" 32 | if flags[track_id]: 33 | color = "#00ff00" 34 | self.plotter.add_lines(line.as_array(), color, width=width) 35 | self.plotter.show() 36 | 37 | def vis_additional_lines(self, lines, img_hw=(600, 800), width=2): 38 | for track_id, line in enumerate(self.lines): 39 | color = "#ff0000" 40 | self.plotter.add_lines(line.as_array(), color, width=width) 41 | for line in lines: 42 | color = "#00ff00" 43 | self.plotter.add_lines(line.as_array(), color, width=width) 44 | self.plotter.show() 45 | 46 | -------------------------------------------------------------------------------- /limap/vplib/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "vplib") 2 | 3 | add_subdirectory(JLinkage) 4 | 5 | LIMAP_ADD_SOURCES( 6 | vpbase.h 7 | base_vp_detector.h base_vp_detector.cc 8 | 9 | vptrack.h vptrack.cc 10 | global_vptrack_constructor.h global_vptrack_constructor.cc 11 | 12 | bindings.cc 13 | ) 14 | 15 | -------------------------------------------------------------------------------- /limap/vplib/JLinkage/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(FOLDER_NAME "JLinkage") 2 | 3 | LIMAP_ADD_SOURCES( 4 | JLinkage.h JLinkage.cc 5 | bindings.cc 6 | ) 7 | 8 | -------------------------------------------------------------------------------- /limap/vplib/JLinkage/JLinkage.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_VPLIB_JLINKAGE_H_ 2 | #define LIMAP_VPLIB_JLINKAGE_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "vplib/base_vp_detector.h" 10 | 11 | namespace py = pybind11; 12 | 13 | namespace limap { 14 | 15 | namespace vplib { 16 | 17 | namespace JLinkage { 18 | 19 | 
class JLinkage(BaseVPDetector):
    """Python wrapper around the pybind11-bound C++ J-Linkage VP detector."""

    def __init__(self, cfg_jlinkage, options = BaseVPDetectorOptions()):
        super(JLinkage, self).__init__(options)
        # All the actual clustering happens on the C++ side.
        self.detector = _vplib.JLinkage(cfg_jlinkage)

    def get_module_name(self):
        return "JLinkage"

    def detect_vp(self, lines, camview=None):
        # The camera view is unused by J-Linkage; the argument only keeps
        # the interface uniform across VP detectors.
        return self.detector.AssociateVPs(lines)

    # NOTE: detect_vp_all_images is intentionally inherited (joblib-based)
    # to unify the n_jobs interface across detectors. The C++-side
    # alternative (self.detector.AssociateVPsParallel) initializes its
    # threads faster, but is kept disabled for interface consistency.
-------------------------------------------------------------------------------- 1 | from _limap._vplib import * 2 | 3 | import os, sys 4 | sys.path.append(os.path.dirname(os.path.abspath(__file__))) 5 | from .register_vp_detector import get_vp_detector 6 | 7 | -------------------------------------------------------------------------------- /limap/vplib/base_vp_detector.cc: -------------------------------------------------------------------------------- 1 | #include "vplib/base_vp_detector.h" 2 | 3 | #include "base/infinite_line.h" 4 | #include "base/graph.h" 5 | 6 | #include 7 | 8 | namespace limap { 9 | 10 | namespace vplib { 11 | 12 | py::dict BaseVPDetectorConfig::as_dict() const { 13 | py::dict output; 14 | output["min_length"] = min_length; 15 | output["inlier_threshold"] = inlier_threshold; 16 | output["min_num_supports"] = min_num_supports; 17 | output["th_perp_supports"] = th_perp_supports; 18 | return output; 19 | } 20 | 21 | std::map BaseVPDetector::AssociateVPsParallel(const std::map>& all_lines) const { 22 | std::vector image_ids; 23 | for (std::map>::const_iterator it = all_lines.begin(); it != all_lines.end(); ++it) { 24 | image_ids.push_back(it->first); 25 | } 26 | 27 | std::map vpresults; 28 | progressbar bar(image_ids.size()); 29 | #pragma omp parallel for 30 | for (const int& img_id: image_ids) { 31 | bar.update(); 32 | vpresults.insert(std::make_pair(img_id, AssociateVPs(all_lines.at(img_id)))); 33 | } 34 | return vpresults; 35 | } 36 | 37 | int BaseVPDetector::count_valid_supports_2d(const std::vector& lines) const { 38 | // count 2d supports that do that lie on the same infinite line 39 | size_t n_lines = lines.size(); 40 | std::vector parent_nodes(n_lines, -1); 41 | for (size_t i = 0; i < n_lines - 1; ++i) { 42 | size_t root_i = union_find_get_root(i, parent_nodes); 43 | for (size_t j = i + 1; j < n_lines; ++j) { 44 | size_t root_j = union_find_get_root(j, parent_nodes); 45 | if (root_j == root_i) 46 | continue; 47 | // test connection: 
project the shorter line to the longer one 48 | int k1 = i; int k2 = j; 49 | if (lines[i].length() > lines[j].length()) { 50 | k1 = j; k2 = i; 51 | } 52 | double ds = InfiniteLine2d(lines[k2]).point_distance(lines[k1].start); 53 | double de = InfiniteLine2d(lines[k2]).point_distance(lines[k1].end); 54 | double dist = std::max(ds, de); 55 | if (dist > config_.th_perp_supports) 56 | continue; 57 | parent_nodes[root_j] = root_i; 58 | } 59 | } 60 | int n_supports = 0; 61 | for (size_t i = 0; i < n_lines; ++i) { 62 | if (parent_nodes[i] == -1) 63 | n_supports++; 64 | } 65 | return n_supports; 66 | } 67 | 68 | } // namespace vplib 69 | 70 | } // namespace limap 71 | 72 | -------------------------------------------------------------------------------- /limap/vplib/base_vp_detector.h: -------------------------------------------------------------------------------- 1 | #ifndef LIMAP_VPLIB_BASE_VP_DETECTOR_H_ 2 | #define LIMAP_VPLIB_BASE_VP_DETECTOR_H_ 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "_limap/helpers.h" 10 | #include "base/linebase.h" 11 | #include "util/types.h" 12 | 13 | #include "vplib/vpbase.h" 14 | 15 | namespace py = pybind11; 16 | 17 | namespace limap { 18 | 19 | namespace vplib { 20 | 21 | class BaseVPDetectorConfig { 22 | public: 23 | BaseVPDetectorConfig() {} 24 | BaseVPDetectorConfig(py::dict dict) { 25 | ASSIGN_PYDICT_ITEM(dict, min_length, double) 26 | ASSIGN_PYDICT_ITEM(dict, inlier_threshold, double) 27 | ASSIGN_PYDICT_ITEM(dict, min_num_supports, int) 28 | ASSIGN_PYDICT_ITEM(dict, th_perp_supports, double) 29 | } 30 | py::dict as_dict() const; 31 | 32 | double min_length = 40; // in pixel 33 | double inlier_threshold = 1.0; // in pixel 34 | int min_num_supports = 5; 35 | double th_perp_supports = 3.0; // in pixel. 
import os, sys
import numpy as np
import joblib
from tqdm import tqdm

from collections import namedtuple
# Options shared by every VP detector.
# - n_jobs: number of joblib workers used by detect_vp_all_images.
BaseVPDetectorOptions = namedtuple("BaseVPDetectorOptions",
        ["n_jobs"],
        defaults = [1])

class BaseVPDetector():
    """Abstract base class for vanishing-point (VP) detectors.

    Subclasses must implement get_module_name() and detect_vp(); the
    joblib-based batch method detect_vp_all_images() is provided here.
    """
    def __init__(self, options = BaseVPDetectorOptions()):
        self.n_jobs = options.n_jobs

    # Module name needs to be set
    def get_module_name(self):
        raise NotImplementedError
    # The functions below are required for VP detectors
    def detect_vp(self, lines, camview=None):
        '''
        Input:
        - lines type: std::vector
        Output:
        - vpresult type: limap.vplib.VPResult
        '''
        raise NotImplementedError

    def detect_vp_all_images(self, all_lines, camviews=None):
        # Detect VPs for every image in parallel with joblib.
        # all_lines maps img_id -> list of 2D lines; camviews (optional)
        # maps img_id -> camera view passed through to detect_vp.
        # Thin wrappers so joblib can dispatch per-image work.
        def process(self, lines):
            return self.detect_vp(lines)
        def process_camview(self, lines, camview):
            return self.detect_vp(lines, camview)
        if camviews is None:
            vpresults_vector = joblib.Parallel(self.n_jobs)(joblib.delayed(process)(self, lines) for (img_id, lines) in tqdm(all_lines.items()))
        else:
            vpresults_vector = joblib.Parallel(self.n_jobs)(joblib.delayed(process_camview)(self, lines, camviews[img_id]) for (img_id, lines) in tqdm(all_lines.items()))
        # map vector back to map
        # (joblib preserves input order, so results align with the keys)
        vpresults = dict()
        for idx, img_id in enumerate(list(all_lines.keys())):
            vpresults[img_id] = vpresults_vector[idx]
        return vpresults

    def visualize(self, fname, img, lines, vpresult, show_original=False, endpoints=False):
        # Render the per-line VP assignment onto `img` and write it to `fname`.
        import cv2
        import limap.visualize as limapvis
        img = limapvis.vis_vpresult(img, lines, vpresult, show_original=show_original, endpoints=endpoints)
        cv2.imwrite(fname, img)
from collections import namedtuple
# Per-method options; fields present in the input cfg override the defaults.
ProgressiveXOptions = namedtuple("ProgressiveXOptions",
        ["min_length", "inlier_threshold"],
        defaults = [20, 1.0])

class ProgressiveX(BaseVPDetector):
    """VP detector wrapping pyprogressivex.findVanishingPoints."""

    def __init__(self, cfg, options = BaseVPDetectorOptions()):
        super(ProgressiveX, self).__init__(options)
        self.options = ProgressiveXOptions()
        for fld in self.options._fields:
            if fld in cfg:
                # BUGFIX: the previous code called _replace(fld=cfg[fld]),
                # which asks namedtuple to replace a field literally named
                # "fld" and raises ValueError. Keyword expansion replaces
                # the field whose name is held in the variable `fld`.
                self.options = self.options._replace(**{fld: cfg[fld]})

    def get_module_name(self):
        return "progressive-x"

    def detect_vp(self, lines, camview=None):
        """Detect vanishing points on one image.

        Inputs:
        - lines: list of 2D line segments (with .length() / .as_array())
        - camview: camera view providing the image size (required)
        Returns:
        - _vplib.VPResult with per-line labels (-1 = unassociated) and VPs
        """
        if camview is None:
            # The image width/height is required by progressive-x.
            raise NotImplementedError

        # Initialize all labels as unassociated (-1); only lines longer
        # than min_length participate in the inference.
        labels = (np.ones(len(lines)) * -1).astype(int)
        flags = [line.length() >= self.options.min_length for line in lines]

        # Progressive-X inference on the long-enough lines.
        lines = [line for line in lines if line.length() >= self.options.min_length]
        lines_array = np.array([line.as_array().reshape(-1) for line in lines])
        weights_array = np.array([line.length() for line in lines])

        vanishing_points, labeling = pyprogressivex.findVanishingPoints(
            np.ascontiguousarray(lines_array),
            np.ascontiguousarray(weights_array),
            camview.w(), camview.h(),
            threshold = self.options.inlier_threshold,
            conf = 0.99,
            spatial_coherence_weight = 0.0,
            neighborhood_ball_radius = 1.0,
            maximum_tanimoto_similarity = 1.0,
            max_iters = 1000,
            minimum_point_number = 5,
            maximum_model_number = -1,
            sampler_id = 0,
            scoring_exponent = 1.0,
            do_logging = False)

        # Output: shift labeling by one so that -1 marks unassociated lines
        # (NOTE(review): assumes pyprogressivex labeling is offset by one
        # relative to our convention — confirm against pyprogressivex docs).
        labels[flags] = labeling - 1
        vps = vanishing_points.tolist()
        vpres = _vplib.VPResult(labels, vps)
        return vpres
// Result of VP detection on a single image: one cluster label per line
// plus the list of detected vanishing points.
// NOTE(review): the element types of labels/vps were stripped by the
// dump; reconstructed as std::vector<int> / std::vector<V3D> from the
// accessors below — confirm against the original header.
class VPResult {
public:
    VPResult() {}
    VPResult(const std::vector<int>& labels_, const std::vector<V3D>& vps_): labels(labels_), vps(vps_) {}
    // Copy constructor.
    VPResult(const VPResult& input): labels(input.labels), vps(input.vps) {}
    // Serialize to a Python dict (inverse of the py::dict constructor).
    py::dict as_dict() const { py::dict output; output["labels"] = labels; output["vps"] = vps; return output; }
    VPResult(py::dict dict) {
        ASSIGN_PYDICT_ITEM(dict, labels, std::vector<int>)
        ASSIGN_PYDICT_ITEM(dict, vps, std::vector<V3D>)
    }

    std::vector<int> labels; // -1 denotes the unassociated lines
    std::vector<V3D> vps;    // detected vanishing points (V3D); presumably homogeneous image coordinates — confirm

    size_t count_lines() const { return labels.size(); }
    size_t count_vps() const { return vps.size(); }
    // Cluster id of a line (-1 when the line has no associated VP).
    int GetVPLabel(const int& line_id) const { return labels[line_id]; }
    V3D GetVPbyCluster(const int& vp_id) const { return vps[vp_id]; }
    bool HasVP(const int& line_id) const { return GetVPLabel(line_id) >= 0; }
    // VP associated with a line; throws when the line has no VP.
    V3D GetVP(const int& line_id) const {
        THROW_CHECK_EQ(HasVP(line_id), true);
        return GetVPbyCluster(GetVPLabel(line_id));
    }
};
direction(obj.direction), supports(obj.supports) {} 27 | VPTrack(py::dict dict); 28 | py::dict as_dict() const; 29 | 30 | V3D direction; 31 | std::vector supports; 32 | size_t length() const { return supports.size(); } 33 | }; 34 | 35 | std::vector MergeVPTracksByDirection(const std::vector& vptracks, const double th_angle_merge = 1.0); 36 | 37 | } // namespace vplib 38 | 39 | } // namespace limap 40 | 41 | #endif 42 | 43 | -------------------------------------------------------------------------------- /misc/install/lbd.md: -------------------------------------------------------------------------------- 1 | ## LBD installation 2 | 3 | Install OpenCV dependency 4 | ```bash 5 | sudo apt-get install libopencv-dev libopencv-contrib-dev libarpack++2-dev libarpack2-dev libsuperlu-dev 6 | ``` 7 | Then install the pytlbd Python package from [Iago Suárez](https://github.com/iago-suarez) 8 | ```bash 9 | python -m pip install -e ./third-party/pytlbd 10 | ``` 11 | The LBD is listed as a dependency with separate installation to avoid the OpenCV dependency. When using the LBD matcher inside LIMAP, please use the parallelization with ``--line2d.matcher.n_jobs 8`` (8 cores, you can use even more CPU cores if applicable). 12 | 13 | -------------------------------------------------------------------------------- /misc/install/poselib.md: -------------------------------------------------------------------------------- 1 | ## Install PoseLib as Dependency 2 | Clone the repository 3 | ```bash 4 | git clone --recursive https://github.com/vlarsson/PoseLib.git 5 | cd PoseLib 6 | ``` 7 | Build and install: 8 | ```bash 9 | mkdir build && cd build 10 | cmake .. 
The implementation of TP-LSD originates from [https://github.com/Siyuada7/TP-LSD](https://github.com/Siyuada7/TP-LSD) and was later adapted for pip installation by [Rémi Pautrat](https://github.com/rpautrat) in his [forked repo](https://github.com/rpautrat/TP-LSD).
def parse_config():
    """Parse CLI arguments and build the ETH3D fit-and-merge config dict.

    Unknown arguments are forwarded to cfgutils.update_config, with
    shortcuts -nv / -sid for --n_visible_views / --scene_id.
    """
    import argparse
    arg_parser = argparse.ArgumentParser(description='fitnmerge 3d lines')
    arg_parser.add_argument('-c', '--config_file', type=str, default='cfgs/fitnmerge/eth3d.yaml', help='config file')
    arg_parser.add_argument('--default_config_file', type=str, default='cfgs/fitnmerge/default.yaml', help='default config file')
    arg_parser.add_argument('--info_reuse', action='store_true', help="whether to use infonpy at tmp/infos_eth3d.npy")
    arg_parser.add_argument('--info_path', type=str, default=None, help='load precomputed info')

    args, unknown = arg_parser.parse_known_args()
    cfg = cfgutils.load_config(args.config_file, default_path=args.default_config_file)
    shortcuts = dict()
    shortcuts['-nv'] = '--n_visible_views'
    shortcuts['-sid'] = '--scene_id'
    # BUGFIX: cfg["info_path"] used to be overwritten unconditionally with
    # args.info_path (possibly None), which silently disabled --info_reuse.
    # An explicit --info_path still takes precedence over --info_reuse.
    cfg["info_path"] = args.info_path
    if args.info_reuse and args.info_path is None:
        cfg["info_path"] = "tmp/infos_eth3d.npy"
    cfg = cfgutils.update_config(cfg, unknown, shortcuts)
    cfg["folder_to_load"] = os.path.join("precomputed", "eth3d", cfg["reso_type"], "{0}_cam{1}".format(cfg["scene_id"], cfg["cam_id"]))
    return cfg
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from ETH3D import ETH3D
from loader import read_scene_eth3d

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import limap.util.config as cfgutils
import limap.runners

def run_scene_eth3d(cfg, dataset, reso_type, scene_id, cam_id=0):
    """Triangulate 3D line tracks for a single ETH3D scene.

    Args:
        cfg (dict): limap triangulation configuration.
        dataset: ETH3D dataset helper.
        reso_type (str): ETH3D resolution type.
        scene_id (str): scene identifier.
        cam_id (int): camera id used to filter images (default 0).

    Returns:
        The reconstructed 3D line tracks.
    """
    imagecols, neighbors, ranges = read_scene_eth3d(
        cfg, dataset, reso_type, scene_id, cam_id=cam_id, load_depth=False)
    linetracks = limap.runners.line_triangulation(
        cfg, imagecols, neighbors=neighbors, ranges=ranges)
    return linetracks

def parse_config():
    """Parse CLI arguments and build the merged configuration dict."""
    import argparse
    arg_parser = argparse.ArgumentParser(description='triangulate 3d lines')
    arg_parser.add_argument('-c', '--config_file', type=str, default='cfgs/triangulation/eth3d.yaml', help='config file')
    arg_parser.add_argument('--default_config_file', type=str, default='cfgs/triangulation/default.yaml', help='default config file')
    arg_parser.add_argument('--info_reuse', action='store_true', help="whether to use infonpy at tmp/infos_eth3d.npy")
    arg_parser.add_argument('--info_path', type=str, default=None, help='load precomputed info')

    args, unknown = arg_parser.parse_known_args()
    cfg = cfgutils.load_config(args.config_file, default_path=args.default_config_file)
    shortcuts = dict()
    shortcuts['-nv'] = '--n_visible_views'
    shortcuts['-nn'] = '--n_neighbors'
    shortcuts['-sid'] = '--scene_id'
    # BUGFIX: the original code assigned args.info_path AFTER the info_reuse
    # branch, unconditionally resetting cfg["info_path"] back to None and
    # silently disabling --info_reuse. Assign the explicit path first so the
    # key always exists, then let --info_reuse take precedence.
    cfg["info_path"] = args.info_path
    if args.info_reuse:
        cfg["info_path"] = "tmp/infos_eth3d.npy"
    cfg = cfgutils.update_config(cfg, unknown, shortcuts)
    cfg["folder_to_load"] = os.path.join("precomputed", "eth3d", cfg["reso_type"], "{0}_cam{1}".format(cfg["scene_id"], cfg["cam_id"]))
    return cfg

def main():
    cfg = parse_config()
    dataset = ETH3D(cfg["data_dir"])
    run_scene_eth3d(cfg, dataset, cfg["reso_type"], cfg["scene_id"], cfg["cam_id"])

if __name__ == '__main__':
    main()
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Hypersim import read_raydepth, raydepth2depth

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import limap.base as _base

class HypersimDepthReader(_base.BaseDepthReader):
    """Lazy depth reader that converts Hypersim ray-depth maps to z-depth."""

    def __init__(self, filename, K, img_hw):
        super(HypersimDepthReader, self).__init__(filename)
        self.K = K
        self.img_hw = img_hw

    def read(self, filename):
        # Hypersim stores distance along the viewing ray; convert to depth.
        raydepth = read_raydepth(filename, resize_hw=self.img_hw)
        return raydepth2depth(raydepth, self.K, self.img_hw)

def read_scene_hypersim(cfg, dataset, scene_id, cam_id=0, load_depth=False):
    """Load one Hypersim scene as an ImageCollection, optionally with depths.

    Returns imagecols when load_depth is False, else (imagecols, depths).
    """
    dataset.set_scene_id(scene_id)
    dataset.set_max_dim(cfg["max_image_dim"])

    # Sample view indexes with the configured stride, then keep only the
    # ones valid for the requested camera.
    selected_ids = np.arange(0, cfg["input_n_views"], cfg["input_stride"]).tolist()
    selected_ids = dataset.filter_index_list(selected_ids, cam_id=cam_id)

    intrinsics = dataset.K.astype(np.float32)
    img_hw = [dataset.h, dataset.w]
    Ts, Rs = dataset.load_cameras(cam_id=cam_id)

    # One shared pinhole camera (id 0) plus a CameraImage per selected view.
    cameras = {0: _base.Camera("SIMPLE_PINHOLE", intrinsics, cam_id=0, hw=img_hw)}
    camimages = {
        image_id: _base.CameraImage(
            0,
            _base.CameraPose(Rs[image_id], Ts[image_id]),
            image_name=dataset.load_imname(image_id, cam_id=cam_id))
        for image_id in selected_ids
    }
    imagecols = _base.ImageCollection(cameras, camimages)

    if not load_depth:
        return imagecols
    depths = {
        image_id: HypersimDepthReader(
            dataset.load_raydepth_fname(image_id, cam_id=cam_id), intrinsics, img_hw)
        for image_id in selected_ids
    }
    return imagecols, depths
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Hypersim import Hypersim
from loader import read_scene_hypersim

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import limap.util.config as cfgutils
import limap.runners

def run_scene_hypersim(cfg, dataset, scene_id, cam_id=0):
    """Triangulate 3D line tracks for one Hypersim scene."""
    imagecols = read_scene_hypersim(cfg, dataset, scene_id, cam_id=cam_id, load_depth=False)
    linetracks = limap.runners.line_triangulation(cfg, imagecols)
    return linetracks

def parse_config():
    """Parse CLI arguments and assemble the triangulation configuration."""
    import argparse
    parser = argparse.ArgumentParser(description='triangulate 3d lines')
    parser.add_argument('-c', '--config_file', type=str, default='cfgs/triangulation/hypersim.yaml', help='config file')
    parser.add_argument('--default_config_file', type=str, default='cfgs/triangulation/default.yaml', help='default config file')
    parser.add_argument('--npyfolder', type=str, default=None, help='folder to load precomputed results')

    args, unknown = parser.parse_known_args()
    cfg = cfgutils.load_config(args.config_file, default_path=args.default_config_file)
    shortcuts = {
        '-nv': '--n_visible_views',
        '-nn': '--n_neighbors',
        '-sid': '--scene_id',
    }
    cfg = cfgutils.update_config(cfg, unknown, shortcuts)
    # Fall back to the conventional precomputed folder when none is given.
    folder = args.npyfolder
    if folder is None:
        folder = os.path.join("precomputed", "hypersim", cfg["scene_id"])
    cfg["folder_to_load"] = folder
    return cfg

def main():
    cfg = parse_config()
    dataset = Hypersim(cfg["data_dir"])
    run_scene_hypersim(cfg, dataset, cfg["scene_id"], cam_id=cfg["cam_id"])

if __name__ == '__main__':
    main()
import os, sys
import numpy as np

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Rome16K import Rome16K

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import limap.base as _base
import limap.pointsfm as _psfm
import limap.util.io as limapio
import limap.util.config as cfgutils
import limap.runners

from runners.bundler_triangulation import read_scene_bundler

def report_rome16k_statistics(cfg, bundler_path, list_path, model_path):
    '''
    Print the image count of every Rome16K component, largest first.
    '''
    dataset = Rome16K(os.path.join(bundler_path, list_path),
                      os.path.join(bundler_path, cfg["component_folder"]))
    counts = [dataset.count_images_in_component(comp_id)
              for comp_id in range(dataset.count_components())]
    # Report component ids ordered by descending image count.
    for index in np.argsort(counts)[::-1].tolist():
        print(index, counts[index])

def parse_config():
    """Parse CLI arguments into the run configuration dict."""
    import argparse
    parser = argparse.ArgumentParser(description='triangulate 3d lines from specific component of Rome16k (bundler format).')
    parser.add_argument('-c', '--config_file', type=str, default='cfgs/triangulation/default.yaml', help='config file')
    parser.add_argument('--default_config_file', type=str, default='cfgs/triangulation/default.yaml', help='default config file')
    parser.add_argument('-a', '--bundler_path', type=str, required=True, help='bundler path')
    parser.add_argument('-l', '--list_path', type=str, default='bundle/list.orig.txt', help='image list path')
    parser.add_argument('-m', '--model_path', type=str, default='bundle/bundle.orig.out', help='model path')
    parser.add_argument('--component_folder', type=str, default='bundle/components', help='component folder')

    args, unknown = parser.parse_known_args()
    cfg = cfgutils.load_config(args.config_file, default_path=args.default_config_file)
    shortcuts = {
        '-nv': '--n_visible_views',
        '-nn': '--n_neighbors',
    }
    cfg = cfgutils.update_config(cfg, unknown, shortcuts)
    cfg["bundler_path"] = args.bundler_path
    cfg["list_path"] = args.list_path
    cfg["model_path"] = args.model_path
    # components
    cfg["component_folder"] = args.component_folder
    return cfg

def main():
    cfg = parse_config()
    report_rome16k_statistics(cfg, cfg["bundler_path"], cfg["list_path"], cfg["model_path"])

if __name__ == '__main__':
    main()
import os, sys
import numpy as np
import cv2

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import limap.base as _base

class ScanNetDepthReader(_base.BaseDepthReader):
    """Lazy reader for ScanNet depth maps (stored as uint16 millimeters)."""

    def __init__(self, filename):
        super(ScanNetDepthReader, self).__init__(filename)

    def read(self, filename):
        # Convert the raw uint16 millimeter map into float32 meters.
        depth_mm = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
        return depth_mm.astype(np.float32) / 1000.0

def read_scene_scannet(cfg, dataset, scene_id, load_depth=False):
    """Load a ScanNet scene as an ImageCollection plus trivial neighbors.

    Returns (imagecols, neighbors) or, when load_depth is True,
    (imagecols, neighbors, depths).
    """
    dataset.set_scene_id(scene_id)
    # Depth maps are 480x640, so images must be resized to that resolution;
    # otherwise just cap the maximum image dimension.
    if load_depth:
        dataset.set_img_hw_resized((480, 640))
    else:
        dataset.set_max_dim(cfg["max_image_dim"])

    # Images and cameras (one shared PINHOLE camera with id 0).
    dataset.set_stride(cfg["stride"])
    imname_list = dataset.load_imname_list()
    K = dataset.load_intrinsics()
    img_hw = dataset.get_img_hw()
    Ts, Rs = dataset.load_cameras()
    cameras = [_base.Camera("PINHOLE", K, cam_id=0, hw=img_hw)]
    camimages = [
        _base.CameraImage(0, _base.CameraPose(Rs[idx], Ts[idx]), image_name=imname_list[idx])
        for idx in range(len(imname_list))
    ]
    imagecols = _base.ImageCollection(cameras, camimages)

    # TODO: advanced implementation with the original ids
    # Trivial neighbors: for each image, the n_neighbors images that are
    # closest in frame index (excluding the image itself).
    index_list = np.arange(0, len(imname_list)).tolist()
    neighbors = {}
    for idx, image_id in enumerate(index_list):
        dist = np.abs(np.array(index_list) - image_id)
        dist[idx] = dist.max() + 1  # push the image itself to the end
        nearest = np.array(index_list)[np.argsort(dist)[:cfg["n_neighbors"]]]
        neighbors[image_id] = nearest.tolist()

    if not load_depth:
        return imagecols, neighbors
    depths = {
        img_id: ScanNetDepthReader(dataset.get_depth_fname(imname))
        for img_id, imname in enumerate(imname_list)
    }
    return imagecols, neighbors, depths
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
import cv2
from tqdm import tqdm

import limap.base as _base
import limap.undistortion as _undist
import pdb

data_dir = os.path.expanduser('~/data/Localization/Aachen-1.1')
img_orig_dir = os.path.join(data_dir, 'images_upright')
img_undistort_dir = os.path.join(data_dir, 'undistorted')
list_file = os.path.join(data_dir, 'queries', 'night_time_queries_with_intrinsics.txt')
camerainfos_file = 'camerainfos_night_undistorted.txt'

def load_list_file(fname):
    """Parse an Aachen query list with intrinsics.

    Each line is "<imname> SIMPLE_RADIAL <width> <height> <f> <cx> <cy> <k1>".

    Returns:
        (imname_list, cameras): image names and matching _base.Camera objects.
    """
    with open(fname, 'r') as f:
        lines = f.readlines()
    imname_list, cameras = [], []
    for line in lines:
        line = line.strip('\n')
        k = line.split(' ')
        imname = k[0]
        # Aachen only uses simple radial model
        assert k[1] == 'SIMPLE_RADIAL'
        # BUGFIX: the query format stores width before height, but the
        # original code assigned "h, w = int(k[2]), int(k[3])", swapping the
        # two. The values are currently unused below, yet the swapped names
        # were misleading for future changes.
        w, h = int(k[2]), int(k[3])
        # Renamed from `f` to avoid shadowing the file-handle name above.
        focal = float(k[4])
        cx, cy = float(k[5]), float(k[6])
        k1 = float(k[7])
        K = np.array([[focal, 0, cx], [0, focal, cy], [0, 0, 1.0]])
        camera = _base.Camera(K, np.eye(3), np.zeros((3)), np.array([k1, 0, 0, 0, 0]))
        imname_list.append(imname)
        cameras.append(camera)
    return imname_list, cameras

def process(image_list, cameras):
    """Undistort every query image and write a SIMPLE_PINHOLE camera list."""
    with open(camerainfos_file, 'w') as f:
        n_images = len(image_list)
        for img_id in tqdm(range(n_images)):
            imname, camera = image_list[img_id], cameras[img_id]
            imname_orig = os.path.join(img_orig_dir, imname)
            imname_undist = os.path.join(img_undistort_dir, imname)
            path = os.path.dirname(imname_undist)
            if not os.path.exists(path):
                os.makedirs(path)
            camera_undistorted = _undist.UndistortImageCamera(camera, imname_orig, imname_undist)
            img = cv2.imread(imname_undist)
            h, w = img.shape[0], img.shape[1]
            # Undistortion is expected to produce equal focal lengths.
            assert camera_undistorted.K[0, 0] == camera_undistorted.K[1, 1]
            fx = camera_undistorted.K[0, 0]
            cx, cy = camera_undistorted.K[0, 2], camera_undistorted.K[1, 2]
            f.write("{0} SIMPLE_PINHOLE {1} {2} {3} {4} {5}\n".format(imname_undist, w, h, fx, cx, cy))

if __name__ == '__main__':
    image_list, cameras = load_list_file(list_file)
    process(image_list, cameras)
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import limap.base as _base
import limap.pointsfm as _psfm
import limap.util.io as limapio

if __name__ == "__main__":
    import argparse
    arg_parser = argparse.ArgumentParser(description="model conversion")
    arg_parser.add_argument("-i", "--input_path", required=True, type=str, help="input path")
    arg_parser.add_argument("-o", "--output_path", required=True, type=str, help="output path")
    arg_parser.add_argument("--type", type=str, default="imagecols2colmap", help="conversion type")
    args = arg_parser.parse_args()

    if args.type == "imagecols2colmap":
        # The npy file holds either a serialized ImageCollection dict or the
        # object itself; normalize to an ImageCollection before converting.
        imagecols = limapio.read_npy(args.input_path).item()
        # Idiomatic type check (was: type(imagecols) == dict).
        if isinstance(imagecols, dict):
            imagecols = _base.ImageCollection(imagecols)
        _psfm.convert_imagecols_to_colmap(imagecols, args.output_path)
    elif args.type == "colmap2vsfm":
        _psfm.convert_colmap_to_visualsfm(args.input_path, args.output_path)
    else:
        raise NotImplementedError
'colmap' 7 | folder_list = os.listdir(path) 8 | 9 | for folder in folder_list: 10 | if folder != scene_id: 11 | continue 12 | input_folder = os.path.join(path, folder) 13 | output_folder = os.path.join(output_path, path, folder) 14 | if not os.path.exists(output_folder): 15 | os.makedirs(output_folder) 16 | sparse_folder = os.path.join(output_folder, "sparse") 17 | if not os.path.exists(sparse_folder): 18 | os.makedirs(sparse_folder) 19 | dense_folder = os.path.join(output_folder, "dense") 20 | if not os.path.exists(dense_folder): 21 | os.makedirs(dense_folder) 22 | database_path = os.path.join(output_folder, 'database.db') 23 | 24 | cmd = 'colmap feature_extractor --database_path {0} --image_path {1}'.format(database_path, input_folder) 25 | print(cmd) 26 | os.system(cmd) 27 | cmd = 'colmap exhaustive_matcher --database_path {0}'.format(database_path) 28 | print(cmd) 29 | os.system(cmd) 30 | cmd = 'colmap mapper --database_path {0} --image_path {1} --output_path {2}'.format(database_path, input_folder, sparse_folder) 31 | print(cmd) 32 | os.system(cmd) 33 | cmd = 'colmap image_undistorter --image_path {0} --input_path {1} --output_path {2} --output_type COLMAP'.format(input_folder, os.path.join(sparse_folder, '0'), dense_folder) 34 | print(cmd) 35 | os.system(cmd) 36 | time.sleep(1.0) 37 | 38 | -------------------------------------------------------------------------------- /third-party/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(HighFive) 2 | add_subdirectory(pybind11) 3 | add_subdirectory(JLinkage) 4 | add_subdirectory(libigl) 5 | add_subdirectory(RansacLib) 6 | --------------------------------------------------------------------------------