├── .gitignore
├── LICENSE
├── README.md
├── checkpoints
├── bimodal_NCL_Pretrained_Match.pth
└── bimodal_NCL_Pretrained_Match_best.pth
├── config
└── bimodal_NCL_Pretrained_Match.yaml
├── images
├── bridge
│ ├── bridge_map.png
│ ├── bridge_zoom_exp_door.png
│ ├── bridge_zoom_exp_ramp.png
│ ├── bridge_zoom_left.png
│ └── bridge_zoom_right.png
├── exp
│ ├── exp_map.png
│ ├── exp_zoom_bikes.png
│ ├── exp_zoom_bt_isec_and_garage.png
│ ├── exp_zoom_columbus_back.png
│ ├── exp_zoom_columbus_back_stairs_and_car.png
│ ├── exp_zoom_columbus_garage.png
│ ├── exp_zoom_dorm.png
│ ├── exp_zoom_isec.png
│ ├── exp_zoom_parked_cars.png
│ ├── exp_zoom_sign_and_pedestrian.png
│ ├── exp_zoom_soccer.png
│ ├── exp_zoom_squashbuster.png
│ └── exp_zoom_station.png
├── main_campus
│ ├── main_campus_map.png
│ ├── main_campus_zoom_BofA.png
│ ├── main_campus_zoom_Centennial_Common.png
│ ├── main_campus_zoom_Egan.png
│ ├── main_campus_zoom_Egan_Cross.png
│ ├── main_campus_zoom_Egan_Sidewalk.png
│ ├── main_campus_zoom_Forsyth_Sidewalk.png
│ ├── main_campus_zoom_Shillman_Hall.png
│ ├── main_campus_zoom_Snell.png
│ ├── main_campus_zoom_Willis.png
│ ├── main_campus_zoom_Willis_car.png
│ ├── main_campus_zoom_ground_pattern.png
│ ├── main_campus_zoom_law_school.png
│ ├── main_campus_zoom_near_Snell.png
│ └── main_campus_zoom_painting.png
├── map.png
├── map_neu.png
└── newer_college
│ ├── cloister.png
│ ├── long.png
│ ├── math_easy.png
│ ├── math_hard.png
│ ├── math_medium.png
│ ├── mount.png
│ ├── park.png
│ ├── quad_easy.png
│ ├── quad_hard.png
│ ├── quad_medium.png
│ ├── quad_w_dyn.png
│ ├── short.png
│ └── spin.png
├── keypoint_node
├── __init__.py
└── keypoint_node.py
├── model
├── bimodal_compressor.py
├── handcrafted_feature_extractor.py
├── kpconv.py
└── utils.py
├── package.xml
├── requirements.txt
├── setup.cfg
├── setup.py
└── submodules
└── octree_handler
├── .clang-format
├── .gitignore
├── .gitlab-ci.yml
├── CMakeLists.txt
├── setup.py
├── src
├── CMakeLists.txt
├── Octree.hpp
├── OctreeHandler.cpp
└── OctreeHandler.h
└── tests
└── octree_cpp_testing.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Northeastern Autonomy & Intelligence Laboratory
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LiDAR Inertial Odometry and Mapping Using Learned Registration-Relevant Features
2 |
3 | ## Update
4 |
5 | * Our paper has been accepted to IEEE ICRA 2025! Looking forward to meeting fellow researchers in Atlanta.
6 | * Code for feature extraction (Python ROS node) has been released!
7 | * Code for DLIOM (C++) will be released separately.
8 |
9 | ## About
10 |
11 | This repository contains the official implementation of the feature extractor network and ROS2 node proposed in the paper "LiDAR Inertial Odometry and Mapping Using Learned Registration-Relevant Features". It achieves robust and efficient real-time LiDAR Inertial Odometry using a lightweight neural-network-based feature extractor, as opposed to previous feature-based methods that rely on hand-crafted heuristics and parameters. More detailed maps are shown in the last section.
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | ## Dependencies
20 |
21 | * ROS2 Humble
22 | * Ubuntu 22.04
23 | * Core python packages and version can be found in `requirements.txt`.
24 | * Install `octree_handler`: ```cd submodules/octree_handler && pip3 install -U .```
25 |
26 | ## Running
27 |
28 | ### Building
29 | Build the ROS package using `Colcon`:
30 |
31 | ```
32 | colcon build --packages-select FeatureLIOM
33 | source install/setup.bash
34 | ros2 run FeatureLIOM extract
35 | ```
36 | The node listens to the specified `pcd_topic`, and publishes keypoint **indices** on `downsampled_topic`. This implementation assumes the odometry code maintains the point ordering until the dense point cloud gets compressed.
37 |
38 | ## Generated Maps
39 |
40 | We present detailed maps of the Northeastern University Campus and Newer College Dataset in this section.
41 |
42 | ### Northeastern University Main Campus (727.50m)
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 | ### Northeastern University ISEC and Columbus Garage (548.32m)
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 | ### Northeastern University ISEC Bridge
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 | ### Newer College Dataset
131 |
132 | |
|
|
133 | | ----- | ----- |
134 | | *Newer College Short* | *Newer College Long* |
135 |
136 | |
|
|
137 | | ----- | ----- |
138 | | *Newer College Mount* | *Newer College Park* |
139 |
140 | |
|
|
141 | | ----- | ----- |
142 | | *Newer College Quad with Dynamics* | *Newer College Quad Hard* |
143 |
144 | |
|
|
145 | | ----- | ----- |
146 | | *Newer College Quad Medium* | *Newer College Quad Easy* |
147 |
148 | |
|
|
149 | | ----- | ----- |
150 | | *Newer College Math Easy* | *Newer College Math Medium* |
151 |
152 | |
|
|
153 | | ----- | ----- |
154 | | *Newer College Math Hard* | *Newer College Cloister* |
155 |
156 | ## Acknowledgement
157 |
158 | We would also like to thank Alexander Estornell, Sahasrajit Anantharamakrishnan, and Yash Mewada for setting up the scout robot hardware, and Hanna Zhang, Yanlong Ma, Kenny Chen, and Nakul Joshi for help with data collection and comments.
159 |
--------------------------------------------------------------------------------
/checkpoints/bimodal_NCL_Pretrained_Match.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/checkpoints/bimodal_NCL_Pretrained_Match.pth
--------------------------------------------------------------------------------
/checkpoints/bimodal_NCL_Pretrained_Match_best.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/checkpoints/bimodal_NCL_Pretrained_Match_best.pth
--------------------------------------------------------------------------------
/config/bimodal_NCL_Pretrained_Match.yaml:
--------------------------------------------------------------------------------
1 | experiment_name: "bimodal_NCL_Pretrained_Match"
2 |
3 | # ==========================
4 | # Training arguments
5 | # ==========================
6 |
7 | # batch size is simulated using gradient accumulation
8 | batch_size : 8
9 | epoch : 400
10 | lr : 0.001
11 | weight_decay: 0.0001
12 | dataset : "attack"
13 | normalize : False
14 | data_root : "./random_attack/patch_adding_scored_cloud/Working_SemanticKITTI_no_ground"
15 | ckpt_dir : "checkpoints"
16 | pretrained_backbone : "voxelize_downsample_no_cov_input_ncl_contrastive_transformfree_dataset"
17 | reg_weight : 0.05
18 | v_res : 0.25
19 | cov_loss : False
20 | salient_margin : 0.5
21 | uniqueness_margin : 0.5
22 |
23 | # ==========================
24 | # lr scheduler arguments
25 | # ==========================
26 |
27 | scheduler : "OneCycle"
28 | max_lr : 0.001
29 | start_lr : 0.00004
30 | end_lr : 0.00000001
31 | pct_start : 0.1
32 | anneal_strategy : "cos"
33 |
34 | # ==========================
35 | # Network Architecture
36 | # ==========================
37 | model : "bimodal"
38 | input_feature_dim : 1
39 | output_feature_dim : 32
40 | use_position_embedding : True
41 | max_corr_dist : 0.2
42 | position_embedding_hidden_dim : 32
43 | position_embedding_dim : 64
44 | postprocessing_hidden_dim : 128
45 | processed_feature_dim : 256
46 | inv_score : False
47 | knn : 50
48 | n_c : 2000
49 | resolution : 0.25
50 | kernel_radius : 1.5
51 | num_kernel_points : 27
52 | deformable : True
53 |
54 | # ==========================
55 | # Logging
56 | # ==========================
57 | logdir : 'log'
--------------------------------------------------------------------------------
/images/bridge/bridge_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/bridge/bridge_map.png
--------------------------------------------------------------------------------
/images/bridge/bridge_zoom_exp_door.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/bridge/bridge_zoom_exp_door.png
--------------------------------------------------------------------------------
/images/bridge/bridge_zoom_exp_ramp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/bridge/bridge_zoom_exp_ramp.png
--------------------------------------------------------------------------------
/images/bridge/bridge_zoom_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/bridge/bridge_zoom_left.png
--------------------------------------------------------------------------------
/images/bridge/bridge_zoom_right.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/bridge/bridge_zoom_right.png
--------------------------------------------------------------------------------
/images/exp/exp_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_map.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_bikes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_bikes.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_bt_isec_and_garage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_bt_isec_and_garage.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_columbus_back.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_columbus_back.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_columbus_back_stairs_and_car.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_columbus_back_stairs_and_car.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_columbus_garage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_columbus_garage.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_dorm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_dorm.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_isec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_isec.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_parked_cars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_parked_cars.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_sign_and_pedestrian.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_sign_and_pedestrian.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_soccer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_soccer.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_squashbuster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_squashbuster.png
--------------------------------------------------------------------------------
/images/exp/exp_zoom_station.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/exp/exp_zoom_station.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_map.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_BofA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_BofA.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Centennial_Common.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Centennial_Common.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Egan.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Egan.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Egan_Cross.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Egan_Cross.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Egan_Sidewalk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Egan_Sidewalk.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Forsyth_Sidewalk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Forsyth_Sidewalk.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Shillman_Hall.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Shillman_Hall.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Snell.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Snell.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Willis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Willis.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_Willis_car.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_Willis_car.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_ground_pattern.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_ground_pattern.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_law_school.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_law_school.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_near_Snell.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_near_Snell.png
--------------------------------------------------------------------------------
/images/main_campus/main_campus_zoom_painting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/main_campus/main_campus_zoom_painting.png
--------------------------------------------------------------------------------
/images/map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/map.png
--------------------------------------------------------------------------------
/images/map_neu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/map_neu.png
--------------------------------------------------------------------------------
/images/newer_college/cloister.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/cloister.png
--------------------------------------------------------------------------------
/images/newer_college/long.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/long.png
--------------------------------------------------------------------------------
/images/newer_college/math_easy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/math_easy.png
--------------------------------------------------------------------------------
/images/newer_college/math_hard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/math_hard.png
--------------------------------------------------------------------------------
/images/newer_college/math_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/math_medium.png
--------------------------------------------------------------------------------
/images/newer_college/mount.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/mount.png
--------------------------------------------------------------------------------
/images/newer_college/park.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/park.png
--------------------------------------------------------------------------------
/images/newer_college/quad_easy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/quad_easy.png
--------------------------------------------------------------------------------
/images/newer_college/quad_hard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/quad_hard.png
--------------------------------------------------------------------------------
/images/newer_college/quad_medium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/quad_medium.png
--------------------------------------------------------------------------------
/images/newer_college/quad_w_dyn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/quad_w_dyn.png
--------------------------------------------------------------------------------
/images/newer_college/short.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/short.png
--------------------------------------------------------------------------------
/images/newer_college/spin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/images/newer_college/spin.png
--------------------------------------------------------------------------------
/keypoint_node/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/neu-autonomy/FeatureLIOM/63ab56f2306cb71d7b6a87850ca453ffe9831571/keypoint_node/__init__.py
--------------------------------------------------------------------------------
/keypoint_node/keypoint_node.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import rclpy
4 | from rclpy.node import Node
5 | from sensor_msgs.msg import PointCloud2, PointField
6 | from std_msgs.msg import Header, Int32MultiArray
7 | import struct
8 | from sensor_msgs_py import point_cloud2
9 |
10 | from ruamel import yaml
11 | import os
12 | import time
13 |
14 | import torch
15 | import numpy as np
16 | import open3d as o3d
17 |
18 | from model.bimodal_compressor import BimodalCompressor
19 | from model.handcrafted_feature_extractor import compute_smoothness
20 | from model.utils import gridSampling
21 |
22 |
23 | '''
24 | Read point cloud from newer college dataset ply files
25 | '''
26 | def read_point_cloud(filename):
27 |
28 | pcd = o3d.t.io.read_point_cloud(filename)
29 | points = pcd.point.positions.numpy()
30 | dist_to_correspondence = pcd.point.intensities.numpy()
31 |
32 | return points, dist_to_correspondence
33 |
34 |
35 | class KeypointNode(Node):
36 |
37 | '''
38 | Parameters:
39 | pcd_topic : topic to subscribe to for deskewed point cloud
40 | downsampled_topic : topic to publish keypoints to
41 | cfg : config file to load model checkpoint
42 | mode : inference mode, choose from "match_only", "gicp_only", "bimodal", and "handcrafted"
43 | lidar_range : sensing range of the used LiDAR
44 | '''
45 | def __init__(self,
46 | pcd_topic="/dliom/odom_node/compress",
47 | downsampled_topic="/PointRec/descriptor_cloud",
48 | use_model=False,
49 | cfg="src/FeatureLIOM/config/bimodal_NCL_Pretrained_Match.yaml",
50 | mode="bimodal",
51 | lidar_range=50.0
52 | ):
53 |
54 | super().__init__('KeypointNode')
55 |
56 | self.use_model = use_model
57 | self.mode = mode
58 | self.range = lidar_range
59 |
60 | if self.mode not in ["match_only", "gicp_only", "bimodal", "handcrafted"]:
61 | print(f"Unknown compression mode {self.mode}")
62 | exit(1)
63 |
64 | self.deskewed_sub = self.create_subscription(
65 | PointCloud2,
66 | topic=pcd_topic,
67 | callback=self.pcd_callback,
68 | qos_profile=1
69 | )
70 |
71 | self.compressed_pub = self.create_publisher(
72 | Int32MultiArray,
73 | topic=downsampled_topic,
74 | qos_profile=1
75 | )
76 |
77 | self.scan_cnt = 0
78 |
79 | y = yaml.YAML(typ='safe', pure=True)
80 |
81 | self.bimodal_config = y.load(open(cfg, 'r'))
82 |
83 | ckpt_path = "./src/FeatureLIOM/" + self.bimodal_config['ckpt_dir'] + '/' + self.bimodal_config['experiment_name'] + "_best.pth"
84 |
85 | self.bimodal_model = BimodalCompressor(self.bimodal_config)
86 |
87 | if os.path.isfile(ckpt_path) and use_model:
88 | print(f"Loaded model from {ckpt_path}")
89 | self.bimodal_model.load_state_dict(torch.load(ckpt_path))
90 |
91 | self.bimodal_model = self.bimodal_model.cuda()
92 | self.bimodal_model.eval()
93 |
94 | for _ in range(5):
95 | randinput = torch.rand((1000, 3)).float().cuda()
96 | _ = self.bimodal_model(randinput, None)
97 |
98 | print("Initialization Finished")
99 |
100 |
101 | def pcd_callback(self, msg):
102 |
103 | start = time.time()
104 |
105 | point_cloud, rings = self.msg_to_torch_pcd(msg)
106 |
107 | if self.use_model:
108 |
109 | with torch.no_grad():
110 |
111 | all_indices = torch.arange(len(point_cloud)).cuda()
112 |
113 | if self.mode == 'handcrafted':
114 |
115 | smoothness = compute_smoothness(point_cloud, rings, k=10)
116 |
117 | # incoming scan is not voxelized
118 | # to maintain fair comparison we compute rough number of points if we use 0.25 voxel filter
119 | rough_indices = gridSampling(point_cloud, resolution=0.25)
120 | rough_num = len(rough_indices)
121 |
122 | # extract top 5% as edge
123 | edge_indices = torch.topk(
124 | smoothness,
125 | k = int(0.05*rough_num),
126 | largest = True
127 | ).indices.flatten()
128 |
129 | all_indices = torch.arange(0, len(smoothness)).cuda()
130 | planar_indices = ~torch.isin(all_indices, edge_indices).cuda()
131 | planar_indices = all_indices[planar_indices].flatten()
132 | indices = torch.randperm(len(planar_indices))[:int(0.15*rough_num)]
133 |
134 | # rest = point_cloud[planar_indices]
135 | # indices = gridSampling(rest, resolution=1.2)
136 |
137 | planar_indices = planar_indices[indices]
138 |
139 | direct_kp_indices = torch.cat([edge_indices, planar_indices])
140 |
141 | else:
142 |
143 | match_score, gicp_score, _ = self.bimodal_model(point_cloud, None)
144 |
145 | if self.mode == 'gicp_only':
146 |
147 | # Only choose top 20% GICP feature points
148 | gicp_indices = torch.topk(
149 | gicp_score,
150 | k = int(0.2*len(gicp_score)),
151 | largest = False
152 | ).indices.flatten()
153 |
154 | direct_kp_indices = gicp_indices
155 |
156 | elif self.mode == 'match_only':
157 |
158 | # Only choose top 20% Match feature points
159 | match_indices = torch.topk(
160 | match_score,
161 | k = int(0.2*len(match_score)),
162 | largest = True
163 | ).indices.flatten()
164 |
165 | direct_kp_indices = match_indices
166 |
167 | else:
168 |
169 | # Choose top 10% GICP feature and top 10% Match feature
170 | gicp_indices = torch.topk(
171 | gicp_score,
172 | k = int(0.1*len(gicp_score)),
173 | largest = False
174 | ).indices.flatten()
175 |
176 | # to avoid selecting same points
177 | match_score[gicp_indices] = torch.min(match_score)
178 |
179 | match_indices = torch.topk(
180 | match_score,
181 | k = int(0.1*len(match_score)),
182 | largest = True
183 | ).indices.flatten()
184 |
185 | direct_kp_indices = torch.cat([match_indices, gicp_indices])
186 |
187 | point_norms = torch.norm(point_cloud, dim=1)
188 | coverage_indices = point_norms >= 0.90 * self.range
189 | coverage_indices = coverage_indices.nonzero().flatten()
190 |
191 | if len(coverage_indices) >= 0.01 * point_cloud.shape[0]:
192 | direct_kp_indices = torch.cat([direct_kp_indices, coverage_indices])
193 |
194 | mask = torch.ones(len(point_cloud), dtype=torch.bool)
195 | mask[direct_kp_indices] = False
196 | to_be_compressed = point_cloud[mask]
197 | # to_be_compressed_score = score[mask]
198 | # _, once_indices = torch.topk(to_be_compressed_score, int(0.34*len(to_be_compressed_score)))
199 |
200 | to_be_compressed_indices = all_indices[mask]
201 |
202 | else:
203 |
204 | # randomly select 20% points from the dense point cloud
205 | all_indices = torch.arange(len(point_cloud)).cuda()
206 | direct_kp_indices = torch.randint(low=0, high=len(point_cloud), size=(int(0.2*len(point_cloud)),)).cuda()
207 |
208 | mask = torch.ones(len(point_cloud), dtype=torch.bool)
209 | mask[direct_kp_indices] = False
210 | to_be_compressed = point_cloud[mask]
211 |
212 | # once_indices = torch.randint(low=0, high=len(to_be_compressed), size=(int(0.34*len(to_be_compressed)),)).cuda()
213 |
214 | to_be_compressed_indices = all_indices[mask]
215 |
216 | # sample a sparse skeleton point cloud
217 | sparse_indices = gridSampling(to_be_compressed, resolution=3.5)
218 |
219 | all_indices = torch.hstack([
220 | direct_kp_indices,
221 | to_be_compressed_indices[sparse_indices]
222 | ])
223 |
224 | # publish indices of points to keep
225 | # This implementation is more efficient than sending points directly
226 | # however it requires the odometry code to maintain the ordering of points
227 | # until they gets compressed (receive indices from this node)
228 | published_msg = Int32MultiArray()
229 | published_msg.data = all_indices.to(torch.int32).tolist()
230 | self.compressed_pub.publish(published_msg)
231 |
232 | runtime = time.time() - start
233 | self.scan_cnt += 1
234 |
235 | dense_size = point_cloud.shape[0]
236 | if self.use_model and self.mode == 'handcrafted':
237 | dense_size = rough_num
238 |
239 | print(f"original cloud size: {dense_size}, compresed cloud size: {all_indices.shape[0]}. Runtime {runtime}s.")
240 |
241 |
242 | def msg_to_torch_pcd(self, msg):
243 |
244 | point_cloud = point_cloud2.read_points(
245 | msg,
246 | skip_nans=True,
247 | field_names=["x", "y", "z", "intensity", "t", "ring"],
248 | reshape_organized_cloud=True
249 | )
250 |
251 | points = np.hstack([
252 | point_cloud['x'].reshape(-1, 1),
253 | point_cloud['y'].reshape(-1, 1),
254 | point_cloud['z'].reshape(-1, 1)
255 | ])
256 |
257 | rings = point_cloud['ring'].reshape(-1, 1)
258 |
259 | return torch.from_numpy(points).float().cuda(), torch.from_numpy(rings).float().cuda()
260 |
261 |
262 | def convert_to_pc2_msg(self, points, frame_id='odom'):
263 |
264 | msg = PointCloud2()
265 | msg.header = Header(frame_id=frame_id)
266 |
267 | if not points.size == 0:
268 |
269 | # fill in meta data
270 | msg.height = 1
271 | msg.width = len(points)
272 | msg.fields = [
273 | PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
274 | PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
275 | PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1)
276 | ]
277 | msg.is_bigendian = False
278 | msg.point_step = 12
279 | msg.row_step = 12 * points.shape[0]
280 | msg.is_dense = False
281 |
282 | # convert numpy points to bytes and pass to msg data
283 | buffer = []
284 | for point in points:
285 | buffer.append(struct.pack('fff', *point))
286 | msg.data = b''.join(buffer)
287 |
288 | return msg
289 |
290 |
def main(
    pcd_topic="/dliom/odom_node/compress",
    downsampled_topic="/PointRec/descriptor_cloud",
    use_model=True,
    bimodal_cfg="src/FeatureLIOM/config/bimodal_NCL_Pretrained_Match.yaml",
    mode="bimodal",
    lidar_range=128.0
):
    '''
    Entry point: initialize rclpy, spin a KeypointNode, and shut down cleanly.

    Parameters mirror KeypointNode.__init__ and are forwarded positionally.
    '''
    rclpy.init()
    node = KeypointNode(
        pcd_topic,
        downsampled_topic,
        use_model,
        bimodal_cfg,
        mode,
        lidar_range
    )
    try:
        # spin() blocks until shutdown or interruption
        rclpy.spin(node)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the node, not an error
        pass
    finally:
        # Fix: the original leaked the node and skipped shutdown when spin()
        # raised; always release ROS resources.
        node.destroy_node()
        rclpy.shutdown()
--------------------------------------------------------------------------------
/model/bimodal_compressor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 | import octree_handler
6 |
7 | from model.kpconv import KPConv
8 |
9 |
class BimodalCompressor(nn.Module):
    '''
    KPConv-based network that predicts two per-point scores used to rank
    points for scan compression: a "match" score and a "unique" score.
    '''

    def __init__(self, config):

        super().__init__()

        self.config = config

        # feature dimensions from the YAML config
        self.input_feature_dim = config['input_feature_dim']
        self.output_feature_dim = config['output_feature_dim']
        self.postprocess_hidden_dim = config['postprocessing_hidden_dim']
        self.processed_feature_dim = config['processed_feature_dim']
        # optional and defaults to False for older configs
        self.use_position_embedding = config['use_position_embedding'] if 'use_position_embedding' in config else False

        # number of neighbors per point for the radius search
        self.k = config['knn']

        # intialize octree
        self.octree = octree_handler.Octree()

        # initialize sampling resolution
        self.resolution = config['resolution']

        # preactivate to be applied on point covariance matrices
        self.preactivation = nn.Sequential(
            nn.Linear(
                in_features = self.input_feature_dim,
                out_features = self.output_feature_dim
            ),
            nn.BatchNorm1d(num_features = self.output_feature_dim),
            nn.LeakyReLU()
        )

        # initializing KPConv
        # Kernel radius should be larger than resolution to allow
        # efficient information exchanging between neighboring candidates
        kp_in_dim = self.output_feature_dim if self.input_feature_dim > 1 else self.input_feature_dim
        self.kernel_radius = config['kernel_radius']
        self.num_kernel_points = config['num_kernel_points']
        # NOTE(review): extent heuristic appears to assume kernel points on a
        # cubic grid (cube-root spacing, scaled by 1.5) — confirm intent.
        self.kp_extent = self.kernel_radius / (self.num_kernel_points**(1/3)-1)*1.5
        self.kp_conv = KPConv(
            kernel_size = self.num_kernel_points,
            p_dim = 3,
            in_channels = kp_in_dim,
            out_channels = self.output_feature_dim,
            KP_extent = self.kp_extent,
            radius = self.kernel_radius,
            deformable = config['deformable']
        )

        if self.use_position_embedding:
            # small MLP that embeds raw XYZ coordinates
            self.position_embedding_hidden_dim = config['position_embedding_hidden_dim'] if 'position_embedding_hidden_dim' in config else 64
            self.position_embedding_dim = config['position_embedding_dim'] if 'position_embedding_dim' in config else 256
            self.position_embedding = nn.Sequential(
                nn.Linear(
                    in_features = 3,
                    out_features = self.position_embedding_hidden_dim
                ),
                nn.BatchNorm1d(num_features = self.position_embedding_hidden_dim),
                nn.LeakyReLU(),
                nn.Linear(
                    in_features = self.position_embedding_hidden_dim,
                    out_features = self.position_embedding_dim
                ),
                nn.BatchNorm1d(num_features = self.position_embedding_dim),
                nn.LeakyReLU()
            )

        # feature aggregation layer
        self.feature_aggregation = nn.Sequential(
            nn.Linear(
                in_features = self.output_feature_dim,
                out_features = self.postprocess_hidden_dim
            ),
            nn.BatchNorm1d(num_features = self.postprocess_hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(
                in_features = self.postprocess_hidden_dim,
                out_features = self.processed_feature_dim
            ),
            nn.BatchNorm1d(num_features = self.processed_feature_dim),
            nn.LeakyReLU()
        )

        # score prediction layer (match score head)
        self.keep_score_extraction = nn.Sequential(
            nn.Linear(
                in_features = self.processed_feature_dim,
                out_features = self.processed_feature_dim // 4
            ),
            nn.BatchNorm1d(num_features = self.processed_feature_dim // 4),
            nn.LeakyReLU(),
            nn.Linear(
                in_features = self.processed_feature_dim // 4,
                out_features = 1
            )
        )

        # unique-score head optionally also sees the position embedding
        if self.use_position_embedding:
            first_input_dim = self.processed_feature_dim + self.position_embedding_dim
        else:
            first_input_dim = self.processed_feature_dim
        self.unique_score_extraction = nn.Sequential(
            nn.Linear(
                in_features = first_input_dim,
                out_features = first_input_dim // 4
            ),
            nn.BatchNorm1d(num_features = first_input_dim // 4),
            nn.LeakyReLU(),
            nn.Linear(
                in_features = first_input_dim // 4,
                out_features = 1
            )
        )

        self.sigmoid = nn.Sigmoid()


    def forward(self, points, covariances):
        '''
        Score every input point.

        Parameters:
            points : (n_s, 3) CUDA float tensor of point coordinates
            covariances : per-point covariances, viewed as (n_s, 9) when
                input_feature_dim > 1; ignored (may be None) otherwise

        Returns:
            match_score : (n_s,) sigmoid scores
            unique_score : (n_s,) sigmoid scores
            neighbors_index : (n_s, k) neighbor indices used by KPConv
        '''

        if self.input_feature_dim > 1:

            covariances = covariances.view(-1, 9)

            # Shape: n_s, output_feature_dim
            features = self.preactivation(covariances)

        else:

            # constant features when no covariances are provided
            # Shape: n_s, 1
            features = torch.ones((points.shape[0], 1)).float().cuda()

        # Generate neighbor indices for KPConv (CPU round-trip: the octree
        # implementation operates on numpy arrays)
        self.octree.setInput(points.detach().cpu().numpy())
        # Shape: n_s, k
        neighbors_index = self.octree.radiusSearchIndices(
            range(len(points)), self.k, self.kernel_radius
        )
        neighbors_index = torch.from_numpy(neighbors_index).long().to(points.device)

        # Shape: n_s, output_feature_dim
        kpconv_feature = self.kp_conv(
            q_pts = points,
            s_pts = points,
            neighb_inds = neighbors_index,
            x = features
        )

        # Shape: n_s, processed_feature_dim
        if self.use_position_embedding:
            positional_embedding = self.position_embedding(points)

        aggregated_features = self.feature_aggregation(kpconv_feature)

        # Shape: n_s, 1
        match_score = self.keep_score_extraction(aggregated_features)
        match_score = self.sigmoid(match_score).flatten()

        if self.use_position_embedding:
            unique_score = self.unique_score_extraction(
                torch.hstack([positional_embedding, aggregated_features])
            )
        else:
            unique_score = self.unique_score_extraction(aggregated_features)
        unique_score = self.sigmoid(unique_score).flatten()

        return match_score, unique_score, neighbors_index
--------------------------------------------------------------------------------
/model/handcrafted_feature_extractor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from scipy.spatial import cKDTree
4 |
5 |
def compute_smoothness(points, ring_idx, k=10):
    '''
    Compute a LOAM-style smoothness (curvature) value for every point.

    For each point, the k nearest neighbors restricted to the point's own
    LiDAR ring are gathered, and the norm of the summed difference vectors
    is used as the curvature estimate: large values indicate edges, small
    values planar surfaces.

    Parameters:
        points : (N, 3) CUDA float tensor of XYZ coordinates
        ring_idx : (N, 1) tensor with the ring index of each point
        k : number of neighbors used for the estimate

    Returns:
        (N,) CUDA tensor of curvature values.
    '''
    x = points[:, 0].view(-1, 1)
    y = points[:, 1].view(-1, 1)

    # use ring * 10 as z coord, so closest neighbors are restricted to the
    # points' respective rings
    new_coords = torch.hstack([x, y, ring_idx * 10])

    # Fix: convert to numpy once instead of twice (build + query).
    coords_np = new_coords.cpu().numpy()
    tree = cKDTree(coords_np)
    _, indices = tree.query(coords_np, k + 1, workers=8)
    indices = torch.tensor(indices).cuda()

    # Gather neighbor points for each point, Shape: (N, k+1, D)
    neighbor_points = points[indices]

    # Differences between each point and its neighbors, Shape: (N, k+1, D)
    differences = neighbor_points - points.unsqueeze(1).expand(-1, k + 1, -1)

    # Sum of difference vectors per point, Shape: (N, D)
    sum_diff = differences.sum(dim=1)

    # NOTE(review): classic LOAM also normalizes by the point range ||X||;
    # the original computed that norm but never used it, so the dead
    # computation is removed here — confirm the un-normalized curvature is
    # the intended behavior.
    curvatures = sum_diff.norm(dim=1) / (k + 1)

    return curvatures
--------------------------------------------------------------------------------
/model/kpconv.py:
--------------------------------------------------------------------------------
1 | #
2 | #
3 | # 0=================================0
4 | # | Kernel Point Convolutions |
5 | # 0=================================0
6 | #
7 | #
8 | # ----------------------------------------------------------------------------------------------------------------------
9 | #
10 | # Define network blocks
11 | #
12 | # ----------------------------------------------------------------------------------------------------------------------
13 | #
14 | # Hugues THOMAS - 06/03/2020
15 | #
16 |
17 |
18 | import time
19 | import math
20 | import torch
21 | import torch.nn as nn
22 | from torch.nn.parameter import Parameter
23 | from torch.nn.init import kaiming_uniform_
24 |
25 | # from kernels.kernel_points import load_kernels
26 |
27 | # from utils.ply import write_ply
28 |
29 | # ----------------------------------------------------------------------------------------------------------------------
30 | #
31 | # Simple functions
32 | # \**********************/
33 | #
34 |
35 |
def gather(x, idx, method=0):
    """
    implementation of a custom gather operation for faster backwards.
    :param x: input with shape [N, D_1, ... D_d]
    :param idx: indexing with shape [n_1, ..., n_m]
    :param method: Choice of the method
    :return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d]
    """

    if method == 0:
        # plain advanced indexing
        return x[idx]

    if method == 1:
        # broadcast x and idx to a common 3D shape, then gather along dim 0
        expanded = x.unsqueeze(1).expand((-1, idx.shape[-1], -1))
        index = idx.unsqueeze(2).expand((-1, -1, expanded.shape[-1]))
        return expanded.gather(0, index)

    if method == 2:
        # fully general broadcast: expand x over idx's trailing dims ...
        for axis, size in enumerate(idx.size()[1:]):
            x = x.unsqueeze(axis + 1)
            shape = list(x.size())
            shape[axis + 1] = size
            x = x.expand(shape)
        # ... then expand idx over x's trailing feature dims
        offset = len(idx.size())
        for axis, size in enumerate(x.size()[offset:]):
            idx = idx.unsqueeze(axis + offset)
            shape = list(idx.size())
            shape[axis + offset] = size
            idx = idx.expand(shape)
        return x.gather(0, idx)

    raise ValueError('Unkown method')
68 |
69 |
def radius_gaussian(sq_r, sig, eps=1e-9):
    """
    Compute a radius gaussian (gaussian of distance)
    :param sq_r: input radiuses [dn, ..., d1, d0]
    :param sig: extents of gaussians [d1, d0] or [d0] or float
    :return: gaussian of sq_r [dn, ..., d1, d0]
    """
    # eps guards against division by zero for vanishing extents
    denom = 2 * sig ** 2 + eps
    return torch.exp(-sq_r / denom)
78 |
79 |
def closest_pool(x, inds):
    """
    Pools features from the closest neighbors. WARNING: this function assumes the neighbors are ordered.
    :param x: [n1, d] features matrix
    :param inds: [n2, max_num] Only the first column is used for pooling
    :return: [n2, d] pooled features matrix
    """

    # Append a zero "shadow" row so out-of-range indices pool zeros
    padded = torch.cat((x, torch.zeros_like(x[:1, :])), 0)

    # The first column holds the closest neighbor of each pooling location
    return padded[inds[:, 0]]
93 |
94 |
def max_pool(x, inds):
    """
    Pools features with the maximum values.
    :param x: [n1, d] features matrix
    :param inds: [n2, max_num] pooling indices
    :return: [n2, d] pooled features matrix
    """

    # Append a zero "shadow" row so out-of-range indices contribute zeros
    padded = torch.cat((x, torch.zeros_like(x[:1, :])), 0)

    # Features of every neighbor of every pooling location [n2, max_num, d],
    # reduced by channel-wise maximum over the neighbor axis
    neighborhood = padded[inds]
    pooled, _ = torch.max(neighborhood, 1)
    return pooled
112 |
113 |
def global_average(x, batch_lengths):
    """
    Block performing a global average over batch pooling
    :param x: [N, D] input features
    :param batch_lengths: [B] list of batch lengths
    :return: [B, D] averaged features
    """

    # Mean-pool each cloud's slice of the stacked feature matrix
    means = []
    start = 0
    for length in batch_lengths:
        means.append(x[start:start + length].mean(dim=0))
        start += length

    return torch.stack(means)
135 |
136 |
137 | # ----------------------------------------------------------------------------------------------------------------------
138 | #
139 | # KPConv class
140 | # \******************/
141 | #
142 |
143 |
class KPConv(nn.Module):

    def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
                 fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
                 deformable=False, modulated=False):
        """
        Initialize parameters for KPConvDeformable.
        :param kernel_size: Number of kernel points.
        :param p_dim: dimension of the point space.
        :param in_channels: dimension of input features.
        :param out_channels: dimension of output features.
        :param KP_extent: influence radius of each kernel point.
        :param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius
        :param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
        :param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
        :param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
        :param deformable: choose deformable or not
        :param modulated: choose if kernel weights are modulated in addition to deformed
        """
        super(KPConv, self).__init__()

        # Save parameters
        self.K = kernel_size
        self.p_dim = p_dim
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.radius = radius
        self.KP_extent = KP_extent
        self.fixed_kernel_points = fixed_kernel_points
        self.KP_influence = KP_influence
        self.aggregation_mode = aggregation_mode
        self.deformable = deformable
        self.modulated = modulated

        # Running variable containing deformed KP distance to input points. (used in regularization loss)
        self.min_d2 = None
        self.deformed_KP = None
        self.offset_features = None

        # Initialize weights (one [in, out] matrix per kernel point)
        self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
                                 requires_grad=True)

        # Initiate weights for offsets: a rigid inner KPConv predicts the
        # per-kernel-point offsets (and modulations when enabled)
        if deformable:
            if modulated:
                self.offset_dim = (self.p_dim + 1) * self.K
            else:
                self.offset_dim = self.p_dim * self.K
            self.offset_conv = KPConv(self.K,
                                      self.p_dim,
                                      self.in_channels,
                                      self.offset_dim,
                                      KP_extent,
                                      radius,
                                      fixed_kernel_points=fixed_kernel_points,
                                      KP_influence=KP_influence,
                                      aggregation_mode=aggregation_mode)
            self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)

        else:
            self.offset_dim = None
            self.offset_conv = None
            self.offset_bias = None

        # Reset parameters
        self.reset_parameters()

        # Initialize kernel points
        self.kernel_points = self.init_KP()

        return

    def reset_parameters(self):
        # Kaiming init for the kernel weights; zero the offset bias so the
        # deformable branch starts as a rigid convolution
        kaiming_uniform_(self.weights, a=math.sqrt(5))
        if self.deformable:
            nn.init.zeros_(self.offset_bias)
        return

    def init_KP(self):
        """
        Initialize the kernel point positions in a sphere
        :return: the tensor of kernel points
        """

        # Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent
        # K_points_numpy = load_kernels(self.radius,
        #                               self.K,
        #                               dimension=self.p_dim,
        #                               fixed=self.fixed_kernel_points)

        # return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
        #                  requires_grad=False)

        # NOTE(review): getKernelPoints is not imported in this module's
        # visible imports — confirm it is provided (e.g. by model.utils),
        # otherwise this raises NameError at construction time.
        K_points_numpy = getKernelPoints(self.radius,
                                         self.K)

        return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
                         requires_grad=False)



    def forward(self, q_pts, s_pts, neighb_inds, x):
        """
        Apply the kernel point convolution.
        :param q_pts: [n_points, dim] query point positions
        :param s_pts: [n_support, dim] support point positions
        :param neighb_inds: [n_points, n_neighbors] neighbor indices into s_pts
        :param x: [n_support, in_channels] support point features
        :return: [n_points, out_channels] convolved features
        """

        ###################
        # Offset generation
        ###################

        if self.deformable:

            # Get offsets with a KPConv that only takes part of the features
            self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias

            if self.modulated:

                # Get offset (in normalized scale) from features
                unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
                unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)

                # Get modulations
                modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])

            else:

                # Get offset (in normalized scale) from features
                unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)

                # No modulations
                modulations = None

            # Rescale offset for this layer
            offsets = unscaled_offsets * self.KP_extent

        else:
            offsets = None
            modulations = None

        ######################
        # Deformed convolution
        ######################

        # Add a fake point in the last row for shadow neighbors
        s_pts = torch.cat((s_pts, torch.zeros_like(s_pts[:1, :]) + 1e6), 0)

        # Get neighbor points [n_points, n_neighbors, dim]
        neighbors = s_pts[neighb_inds, :]

        # Center every neighborhood
        neighbors = neighbors - q_pts.unsqueeze(1)

        # Apply offsets to kernel points [n_points, n_kpoints, dim]
        if self.deformable:
            self.deformed_KP = offsets + self.kernel_points
            deformed_K_points = self.deformed_KP.unsqueeze(1)
        else:
            deformed_K_points = self.kernel_points

        # Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
        neighbors.unsqueeze_(2)
        differences = neighbors - deformed_K_points

        # Get the square distances [n_points, n_neighbors, n_kpoints]
        sq_distances = torch.sum(differences ** 2, dim=3)
        # if sq_distances.grad is not None:
        #     print(sq_distances.grad.max())
        # print(sq_distances.requires_grad)
        # print(sq_distances.dtype)
        # Optimization by ignoring points outside a deformed KP range
        if self.deformable:

            # Save distances for loss
            self.min_d2, _ = torch.min(sq_distances, dim=1)

            # Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
            in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)

            # New value of max neighbors
            new_max_neighb = torch.max(torch.sum(in_range, dim=1))

            # For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
            neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)

            # Gather new neighbor indices [n_points, new_max_neighb]
            new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)

            # Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
            neighb_row_inds.unsqueeze_(2)
            neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
            sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)

            # New shadow neighbors have to point to the last shadow point
            new_neighb_inds *= neighb_row_bool
            new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
        else:
            new_neighb_inds = neighb_inds

        # Get Kernel point influences [n_points, n_kpoints, n_neighbors]
        if self.KP_influence == 'constant':
            # Every point get an influence of 1.
            all_weights = torch.ones_like(sq_distances)
            all_weights = torch.transpose(all_weights, 1, 2)

        elif self.KP_influence == 'linear':
            # Influence decrease linearly with the distance, and get to zero when d = KP_extent.
            all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
            # all_weights[all_weights==0]=0
            all_weights = torch.transpose(all_weights, 1, 2)
        elif self.KP_influence == 'gaussian':
            # Influence in gaussian of the distance.
            sigma = self.KP_extent * 0.3
            all_weights = radius_gaussian(sq_distances, sigma)
            all_weights = torch.transpose(all_weights, 1, 2)
        else:
            raise ValueError('Unknown influence function type (config.KP_influence)')
        # print('KP extend',self.KP_extent,'KP radius',self.radius)

        # In case of closest mode, only the closest KP can influence each point
        if self.aggregation_mode == 'closest':
            neighbors_1nn = torch.argmin(sq_distances, dim=2)
            all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)

        elif self.aggregation_mode != 'sum':
            raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")

        # Add a zero feature for shadow neighbors
        x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)

        # Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
        neighb_x = gather(x, new_neighb_inds,method=0)

        # Apply distance weights [n_points, n_kpoints, in_fdim]
        weighted_features = torch.matmul(all_weights, neighb_x)

        # Apply modulations
        if self.deformable and self.modulated:
            weighted_features *= modulations.unsqueeze(2)

        # Apply network weights [n_kpoints, n_points, out_fdim]
        weighted_features = weighted_features.permute((1, 0, 2))
        kernel_outputs = torch.matmul(weighted_features, self.weights)

        # Convolution sum [n_points, out_fdim]
        return torch.sum(kernel_outputs, dim=0)

    def __repr__(self):
        return 'KPConv(radius: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius,
                                                                              self.in_channels,
                                                                              self.out_channels)
392 |
393 | # ----------------------------------------------------------------------------------------------------------------------
394 | #
395 | # Complex blocks
396 | # \********************/
397 | #
398 |
def block_decider(block_name,
                  radius,
                  in_dim,
                  out_dim,
                  layer_ind,
                  config):
    """Map an architecture block name to the corresponding module instance."""

    simple_names = ('simple',
                    'simple_deformable',
                    'simple_invariant',
                    'simple_equivariant',
                    'simple_strided',
                    'simple_deformable_strided',
                    'simple_invariant_strided',
                    'simple_equivariant_strided')

    resnet_names = ('resnetb',
                    'resnetb_invariant',
                    'resnetb_equivariant',
                    'resnetb_deformable',
                    'resnetb_strided',
                    'resnetb_deformable_strided',
                    'resnetb_equivariant_strided',
                    'resnetb_invariant_strided')

    if block_name == 'unary':
        return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)

    if block_name in simple_names:
        return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)

    if block_name in resnet_names:
        return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)

    if block_name in ('max_pool', 'max_pool_wide'):
        return MaxPoolBlock(layer_ind)

    if block_name == 'global_average':
        return GlobalAverageBlock()

    if block_name == 'nearest_upsample':
        return NearestUpsampleBlock(layer_ind)

    raise ValueError('Unknown block name in the architecture definition : ' + block_name)
440 |
441 |
class BatchNormBlock(nn.Module):

    def __init__(self, in_dim, use_bn, bn_momentum):
        """
        Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
        :param in_dim: dimension input features
        :param use_bn: boolean indicating if we use Batch Norm
        :param bn_momentum: Batch norm momentum
        """
        super(BatchNormBlock, self).__init__()
        self.bn_momentum = bn_momentum
        self.use_bn = use_bn
        self.in_dim = in_dim
        if self.use_bn:
            self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
            #self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
        else:
            # learnable per-channel bias stands in for batch norm
            self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)
        return

    def reset_parameters(self):
        # Fix: the original unconditionally accessed self.bias, which does
        # not exist when use_bn is True (AttributeError).
        if not self.use_bn:
            nn.init.zeros_(self.bias)

    def forward(self, x):
        """
        Normalize [n_points, in_dim] features (or add the bias when BN is off).
        """
        if self.use_bn:

            # BatchNorm1d expects [batch, channels, length]; treat the whole
            # cloud as one batch of length n_points.
            x = x.unsqueeze(2)
            x = x.transpose(0, 2)
            x = self.batch_norm(x)
            x = x.transpose(0, 2)
            # Fix: squeeze only the length dim we added; a bare squeeze()
            # also dropped dims when n_points == 1 or in_dim == 1.
            return x.squeeze(2)
        else:
            return x + self.bias

    def __repr__(self):
        return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim,
                                                                                         self.bn_momentum,
                                                                                         str(not self.use_bn))
480 |
481 |
class UnaryBlock(nn.Module):

    def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
        """
        Pointwise (1x1) feature transform: linear layer, normalization and an
        optional leaky ReLU.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
        :param use_bn: whether batch normalization is applied
        :param bn_momentum: batch norm momentum
        :param no_relu: when True, skip the final activation
        """
        super(UnaryBlock, self).__init__()
        self.bn_momentum = bn_momentum
        self.use_bn = use_bn
        self.no_relu = no_relu
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.mlp = nn.Linear(in_dim, out_dim, bias=False)
        self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
        if not no_relu:
            self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, x, batch=None):
        out = self.batch_norm(self.mlp(x))
        return out if self.no_relu else self.leaky_relu(out)

    def __repr__(self):
        return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim,
                                                                                        self.out_dim,
                                                                                        str(self.use_bn),
                                                                                        str(not self.no_relu))
517 |
518 |
class SimpleBlock(nn.Module):

    def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
        """
        A single KPConv layer followed by batch norm and a leaky ReLU.
        :param block_name: architecture name of this block ('strided' variants pool)
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features (the conv outputs out_dim // 2)
        :param radius: current convolution radius
        :param layer_ind: index of the layer this block operates on
        :param config: parameter namespace
        """
        super(SimpleBlock, self).__init__()

        # Kernel point influence extent scaled from the current radius
        current_extent = radius * config.KP_extent / config.conv_radius

        self.bn_momentum = config.batch_norm_momentum
        self.use_bn = config.use_batch_norm
        self.layer_ind = layer_ind
        self.block_name = block_name
        self.in_dim = in_dim
        self.out_dim = out_dim

        # Kernel point convolution operator
        self.KPConv = KPConv(config.num_kernel_points,
                             config.in_points_dim,
                             in_dim,
                             out_dim // 2,
                             current_extent,
                             radius,
                             fixed_kernel_points=config.fixed_kernel_points,
                             KP_influence=config.KP_influence,
                             aggregation_mode=config.aggregation_mode,
                             deformable='deform' in block_name,
                             modulated=config.modulated)

        # Normalization and activation applied after the convolution
        self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
        self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, x, batch):
        # Strided blocks query the next (coarser) layer's points via pooling
        # indices; regular blocks convolve within the current layer.
        strided = 'strided' in self.block_name
        query_points = batch.points[self.layer_ind + 1] if strided else batch.points[self.layer_ind]
        support_points = batch.points[self.layer_ind]
        neighbors = batch.pools[self.layer_ind] if strided else batch.neighbors[self.layer_ind]

        features = self.KPConv(query_points, support_points, neighbors, x)
        return self.leaky_relu(self.batch_norm(features))
574 |
575 |
class ResnetBottleneckBlock(nn.Module):

    def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
        """
        Bottleneck residual block: 1x1 down-projection, KPConv, 1x1 up-projection,
        summed with an (optionally projected / pooled) shortcut.
        :param block_name: architecture name ('strided' variants pool the shortcut)
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
        :param radius: current convolution radius
        :param layer_ind: index of the layer this block operates on
        :param config: parameter namespace
        """
        super(ResnetBottleneckBlock, self).__init__()

        # Kernel point influence extent scaled from the current radius
        current_extent = radius * config.KP_extent / config.conv_radius

        self.bn_momentum = config.batch_norm_momentum
        self.use_bn = config.use_batch_norm
        self.block_name = block_name
        self.layer_ind = layer_ind
        self.in_dim = in_dim
        self.out_dim = out_dim

        bottleneck_dim = out_dim // 4

        # Down-projection to the bottleneck width (identity when already there)
        if in_dim != bottleneck_dim:
            self.unary1 = UnaryBlock(in_dim, bottleneck_dim, self.use_bn, self.bn_momentum)
        else:
            self.unary1 = nn.Identity()

        # KPConv operating at the bottleneck width
        self.KPConv = KPConv(config.num_kernel_points,
                             config.in_points_dim,
                             bottleneck_dim,
                             bottleneck_dim,
                             current_extent,
                             radius,
                             fixed_kernel_points=config.fixed_kernel_points,
                             KP_influence=config.KP_influence,
                             aggregation_mode=config.aggregation_mode,
                             deformable='deform' in block_name,
                             modulated=config.modulated)
        self.batch_norm_conv = BatchNormBlock(bottleneck_dim, self.use_bn, self.bn_momentum)

        # Up-projection back to out_dim (no activation: it is applied after the sum)
        self.unary2 = UnaryBlock(bottleneck_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)

        # Shortcut projection, only needed when the channel count changes
        if in_dim != out_dim:
            self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
        else:
            self.unary_shortcut = nn.Identity()

        self.leaky_relu = nn.LeakyReLU(0.1)

    def forward(self, features, batch):
        # Strided blocks query the next (coarser) layer; regular blocks stay
        # within the current layer.
        strided = 'strided' in self.block_name
        if strided:
            q_pts = batch.points[self.layer_ind + 1]
            neighb_inds = batch.pools[self.layer_ind]
        else:
            q_pts = batch.points[self.layer_ind]
            neighb_inds = batch.neighbors[self.layer_ind]
        s_pts = batch.points[self.layer_ind]

        # Bottleneck path: down-project, convolve, up-project
        x = self.unary1(features)
        x = self.leaky_relu(self.batch_norm_conv(self.KPConv(q_pts, s_pts, neighb_inds, x)))
        x = self.unary2(x)

        # Shortcut path: pooled for strided blocks, identity otherwise
        shortcut = max_pool(features, neighb_inds) if strided else features
        shortcut = self.unary_shortcut(shortcut)

        return self.leaky_relu(x + shortcut)
662 |
#################### SampleNet (commented-out draft, unused; kept for reference only) ####################
664 | # class SampleResnetBottleneckBlock(nn.Module):
665 |
666 | # def __init__(self, config):
667 | # """
668 | # Initialize a resnet bottleneck block.
669 | # :param in_fdim: dimension input features
670 | # :param out_fdim: dimension input features
671 | # :param radius: current radius of convolution
672 | # :param config: parameters
673 | # """
674 | # super(SampleResnetBottleneckBlock, self).__init__()
675 | # radius = config['kernel_radius']
676 | # # get KP_extent from current radius
677 | # current_extent = radius * config.KP_extent / config.conv_radius
678 |
679 | # # Get other parameters
680 | # self.bn_momentum = config.batch_norm_momentum
681 | # self.use_bn = config.use_batch_norm
682 | # self.block_name = block_name
683 | # self.layer_ind = layer_ind
684 | # self.in_dim = in_dim
685 | # self.out_dim = out_dim
686 |
687 | # # First downscaling mlp
688 | # if in_dim != out_dim // 4:
689 | # self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum)
690 | # else:
691 | # self.unary1 = nn.Identity()
692 |
693 | # # KPConv block
694 | # self.KPConv = KPConv(config.num_kernel_points,
695 | # config.in_points_dim,
696 | # out_dim // 4,
697 | # out_dim // 4,
698 | # current_extent,
699 | # radius,
700 | # fixed_kernel_points=config.fixed_kernel_points,
701 | # KP_influence=config.KP_influence,
702 | # aggregation_mode=config.aggregation_mode,
703 | # deformable='deform' in block_name,
704 | # modulated=config.modulated)
705 | # self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum)
706 |
707 | # # Second upscaling mlp
708 | # self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
709 |
710 | # # Shortcut optional mpl
711 | # if in_dim != out_dim:
712 | # self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
713 | # else:
714 | # self.unary_shortcut = nn.Identity()
715 |
716 | # # Other operations
717 | # self.leaky_relu = nn.LeakyReLU(0.1)
718 |
719 | # return
720 |
721 | # def forward(self, features, batch):
722 |
723 | # if 'strided' in self.block_name:
724 | # q_pts = batch.points[self.layer_ind + 1]
725 | # s_pts = batch.points[self.layer_ind]
726 | # neighb_inds = batch.pools[self.layer_ind]
727 | # else:
728 | # q_pts = batch.points[self.layer_ind]
729 | # s_pts = batch.points[self.layer_ind]
730 | # neighb_inds = batch.neighbors[self.layer_ind]
731 |
732 | # # First downscaling mlp
733 | # x = self.unary1(features)
734 |
735 | # # Convolution
736 | # x = self.KPConv(q_pts, s_pts, neighb_inds, x)
737 | # x = self.leaky_relu(self.batch_norm_conv(x))
738 |
739 | # # Second upscaling mlp
740 | # x = self.unary2(x)
741 |
742 | # # Shortcut
743 | # if 'strided' in self.block_name:
744 | # shortcut = max_pool(features, neighb_inds)
745 | # else:
746 | # shortcut = features
747 | # shortcut = self.unary_shortcut(shortcut)
748 |
749 | # return self.leaky_relu(x + shortcut)
750 | ##################################################################################
751 |
class GlobalAverageBlock(nn.Module):

    def __init__(self):
        """Pool features to one vector per cloud by global averaging."""
        super(GlobalAverageBlock, self).__init__()

    def forward(self, x, batch):
        # Average over each cloud, delimited by the last layer's lengths.
        return global_average(x, batch.lengths[-1])
763 |
764 |
class NearestUpsampleBlock(nn.Module):

    def __init__(self, layer_ind):
        """
        Copy features from a coarse layer onto the finer layer below it using
        closest-point (nearest-neighbor) upsampling.
        :param layer_ind: index of the coarse layer being upsampled
        """
        super(NearestUpsampleBlock, self).__init__()
        self.layer_ind = layer_ind

    def forward(self, x, batch):
        # upsamples[i] maps layer i+1 back onto layer i
        return closest_pool(x, batch.upsamples[self.layer_ind - 1])

    def __repr__(self):
        return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind, self.layer_ind - 1)
781 |
782 |
class MaxPoolBlock(nn.Module):

    def __init__(self, layer_ind):
        """
        Max-pool features from one layer down to the next coarser layer.
        :param layer_ind: index of the layer the pooling starts from
        """
        super(MaxPoolBlock, self).__init__()
        self.layer_ind = layer_ind

    def forward(self, x, batch):
        # Pool indices of the next layer select neighbors in the current one.
        return max_pool(x, batch.pools[self.layer_ind + 1])
795 |
796 | ########################################################################
797 | # Grid Based Kernel Point initialization
798 | import numpy as np
def getKernelPoints(radius, num_kernel_points):
    """
    Build a cubic grid of kernel points inscribed in a sphere of the given radius.

    :param radius: radius of the sphere the grid corners lie on
    :param num_kernel_points: total number of kernel points; must be a perfect
        cube (8, 27, 64, ...) so the points form an n x n x n grid
    :return: (num_kernel_points, 3) numpy array of kernel point coordinates
    :raises ValueError: if num_kernel_points is not a perfect cube
    """
    npoints = round(num_kernel_points ** (1 / 3))
    # Bugfix: the original check `round(...) % 1 == 0` was always true (round()
    # returns an int), so non-cube counts silently produced the wrong number of
    # points; also `assert(False)` is stripped under python -O. Verify the cube
    # root is exact and raise a real exception instead.
    if npoints ** 3 != num_kernel_points:
        raise ValueError('num_kernel_points must be a perfect cube, got {}'.format(num_kernel_points))

    axis = np.linspace(-1, 1, npoints)
    points = np.meshgrid(axis, axis, axis)
    points = np.vstack([p.flatten() for p in points]).T
    # Scale so the grid corners (+-1, +-1, +-1) lie exactly on the sphere.
    points /= 3 ** 0.5
    return points * radius
--------------------------------------------------------------------------------
/model/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.nn.functional import pdist
3 | import numpy as np
4 |
5 |
6 | # Efficient Voxel Based Sampling from DEPOCO
7 | # https://github.com/PRBonn/deep-point-map-compression/blob/b2e35bb05e70ae28b159c2c602bc187414173c06/depoco/architectures/network_blocks.py#L74
def gridSampling(pcd: torch.tensor, resolution=0.02):
    """
    Voxel-grid subsampling: keep, for each occupied voxel, the index of the
    point closest to that voxel's center.

    :param pcd: (N, 3) tensor of point coordinates
    :param resolution: voxel edge length
    :return: python list of selected point indices into pcd
    """

    # Integer voxel coordinates and each point's squared offset from its
    # voxel midpoint.
    grid = torch.floor(pcd/resolution)
    center = (grid+0.5)*resolution
    dist = ((pcd-center)**2).sum(dim=1)
    # Normalize distances into [0, 0.7] so they act as a fractional tie-breaker
    # below without crossing into the next integer voxel id.
    # NOTE(review): dist.max() is 0 if every point lies exactly on a voxel
    # center, which would produce NaNs here — confirm inputs preclude this.
    dist = dist/dist.max()*0.7

    # Voxel count along the largest bounding-box extent, used as the stride to
    # flatten 3D voxel coordinates into a single scalar id.
    max_v_count = max([
        torch.max(pcd[:,0]) - torch.min(pcd[:,0]),
        torch.max(pcd[:,1]) - torch.min(pcd[:,1]),
        torch.max(pcd[:,2]) - torch.min(pcd[:,2])
    ])
    v_size = torch.ceil(max_v_count/resolution)
    grid_idx = grid[:, 0] + grid[:, 1] * \
        v_size + grid[:, 2] * v_size * v_size
    # Sorting by voxel id + fractional distance groups points voxel by voxel,
    # ordered within each group by closeness to the voxel center.
    grid_d = grid_idx+dist
    idx_orig = torch.argsort(grid_d)

    # trick from https://github.com/rusty1s/pytorch_unique
    unique, inverse, counts = torch.unique_consecutive(
        grid_idx[idx_orig], return_inverse=True, return_counts=True)
    perm = torch.arange(inverse.size(
        0), dtype=inverse.dtype, device=inverse.device)
    # Flipping makes the scatter below keep the FIRST (closest) sorted point of
    # each voxel instead of the last one written.
    inverse, perm = inverse.flip([0]), perm.flip([0])

    """
    HACK: workaround to get the first item. scatter overwrites indices on gpu not sequentially
    -> you get random points in the voxel not the first one
    """
    p= perm.cpu()
    i=inverse.cpu()
    idx = torch.empty(unique.shape,dtype=p.dtype).scatter_(0, i, p)
    return idx_orig[idx].tolist()
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | FeatureLIOM
4 | 0.0.1
5 | Dense Learned Compression for SLAM
6 | Zihao Dong
7 | MIT
8 |
9 |
10 | ament_cmake
11 | rclpy
12 | std_msgs
13 | sensor_msgs
14 | sensor_msgs_py
15 |
16 |
17 |
18 |
19 | ament_python
20 |
21 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.24.3
2 | open3d==0.18.0
3 | rcl-interfaces==1.2.1
4 | rclpy==3.3.12
5 | ruamel-yaml-conda @ file:///croot/ruamel_yaml_1667489728852/work
6 | ruamel.yaml==0.18.5
7 | ruamel.yaml.clib==0.2.8
8 | scikit-learn==1.4.0
9 | scipy==1.11.4
10 | sensor-msgs==4.2.3
11 | sensor-msgs-py==4.2.3
12 | std-msgs==4.2.3
13 | torch==2.1.0+cu118
14 | torchaudio==2.1.0+cu118
15 | torcheval==0.0.7
16 | torchvision==0.16.0+cu118
17 | tornado==6.4
18 | tqdm==4.66.1
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [develop]
2 | script_dir=$base/lib/FeatureLIOM
3 | [install]
4 | install_scripts=$base/lib/FeatureLIOM
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

# Packaging metadata for the FeatureLIOM package; the `extract` console script
# launches the keypoint extraction node (keypoint_node.keypoint_node:main).
setup(
    name = "FeatureLIOM",
    version = "0.0.1",
    packages = find_packages(),
    # Ship the model config and pretrained checkpoints with the package.
    package_data = {
        'FeatureLIOM': ['config/*.yaml', 'checkpoints/*']
    },
    install_requires = ['setuptools'],
    zip_safe = True,
    maintainer = 'Zihao Dong',
    maintainer_email = 'dong.zih@northeastern.edu',
    description = "Dense Learned Compression for SLAM",
    license = "MIT",
    entry_points = {
        'console_scripts': [
            'extract = keypoint_node.keypoint_node:main',
        ],
    }
)
--------------------------------------------------------------------------------
/submodules/octree_handler/.clang-format:
--------------------------------------------------------------------------------
1 | ---
2 | Language: Cpp
3 | # BasedOnStyle: Google
4 | AccessModifierOffset: -1
5 | AlignAfterOpenBracket: Align
6 | AlignConsecutiveMacros: false
7 | AlignConsecutiveAssignments: false
8 | AlignConsecutiveDeclarations: false
9 | AlignEscapedNewlines: Left
10 | AlignOperands: true
11 | AlignTrailingComments: true
12 | AllowAllArgumentsOnNextLine: true
13 | AllowAllConstructorInitializersOnNextLine: true
14 | AllowAllParametersOfDeclarationOnNextLine: true
15 | AllowShortBlocksOnASingleLine: Never
16 | AllowShortCaseLabelsOnASingleLine: false
17 | AllowShortFunctionsOnASingleLine: All
18 | AllowShortLambdasOnASingleLine: All
19 | AllowShortIfStatementsOnASingleLine: WithoutElse
20 | AllowShortLoopsOnASingleLine: true
21 | AlwaysBreakAfterDefinitionReturnType: None
22 | AlwaysBreakAfterReturnType: None
23 | AlwaysBreakBeforeMultilineStrings: true
24 | AlwaysBreakTemplateDeclarations: Yes
25 | BinPackArguments: false
26 | BinPackParameters: false
27 | BraceWrapping:
28 | AfterCaseLabel: false
29 | AfterClass: false
30 | AfterControlStatement: false
31 | AfterEnum: false
32 | AfterFunction: false
33 | AfterNamespace: false
34 | AfterObjCDeclaration: false
35 | AfterStruct: false
36 | AfterUnion: false
37 | AfterExternBlock: false
38 | BeforeCatch: false
39 | BeforeElse: false
40 | IndentBraces: false
41 | SplitEmptyFunction: true
42 | SplitEmptyRecord: true
43 | SplitEmptyNamespace: true
44 | BreakBeforeBinaryOperators: None
45 | BreakBeforeBraces: Attach
46 | BreakBeforeInheritanceComma: false
47 | BreakInheritanceList: BeforeColon
48 | BreakBeforeTernaryOperators: true
49 | BreakConstructorInitializersBeforeComma: false
50 | BreakConstructorInitializers: BeforeColon
51 | BreakAfterJavaFieldAnnotations: false
52 | BreakStringLiterals: true
53 | ColumnLimit: 80
54 | CommentPragmas: '^ IWYU pragma:'
55 | CompactNamespaces: false
56 | ConstructorInitializerAllOnOneLineOrOnePerLine: true
57 | ConstructorInitializerIndentWidth: 4
58 | ContinuationIndentWidth: 4
59 | Cpp11BracedListStyle: true
60 | DeriveLineEnding: true
61 | DerivePointerAlignment: true
62 | DisableFormat: false
63 | ExperimentalAutoDetectBinPacking: false
64 | FixNamespaceComments: true
65 | ForEachMacros:
66 | - foreach
67 | - Q_FOREACH
68 | - BOOST_FOREACH
69 | IncludeBlocks: Regroup
70 | IncludeCategories:
71 | - Regex: '^<.*\.h>'
72 | Priority: 1
73 | - Regex: '^<.*\.hpp>'
74 | Priority: 3
75 | - Regex: '^<.*'
76 | Priority: 2
77 | - Regex: '.*'
78 | Priority: 4
79 | IncludeIsMainRegex: '([-_](test|unittest))?$'
80 | IncludeIsMainSourceRegex: ''
81 | IndentCaseLabels: true
82 | IndentGotoLabels: true
83 | IndentPPDirectives: None
84 | IndentWidth: 2
85 | IndentWrappedFunctionNames: false
86 | JavaScriptQuotes: Leave
87 | JavaScriptWrapImports: true
88 | KeepEmptyLinesAtTheStartOfBlocks: false
89 | MacroBlockBegin: ''
90 | MacroBlockEnd: ''
91 | MaxEmptyLinesToKeep: 1
92 | NamespaceIndentation: None
93 | ObjCBinPackProtocolList: Never
94 | ObjCBlockIndentWidth: 2
95 | ObjCSpaceAfterProperty: false
96 | ObjCSpaceBeforeProtocolList: true
97 | PenaltyBreakAssignment: 2
98 | PenaltyBreakBeforeFirstCallParameter: 1
99 | PenaltyBreakComment: 300
100 | PenaltyBreakFirstLessLess: 120
101 | PenaltyBreakString: 1000
102 | PenaltyBreakTemplateDeclaration: 10
103 | PenaltyExcessCharacter: 1000000
104 | PenaltyReturnTypeOnItsOwnLine: 200
105 | PointerAlignment: Left
106 | RawStringFormats:
107 | - Language: Cpp
108 | Delimiters:
109 | - cc
110 | - CC
111 | - cpp
112 | - Cpp
113 | - CPP
114 | - 'c++'
115 | - 'C++'
116 | CanonicalDelimiter: ''
117 | BasedOnStyle: google
118 | - Language: TextProto
119 | Delimiters:
120 | - pb
121 | - PB
122 | - proto
123 | - PROTO
124 | EnclosingFunctions:
125 | - EqualsProto
126 | - EquivToProto
127 | - PARSE_PARTIAL_TEXT_PROTO
128 | - PARSE_TEST_PROTO
129 | - PARSE_TEXT_PROTO
130 | - ParseTextOrDie
131 | - ParseTextProtoOrDie
132 | CanonicalDelimiter: ''
133 | BasedOnStyle: google
134 | ReflowComments: true
135 | SortIncludes: true
136 | SortUsingDeclarations: true
137 | SpaceAfterCStyleCast: false
138 | SpaceAfterLogicalNot: false
139 | SpaceAfterTemplateKeyword: true
140 | SpaceBeforeAssignmentOperators: true
141 | SpaceBeforeCpp11BracedList: false
142 | SpaceBeforeCtorInitializerColon: true
143 | SpaceBeforeInheritanceColon: true
144 | SpaceBeforeParens: ControlStatements
145 | SpaceBeforeRangeBasedForLoopColon: true
146 | SpaceInEmptyBlock: false
147 | SpaceInEmptyParentheses: false
148 | SpacesBeforeTrailingComments: 2
149 | SpacesInAngles: false
150 | SpacesInConditionalStatement: false
151 | SpacesInContainerLiterals: true
152 | SpacesInCStyleCastParentheses: false
153 | SpacesInParentheses: false
154 | SpacesInSquareBrackets: false
155 | SpaceBeforeSquareBrackets: false
156 | Standard: Auto
157 | StatementMacros:
158 | - Q_UNUSED
159 | - QT_REQUIRE_VERSION
160 | TabWidth: 8
161 | UseCRLF: false
162 | UseTab: Never
163 | ...
164 |
165 |
--------------------------------------------------------------------------------
/submodules/octree_handler/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # Created by https://www.toptal.com/developers/gitignore/api/python,cmake,c++
3 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,cmake,c++
4 |
5 | ### C++ ###
6 | # Prerequisites
7 | *.d
8 |
9 | # Compiled Object files
10 | *.slo
11 | *.lo
12 | *.o
13 | *.obj
14 |
15 | # Precompiled Headers
16 | *.gch
17 | *.pch
18 |
19 | # Compiled Dynamic libraries
20 | *.so
21 | *.dylib
22 | *.dll
23 |
24 | # Fortran module files
25 | *.mod
26 | *.smod
27 |
28 | # Compiled Static libraries
29 | *.lai
30 | *.la
31 | *.a
32 | *.lib
33 |
34 | # Executables
35 | *.exe
36 | *.out
37 | *.app
38 |
39 | ### CMake ###
40 | CMakeLists.txt.user
41 | CMakeCache.txt
42 | CMakeFiles
43 | CMakeScripts
44 | Testing
45 | Makefile
46 | cmake_install.cmake
47 | install_manifest.txt
48 | compile_commands.json
49 | CTestTestfile.cmake
50 | _deps
51 |
52 | ### CMake Patch ###
53 | # External projects
54 | *-prefix/
55 |
56 | ### Python ###
57 | # Byte-compiled / optimized / DLL files
58 | __pycache__/
59 | *.py[cod]
60 | *$py.class
61 |
62 | # C extensions
63 |
64 | # Distribution / packaging
65 | .Python
66 | build/
67 | develop-eggs/
68 | dist/
69 | downloads/
70 | eggs/
71 | .eggs/
72 | lib/
73 | lib64/
74 | parts/
75 | sdist/
76 | var/
77 | wheels/
78 | pip-wheel-metadata/
79 | share/python-wheels/
80 | *.egg-info/
81 | .installed.cfg
82 | *.egg
83 | MANIFEST
84 |
85 | # PyInstaller
86 | # Usually these files are written by a python script from a template
87 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
88 | *.manifest
89 | *.spec
90 |
91 | # Installer logs
92 | pip-log.txt
93 | pip-delete-this-directory.txt
94 |
95 | # Unit test / coverage reports
96 | htmlcov/
97 | .tox/
98 | .nox/
99 | .coverage
100 | .coverage.*
101 | .cache
102 | nosetests.xml
103 | coverage.xml
104 | *.cover
105 | *.py,cover
106 | .hypothesis/
107 | .pytest_cache/
108 | pytestdebug.log
109 |
110 | # Translations
111 | *.mo
112 | *.pot
113 |
114 | # Django stuff:
115 | *.log
116 | local_settings.py
117 | db.sqlite3
118 | db.sqlite3-journal
119 |
120 | # Flask stuff:
121 | instance/
122 | .webassets-cache
123 |
124 | # Scrapy stuff:
125 | .scrapy
126 |
127 | # Sphinx documentation
128 | docs/_build/
129 | doc/_build/
130 |
131 | # PyBuilder
132 | target/
133 |
134 | # Jupyter Notebook
135 | .ipynb_checkpoints
136 |
137 | # IPython
138 | profile_default/
139 | ipython_config.py
140 |
141 | # pyenv
142 | .python-version
143 |
144 | # pipenv
145 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
146 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
147 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
148 | # install all needed dependencies.
149 | #Pipfile.lock
150 |
151 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
152 | __pypackages__/
153 |
154 | # Celery stuff
155 | celerybeat-schedule
156 | celerybeat.pid
157 |
158 | # SageMath parsed files
159 | *.sage.py
160 |
161 | # Environments
162 | .env
163 | .venv
164 | env/
165 | venv/
166 | ENV/
167 | env.bak/
168 | venv.bak/
169 | pythonenv*
170 |
171 | # Spyder project settings
172 | .spyderproject
173 | .spyproject
174 |
175 | # Rope project settings
176 | .ropeproject
177 |
178 | # mkdocs documentation
179 | /site
180 |
181 | # mypy
182 | .mypy_cache/
183 | .dmypy.json
184 | dmypy.json
185 |
186 | # Pyre type checker
187 | .pyre/
188 |
189 | # pytype static type analyzer
190 | .pytype/
191 |
192 | # profiling data
193 | .prof
194 |
195 | # End of https://www.toptal.com/developers/gitignore/api/python,cmake,c++
196 |
--------------------------------------------------------------------------------
/submodules/octree_handler/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | image: gitlab.ipb.uni-bonn.de:4567/ipb-team/global/docker-images/ipb_default:latest
2 |
3 | run:
4 | script:
5 | - apt update && apt install -yqq pybind11-dev
6 | - pip3 install twine
7 | - python3 setup.py sdist bdist_wheel
8 | - TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python3 -m twine upload --repository-url https://gitlab.ipb.uni-bonn.de/api/v4/projects/${CI_PROJECT_ID}/packages/pypi dist/*
9 |
--------------------------------------------------------------------------------
/submodules/octree_handler/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.2.0)
2 | project(octree_handler)
3 |
4 | find_package(pybind11 REQUIRED)
5 | find_package(Eigen3 REQUIRED)
6 |
7 | # Default to release if not specified
8 | if(NOT CMAKE_BUILD_TYPE)
9 | set(CMAKE_BUILD_TYPE Release)
10 | endif()
11 |
12 | # Set aditional flags
13 | set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
14 | set(CMAKE_CXX_STANDARD 17)
15 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
16 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g3 -O0")
17 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -g0 -O3")
18 |
19 | # OpenMP flags
20 | include(FindOpenMP)
21 | if(OPENMP_FOUND)
22 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
23 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
24 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
25 | else(OPENMP_FOUND)
26 | message("ERROR: OpenMP could not be found.")
27 | endif(OPENMP_FOUND)
28 |
29 | include_directories(${EIGEN3_INCLUDE_DIR})
30 | add_subdirectory(src)
31 |
32 |
--------------------------------------------------------------------------------
/submodules/octree_handler/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import sys
4 | import platform
5 | import subprocess
6 |
7 | from setuptools import setup, Extension
8 | from setuptools.command.build_ext import build_ext
9 | from distutils.version import LooseVersion
10 |
11 |
class CMakeExtension(Extension):
    """A setuptools Extension whose sources live in a CMake project directory."""

    def __init__(self, name, sourcedir=''):
        # No python-level sources: everything is compiled by CMake.
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
16 |
17 |
class CMakeBuild(build_ext):
    """setuptools build_ext command that delegates compilation to CMake."""

    def run(self):
        """Verify cmake is available (and new enough on Windows), then build all extensions."""
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError("CMake must be installed to build the following extensions: " +
                               ", ".join(e.name for e in self.extensions))

        if platform.system() == "Windows":
            # Modernization: distutils.version.LooseVersion is deprecated and
            # distutils was removed in Python 3.12 — compare integer tuples instead.
            version_str = re.search(r'version\s*([\d.]+)', out.decode()).group(1)
            cmake_version = tuple(int(part) for part in version_str.split('.') if part)
            if cmake_version < (3, 1):
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")

        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        """Configure and build one CMakeExtension inside self.build_temp."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]

        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2**32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j2']

        env = os.environ.copy()
        # Embed the package version into the extension via a compile definition.
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
                                                              self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
62 |
setup(
    name='octree_handler',
    version='0.0.2',
    author='Louis Wiesmann',
    author_email='lwiesmann@uni-bonn.com',
    description='A python wrapper for an Octree (todo)',
    long_description='',
    # Build the CMake/pybind11 extension via the custom CMakeBuild command above.
    ext_modules=[CMakeExtension('octree_handler')],
    cmdclass=dict(build_ext=CMakeBuild),
    zip_safe=False,
)
74 |
--------------------------------------------------------------------------------
/submodules/octree_handler/src/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | pybind11_add_module(octree_handler OctreeHandler.cpp)
2 |
--------------------------------------------------------------------------------
/submodules/octree_handler/src/Octree.hpp:
--------------------------------------------------------------------------------
1 | #ifndef UNIBN_OCTREE_H_
2 | #define UNIBN_OCTREE_H_
3 |
4 | // Copyright (c) 2015 Jens Behley, University of Bonn
5 | //
6 | // Permission is hereby granted, free of charge, to any person obtaining a copy
7 | // of this software and associated documentation files (the "Software"), to
8 | // deal in the Software without restriction, including without limitation the
9 | // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 | // sell copies of the Software, and to permit persons to whom the Software is
11 | // furnished to do so, subject to the following conditions:
12 | //
13 | // The above copyright notice and this permission notice shall be included in
14 | // all copies or substantial portions of the Software.
15 | //
16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 | // IN THE SOFTWARE.
23 |
24 | #include
25 |
26 | #include
27 | #include
28 | #include // memset.
29 | #include
30 | #include
31 |
32 | // needed for gtest access to protected/private members ...
33 | namespace {
34 | class OctreeTest;
35 | }
36 |
37 | namespace unibn {
38 |
39 | /**
40 | * Some traits to access coordinates regardless of the specific implementation
41 | * of point inspired by boost.geometry, which needs to be implemented by new
42 | * points.
43 | *
44 | */
45 | namespace traits {
46 |
47 | template
48 | struct access {};
49 |
50 | template
51 | struct access {
52 | static float get(const PointT &p) { return p.x; }
53 | };
54 |
55 | template
56 | struct access {
57 | static float get(const PointT &p) { return p.y; }
58 | };
59 |
60 | template
61 | struct access {
62 | static float get(const PointT &p) { return p.z; }
63 | };
64 | } // namespace traits
65 |
66 | /** convenience function for access of point coordinates **/
67 | template
68 | inline float get(const PointT &p) {
69 | return traits::access::get(p);
70 | }
71 |
72 | /**
73 | * Some generic distances: Manhattan, (squared) Euclidean, and Maximum distance.
74 | *
75 | * A Distance has to implement the methods
76 | * 1. compute of two points p and q to compute and return the distance between
77 | *two points, and
78 | * 2. norm of x,y,z coordinates to compute and return the norm of a point p =
79 | *(x,y,z)
80 | * 3. sqr and sqrt of value to compute the correct radius if a comparison is
81 | *performed using squared norms (see L2Distance)...
82 | */
83 | template
84 | struct L1Distance {
85 | static inline float compute(const PointT &p, const PointT &q) {
86 | float diff1 = get<0>(p) - get<0>(q);
87 | float diff2 = get<1>(p) - get<1>(q);
88 | float diff3 = get<2>(p) - get<2>(q);
89 |
90 | return std::abs(diff1) + std::abs(diff2) + std::abs(diff3);
91 | }
92 |
93 | static inline float norm(float x, float y, float z) {
94 | return std::abs(x) + std::abs(y) + std::abs(z);
95 | }
96 |
97 | static inline float sqr(float r) { return r; }
98 |
99 | static inline float sqrt(float r) { return r; }
100 | };
101 |
102 | template
103 | struct L2Distance {
104 | static inline float compute(const PointT &p, const PointT &q) {
105 | float diff1 = get<0>(p) - get<0>(q);
106 | float diff2 = get<1>(p) - get<1>(q);
107 | float diff3 = get<2>(p) - get<2>(q);
108 |
109 | return std::pow(diff1, 2) + std::pow(diff2, 2) + std::pow(diff3, 2);
110 | }
111 |
112 | static inline float norm(float x, float y, float z) {
113 | return std::pow(x, 2) + std::pow(y, 2) + std::pow(z, 2);
114 | }
115 |
116 | static inline float sqr(float r) { return r * r; }
117 |
118 | static inline float sqrt(float r) { return std::sqrt(r); }
119 | };
120 |
121 | template
122 | struct MaxDistance {
123 | static inline float compute(const PointT &p, const PointT &q) {
124 | float diff1 = std::abs(get<0>(p) - get<0>(q));
125 | float diff2 = std::abs(get<1>(p) - get<1>(q));
126 | float diff3 = std::abs(get<2>(p) - get<2>(q));
127 |
128 | float maximum = diff1;
129 | if (diff2 > maximum) maximum = diff2;
130 | if (diff3 > maximum) maximum = diff3;
131 |
132 | return maximum;
133 | }
134 |
135 | static inline float norm(float x, float y, float z) {
136 | float maximum = x;
137 | if (y > maximum) maximum = y;
138 | if (z > maximum) maximum = z;
139 | return maximum;
140 | }
141 |
142 | static inline float sqr(float r) { return r; }
143 |
144 | static inline float sqrt(float r) { return r; }
145 | };
146 |
/** Construction parameters for the Octree.
 *  - bucketSize: octants with more points than this are subdivided
 *    (see createOctant).
 *  - copyPoints: if true, the octree stores and owns a deep copy of the
 *    input container; otherwise it only keeps a pointer to it.
 *  - minExtent: octants with extent <= 2 * minExtent are never subdivided;
 *    0 disables this limit.
 */
struct OctreeParams {
 public:
  OctreeParams(uint32_t bucketSize = 32,
               bool copyPoints = false,
               float minExtent = 0.0f)
      : bucketSize(bucketSize), copyPoints(copyPoints), minExtent(minExtent) {}
  uint32_t bucketSize;  // max points per leaf before splitting
  bool copyPoints;      // own a copy of the point container?
  float minExtent;      // minimum half side-length of an octant
};
157 |
158 | /** \brief Index-based Octree implementation offering different queries and
159 | * insertion/removal of points.
160 | *
161 | * The index-based Octree uses a successor relation and a startIndex in each
162 | * Octant to improve runtime performance for radius queries. The efficient
163 | * storage of the points by relinking list elements bases on the insight that
164 | * children of an Octant contain disjoint subsets of points inside the Octant
 * and that we can reorganize the points such that we get a continuous single
 * connected list that we can use to store in each octant the start of this list.
167 | *
168 | * Special about the implementation is that it allows to search for neighbors
169 | * with arbitrary p-norms, which distinguishes it from most other Octree
170 | * implementations.
171 | *
172 | * We decided to implement the Octree using a template for points and
173 | * containers. The container must have an operator[], which allows to access the
174 | * points, and a size() member function, which allows to get the size of the
175 | * container. For the points, we used an access trait to access the coordinates
176 | * inspired by boost.geometry. The implementation already provides a general
177 | * access trait, which expects to have public member variables x,y,z.
178 | *
 * If you use the implementation or ideas from the corresponding paper in your
180 | * academic work, it would be nice if you cite the corresponding paper:
181 | *
182 | * J. Behley, V. Steinhage, A.B. Cremers. Efficient Radius Neighbor Search in
183 | * Three-dimensional Point Clouds, Proc. of the IEEE International Conference on
184 | * Robotics and Automation (ICRA), 2015.
185 | *
186 | * In future, we might add also other neighbor queries and implement the removal
187 | * and adding of points.
188 | *
189 | * \version 0.1-icra
190 | *
191 | * \author behley
192 | */
193 |
194 | template >
195 | class Octree {
196 | public:
197 | Octree();
198 | ~Octree();
199 |
200 | /** \brief initialize octree with all points **/
201 | void initialize(const ContainerT &pts,
202 | const OctreeParams ¶ms = OctreeParams());
203 |
204 | /** \brief initialize octree only from pts that are inside indexes. **/
205 | void initialize(const ContainerT &pts,
206 | const std::vector &indexes,
207 | const OctreeParams ¶ms = OctreeParams());
208 |
209 | /** \brief remove all data inside the octree. **/
210 | void clear();
211 |
212 | /** \brief radius neighbor queries where radius determines the maximal radius
213 | * of reported indices of points in resultIndices **/
214 | template
215 | void radiusNeighbors(const PointT &query,
216 | float radius,
217 | std::vector &resultIndices) const;
218 |
219 | /** \brief radius neighbor queries with explicit (squared) distance
220 | * computation. **/
221 | template
222 | void radiusNeighbors(const PointT &query,
223 | float radius,
224 | std::vector &resultIndices,
225 | std::vector &distances) const;
226 |
227 | /** \brief nearest neighbor queries. Using minDistance >= 0, we explicitly
228 | *disallow self-matches.
229 | * @return index of nearest neighbor n with Distance::compute(query, n) >
230 | *minDistance and otherwise -1.
231 | **/
232 | template
233 | int32_t findNeighbor(const PointT &query, float minDistance = -1) const;
234 |
235 | /** \brief returns one random point index of the deepest octants where
236 | * 2*extent > max_dist holds **/
237 | void randomUniformSampling(const float &max_dist,
238 | std::vector &result_indices);
239 |
240 | protected:
241 | class Octant {
242 | public:
243 | Octant();
244 | ~Octant();
245 |
246 | bool isLeaf;
247 |
248 | // bounding box of the octant needed for overlap and contains tests...
249 | float x, y, z; // center
250 | float extent; // half of side-length
251 |
252 | uint32_t start, end; // start and end in succ_
253 | uint32_t size; // number of points
254 |
255 | Octant *child[8];
256 | };
257 |
258 | // not copyable, not assignable ...
259 | Octree(Octree &);
260 | Octree &operator=(const Octree &oct);
261 |
262 | /**
263 | * \brief creation of an octant using the elements starting at startIdx.
264 | *
265 | * The method reorders the index such that all points are correctly linked to
266 | * successors belonging to the same octant.
267 | *
268 | * \param x,y,z center coordinates of octant
269 | * \param extent extent of octant
270 | * \param startIdx first index of points inside octant
271 | * \param endIdx last index of points inside octant
272 | * \param size number of points in octant
273 | *
274 | * \return octant with children nodes.
275 | */
276 | Octant *createOctant(float x,
277 | float y,
278 | float z,
279 | float extent,
280 | uint32_t startIdx,
281 | uint32_t endIdx,
282 | uint32_t size);
283 |
284 | /** @return true, if search finished, otherwise false. **/
285 | template
286 | bool findNeighbor(const Octant *octant,
287 | const PointT &query,
288 | float minDistance,
289 | float &maxDistance,
290 | int32_t &resultIndex) const;
291 |
292 | template
293 | void radiusNeighbors(const Octant *octant,
294 | const PointT &query,
295 | float radius,
296 | float sqrRadius,
297 | std::vector &resultIndices) const;
298 |
299 | template
300 | void radiusNeighbors(const Octant *octant,
301 | const PointT &query,
302 | float radius,
303 | float sqrRadius,
304 | std::vector &resultIndices,
305 | std::vector &distances) const;
306 |
307 | /** \brief test if search ball S(q,r) overlaps with octant
308 | *
309 | * @param query query point
310 | * @param radius "squared" radius
311 | * @param o pointer to octant
312 | *
313 | * @return true, if search ball overlaps with octant, false otherwise.
314 | */
315 | template
316 | static bool overlaps(const PointT &query,
317 | float radius,
318 | float sqRadius,
319 | const Octant *o);
320 |
321 | /** \brief test if search ball S(q,r) contains octant
322 | *
323 | * @param query query point
324 | * @param sqRadius "squared" radius
325 | * @param octant pointer to octant
326 | *
327 | * @return true, if search ball overlaps with octant, false otherwise.
328 | */
329 | template
330 | static bool contains(const PointT &query,
331 | float sqRadius,
332 | const Octant *octant);
333 |
334 | /** \brief test if search ball S(q,r) is completely inside octant.
335 | *
336 | * @param query query point
337 | * @param radius radius r
338 | * @param octant point to octant.
339 | *
340 | * @return true, if search ball is completely inside the octant, false
341 | * otherwise.
342 | */
343 | template
344 | static bool inside(const PointT &query, float radius, const Octant *octant);
345 |
346 | /** \brief returns one random point index of the deepest octants where
347 | * 2*extent > max_dist holds **/
348 | void randomUniformSampling(const Octant *octant,
349 | const float &max_dist,
350 | std::vector &result_indices);
351 |
352 | OctreeParams params_;
353 | Octant *root_;
354 | const ContainerT *data_;
355 |
356 | std::vector
357 | successors_; // single connected list of next point indices...
358 |
359 | friend class ::OctreeTest;
360 | };
361 |
362 | template
363 | Octree::Octant::Octant()
364 | : isLeaf(true),
365 | x(0.0f),
366 | y(0.0f),
367 | z(0.0f),
368 | extent(0.0f),
369 | start(0),
370 | end(0),
371 | size(0) {
372 | memset(&child, 0, 8 * sizeof(Octant *));
373 | }
374 |
375 | template
376 | Octree::Octant::~Octant() {
377 | for (uint32_t i = 0; i < 8; ++i) delete child[i];
378 | }
379 |
380 | template
381 | Octree::Octree() : root_(0), data_(0) {}
382 |
383 | template
384 | Octree::~Octree() {
385 | delete root_;
386 | if (params_.copyPoints) delete data_;
387 | }
388 |
389 | template
390 | void Octree::initialize(const ContainerT &pts,
391 | const OctreeParams ¶ms) {
392 | clear();
393 | params_ = params;
394 |
395 | if (params_.copyPoints)
396 | data_ = new ContainerT(pts);
397 | else
398 | data_ = &pts;
399 |
400 | const uint32_t N = pts.size();
401 | successors_ = std::vector(N);
402 |
403 | // determine axis-aligned bounding box.
404 | float min[3], max[3];
405 | min[0] = get<0>(pts[0]);
406 | min[1] = get<1>(pts[0]);
407 | min[2] = get<2>(pts[0]);
408 | max[0] = min[0];
409 | max[1] = min[1];
410 | max[2] = min[2];
411 |
412 | for (uint32_t i = 0; i < N; ++i) {
413 | // initially each element links simply to the following element.
414 | successors_[i] = i + 1;
415 |
416 | const PointT &p = pts[i];
417 |
418 | if (get<0>(p) < min[0]) min[0] = get<0>(p);
419 | if (get<1>(p) < min[1]) min[1] = get<1>(p);
420 | if (get<2>(p) < min[2]) min[2] = get<2>(p);
421 | if (get<0>(p) > max[0]) max[0] = get<0>(p);
422 | if (get<1>(p) > max[1]) max[1] = get<1>(p);
423 | if (get<2>(p) > max[2]) max[2] = get<2>(p);
424 | }
425 |
426 | float ctr[3] = {min[0], min[1], min[2]};
427 |
428 | float maxextent = 0.5f * (max[0] - min[0]);
429 | ctr[0] += maxextent;
430 | for (uint32_t i = 1; i < 3; ++i) {
431 | float extent = 0.5f * (max[i] - min[i]);
432 | ctr[i] += extent;
433 | if (extent > maxextent) maxextent = extent;
434 | }
435 |
436 | root_ = createOctant(ctr[0], ctr[1], ctr[2], maxextent, 0, N - 1, N);
437 | }
438 |
439 | template
440 | void Octree::initialize(
441 | const ContainerT &pts,
442 | const std::vector &indexes,
443 | const OctreeParams ¶ms) {
444 | clear();
445 | params_ = params;
446 |
447 | if (params_.copyPoints)
448 | data_ = new ContainerT(pts);
449 | else
450 | data_ = &pts;
451 |
452 | const uint32_t N = pts.size();
453 | successors_ = std::vector(N);
454 |
455 | if (indexes.size() == 0) return;
456 |
457 | // determine axis-aligned bounding box.
458 | uint32_t lastIdx = indexes[0];
459 | float min[3], max[3];
460 | min[0] = get<0>(pts[lastIdx]);
461 | min[1] = get<1>(pts[lastIdx]);
462 | min[2] = get<2>(pts[lastIdx]);
463 | max[0] = min[0];
464 | max[1] = min[1];
465 | max[2] = min[2];
466 |
467 | for (uint32_t i = 1; i < indexes.size(); ++i) {
468 | uint32_t idx = indexes[i];
469 | // initially each element links simply to the following element.
470 | successors_[lastIdx] = idx;
471 |
472 | const PointT &p = pts[idx];
473 |
474 | if (get<0>(p) < min[0]) min[0] = get<0>(p);
475 | if (get<1>(p) < min[1]) min[1] = get<1>(p);
476 | if (get<2>(p) < min[2]) min[2] = get<2>(p);
477 | if (get<0>(p) > max[0]) max[0] = get<0>(p);
478 | if (get<1>(p) > max[1]) max[1] = get<1>(p);
479 | if (get<2>(p) > max[2]) max[2] = get<2>(p);
480 |
481 | lastIdx = idx;
482 | }
483 |
484 | float ctr[3] = {min[0], min[1], min[2]};
485 |
486 | float maxextent = 0.5f * (max[0] - min[0]);
487 | ctr[0] += maxextent;
488 | for (uint32_t i = 1; i < 3; ++i) {
489 | float extent = 0.5f * (max[i] - min[i]);
490 | ctr[i] += extent;
491 | if (extent > maxextent) maxextent = extent;
492 | }
493 |
494 | root_ = createOctant(
495 | ctr[0], ctr[1], ctr[2], maxextent, indexes[0], lastIdx, indexes.size());
496 | }
497 |
498 | template
499 | void Octree::clear() {
500 | delete root_;
501 | if (params_.copyPoints) delete data_;
502 | root_ = 0;
503 | data_ = 0;
504 | successors_.clear();
505 | }
506 |
507 | template
508 | typename Octree::Octant *
509 | Octree::createOctant(float x,
510 | float y,
511 | float z,
512 | float extent,
513 | uint32_t startIdx,
514 | uint32_t endIdx,
515 | uint32_t size) {
516 | // For a leaf we don't have to change anything; points are already correctly
517 | // linked or correctly reordered.
518 | Octant *octant = new Octant;
519 |
520 | octant->isLeaf = true;
521 |
522 | octant->x = x;
523 | octant->y = y;
524 | octant->z = z;
525 | octant->extent = extent;
526 |
527 | octant->start = startIdx;
528 | octant->end = endIdx;
529 | octant->size = size;
530 |
531 | static const float factor[] = {-0.5f, 0.5f};
532 |
533 | // subdivide subset of points and re-link points according to Morton codes
534 | if (size > params_.bucketSize && extent > 2 * params_.minExtent) {
535 | octant->isLeaf = false;
536 |
537 | const ContainerT &points = *data_;
538 | std::vector childStarts(8, 0);
539 | std::vector childEnds(8, 0);
540 | std::vector childSizes(8, 0);
541 |
542 | // re-link disjoint child subsets...
543 | uint32_t idx = startIdx;
544 |
545 | for (uint32_t i = 0; i < size; ++i) {
546 | const PointT &p = points[idx];
547 |
548 | // determine Morton code for each point...
549 | uint32_t mortonCode = 0;
550 | if (get<0>(p) > x) mortonCode |= 1;
551 | if (get<1>(p) > y) mortonCode |= 2;
552 | if (get<2>(p) > z) mortonCode |= 4;
553 |
554 | // set child starts and update successors...
555 | if (childSizes[mortonCode] == 0)
556 | childStarts[mortonCode] = idx;
557 | else
558 | successors_[childEnds[mortonCode]] = idx;
559 | childSizes[mortonCode] += 1;
560 |
561 | childEnds[mortonCode] = idx;
562 | idx = successors_[idx];
563 | }
564 |
565 | // now, we can create the child nodes...
566 | float childExtent = 0.5f * extent;
567 | bool firsttime = true;
568 | uint32_t lastChildIdx = 0;
569 | for (uint32_t i = 0; i < 8; ++i) {
570 | if (childSizes[i] == 0) continue;
571 |
572 | float childX = x + factor[(i & 1) > 0] * extent;
573 | float childY = y + factor[(i & 2) > 0] * extent;
574 | float childZ = z + factor[(i & 4) > 0] * extent;
575 |
576 | octant->child[i] = createOctant(childX,
577 | childY,
578 | childZ,
579 | childExtent,
580 | childStarts[i],
581 | childEnds[i],
582 | childSizes[i]);
583 |
584 | if (firsttime)
585 | octant->start = octant->child[i]->start;
586 | else
587 | successors_[octant->child[lastChildIdx]->end] =
588 | octant->child[i]->start; // we have to ensure that also the child
589 | // ends link to the next child start.
590 |
591 | lastChildIdx = i;
592 | octant->end = octant->child[i]->end;
593 | firsttime = false;
594 | }
595 | }
596 | return octant;
597 | }
598 |
599 | template
600 | template
601 | void Octree::radiusNeighbors(
602 | const Octant *octant,
603 | const PointT &query,
604 | float radius,
605 | float sqrRadius,
606 | std::vector &resultIndices) const {
607 | const ContainerT &points = *data_;
608 |
609 | // if search ball S(q,r) contains octant, simply add point indexes.
610 | if (contains(query, sqrRadius, octant)) {
611 | uint32_t idx = octant->start;
612 | for (uint32_t i = 0; i < octant->size; ++i) {
613 | resultIndices.push_back(idx);
614 | idx = successors_[idx];
615 | }
616 |
617 | return; // early pruning.
618 | }
619 |
620 | if (octant->isLeaf) {
621 | uint32_t idx = octant->start;
622 | for (uint32_t i = 0; i < octant->size; ++i) {
623 | const PointT &p = points[idx];
624 | float dist = Distance::compute(query, p);
625 | if (dist < sqrRadius) resultIndices.push_back(idx);
626 | idx = successors_[idx];
627 | }
628 |
629 | return;
630 | }
631 |
632 | // check whether child nodes are in range.
633 | for (uint32_t c = 0; c < 8; ++c) {
634 | if (octant->child[c] == 0) continue;
635 | if (!overlaps(query, radius, sqrRadius, octant->child[c]))
636 | continue;
637 | radiusNeighbors(
638 | octant->child[c], query, radius, sqrRadius, resultIndices);
639 | }
640 | }
641 |
642 | template
643 | template
644 | void Octree::radiusNeighbors(
645 | const Octant *octant,
646 | const PointT &query,
647 | float radius,
648 | float sqrRadius,
649 | std::vector &resultIndices,
650 | std::vector &distances) const {
651 | const ContainerT &points = *data_;
652 |
653 | // if search ball S(q,r) contains octant, simply add point indexes and compute
654 | // squared distances.
655 | if (contains(query, sqrRadius, octant)) {
656 | uint32_t idx = octant->start;
657 | for (uint32_t i = 0; i < octant->size; ++i) {
658 | resultIndices.push_back(idx);
659 | distances.push_back(Distance::compute(query, points[idx]));
660 | idx = successors_[idx];
661 | }
662 |
663 | return; // early pruning.
664 | }
665 |
666 | if (octant->isLeaf) {
667 | uint32_t idx = octant->start;
668 | for (uint32_t i = 0; i < octant->size; ++i) {
669 | const PointT &p = points[idx];
670 | float dist = Distance::compute(query, p);
671 | if (dist < sqrRadius) {
672 | resultIndices.push_back(idx);
673 | distances.push_back(dist);
674 | }
675 | idx = successors_[idx];
676 | }
677 |
678 | return;
679 | }
680 |
681 | // check whether child nodes are in range.
682 | for (uint32_t c = 0; c < 8; ++c) {
683 | if (octant->child[c] == 0) continue;
684 | if (!overlaps(query, radius, sqrRadius, octant->child[c]))
685 | continue;
686 | radiusNeighbors(
687 | octant->child[c], query, radius, sqrRadius, resultIndices, distances);
688 | }
689 | }
690 |
691 | template
692 | template
693 | void Octree::radiusNeighbors(
694 | const PointT &query,
695 | float radius,
696 | std::vector &resultIndices) const {
697 | resultIndices.clear();
698 | if (root_ == 0) return;
699 |
700 | float sqrRadius = Distance::sqr(radius); // "squared" radius
701 | radiusNeighbors(root_, query, radius, sqrRadius, resultIndices);
702 | }
703 |
704 | template
705 | template
706 | void Octree::radiusNeighbors(
707 | const PointT &query,
708 | float radius,
709 | std::vector &resultIndices,
710 | std::vector &distances) const {
711 | resultIndices.clear();
712 | distances.clear();
713 | if (root_ == 0) return;
714 |
715 | float sqrRadius = Distance::sqr(radius); // "squared" radius
716 | radiusNeighbors(
717 | root_, query, radius, sqrRadius, resultIndices, distances);
718 | }
719 |
720 | template
721 | template
722 | bool Octree::overlaps(const PointT &query,
723 | float radius,
724 | float sqRadius,
725 | const Octant *o) {
726 | // we exploit the symmetry to reduce the test to testing if its inside the
727 | // Minkowski sum around the positive quadrant.
728 | float x = get<0>(query) - o->x;
729 | float y = get<1>(query) - o->y;
730 | float z = get<2>(query) - o->z;
731 |
732 | x = std::abs(x);
733 | y = std::abs(y);
734 | z = std::abs(z);
735 |
736 | float maxdist = radius + o->extent;
737 |
738 | // Completely outside, since q' is outside the relevant area.
739 | if (x > maxdist || y > maxdist || z > maxdist) return false;
740 |
741 | int32_t num_less_extent = (x < o->extent) + (y < o->extent) + (z < o->extent);
742 |
743 | // Checking different cases:
744 |
745 | // a. inside the surface region of the octant.
746 | if (num_less_extent > 1) return true;
747 |
748 | // b. checking the corner region && edge region.
749 | x = std::max(x - o->extent, 0.0f);
750 | y = std::max(y - o->extent, 0.0f);
751 | z = std::max(z - o->extent, 0.0f);
752 |
753 | return (Distance::norm(x, y, z) < sqRadius);
754 | }
755 |
756 | template
757 | template
758 | bool Octree::contains(const PointT &query,
759 | float sqRadius,
760 | const Octant *o) {
761 | // we exploit the symmetry to reduce the test to test
762 | // whether the farthest corner is inside the search ball.
763 | float x = get<0>(query) - o->x;
764 | float y = get<1>(query) - o->y;
765 | float z = get<2>(query) - o->z;
766 |
767 | x = std::abs(x);
768 | y = std::abs(y);
769 | z = std::abs(z);
770 | // reminder: (x, y, z) - (-e, -e, -e) = (x, y, z) + (e, e, e)
771 | x += o->extent;
772 | y += o->extent;
773 | z += o->extent;
774 |
775 | return (Distance::norm(x, y, z) < sqRadius);
776 | }
777 |
778 | template
779 | template
780 | int32_t Octree::findNeighbor(const PointT &query,
781 | float minDistance) const {
782 | float maxDistance = std::numeric_limits::infinity();
783 | int32_t resultIndex = -1;
784 | if (root_ == 0) return resultIndex;
785 |
786 | findNeighbor(root_, query, minDistance, maxDistance, resultIndex);
787 |
788 | return resultIndex;
789 | }
790 |
791 | template
792 | template
793 | bool Octree::findNeighbor(const Octant *octant,
794 | const PointT &query,
795 | float minDistance,
796 | float &maxDistance,
797 | int32_t &resultIndex) const {
798 | const ContainerT &points = *data_;
799 | // 1. first descend to leaf and check in leafs points.
800 | if (octant->isLeaf) {
801 | uint32_t idx = octant->start;
802 | float sqrMaxDistance = Distance::sqr(maxDistance);
803 | float sqrMinDistance =
804 | (minDistance < 0) ? minDistance : Distance::sqr(minDistance);
805 |
806 | for (uint32_t i = 0; i < octant->size; ++i) {
807 | const PointT &p = points[idx];
808 | float dist = Distance::compute(query, p);
809 | if (dist > sqrMinDistance && dist < sqrMaxDistance) {
810 | resultIndex = idx;
811 | sqrMaxDistance = dist;
812 | }
813 | idx = successors_[idx];
814 | }
815 |
816 | maxDistance = Distance::sqrt(sqrMaxDistance);
817 | return inside(query, maxDistance, octant);
818 | }
819 |
820 | // determine Morton code for each point...
821 | uint32_t mortonCode = 0;
822 | if (get<0>(query) > octant->x) mortonCode |= 1;
823 | if (get<1>(query) > octant->y) mortonCode |= 2;
824 | if (get<2>(query) > octant->z) mortonCode |= 4;
825 |
826 | if (octant->child[mortonCode] != 0) {
827 | if (findNeighbor(octant->child[mortonCode],
828 | query,
829 | minDistance,
830 | maxDistance,
831 | resultIndex))
832 | return true;
833 | }
834 |
835 | // 2. if current best point completely inside, just return.
836 | float sqrMaxDistance = Distance::sqr(maxDistance);
837 |
838 | // 3. check adjacent octants for overlap and check these if necessary.
839 | for (uint32_t c = 0; c < 8; ++c) {
840 | if (c == mortonCode) continue;
841 | if (octant->child[c] == 0) continue;
842 | if (!overlaps(
843 | query, maxDistance, sqrMaxDistance, octant->child[c]))
844 | continue;
845 | if (findNeighbor(
846 | octant->child[c], query, minDistance, maxDistance, resultIndex))
847 | return true; // early pruning
848 | }
849 |
850 | // all children have been checked...check if point is inside the current
851 | // octant...
852 | return inside(query, maxDistance, octant);
853 | }
854 |
855 | template
856 | template
857 | bool Octree::inside(const PointT &query,
858 | float radius,
859 | const Octant *octant) {
860 | // we exploit the symmetry to reduce the test to test
861 | // whether the farthest corner is inside the search ball.
862 | float x = get<0>(query) - octant->x;
863 | float y = get<1>(query) - octant->y;
864 | float z = get<2>(query) - octant->z;
865 |
866 | x = std::abs(x) + radius;
867 | y = std::abs(y) + radius;
868 | z = std::abs(z) + radius;
869 |
870 | if (x > octant->extent) return false;
871 | if (y > octant->extent) return false;
872 | if (z > octant->extent) return false;
873 |
874 | return true;
875 | }
876 |
877 | /** \brief returns one random point index of the deepest octants where 2*extent
878 | * > max_dist holds **/
879 | template
880 | void Octree::randomUniformSampling(
881 | const float &max_dist, std::vector &result_indices) {
882 | result_indices.reserve(root_->size);
883 | randomUniformSampling(root_, max_dist, result_indices);
884 | }
885 | /** \brief returns one random point index of the deepest octants where 2*extent
886 | * > max_dist holds **/
887 | template
888 | void Octree::randomUniformSampling(
889 | const Octant *octant,
890 | const float &max_dist,
891 | std::vector &result_indices) {
892 | // if octant is the leaf or the desired resolution is achieved: add points to
893 | // the list nr of points depends on the size of the voxel (most times 1)
894 | /*if (octant->extent < max_dist)
895 | {
896 | result_indices.emplace_back(octant->start);
897 | }
898 | else */
899 | if (octant->isLeaf || octant->extent < max_dist) {
900 | /*
901 | uint32_t max_theoretical_points = std::max(std::pow(2 * octant->extent /
902 | max_dist, 3), 1.0); uint32_t max_points = std::min(octant->size,
903 | max_theoretical_points); for (uint32_t i = 0; i < max_points; i++)
904 | {
905 | uint32_t pt_idx = (i * octant->size) / max_points;
906 | result_indices.emplace_back(octant->start + pt_idx);
907 | }*/
908 | const uint32_t &size = octant->size;
909 | // float distances[size][size];
910 | const ContainerT &points = *data_;
911 | // std::cout << "cont size " << points.size() << std::endl;
912 | uint32_t idx_i = octant->start;
913 | // uint32_t idx_j;
914 | // result_indices.emplace_back(idx_i);
915 | std::vector used_indices;
916 | for (uint32_t i = 0; i < size; ++i) {
917 | float min_dist = 2 * max_dist;
918 | // idx_j = octant->start;
919 | for (const uint32_t &idx_j : used_indices) {
920 | float dist = std::abs(get<0>(points[idx_i]) - get<0>(points[idx_j])) +
921 | std::abs(get<1>(points[idx_i]) - get<1>(points[idx_j])) +
922 | std::abs(get<2>(points[idx_i]) - get<2>(points[idx_j]));
923 | // distances[i][j] = dist;
924 | // distances[j][i] = dist;
925 | if (dist < min_dist) min_dist = dist;
926 | // idx_j = successors_[idx_j];
927 | }
928 | if (min_dist > max_dist) {
929 | // if (idx_i >= points.size())
930 | // {
931 | // std::cout << "add idx " << std::endl;
932 | // }
933 | used_indices.emplace_back(idx_i);
934 | result_indices.emplace_back(idx_i);
935 | }
936 | idx_i = successors_[idx_i];
937 | }
938 | } else {
939 | for (uint32_t c = 0; c < 8; ++c) {
940 | if (octant->child[c] == 0) continue;
941 | randomUniformSampling(octant->child[c], max_dist, result_indices);
942 | }
943 | }
944 | }
945 |
946 | } // namespace unibn
947 |
948 | #endif /* OCTREE_HPP_ */
949 |
--------------------------------------------------------------------------------
/submodules/octree_handler/src/OctreeHandler.cpp:
--------------------------------------------------------------------------------
1 | #include "OctreeHandler.h"
2 |
3 | #include
4 | #include
5 | #include
6 |
7 | #include
8 | #include
9 | #include
10 |
11 | namespace py = pybind11;
12 |
13 | void Octree::setInput(Eigen::MatrixXf &cloud) {
14 | points_.clear();
15 | points_.reserve(cloud.rows());
16 | for (uint32_t i = 0; i < cloud.rows(); i++) {
17 | points_.emplace_back(
18 | Eigen::Vector3f(cloud(i, 0), cloud(i, 1), cloud(i, 2)));
19 | }
20 | octree_.initialize(points_);
21 | }
22 |
23 | std::vector Octree::radiusSearch(const Eigen::Vector3f &pt,
24 | const float &radius) {
25 | std::vector results;
26 | octree_.radiusNeighbors>(
27 | pt, radius, results);
28 | return results;
29 | }
30 |
31 | std::vector Octree::radiusSearch(const uint32_t &pt_idx,
32 | const float &radius) {
33 | assert(pt_idx < points_.size());
34 | std::vector results;
35 | const Eigen::Vector3f &q = points_[pt_idx];
36 | octree_.radiusNeighbors>(
37 | q, radius, results);
38 | return results;
39 | }
40 |
41 | Eigen::MatrixXi Octree::radiusSearchAll(const uint32_t &max_nr_neighbors,
42 | const float &radius) {
43 | uint32_t act_max_neighbors = 0;
44 | Eigen::MatrixXi indices =
45 | Eigen::MatrixXi::Ones(points_.size(), max_nr_neighbors) *
46 | (int)points_.size();
47 | for (uint32_t i = 0; i < points_.size(); i++) {
48 | std::vector results = radiusSearch(i, radius);
49 | uint32_t nr_n = std::min(max_nr_neighbors, (uint32_t)results.size());
50 | if (nr_n > act_max_neighbors) act_max_neighbors = nr_n;
51 | for (uint32_t j = 0; j < nr_n; j++) {
52 | indices(i, j) = results[j];
53 | }
54 | }
55 | return indices.leftCols(act_max_neighbors);
56 | }
57 |
58 | Eigen::MatrixXi Octree::radiusSearchIndices(
59 | const std::vector &pt_indices,
60 | const uint32_t &max_nr_neighbors,
61 | const float &radius) {
62 | uint32_t act_max_neighbors = 0;
63 | Eigen::MatrixXi indices =
64 | Eigen::MatrixXi::Ones(pt_indices.size(), max_nr_neighbors) *
65 | (int)points_.size();
66 | #pragma omp parallel for num_threads(omp_get_max_threads())
67 | for (uint32_t i = 0; i < pt_indices.size(); i++) {
68 | std::vector results = radiusSearch(pt_indices[i], radius);
69 | uint32_t nr_n = std::min(max_nr_neighbors, (uint32_t)results.size());
70 | if (nr_n > act_max_neighbors) act_max_neighbors = nr_n;
71 | for (uint32_t j = 0; j < nr_n; j++) {
72 | uint32_t step =
73 | std::max((uint32_t)(results.size() * j / max_nr_neighbors), j);
74 | assert(step < results.size());
75 | indices(i, j) = results[step];
76 | }
77 | }
78 | return indices.leftCols(act_max_neighbors);
79 | }
80 |
81 | Eigen::MatrixXi Octree::radiusSearchPoints(Eigen::MatrixXf &query_points,
82 | const uint32_t &max_nr_neighbors,
83 | const float &radius) {
84 | assert(query_points.cols() == 3);
85 | uint32_t act_max_neighbors = 0;
86 | Eigen::MatrixXi indices =
87 | Eigen::MatrixXi::Ones(query_points.rows(), max_nr_neighbors) *
88 | (int)points_.size();
89 | for (uint32_t i = 0; i < query_points.rows(); i++) {
90 | std::vector results = radiusSearch(query_points.row(i), radius);
91 | uint32_t nr_n = std::min(max_nr_neighbors, (uint32_t)results.size());
92 | if (nr_n > act_max_neighbors) act_max_neighbors = nr_n;
93 | for (uint32_t j = 0; j < nr_n; j++) {
94 | uint32_t step =
95 | std::max((uint32_t)(results.size() * j / max_nr_neighbors), j);
96 | assert(step < results.size());
97 | assert(results[step] <= points_.size());
98 | // std::cout< Octree::randomUniformSampling(const float &max_dist) {
106 | std::vector indices;
107 | if (max_dist > 0) {
108 | octree_.randomUniformSampling(max_dist, indices);
109 | } else {
110 | indices = std::vector(points_.size()); // return all points
111 | std::iota(std::begin(indices), std::end(indices), 0);
112 | }
113 | return indices;
114 | }
115 | Eigen::MatrixXf Octree::computeScatterMatrices(const float &radius) {
116 | uint32_t n = points_.size();
117 | Eigen::MatrixXf scatter_matrices = Eigen::MatrixXf(n, 6);
118 | for (uint32_t i = 0; i < n; i++) {
119 | std::vector neighbors_idx = radiusSearch(i, radius);
120 | uint32_t nr_neighbors = neighbors_idx.size();
121 | Eigen::Matrix3Xf neighbors = Eigen::Matrix3Xf(3, nr_neighbors);
122 | for (uint32_t j = 0; j < nr_neighbors; j++) {
123 | neighbors.col(j) = points_[neighbors_idx[j]];
124 | }
125 | // https://stackoverflow.com/questions/15138634/eigen-is-there-an-inbuilt-way-to-calculate-sample-covariance
126 | Eigen::Matrix3Xf centered =
127 | (neighbors.colwise() - neighbors.rowwise().mean()) / radius;
128 | Eigen::Matrix3f cov =
129 | (centered * centered.transpose()) / float(nr_neighbors - 1);
130 | Eigen::Matrix flat_cov;
131 | flat_cov << cov(0, 0), cov(1, 1), cov(2, 2), cov(0, 1), cov(0, 2),
132 | cov(1, 2);
133 | scatter_matrices.row(i) = flat_cov;
134 | }
135 | return scatter_matrices;
136 | }
137 |
138 | Eigen::MatrixXf Octree::spectralFeaturesAll(const float &radius) {
139 | uint32_t n = points_.size();
140 | Eigen::MatrixXf eigenvalues = Eigen::MatrixXf(n, 3);
141 | for (uint32_t i = 0; i < n; i++) {
142 | std::vector<uint32_t> neighbors_idx = radiusSearch(i, radius);
143 | uint32_t nr_neighbors = neighbors_idx.size();
144 | Eigen::Matrix3Xf neighbors = Eigen::Matrix3Xf(3, nr_neighbors);
145 | for (uint32_t j = 0; j < nr_neighbors; j++) {
146 | neighbors.col(j) = points_[neighbors_idx[j]];
147 | }
148 | // https://stackoverflow.com/questions/15138634/eigen-is-there-an-inbuilt-way-to-calculate-sample-covariance
149 | Eigen::Matrix3Xf centered =
150 | (neighbors.colwise() - neighbors.rowwise().mean()) / radius;
151 | Eigen::Matrix3f cov =
152 | (centered * centered.transpose()) / float(nr_neighbors - 1);
153 | Eigen::Vector3f eigenv = cov.eigenvalues().real();
154 | std::sort(
155 | eigenv.data(), eigenv.data() + eigenv.size(), std::greater<float>());
156 | eigenvalues.row(i) = eigenv.transpose();
157 | }
158 | return eigenvalues;
159 | }
160 | Eigen::MatrixXf Octree::computeEigenvaluesNormal(const float &radius) {
161 | uint32_t n = points_.size();
162 | Eigen::MatrixXf eigenvalues = Eigen::MatrixXf(n, 6);
163 | for (uint32_t i = 0; i < n; i++) {
164 | std::vector<uint32_t> neighbors_idx = radiusSearch(i, radius);
165 | uint32_t nr_neighbors = neighbors_idx.size();
166 | Eigen::Matrix3Xf neighbors = Eigen::Matrix3Xf(3, nr_neighbors);
167 | for (uint32_t j = 0; j < nr_neighbors; j++) {
168 | neighbors.col(j) = points_[neighbors_idx[j]];
169 | }
170 | // https://stackoverflow.com/questions/15138634/eigen-is-there-an-inbuilt-way-to-calculate-sample-covariance
171 | Eigen::Matrix3Xf centered =
172 | (neighbors.colwise() - neighbors.rowwise().mean()) / radius;
173 | Eigen::Matrix3f cov =
174 | (centered * centered.transpose()) / float(nr_neighbors - 1);
175 | Eigen::JacobiSVD<Eigen::Matrix3f> svd(cov, Eigen::ComputeFullU);
176 | Eigen::Matrix3f eigenvectors = svd.matrixU();
177 | // last column is the eigenvector of the smalles singular value
178 | Eigen::Vector3f normal = eigenvectors.col(2);
179 | normal.normalize();
180 | Eigen::Vector3f singularv = svd.singularValues();
181 | if (i < 1) {
182 | std::cout << "sing: " << singularv
183 | << "sqrt sing: " << singularv.array().sqrt().real()
184 | << "pow sing: " << singularv.array().pow(2) << "eigen "
185 | << cov.eigenvalues().real() << std::endl;
186 | }
187 | eigenvalues.row(i).leftCols(3) = singularv.transpose();
188 | eigenvalues.row(i).rightCols(3) = normal.transpose();
189 | }
190 | return eigenvalues;
191 | }
192 |
193 | // if fatal error: Python.h: No such file or directory
194 | // https://github.com/stevenlovegrove/Pangolin/issues/494
195 | // CPLUS_INCLUDE_PATH=/usr/include/python3.6
196 | // export CPLUS_INCLUDE_PATH
197 | PYBIND11_MODULE(octree_handler, m) {
198 | m.doc() = "pybind11 octree plugin"; // optional module docstring
199 |
200 | m.def("scale",
201 | [](pybind11::EigenDRef<Eigen::MatrixXd> m, double c) { m *= c; });
202 | py::class_<Octree>(m, "Octree")
203 | .def(py::init<>())
204 | .def("setInput",
205 | &Octree::setInput,
206 | "builds octree based on the input cloud",
207 | py::arg("points"))
208 | .def("radiusSearch",
209 | py::overload_cast<const uint32_t &, const float &>(
210 | &Octree::radiusSearch))
211 | .def("radiusSearchAll", &Octree::radiusSearchAll)
212 | .def("radiusSearchIndices", &Octree::radiusSearchIndices)
213 | .def("radiusSearchPoints", &Octree::radiusSearchPoints)
214 | .def("spectralFeatureAll", &Octree::spectralFeaturesAll)
215 | .def("computeEigenvaluesNormal", &Octree::computeEigenvaluesNormal)
216 | .def("computeScatterMatrices", &Octree::computeScatterMatrices)
217 | .def("randomUniformSampling", &Octree::randomUniformSampling);
218 | }
219 |
--------------------------------------------------------------------------------
/submodules/octree_handler/src/OctreeHandler.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <cstdint>
4 | #include <vector>
5 |
6 | #include <Eigen/Dense>
7 |
8 | #include "Octree.hpp"
9 |
10 | namespace unibn {
11 | namespace traits {
12 |
13 | template <>
14 | struct access<Eigen::Vector3f, 0> {
15 | static float get(const Eigen::Vector3f &p) { return p.x(); }
16 | };
17 | 
18 | template <>
19 | struct access<Eigen::Vector3f, 1> {
20 | static float get(const Eigen::Vector3f &p) { return p.y(); }
21 | };
22 | 
23 | template <>
24 | struct access<Eigen::Vector3f, 2> {
25 | static float get(const Eigen::Vector3f &p) { return p.z(); }
26 | };
27 | } // namespace traits
28 | } // namespace unibn
29 |
30 | class Octree {
31 | private:
32 | unibn::Octree<Eigen::Vector3f> octree_;
33 | std::vector<Eigen::Vector3f> points_;
34 |
35 | public:
36 | Octree() = default;
37 | ~Octree() = default;
38 | void setInput(Eigen::MatrixXf &cloud);
39 | std::vector<uint32_t> radiusSearch(const uint32_t &pt_idx,
40 | const float &radius);
41 | std::vector<uint32_t> radiusSearch(const Eigen::Vector3f &pt,
42 | const float &radius);
43 | Eigen::MatrixXi radiusSearchAll(const uint32_t &max_nr_neighbors,
44 | const float &radius);
45 | Eigen::MatrixXi radiusSearchIndices(const std::vector<uint32_t> &pt_indices,
46 | const uint32_t &max_nr_neighbors,
47 | const float &radius);
48 | Eigen::MatrixXi radiusSearchPoints(Eigen::MatrixXf &query_points,
49 | const uint32_t &max_nr_neighbors,
50 | const float &radius);
51 | std::vector<uint32_t> randomUniformSampling(const float &max_dist);
52 | Eigen::MatrixXf spectralFeaturesAll(const float &radius);
53 | Eigen::MatrixXf computeEigenvaluesNormal(const float &radius);
54 | Eigen::MatrixXf computeScatterMatrices(const float &radius);
55 | };
56 |
--------------------------------------------------------------------------------
/submodules/octree_handler/tests/octree_cpp_testing.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | import octree_handler
4 | import time
5 |
6 | if __name__ == "__main__":
7 | octree = octree_handler.Octree()
8 | points = np.array([[0, -5, 0], # 0
9 | [0, -4, 0], # 1
10 | [0, -3, 0], # 2
11 | [0, -2, 0], # 3
12 | [0, -1, 0], # 4
13 | [0, 0, 0], # 5
14 | [1, 0, 0], # 6
15 | [2, 0, 0], # 7
16 | [3, 0, 0], # 8
17 | [4, 0, 0], # 9
18 | [5, 0, 0], # 10
19 | # [100, 100, 100], # 11: not valid point
20 | ], dtype='float32')
21 | print(points)
22 | octree.setInput(points)
23 | b = octree.radiusSearch(5, 3.1)
24 | print(b)
25 | print(20*"-", "search all", 20*"-")
26 | ind = octree.radiusSearchAll(20, 2.1)
27 | print(ind)
28 | print(ind.shape, ind.dtype)
29 |
30 | ##############################
31 | ###### lot of points #########
32 | ##############################
33 | nr_p = int(1e4)
34 | print(20*"-", f"#points: {nr_p}", 20*"-")
35 | points = np.random.normal(scale=10.0, size=(nr_p, 3))
36 | t_init = time.time()
37 | octree.setInput(points)
38 | t_init = time.time()-t_init
39 | t_search = time.time()
40 | ind = octree.radiusSearchAll(10, 0.1)
41 | t_search = time.time() - t_search
42 | t_overall = t_init+t_search
43 | print(f'time: init {t_init}s, search {t_search}s, overall {t_overall}s')
44 | print(ind.shape)
45 |
--------------------------------------------------------------------------------