├── .gitignore
├── INSTALL.md
├── LICENSE.txt
├── README.md
├── cpp_wrappers
│   ├── compile_wrappers.sh
│   ├── cpp_neighbors
│   │   ├── build.bat
│   │   ├── neighbors
│   │   │   ├── neighbors.cpp
│   │   │   └── neighbors.h
│   │   ├── setup.py
│   │   └── wrapper.cpp
│   ├── cpp_subsampling
│   │   ├── build.bat
│   │   ├── grid_subsampling
│   │   │   ├── grid_subsampling.cpp
│   │   │   └── grid_subsampling.h
│   │   ├── setup.py
│   │   └── wrapper.cpp
│   └── cpp_utils
│       ├── cloud
│       │   ├── cloud.cpp
│       │   └── cloud.h
│       └── nanoflann
│           └── nanoflann.hpp
├── datasets
│   ├── ModelNet40.py
│   ├── NPM3D.py
│   ├── S3DIS.py
│   ├── SemanticKitti.py
│   ├── SensatUrban.py
│   ├── Toronto3D.py
│   └── common.py
├── doc
│   ├── Github_intro.png
│   ├── object_classification_guide.md
│   ├── pretrained_models_guide.md
│   ├── scene_segmentation_guide.md
│   ├── slam_segmentation_guide.md
│   └── visualization_guide.md
├── kernels
│   └── kernel_points.py
├── models
│   ├── architectures.py
│   └── blocks.py
├── plot_convergence.py
├── test_models.py
├── train_ModelNet40.py
├── train_NPM3D.py
├── train_S3DIS.py
├── train_SemanticKitti.py
├── train_SensatUrban.py
├── train_Toronto3D.py
├── utils
│   ├── config.py
│   ├── mayavi_visu.py
│   ├── metrics.py
│   ├── ply.py
│   ├── tester.py
│   ├── trainer.py
│   └── visualizer.py
└── visualize_deformations.py
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | **/build
3 | **/desktop.ini
4 | /results
5 | /test
6 | /docker_scripts
7 | /kernels/dispositions
8 | core
9 |
10 | # VSCode related
11 | *.code-workspace
12 |
13 | # Byte-compiled / optimized / DLL files
14 | __pycache__/
15 | *.py[cod]
16 |
17 | # C extensions
18 | *.so
19 |
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 |
2 | # Installation instructions
3 |
4 | ## Ubuntu 18.04
5 |
6 | * Make sure CUDA and cuDNN are installed. One configuration has been tested:
7 | - PyTorch 1.4.0, CUDA 10.1 and cuDNN 7.6
8 |
9 | * Ensure the required Python system packages are installed:
10 |
11 | sudo apt update
12 | sudo apt install python3-dev python3-pip python3-tk
13 |
14 | * Follow the PyTorch installation procedure.
15 |
16 | * Install the other dependencies with pip:
17 | - numpy
18 | - scikit-learn
19 | - PyYAML
20 | - matplotlib (for visualization)
21 | - mayavi (for visualization)
22 | - PyQt5 (for visualization)
23 |
24 | * Compile the C++ extension modules for python located in `cpp_wrappers`. Open a terminal in this folder, and run:
25 |
26 | sh compile_wrappers.sh
27 |
28 | You should now be able to train Kernel-Point Convolution models.
29 |
30 | ## Windows 10
31 |
32 | * Make sure CUDA and cuDNN are installed. One configuration has been tested:
33 | - PyTorch 1.4.0, CUDA 10.1 and cuDNN 7.5
34 |
35 | * Follow the PyTorch installation procedure.
36 |
37 | * We used the PyCharm IDE to pip install all Python dependencies (including PyTorch) in a venv:
38 | - torch
39 | - torchvision
40 | - numpy
41 | - scikit-learn
42 | - PyYAML
43 | - matplotlib (for visualization)
44 | - mayavi (for visualization)
45 | - PyQt5 (for visualization)
46 |
47 | * Compile the C++ extension modules for python located in `cpp_wrappers`. You just have to execute two .bat files:
48 |
49 | cpp_wrappers/cpp_neighbors/build.bat
50 |
51 | and
52 |
53 | cpp_wrappers/cpp_subsampling/build.bat
54 |
55 | You should now be able to train Kernel-Point Convolution models.
56 |
57 |
--------------------------------------------------------------------------------
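The pip dependencies listed in INSTALL.md above can usually be installed in one step, assuming the standard PyPI package names (a convenience sketch, not a command taken from the repository; mayavi and PyQt5 are only needed by the visualization scripts):

    pip3 install numpy scikit-learn PyYAML matplotlib mayavi PyQt5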
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 HuguesTHOMAS
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | ![KPConv intro figure](./doc/Github_intro.png)
3 |
4 | Created by Hugues THOMAS
5 |
6 | ## Introduction
7 |
8 | This repository contains the implementation of **Kernel Point Convolution** (KPConv) in [PyTorch](https://pytorch.org/).
9 |
10 | KPConv is also available in [Tensorflow](https://github.com/HuguesTHOMAS/KPConv) (original but older implementation).
11 |
12 | Another implementation of KPConv is available in [PyTorch-Points-3D](https://github.com/nicolas-chaulet/torch-points3d).
13 |
14 | KPConv is a point convolution operator presented in our ICCV2019 paper ([arXiv](https://arxiv.org/abs/1904.08889)). If you find our work useful in your
15 | research, please consider citing:
16 |
17 | ```
18 | @article{thomas2019KPConv,
19 | Author = {Thomas, Hugues and Qi, Charles R. and Deschaud, Jean-Emmanuel and Marcotegui, Beatriz and Goulette, Fran{\c{c}}ois and Guibas, Leonidas J.},
20 | Title = {KPConv: Flexible and Deformable Convolution for Point Clouds},
21 | Journal = {Proceedings of the IEEE International Conference on Computer Vision},
22 | Year = {2019}
23 | }
24 | ```
25 |
26 | ## Installation
27 |
28 | This implementation has been tested on Ubuntu 18.04 and Windows 10. Details are provided in [INSTALL.md](./INSTALL.md).
29 |
30 |
31 | ## Experiments
32 |
33 | We provide scripts for three experiments: ModelNet40, S3DIS and SemanticKitti. The instructions to run these
34 | experiments are in the [doc](./doc) folder.
35 |
36 | * [Object Classification](./doc/object_classification_guide.md): Instructions to train KP-CNN on an object classification
37 | task (ModelNet40).
38 |
39 | * [Scene Segmentation](./doc/scene_segmentation_guide.md): Instructions to train KP-FCNN on a scene segmentation
40 | task (S3DIS).
41 |
42 | * [SLAM Segmentation](./doc/slam_segmentation_guide.md): Instructions to train KP-FCNN on a SLAM segmentation
43 | task (SemanticKitti).
44 |
45 | * [Pretrained models](./doc/pretrained_models_guide.md): We provide pretrained weights and instructions to load them.
46 |
47 | * [Visualization scripts](./doc/visualization_guide.md): For now, only one visualization script has been implemented:
48 | the kernel deformations display.
49 |
50 | ## Acknowledgment
51 |
52 | Our code uses the nanoflann library.
53 |
54 | ## License
55 | Our code is released under the MIT License (see [LICENSE.txt](./LICENSE.txt) for details).
56 |
57 | ## Updates
58 | * 27/04/2020: Initial release.
59 | * 27/04/2020: Added NPM3D support thanks to @GeoSur.
60 |
--------------------------------------------------------------------------------
/cpp_wrappers/compile_wrappers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Compile cpp subsampling
4 | cd cpp_subsampling
5 | python3 setup.py build_ext --inplace
6 | cd ..
7 |
8 | # Compile cpp neighbors
9 | cd cpp_neighbors
10 | python3 setup.py build_ext --inplace
11 | cd ..
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_neighbors/build.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | py setup.py build_ext --inplace
3 |
4 |
5 | pause
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_neighbors/neighbors/neighbors.cpp:
--------------------------------------------------------------------------------
1 |
2 | #include "neighbors.h"
3 |
4 |
5 | void brute_neighbors(vector<PointXYZ>& queries, vector<PointXYZ>& supports, vector<int>& neighbors_indices, float radius, int verbose)
6 | {
7 |
8 | // Initialize variables
9 | // ******************
10 |
11 | // square radius
12 | float r2 = radius * radius;
13 |
14 | // indices
15 | int i0 = 0;
16 |
17 | // Counting vector
18 | int max_count = 0;
19 | vector<vector<int>> tmp(queries.size());
20 |
21 | // Search neighbors indices
22 | // ***********************
23 |
24 | for (auto& p0 : queries)
25 | {
26 | int i = 0;
27 | for (auto& p : supports)
28 | {
29 | if ((p0 - p).sq_norm() < r2)
30 | {
31 | tmp[i0].push_back(i);
32 | if (tmp[i0].size() > max_count)
33 | max_count = tmp[i0].size();
34 | }
35 | i++;
36 | }
37 | i0++;
38 | }
39 |
40 | // Reserve the memory
41 | neighbors_indices.resize(queries.size() * max_count);
42 | i0 = 0;
43 | for (auto& inds : tmp)
44 | {
45 | for (int j = 0; j < max_count; j++)
46 | {
47 | if (j < inds.size())
48 | neighbors_indices[i0 * max_count + j] = inds[j];
49 | else
50 | neighbors_indices[i0 * max_count + j] = -1;
51 | }
52 | i0++;
53 | }
54 |
55 | return;
56 | }
57 |
58 | void ordered_neighbors(vector<PointXYZ>& queries,
59 | vector<PointXYZ>& supports,
60 | vector<int>& neighbors_indices,
61 | float radius)
62 | {
63 |
64 | // Initialize variables
65 | // ******************
66 |
67 | // square radius
68 | float r2 = radius * radius;
69 |
70 | // indices
71 | int i0 = 0;
72 |
73 | // Counting vector
74 | int max_count = 0;
75 | float d2;
76 | vector<vector<int>> tmp(queries.size());
77 | vector<vector<float>> dists(queries.size());
78 |
79 | // Search neighbors indices
80 | // ***********************
81 |
82 | for (auto& p0 : queries)
83 | {
84 | int i = 0;
85 | for (auto& p : supports)
86 | {
87 | d2 = (p0 - p).sq_norm();
88 | if (d2 < r2)
89 | {
90 | // Find order of the new point
91 | auto it = std::upper_bound(dists[i0].begin(), dists[i0].end(), d2);
92 | int index = std::distance(dists[i0].begin(), it);
93 |
94 | // Insert element
95 | dists[i0].insert(it, d2);
96 | tmp[i0].insert(tmp[i0].begin() + index, i);
97 |
98 | // Update max count
99 | if (tmp[i0].size() > max_count)
100 | max_count = tmp[i0].size();
101 | }
102 | i++;
103 | }
104 | i0++;
105 | }
106 |
107 | // Reserve the memory
108 | neighbors_indices.resize(queries.size() * max_count);
109 | i0 = 0;
110 | for (auto& inds : tmp)
111 | {
112 | for (int j = 0; j < max_count; j++)
113 | {
114 | if (j < inds.size())
115 | neighbors_indices[i0 * max_count + j] = inds[j];
116 | else
117 | neighbors_indices[i0 * max_count + j] = -1;
118 | }
119 | i0++;
120 | }
121 |
122 | return;
123 | }
124 |
125 | void batch_ordered_neighbors(vector<PointXYZ>& queries,
126 | vector<PointXYZ>& supports,
127 | vector<int>& q_batches,
128 | vector<int>& s_batches,
129 | vector<int>& neighbors_indices,
130 | float radius)
131 | {
132 |
133 | // Initialize variables
134 | // ******************
135 |
136 | // square radius
137 | float r2 = radius * radius;
138 |
139 | // indices
140 | int i0 = 0;
141 |
142 | // Counting vector
143 | int max_count = 0;
144 | float d2;
145 | vector<vector<int>> tmp(queries.size());
146 | vector<vector<float>> dists(queries.size());
147 |
148 | // batch index
149 | int b = 0;
150 | int sum_qb = 0;
151 | int sum_sb = 0;
152 |
153 |
154 | // Search neighbors indices
155 | // ***********************
156 |
157 | for (auto& p0 : queries)
158 | {
159 | // Check if we changed batch
160 | if (i0 == sum_qb + q_batches[b])
161 | {
162 | sum_qb += q_batches[b];
163 | sum_sb += s_batches[b];
164 | b++;
165 | }
166 |
167 | // Loop only over the supports of current batch
168 | vector<PointXYZ>::iterator p_it;
169 | int i = 0;
170 | for(p_it = supports.begin() + sum_sb; p_it < supports.begin() + sum_sb + s_batches[b]; p_it++ )
171 | {
172 | d2 = (p0 - *p_it).sq_norm();
173 | if (d2 < r2)
174 | {
175 | // Find order of the new point
176 | auto it = std::upper_bound(dists[i0].begin(), dists[i0].end(), d2);
177 | int index = std::distance(dists[i0].begin(), it);
178 |
179 | // Insert element
180 | dists[i0].insert(it, d2);
181 | tmp[i0].insert(tmp[i0].begin() + index, sum_sb + i);
182 |
183 | // Update max count
184 | if (tmp[i0].size() > max_count)
185 | max_count = tmp[i0].size();
186 | }
187 | i++;
188 | }
189 | i0++;
190 | }
191 |
192 | // Reserve the memory
193 | neighbors_indices.resize(queries.size() * max_count);
194 | i0 = 0;
195 | for (auto& inds : tmp)
196 | {
197 | for (int j = 0; j < max_count; j++)
198 | {
199 | if (j < inds.size())
200 | neighbors_indices[i0 * max_count + j] = inds[j];
201 | else
202 | neighbors_indices[i0 * max_count + j] = supports.size();
203 | }
204 | i0++;
205 | }
206 |
207 | return;
208 | }
209 |
210 |
211 | void batch_nanoflann_neighbors(vector<PointXYZ>& queries,
212 | vector<PointXYZ>& supports,
213 | vector<int>& q_batches,
214 | vector<int>& s_batches,
215 | vector<int>& neighbors_indices,
216 | float radius)
217 | {
218 |
219 | // Initialize variables
220 | // ******************
221 |
222 | // indices
223 | int i0 = 0;
224 |
225 | // Square radius
226 | float r2 = radius * radius;
227 |
228 | // Counting vector
229 | int max_count = 0;
230 | float d2;
231 | vector<vector<pair<size_t, float>>> all_inds_dists(queries.size());
232 |
233 | // batch index
234 | int b = 0;
235 | int sum_qb = 0;
236 | int sum_sb = 0;
237 |
238 | // Nanoflann related variables
239 | // ***************************
240 |
241 | // Cloud variable
242 | PointCloud current_cloud;
243 |
244 | // Tree parameters
245 | nanoflann::KDTreeSingleIndexAdaptorParams tree_params(10 /* max leaf */);
246 |
247 | // KDTree type definition
248 | typedef nanoflann::KDTreeSingleIndexAdaptor< nanoflann::L2_Simple_Adaptor<float, PointCloud> ,
249 | PointCloud,
250 | 3 > my_kd_tree_t;
251 |
252 | // Pointer to trees
253 | my_kd_tree_t* index;
254 |
255 | // Build KDTree for the first batch element
256 | current_cloud.pts = vector<PointXYZ>(supports.begin() + sum_sb, supports.begin() + sum_sb + s_batches[b]);
257 | index = new my_kd_tree_t(3, current_cloud, tree_params);
258 | index->buildIndex();
259 |
260 |
261 | // Search neighbors indices
262 | // ***********************
263 |
264 | // Search params
265 | nanoflann::SearchParams search_params;
266 | search_params.sorted = true;
267 |
268 | for (auto& p0 : queries)
269 | {
270 |
271 | // Check if we changed batch
272 | if (i0 == sum_qb + q_batches[b])
273 | {
274 | sum_qb += q_batches[b];
275 | sum_sb += s_batches[b];
276 | b++;
277 |
278 | // Change the points
279 | current_cloud.pts.clear();
280 | current_cloud.pts = vector<PointXYZ>(supports.begin() + sum_sb, supports.begin() + sum_sb + s_batches[b]);
281 |
282 | // Build KDTree of the current element of the batch
283 | delete index;
284 | index = new my_kd_tree_t(3, current_cloud, tree_params);
285 | index->buildIndex();
286 | }
287 |
288 | // Initial guess of neighbors size
289 | all_inds_dists[i0].reserve(max_count);
290 |
291 | // Find neighbors
292 | float query_pt[3] = { p0.x, p0.y, p0.z};
293 | size_t nMatches = index->radiusSearch(query_pt, r2, all_inds_dists[i0], search_params);
294 |
295 | // Update max count
296 | if (nMatches > max_count)
297 | max_count = nMatches;
298 |
299 | // Increment query idx
300 | i0++;
301 | }
302 |
303 | // Reserve the memory
304 | neighbors_indices.resize(queries.size() * max_count);
305 | i0 = 0;
306 | sum_sb = 0;
307 | sum_qb = 0;
308 | b = 0;
309 | for (auto& inds_dists : all_inds_dists)
310 | {
311 | // Check if we changed batch
312 | if (i0 == sum_qb + q_batches[b])
313 | {
314 | sum_qb += q_batches[b];
315 | sum_sb += s_batches[b];
316 | b++;
317 | }
318 |
319 | for (int j = 0; j < max_count; j++)
320 | {
321 | if (j < inds_dists.size())
322 | neighbors_indices[i0 * max_count + j] = inds_dists[j].first + sum_sb;
323 | else
324 | neighbors_indices[i0 * max_count + j] = supports.size();
325 | }
326 | i0++;
327 | }
328 |
329 | delete index;
330 |
331 | return;
332 | }
333 |
334 |
--------------------------------------------------------------------------------
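For readers more comfortable in Python, here is a minimal NumPy sketch (an illustration, not code from this repository; the function name is made up) of the output convention the functions above implement: each query row is padded to the size of the largest neighborhood, with -1 in `brute_neighbors`/`ordered_neighbors` and `supports.size()` in the batch variants.

```python
# Minimal NumPy sketch of brute_neighbors' output convention:
# indices of all supports within `radius` of each query, padded
# with -1 to the width of the largest neighborhood.
import numpy as np

def brute_neighbors_np(queries, supports, radius):
    # Squared distance between every query and every support point
    d2 = ((queries[:, None, :] - supports[None, :, :]) ** 2).sum(-1)
    in_range = d2 < radius ** 2
    max_count = int(in_range.sum(axis=1).max())
    out = np.full((len(queries), max_count), -1, dtype=np.int32)
    for i, row in enumerate(in_range):
        inds = np.flatnonzero(row)
        out[i, :len(inds)] = inds
    return out

queries = np.random.rand(5, 3).astype(np.float32)
supports = np.random.rand(50, 3).astype(np.float32)
print(brute_neighbors_np(queries, supports, 0.3).shape)  # (5, max_count)
```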
/cpp_wrappers/cpp_neighbors/neighbors/neighbors.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #include "../../cpp_utils/cloud/cloud.h"
4 | #include "../../cpp_utils/nanoflann/nanoflann.hpp"
5 |
6 | #include <vector>
7 | #include <algorithm>
8 |
9 | using namespace std;
10 |
11 |
12 | void ordered_neighbors(vector<PointXYZ>& queries,
13 | vector<PointXYZ>& supports,
14 | vector<int>& neighbors_indices,
15 | float radius);
16 |
17 | void batch_ordered_neighbors(vector<PointXYZ>& queries,
18 | vector<PointXYZ>& supports,
19 | vector<int>& q_batches,
20 | vector<int>& s_batches,
21 | vector<int>& neighbors_indices,
22 | float radius);
23 |
24 | void batch_nanoflann_neighbors(vector<PointXYZ>& queries,
25 | vector<PointXYZ>& supports,
26 | vector<int>& q_batches,
27 | vector<int>& s_batches,
28 | vector<int>& neighbors_indices,
29 | float radius);
30 |
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_neighbors/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup, Extension
2 | import numpy.distutils.misc_util
3 |
4 | # Definition of the C++ extension module
5 | # ***************************************
6 |
7 | # Adding sources of the project
8 | # *****************************
9 |
10 | SOURCES = ["../cpp_utils/cloud/cloud.cpp",
11 | "neighbors/neighbors.cpp",
12 | "wrapper.cpp"]
13 |
14 | module = Extension(name="radius_neighbors",
15 | sources=SOURCES,
16 | extra_compile_args=['-std=c++11',
17 | '-D_GLIBCXX_USE_CXX11_ABI=0'])
18 |
19 |
20 | setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_neighbors/wrapper.cpp:
--------------------------------------------------------------------------------
1 | #include <Python.h>
2 | #include <numpy/arrayobject.h>
3 | #include "neighbors/neighbors.h"
4 | #include <cstring>
5 |
6 |
7 |
8 | // docstrings for our module
9 | // *************************
10 |
11 | static char module_docstring[] = "This module provides two methods to compute radius neighbors from pointclouds or batch of pointclouds";
12 |
13 | static char batch_query_docstring[] = "Method to get radius neighbors in a batch of stacked pointclouds";
14 |
15 |
16 | // Declare the functions
17 | // *********************
18 |
19 | static PyObject *batch_neighbors(PyObject *self, PyObject *args, PyObject *keywds);
20 |
21 |
22 | // Specify the members of the module
23 | // *********************************
24 |
25 | static PyMethodDef module_methods[] =
26 | {
27 | { "batch_query", (PyCFunction)batch_neighbors, METH_VARARGS | METH_KEYWORDS, batch_query_docstring },
28 | {NULL, NULL, 0, NULL}
29 | };
30 |
31 |
32 | // Initialize the module
33 | // *********************
34 |
35 | static struct PyModuleDef moduledef =
36 | {
37 | PyModuleDef_HEAD_INIT,
38 | "radius_neighbors", // m_name
39 | module_docstring, // m_doc
40 | -1, // m_size
41 | module_methods, // m_methods
42 | NULL, // m_reload
43 | NULL, // m_traverse
44 | NULL, // m_clear
45 | NULL, // m_free
46 | };
47 |
48 | PyMODINIT_FUNC PyInit_radius_neighbors(void)
49 | {
50 | import_array();
51 | return PyModule_Create(&moduledef);
52 | }
53 |
54 |
55 | // Definition of the batch_neighbors method
56 | // *****************************************
57 |
58 | static PyObject* batch_neighbors(PyObject* self, PyObject* args, PyObject* keywds)
59 | {
60 |
61 | // Manage inputs
62 | // *************
63 |
64 | // Args containers
65 | PyObject* queries_obj = NULL;
66 | PyObject* supports_obj = NULL;
67 | PyObject* q_batches_obj = NULL;
68 | PyObject* s_batches_obj = NULL;
69 |
70 | // Keywords containers
71 | static char* kwlist[] = { "queries", "supports", "q_batches", "s_batches", "radius", NULL };
72 | float radius = 0.1;
73 |
74 | // Parse the input
75 | if (!PyArg_ParseTupleAndKeywords(args, keywds, "OOOO|$f", kwlist, &queries_obj, &supports_obj, &q_batches_obj, &s_batches_obj, &radius))
76 | {
77 | PyErr_SetString(PyExc_RuntimeError, "Error parsing arguments");
78 | return NULL;
79 | }
80 |
81 |
82 | // Interpret the input objects as numpy arrays.
83 | PyObject* queries_array = PyArray_FROM_OTF(queries_obj, NPY_FLOAT, NPY_IN_ARRAY);
84 | PyObject* supports_array = PyArray_FROM_OTF(supports_obj, NPY_FLOAT, NPY_IN_ARRAY);
85 | PyObject* q_batches_array = PyArray_FROM_OTF(q_batches_obj, NPY_INT, NPY_IN_ARRAY);
86 | PyObject* s_batches_array = PyArray_FROM_OTF(s_batches_obj, NPY_INT, NPY_IN_ARRAY);
87 |
88 | // Verify data was loaded correctly.
89 | if (queries_array == NULL)
90 | {
91 | Py_XDECREF(queries_array);
92 | Py_XDECREF(supports_array);
93 | Py_XDECREF(q_batches_array);
94 | Py_XDECREF(s_batches_array);
95 | PyErr_SetString(PyExc_RuntimeError, "Error converting query points to numpy arrays of type float32");
96 | return NULL;
97 | }
98 | if (supports_array == NULL)
99 | {
100 | Py_XDECREF(queries_array);
101 | Py_XDECREF(supports_array);
102 | Py_XDECREF(q_batches_array);
103 | Py_XDECREF(s_batches_array);
104 | PyErr_SetString(PyExc_RuntimeError, "Error converting support points to numpy arrays of type float32");
105 | return NULL;
106 | }
107 | if (q_batches_array == NULL)
108 | {
109 | Py_XDECREF(queries_array);
110 | Py_XDECREF(supports_array);
111 | Py_XDECREF(q_batches_array);
112 | Py_XDECREF(s_batches_array);
113 | PyErr_SetString(PyExc_RuntimeError, "Error converting query batches to numpy arrays of type int32");
114 | return NULL;
115 | }
116 | if (s_batches_array == NULL)
117 | {
118 | Py_XDECREF(queries_array);
119 | Py_XDECREF(supports_array);
120 | Py_XDECREF(q_batches_array);
121 | Py_XDECREF(s_batches_array);
122 | PyErr_SetString(PyExc_RuntimeError, "Error converting support batches to numpy arrays of type int32");
123 | return NULL;
124 | }
125 |
126 | // Check that the input array respect the dims
127 | if ((int)PyArray_NDIM(queries_array) != 2 || (int)PyArray_DIM(queries_array, 1) != 3)
128 | {
129 | Py_XDECREF(queries_array);
130 | Py_XDECREF(supports_array);
131 | Py_XDECREF(q_batches_array);
132 | Py_XDECREF(s_batches_array);
133 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : query.shape is not (N, 3)");
134 | return NULL;
135 | }
136 | if ((int)PyArray_NDIM(supports_array) != 2 || (int)PyArray_DIM(supports_array, 1) != 3)
137 | {
138 | Py_XDECREF(queries_array);
139 | Py_XDECREF(supports_array);
140 | Py_XDECREF(q_batches_array);
141 | Py_XDECREF(s_batches_array);
142 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : support.shape is not (N, 3)");
143 | return NULL;
144 | }
145 | if ((int)PyArray_NDIM(q_batches_array) > 1)
146 | {
147 | Py_XDECREF(queries_array);
148 | Py_XDECREF(supports_array);
149 | Py_XDECREF(q_batches_array);
150 | Py_XDECREF(s_batches_array);
151 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : queries_batches.shape is not (B,) ");
152 | return NULL;
153 | }
154 | if ((int)PyArray_NDIM(s_batches_array) > 1)
155 | {
156 | Py_XDECREF(queries_array);
157 | Py_XDECREF(supports_array);
158 | Py_XDECREF(q_batches_array);
159 | Py_XDECREF(s_batches_array);
160 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : supports_batches.shape is not (B,) ");
161 | return NULL;
162 | }
163 | if ((int)PyArray_DIM(q_batches_array, 0) != (int)PyArray_DIM(s_batches_array, 0))
164 | {
165 | Py_XDECREF(queries_array);
166 | Py_XDECREF(supports_array);
167 | Py_XDECREF(q_batches_array);
168 | Py_XDECREF(s_batches_array);
169 | PyErr_SetString(PyExc_RuntimeError, "Wrong number of batch elements: different for queries and supports ");
170 | return NULL;
171 | }
172 |
173 | // Number of points
174 | int Nq = (int)PyArray_DIM(queries_array, 0);
175 | int Ns = (int)PyArray_DIM(supports_array, 0);
176 |
177 | // Number of batches
178 | int Nb = (int)PyArray_DIM(q_batches_array, 0);
179 |
180 | // Call the C++ function
181 | // *********************
182 |
183 | // Convert PyArray to Cloud C++ class
184 | vector<PointXYZ> queries;
185 | vector<PointXYZ> supports;
186 | vector<int> q_batches;
187 | vector<int> s_batches;
188 | queries = vector<PointXYZ>((PointXYZ*)PyArray_DATA(queries_array), (PointXYZ*)PyArray_DATA(queries_array) + Nq);
189 | supports = vector<PointXYZ>((PointXYZ*)PyArray_DATA(supports_array), (PointXYZ*)PyArray_DATA(supports_array) + Ns);
190 | q_batches = vector<int>((int*)PyArray_DATA(q_batches_array), (int*)PyArray_DATA(q_batches_array) + Nb);
191 | s_batches = vector<int>((int*)PyArray_DATA(s_batches_array), (int*)PyArray_DATA(s_batches_array) + Nb);
192 |
193 | // Create result containers
194 | vector<int> neighbors_indices;
195 |
196 | // Compute results
197 | //batch_ordered_neighbors(queries, supports, q_batches, s_batches, neighbors_indices, radius);
198 | batch_nanoflann_neighbors(queries, supports, q_batches, s_batches, neighbors_indices, radius);
199 |
200 | // Check result
201 | if (neighbors_indices.size() < 1)
202 | {
203 | PyErr_SetString(PyExc_RuntimeError, "Error");
204 | return NULL;
205 | }
206 |
207 | // Manage outputs
208 | // **************
209 |
210 | // Maximal number of neighbors
211 | int max_neighbors = neighbors_indices.size() / Nq;
212 |
213 | // Dimension of output containers
214 | npy_intp neighbors_dims[2];
215 | neighbors_dims[0] = Nq;
216 | neighbors_dims[1] = max_neighbors;
217 |
218 | // Create output array
219 | PyObject* res_obj = PyArray_SimpleNew(2, neighbors_dims, NPY_INT);
220 | PyObject* ret = NULL;
221 |
222 | // Fill output array with values
223 | size_t size_in_bytes = Nq * max_neighbors * sizeof(int);
224 | memcpy(PyArray_DATA(res_obj), neighbors_indices.data(), size_in_bytes);
225 |
226 | // Merge results
227 | ret = Py_BuildValue("N", res_obj);
228 |
229 | // Clean up
230 | // ********
231 |
232 | Py_XDECREF(queries_array);
233 | Py_XDECREF(supports_array);
234 | Py_XDECREF(q_batches_array);
235 | Py_XDECREF(s_batches_array);
236 |
237 | return ret;
238 | }
239 |
--------------------------------------------------------------------------------
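Once built (see `compile_wrappers.sh` or `build.bat`), the extension is imported as `radius_neighbors` and exposes `batch_query`, as registered in the method table above. A hedged usage sketch, with shapes and dtypes matching the checks in this wrapper:

```python
# Usage sketch for the compiled radius_neighbors module (assumed to be
# built and importable; names follow the wrapper above).
import numpy as np
import radius_neighbors

queries = np.random.rand(100, 3).astype(np.float32)   # (N, 3) float32
supports = np.random.rand(500, 3).astype(np.float32)  # (M, 3) float32
q_batches = np.array([60, 40], dtype=np.int32)        # per-batch query counts, sum = N
s_batches = np.array([300, 200], dtype=np.int32)      # per-batch support counts, sum = M

# Returns an (N, max_neighbors) int32 array; rows are padded with
# len(supports), the shadow index used by batch_nanoflann_neighbors.
neighb_inds = radius_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=0.1)
print(neighb_inds.shape)
```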
/cpp_wrappers/cpp_subsampling/build.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | py setup.py build_ext --inplace
3 |
4 |
5 | pause
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_subsampling/grid_subsampling/grid_subsampling.cpp:
--------------------------------------------------------------------------------
1 |
2 | #include "grid_subsampling.h"
3 |
4 |
5 | void grid_subsampling(vector<PointXYZ>& original_points,
6 | vector<PointXYZ>& subsampled_points,
7 | vector<float>& original_features,
8 | vector<float>& subsampled_features,
9 | vector<int>& original_classes,
10 | vector<int>& subsampled_classes,
11 | float sampleDl,
12 | int verbose) {
13 |
14 | // Initialize variables
15 | // ******************
16 |
17 | // Number of points in the cloud
18 | size_t N = original_points.size();
19 |
20 | // Dimension of the features
21 | size_t fdim = original_features.size() / N;
22 | size_t ldim = original_classes.size() / N;
23 |
24 | // Limits of the cloud
25 | PointXYZ minCorner = min_point(original_points);
26 | PointXYZ maxCorner = max_point(original_points);
27 | PointXYZ originCorner = floor(minCorner * (1/sampleDl)) * sampleDl;
28 |
29 | // Dimensions of the grid
30 | size_t sampleNX = (size_t)floor((maxCorner.x - originCorner.x) / sampleDl) + 1;
31 | size_t sampleNY = (size_t)floor((maxCorner.y - originCorner.y) / sampleDl) + 1;
32 | //size_t sampleNZ = (size_t)floor((maxCorner.z - originCorner.z) / sampleDl) + 1;
33 |
34 | // Check if features and classes need to be processed
35 | bool use_feature = original_features.size() > 0;
36 | bool use_classes = original_classes.size() > 0;
37 |
38 |
39 | // Create the sampled map
40 | // **********************
41 |
42 | // Verbose parameters
43 | int i = 0;
44 | int nDisp = N / 100;
45 |
46 | // Initialize variables
47 | size_t iX, iY, iZ, mapIdx;
48 | unordered_map<size_t, SampledData> data;
49 |
50 | for (auto& p : original_points)
51 | {
52 | // Position of point in sample map
53 | iX = (size_t)floor((p.x - originCorner.x) / sampleDl);
54 | iY = (size_t)floor((p.y - originCorner.y) / sampleDl);
55 | iZ = (size_t)floor((p.z - originCorner.z) / sampleDl);
56 | mapIdx = iX + sampleNX*iY + sampleNX*sampleNY*iZ;
57 |
58 | // If not already created, create key
59 | if (data.count(mapIdx) < 1)
60 | data.emplace(mapIdx, SampledData(fdim, ldim));
61 |
62 | // Fill the sample map
63 | if (use_feature && use_classes)
64 | data[mapIdx].update_all(p, original_features.begin() + i * fdim, original_classes.begin() + i * ldim);
65 | else if (use_feature)
66 | data[mapIdx].update_features(p, original_features.begin() + i * fdim);
67 | else if (use_classes)
68 | data[mapIdx].update_classes(p, original_classes.begin() + i * ldim);
69 | else
70 | data[mapIdx].update_points(p);
71 |
72 | // Display
73 | i++;
74 | if (verbose > 1 && i%nDisp == 0)
75 | std::cout << "\rSampled Map : " << std::setw(3) << i / nDisp << "%";
76 |
77 | }
78 |
79 | // Divide for barycentre and transfer to a vector
80 | subsampled_points.reserve(data.size());
81 | if (use_feature)
82 | subsampled_features.reserve(data.size() * fdim);
83 | if (use_classes)
84 | subsampled_classes.reserve(data.size() * ldim);
85 | for (auto& v : data)
86 | {
87 | subsampled_points.push_back(v.second.point * (1.0 / v.second.count));
88 | if (use_feature)
89 | {
90 | float count = (float)v.second.count;
91 | transform(v.second.features.begin(),
92 | v.second.features.end(),
93 | v.second.features.begin(),
94 | [count](float f) { return f / count;});
95 | subsampled_features.insert(subsampled_features.end(),v.second.features.begin(),v.second.features.end());
96 | }
97 | if (use_classes)
98 | {
99 | for (int i = 0; i < ldim; i++)
100 | subsampled_classes.push_back(max_element(v.second.labels[i].begin(), v.second.labels[i].end(),
101 | [](const pair<int, int>&a, const pair<int, int>&b){return a.second < b.second;})->first);
102 | }
103 | }
104 |
105 | return;
106 | }
107 |
108 |
109 | void batch_grid_subsampling(vector<PointXYZ>& original_points,
110 | vector<PointXYZ>& subsampled_points,
111 | vector<float>& original_features,
112 | vector<float>& subsampled_features,
113 | vector<int>& original_classes,
114 | vector<int>& subsampled_classes,
115 | vector<int>& original_batches,
116 | vector<int>& subsampled_batches,
117 | float sampleDl,
118 | int max_p)
119 | {
120 | // Initialize variables
121 | // ******************
122 |
123 | int b = 0;
124 | int sum_b = 0;
125 |
126 | // Number of points in the cloud
127 | size_t N = original_points.size();
128 |
129 | // Dimension of the features
130 | size_t fdim = original_features.size() / N;
131 | size_t ldim = original_classes.size() / N;
132 |
133 | // Handle max_p = 0
134 | if (max_p < 1)
135 | max_p = N;
136 |
137 | // Loop over batches
138 | // *****************
139 |
140 | for (b = 0; b < original_batches.size(); b++)
141 | {
142 |
143 | // Extract batch points features and labels
144 | vector<PointXYZ> b_o_points = vector<PointXYZ>(original_points.begin() + sum_b,
145 | original_points.begin() + sum_b + original_batches[b]);
146 |
147 | vector<float> b_o_features;
148 | if (original_features.size() > 0)
149 | {
150 | b_o_features = vector<float>(original_features.begin() + sum_b * fdim,
151 | original_features.begin() + (sum_b + original_batches[b]) * fdim);
152 | }
153 |
154 | vector<int> b_o_classes;
155 | if (original_classes.size() > 0)
156 | {
157 | b_o_classes = vector<int>(original_classes.begin() + sum_b * ldim,
158 | original_classes.begin() + (sum_b + original_batches[b]) * ldim);
159 | }
160 |
161 |
162 | // Create result containers
163 | vector<PointXYZ> b_s_points;
164 | vector<float> b_s_features;
165 | vector<int> b_s_classes;
166 |
167 | // Compute subsampling on current batch
168 | grid_subsampling(b_o_points,
169 | b_s_points,
170 | b_o_features,
171 | b_s_features,
172 | b_o_classes,
173 | b_s_classes,
174 | sampleDl,
175 | 0);
176 |
177 | // Stack batches points features and labels
178 | // ****************************************
179 |
180 | // If the batch has too many points, keep only the first max_p
181 | if (b_s_points.size() <= (size_t)max_p)
182 | {
183 | subsampled_points.insert(subsampled_points.end(), b_s_points.begin(), b_s_points.end());
184 |
185 | if (original_features.size() > 0)
186 | subsampled_features.insert(subsampled_features.end(), b_s_features.begin(), b_s_features.end());
187 |
188 | if (original_classes.size() > 0)
189 | subsampled_classes.insert(subsampled_classes.end(), b_s_classes.begin(), b_s_classes.end());
190 |
191 | subsampled_batches.push_back(b_s_points.size());
192 | }
193 | else
194 | {
195 | subsampled_points.insert(subsampled_points.end(), b_s_points.begin(), b_s_points.begin() + max_p);
196 |
197 | if (original_features.size() > 0)
198 | subsampled_features.insert(subsampled_features.end(), b_s_features.begin(), b_s_features.begin() + max_p * fdim);
199 |
200 | if (original_classes.size() > 0)
201 | subsampled_classes.insert(subsampled_classes.end(), b_s_classes.begin(), b_s_classes.begin() + max_p * ldim);
202 |
203 | subsampled_batches.push_back(max_p);
204 | }
205 |
206 | // Stack new batch lengths
207 | sum_b += original_batches[b];
208 | }
209 |
210 | return;
211 | }
212 |
--------------------------------------------------------------------------------
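The core idea of `grid_subsampling` is compact enough to restate in NumPy. The sketch below (an illustration under the same cell-key scheme as the C++ above, not repository code) averages the points falling in each cell of size `sampleDl`; the real implementation additionally averages features and takes a majority vote over classes.

```python
# NumPy sketch of the voxel-grid barycentre scheme in grid_subsampling
# (illustration only): points sharing a grid cell are averaged.
import numpy as np

def grid_subsample_np(points, sampleDl):
    origin = np.floor(points.min(axis=0) / sampleDl) * sampleDl
    cells = np.floor((points - origin) / sampleDl).astype(np.int64)
    # Scalar cell key iX + nX*iY + nX*nY*iZ, as in the C++ mapIdx
    nX, nY = cells[:, 0].max() + 1, cells[:, 1].max() + 1
    keys = cells[:, 0] + nX * cells[:, 1] + nX * nY * cells[:, 2]
    _, inv, counts = np.unique(keys, return_inverse=True, return_counts=True)
    sums = np.zeros((counts.size, 3))
    np.add.at(sums, inv, points)  # accumulate point sums per occupied cell
    return (sums / counts[:, None]).astype(points.dtype)

pts = np.random.rand(1000, 3).astype(np.float32)
print(grid_subsample_np(pts, 0.1).shape)  # (n_cells, 3)
```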
/cpp_wrappers/cpp_subsampling/grid_subsampling/grid_subsampling.h:
--------------------------------------------------------------------------------
1 |
2 |
3 | #include "../../cpp_utils/cloud/cloud.h"
4 |
5 | #include <vector>
6 | #include <unordered_map>
7 |
8 | using namespace std;
9 |
10 | class SampledData
11 | {
12 | public:
13 |
14 | // Elements
15 | // ********
16 |
17 | int count;
18 | PointXYZ point;
19 | vector<float> features;
20 | vector<unordered_map<int, int>> labels;
21 |
22 |
23 | // Methods
24 | // *******
25 |
26 | // Constructor
27 | SampledData()
28 | {
29 | count = 0;
30 | point = PointXYZ();
31 | }
32 |
33 | SampledData(const size_t fdim, const size_t ldim)
34 | {
35 | count = 0;
36 | point = PointXYZ();
37 | features = vector<float>(fdim);
38 | labels = vector<unordered_map<int, int>>(ldim);
39 | }
40 |
41 | // Method Update
42 | void update_all(const PointXYZ p, vector<float>::iterator f_begin, vector<int>::iterator l_begin)
43 | {
44 | count += 1;
45 | point += p;
46 | transform (features.begin(), features.end(), f_begin, features.begin(), plus<float>());
47 | int i = 0;
48 | for(vector<int>::iterator it = l_begin; it != l_begin + labels.size(); ++it)
49 | {
50 | labels[i][*it] += 1;
51 | i++;
52 | }
53 | return;
54 | }
55 | void update_features(const PointXYZ p, vector<float>::iterator f_begin)
56 | {
57 | count += 1;
58 | point += p;
59 | transform (features.begin(), features.end(), f_begin, features.begin(), plus<float>());
60 | return;
61 | }
62 | void update_classes(const PointXYZ p, vector<int>::iterator l_begin)
63 | {
64 | count += 1;
65 | point += p;
66 | int i = 0;
67 | for(vector<int>::iterator it = l_begin; it != l_begin + labels.size(); ++it)
68 | {
69 | labels[i][*it] += 1;
70 | i++;
71 | }
72 | return;
73 | }
74 | void update_points(const PointXYZ p)
75 | {
76 | count += 1;
77 | point += p;
78 | return;
79 | }
80 | };
81 |
82 | void grid_subsampling(vector<PointXYZ>& original_points,
83 | vector<PointXYZ>& subsampled_points,
84 | vector<float>& original_features,
85 | vector<float>& subsampled_features,
86 | vector<int>& original_classes,
87 | vector<int>& subsampled_classes,
88 | float sampleDl,
89 | int verbose);
90 |
91 | void batch_grid_subsampling(vector<PointXYZ>& original_points,
92 | vector<PointXYZ>& subsampled_points,
93 | vector<float>& original_features,
94 | vector<float>& subsampled_features,
95 | vector<int>& original_classes,
96 | vector<int>& subsampled_classes,
97 | vector<int>& original_batches,
98 | vector<int>& subsampled_batches,
99 | float sampleDl,
100 | int max_p);
101 |
102 |
--------------------------------------------------------------------------------
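For the class labels, `SampledData` keeps one histogram per label dimension and `grid_subsampling` emits the most frequent entry. A small Python analogue of that vote (illustration only, not repository code):

```python
# Python analogue of the per-cell label vote: the subsampled class is
# the argmax of the cell's label histogram, mirroring the max_element
# lambda in grid_subsampling.cpp.
from collections import Counter

cell_labels = [2, 2, 5, 2, 7]          # labels of points in one cell
histogram = Counter(cell_labels)       # plays the role of labels[i]
print(histogram.most_common(1)[0][0])  # -> 2
```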
/cpp_wrappers/cpp_subsampling/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup, Extension
2 | import numpy.distutils.misc_util
3 |
4 | # Definition of the C++ extension module
5 | # ***************************************
6 |
7 | # Adding sources of the project
8 | # *****************************
9 |
10 | SOURCES = ["../cpp_utils/cloud/cloud.cpp",
11 | "grid_subsampling/grid_subsampling.cpp",
12 | "wrapper.cpp"]
13 |
14 | module = Extension(name="grid_subsampling",
15 | sources=SOURCES,
16 | extra_compile_args=['-std=c++11',
17 | '-D_GLIBCXX_USE_CXX11_ABI=0'])
18 |
19 |
20 | setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_utils/cloud/cloud.cpp:
--------------------------------------------------------------------------------
1 | //
2 | //
3 | // 0==========================0
4 | // | Local feature test |
5 | // 0==========================0
6 | //
7 | // version 1.0 :
8 | // >
9 | //
10 | //---------------------------------------------------
11 | //
12 | // Cloud source :
13 | // Define useful Functions/Methods
14 | //
15 | //----------------------------------------------------
16 | //
17 | // Hugues THOMAS - 10/02/2017
18 | //
19 |
20 |
21 | #include "cloud.h"
22 |
23 |
24 | // Getters
25 | // *******
26 |
27 | PointXYZ max_point(std::vector<PointXYZ> points)
28 | {
29 | // Initialize limits
30 | PointXYZ maxP(points[0]);
31 |
32 | // Loop over all points
33 | for (auto p : points)
34 | {
35 | if (p.x > maxP.x)
36 | maxP.x = p.x;
37 |
38 | if (p.y > maxP.y)
39 | maxP.y = p.y;
40 |
41 | if (p.z > maxP.z)
42 | maxP.z = p.z;
43 | }
44 |
45 | return maxP;
46 | }
47 |
48 | PointXYZ min_point(std::vector<PointXYZ> points)
49 | {
50 | // Initialize limits
51 | PointXYZ minP(points[0]);
52 |
53 | // Loop over all points
54 | for (auto p : points)
55 | {
56 | if (p.x < minP.x)
57 | minP.x = p.x;
58 |
59 | if (p.y < minP.y)
60 | minP.y = p.y;
61 |
62 | if (p.z < minP.z)
63 | minP.z = p.z;
64 | }
65 |
66 | return minP;
67 | }
--------------------------------------------------------------------------------
/cpp_wrappers/cpp_utils/cloud/cloud.h:
--------------------------------------------------------------------------------
1 | //
2 | //
3 | // 0==========================0
4 | // | Local feature test |
5 | // 0==========================0
6 | //
7 | // version 1.0 :
8 | // >
9 | //
10 | //---------------------------------------------------
11 | //
12 | // Cloud header
13 | //
14 | //----------------------------------------------------
15 | //
16 | // Hugues THOMAS - 10/02/2017
17 | //
18 |
19 |
20 | # pragma once
21 |
22 | #include <vector>
23 | #include <unordered_map>
24 | #include <map>