├── .gitignore
├── CMakeLists.txt
├── Dockerfile
├── LICENSE
├── README.md
├── src
│   ├── ClusterGrowPLinkage.cpp
│   ├── ClusterGrowPLinkage.h
│   ├── PCAFunctions.cpp
│   ├── PCAFunctions.h
│   ├── PointGrowAngleDis.cpp
│   ├── PointGrowAngleDis.h
│   ├── PointGrowAngleOnly.cpp
│   ├── PointGrowAngleOnly.h
│   ├── main.cpp
│   ├── nanoflann.hpp
│   └── utils.h
├── test_data.txt
└── test_segment.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | data
3 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # set project's name
2 | PROJECT( PointCloudSegmentation )
3 | macro(use_cxx11)
4 | if (CMAKE_VERSION VERSION_LESS "3.1")
5 | if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
6 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11")
7 | endif ()
8 | else ()
9 | set (CMAKE_CXX_STANDARD 11)
10 | endif ()
11 | endmacro(use_cxx11)
12 | ###############################################################################
13 | # CMake settings
14 | CMAKE_MINIMUM_REQUIRED(VERSION 2.8.3)
15 | use_cxx11()
16 |
17 | # OpenCV
18 | FIND_PACKAGE(OpenCV REQUIRED)
19 |
20 | FILE(GLOB_RECURSE HDRS_FILES "src/*.h" "src/*.hpp")
21 | FILE(GLOB_RECURSE SRCS_FILES "src/*.c" "src/*.cpp")
22 |
23 | ADD_EXECUTABLE(${PROJECT_NAME} ${SRCS_FILES} ${HDRS_FILES})
24 | TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${OpenCV_LIBS})
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:14.04
2 | MAINTAINER Xinyuan Gui
3 |
4 | RUN apt-get update && \
5 | apt-get install -y \
6 | build-essential \
7 | cmake \
8 | libgtk2.0-dev \
9 | pkg-config \
10 | libavcodec-dev libavformat-dev libswscale-dev \
11 | libjpeg-dev libpng-dev libtiff-dev libjasper-dev \
12 | libopencv-dev build-essential checkinstall cmake pkg-config yasm libjpeg-dev libjasper-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine-dev libgstreamer0.10-dev libgstreamer-plugins-base0.10-dev libv4l-dev python-dev python-numpy libtbb-dev libqt4-dev libgtk2.0-dev libmp3lame-dev libopencore-amrnb-dev libopencore-amrwb-dev libtheora-dev libvorbis-dev libxvidcore-dev x264 v4l-utils
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 2-Clause License
2 |
3 | Copyright (c) 2017, xiaohulugo
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PointCloudSegmentation-V2
2 |
3 | Modifications: (1) use nanoflann.hpp to replace the ANN library; (2) remove some unused .cpp files
4 |
5 | Three point cloud segmentation algorithms, as used in the following paper:
6 |
7 | Pairwise Linkage for Point Cloud Segmentation, Xiaohu Lu et al., ISPRS 2016.
8 | https://github.com/xiaohulugo/xiaohulugo.github.com/blob/master/papers/PLinkage_Point_Segmentation_ISPRS2016.pdf
9 |
10 | *The algorithm used in the ISPRS 2016 paper is implemented in ClusterGrowPLinkage.cpp
11 |
12 | Prerequisites:
13 | ---
14 | 1. OpenCV > 2.4.x
15 | 2. OpenMP
16 |
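On Ubuntu, the prerequisites can be installed roughly the way the Dockerfile does it; a minimal sketch (package names may differ across distributions, and OpenMP support ships with GCC via build-essential):

```bash
# install a compiler toolchain, CMake, and the OpenCV development packages
sudo apt-get update
sudo apt-get install -y build-essential cmake libopencv-dev
```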
17 | Usage:
18 | ---
19 | 1. Build the project with CMake (example commands below)
20 | 2. Run the executable
21 | 3. See main.cpp for the interfaces/demos
22 |
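A typical out-of-source build and run on Linux looks roughly like this (illustrative commands; the executable name comes from the CMake project name):

```bash
# configure and build in a separate build/ directory
mkdir -p build && cd build
cmake ..
make -j"$(nproc)"

# run the segmenter; the input/output file paths are set inside src/main.cpp
./PointCloudSegmentation
```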
23 | Docker (for Linux):
24 | ---
25 | 1. Build the Docker image by running `docker build -t opencv:cpp .`; this creates an image named `opencv` with the tag `cpp`.
26 | 2. Modify the code as you wish. The default data folder for testing is `./data` in the project root.
27 | 3. Modify `test_segment.sh`; this is the script the container runs once it is set up (a sketch follows the run command below).
28 | 4. Run the following command from the project root:
29 |
30 | ```bash
31 | docker run -it --rm \
32 | -v $(pwd):/pointSegment \
33 | opencv:cpp \
34 | /pointSegment/test_segment.sh
35 | ```
36 |
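The contents of `test_segment.sh` are not reproduced in this listing; a minimal sketch of what such a script could look like, assuming the `/pointSegment` mount point used in the command above:

```bash
#!/bin/bash
# hypothetical test_segment.sh: build the project inside the container and run it
set -e
cd /pointSegment                 # the repository is mounted here by `docker run -v $(pwd):/pointSegment`
mkdir -p build && cd build
cmake .. && make -j"$(nproc)"
./PointCloudSegmentation         # input/output paths are configured in src/main.cpp
```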
37 | Performance:
38 | ---
39 |
40 |
41 |
42 |
43 | Please cite the following paper if you find this code useful:
44 |
45 | @ARTICLE{Lu2016Pairwise,
46 | author = {Lu, Xiaohu and Yao, Jian and Tu, Jinge and Li, Kai and Li, Li and Liu, Yahui},
47 | title = {PAIRWISE LINKAGE FOR POINT CLOUD SEGMENTATION},
48 | journal = {ISPRS Annals of Photogrammetry, Remote Sensing \& Spatial Information Sciences},
49 | year = {2016},
50 | }
51 |
52 |
53 | Feel free to correct my code if you spot any mistakes. You are also welcome to email me: fangzelu@gmail.com
54 |
--------------------------------------------------------------------------------
/src/ClusterGrowPLinkage.cpp:
--------------------------------------------------------------------------------
1 | #include "ClusterGrowPLinkage.h"
2 | #include <iostream>
3 | #include <algorithm>
4 | #include <cmath>
5 |
6 | using namespace std;
7 |
8 | ClusterGrowPLinkage::ClusterGrowPLinkage( int k, double theta, PLANE_MODE planeMode )
9 | {
10 | this->k = k;
11 | this->theta = theta;
12 | this->planeMode = planeMode;
13 | }
14 |
15 | ClusterGrowPLinkage::~ClusterGrowPLinkage()
16 | {
17 | }
18 |
19 | void ClusterGrowPLinkage::setData(PointCloud<double> &data, std::vector<PCAInfo> &pcaInfos)
20 | {
21 | this->pointData = data;
22 | this->pointNum = data.pts.size();
23 | this->pcaInfos = pcaInfos;
24 | }
25 |
26 | void ClusterGrowPLinkage::run( std::vector<std::vector<int> > &clusters )
27 | {
28 | // create linkage
29 | std::vector<int> clusterCenterIdx;
30 | std::vector<std::vector<int> > singleLinkage;
31 | createLinkage( pcaInfos, clusterCenterIdx, singleLinkage );
32 |
33 | // data clustering
34 | std::vector<std::vector<int> > clustersInit;
35 | clustering( pcaInfos, clusterCenterIdx, singleLinkage, clustersInit );
36 |
37 | // create patches via RDPCA
38 | std::vector<PCAInfo> patchesInit;
39 | createPatch( clustersInit, patchesInit );
40 |
41 | // patch merging
42 | patchMerging( patchesInit, pcaInfos, clusters );
43 | }
44 |
45 | void ClusterGrowPLinkage::createLinkage( std::vector<PCAInfo> &pcaInfos, std::vector<int> &clusterCenterIdx, std::vector<std::vector<int> > &singleLinkage )
46 | {
47 | cout<<"create linkage"< lambdas( pcaInfos.size() );
51 | double lambdaAvg = 0.0;
52 | for ( int i=0; i > linkage( this->pointNum );
71 | std::vector clusterCenter;
72 | for ( int i=0; ipointNum; ++i )
73 | {
74 | if ( i % 10000 == 0 )
75 | {
76 | cout<= 100 )
80 | {
81 | int aa=0;
82 | }
83 | int idx = i;
84 | double lambda = pcaInfos[idx].lambda0;
85 | double x = pointData.pts[idx].x;
86 | double y = pointData.pts[idx].y;
87 | double z = pointData.pts[idx].z;
88 | cv::Matx31d normal = pcaInfos[idx].normal;
89 | double N = sqrt( normal.val[0] * normal.val[0] + normal.val[1] * normal.val[1] + normal.val[2] * normal.val[2] );
90 | normal *= double(1.0) / N;
91 |
92 | // find the closest neighbor point which is more likely to make a plane
93 | std::vector devAngles( pcaInfos[idx].idxIn.size(), 100000.0 );
94 | #pragma omp parallel for
95 | for ( int j=0; j= lambda )
100 | {
101 | continue;
102 | }
103 |
104 | bool isIn = false;
105 | for ( int m=0; m ODMat1 = normal.t() * offset;
129 | cv::Matx ODMat2 = normalCur.t() * offset;
130 |
131 | double OD1 = fabs( ODMat1.val[0] );
132 | double OD2 = fabs( ODMat2.val[0] );
133 | double devDis = ( OD1 + OD2 ) / 2.0;
134 |
135 | // normal deviation
136 | double devAngle = acos( normal.val[0] * normalCur.val[0] + normal.val[1] * normalCur.val[1] + normal.val[2] * normalCur.val[2] );
137 | devAngle = min( devAngle, CV_PI - devAngle );
138 | devAngles[j] = devAngle;
139 | }
140 |
141 | // find the neighbor point with closest normal direction
142 | int idxBest = -1;
143 | double devMin = 1000.0;
144 | for ( int j=0; j= 0 ) // find a single linkage
154 | {
155 | linkage[idxBest].push_back( idx );
156 | }
157 | else if ( lambda < lambdaSTD )
158 | {
159 | clusterCenter.push_back( idx );
160 | }
161 | }
162 |
163 | //
164 | singleLinkage = linkage;
165 | clusterCenterIdx = clusterCenter;
166 | }
167 |
168 | void ClusterGrowPLinkage::clustering( std::vector<PCAInfo> &pcaInfos, std::vector<int> &clusterCenterIdx, std::vector<std::vector<int> > &singleLinkage, std::vector<std::vector<int> > &clusters )
169 | {
170 | cout<<"clustering"< seedIdx;
181 | seedIdx.push_back( idxCenter );
182 |
183 | std::vector cluster;
184 | cluster.push_back( idxCenter );
185 |
186 | int count2 = 0;
187 | while ( count2 < seedIdx.size() )
188 | {
189 | int idxSeed = seedIdx[count2];
190 | cv::Matx31d normalSeed = pcaInfos[idxCenter].normal;
191 |
192 | for ( j=0; jplaneMode == PLANE )
199 | {
200 | devAngle = acos( normalCenter.val[0] * normalPt.val[0] + normalCenter.val[1] * normalPt.val[1] + normalCenter.val[2] * normalPt.val[2] );
201 | }
202 | else
203 | {
204 | devAngle = acos( normalSeed.val[0] * normalPt.val[0] + normalSeed.val[1] * normalPt.val[1] + normalSeed.val[2] * normalPt.val[2] );
205 | }
206 |
207 | if ( devAngle < thAngleDev )
208 | {
209 | cluster.push_back( idxPt );
210 | seedIdx.push_back( idxPt );
211 | }
212 | else
213 | {
214 | clusterCenterIdx.push_back( idxPt );
215 | }
216 |
217 | }
218 | count2 ++;
219 | }
220 |
221 | if ( cluster.size() >= 10 )
222 | {
223 | clusters.push_back( cluster );
224 | }
225 |
226 | count1 ++;
227 | }
228 | }
229 |
230 | void ClusterGrowPLinkage::createPatch( std::vector<std::vector<int> > &clusters, std::vector<PCAInfo> &patches )
231 | {
232 | cout<<" clusters number: "< > pointDataPatch(clusters[i].size());
244 | for ( int j=0; jpointData.pts[idij].x;
249 | pointDataPatch[j][1] = this->pointData.pts[idij].y;
250 | pointDataPatch[j][2] = this->pointData.pts[idij].z;
251 | }
252 |
253 | PCAFunctions pcaer;
254 | //pcaer.PCASingle(pointDataPatch, patches[i] );
255 | pcaer.RDPCASingle(pointDataPatch, patches[i] );
256 |
257 | patches[i].idxAll = clusters[i];
258 | patches[i].planePt = cv::Matx31d( 0.0, 0.0, 0.0 );
259 | for ( int j=0; jpointData.pts[id].x;
266 | patches[i].planePt.val[1] += this->pointData.pts[id].y;
267 | patches[i].planePt.val[2] += this->pointData.pts[id].z;
268 | }
269 | patches[i].planePt *= ( 1.0 / double( patches[i].idxIn.size() ) );
270 | }
271 | }
272 |
273 | void ClusterGrowPLinkage::patchMerging( std::vector<PCAInfo> &patches, std::vector<PCAInfo> &pcaInfos, std::vector<std::vector<int> > &clusters )
274 | {
275 | cout<<"patch merging"<theta;
280 |
281 | int numPatch = patches.size();
282 | // sort the patches by point number
283 | std::sort( patches.begin(), patches.end(), [](const PCAInfo& lhs, const PCAInfo& rhs) { return lhs.idxIn.size() > rhs.idxIn.size(); } );
284 |
285 | // calculate the angle deviation threshold of each patch
286 | std::vector surfaceVariance( patches.size() );
287 | for ( int i=0; i surfaceVarianceSort = surfaceVariance;
300 | double medianValue = meadian( surfaceVarianceSort );
301 | std::vector().swap( surfaceVarianceSort );
302 |
303 | std::vector absDiff( patches.size() );
304 | for( int i = 0; i < patches.size(); ++i )
305 | {
306 | absDiff[i] = fabs( surfaceVariance[i] - medianValue );
307 | }
308 | double MAD = a * meadian( absDiff );
309 | std::vector().swap( absDiff );
310 |
311 | double surfaceVarianceMin = medianValue;
312 | double surfaceVarianceMax = medianValue + 2.5 * MAD ;
313 |
314 | double k = ( maxAngle - minAngle ) / ( surfaceVarianceMax - surfaceVarianceMin );
315 | std::vector thAngle( patches.size(), 0.0 );
316 | for ( int i=0; i surfaceVarianceMax )
323 | {
324 | thAngle[i] = maxAngle;
325 | }
326 | else
327 | {
328 | thAngle[i] = ( patches[i].lambda0 - surfaceVarianceMin ) * k + minAngle;
329 | }
330 | }
331 |
332 | // find the cluster for each data point
333 | std::vector clusterIdx( this->pointNum, -1 );
334 | for ( int i=0; i > patchAdjacent( numPatch );
345 | #pragma omp parallel for
346 | for ( int i=0; i patchAdjacentTemp;
349 | std::vector > pointAdjacentTemp;
350 | for ( int j=0; j=0 )
374 | {
375 | bool isIn = false;
376 | int n = 0;
377 | for ( n=0; n temp;
395 | temp.push_back( idxPoint );
396 | pointAdjacentTemp.push_back( temp );
397 | }
398 | }
399 | }
400 | }
401 |
402 | // repetition removal
403 | for ( int j=0; j pointTemp;
406 | for ( int m=0; m= 3 )
425 | {
426 | patchAdjacent[i].push_back( patchAdjacentTemp[j] );
427 | }
428 | }
429 | }
430 |
431 | // plane merging
432 | std::vector mergedIndex( numPatch, 0 );
433 | for ( int i=0; i seedIdx;
443 | std::vector patchIdx;
444 | seedIdx.push_back( idxStarter );
445 | patchIdx.push_back( idxStarter );
446 |
447 | int count = 0;
448 | while ( count < seedIdx.size() )
449 | {
450 | int idxSeed = seedIdx[count];
451 | cv::Matx31d normalSeed = patches[idxSeed].normal;
452 | cv::Matx31d ptSeed = patches[idxSeed].planePt;
453 | double thAngleSeed = thAngle[idxSeed];
454 |
455 | for ( int j=0; jplaneMode == PLANE )
471 | {
472 | cv::Matx31d ptVector = ptCur - ptStarter;
473 | devAngle = acos( normalStarter.val[0] * normalCur.val[0] + normalStarter.val[1] * normalCur.val[1] + normalStarter.val[2] * normalCur.val[2] );
474 | devDis = abs( normalStarter.val[0] * ptVector.val[0] + normalStarter.val[1] * ptVector.val[1] + normalStarter.val[2] * ptVector.val[2] );
475 | }
476 | else
477 | {
478 | cv::Matx31d ptVector = ptCur - ptSeed;
479 | devAngle = acos( normalSeed.val[0] * normalCur.val[0] + normalSeed.val[1] * normalCur.val[1] + normalSeed.val[2] * normalCur.val[2] );
480 | devDis = abs( normalStarter.val[0] * ptVector.val[0] + normalStarter.val[1] * ptVector.val[1] + normalStarter.val[2] * ptVector.val[2] );
481 | }
482 |
483 |
484 | //if ( min( devAngle, fabs( CV_PI - devAngle ) ) < thAngle && devDis < thDev )
485 | //if ( min( devAngle, fabs( CV_PI - devAngle ) ) < thAngle && devDis < 1.0 )
486 | if ( min( devAngle, fabs( CV_PI - devAngle ) ) < min( thAngleSeed, thAngleStarter ) )
487 | {
488 | seedIdx.push_back( idxCur );
489 | patchIdx.push_back( idxCur );
490 | mergedIndex[idxCur] = 1;
491 | }
492 | }
493 |
494 | count ++;
495 | }
496 |
497 | // create a new cluster
498 | std::vector patchNewCur;
499 | for ( int j=0; j &dataset )
518 | {
519 | std::sort( dataset.begin(), dataset.end(), []( const double& lhs, const double& rhs ){ return lhs < rhs; } );
520 |
521 | return dataset[dataset.size()/2];
522 | }
--------------------------------------------------------------------------------
/src/ClusterGrowPLinkage.h:
--------------------------------------------------------------------------------
1 | // Title = { PAIRWISE LINKAGE FOR POINT CLOUD SEGMENTATION },
2 | // Author = { Lu, Xiaohu and Yao, Jian and Tu, Jinge and Li, Kai and Li, Li and Liu, Yahui },
3 | // Journal = { ISPRS Annals of Photogrammetry, Remote Sensing \& Spatial Information Sciences },
4 | // Year = { 2016 }
5 |
6 | #ifndef _CLUSTER_GROW_PLINKAGE_H_
7 | #define _CLUSTER_GROW_PLINKAGE_H_
8 | #pragma once
9 |
10 | #include "PCAFunctions.h"
11 | //#include "opencv/cv.h"
12 | #include "opencv2/core.hpp"
13 |
14 | class ClusterGrowPLinkage
15 | {
16 | public:
17 | ClusterGrowPLinkage( int k, double theta, PLANE_MODE planeMode);
18 | ~ClusterGrowPLinkage();
19 |
20 | void run( std::vector<std::vector<int> > &clusters );
21 |
22 | void setData(PointCloud<double> &data, std::vector<PCAInfo> &pcaInfos);
23 |
24 | void createLinkage( std::vector<PCAInfo> &pcaInfos, std::vector<int> &clusterCenterIdx,
25 | std::vector<std::vector<int> > &singleLinkage );
26 |
27 | void clustering( std::vector<PCAInfo> &pcaInfos, std::vector<int> &clusterCenterIdx,
28 | std::vector<std::vector<int> > &singleLinkage, std::vector<std::vector<int> > &clusters );
29 |
30 | void createPatch( std::vector<std::vector<int> > &clusters, std::vector<PCAInfo> &patches );
31 |
32 | void patchMerging( std::vector<PCAInfo> &patches, std::vector<PCAInfo> &pcaInfos,
33 | std::vector<std::vector<int> > &clusters );
34 |
35 | double meadian( std::vector<double> &dataset );
36 |
37 | private:
38 | int k;
39 | double theta;
40 | PLANE_MODE planeMode;
41 |
42 | int pointNum;
43 | PointCloud<double> pointData;
44 | std::vector<PCAInfo> pcaInfos;
45 | };
46 |
47 | #endif // _CLUSTER_GROW_PLINKAGE_H_
48 |
--------------------------------------------------------------------------------
/src/PCAFunctions.cpp:
--------------------------------------------------------------------------------
1 | #include "PCAFunctions.h"
2 | #include <iostream>
3 | #include <algorithm>
4 |
5 | using namespace std;
6 |
7 | void PCAFunctions::PCA(PointCloud<double> &cloud, int k, std::vector<PCAInfo> &pcaInfos)
8 | {
9 | cout << "building kd-tree ..." << endl;
10 | double MINVALUE = 1e-7;
11 | int pointNum = cloud.pts.size();
12 | double scale = 0.0, magnitd = 0.0;
13 |
14 | // 1. build kd-tree
15 | typedef KDTreeSingleIndexAdaptor< L2_Simple_Adaptor<double, PointCloud<double> >, PointCloud<double>, 3/*dim*/ > my_kd_tree_t;
16 | my_kd_tree_t index(3 /*dim*/, cloud, KDTreeSingleIndexAdaptorParams(10 /* max leaf */));
17 | index.buildIndex();
18 |
19 | // 2. knn search
20 | size_t *out_ks = new size_t[pointNum];
21 | size_t **out_indices = new size_t *[pointNum];
22 | #pragma omp parallel for
23 | for (int i = 0; i resultSet(k);
31 | resultSet.init(out_indices[i], dis_temp);
32 | index.findNeighbors(resultSet, &query_pt[0], nanoflann::SearchParams(10));
33 | out_ks[i] = resultSet.size();
34 |
35 | delete[] query_pt;
36 | delete[] dis_temp;
37 | }
38 | index.freeIndex(index);
39 |
40 | cout << "pca normal calculation ..." << endl;
41 |
42 | // 3. PCA normal estimation
43 | scale = 0.0;
44 | pcaInfos.resize(pointNum);
45 | #pragma omp parallel for
46 | for (int i = 0; i < pointNum; ++i)
47 | {
48 | //
49 | int ki = out_ks[i];
50 |
51 | double h_mean_x = 0.0, h_mean_y = 0.0, h_mean_z = 0.0;
52 | for (int j = 0; j < ki; ++j)
53 | {
54 | int idx = out_indices[i][j];
55 | h_mean_x += cloud.pts[idx].x;
56 | h_mean_y += cloud.pts[idx].y;
57 | h_mean_z += cloud.pts[idx].z;
58 | }
59 | h_mean_x *= 1.0 / ki; h_mean_y *= 1.0 / ki; h_mean_z *= 1.0 / ki;
60 |
61 | double h_cov_1 = 0.0, h_cov_2 = 0.0, h_cov_3 = 0.0;
62 | double h_cov_5 = 0.0, h_cov_6 = 0.0;
63 | double h_cov_9 = 0.0;
64 | double dx = 0.0, dy = 0.0, dz = 0.0;
65 | for (int j = 0; j < k; ++j)
66 | {
67 | int idx = out_indices[i][j];
68 | dx = cloud.pts[idx].x - h_mean_x;
69 | dy = cloud.pts[idx].y - h_mean_y;
70 | dz = cloud.pts[idx].z - h_mean_z;
71 |
72 | h_cov_1 += dx * dx; h_cov_2 += dx * dy; h_cov_3 += dx * dz;
73 | h_cov_5 += dy * dy; h_cov_6 += dy * dz;
74 | h_cov_9 += dz * dz;
75 | }
76 | cv::Matx33d h_cov(
77 | h_cov_1, h_cov_2, h_cov_3,
78 | h_cov_2, h_cov_5, h_cov_6,
79 | h_cov_3, h_cov_6, h_cov_9);
80 | h_cov *= 1.0 / ki;
81 |
82 | // eigenvector
83 | cv::Matx33d h_cov_evectors;
84 | cv::Matx31d h_cov_evals;
85 | cv::eigen(h_cov, h_cov_evals, h_cov_evectors);
86 |
87 | //
88 | pcaInfos[i].idxAll.resize(ki);
89 | for (int j = 0; j& cloud, int k, std::vector& pcaInfos)
121 | {
122 | // removed, because it's too slow
123 | }
124 |
125 | void PCAFunctions::PCASingle(std::vector<std::vector<double> > &pointData, PCAInfo &pcaInfo)
126 | {
127 | int i, j;
128 | int k = pointData.size();
129 | double a = 1.4826;
130 | double thRz = 2.5;
131 |
132 | //
133 | pcaInfo.idxIn.resize(k);
134 | cv::Matx31d h_mean(0, 0, 0);
135 | for (i = 0; i < k; ++i)
136 | {
137 | pcaInfo.idxIn[i] = i;
138 | h_mean += cv::Matx31d(pointData[i][0], pointData[i][1], pointData[i][2]);
139 | }
140 | h_mean *= (1.0 / k);
141 |
142 | cv::Matx33d h_cov(0, 0, 0, 0, 0, 0, 0, 0, 0);
143 | for (i = 0; i < k; ++i)
144 | {
145 | cv::Matx31d hi = cv::Matx31d(pointData[i][0], pointData[i][1], pointData[i][2]);
146 | h_cov += (hi - h_mean) * (hi - h_mean).t();
147 | }
148 | h_cov *= (1.0 / k);
149 |
150 | // eigenvector
151 | cv::Matx33d h_cov_evectors;
152 | cv::Matx31d h_cov_evals;
153 | cv::eigen(h_cov, h_cov_evals, h_cov_evectors);
154 |
155 | //
156 | pcaInfo.idxAll = pcaInfo.idxIn;
157 | //pcaInfo.lambda0 = h_cov_evals.row(2).val[0];
158 | pcaInfo.lambda0 = h_cov_evals.row(2).val[0] / (h_cov_evals.row(0).val[0] + h_cov_evals.row(1).val[0] + h_cov_evals.row(2).val[0]);
159 | pcaInfo.normal = h_cov_evectors.row(2).t();
160 | pcaInfo.planePt = h_mean;
161 |
162 | // outliers removal via MCMD
163 | MCMD_OutlierRemoval(pointData, pcaInfo);
164 | }
165 |
166 | void PCAFunctions::RDPCASingle(std::vector<std::vector<double>>& pointData, PCAInfo & pcaInfo)
167 | {
168 | int i, j;
169 | int h0 = 3;
170 | int k = pointData.size();
171 | int h = k / 2;
172 | int iterTime = log(1 - 0.9999) / log(1 - pow(1 - 0.5, h0));
173 | double a = 1.4826;
174 | double thRz = 2.5;
175 |
176 | std::vector > h_subset_idx_vec;
177 | for (int iter = 0; iter < iterTime; ++iter)
178 | {
179 | int h0_idx0 = rand() % k;
180 | int h0_idx1 = rand() % k;
181 | int h0_idx2 = rand() % k;
182 |
183 | cv::Matx31d h0_0(pointData[h0_idx0][0], pointData[h0_idx0][1], pointData[h0_idx0][2]);
184 | cv::Matx31d h0_1(pointData[h0_idx1][0], pointData[h0_idx1][1], pointData[h0_idx1][2]);
185 | cv::Matx31d h0_2(pointData[h0_idx2][0], pointData[h0_idx2][1], pointData[h0_idx2][2]);
186 | cv::Matx31d h0_mean = (h0_0 + h0_1 + h0_2) * (1.0 / 3.0);
187 |
188 | // PCA
189 | cv::Matx33d h0_cov = ((h0_0 - h0_mean) * (h0_0 - h0_mean).t()
190 | + (h0_1 - h0_mean) * (h0_1 - h0_mean).t()
191 | + (h0_2 - h0_mean) * (h0_2 - h0_mean).t()) * (1.0 / 3.0);
192 |
193 | cv::Matx33d h0_cov_evectors;
194 | cv::Matx31d h0_cov_evals;
195 | cv::eigen(h0_cov, h0_cov_evals, h0_cov_evectors);
196 |
197 | // OD
198 | std::vector > ODs(k);
199 | for (i = 0; i < k; ++i)
200 | {
201 | cv::Matx31d ptMat(pointData[i][0], pointData[i][1], pointData[i][2]);
202 | cv::Matx ODMat = (ptMat - h0_mean).t() * h0_cov_evectors.row(2).t();
203 | double OD = fabs(ODMat.val[0]);
204 | ODs[i].first = i;
205 | ODs[i].second = OD;
206 | }
207 | std::sort(ODs.begin(), ODs.end(), [](const std::pair& lhs, const std::pair& rhs) { return lhs.second < rhs.second; });
208 |
209 |
210 | // h-subset
211 | std::vector h_subset_idx;
212 | for (i = 0; i < h; i++)
213 | {
214 | h_subset_idx.push_back(ODs[i].first);
215 | }
216 | h_subset_idx_vec.push_back(h_subset_idx);
217 | }
218 |
219 | // calculate the PCA hypotheses
220 | std::vector S_PCA;
221 | S_PCA.resize(h_subset_idx_vec.size());
222 | for (i = 0; i < h_subset_idx_vec.size(); ++i)
223 | {
224 | cv::Matx31d h_mean(0, 0, 0);
225 | for (j = 0; j < h; ++j)
226 | {
227 | int index = h_subset_idx_vec[i][j];
228 | h_mean += cv::Matx31d(pointData[index][0], pointData[index][1], pointData[index][2]);
229 | }
230 | h_mean *= (1.0 / double(h));
231 |
232 | cv::Matx33d h_cov(0, 0, 0, 0, 0, 0, 0, 0, 0);
233 | for (j = 0; j < h; ++j)
234 | {
235 | int index = h_subset_idx_vec[i][j];
236 | cv::Matx31d hi = cv::Matx31d(pointData[index][0], pointData[index][1], pointData[index][2]);
237 | h_cov += (hi - h_mean) * (hi - h_mean).t();
238 | }
239 | h_cov *= (1.0 / double(h));
240 |
241 | //
242 | cv::Matx33d h_cov_evectors;
243 | cv::Matx31d h_cov_evals;
244 | cv::eigen(h_cov, h_cov_evals, h_cov_evectors);
245 |
246 | PCAInfo pcaEles;
247 | //pcaEles.lambda0 = h_cov_evals.row(2).val[0];
248 | pcaEles.lambda0 = h_cov_evals.row(2).val[0] / (h_cov_evals.row(0).val[0] + h_cov_evals.row(1).val[0] + h_cov_evals.row(2).val[0]);
249 | pcaEles.normal = h_cov_evectors.row(2).t();
250 | pcaEles.idxIn = h_subset_idx_vec[i];
251 | S_PCA[i] = pcaEles;
252 | }
253 |
254 | // find the best PCA
255 | std::sort(S_PCA.begin(), S_PCA.end(), [](const PCAInfo& lhs, const PCAInfo& rhs) { return lhs.lambda0 < rhs.lambda0; });
256 | pcaInfo.lambda0 = S_PCA[0].lambda0;
257 | pcaInfo.idxIn = S_PCA[0].idxIn;
258 |
259 | pcaInfo.normal = S_PCA[0].normal;
260 | double N = sqrt(pcaInfo.normal.val[0] * pcaInfo.normal.val[0] + pcaInfo.normal.val[1] * pcaInfo.normal.val[1] + pcaInfo.normal.val[2] * pcaInfo.normal.val[2]);
261 | pcaInfo.normal *= 1.0 / N;
262 |
263 | pcaInfo.idxAll.resize(k);
264 | for (i = 0; i < k; ++i)
265 | {
266 | pcaInfo.idxAll[i] = i;
267 | }
268 |
269 | // outliers removal via MCMD
270 | MCMD_OutlierRemoval(pointData, pcaInfo);
271 | }
272 |
273 | void PCAFunctions::MCMD_OutlierRemoval(std::vector<std::vector<double> > &pointData, PCAInfo &pcaInfo)
274 | {
275 | double a = 1.4826;
276 | double thRz = 2.5;
277 | int num = pcaInfo.idxAll.size();
278 |
279 | // ODs
280 | cv::Matx31d h_mean(0, 0, 0);
281 | for (int j = 0; j < pcaInfo.idxIn.size(); ++j)
282 | {
283 | int idx = pcaInfo.idxIn[j];
284 | h_mean += cv::Matx31d(pointData[idx][0], pointData[idx][1], pointData[idx][2]);
285 | }
286 | h_mean *= (1.0 / pcaInfo.idxIn.size());
287 |
288 | std::vector ODs(num);
289 | for (int j = 0; j < num; ++j)
290 | {
291 | int idx = pcaInfo.idxAll[j];
292 | cv::Matx31d pt(pointData[idx][0], pointData[idx][1], pointData[idx][2]);
293 | cv::Matx OD_mat = (pt - h_mean).t() * pcaInfo.normal;
294 | double OD = fabs(OD_mat.val[0]);
295 | ODs[j] = OD;
296 | }
297 |
298 | // calculate the Rz-score for all points using ODs
299 | std::vector sorted_ODs(ODs.begin(), ODs.end());
300 | double median_OD = meadian(sorted_ODs);
301 | std::vector().swap(sorted_ODs);
302 |
303 | std::vector abs_diff_ODs(num);
304 | for (int j = 0; j < num; ++j)
305 | {
306 | abs_diff_ODs[j] = fabs(ODs[j] - median_OD);
307 | }
308 | double MAD_OD = a * meadian(abs_diff_ODs) + 1e-6;
309 | std::vector().swap(abs_diff_ODs);
310 |
311 | // get inlier
312 | std::vector idxInlier;
313 | for (int j = 0; j < num; ++j)
314 | {
315 | double Rzi = fabs(ODs[j] - median_OD) / MAD_OD;
316 | if (Rzi < thRz)
317 | {
318 | int idx = pcaInfo.idxAll[j];
319 | idxInlier.push_back(idx);
320 | }
321 | }
322 |
323 | //
324 | pcaInfo.idxIn = idxInlier;
325 | }
326 |
327 | double PCAFunctions::meadian(std::vector<double> dataset)
328 | {
329 | std::sort(dataset.begin(), dataset.end(), [](const double& lhs, const double& rhs) { return lhs < rhs; });
330 | if (dataset.size() % 2 != 0)
331 | {
332 | return dataset[dataset.size() / 2];
333 | }
334 | else
335 | {
336 | return (dataset[dataset.size() / 2 - 1] + dataset[dataset.size() / 2]) / 2.0;
337 | }
338 | }
--------------------------------------------------------------------------------
/src/PCAFunctions.h:
--------------------------------------------------------------------------------
1 | #ifndef _PCA_FUNCTIONS_H_
2 | #define _PCA_FUNCTIONS_H_
3 | #pragma once
4 |
5 | #include <vector>
6 | #include <opencv2/core.hpp>
7 |
8 | #include "nanoflann.hpp"
9 | #include "utils.h"
10 |
11 | using namespace cv;
12 | using namespace std;
13 | using namespace nanoflann;
14 |
15 | enum PLANE_MODE { PLANE, SURFACE };
16 |
17 | struct PCAInfo
18 | {
19 | double lambda0, scale;
20 | cv::Matx31d normal, planePt;
21 | std::vector<int> idxAll, idxIn;
22 |
23 | PCAInfo &operator =(const PCAInfo &info)
24 | {
25 | this->lambda0 = info.lambda0;
26 | this->normal = info.normal;
27 | this->idxIn = info.idxIn;
28 | this->idxAll = info.idxAll;
29 | this->scale = info.scale;
30 | return *this;
31 | }
32 | };
33 |
34 | class PCAFunctions
35 | {
36 | public:
37 | PCAFunctions(void) {};
38 | ~PCAFunctions(void) {};
39 |
40 | void PCA(PointCloud<double> &cloud, int k, std::vector<PCAInfo> &pcaInfos);
41 |
42 | void RDPCA(PointCloud<double> &cloud, int k, std::vector<PCAInfo> &pcaInfos);
43 |
44 | void PCASingle(std::vector<std::vector<double> > &pointData, PCAInfo &pcaInfo);
45 |
46 | void RDPCASingle(std::vector<std::vector<double> > &pointData, PCAInfo &pcaInfo);
47 |
48 | void MCMD_OutlierRemoval(std::vector<std::vector<double> > &pointData, PCAInfo &pcaInfo);
49 |
50 | double meadian(std::vector<double> dataset);
51 | };
52 |
53 | #endif //_PCA_FUNCTIONS_H_
54 |
--------------------------------------------------------------------------------
/src/PointGrowAngleDis.cpp:
--------------------------------------------------------------------------------
1 | #include "PointGrowAngleDis.h"
2 | #include <iostream>
3 | #include <algorithm>
4 | #include <cmath>
5 |
6 | using namespace std;
7 |
8 | PointGrowAngleDis::PointGrowAngleDis( double theta, int Rmin )
9 | {
10 | this->theta = theta;
11 | this->Rmin = Rmin;
12 | }
13 |
14 | PointGrowAngleDis::~PointGrowAngleDis()
15 | {
16 | }
17 |
18 | void PointGrowAngleDis::setData(PointCloud<double> &data, std::vector<PCAInfo> &infos)
19 | {
20 | this->pointData = data;
21 | this->pointNum = data.pts.size();
22 | this->pcaInfos = infos;
23 | }
24 |
25 | void PointGrowAngleDis::run( std::vector<std::vector<int> > &clusters )
26 | {
27 | double b = 1.4826;
28 |
29 | // sort the data points according to their curvature
30 | std::vector<std::pair<int, double> > idxSorted( this->pointNum );
31 | for ( int i=0; i<this->pointNum; ++i )
32 | {
33 | idxSorted[i].first = i;
34 | idxSorted[i].second = pcaInfos[i].lambda0;
35 | }
36 | std::sort( idxSorted.begin(), idxSorted.end(), [](const std::pair<int, double>& lhs, const std::pair<int, double>& rhs) { return lhs.second < rhs.second; } );
37 |
38 | // begin region growing
39 | std::vector<int> used( this->pointNum, 0 );
40 | for ( int i=0; i<this->pointNum; ++i )
41 | {
42 | if ( used[i] )
43 | {
44 | continue;
45 | }
46 |
47 | if ( i % 10000 == 0 )
48 | {
49 | cout< clusterNew;
54 | clusterNew.push_back( idxSorted[i].first );
55 | cv::Matx31d normalStart = pcaInfos[idxSorted[i].first].normal;
56 |
57 | int count = 0;
58 | while( count < clusterNew.size() )
59 | {
60 | int idxSeed = clusterNew[count];
61 | int num = pcaInfos[idxSeed].idxIn.size();
62 | cv::Matx31d normalSeed = pcaInfos[idxSeed].normal;
63 |
64 | // EDth
65 | std::vector EDs( num );
66 | for ( int j=0; jpointData.pts[idxSeed].x - this->pointData.pts[idx].x;
70 | double dy = this->pointData.pts[idxSeed].y - this->pointData.pts[idx].y;
71 | double dz = this->pointData.pts[idxSeed].z - this->pointData.pts[idx].z;
72 |
73 | EDs[j] = sqrt( dx * dx + dy * dy + dz * dz );
74 | }
75 | std::sort( EDs.begin(), EDs.end(), [](const double& lhs, const double& rhs) { return lhs < rhs; } );
76 | double EDth = EDs[ EDs.size() / 2 ];
77 |
78 | // ODth
79 | cv::Matx31d h_mean( 0, 0, 0 );
80 | for( int j = 0; j < num; ++j )
81 | {
82 | int idx = pcaInfos[idxSeed].idxIn[j];
83 | h_mean += cv::Matx31d(this->pointData.pts[idx].x, this->pointData.pts[idx].y, this->pointData.pts[idx].z);
84 | }
85 | h_mean *= ( 1.0 / num );
86 |
87 | std::vector ODs( num );
88 | for( int j = 0; j < num; ++j )
89 | {
90 | int idx = pcaInfos[idxSeed].idxIn[j];
91 | cv::Matx31d pt(this->pointData.pts[idx].x, this->pointData.pts[idx].y, this->pointData.pts[idx].z);
92 | cv::Matx OD_mat = ( pt - h_mean ).t() * pcaInfos[idxSeed].normal;
93 | double OD = fabs( OD_mat.val[0] );
94 | ODs[j] = OD;
95 | }
96 |
97 | // calculate the Rz-score for all points using ODs
98 | std::vector sorted_ODs( ODs.begin(), ODs.end() );
99 | double median_OD = meadian( sorted_ODs );
100 | std::vector().swap( sorted_ODs );
101 |
102 | std::vector abs_diff_ODs( num );
103 | for( int j = 0; j < num; ++j )
104 | {
105 | abs_diff_ODs[j] = fabs( ODs[j] - median_OD );
106 | }
107 | double MAD_OD = b * meadian( abs_diff_ODs );
108 | double ODth = median_OD + 2.0 * MAD_OD;
109 |
110 | // point cloud collection
111 | for( int j = 0; j < num; ++j )
112 | {
113 | int idx = pcaInfos[idxSeed].idxIn[j];
114 | if ( used[idx] )
115 | {
116 | continue;
117 | }
118 |
119 | if ( ODs[j] < ODth && EDs[j] < EDth )
120 | {
121 | cv::Matx31d normalCur = pcaInfos[idx].normal;
122 | double angle = acos( normalCur.val[0] * normalStart.val[0]
123 | + normalCur.val[1] * normalStart.val[1]
124 | + normalCur.val[2] * normalStart.val[2] );
125 | if (angle != angle)
126 | {
127 | continue;
128 | }
129 |
130 | if ( min( angle, CV_PI -angle ) < this->theta )
131 | {
132 | clusterNew.push_back( idx );
133 | used[idx] = 1;
134 | }
135 | }
136 | }
137 |
138 | count ++;
139 | }
140 |
141 | if ( clusterNew.size() > this->Rmin )
142 | {
143 | clusters.push_back( clusterNew );
144 | }
145 | }
146 |
147 | cout<<" number of clusters : "<<clusters.size()<<endl;
148 | }
149 |
150 | double PointGrowAngleDis::meadian( std::vector<double> &dataset )
151 | {
152 | std::sort( dataset.begin(), dataset.end(), []( const double& lhs, const double& rhs ){ return lhs < rhs; } );
153 |
154 | return dataset[dataset.size()/2];
155 | }
--------------------------------------------------------------------------------
/src/PointGrowAngleDis.h:
--------------------------------------------------------------------------------
1 | // Add point-to-plane distance to constrict the angle-only point growing algorithm
2 |
3 | #ifndef _POINT_GROW_ANGLE_DIS_H_
4 | #define _POINT_GROW_ANGLE_DIS_H_
5 | #pragma once
6 |
7 | #include "PCAFunctions.h"
8 | //#include "opencv/cv.h"
9 | #include "opencv2/core.hpp"
10 |
11 |
12 | class PointGrowAngleDis
13 | {
14 | public:
15 | PointGrowAngleDis( double theta, int Rmin );
16 | ~PointGrowAngleDis();
17 |
18 | void run( std::vector<std::vector<int> > &clusters );
19 |
20 | void setData(PointCloud<double> &data, std::vector<PCAInfo> &pcaInfos);
21 |
22 | double meadian( std::vector<double> &dataset );
23 |
24 | private:
25 | double theta;
26 | int Rmin;
27 |
28 | int pointNum;
29 | PointCloud<double> pointData;
30 | std::vector<PCAInfo> pcaInfos;
31 | };
32 |
33 | #endif // _POINT_GROW_ANGLE_DIS_H_
34 |
--------------------------------------------------------------------------------
/src/PointGrowAngleOnly.cpp:
--------------------------------------------------------------------------------
1 | #include "PointGrowAngleOnly.h"
2 | #include <iostream>
3 | #include <algorithm>
4 | #include <cmath>
5 |
6 | using namespace std;
7 |
8 | PointGrowAngleOnly::PointGrowAngleOnly( double theta, double percent )
9 | {
10 | this->theta = theta;
11 | this->percent = percent;
12 | }
13 |
14 | PointGrowAngleOnly::~PointGrowAngleOnly()
15 | {
16 | }
17 |
18 | void PointGrowAngleOnly::setData(PointCloud<double> &data, std::vector<PCAInfo> &infos)
19 | {
20 | this->pointData = data;
21 | this->pointNum = data.pts.size();
22 | this->pcaInfos = infos;
23 | }
24 |
25 |
26 | void PointGrowAngleOnly::run( std::vector<std::vector<int> > &clusters )
27 | {
28 | // residual threshold
29 | std::vector<std::pair<int, double> > idxSorted( this->pointNum );
30 | for ( int i=0; i<this->pointNum; ++i )
31 | {
32 | idxSorted[i].first = i;
33 | idxSorted[i].second = pcaInfos[i].lambda0;
34 | }
35 | std::sort( idxSorted.begin(), idxSorted.end(), [](const std::pair<int, double>& lhs, const std::pair<int, double>& rhs) { return lhs.second < rhs.second; } );
36 | double Rth = idxSorted[this->pointNum * this->percent].second;
37 |
38 | // smooth constraint region growing
39 | std::vector<int> used( this->pointNum, 0 );
40 | for ( int i=0; i<this->pointNum; ++i )
41 | {
42 | if ( used[i] )
43 | {
44 | continue;
45 | }
46 |
47 | if ( i % 10000 == 0 )
48 | {
49 | cout< clusterIdx;
54 | clusterIdx.push_back( idxSorted[i].first );
55 |
56 | std::vector seedIdx;
57 | seedIdx.push_back( idxSorted[i].first );
58 |
59 | int count = 0;
60 | while( count < seedIdx.size() )
61 | {
62 | int idxSeed = seedIdx[count];
63 | int num = pcaInfos[idxSeed].idxAll.size();
64 | cv::Matx31d normalSeed = pcaInfos[idxSeed].normal;
65 |
66 | // point cloud collection
67 | for( int j = 0; j < num; ++j )
68 | {
69 | int idx = pcaInfos[idxSeed].idxAll[j];
70 | if ( used[idx] )
71 | {
72 | continue;
73 | }
74 |
75 | cv::Matx31d normalCur = pcaInfos[idx].normal;
76 | double angle = acos( normalCur.val[0] * normalSeed.val[0] + normalCur.val[1] * normalSeed.val[1] + normalCur.val[2] * normalSeed.val[2] );
77 | if ( min( angle, CV_PI -angle ) < this->theta )
78 | {
79 | clusterIdx.push_back( idx );
80 | used[idx] = 1;
81 | if ( pcaInfos[idx].lambda0 < Rth )
82 | {
83 | seedIdx.push_back( idx );
84 | }
85 | }
86 | }
87 |
88 | count ++;
89 | }
90 |
91 | if ( clusterIdx.size() > 10 )
92 | {
93 | clusters.push_back( clusterIdx );
94 | }
95 | }
96 |
97 | cout<<" number of clusters : "<<clusters.size()<<endl;
98 | }
--------------------------------------------------------------------------------
/src/PointGrowAngleOnly.h:
--------------------------------------------------------------------------------
21 | void run( std::vector<std::vector<int> > &clusters );
22 |
23 | void setData(PointCloud<double> &data, std::vector<PCAInfo> &pcaInfos);
24 |
25 | private:
26 | double theta;
27 | double percent;
28 |
29 | int pointNum;
30 | PointCloud<double> pointData;
31 | std::vector<PCAInfo> pcaInfos;
32 | };
33 |
34 | #endif // _POINT_GROW_ANGLE_ONLY_H_
35 |
--------------------------------------------------------------------------------
/src/main.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <fstream>
3 | #include <cstdio>
4 |
5 | #include "nanoflann.hpp"
6 | #include "utils.h"
7 | //#include "cv.h"
8 | #include "opencv2/core.hpp"
9 |
10 |
11 | #include "PointGrowAngleOnly.h"
12 | #include "PointGrowAngleDis.h"
13 | #include "ClusterGrowPLinkage.h"
14 |
15 | using namespace cv;
16 | using namespace std;
17 |
18 | void readDataFromFile(std::string filepath, PointCloud<double> &cloud)
19 | {
20 | cloud.pts.reserve(10000000);
21 | cout << "Reading data ..." << endl;
22 |
23 | // 1. read in point data
24 | std::ifstream ptReader(filepath);
25 | std::vector lidarPoints;
26 | double x = 0, y = 0, z = 0, color = 0;
27 | double nx, ny, nz;
28 | int a = 0, b = 0, c = 0;
29 | int labelIdx = 0;
30 | int count = 0;
31 | int countTotal = 0;
32 | if (ptReader.is_open())
33 | {
34 | while (!ptReader.eof())
35 | {
36 | //ptReader >> x >> y >> z >> a >> b >> c >> labelIdx;
37 | //ptReader >> x >> y >> z >> a >> b >> c >> color;
38 | //ptReader >> x >> y >> z >> color >> a >> b >> c;
39 | if (!(ptReader >> x >> y >> z >> a >> b >> c)) break;  // stop on a failed read so the last point is not duplicated
40 | //ptReader >> x >> y >> z;
41 | //ptReader >> x >> y >> z >> color;
42 | //ptReader >> x >> y >> z >> nx >> ny >> nz;
43 |
44 | cloud.pts.push_back(PointCloud<double>::PtData(x, y, z));
45 |
46 | }
47 | ptReader.close();
48 | }
49 |
50 | std::cout << "Total num of points: " << cloud.pts.size() << "\n";
51 | }
52 |
53 |
54 | void writeOutClusters(string filePath, PointCloud<double> &pointData, std::vector<std::vector<int> > &clusters)
55 | {
56 | std::vector<cv::Scalar> colors(30);
57 | colors[4] = cv::Scalar(0, 0, 255);
58 | colors[1] = cv::Scalar(0, 255, 0);
59 | colors[2] = cv::Scalar(255, 0, 0);
60 | colors[3] = cv::Scalar(0, 0, 116);
61 | colors[0] = cv::Scalar(34, 139, 34);
62 | colors[5] = cv::Scalar(18, 153, 255);
63 | colors[6] = cv::Scalar(226, 43, 138);
64 | for (int i = 0; i<30; ++i)
65 | {
66 | int R = rand() % 255;
67 | int G = rand() % 255;
68 | int B = rand() % 255;
69 | colors[i] = cv::Scalar(B, G, R);
70 | }
71 |
72 | FILE *fp = fopen(filePath.c_str(), "w");
73 | for (int i = 0; i pointData;
96 | readDataFromFile(fileData, pointData);
97 |
98 | // step2: build kd-tree
99 | int k = 100;
100 | std::vector<PCAInfo> pcaInfos;
101 | PCAFunctions pcaer;
102 | pcaer.PCA(pointData, 100, pcaInfos);
103 |
104 | // step3: run point segmentation algorithm
105 | int algorithm = 0;
106 | std::vector<std::vector<int>> clusters;
107 |
108 | // Algorithm1: segmentation via PLinkage based clustering
109 | if (algorithm == 0)
110 | {
111 | double theta = 90.0 / 180.0 * CV_PI;
112 | PLANE_MODE planeMode = SURFACE; // PLANE SURFACE
113 | ClusterGrowPLinkage segmenter(k, theta, planeMode);
114 | segmenter.setData(pointData, pcaInfos);
115 | segmenter.run(clusters);
116 | }
117 | // Algorithm2: segmentation via normal angle similarity
118 | else if (algorithm == 1)
119 | {
120 | double theta = 5.0 / 180.0 * CV_PI;
121 | double percent = 0.75;
122 | PointGrowAngleOnly segmenter(theta, percent);
123 | segmenter.setData(pointData, pcaInfos);
124 | segmenter.run(clusters);
125 | }
126 | // Algorithm3: segmentation via normal angle similarity and point-plane distance
127 | else
128 | {
129 | double theta = 10.0 / 180.0 * CV_PI;
130 | int RMin = 10; // minimal number of points per cluster
131 | PointGrowAngleDis segmenter(theta, RMin);
132 | segmenter.setData(pointData, pcaInfos);
133 | segmenter.run(clusters);
134 | }
135 |
136 | // step4: write out result
137 | writeOutClusters(fileResult, pointData, clusters);
138 | }
139 |
--------------------------------------------------------------------------------
/src/nanoflann.hpp:
--------------------------------------------------------------------------------
1 | /***********************************************************************
2 | * Software License Agreement (BSD License)
3 | *
4 | * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
5 | * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
6 | * Copyright 2011-2016 Jose Luis Blanco (joseluisblancoc@gmail.com).
7 | * All rights reserved.
8 | *
9 | * THE BSD LICENSE
10 | *
11 | * Redistribution and use in source and binary forms, with or without
12 | * modification, are permitted provided that the following conditions
13 | * are met:
14 | *
15 | * 1. Redistributions of source code must retain the above copyright
16 | * notice, this list of conditions and the following disclaimer.
17 | * 2. Redistributions in binary form must reproduce the above copyright
18 | * notice, this list of conditions and the following disclaimer in the
19 | * documentation and/or other materials provided with the distribution.
20 | *
21 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 | *************************************************************************/
32 |
33 | /** \mainpage nanoflann C++ API documentation
34 | * nanoflann is a C++ header-only library for building KD-Trees, mostly
35 | * optimized for 2D or 3D point clouds.
36 | *
37 | * nanoflann does not require compiling or installing, just an
38 | * #include in your code.
39 | *
40 | * See:
41 | * - C++ API organized by modules
42 | * - Online README
43 | * - Doxygen documentation
44 | */
45 |
46 | #ifndef NANOFLANN_HPP_
47 | #define NANOFLANN_HPP_
48 | #pragma once
49 |
50 | #include <vector>
51 | #include <cassert>
52 | #include <algorithm>
53 | #include <stdexcept>
54 | #include <cstdio>  // for fwrite()
55 | #define _USE_MATH_DEFINES // Required by MSVC to define M_PI,etc. in <cmath>
56 | #include <cmath>   // for abs()
57 | #include <cstdlib> // for abs()
58 | #include <limits>
59 |
60 | // Avoid conflicting declaration of min/max macros in windows headers
61 | #if !defined(NOMINMAX) && (defined(_WIN32) || defined(_WIN32_) || defined(WIN32) || defined(_WIN64))
62 | # define NOMINMAX
63 | # ifdef max
64 | # undef max
65 | # undef min
66 | # endif
67 | #endif
68 |
69 | namespace nanoflann
70 | {
71 | /** @addtogroup nanoflann_grp nanoflann C++ library for ANN
72 | * @{ */
73 |
74 | /** Library version: 0xMmP (M=Major,m=minor,P=patch) */
75 | #define NANOFLANN_VERSION 0x123
76 |
77 | /** @addtogroup result_sets_grp Result set classes
78 | * @{ */
79 | template <typename DistanceType, typename IndexType = size_t, typename CountType = size_t>
80 | class KNNResultSet
81 | {
82 | IndexType * indices;
83 | DistanceType* dists;
84 | CountType capacity;
85 | CountType count;
86 |
87 | public:
88 | inline KNNResultSet(CountType capacity_) : indices(0), dists(0), capacity(capacity_), count(0)
89 | {
90 | }
91 |
92 | inline void init(IndexType* indices_, DistanceType* dists_)
93 | {
94 | indices = indices_;
95 | dists = dists_;
96 | count = 0;
97 | if (capacity)
98 | dists[capacity-1] = (std::numeric_limits<DistanceType>::max)();
99 | }
100 |
101 | inline CountType size() const
102 | {
103 | return count;
104 | }
105 |
106 | inline bool full() const
107 | {
108 | return count == capacity;
109 | }
110 |
111 |
112 | /**
113 | * Called during search to add an element matching the criteria.
114 | * @return true if the search should be continued, false if the results are sufficient
115 | */
116 | inline bool addPoint(DistanceType dist, IndexType index)
117 | {
118 | CountType i;
119 | for (i = count; i > 0; --i) {
120 | #ifdef NANOFLANN_FIRST_MATCH // If defined and two points have the same distance, the one with the lowest-index will be returned first.
121 | if ( (dists[i-1] > dist) || ((dist == dists[i-1]) && (indices[i-1] > index)) ) {
122 | #else
123 | if (dists[i-1] > dist) {
124 | #endif
125 | if (i < capacity) {
126 | dists[i] = dists[i-1];
127 | indices[i] = indices[i-1];
128 | }
129 | }
130 | else break;
131 | }
132 | if (i < capacity) {
133 | dists[i] = dist;
134 | indices[i] = index;
135 | }
136 | if (count < capacity) count++;
137 |
138 | // tell caller that the search shall continue
139 | return true;
140 | }
141 |
142 | inline DistanceType worstDist() const
143 | {
144 | return dists[capacity-1];
145 | }
146 | };
147 |
148 | /** operator "<" for std::sort() */
149 | struct IndexDist_Sorter
150 | {
151 | /** PairType will be typically: std::pair<IndexType, DistanceType> */
152 | template <typename PairType>
153 | inline bool operator()(const PairType &p1, const PairType &p2) const {
154 | return p1.second < p2.second;
155 | }
156 | };
157 |
158 | /**
159 | * A result-set class used when performing a radius based search.
160 | */
161 | template <typename DistanceType, typename IndexType = size_t>
162 | class RadiusResultSet
163 | {
164 | public:
165 | const DistanceType radius;
166 |
167 | std::vector<std::pair<IndexType, DistanceType> > &m_indices_dists;
168 |
169 | inline RadiusResultSet(DistanceType radius_, std::vector<std::pair<IndexType, DistanceType> > &indices_dists) : radius(radius_), m_indices_dists(indices_dists)
170 | {
171 | init();
172 | }
173 |
174 | inline void init() { clear(); }
175 | inline void clear() { m_indices_dists.clear(); }
176 |
177 | inline size_t size() const { return m_indices_dists.size(); }
178 |
179 | inline bool full() const { return true; }
180 |
181 | /**
182 | * Called during search to add an element matching the criteria.
183 | * @return true if the search should be continued, false if the results are sufficient
184 | */
185 | inline bool addPoint(DistanceType dist, IndexType index)
186 | {
187 | if (dist < radius)
188 | m_indices_dists.push_back(std::make_pair(index, dist));
189 | return true;
190 | }
191 |
192 | inline DistanceType worstDist() const { return radius; }
193 |
194 | /**
195 | * Find the worst result (furtherest neighbor) without copying or sorting
196 | * Pre-conditions: size() > 0
197 | */
198 | std::pair<IndexType, DistanceType> worst_item() const
199 | {
200 | if (m_indices_dists.empty()) throw std::runtime_error("Cannot invoke RadiusResultSet::worst_item() on an empty list of results.");
201 | typedef typename std::vector<std::pair<IndexType, DistanceType> >::const_iterator DistIt;
202 | DistIt it = std::max_element(m_indices_dists.begin(), m_indices_dists.end(), IndexDist_Sorter());
203 | return *it;
204 | }
205 | };
206 |
207 |
208 | /** @} */
209 |
210 |
211 | /** @addtogroup loadsave_grp Load/save auxiliary functions
212 | * @{ */
213 | template <typename T>
214 | void save_value(FILE* stream, const T& value, size_t count = 1)
215 | {
216 | fwrite(&value, sizeof(value), count, stream);
217 | }
218 |
219 | template <typename T>
220 | void save_value(FILE* stream, const std::vector& value)
221 | {
222 | size_t size = value.size();
223 | fwrite(&size, sizeof(size_t), 1, stream);
224 | fwrite(&value[0], sizeof(T), size, stream);
225 | }
226 |
227 | template <typename T>
228 | void load_value(FILE* stream, T& value, size_t count = 1)
229 | {
230 | size_t read_cnt = fread(&value, sizeof(value), count, stream);
231 | if (read_cnt != count) {
232 | throw std::runtime_error("Cannot read from file");
233 | }
234 | }
235 |
236 |
237 | template <typename T>
238 | void load_value(FILE* stream, std::vector& value)
239 | {
240 | size_t size;
241 | size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);
242 | if (read_cnt != 1) {
243 | throw std::runtime_error("Cannot read from file");
244 | }
245 | value.resize(size);
246 | read_cnt = fread(&value[0], sizeof(T), size, stream);
247 | if (read_cnt != size) {
248 | throw std::runtime_error("Cannot read from file");
249 | }
250 | }
251 | /** @} */
252 |
253 |
254 | /** @addtogroup metric_grp Metric (distance) classes
255 | * @{ */
256 |
257 | struct Metric
258 | {
259 | };
260 |
261 | /** Manhattan distance functor (generic version, optimized for high-dimensionality data sets).
262 | * Corresponding distance traits: nanoflann::metric_L1
263 | * \tparam T Type of the elements (e.g. double, float, uint8_t)
264 | * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
265 | */
266 | template <class T, class DataSource, typename _DistanceType = T>
267 | struct L1_Adaptor
268 | {
269 | typedef T ElementType;
270 | typedef _DistanceType DistanceType;
271 |
272 | const DataSource &data_source;
273 |
274 | L1_Adaptor(const DataSource &_data_source) : data_source(_data_source) { }
275 |
276 | inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const
277 | {
278 | DistanceType result = DistanceType();
279 | const T* last = a + size;
280 | const T* lastgroup = last - 3;
281 | size_t d = 0;
282 |
283 | /* Process 4 items with each loop for efficiency. */
284 | while (a < lastgroup) {
285 | const DistanceType diff0 = std::abs(a[0] - data_source.kdtree_get_pt(b_idx,d++));
286 | const DistanceType diff1 = std::abs(a[1] - data_source.kdtree_get_pt(b_idx,d++));
287 | const DistanceType diff2 = std::abs(a[2] - data_source.kdtree_get_pt(b_idx,d++));
288 | const DistanceType diff3 = std::abs(a[3] - data_source.kdtree_get_pt(b_idx,d++));
289 | result += diff0 + diff1 + diff2 + diff3;
290 | a += 4;
291 | if ((worst_dist > 0) && (result > worst_dist)) {
292 | return result;
293 | }
294 | }
295 | /* Process last 0-3 components. Not needed for standard vector lengths. */
296 | while (a < last) {
297 | result += std::abs( *a++ - data_source.kdtree_get_pt(b_idx, d++) );
298 | }
299 | return result;
300 | }
301 |
302 | template <typename U, typename V>
303 | inline DistanceType accum_dist(const U a, const V b, int ) const
304 | {
305 | return std::abs(a-b);
306 | }
307 | };
308 |
309 | /** Squared Euclidean distance functor (generic version, optimized for high-dimensionality data sets).
310 | * Corresponding distance traits: nanoflann::metric_L2
311 | * \tparam T Type of the elements (e.g. double, float, uint8_t)
312 | * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
313 | */
314 | template <class T, class DataSource, typename _DistanceType = T>
315 | struct L2_Adaptor
316 | {
317 | typedef T ElementType;
318 | typedef _DistanceType DistanceType;
319 |
320 | const DataSource &data_source;
321 |
322 | L2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { }
323 |
324 | inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size, DistanceType worst_dist = -1) const
325 | {
326 | DistanceType result = DistanceType();
327 | const T* last = a + size;
328 | const T* lastgroup = last - 3;
329 | size_t d = 0;
330 |
331 | /* Process 4 items with each loop for efficiency. */
332 | while (a < lastgroup) {
333 | const DistanceType diff0 = a[0] - data_source.kdtree_get_pt(b_idx,d++);
334 | const DistanceType diff1 = a[1] - data_source.kdtree_get_pt(b_idx,d++);
335 | const DistanceType diff2 = a[2] - data_source.kdtree_get_pt(b_idx,d++);
336 | const DistanceType diff3 = a[3] - data_source.kdtree_get_pt(b_idx,d++);
337 | result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
338 | a += 4;
339 | if ((worst_dist > 0) && (result > worst_dist)) {
340 | return result;
341 | }
342 | }
343 | /* Process last 0-3 components. Not needed for standard vector lengths. */
344 | while (a < last) {
345 | const DistanceType diff0 = *a++ - data_source.kdtree_get_pt(b_idx, d++);
346 | result += diff0 * diff0;
347 | }
348 | return result;
349 | }
350 |
351 | template <typename U, typename V>
352 | inline DistanceType accum_dist(const U a, const V b, int ) const
353 | {
354 | return (a - b) * (a - b);
355 | }
356 | };
357 |
358 | /** Squared Euclidean (L2) distance functor (suitable for low-dimensionality datasets, like 2D or 3D point clouds)
359 | * Corresponding distance traits: nanoflann::metric_L2_Simple
360 | * \tparam T Type of the elements (e.g. double, float, uint8_t)
361 | * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double, int64_t)
362 | */
363 | template <class T, class DataSource, typename _DistanceType = T>
364 | struct L2_Simple_Adaptor
365 | {
366 | typedef T ElementType;
367 | typedef _DistanceType DistanceType;
368 |
369 | const DataSource &data_source;
370 |
371 | L2_Simple_Adaptor(const DataSource &_data_source) : data_source(_data_source) { }
372 |
373 | inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const {
374 | DistanceType result = DistanceType();
375 | for (size_t i = 0; i < size; ++i) {
376 | const DistanceType diff = a[i] - data_source.kdtree_get_pt(b_idx, i);
377 | result += diff * diff;
378 | }
379 | return result;
380 | }
381 |
382 | template <typename U, typename V>
383 | inline DistanceType accum_dist(const U a, const V b, int ) const
384 | {
385 | return (a - b) * (a - b);
386 | }
387 | };
388 |
389 | /** SO2 distance functor
390 | * Corresponding distance traits: nanoflann::metric_SO2
391 | * \tparam T Type of the elements (e.g. double, float)
392 | * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double)
393 | * orientation is constrained to be in [-pi, pi]
394 | */
395 | template <class T, class DataSource, typename _DistanceType = T>
396 | struct SO2_Adaptor
397 | {
398 | typedef T ElementType;
399 | typedef _DistanceType DistanceType;
400 |
401 | const DataSource &data_source;
402 |
403 | SO2_Adaptor(const DataSource &_data_source) : data_source(_data_source) { }
404 |
405 | inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const {
406 | return accum_dist(a[size-1], data_source.kdtree_get_pt(b_idx, size - 1) , size - 1);
407 | }
408 |
409 | template <typename U, typename V>
410 | inline DistanceType accum_dist(const U a, const V b, int ) const
411 | {
412 | DistanceType result = DistanceType();
413 | result = b - a;
414 | if (result > M_PI)
415 | result -= 2. * M_PI;
416 | else if (result < -M_PI)
417 | result += 2. * M_PI;
418 | return result;
419 | }
420 | };
421 |
422 | /** SO3 distance functor (Uses L2_Simple)
423 | * Corresponding distance traits: nanoflann::metric_SO3
424 | * \tparam T Type of the elements (e.g. double, float)
425 | * \tparam _DistanceType Type of distance variables (must be signed) (e.g. float, double)
426 | */
427 | template <class T, class DataSource, typename _DistanceType = T>
428 | struct SO3_Adaptor
429 | {
430 | typedef T ElementType;
431 | typedef _DistanceType DistanceType;
432 |
433 | L2_Simple_Adaptor<T, DataSource> distance_L2_Simple;
434 |
435 | SO3_Adaptor(const DataSource &_data_source) : distance_L2_Simple(_data_source) { }
436 |
437 | inline DistanceType evalMetric(const T* a, const size_t b_idx, size_t size) const {
438 | return distance_L2_Simple.evalMetric(a, b_idx, size);
439 | }
440 |
441 | template <typename U, typename V>
442 | inline DistanceType accum_dist(const U a, const V b, int idx) const
443 | {
444 | return distance_L2_Simple.accum_dist(a, b, idx);
445 | }
446 | };
447 |
448 | /** Metaprogramming helper traits class for the L1 (Manhattan) metric */
449 | struct metric_L1 : public Metric
450 | {
451 | template <class T, class DataSource>
452 | struct traits {
453 | typedef L1_Adaptor<T, DataSource> distance_t;
454 | };
455 | };
456 | /** Metaprogramming helper traits class for the L2 (Euclidean) metric */
457 | struct metric_L2 : public Metric
458 | {
459 | template <class T, class DataSource>
460 | struct traits {
461 | typedef L2_Adaptor<T, DataSource> distance_t;
462 | };
463 | };
464 | /** Metaprogramming helper traits class for the L2_simple (Euclidean) metric */
465 | struct metric_L2_Simple : public Metric
466 | {
467 | template <class T, class DataSource>
468 | struct traits {
469 | typedef L2_Simple_Adaptor<T, DataSource> distance_t;
470 | };
471 | };
472 | /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */
473 | struct metric_SO2 : public Metric
474 | {
475 | template <class T, class DataSource>
476 | struct traits {
477 | typedef SO2_Adaptor<T, DataSource> distance_t;
478 | };
479 | };
480 | /** Metaprogramming helper traits class for the SO3_InnerProdQuat metric */
481 | struct metric_SO3 : public Metric
482 | {
483 | template <class T, class DataSource>
484 | struct traits {
485 | typedef SO3_Adaptor<T, DataSource> distance_t;
486 | };
487 | };
488 |
489 | /** @} */
490 |
491 | /** @addtogroup param_grp Parameter structs
492 | * @{ */
493 |
494 | /** Parameters (see README.md) */
495 | struct KDTreeSingleIndexAdaptorParams
496 | {
497 | KDTreeSingleIndexAdaptorParams(size_t _leaf_max_size = 10) :
498 | leaf_max_size(_leaf_max_size)
499 | {}
500 |
501 | size_t leaf_max_size;
502 | };
503 |
504 | /** Search options for KDTreeSingleIndexAdaptor::findNeighbors() */
505 | struct SearchParams
506 | {
507 | /** Note: The first argument (checks_IGNORED_) is ignored, but kept for compatibility with the FLANN interface */
508 | SearchParams(int checks_IGNORED_ = 32, float eps_ = 0, bool sorted_ = true ) :
509 | checks(checks_IGNORED_), eps(eps_), sorted(sorted_) {}
510 |
511 | int checks; //!< Ignored parameter (Kept for compatibility with the FLANN interface).
512 | float eps; //!< search for eps-approximate neighbours (default: 0)
513 | bool sorted; //!< only for radius search, require neighbours sorted by distance (default: true)
514 | };
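/* Illustrative sketch (assumption, not part of the library): typical construction of
 * the two parameter structs above. The way they are handed to an index is sketched
 * here only in outline.
 *
 *   nanoflann::KDTreeSingleIndexAdaptorParams params(10);  // leaf_max_size = 10
 *   nanoflann::SearchParams searchParams(32, 0.f, true);   // checks (ignored), eps, sorted
 *   // params is passed when constructing an index (e.g. KDTreeSingleIndexAdaptor);
 *   // searchParams is passed to radius searches / findNeighbors().
 */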
515 | /** @} */
516 |
517 |
518 | /** @addtogroup memalloc_grp Memory allocation
519 | * @{ */
520 |
521 | /**
522 | * Allocates (using C's malloc) a generic type T.
523 | *
524 | * Params:
525 | * count = number of instances to allocate.
526 | * Returns: pointer (of type T*) to memory buffer
527 | */
528 | template <typename T>
529 | inline T* allocate(size_t count = 1)
530 | {
531 | T* mem = static_cast<T*>( ::malloc(sizeof(T)*count));
532 | return mem;
533 | }
534 |
535 |
536 | /**
537 | * Pooled storage allocator
538 | *
539 | * The following routines allow for the efficient allocation of storage in
540 | * small chunks from a specified pool. Rather than allowing each structure
541 | * to be freed individually, an entire pool of storage is freed at once.
542 | * This method has two advantages over just using malloc() and free(). First,
543 | * it is far more efficient for allocating small objects, as there is
544 | * no overhead for remembering all the information needed to free each
545 | * object or consolidating fragmented memory. Second, the decision about
546 | * how long to keep an object is made at the time of allocation, and there
547 | * is no need to track down all the objects to free them.
548 | *
549 | */
550 |
551 | const size_t WORDSIZE = 16;
552 | const size_t BLOCKSIZE = 8192;
553 |
554 | class PooledAllocator
555 | {
556 | /* We maintain memory alignment to word boundaries by requiring that all
557 | allocations be in multiples of the machine wordsize. */
558 | /* Size of machine word in bytes. Must be power of 2. */
559 | /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */
560 |
561 |
562 | size_t remaining; /* Number of bytes left in current block of storage. */
563 | void* base; /* Pointer to base of current block of storage. */
564 | void* loc; /* Current location in block to next allocate memory. */
565 |
566 | void internal_init()
567 | {
568 | remaining = 0;
569 | base = NULL;
570 | usedMemory = 0;
571 | wastedMemory = 0;
572 | }
573 |
574 | public:
575 | size_t usedMemory;
576 | size_t wastedMemory;
577 |
578 | /**
579 | Default constructor. Initializes a new pool.
580 | */
581 | PooledAllocator() {
582 | internal_init();
583 | }
584 |
585 | /**
586 | * Destructor. Frees all the memory allocated in this pool.
587 | */
588 | ~PooledAllocator() {
589 | free_all();
590 | }
591 |
592 | /** Frees all allocated memory chunks */
593 | void free_all()
594 | {
595 | while (base != NULL) {
596 | void *prev = *(static_cast<void**>( base)); /* Get pointer to prev block. */
597 | ::free(base);
598 | base = prev;
599 | }
600 | internal_init();
601 | }
602 |
603 | /**
604 | * Returns a pointer to a piece of new memory of the given size in bytes
605 | * allocated from the pool.
606 | */
607 | void* malloc(const size_t req_size)
608 | {
609 | /* Round size up to a multiple of wordsize. The following expression
610 | only works for WORDSIZE that is a power of 2, by masking last bits of
611 | incremented size to zero.
612 | */
613 | const size_t size = (req_size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);
614 |
615 | /* Check whether a new block must be allocated. Note that the first word
616 | of a block is reserved for a pointer to the previous block.
617 | */
618 | if (size > remaining) {
619 |
620 | wastedMemory += remaining;
621 |
622 | /* Allocate new storage. */
623 | const size_t blocksize = (size + sizeof(void*) + (WORDSIZE - 1) > BLOCKSIZE) ?
624 | size + sizeof(void*) + (WORDSIZE - 1) : BLOCKSIZE;
625 |
626 | // use the standard C malloc to allocate memory
627 | void* m = ::malloc(blocksize);
628 | if (!m) {
629 | fprintf(stderr, "Failed to allocate memory.\n");
630 | return NULL;
631 | }
632 |
633 | /* Fill first word of new block with pointer to previous block. */
634 | static_cast<void**>(m)[0] = base;
635 | base = m;
636 |
637 | size_t shift = 0;
638 | //int size_t = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
639 |
640 | remaining = blocksize - sizeof(void*) - shift;
641 | loc = (static_cast<char*>(m) + sizeof(void*) + shift);
642 | }
643 | void* rloc = loc;
644 | loc = static_cast<char*>(loc) + size;
645 | remaining -= size;
646 |
647 | usedMemory += size;
648 |
649 | return rloc;
650 | }
651 |
652 | /**
653 | * Allocates (using this pool) a generic type T.
654 | *
655 | * Params:
656 | * count = number of instances to allocate.
657 | * Returns: pointer (of type T*) to memory buffer
658 | */
659 | template <typename T>
660 | T* allocate(const size_t count = 1)
661 | {
662 | T* mem = static_cast<T*>(this->malloc(sizeof(T)*count));
663 | return mem;
664 | }
665 |
666 | };
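/* Illustrative sketch (assumption): how the pool is typically used by the index code
 * further below. allocate<T>() does not run constructors (it returns raw malloc'd
 * memory), so it is only suitable for trivially-constructible types such as tree nodes.
 *
 *   PooledAllocator pool;
 *   Node*   node = pool.allocate<Node>();      // carve one Node out of the current block
 *   double* buf  = pool.allocate<double>(128); // 128 doubles from the same pool
 *   // ... build the tree ...
 *   pool.free_all();                           // releases every block at once
 */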
667 | /** @} */
668 |
669 | /** @addtogroup nanoflann_metaprog_grp Auxiliary metaprogramming stuff
670 | * @{ */
671 |
672 | // ---------------- CArray -------------------------
673 | /** An STL container (as a wrapper) for arrays of constant size defined at compile time (class imported from the MRPT project)
674 | * This code is an adapted version from Boost, modified for its integration
675 | * within MRPT (JLBC, Dec/2009) (Renamed array -> CArray to avoid potential conflicts).
676 | * See
677 | * http://www.josuttis.com/cppcode
678 | * for details and the latest version.
679 | * See
680 | * http://www.boost.org/libs/array
681 | * for documentation.
682 | *
683 | * (C) Copyright Nicolai M. Josuttis 2001.
684 | * Permission to copy, use, modify, sell and distribute this software
685 | * is granted provided this copyright notice appears in all copies.
686 | * This software is provided "as is" without express or implied
687 | * warranty, and with no claim as to its suitability for any purpose.
688 | *
689 | * 29 Jan 2004 - minor fixes (Nico Josuttis)
690 | * 04 Dec 2003 - update to synch with library TR1 (Alisdair Meredith)
691 | * 23 Aug 2002 - fix for Non-MSVC compilers combined with MSVC libraries.
692 | * 05 Aug 2001 - minor update (Nico Josuttis)
693 | * 20 Jan 2001 - STLport fix (Beman Dawes)
694 | * 29 Sep 2000 - Initial Revision (Nico Josuttis)
695 | *
696 | * Jan 30, 2004
697 | */
698 | template <typename T, std::size_t N>
699 | class CArray {
700 | public:
701 | T elems[N]; // fixed-size array of elements of type T
702 |
703 | public:
704 | // type definitions
705 | typedef T value_type;
706 | typedef T* iterator;
707 | typedef const T* const_iterator;
708 | typedef T& reference;
709 | typedef const T& const_reference;
710 | typedef std::size_t size_type;
711 | typedef std::ptrdiff_t difference_type;
712 |
713 | // iterator support
714 | inline iterator begin() { return elems; }
715 | inline const_iterator begin() const { return elems; }
716 | inline iterator end() { return elems+N; }
717 | inline const_iterator end() const { return elems+N; }
718 |
719 | // reverse iterator support
720 | #if !defined(BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION) && !defined(BOOST_MSVC_STD_ITERATOR) && !defined(BOOST_NO_STD_ITERATOR_TRAITS)
721 | typedef std::reverse_iterator<iterator> reverse_iterator;
722 | typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
723 | #elif defined(_MSC_VER) && (_MSC_VER == 1300) && defined(BOOST_DINKUMWARE_STDLIB) && (BOOST_DINKUMWARE_STDLIB == 310)
724 | // workaround for broken reverse_iterator in VC7
725 | typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, iterator,
726 | reference, iterator, reference> > reverse_iterator;
727 | typedef std::reverse_iterator<std::_Ptrit<value_type, difference_type, const_iterator,
728 | const_reference, iterator, reference> > const_reverse_iterator;
729 | #else
730 | // workaround for broken reverse_iterator implementations
731 | typedef std::reverse_iterator<iterator, T> reverse_iterator;
732 | typedef std::reverse_iterator<const_iterator, T> const_reverse_iterator;
733 | #endif
734 |
735 | reverse_iterator rbegin() { return reverse_iterator(end()); }
736 | const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
737 | reverse_iterator rend() { return reverse_iterator(begin()); }
738 | const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
739 | // operator[]
740 | inline reference operator[](size_type i) { return elems[i]; }
741 | inline const_reference operator[](size_type i) const { return elems[i]; }
742 | // at() with range check
743 | reference at(size_type i) { rangecheck(i); return elems[i]; }
744 | const_reference at(size_type i) const { rangecheck(i); return elems[i]; }
745 | // front() and back()
746 | reference front() { return elems[0]; }
747 | const_reference front() const { return elems[0]; }
748 | reference back() { return elems[N-1]; }
749 | const_reference back() const { return elems[N-1]; }
750 | // size is constant
751 | static inline size_type size() { return N; }
752 | static bool empty() { return false; }
753 | static size_type max_size() { return N; }
754 | enum { static_size = N };
755 | /** This method has no effects in this class, but raises an exception if the expected size does not match */
756 | inline void resize(const size_t nElements) { if (nElements!=N) throw std::logic_error("Try to change the size of a CArray."); }
757 | // swap (note: linear complexity in N, constant for given instantiation)
758 | void swap (CArray& y) { std::swap_ranges(begin(),end(),y.begin()); }
759 | // direct access to data (read-only)
760 | const T* data() const { return elems; }
761 | // use array as C array (direct read/write access to data)
762 | T* data() { return elems; }
763 | // assignment with type conversion
764 | template <typename T2> CArray<T, N>& operator= (const CArray<T2, N>& rhs) {
765 | std::copy(rhs.begin(),rhs.end(), begin());
766 | return *this;
767 | }
768 | // assign one value to all elements
769 | inline void assign (const T& value) { for (size_t i=0;i<N;i++) elems[i]=value; }
770 | // assign (compatible with std::vector's one) (by JLBC for MRPT)
771 | void assign (const size_t n, const T& value) { assert(N==n); for (size_t i=0;i<N;i++) elems[i]=value; }
772 | private:
773 | // check range (may be private because it is static)
774 | static void rangecheck (size_type i) { if (i >= size()) { throw std::out_of_range("CArray<>: index out of range"); } }
775 | }; // end of CArray
776 |
777 | /** Used to declare fixed-size arrays when DIM>0, dynamically-allocated vectors when DIM=-1.
778 | * Fixed size version for a generic DIM:
779 | */
780 | template <int DIM, typename T>
781 | struct array_or_vector_selector
782 | {
783 | typedef CArray<T, DIM> container_t;
784 | };
785 | /** Dynamic size version */
786 | template <typename T>
787 | struct array_or_vector_selector<-1, T> {
788 | typedef std::vector<T> container_t;
789 | };
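/* Illustrative sketch (assumption): what the selector resolves to for a compile-time
 * dimension versus the dynamic (DIM == -1) case.
 *
 *   array_or_vector_selector<3, double>::container_t  a;  // CArray<double, 3>
 *   array_or_vector_selector<-1, double>::container_t v;  // std::vector<double>
 *   v.resize(dim);  // the dynamic container must be sized at run time ("dim" is hypothetical)
 */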
790 |
791 | /** @} */
792 |
793 | /** kd-tree base-class
794 | *
795 | * Contains the member functions common to the classes KDTreeSingleIndexAdaptor and KDTreeSingleIndexDynamicAdaptor_.
796 | *
797 | * \tparam Derived The name of the class which inherits this class.
798 | * \tparam DatasetAdaptor The user-provided adaptor (see comments above).
799 | * \tparam Distance The distance metric to use, these are all classes derived from nanoflann::Metric
800 | * \tparam DIM Dimensionality of data points (e.g. 3 for 3D points)
801 | * \tparam IndexType Will be typically size_t or int
802 | */
803 |
804 | template <class Derived, typename Distance, class DatasetAdaptor, int DIM = -1, typename IndexType = size_t>
805 | class KDTreeBaseClass
806 | {
807 |
808 | public:
809 | /** Frees the previously-built index. Automatically called within buildIndex(). */
810 | void freeIndex(Derived &obj)
811 | {
812 | obj.pool.free_all();
813 | obj.root_node = NULL;
814 | obj.m_size_at_index_build = 0;
815 | }
816 |
817 | typedef typename Distance::ElementType ElementType;
818 | typedef typename Distance::DistanceType DistanceType;
819 |
820 | /*--------------------- Internal Data Structures --------------------------*/
821 | struct Node
822 | {
823 | /** Union used because a node can be either a LEAF node or a non-leaf node, so both data fields are never used simultaneously */
824 | union {
825 | struct leaf
826 | {
827 | IndexType left, right; //!< Indices of points in leaf node
828 | } lr;
829 | struct nonleaf
830 | {
831 | int divfeat; //!< Dimension used for subdivision.
832 | DistanceType divlow, divhigh; //!< The values used for subdivision.
833 | } sub;
834 | } node_type;
835 | Node *child1, *child2; //!< Child nodes (both=NULL means it's a leaf node)
836 | };
837 |
838 | typedef Node* NodePtr;
839 |
840 | struct Interval
841 | {
842 | ElementType low, high;
843 | };
844 |
845 | /**
846 | * Array of indices to vectors in the dataset.
847 | */
848 | std::vector<IndexType> vind;
849 |
850 | NodePtr root_node;
851 |
852 | size_t m_leaf_max_size;
853 |
854 | size_t m_size; //!< Number of current points in the dataset
855 | size_t m_size_at_index_build; //!< Number of points in the dataset when the index was built
856 | int dim; //!< Dimensionality of each data point
857 |
858 | /** Define "BoundingBox" as a fixed-size or variable-size container depending on "DIM" */
859 | typedef typename array_or_vector_selector<DIM, Interval>::container_t BoundingBox;
860 |
861 | /** Define "distance_vector_t" as a fixed-size or variable-size container depending on "DIM" */
862 | typedef typename array_or_vector_selector<DIM, DistanceType>::container_t distance_vector_t;
863 |
864 | /** The KD-tree used to find neighbours */
865 |
866 | BoundingBox root_bbox;
867 |
868 | /**
869 | * Pooled memory allocator.
870 | *
871 | * Using a pooled memory allocator is more efficient
872 | * than allocating memory directly when there is a large
873 | * number of small memory allocations.
874 | */
875 | PooledAllocator pool;
876 |
877 | /** Returns number of points in dataset */
878 | size_t size(const Derived &obj) const { return obj.m_size; }
879 |
880 | /** Returns the length of each point in the dataset */
881 | size_t veclen(const Derived &obj) {
882 | return static_cast<size_t>(DIM > 0 ? DIM : obj.dim);
883 | }
884 |
885 | /// Helper accessor to the dataset points:
886 | inline ElementType dataset_get(const Derived &obj, size_t idx, int component) const{
887 | return obj.dataset.kdtree_get_pt(idx, component);
888 | }
889 |
890 | /**
891 | * Computes the index memory usage
892 | * Returns: memory used by the index
893 | */
894 | size_t usedMemory(Derived &obj)
895 | {
896 | return obj.pool.usedMemory + obj.pool.wastedMemory + obj.dataset.kdtree_get_point_count() * sizeof(IndexType); // pool memory and vind array memory
897 | }
898 |
899 | void computeMinMax(const Derived &obj, IndexType* ind, IndexType count, int element, ElementType& min_elem, ElementType& max_elem)
900 | {
901 | min_elem = dataset_get(obj, ind[0],element);
902 | max_elem = dataset_get(obj, ind[0],element);
903 | for (IndexType i = 1; i < count; ++i) {
904 | ElementType val = dataset_get(obj, ind[i], element);
905 | if (val < min_elem) min_elem = val;
906 | if (val > max_elem) max_elem = val;
907 | }
908 | }
909 |
910 | /**
911 | * Create a tree node that subdivides the list of vecs from vind[first]
912 | * to vind[last]. The routine is called recursively on each sublist.
913 | *
914 | * @param left index of the first vector
915 | * @param right index of the last vector
916 | */
917 | NodePtr divideTree(Derived &obj, const IndexType left, const IndexType right, BoundingBox& bbox)
918 | {
919 | NodePtr node = obj.pool.template allocate<Node>(); // allocate memory
920 |
921 | /* If too few exemplars remain, then make this a leaf node. */
922 | if ( (right - left) <= static_cast<IndexType>(obj.m_leaf_max_size) ) {
923 | node->child1 = node->child2 = NULL; /* Mark as leaf node. */
924 | node->node_type.lr.left = left;
925 | node->node_type.lr.right = right;
926 |
927 | // compute bounding-box of leaf points
928 | for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
929 | bbox[i].low = dataset_get(obj, obj.vind[left], i);
930 | bbox[i].high = dataset_get(obj, obj.vind[left], i);
931 | }
932 | for (IndexType k = left + 1; k < right; ++k) {
933 | for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
934 | if (bbox[i].low > dataset_get(obj, obj.vind[k], i)) bbox[i].low = dataset_get(obj, obj.vind[k], i);
935 | if (bbox[i].high < dataset_get(obj, obj.vind[k], i)) bbox[i].high = dataset_get(obj, obj.vind[k], i);
936 | }
937 | }
938 | }
939 | else {
940 | IndexType idx;
941 | int cutfeat;
942 | DistanceType cutval;
943 | middleSplit_(obj, &obj.vind[0] + left, right - left, idx, cutfeat, cutval, bbox);
944 |
945 | node->node_type.sub.divfeat = cutfeat;
946 |
947 | BoundingBox left_bbox(bbox);
948 | left_bbox[cutfeat].high = cutval;
949 | node->child1 = divideTree(obj, left, left + idx, left_bbox);
950 |
951 | BoundingBox right_bbox(bbox);
952 | right_bbox[cutfeat].low = cutval;
953 | node->child2 = divideTree(obj, left + idx, right, right_bbox);
954 |
955 | node->node_type.sub.divlow = left_bbox[cutfeat].high;
956 | node->node_type.sub.divhigh = right_bbox[cutfeat].low;
957 |
958 | for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
959 | bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
960 | bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
961 | }
962 | }
963 |
964 | return node;
965 | }
966 |
967 | void middleSplit_(Derived &obj, IndexType* ind, IndexType count, IndexType& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
968 | {
969 | const DistanceType EPS = static_cast<DistanceType>(0.00001);
970 | ElementType max_span = bbox[0].high-bbox[0].low;
971 | for (int i = 1; i < (DIM > 0 ? DIM : obj.dim); ++i) {
972 | ElementType span = bbox[i].high - bbox[i].low;
973 | if (span > max_span) {
974 | max_span = span;
975 | }
976 | }
977 | ElementType max_spread = -1;
978 | cutfeat = 0;
979 | for (int i = 0; i < (DIM > 0 ? DIM : obj.dim); ++i) {
980 | ElementType span = bbox[i].high-bbox[i].low;
981 | if (span > (1 - EPS) * max_span) {
982 | ElementType min_elem, max_elem;
983 | computeMinMax(obj, ind, count, i, min_elem, max_elem);
984 | ElementType spread = max_elem - min_elem;
985 | if (spread > max_spread) {
986 | cutfeat = i;
987 | max_spread = spread;
988 | }
989 | }
990 | }
991 | // split in the middle
992 | DistanceType split_val = (bbox[cutfeat].low + bbox[cutfeat].high) / 2;
993 | ElementType min_elem, max_elem;
994 | computeMinMax(obj, ind, count, cutfeat, min_elem, max_elem);
995 |
996 | if (split_val < min_elem) cutval = min_elem;
997 | else if (split_val > max_elem) cutval = max_elem;
998 | else cutval = split_val;
999 |
1000 | IndexType lim1, lim2;
1001 | planeSplit(obj, ind, count, cutfeat, cutval, lim1, lim2);
1002 |
1003 | if (lim1 > count / 2) index = lim1;
1004 | else if (lim2 < count / 2) index = lim2;
1005 | else index = count/2;
1006 | }
1007 |
1008 | /**
1009 | * Subdivide the list of points by a plane perpendicular to the axis corresponding
1010 | * to the 'cutfeat' dimension, at the 'cutval' position.
1011 | *
1012 | * On return:
1013 | * dataset[ind[0..lim1-1]][cutfeat]<cutval
1014 | * dataset[ind[lim1..lim2-1]][cutfeat]==cutval
1015 | * dataset[ind[lim2..count]][cutfeat]>cutval
1016 | */
1017 | void planeSplit(Derived &obj, IndexType* ind, const IndexType count, int cutfeat, DistanceType &cutval, IndexType& lim1, IndexType& lim2)
1018 | {
1019 | /* Move vector indices for left subtree to front of list. */
1020 | IndexType left = 0;
1021 | IndexType right = count-1;
1022 | for (;; ) {
1023 | while (left <= right && dataset_get(obj, ind[left], cutfeat) < cutval) ++left;
1024 | while (right && left <= right && dataset_get(obj, ind[right], cutfeat) >= cutval) --right;
1025 | if (left > right || !right) break; // "!right" was added to support unsigned Index types
1026 | std::swap(ind[left], ind[right]);
1027 | ++left;
1028 | --right;
1029 | }
1030 | /* If either list is empty, it means that all remaining features
1031 | * are identical. Split in the middle to maintain a balanced tree.
1032 | */
1033 | lim1 = left;
1034 | right = count-1;
1035 | for (;; ) {
1036 | while (left <= right && dataset_get(obj, ind[left], cutfeat) <= cutval) ++left;
1037 | while (right && left <= right && dataset_get(obj, ind[right], cutfeat) > cutval) --right;
1038 | if (left > right || !right) break; // "!right" was added to support unsigned Index types
1039 | std::swap(ind[left], ind[right]);
1040 | ++left;
1041 | --right;
1042 | }
1043 | lim2 = left;
1044 | }
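/* Illustrative worked example (assumption): for cutfeat values {5, 1, 7, 3, 7, 2} and
 * cutval = 5, planeSplit() reorders ind so that values < 5 come first, values == 5 next
 * and values > 5 last, e.g. {1, 3, 2, 5, 7, 7}, returning lim1 = 3 and lim2 = 4.
 */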
1045 |
1046 | DistanceType computeInitialDistances(const Derived &obj, const ElementType* vec, distance_vector_t& dists) const
1047 | {
1048 | assert(vec);
1049 | DistanceType distsq = DistanceType();
1050 |
1051 | for (int i = 0; i < (DIM>0 ? DIM : obj.dim); ++i) {
1052 | if (vec[i] < obj.root_bbox[i].low) {
1053 | dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].low, i);
1054 | distsq += dists[i];
1055 | }
1056 | if (vec[i] > obj.root_bbox[i].high) {
1057 | dists[i] = obj.distance.accum_dist(vec[i], obj.root_bbox[i].high, i);
1058 | distsq += dists[i];
1059 | }
1060 | }
1061 | return distsq;
1062 | }
1063 |
1064 | void save_tree(Derived &obj, FILE* stream, NodePtr tree)
1065 | {
1066 | save_value(stream, *tree);
1067 | if (tree->child1 != NULL) {
1068 | save_tree(obj, stream, tree->child1);
1069 | }
1070 | if (tree->child2 != NULL) {
1071 | save_tree(obj, stream, tree->child2);
1072 | }
1073 | }
1074 |
1075 |
1076 | void load_tree(Derived &obj, FILE* stream, NodePtr& tree)
1077 | {
1078 | tree = obj.pool.template allocate<Node>();
1079 | load_value(stream, *tree);
1080 | if (tree->child1 != NULL) {
1081 | load_tree(obj, stream, tree->child1);
1082 | }
1083 | if (tree->child2 != NULL) {
1084 | load_tree(obj, stream, tree->child2);
1085 | }
1086 | }
1087 |
1088 | /** Stores the index in a binary file.
1089 | * IMPORTANT NOTE: The set of data points is NOT stored in the file, so when loading the index object it must be constructed associated to the same source of data points used while building it.
1090 | * See the example: examples/saveload_example.cpp
1091 | * \sa loadIndex */
1092 | void saveIndex_(Derived &obj, FILE* stream)
1093 | {
1094 | save_value(stream, obj.m_size);
1095 | save_value(stream, obj.dim);
1096 | save_value(stream, obj.root_bbox);
1097 | save_value(stream, obj.m_leaf_max_size);
1098 | save_value(stream, obj.vind);
1099 | save_tree(obj, stream, obj.root_node);
1100 | }
1101 |
1102 | /** Loads a previous index from a binary file.
1103 | * IMPORTANT NOTE: The set of data points is NOT stored in the file, so the index object must be constructed associated to the same source of data points used while building the index.
1104 | * See the example: examples/saveload_example.cpp
1105 | * \sa saveIndex */
1106 | void loadIndex_(Derived &obj, FILE* stream)
1107 | {
1108 | load_value(stream, obj.m_size);
1109 | load_value(stream, obj.dim);
1110 | load_value(stream, obj.root_bbox);
1111 | load_value(stream, obj.m_leaf_max_size);
1112 | load_value(stream, obj.vind);
1113 | load_tree(obj, stream, obj.root_node);
1114 | }
1115 |
1116 | };
1117 |
1118 |
1119 | /** @addtogroup kdtrees_grp KD-tree classes and adaptors
1120 | * @{ */
1121 |
1122 | /** kd-tree static index
1123 | *
1124 | * Contains the k-d trees and other information for indexing a set of points
1125 | * for nearest-neighbor matching.
1126 | *
1127 | * The class "DatasetAdaptor" must provide the following interface (can be non-virtual, inlined methods):
1128 | *
1129 | * \code
1130 | * // Must return the number of data points
1131 | * inline size_t kdtree_get_point_count() const { ... }
1132 | *
1133 | *
1134 | * // Must return the dim'th component of the idx'th point in the class:
1135 | * inline T kdtree_get_pt(const size_t idx, int dim) const { ... }
1136 | *
1137 | * // Optional bounding-box computation: return false to default to a standard bbox computation loop.
1138 | * // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again.
1139 | * // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds)
1140 | * template <class BBOX>