├── Point2D.cpp
├── MyRANSAC.cpp
├── MyRANSAC.h
├── RANSAC.opt
├── RANSAC.vcxproj.user
├── RANSAC.plg
├── Point2D.h
├── RANSAC.dsw
├── RANSAC.sln
├── RANSAC.vcxproj.filters
├── ParameterEsitmator.h
├── LineParamEstimator.h
├── README.md
├── LineParamEstimator.cpp
├── RansacExample.cpp
├── RANSAC.dsp
├── RANSAC.vcxproj
└── Ransac.h
/Point2D.cpp:
--------------------------------------------------------------------------------
1 | #include "Point2D.h"
2 |
--------------------------------------------------------------------------------
/MyRANSAC.cpp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TotoroJason/RANSAC/HEAD/MyRANSAC.cpp
--------------------------------------------------------------------------------
/MyRANSAC.h:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TotoroJason/RANSAC/HEAD/MyRANSAC.h
--------------------------------------------------------------------------------
/RANSAC.opt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TotoroJason/RANSAC/HEAD/RANSAC.opt
--------------------------------------------------------------------------------
/RANSAC.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/RANSAC.plg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Build Log
5 |
6 | --------------------Configuration: RANSAC - Win32 Debug--------------------
7 |
8 | Command Lines
9 |
10 |
11 |
12 | Results
13 | RANSAC.exe - 0 error(s), 0 warning(s)
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/Point2D.h:
--------------------------------------------------------------------------------
1 | #ifndef _POINT2D_H_
2 | #define _POINT2D_H_
3 |
4 | #include
5 | using namespace std;
6 | /**
7 | * Primitive 2D point class used as input for the LineParamEstimator.
8 | *
9 | * Author: Ziv Yaniv (zivy@cs.huji.ac.il)
10 | */
// Minimal 2D point: a public (x, y) coordinate pair used as the data
// type fed to the LineParamEstimator.
class Point2D {
public:
	Point2D(double px, double py) {
		x = px;
		y = py;
	}
	double x;
	double y;
};
17 |
18 | inline ostream &operator<<(ostream &output,const Point2D &pnt)
19 | {
20 | output<
7 |
8 | Package=<5>
9 | {{{
10 | }}}
11 |
12 | Package=<4>
13 | {{{
14 | }}}
15 |
16 | ###############################################################################
17 |
18 | Global:
19 |
20 | Package=<5>
21 | {{{
22 | }}}
23 |
24 | Package=<3>
25 | {{{
26 | }}}
27 |
28 | ###############################################################################
29 |
30 |
--------------------------------------------------------------------------------
/RANSAC.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 11.00
3 | # Visual Studio 2010
4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "RANSAC", "RANSAC.vcxproj", "{B844E060-4F6C-4B16-E736-D481EFDE446E}"
5 | EndProject
6 | Global
7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
8 | Debug|Win32 = Debug|Win32
9 | Release|Win32 = Release|Win32
10 | EndGlobalSection
11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
12 | {B844E060-4F6C-4B16-E736-D481EFDE446E}.Debug|Win32.ActiveCfg = Debug|Win32
13 | {B844E060-4F6C-4B16-E736-D481EFDE446E}.Debug|Win32.Build.0 = Debug|Win32
14 | {B844E060-4F6C-4B16-E736-D481EFDE446E}.Release|Win32.ActiveCfg = Release|Win32
15 | {B844E060-4F6C-4B16-E736-D481EFDE446E}.Release|Win32.Build.0 = Release|Win32
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | EndGlobal
21 |
--------------------------------------------------------------------------------
/RANSAC.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {8bafb30f-1121-4288-acf1-6a74e33d889a}
6 | cpp;c;cxx;rc;def;r;odl;idl;hpj;bat
7 |
8 |
9 | {4df0d7d0-03e6-4796-a386-92989ddedbc6}
10 | h;hpp;hxx;hm;inl
11 |
12 |
13 | {adfa2469-8193-4545-b4a4-64f733f8fc88}
14 | ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe
15 |
16 |
17 |
18 |
19 | Source Files
20 |
21 |
22 | Source Files
23 |
24 |
25 | Source Files
26 |
27 |
28 | Source Files
29 |
30 |
31 |
32 |
33 | Header Files
34 |
35 |
36 | Header Files
37 |
38 |
39 | Header Files
40 |
41 |
42 | Header Files
43 |
44 |
45 | Header Files
46 |
47 |
48 |
--------------------------------------------------------------------------------
/ParameterEsitmator.h:
--------------------------------------------------------------------------------
1 | #ifndef _PARAMETER_ESTIMATOR_H_
2 | #define _PARAMETER_ESTIMATOR_H_
3 |
4 | #include
5 |
6 | /**
7 | * This class defines the interface for parameter estimators.
8 | * Classes which inherit from it can be used by the RanSac class to perfrom robust
9 | * parameter estimation.
10 | * The interface includes three methods:
11 | * 1.estimate() - Estimation of the parameters using the minimal
12 | * amount of data (exact estimate).
13 | * 2.leastSquaresEstimate() - Estimation of the parameters using
14 | * more than the minimal amount of data, so that the estimate
15 | * minimizes a least squres error criteria.
16 | * 3.agree() - Does the given data agree with the model parameters.
17 | *
18 | * Author: Ziv Yaniv (zivy@cs.huji.ac.il)
19 | */
20 |
template<class T, class S>
class ParameterEsitmator {
public:
	/**
	 * Virtual destructor: implementations of this interface are used
	 * polymorphically (e.g. held by pointer in the Ransac class), so
	 * deleting through a base pointer must be well defined.
	 */
	virtual ~ParameterEsitmator() {}

	/**
	 * Exact estimation of parameters.
	 * @param data The data used for the estimate.
	 * @param parameters This vector is cleared and then filled with the computed parameters.
	 */
	virtual void estimate(std::vector<T*> &data, std::vector<S> &parameters) = 0;

	/**
	 * Least squares estimation of parameters.
	 * @param data The data used for the estimate.
	 * @param parameters This vector is cleared and then filled with the computed parameters.
	 */
	virtual void leastSquaresEstimate(std::vector<T*> &data, std::vector<S> &parameters) = 0;

	/**
	 * This method tests if the given data agrees with the given model parameters.
	 */
	virtual bool agree(std::vector<S> &parameters, T &data) = 0;
};
44 |
45 | #endif //_PARAMETER_ESTIMATOR_H_
46 |
--------------------------------------------------------------------------------
/LineParamEstimator.h:
--------------------------------------------------------------------------------
1 | #ifndef _LINE_PARAM_ESTIMATOR_H_
2 | #define _LINE_PARAM_ESTIMATOR_H_
3 |
4 | #include "ParameterEsitmator.h"
5 | #include "Point2D.h"
6 |
7 | /**
8 | * This class estimates the parameters of 2D lines.
9 | * A 2D line is represented as: (*) dot(n,p-a)=0
10 | * where n is the line normal (|n| = 1) and 'a' is a
11 | * point on the line.
12 | * All points 'p' which satisfy equation (*) are on the line.
13 | *
14 | * The reason for choosing this line parametrization is that it can represent
15 | * all lines, including vertical and horizontal, unlike the slope intercept (y=ax+b)
16 | * parametrization.
17 | *
18 | * Author: Ziv Yaniv (zivy@cs.huji.ac.il)
19 | */
20 |
21 | class LineParamEstimator : public ParameterEsitmator {
22 | public:
23 | LineParamEstimator(double delta);
24 |
25 | /**
26 | * Compute the line defined by the given data points.
27 | * @param data A vector containing two 2D points.
28 | * @param This vector is cleared and then filled with the computed parameters.
29 | * The parameters of the line passing through these points [n_x,n_y,a_x,a_y]
30 | * where ||(n_x,ny)|| = 1.
31 | * If the vector contains less than two points then the resulting parameters
32 | * vector is empty (size = 0).
33 | */
34 | virtual void estimate(std::vector &data, std::vector ¶meters);
35 |
36 | /**
37 | * Compute a least squares estimate of the line defined by the given points.
38 | * This implementation is of an orthogonal least squares error.
39 | *
40 | * @param data The line should minimize the least squares error to these points.
41 | * @param parameters This vector is cleared and then filled with the computed parameters.
42 | * Fill this vector with the computed line parameters [n_x,n_y,a_x,a_y]
43 | * where ||(n_x,ny)|| = 1.
44 | * If the vector contains less than two points then the resulting parameters
45 | * vector is empty (size = 0).
46 | */
47 | virtual void leastSquaresEstimate(std::vector &data, std::vector ¶meters);
48 |
49 | /**
50 | * Return true if the distance between the line defined by the parameters and the
51 | * given point is smaller than 'delta' (see constructor).
52 | * @param parameters The line parameters [n_x,n_y,a_x,a_y].
53 | * @param data Check that the distance between this point and the line is smaller than 'delta'.
54 | */
55 | virtual bool agree(std::vector ¶meters, Point2D &data);
56 |
57 | /**
58 | * Test the class methods.
59 | */
60 | static void debugTest(ostream &out);
61 |
62 | private:
63 | double m_deltaSquared; //given line L and point P, if dist(L,P)^2 < m_delta^2 then the point is on the line
64 | };
65 |
66 | #endif //_LINE_PARAM_ESTIMATOR_H_
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RANSAC
2 | 随机采样一致性算法,是在Ziv Yaniv的c++实现上重新实现了一次,加了中文注释,修正了一个错误。便于理解算法实现
3 |
4 | RANSAC是"RANdom SAmple Consensus"(随机采样一致)的缩写。它可以从一组包含“局外点”的观测数据集中,通过迭代方式估计数学模型的参数,它是一种不确定的算法----它有一定的概率得出一个合理的结果;为了提高概率必须提高迭代次数。
5 |
6 | 基本假设是:
7 | 1. 数据由“局内点”组成, 例如,数据的分布可以用一些模型(比如直线方程)参数来解释;
8 | 2. “局外点”是不能适应该模型的参数;
9 | 3. 除此之外的数据属于噪声;
10 |
11 | 局外点产生的原因有:噪声的极值;错误的测量方法;对数据的错误假设;
12 |
13 | ### 概述:
14 | RANSAC算法的输入是一组观测数据,一个可以解释或者适应于观测数据的参数化模型,一些可行的参数。
15 |
16 | RANSAC通过反复选择数据中的一组随机子集来达成目标。被选取的子集被假设为局内点,并用下述方法进行验证:
17 |
18 | 1. 有一个模型适应于假定的局内点,即所有的未知参数都能从假设的局内点计算得出;
19 | 2. 用1中得到的模型去测试所有的其它数据,如果某个点适用于估计的模型,认为他也是局内点;
20 | 3. 如果有足够多的点被归类为假设的局内点,那么估计的模型就足够合理;
21 | 4. 然后,用所有假设的局内点去重新估计模型,因为它仅仅被初始的假设局内点估计过;
22 | 5. 最后,通过估计局内点与模型的错误率来评估模型;
23 |
24 | 这个过程被重复执行固定的次数,每次产生的模型要么因为局内点太少而被抛弃,要么因为比现有的模型更好而被选用。
25 |
26 | 关于模型好坏算法实现上有两种方式:
27 | 1. 规定一个点数,达到这个点数后,算这些点与模型间的误差,找误差最小的模型。 对应下面算法一
28 | 2. 规定一个误差,找匹配模型并小于这个误差的所有点,匹配的点最多的模型,就是最好模型。 对应下面算法二
29 |
30 | #### 算法伪代码一:
31 |
32 | ```
33 | 输入:
34 | data ---- 一组观测数据
35 | model ---- 适应于数据的模型
36 | n ---- 适用于模型的最少数据个数
37 | k ---- 算法的迭代次数
38 | t ---- 用于决定数据是否适应于模型的阈值
39 | d ---- 判定模型是否适用于数据集的数据数目
40 |
41 | 输出:
42 | best_model —— 跟数据最匹配的模型参数(如果没有找到,返回null)
43 | best_consensus_set —— 估计出模型的数据点
44 | best_error —— 跟数据相关的估计出的模型的错误
45 |
46 | iterations = 0
47 | best_model = null
48 | best_consensus_set = null
49 | best_error = 无穷大
50 | while( iterations < k )
51 | maybe_inliers = 从数据集中随机选择n个点
52 | maybe_model = 适合于maybe_inliers的模型参数
53 | consensus_set = maybe_inliers
54 |
55 | for (每个数据集中不属于maybe_inliers的点)
56 | if (如果点适合于maybe_model,并且错误小于t)
57 | 将该点添加到consensus_set
58 |
59 | if (consensus_set中的点数大于d)
60 | 已经找到了好的模型, 现在测试该模型到底有多好
61 | better_model = 适用于consensus_set中所有点的模型参数
62 | this_error = better_model 究竟如何适合这些点的度量
63 |
64 | if (this_error < best_error)
65 | 发现比以前好的模型,保存该模型直到更好的模型出现
66 | best_model = better_model
67 | best_consensus_set = consensus_set
68 | best_error = this_error
69 |
70 | iterations ++
71 | 函数返回best_model, best_consensus_set, best_error
72 | ```
73 | RANSAC算法的可能变化包括以下几种:
74 | 1. 如果发现一种足够好的模型(该模型有足够低的错误率), 则跳出主循环,这样节约不必要的计算;设置一个错误率的阈值,小于这个值就跳出循环;
75 | 2. 可以直接从maybe_model计算this_error,而不从consensus_set重新估计模型,这样可能会节约时间,但是可能会对噪音敏感。
76 |
77 |
78 | ### 算法伪代码二:
79 |
80 | ```
81 | 输入:
82 | data ---- 一组观测数据
83 | numForEstimate ----- 初始模型需要的点数
84 | delta ------ 判定点符合模型的误差
85 | probability ----- 表示迭代过程中从数据集内随机选取出的点均为局内点的概率
86 |
87 | 输出:
88 | best_model —— 跟数据最匹配的模型参数(如果没有找到,返回null)
89 | best_consensus_set —— 估计出模型的数据点
90 |
91 | k = 1000 //设置初始值
92 |
93 | iterations = 0
94 | best_model = null
95 | best_consensus_set = null
96 |
97 | while( iterations < k )
98 | maybe_inliers = 从数据集中随机选择numForEstimate个点
99 | maybe_model = 适合于maybe_inliers的模型参数,比如直线,取两个点,得直线方程
100 |
101 | for (每个数据集中不属于maybe_inliers的点)
102 | if (如果点适合于maybe_model,并且错误小于delta)
103 | 将该点添加到maybe_inliers
104 |
105 | if(maybe_inliers的点数 > best_consensus_set 的点数) //找到更好的模型
106 | best_model = maybe_model
107 | best_consensus_set = maybe_inliers
108 | 根据公式k=log(1-p)/log(1-pow(w,n))重新计算k
109 | iterations ++
110 | 函数返回best_model, best_consensus_set,
111 | ```
112 |
113 |
114 |
115 |
116 | ### 参数
117 | 我们不得不根据特定的问题和数据集通过实验来确定参数t和d。然而参数k(迭代次数)可以从理论结果推断。当我们从估计模型参数时,用p表示一些迭代过程中从数据集内随机选取出的点均为局内点的概率;此时,结果模型很可能有用,因此p也表征了算法产生有用结果的概率。用w表示每次从数据集中选取一个局内点的概率,如下式所示:
118 | w = 局内点的数目 / 数据集的数目
119 | 通常情况下,我们事先并不知道w的值,但是可以给出一些鲁棒的值。假设估计模型需要选定n个点,w^n是所有n个点均为局内点的概率;1 − w^n是n个点中至少有一个点为局外点的概率,此时表明我们从数据集中估计出了一个不好的模型。(1 − w^n)^k表示算法永远都不会选择到n个点均为局内点的概率,它和1-p相同。因此,
120 |
121 | ```math
122 | 1-p=(1 - w^n)^k
123 | ```
124 |
125 | 我们对上式的两边取对数,得出
126 |
127 | 
128 |
129 | 值得注意的是,这个结果假设n个点都是独立选择的;也就是说,某个点被选定之后,它可能会被后续的迭代过程重复选定到。这种方法通常都不合理,由此推导出的k值被看作是选取不重复点的上限。例如,要从上图中的数据集寻找适合的直线,RANSAC算法通常在每次迭代时选取2个点,计算通过这两点的直线maybe_model,要求这两点必须唯一。
130 |
131 | 为了得到更可信的参数,标准偏差或它的乘积可以被加到k上。k的标准偏差定义为:
132 |
133 | 
134 |
135 | ### 优点与缺点
136 | RANSAC的优点是它能鲁棒的估计模型参数。例如,它能从包含大量局外点的数据集中估计出高精度的参数。
137 |
138 | RANSAC的缺点是它计算参数的迭代次数没有上限;如果设置迭代次数的上限,得到的结果可能不是最优的结果,甚至可能得到错误的结果。RANSAC只有一定的概率得到可信的模型,概率与迭代次数成正比。RANSAC的另一个缺点是它要求设置跟问题相关的阈值。
139 |
140 | RANSAC只能从特定的数据集中估计出一个模型,如果存在两个(或多个)模型,RANSAC不能找到别的模型。如果有多个模型,可以先估算出一个,然后用剩余的数据重新运算,重复这个过程,直到没有模型。
141 |
142 | ### 参考文章
143 | http://www.cnblogs.com/xrwang/archive/2011/03/09/ransac-1.html
144 |
145 | ### 相关知识点
146 | #### 最小二乘法
147 | https://www.zhihu.com/question/37031188
148 |
149 |
--------------------------------------------------------------------------------
/LineParamEstimator.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include "LineParamEstimator.h"
3 |
4 | LineParamEstimator::LineParamEstimator(double delta) : m_deltaSquared(delta*delta) {}
5 | /*****************************************************************************/
6 | /*
7 | * Compute the line parameters [n_x,n_y,a_x,a_y]
8 | */
9 | void LineParamEstimator::estimate(std::vector &data,
10 | std::vector ¶meters)
11 | {
12 | parameters.clear();
13 | if(data.size()<2)
14 | return;
15 | double nx = data[1]->y - data[0]->y;
16 | double ny = data[0]->x - data[1]->x;
17 | double norm = sqrt(nx*nx + ny*ny);
18 |
19 | parameters.push_back(nx/norm);
20 | parameters.push_back(ny/norm);
21 | parameters.push_back(data[0]->x);
22 | parameters.push_back(data[0]->y);
23 | }
24 | /*****************************************************************************/
25 | /*
26 | * Compute the line parameters [n_x,n_y,a_x,a_y]
27 | */
28 | void LineParamEstimator::leastSquaresEstimate(std::vector &data,
29 | std::vector ¶meters)
30 | {
31 | double meanX, meanY, nx, ny, norm;
32 | double covMat11, covMat12, covMat21, covMat22; // The entries of the symmetric covarinace matrix
33 | int i, dataSize = data.size();
34 |
35 | parameters.clear();
36 | if(data.size()<2)
37 | return;
38 |
39 | meanX = meanY = 0.0;
40 | covMat11 = covMat12 = covMat21 = covMat22 = 0;
41 | for(i=0; ix;
43 | meanY +=data[i]->y;
44 |
45 | covMat11 +=data[i]->x * data[i]->x;
46 | covMat12 +=data[i]->x * data[i]->y;
47 | covMat22 +=data[i]->y * data[i]->y;
48 | }
49 |
50 | meanX/=dataSize;
51 | meanY/=dataSize;
52 |
53 | covMat11 -= dataSize*meanX*meanX;
54 | covMat12 -= dataSize*meanX*meanY;
55 | covMat22 -= dataSize*meanY*meanY;
56 | covMat21 = covMat12;
57 |
58 | if(covMat11<1e-12) {
59 | nx = 1.0;
60 | ny = 0.0;
61 | }
62 | else { //lamda1 is the largest eigen-value of the covariance matrix
63 | //and is used to compute the eigne-vector corresponding to the smallest
64 | //eigenvalue, which isn't computed explicitly.
65 | double lamda1 = (covMat11 + covMat22 + sqrt((covMat11-covMat22)*(covMat11-covMat22) + 4*covMat12*covMat12)) / 2.0;
66 | nx = -covMat12;
67 | ny = lamda1 - covMat22;
68 | norm = sqrt(nx*nx + ny*ny);
69 | nx/=norm;
70 | ny/=norm;
71 | }
72 | parameters.push_back(nx);
73 | parameters.push_back(ny);
74 | parameters.push_back(meanX);
75 | parameters.push_back(meanY);
76 | }
77 | /*****************************************************************************/
78 | /*
79 | * Given the line parameters [n_x,n_y,a_x,a_y] check if
80 | * [n_x, n_y] dot [data.x-a_x, data.y-a_y] < m_delta
81 | */
82 | bool LineParamEstimator::agree(std::vector ¶meters, Point2D &data)
83 | {
84 | double signedDistance = parameters[0]*(data.x-parameters[2]) + parameters[1]*(data.y-parameters[3]);
85 | return ((signedDistance*signedDistance) < m_deltaSquared);
86 | }
87 | /*****************************************************************************/
88 | void LineParamEstimator::debugTest(ostream &out)
89 | {
90 | std::vector lineParameters;
91 | LineParamEstimator lpEstimator(0.5);
92 | std::vector pointData;
93 |
94 | pointData.push_back(new Point2D(7,7));
95 | pointData.push_back(new Point2D(-1,-1));
96 | lpEstimator.estimate(pointData,lineParameters);
97 | out<<"[n_x,n_y,a_x,a_y] [ "< lineParameters;
13 | LineParamEstimator lpEstimator(0.5); //for a point to be on the line it has to be closer than 0.5 units from the line
14 | std::vector pointData;
15 | std::vector pointDataPtr;
16 | int numForEstimate = 2;
17 | int numSamples = 20;
18 | int numOutliers = 80;
19 | double desiredProbabilityForNoOutliers = 0.999;
20 | double maximalOutlierPercentage = 0.1 + (double)numOutliers/(double)(numSamples + numOutliers);
21 | double noiseSpreadRadius = 0.4;
22 | double outlierSpreadRadius = 10;
23 | int i;
24 | double newX, newY, dx, dy, norm;
25 |
26 | //1.Create data with outliers
27 |
28 | //randomly select a direction [dx,dy] and create a line passing through the origin
29 | //for each point sampled on the line add random noise, finally add outlying
30 | //points in the direction of the line normal.
31 |
32 | srand((unsigned)time(NULL)); //seed random number generator
33 |
34 | //get random direction
35 | dx = rand();
36 | dy = rand();
37 | norm = sqrt(dx*dx + dy*dy);
38 | dx/= norm;
39 | dy/= norm;
40 | dx *= (rand() > RAND_MAX/2 ? 1 : -1);
41 | dy *= (rand() > RAND_MAX/2 ? 1 : -1);
42 |
43 |
44 | //add 'numSamples' points
45 | for(i=0; i RAND_MAX/2 ? 1 : -1);
47 | newY = i*dy + noiseSpreadRadius*(double)rand()/(double)RAND_MAX * (rand() > RAND_MAX/2 ? 1 : -1);
48 | pointDataPtr.push_back(new Point2D(newX,newY));
49 | pointData.push_back(*(pointDataPtr[i]));
50 | }
51 |
52 | //'numOutliers' points
53 | double centerX = -dy*100;
54 | double centerY = dx*100;
55 | for(i=0; i RAND_MAX/2 ? 1 : -1);
57 | newY = centerY + outlierSpreadRadius * (double)rand()/(double)RAND_MAX * (rand() > RAND_MAX/2 ? 1 : -1);
58 | pointDataPtr.push_back(new Point2D(newX,newY));
59 | pointData.push_back(*(pointDataPtr[pointDataPtr.size()-1]));
60 | }
61 |
62 | double dotProd;
63 |
64 | //2. Compare least squares approach to Ransac
65 |
66 | cout<<"Total number of points used: "<::compute(lineParameters,
83 | &lpEstimator ,
84 | pointData,
85 | numForEstimate);
86 |
87 |
88 | cout<<"RANSAC line parameters [n_x,n_y,a_x,a_y]\n\t [ "<
2 | # Microsoft Developer Studio Generated Build File, Format Version 6.00
3 | # ** DO NOT EDIT **
4 |
5 | # TARGTYPE "Win32 (x86) Console Application" 0x0103
6 |
7 | CFG=RANSAC - Win32 Debug
8 | !MESSAGE This is not a valid makefile. To build this project using NMAKE,
9 | !MESSAGE use the Export Makefile command and run
10 | !MESSAGE
11 | !MESSAGE NMAKE /f "RANSAC.mak".
12 | !MESSAGE
13 | !MESSAGE You can specify a configuration when running NMAKE
14 | !MESSAGE by defining the macro CFG on the command line. For example:
15 | !MESSAGE
16 | !MESSAGE NMAKE /f "RANSAC.mak" CFG="RANSAC - Win32 Debug"
17 | !MESSAGE
18 | !MESSAGE Possible choices for configuration are:
19 | !MESSAGE
20 | !MESSAGE "RANSAC - Win32 Release" (based on "Win32 (x86) Console Application")
21 | !MESSAGE "RANSAC - Win32 Debug" (based on "Win32 (x86) Console Application")
22 | !MESSAGE
23 |
24 | # Begin Project
25 | # PROP AllowPerConfigDependencies 0
26 | # PROP Scc_ProjName ""
27 | # PROP Scc_LocalPath ""
28 | CPP=cl.exe
29 | RSC=rc.exe
30 |
31 | !IF "$(CFG)" == "RANSAC - Win32 Release"
32 |
33 | # PROP BASE Use_MFC 0
34 | # PROP BASE Use_Debug_Libraries 0
35 | # PROP BASE Output_Dir "Release"
36 | # PROP BASE Intermediate_Dir "Release"
37 | # PROP BASE Target_Dir ""
38 | # PROP Use_MFC 0
39 | # PROP Use_Debug_Libraries 0
40 | # PROP Output_Dir "Release"
41 | # PROP Intermediate_Dir "Release"
42 | # PROP Target_Dir ""
43 | # ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
44 | # ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /FD /c
45 | # SUBTRACT CPP /YX
46 | # ADD BASE RSC /l 0x409 /d "NDEBUG"
47 | # ADD RSC /l 0x409 /d "NDEBUG"
48 | BSC32=bscmake.exe
49 | # ADD BASE BSC32 /nologo
50 | # ADD BSC32 /nologo
51 | LINK32=link.exe
52 | # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
53 | # ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
54 |
55 | !ELSEIF "$(CFG)" == "RANSAC - Win32 Debug"
56 |
57 | # PROP BASE Use_MFC 0
58 | # PROP BASE Use_Debug_Libraries 1
59 | # PROP BASE Output_Dir "Debug"
60 | # PROP BASE Intermediate_Dir "Debug"
61 | # PROP BASE Target_Dir ""
62 | # PROP Use_MFC 0
63 | # PROP Use_Debug_Libraries 1
64 | # PROP Output_Dir "Debug"
65 | # PROP Intermediate_Dir "Debug"
66 | # PROP Target_Dir ""
67 | # ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
68 | # ADD CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /FD /GZ /c
69 | # SUBTRACT CPP /YX
70 | # ADD BASE RSC /l 0x409 /d "_DEBUG"
71 | # ADD RSC /l 0x409 /d "_DEBUG"
72 | BSC32=bscmake.exe
73 | # ADD BASE BSC32 /nologo
74 | # ADD BSC32 /nologo
75 | LINK32=link.exe
76 | # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
77 | # ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
78 |
79 | !ENDIF
80 |
81 | # Begin Target
82 |
83 | # Name "RANSAC - Win32 Release"
84 | # Name "RANSAC - Win32 Debug"
85 | # Begin Group "Source Files"
86 |
87 | # PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat"
88 | # Begin Source File
89 |
90 | SOURCE=.\LineParamEstimator.cpp
91 | # End Source File
92 | # Begin Source File
93 |
94 | SOURCE=.\Point2D.cpp
95 | # End Source File
96 | # Begin Source File
97 |
98 | SOURCE=.\RansacExample.cpp
99 | # End Source File
100 | # End Group
101 | # Begin Group "Header Files"
102 |
103 | # PROP Default_Filter "h;hpp;hxx;hm;inl"
104 | # Begin Source File
105 |
106 | SOURCE=.\LineParamEstimator.h
107 | # End Source File
108 | # Begin Source File
109 |
110 | SOURCE=.\ParameterEsitmator.h
111 | # End Source File
112 | # Begin Source File
113 |
114 | SOURCE=.\Point2D.h
115 | # End Source File
116 | # Begin Source File
117 |
118 | SOURCE=.\Ransac.h
119 | # End Source File
120 | # End Group
121 | # Begin Group "Resource Files"
122 |
123 | # PROP Default_Filter "ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe"
124 | # End Group
125 | # End Target
126 | # End Project
127 |
--------------------------------------------------------------------------------
/RANSAC.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 | Application
20 | false
21 | MultiByte
22 |
23 |
24 | Application
25 | false
26 | MultiByte
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 | .\Debug\
42 | .\Debug\
43 | true
44 |
45 |
46 | .\Release\
47 | .\Release\
48 | false
49 |
50 |
51 |
52 | MultiThreadedDebug
53 | Default
54 | false
55 | Disabled
56 | true
57 | Level3
58 | true
59 | EditAndContinue
60 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
61 | .\Debug\
62 | .\Debug\RANSAC.pch
63 | .\Debug\
64 | .\Debug\
65 | EnableFastChecks
66 |
67 |
68 | .\Debug\RANSAC.tlb
69 |
70 |
71 | 0x0409
72 | _DEBUG;%(PreprocessorDefinitions)
73 |
74 |
75 | true
76 | .\Debug\RANSAC.bsc
77 |
78 |
79 | true
80 | true
81 | Console
82 | .\Debug\RANSAC.exe
83 | odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
84 |
85 |
86 |
87 |
88 | MultiThreaded
89 | OnlyExplicitInline
90 | true
91 | true
92 | MaxSpeed
93 | true
94 | Level3
95 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
96 | .\Release\
97 | .\Release\RANSAC.pch
98 | .\Release\
99 | .\Release\
100 |
101 |
102 | .\Release\RANSAC.tlb
103 |
104 |
105 | 0x0409
106 | NDEBUG;%(PreprocessorDefinitions)
107 |
108 |
109 | true
110 | .\Release\RANSAC.bsc
111 |
112 |
113 | true
114 | Console
115 | .\Release\RANSAC.exe
116 | odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
--------------------------------------------------------------------------------
/Ransac.h:
--------------------------------------------------------------------------------
1 | #ifndef _RANSAC_H_
2 | #define _RANSAC_H_
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | #include "ParameterEsitmator.h"
11 |
12 | /**
13 | * This class implements the Random Sample Consensus (Ransac) framework,
14 | * a framework for robust parameter estimation.
15 | * Given data containing outliers we estimate the model parameters using sub-sets of
16 | * the original data:
17 | * 1. Choose the minimal subset from the data for computing the exact model parameters.
18 | * 2. See how much of the input data agrees with the computed parameters.
19 | * 3. Goto step 1. This can be done up to (m choose N) times, where m is the number of
20 | * data objects required for an exact estimate and N is the total number of data objects.
21 | * 4. Take the largest subset of objects which agreed on the parameters and compute a
22 | * least squares fit using them.
23 | *
24 | * This is based on:
25 | * Martin A. Fischler, Robert C. Bolles,
26 | * ``Random Sample Consensus: A Paradigm for Model Fitting with Applications to Image Analysis and Automated Cartography'',
27 | * Communications of the ACM, Vol. 24(6), 1981.
28 | *
29 | * Richard I. Hartely, Andrew Zisserman, "Multiple View Geometry in Computer Vision", Cambridge University Press, 2000.
30 | *
31 | * The class template parameters are T - objects used for the parameter estimation (e.g. Point2D).
32 | * S - type of parameter (e.g. double).
33 | *
34 | * Author: Ziv Yaniv (zivy@cs.huji.ac.il)
35 | */
36 | template
37 | class Ransac {
38 |
39 | public:
40 | /**
41 | * Estimate the model parameters using the Ransac framework.
42 | * @param parameters A vector which will contain the estimated parameters.
43 | * If there is an error in the input then this vector will be empty.
44 | * Errors are: 1. Less data objects than required for an exact fit.
45 | * @param paramEstimator An object which can estimate the desired parameters using either an exact fit or a
46 | * least squares fit.
47 | * @param data The input from which the parameters will be estimated.
48 | * @param numForEstimate The number of data objects required for an exact fit.
49 | * @param desiredProbabilityForNoOutliers The probability that at least one of the selected subsets doesn't contain an
50 | * outlier.
51 | * @param maximalOutlierPercentage The maximal expected percentage of outliers.
52 | * @return Returns the percentage of data used in the least squares estimate.
53 | */
54 | static double compute(std::vector ¶meters,
55 | ParameterEsitmator *paramEstimator ,
56 | std::vector &data,
57 | int numForEstimate,
58 | double desiredProbabilityForNoOutliers,
59 | double maximalOutlierPercentage);
60 |
61 |
62 | /**
63 | * Estimate the model parameters using the maximal consensus set by going over ALL possible
64 | * subsets (brute force approach).
65 | * Given: n - data.size()
66 | * k - numForEstimate
67 | * We go over all n choose k subsets n!
68 | * ------------
69 | * (n-k)! * k!
70 | * @param parameters A vector which will contain the estimated parameters.
71 | * If there is an error in the input then this vector will be empty.
72 | * Errors are: 1. Less data objects than required for an exact fit.
73 | * @param paramEstimator An object which can estimate the desired parameters using either an exact fit or a
74 | * least squares fit.
75 | * @param data The input from which the parameters will be estimated.
76 | * @param numForEstimate The number of data objects required for an exact fit.
77 | * @return Returns the percentage of data used in the least squares estimate.
78 | *
79 | * NOTE: This method should be used only when n choose k is small (i.e. k or (n-k) are approximatly equal to n)
80 | *
81 | */
82 | static double compute(std::vector ¶meters,
83 | ParameterEsitmator *paramEstimator ,
84 | std::vector &data,
85 | int numForEstimate);
86 |
87 | private:
88 |
89 |
90 | static void computeAllChoices(ParameterEsitmator *paramEstimator, std::vector &data, int numForEstimate,
91 | short *bestVotes, short *curVotes, int &numVotesForBest, int startIndex, int n, int k, int arrIndex, int *arr);
92 |
93 | static void estimate(ParameterEsitmator *paramEstimator, std::vector &data, int numForEstimate,
94 | short *bestVotes, short *curVotes, int &numVotesForBest, int *arr);
95 |
96 | class SubSetIndexComparator {
97 | private:
98 | int m_length;
99 | public:
100 | SubSetIndexComparator(int arrayLength) : m_length(arrayLength){}
101 | bool operator()(const int *arr1, const int *arr2) const {
102 | for(int i=0; i
115 | double Ransac::compute(std::vector ¶meters,
116 | ParameterEsitmator *paramEstimator ,
117 | std::vector &data,
118 | int numForEstimate,
119 | double desiredProbabilityForNoOutliers,
120 | double maximalOutlierPercentage)
121 | {
122 | int numDataObjects = data.size();
123 | //there are less data objects than the minimum required for an exact fit, or
124 | //all the data is outliers?
125 | if(numDataObjects < numForEstimate || maximalOutlierPercentage>=1.0)
126 | return 0;
127 |
128 | std::vector exactEstimateData;
129 | std::vector leastSquaresEstimateData;
130 | std::vector exactEstimateParameters;
131 | int i, j, k, l, numVotesForBest, numVotesForCur, maxIndex, numTries;
132 | short *bestVotes = new short[numDataObjects]; //one if data[i] agrees with the best model, otherwise zero
133 | short *curVotes = new short[numDataObjects]; //one if data[i] agrees with the current model, otherwise zero
134 | short *notChosen = new short[numDataObjects]; //not zero if data[i] is NOT chosen for computing the exact fit, otherwise zero
135 | SubSetIndexComparator subSetIndexComparator(numForEstimate);
136 | std::set chosenSubSets(subSetIndexComparator);
137 | int *curSubSetIndexes;
138 | double outlierPercentage = maximalOutlierPercentage;
139 | double numerator = log(1.0-desiredProbabilityForNoOutliers);
140 | double denominator = log(1- pow(1-maximalOutlierPercentage, numForEstimate));
141 |
142 | parameters.clear();
143 |
144 |
145 | numVotesForBest = -1; //initalize with -1 so that the first computation will be set to best
146 | srand((unsigned)time(NULL)); //seed random number generator
147 | numTries = (int)(numerator/denominator + 0.5);
148 |
149 | for(i=0; i::iterator, bool > res = chosenSubSets.insert(curSubSetIndexes);
181 |
182 | if(res.second == true) { //first time we chose this sub set
183 |
184 | //use the selected data for an exact model parameter fit
185 | paramEstimator->estimate(exactEstimateData,exactEstimateParameters);
186 | //see how many agree on this estimate
187 | numVotesForCur = 0;
188 | memset(curVotes,'\0',numDataObjects*sizeof(short));
189 | for(j=0; jagree(exactEstimateParameters, data[j])) {
191 | curVotes[j] = 1;
192 | numVotesForCur++;
193 | }
194 | }
195 | if(numVotesForCur > numVotesForBest) {
196 | numVotesForBest = numVotesForCur;
197 | memcpy(bestVotes,curVotes, numDataObjects*sizeof(short));
198 | }
199 | //update the estimate of outliers and the number of iterations we need
200 | outlierPercentage = 1 - (double)numVotesForCur/(double)numDataObjects;
201 | if(outlierPercentage < maximalOutlierPercentage) {
202 | maximalOutlierPercentage = outlierPercentage;
203 | denominator = log(1- pow(1-maximalOutlierPercentage, numForEstimate));
204 | numTries = (int)(numerator/denominator + 0.5);
205 | }
206 | }
207 | else { //this sub set already appeared, don't count this iteration
208 | delete [] curSubSetIndexes;
209 | i--;
210 | }
211 | }
212 |
213 | //release the memory
214 | std::set::iterator it = chosenSubSets.begin();
215 | std::set::iterator chosenSubSetsEnd = chosenSubSets.end();
216 | while(it!=chosenSubSetsEnd) {
217 | delete [] (*it);
218 | it++;
219 | }
220 | chosenSubSets.clear();
221 |
222 | //compute the least squares estimate using the largest sub set
223 | for(j=0; jleastSquaresEstimate(leastSquaresEstimateData,parameters);
228 |
229 | delete [] bestVotes;
230 | delete [] curVotes;
231 | delete [] notChosen;
232 |
233 | return (double)numVotesForBest/(double)numDataObjects;
234 | }
235 | /*****************************************************************************/
236 | template
237 | double Ransac::compute(std::vector ¶meters,
238 | ParameterEsitmator *paramEstimator ,
239 | std::vector &data,
240 | int numForEstimate)
241 | {
242 | std::vector leastSquaresEstimateData;
243 | int numDataObjects = data.size();
244 | int numVotesForBest = -1;
245 | int *arr = new int[numForEstimate];
246 | short *curVotes = new short[numDataObjects]; //one if data[i] agrees with the current model, otherwise zero
247 | short *bestVotes = new short[numDataObjects]; //one if data[i] agrees with the best model, otherwise zero
248 |
249 |
250 | //there are less data objects than the minimum required for an exact fit
251 | if(numDataObjects < numForEstimate)
252 | return 0;
253 |
254 | computeAllChoices(paramEstimator,data,numForEstimate,
255 | bestVotes, curVotes, numVotesForBest, 0, data.size(), numForEstimate, 0, arr);
256 |
257 | //compute the least squares estimate using the largest sub set
258 | for(int j=0; jleastSquaresEstimate(leastSquaresEstimateData,parameters);
263 |
264 | delete [] arr;
265 | delete [] bestVotes;
266 | delete [] curVotes;
267 |
268 | return (double)leastSquaresEstimateData.size()/(double)numDataObjects;
269 | }
270 | /*****************************************************************************/
271 | template
272 | void Ransac::computeAllChoices(ParameterEsitmator *paramEstimator, std::vector &data, int numForEstimate,
273 | short *bestVotes, short *curVotes, int &numVotesForBest, int startIndex, int n, int k, int arrIndex, int *arr)
274 | {
275 | //we have a new choice of indexes
276 | if(k==0) {
277 | estimate(paramEstimator, data, numForEstimate, bestVotes, curVotes, numVotesForBest, arr);
278 | return;
279 | }
280 |
281 | //continue to recursivly generate the choice of indexes
282 | int endIndex = n-k;
283 | for(int i=startIndex; i<=endIndex; i++) {
284 | arr[arrIndex] = i;
285 | computeAllChoices(paramEstimator, data, numForEstimate, bestVotes, curVotes, numVotesForBest,
286 | i+1, n, k-1, arrIndex+1, arr);
287 | }
288 |
289 | }
290 | /*****************************************************************************/
291 | template
292 | void Ransac::estimate(ParameterEsitmator *paramEstimator, std::vector &data, int numForEstimate,
293 | short *bestVotes, short *curVotes, int &numVotesForBest, int *arr)
294 | {
295 | std::vector exactEstimateData;
296 | std::vector exactEstimateParameters;
297 | int numDataObjects;
298 | int numVotesForCur;//initalize with -1 so that the first computation will be set to best
299 | int j;
300 |
301 | numDataObjects = data.size();
302 | memset(curVotes,'\0',numDataObjects*sizeof(short));
303 | numVotesForCur=0;
304 |
305 | for(j=0; jestimate(exactEstimateData,exactEstimateParameters);
308 |
309 | for(j=0; jagree(exactEstimateParameters, data[j])) {
311 | curVotes[j] = 1;
312 | numVotesForCur++;
313 | }
314 | }
315 | if(numVotesForCur > numVotesForBest) {
316 | numVotesForBest = numVotesForCur;
317 | memcpy(bestVotes,curVotes, numDataObjects*sizeof(short));
318 | }
319 | }
320 | #endif //_RANSAC_H_
321 |
--------------------------------------------------------------------------------