├── README.md
├── code
├── Common.cpp
├── Common.h
├── Visualization.cpp
├── Visualization.h
└── main.cpp
├── img
├── matching.png
├── orignImg.gif
└── reconstruction.gif
└── opencv2413_ssba_pcl.props
/README.md:
--------------------------------------------------------------------------------
1 | # structure-from-motion
2 | This is an experimental project for 3D reconstruction. It follows the structure-from-motion project from [1] and reimplements the original project in a procedural programming style (instead of the original object-oriented style), to make the code easier to analyse and the algorithm flow easier to follow.
3 |
4 | ## Overview
5 | A sparse 3D point cloud is produced from multiple images. The general steps of the method are (a minimal driver sketch follows the list):
6 | 1. Extract dense keypoints and match them between images by optical flow;
7 | 2. 3D reconstruction from two views (camera matrix estimation and triangulation);
8 | 3. 3D reconstruction from multiple views by incrementally adding new images (incremental camera matrix estimation, triangulation, and Bundle Adjustment (BA)).
9 |
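Putting these steps together: an illustrative driver built from the functions declared in `code/Common.h` (the repository's actual entry point is `code/main.cpp`, not shown here; the image directory and downscale factor below are placeholders):

```cpp
#include "Common.h"

int main()
{
    // Step 0: load the images and the calibration matrix (a mockup K is built if no file is found)
    read_images_and_calibration_matrix("./img", 1.0);
    optical_flow_feature_match();   // Step 1: dense keypoints + optical-flow matching
    PruneMatchesBasedOnF();         //         filter the matches with the fundamental matrix
    GetBaseLineTriangulation();     // Step 2: two-view reconstruction of the baseline pair
    AdjustCurrentBundle();          //         refine the baseline cloud with BA
    RecoverCamerasIncremental();    // Step 3: add the remaining views incrementally
    return 0;
}
```
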
10 | ## Installation Dependencies
11 | 1. OpenCV v2.3 or higher (http://opencv.org/)
12 | 2. SSBA v3.0 or higher (Simple Sparse Bundle Adjustment)
13 | 3. The Point Cloud Library (PCL, v1.6 or higher) from http://pointclouds.org/ for visualization
14 |
15 | ## Installation
16 | 1. Install the dependency libraries from the section above using CMake and MS Visual Studio 2013
17 | 2. Edit the property sheet opencv2413_ssba_pcl.props in the main directory
18 | 3. Make a new project, add the property sheet, and add the code from the source folder
19 |
20 | ## Example
21 | The 8 input images:
22 | ![Input images](img/orignImg.gif)
23 | Example of corresponding points estimated from two images:
24 | ![Corresponding points between two images](img/matching.png)
25 | The reconstructed sparse 3D point cloud:
26 | ![Reconstructed sparse 3D point cloud](img/reconstruction.gif)
27 |
28 | ## To be continued
29 | 1. Dense 3D point cloud reconstruction (example: http://www.di.ens.fr/pmvs/)
30 |
31 | ## References
32 |
33 | [1] Baggio, Daniel Lélis. Mastering OpenCV with practical computer vision projects. Packt Publishing Ltd, 2012.
34 |
35 | ## Disclaimer
36 | This work is largely based on the following repo:
37 | 1. [MasteringOpenCV/code](https://github.com/MasteringOpenCV/code)
38 |
--------------------------------------------------------------------------------
/code/Common.cpp:
--------------------------------------------------------------------------------
1 | #include "Common.h"
2 | #include <iostream>
3 | #include <sstream>
4 | #include <algorithm>
5 | #include <list>
6 | #include <set>
7 |
8 | #define V3DLIB_ENABLE_SUITESPARSE
9 | #include "Math/v3d_linear.h"
10 | #include "Base/v3d_vrmlio.h"
11 | #include "Geometry/v3d_metricbundle.h"
12 |
13 |
14 | #include <opencv2/opencv.hpp>
15 | 
16 | #ifndef WIN32
17 | #include <dirent.h>
18 | #else
19 | #include <windows.h>
20 | #endif
19 |
20 | using namespace V3D;
21 | using namespace std;
22 | using namespace cv;
23 |
24 | #define DECOMPOSE_SVD
25 | #ifndef CV_PCA_DATA_AS_ROW
26 | #define CV_PCA_DATA_AS_ROW 0
27 | #endif
28 |
29 | #define EPSILON 0.0001
30 |
31 | bool hasEnding(std::string const &fullString, std::string const &ending)
32 | {
33 | if (fullString.length() >= ending.length()) {
34 | return (0 == fullString.compare(fullString.length() - ending.length(), ending.length(), ending));
35 | }
36 | else {
37 | return false;
38 | }
39 | }
40 |
41 | bool hasEndingLower(string const &fullString_, string const &_ending)
42 | {
43 | string fullstring = fullString_, ending = _ending;
44 | transform(fullString_.begin(), fullString_.end(), fullstring.begin(), ::tolower); // to lower
45 | return hasEnding(fullstring, ending);
46 | }
47 |
48 |
49 | void read_images_and_calibration_matrix(std::string dir_name, double downscale_factor)
50 | {
51 | string dir_name_ = dir_name;
52 | vector<string> files_;
53 |
54 | #ifndef WIN32
55 | //open a directory the POSIX way
56 |
57 | DIR *dp;
58 | struct dirent *ep;
59 | dp = opendir(dir_name_.c_str());
60 |
61 | if (dp != NULL)
62 | {
63 | while ((ep = readdir(dp)) != NULL) {
64 | if (ep->d_name[0] != '.')
65 | files_.push_back(ep->d_name);
66 | }
67 |
68 | (void)closedir(dp);
69 | }
70 | else {
71 | cerr << ("Couldn't open the directory");
72 | return;
73 | }
74 |
75 | #else
76 | //open a directory the WIN32 way
77 | HANDLE hFind = INVALID_HANDLE_VALUE;
78 | WIN32_FIND_DATA fdata;
79 |
80 | if (dir_name_[dir_name_.size() - 1] == '\\' || dir_name_[dir_name_.size() - 1] == '/') {
81 | dir_name_ = dir_name_.substr(0, dir_name_.size() - 1);
82 | }
83 |
84 | hFind = FindFirstFile(string(dir_name_).append("\\*").c_str(), &fdata);
85 | if (hFind != INVALID_HANDLE_VALUE)
86 | {
87 | do
88 | {
89 | if (strcmp(fdata.cFileName, ".") != 0 &&
90 | strcmp(fdata.cFileName, "..") != 0)
91 | {
92 | if (fdata.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
93 | {
94 | continue; // a directory
95 | }
96 | else
97 | {
98 | files_.push_back(fdata.cFileName);
99 | }
100 | }
101 | } while (FindNextFile(hFind, &fdata) != 0);
102 | }
103 | else {
104 | cerr << "can't open directory\n";
105 | return;
106 | }
107 |
108 | if (GetLastError() != ERROR_NO_MORE_FILES)
109 | {
110 | FindClose(hFind);
111 | cerr << "some other error with opening directory: " << GetLastError() << endl;
112 | return;
113 | }
114 |
115 | FindClose(hFind);
116 | hFind = INVALID_HANDLE_VALUE;
117 | #endif
118 |
119 | for (unsigned int i = 0; i < files_.size(); i++) {
120 | if (files_[i][0] == '.' || !(hasEndingLower(files_[i], "jpg") || hasEndingLower(files_[i], "png"))) {
121 | continue;
122 | }
123 | cv::Mat m_ = cv::imread(string(dir_name_).append("/").append(files_[i]));
124 | if (downscale_factor != 1.0)
125 | cv::resize(m_, m_, Size(), downscale_factor, downscale_factor);
126 | images_names.push_back(files_[i]);
127 | images.push_back(m_);
128 | }
129 |
130 | std::cout << "=========================== Load Images ===========================\n";
131 | //ensure images are CV_8UC3
132 | for (unsigned int i = 0; i < images.size(); i++) {
133 | imgs_orig.push_back(cv::Mat_<cv::Vec3b>());
134 | if (!images[i].empty()) {
135 | if (images[i].type() == CV_8UC1) {
136 | cvtColor(images[i], imgs_orig[i], CV_GRAY2BGR);
137 | }
138 | else if (images[i].type() == CV_32FC3 || images[i].type() == CV_64FC3) {
139 | images[i].convertTo(imgs_orig[i], CV_8UC3, 255.0);
140 | }
141 | else {
142 | images[i].copyTo(imgs_orig[i]);
143 | }
144 | }
145 |
146 | grey_imgs.push_back(cv::Mat());
147 | cvtColor(imgs_orig[i], grey_imgs[i], CV_BGR2GRAY);
148 |
149 | imgpts.push_back(std::vector<cv::KeyPoint>());
150 | imgpts_good.push_back(std::vector<cv::KeyPoint>());
151 | std::cout << ".";
152 | }
153 | std::cout << std::endl;
154 |
155 | //load calibration matrix
156 | cv::FileStorage fs;
157 | if (fs.open(dir_name_ + "/out_camera_data.yml", cv::FileStorage::READ)) {
158 | fs["camera_matrix"] >> cam_matrix;
159 | fs["distortion_coefficients"] >> distortion_coeff;
160 | }
161 | else {
162 | //no calibration matrix file - mockup calibration
163 | cv::Size imgs_size = images[0].size();
164 | double max_w_h = MAX(imgs_size.height, imgs_size.width);
165 | cam_matrix = (cv::Mat_<double>(3, 3) << max_w_h, 0, imgs_size.width / 2.0,
166 | 0, max_w_h, imgs_size.height / 2.0,
167 | 0, 0, 1);
168 | distortion_coeff = cv::Mat_<double>::zeros(1, 4);
169 | }
170 |
171 | K = cam_matrix;
172 | invert(K, Kinv); //get inverse of camera matrix
173 |
174 | distortion_coeff.convertTo(distcoeff_32f, CV_32FC1);
175 | K.convertTo(K_32f, CV_32FC1);
176 | }
177 |
178 |
179 | void optical_flow_feature_match()
180 | {
181 | //detect keypoints for all images
182 | FastFeatureDetector ffd;
183 | // DenseFeatureDetector ffd;
184 | ffd.detect(grey_imgs, imgpts);
185 |
186 | int loop1_top = grey_imgs.size() - 1, loop2_top = grey_imgs.size();
187 | int frame_num_i = 0;
188 | #pragma omp parallel for
189 | for (frame_num_i = 0; frame_num_i < loop1_top; frame_num_i++) {
190 | for (int frame_num_j = frame_num_i + 1; frame_num_j < loop2_top; frame_num_j++)
191 | {
192 | std::cout << "------------ Match " << images_names[frame_num_i] << "," << images_names[frame_num_j] << " ------------\n";
193 | std::vector<cv::DMatch> matches_tmp;
194 | MatchFeatures(frame_num_i, frame_num_j, &matches_tmp);
195 | matches_matrix[std::make_pair(frame_num_i, frame_num_j)] = matches_tmp;
196 |
197 | std::vector<cv::DMatch> matches_tmp_flip = FlipMatches(matches_tmp);
198 | matches_matrix[std::make_pair(frame_num_j, frame_num_i)] = matches_tmp_flip;
199 | }
200 | }
201 | }
202 |
203 | void MatchFeatures(int idx_i, int idx_j, vector<DMatch>* matches) {
204 | vector<Point2f> i_pts;
205 | KeyPointsToPoints(imgpts[idx_i], i_pts);
206 | 
207 | vector<Point2f> j_pts(i_pts.size());
208 |
209 | // making sure images are grayscale
210 | Mat prevgray, gray;
211 | if (grey_imgs[idx_i].channels() == 3) {
212 | cvtColor(grey_imgs[idx_i], prevgray, CV_RGB2GRAY);
213 | cvtColor(grey_imgs[idx_j], gray, CV_RGB2GRAY);
214 | }
215 | else {
216 | prevgray = grey_imgs[idx_i];
217 | gray = grey_imgs[idx_j];
218 | }
219 |
220 | vector<uchar> vstatus(i_pts.size()); vector<float> verror(i_pts.size());
221 | calcOpticalFlowPyrLK(prevgray, gray, i_pts, j_pts, vstatus, verror);
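// calcOpticalFlowPyrLK tracks each i_pts[k] from image i into image j: vstatus[k] != 0
// marks a successfully tracked point and verror[k] holds its tracking error, which is
// thresholded below so that only reliable flows become match candidates.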
222 |
223 | double thresh = 1.0;
224 | vector<Point2f> to_find;
225 | vector<int> to_find_back_idx;
226 | for (unsigned int i = 0; i < vstatus.size(); i++) {
227 | if (vstatus[i] && verror[i] < 12.0) {
228 | to_find_back_idx.push_back(i);
229 | to_find.push_back(j_pts[i]);
230 | }
231 | else {
232 | vstatus[i] = 0;
233 | }
234 | }
235 |
236 | std::set<int> found_in_imgpts_j;
237 | Mat to_find_flat = Mat(to_find).reshape(1, to_find.size());
238 |
239 | vector<Point2f> j_pts_to_find;
240 | KeyPointsToPoints(imgpts[idx_j], j_pts_to_find);
241 | Mat j_pts_flat = Mat(j_pts_to_find).reshape(1, j_pts_to_find.size());
242 |
243 | vector<vector<DMatch> > knn_matches;
244 | //FlannBasedMatcher matcher;
245 | BFMatcher matcher(CV_L2);
246 | matcher.radiusMatch(to_find_flat, j_pts_flat, knn_matches, 2.0f);
247 | //Prune
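//(a unique radius-match is accepted outright; with two candidates, Lowe's ratio test
//best/second-best < 0.7 is applied; duplicates landing on the same point in image j are dropped)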
248 | for (int i = 0; i < knn_matches.size(); i++) {
249 | DMatch _m;
250 | if (knn_matches[i].size() == 1) {
251 | _m = knn_matches[i][0];
252 | }
253 | else if (knn_matches[i].size()>1) {
254 | if (knn_matches[i][0].distance / knn_matches[i][1].distance < 0.7) {
255 | _m = knn_matches[i][0];
256 | }
257 | else {
258 | continue; // did not pass ratio test
259 | }
260 | }
261 | else {
262 | continue; // no match
263 | }
264 | if (found_in_imgpts_j.find(_m.trainIdx) == found_in_imgpts_j.end()) { // prevent duplicates
265 | _m.queryIdx = to_find_back_idx[_m.queryIdx]; //back to original indexing of points for <idx_i>
266 | matches->push_back(_m);
267 | found_in_imgpts_j.insert(_m.trainIdx);
268 | }
269 | }
270 |
271 | cout << "pruned " << matches->size() << " / " << knn_matches.size() << " matches" << endl;
272 | #if 0
273 | {
274 | // draw flow field
275 | Mat img_matches; cvtColor(grey_imgs[idx_i], img_matches, CV_GRAY2BGR);
276 | i_pts.clear(); j_pts.clear();
277 | for (int i = 0; i < matches->size(); i++) {
278 | //if (i%2 != 0) {
279 | // continue;
280 | // }
281 | Point i_pt = imgpts[idx_i][(*matches)[i].queryIdx].pt;
282 | Point j_pt = imgpts[idx_j][(*matches)[i].trainIdx].pt;
283 | i_pts.push_back(i_pt);
284 | j_pts.push_back(j_pt);
285 | vstatus[i] = 1;
286 | }
287 | drawArrows(img_matches, i_pts, j_pts, vstatus, verror, Scalar(0, 255));
288 | stringstream ss;
289 | ss << matches->size() << " matches";
290 | ss.clear(); ss << "flow_field_" << images_names[idx_i] << "_" << images_names[idx_j] << ".png";
291 | imshow(ss.str(), img_matches);
292 | //direct write
293 | imwrite(ss.str(), img_matches);
294 | destroyWindow(ss.str());
295 | }
296 | #endif
297 | }
298 | 
299 | 
300 | 
301 | 
302 | 
303 | //prune the matches between <older_view> and all other views
304 | //using the Fundamental matrix
305 | void PruneMatchesBasedOnF()
306 | {
307 | //#pragma omp parallel for
308 | for (int _i = 0; _i < grey_imgs.size() - 1; _i++)
309 | {
310 | for (unsigned int _j = _i + 1; _j < grey_imgs.size(); _j++) {
311 | int older_view = _i, working_view = _j;
312 |
313 | GetFundamentalMat(imgpts[older_view],
314 | imgpts[working_view],
315 | imgpts_good[older_view],
316 | imgpts_good[working_view],
317 | matches_matrix[std::make_pair(older_view, working_view)]
318 | #ifdef __SFM__DEBUG__
319 | ,older_view, working_view
320 | #endif
321 | );
322 | //update flip matches as well
323 | #pragma omp critical
324 | matches_matrix[std::make_pair(working_view, older_view)] = FlipMatches(matches_matrix[std::make_pair(older_view, working_view)]);
325 | }
326 | }
327 | }
328 |
329 | Mat GetFundamentalMat(const vector<KeyPoint>& imgpts1,
330 | const vector<KeyPoint>& imgpts2,
331 | vector<KeyPoint>& imgpts1_good,
332 | vector<KeyPoint>& imgpts2_good,
333 | vector<DMatch>& matches
334 | #ifdef __SFM__DEBUG__
335 | , int older_view,
336 | int working_view
337 | #endif
338 | )
339 | {
340 |
341 | Mat img_1 = imgs_orig[older_view];
342 | Mat img_2 = imgs_orig[working_view];
343 | //Try to eliminate keypoints based on the fundamental matrix
344 | //(although this is not the proper way to do this)
345 | vector<uchar> status(imgpts1.size());
346 |
347 | #ifdef __SFM__DEBUG__
348 | std::vector<DMatch> good_matches_;
349 | std::vector<KeyPoint> keypoints_1, keypoints_2;
350 | #endif
351 | imgpts1_good.clear(); imgpts2_good.clear();
352 |
353 | vector<KeyPoint> imgpts1_tmp;
354 | vector<KeyPoint> imgpts2_tmp;
355 | if (matches.size() <= 0) {
356 | //points already aligned...
357 | imgpts1_tmp = imgpts1;
358 | imgpts2_tmp = imgpts2;
359 | }
360 | else {
361 | GetAlignedPointsFromMatch(imgpts1, imgpts2, matches, imgpts1_tmp, imgpts2_tmp);
362 | }
363 |
364 | Mat F;
365 | {
366 | vector<Point2f> pts1, pts2;
367 | KeyPointsToPoints(imgpts1_tmp, pts1);
368 | KeyPointsToPoints(imgpts2_tmp, pts2);
369 | #ifdef __SFM__DEBUG__
370 | cout << "pts1 " << pts1.size() << " (orig pts " << imgpts1_tmp.size() << ")" << endl;
371 | cout << "pts2 " << pts2.size() << " (orig pts " << imgpts2_tmp.size() << ")" << endl;
372 | #endif
373 | double minVal, maxVal;
374 | cv::minMaxIdx(pts1, &minVal, &maxVal);
375 | F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.006 * maxVal, 0.99, status); //threshold from [Snavely07 4.1]
376 | }
377 |
378 | vector<DMatch> new_matches;
379 | cout << "F keeping " << countNonZero(status) << " / " << status.size() << endl;
380 | for (unsigned int i = 0; i < status.size(); i++) {
381 | if (status[i])
382 | {
383 | imgpts1_good.push_back(imgpts1_tmp[i]);
384 | imgpts2_good.push_back(imgpts2_tmp[i]);
385 |
386 | if (matches.size() <= 0) { //points already aligned by index: synthesize an identity match
387 | new_matches.push_back(DMatch(i, i, 1.0));
388 | }
389 | else {
390 | new_matches.push_back(matches[i]);
391 | }
392 |
393 | #ifdef __SFM__DEBUG__
394 | good_matches_.push_back(DMatch(imgpts1_good.size() - 1, imgpts1_good.size() - 1, 1.0));
395 | keypoints_1.push_back(imgpts1_tmp[i]);
396 | keypoints_2.push_back(imgpts2_tmp[i]);
397 | #endif
398 | }
399 | }
400 |
401 | cout << matches.size() << " matches before, " << new_matches.size() << " new matches after Fundamental Matrix\n";
402 | matches = new_matches; //keep only those points who survived the fundamental matrix
403 |
404 | #if 0
405 | //-- Draw only "good" matches
406 | #ifdef __SFM__DEBUG__
407 | if (!img_1.empty() && !img_2.empty()) {
408 | vector<Point2f> i_pts, j_pts;
409 | Mat img_orig_matches;
410 | { //draw original features in red
411 | vector<uchar> vstatus(imgpts1_tmp.size(), 1);
412 | vector<float> verror(imgpts1_tmp.size(), 1.0);
413 | img_1.copyTo(img_orig_matches);
414 | KeyPointsToPoints(imgpts1_tmp, i_pts);
415 | KeyPointsToPoints(imgpts2_tmp, j_pts);
416 | drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0, 0, 255));
417 | }
418 | { //superimpose filtered features in green
419 | vector<uchar> vstatus(imgpts1_good.size(), 1);
420 | vector<float> verror(imgpts1_good.size(), 1.0);
421 | i_pts.resize(imgpts1_good.size());
422 | j_pts.resize(imgpts2_good.size());
423 | KeyPointsToPoints(imgpts1_good, i_pts);
424 | KeyPointsToPoints(imgpts2_good, j_pts);
425 | drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0, 255, 0));
426 | imshow("Filtered Matches", img_orig_matches);
427 | }
428 |
429 |
430 | stringstream ss;
431 | ss.clear(); ss << "pruned_flow_field_" << older_view << "and" << working_view << ".png";//<< omp_get_thread_num()
432 |
433 | //direct write
434 | imwrite(ss.str(), img_orig_matches);
435 | /*
436 | int c = waitKey(0);
437 | if (c == 's') {
438 | imwrite("fundamental_mat_matches.png", img_orig_matches);
439 | }*/
440 | destroyWindow("Filtered Matches");
441 | }
442 | #endif
443 | #endif
444 |
445 | return F;
446 | }
447 |
448 | //Following Snavely07 4.2 - find how many inliers are in the Homography between 2 views
449 | int FindHomographyInliers2Views(int vi, int vj)
450 | {
451 | vector<KeyPoint> ikpts, jkpts; vector<Point2f> ipts, jpts;
452 | GetAlignedPointsFromMatch(imgpts[vi], imgpts[vj], matches_matrix[make_pair(vi, vj)], ikpts, jkpts);
453 | KeyPointsToPoints(ikpts, ipts); KeyPointsToPoints(jkpts, jpts);
454 |
455 | double minVal, maxVal; cv::minMaxIdx(ipts, &minVal, &maxVal); //TODO flatten point2d?? or it takes max of width and height
456 |
457 | vector<uchar> status;
458 | cv::Mat H = cv::findHomography(ipts, jpts, status, CV_RANSAC, 0.004 * maxVal); //threshold from Snavely07
459 | return cv::countNonZero(status); //number of inliers
460 | }
461 |
462 |
463 | //count number of 2D measurements
464 | int Count2DMeasurements(const vector<CloudPoint>& pointcloud) {
465 | int K = 0;
466 | for (unsigned int i = 0; i < pointcloud.size(); i++) {
467 | for (unsigned int ii = 0; ii < pointcloud[i].imgpt_for_img.size(); ii++) {
468 | if (pointcloud[i].imgpt_for_img[ii] >= 0) {
469 | K++;
470 | }
471 | }
472 | }
473 | return K;
474 | }
475 |
476 | inline void showErrorStatistics(double const f0,
477 | StdDistortionFunction const& distortion,
478 | vector<CameraMatrix> const& cams,
479 | vector<Vector3d> const& Xs,
480 | vector<Vector2d> const& measurements,
481 | vector<int> const& correspondingView,
482 | vector<int> const& correspondingPoint)
483 | {
484 | int const K = measurements.size();
485 |
486 | double meanReprojectionError = 0.0;
487 | for (int k = 0; k < K; ++k)
488 | {
489 | int const i = correspondingView[k];
490 | int const j = correspondingPoint[k];
491 | Vector2d p = cams[i].projectPoint(distortion, Xs[j]);
492 |
493 | double reprojectionError = norm_L2(f0 * (p - measurements[k]));
494 | meanReprojectionError += reprojectionError;
495 | }
496 | cout << "mean reprojection error (in pixels): " << meanReprojectionError / K << endl;
497 | }
498 |
499 |
500 | void adjustBundle(vector<CloudPoint>& pointcloud,
501 | Mat& cam_matrix,
502 | const std::vector<std::vector<cv::KeyPoint> >& imgpts,
503 | std::map<int, cv::Matx34d>& Pmats
504 | )
505 | {
506 | int N = Pmats.size(), M = pointcloud.size(), K = Count2DMeasurements(pointcloud);
507 |
508 | cout << "N (cams) = " << N << " M (points) = " << M << " K (measurements) = " << K << endl;
509 |
510 | StdDistortionFunction distortion;
511 |
512 | //convert camera intrinsics to BA data structures
513 | Matrix3x3d KMat;
514 | makeIdentityMatrix(KMat);
515 | KMat[0][0] = cam_matrix.at<double>(0, 0); //fx
516 | KMat[1][1] = cam_matrix.at<double>(1, 1); //fy
517 | KMat[0][1] = cam_matrix.at<double>(0, 1); //skew
518 | KMat[0][2] = cam_matrix.at<double>(0, 2); //ppx
519 | KMat[1][2] = cam_matrix.at<double>(1, 2); //ppy
520 |
521 | double const f0 = KMat[0][0];
522 | cout << "intrinsic before bundle = "; displayMatrix(KMat);
523 | Matrix3x3d Knorm = KMat;
524 | // Normalize the intrinsic to have unit focal length.
525 | scaleMatrixIP(1.0 / f0, Knorm);
526 | Knorm[2][2] = 1.0;
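// Optimizing with a unit focal length keeps the problem well-scaled; the 2D measurements
// are divided by f0 further below so they live in the same normalized coordinates.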
527 |
528 | vector<int> pointIdFwdMap(M);
529 | map<int, int> pointIdBwdMap;
530 |
531 | //convert 3D point cloud to BA data structures
532 | vector<Vector3d> Xs(M);
533 | for (int j = 0; j < M; ++j)
534 | {
535 | int pointId = j;
536 | Xs[j][0] = pointcloud[j].pt.x;
537 | Xs[j][1] = pointcloud[j].pt.y;
538 | Xs[j][2] = pointcloud[j].pt.z;
539 | pointIdFwdMap[j] = pointId;
540 | pointIdBwdMap.insert(make_pair(pointId, j));
541 | }
542 | cout << "Read the 3D points." << endl;
543 |
544 | vector<int> camIdFwdMap(N, -1);
545 | map<int, int> camIdBwdMap;
546 |
547 | //convert cameras to BA datastructs
548 | vector<CameraMatrix> cams(N);
549 | for (int i = 0; i < N; ++i)
550 | {
551 | int camId = i;
552 | Matrix3x3d R;
553 | Vector3d T;
554 |
555 | Matx34d& P = Pmats[i];
556 |
557 | R[0][0] = P(0, 0); R[0][1] = P(0, 1); R[0][2] = P(0, 2); T[0] = P(0, 3);
558 | R[1][0] = P(1, 0); R[1][1] = P(1, 1); R[1][2] = P(1, 2); T[1] = P(1, 3);
559 | R[2][0] = P(2, 0); R[2][1] = P(2, 1); R[2][2] = P(2, 2); T[2] = P(2, 3);
560 |
561 | camIdFwdMap[i] = camId;
562 | camIdBwdMap.insert(make_pair(camId, i));
563 |
564 | cams[i].setIntrinsic(Knorm);
565 | cams[i].setRotation(R);
566 | cams[i].setTranslation(T);
567 | }
568 | cout << "Read the cameras." << endl;
569 |
570 | vector<Vector2d> measurements;
571 | vector<int> correspondingView;
572 | vector<int> correspondingPoint;
573 |
574 | measurements.reserve(K);
575 | correspondingView.reserve(K);
576 | correspondingPoint.reserve(K);
577 |
578 | //convert 2D measurements to BA datastructs
579 | for (unsigned int k = 0; k < pointcloud.size(); ++k)
580 | {
581 | for (unsigned int i = 0; i < pointcloud[k].imgpt_for_img.size(); i++) {
582 | if (pointcloud[k].imgpt_for_img[i] >= 0) {
583 | int view = i, point = k;
584 | Vector3d p, np;
585 |
586 | Point cvp = imgpts[i][pointcloud[k].imgpt_for_img[i]].pt;
587 | p[0] = cvp.x;
588 | p[1] = cvp.y;
589 | p[2] = 1.0;
590 |
591 | if (camIdBwdMap.find(view) != camIdBwdMap.end() &&
592 | pointIdBwdMap.find(point) != pointIdBwdMap.end())
593 | {
594 | // Normalize the measurements to match the unit focal length.
595 | scaleVectorIP(1.0 / f0, p);
596 | measurements.push_back(Vector2d(p[0], p[1]));
597 | correspondingView.push_back(camIdBwdMap[view]);
598 | correspondingPoint.push_back(pointIdBwdMap[point]);
599 | }
600 | }
601 | }
602 | } // end for (k)
603 |
604 | K = measurements.size();
605 |
606 | cout << "Read " << K << " valid 2D measurements." << endl;
607 |
608 | showErrorStatistics(f0, distortion, cams, Xs, measurements, correspondingView, correspondingPoint);
609 |
610 | // V3D::optimizerVerbosenessLevel = 1;
611 | double const inlierThreshold = 2.0 / fabs(f0);
612 |
613 | Matrix3x3d K0 = cams[0].getIntrinsic();
614 | cout << "K0 = "; displayMatrix(K0);
615 |
616 | bool good_adjustment = false;
617 | {
618 | ScopedBundleExtrinsicNormalizer extNorm(cams, Xs);
619 | ScopedBundleIntrinsicNormalizer intNorm(cams, measurements, correspondingView);
620 | CommonInternalsMetricBundleOptimizer opt(V3D::FULL_BUNDLE_FOCAL_LENGTH_PP, inlierThreshold, K0, distortion, cams, Xs,
621 | measurements, correspondingView, correspondingPoint);
622 | // StdMetricBundleOptimizer opt(inlierThreshold,cams,Xs,measurements,correspondingView,correspondingPoint);
623 |
624 | opt.tau = 1e-3;
625 | opt.maxIterations = 50;
626 | opt.minimize();
627 |
628 | cout << "optimizer status = " << opt.status << endl;
629 |
630 | good_adjustment = (opt.status != 2);
631 | }
632 |
633 | cout << "refined K = "; displayMatrix(K0);
634 |
635 | for (int i = 0; i < N; ++i) cams[i].setIntrinsic(K0);
636 |
637 | Matrix3x3d Knew = K0;
638 | scaleMatrixIP(f0, Knew);
639 | Knew[2][2] = 1.0;
640 | cout << "Knew = "; displayMatrix(Knew);
641 |
642 | showErrorStatistics(f0, distortion, cams, Xs, measurements, correspondingView, correspondingPoint);
643 |
644 | if (good_adjustment) { //good adjustment?
645 |
646 | //Vector3d mean(0.0, 0.0, 0.0);
647 | //for (unsigned int j = 0; j < Xs.size(); ++j) addVectorsIP(Xs[j], mean);
648 | //scaleVectorIP(1.0/Xs.size(), mean);
649 | //
650 | //vector norms(Xs.size());
651 | //for (unsigned int j = 0; j < Xs.size(); ++j)
652 | // norms[j] = distance_L2(Xs[j], mean);
653 | //
654 | //std::sort(norms.begin(), norms.end());
655 | //float distThr = norms[int(norms.size() * 0.9f)];
656 | //cout << "90% quantile distance: " << distThr << endl;
657 |
658 | //extract 3D points
659 | for (unsigned int j = 0; j < Xs.size(); ++j)
660 | {
661 | //if (distance_L2(Xs[j], mean) > 3*distThr) makeZeroVector(Xs[j]);
662 |
663 | pointcloud[j].pt.x = Xs[j][0];
664 | pointcloud[j].pt.y = Xs[j][1];
665 | pointcloud[j].pt.z = Xs[j][2];
666 | }
667 |
668 | //extract adjusted cameras
669 | for (int i = 0; i < N; ++i)
670 | {
671 | Matrix3x3d R = cams[i].getRotation();
672 | Vector3d T = cams[i].getTranslation();
673 |
674 | Matx34d P;
675 | P(0, 0) = R[0][0]; P(0, 1) = R[0][1]; P(0, 2) = R[0][2]; P(0, 3) = T[0];
676 | P(1, 0) = R[1][0]; P(1, 1) = R[1][1]; P(1, 2) = R[1][2]; P(1, 3) = T[1];
677 | P(2, 0) = R[2][0]; P(2, 1) = R[2][1]; P(2, 2) = R[2][2]; P(2, 3) = T[2];
678 |
679 | Pmats[i] = P;
680 | }
681 |
682 |
683 | //TODO: extract camera intrinsics
684 | cam_matrix.at<double>(0, 0) = Knew[0][0];
685 | cam_matrix.at<double>(0, 1) = Knew[0][1];
686 | cam_matrix.at<double>(0, 2) = Knew[0][2];
687 | cam_matrix.at<double>(1, 1) = Knew[1][1];
688 | cam_matrix.at<double>(1, 2) = Knew[1][2];
689 | }
690 | }
691 |
692 | void attach(SfMUpdateListener *sul)
693 | {
694 | listeners.push_back(sul);
695 | }
696 | void update()
697 | {
698 | for (int i = 0; i < listeners.size(); i++)
699 | listeners[i]->update(getPointCloud(),
700 | getPointCloudRGB(),
701 | getPointCloudBeforeBA(),
702 | getPointCloudRGBBeforeBA(),
703 | getCameras());
704 | }
705 |
706 |
707 | void Find2D3DCorrespondences(int working_view,
708 | std::vector<cv::Point3f>& ppcloud,
709 | std::vector<cv::Point2f>& imgPoints)
710 | {
711 | ppcloud.clear(); imgPoints.clear();
712 |
713 | vector<int> pcloud_status(pcloud.size(), 0);
714 | for (set<int>::iterator done_view = good_views.begin(); done_view != good_views.end(); ++done_view)
715 | {
716 | int old_view = *done_view;
717 | //check for matches_from_old_to_working between <working_view>'th frame and <old_view>'th frame (and thus the current cloud)
718 | std::vector<cv::DMatch> matches_from_old_to_working = matches_matrix[std::make_pair(old_view, working_view)];
719 |
720 | for (unsigned int match_from_old_view = 0; match_from_old_view < matches_from_old_to_working.size(); match_from_old_view++) {
721 | // the index of the matching point in <old_view>
722 | int idx_in_old_view = matches_from_old_to_working[match_from_old_view].queryIdx;
723 |
724 | //scan the existing cloud (pcloud) to see if this point from <old_view> exists
725 | for (unsigned int pcldp = 0; pcldp < pcloud.size(); pcldp++) {
726 | // see if this cloud point has a 2D measurement in the old view
727 | if (idx_in_old_view == pcloud[pcldp].imgpt_for_img[old_view] && pcloud_status[pcldp] == 0) //prevent duplicates
728 | {
729 | //3d point in cloud
730 | ppcloud.push_back(pcloud[pcldp].pt);
731 | //2d point in image i
732 | imgPoints.push_back(imgpts[working_view][matches_from_old_to_working[match_from_old_view].trainIdx].pt);
733 |
734 | pcloud_status[pcldp] = 1;
735 | break;
736 | }
737 | }
738 | }
739 | }
740 | cout << "found " << ppcloud.size() << " 3d-2d point correspondences" << endl;
741 | }
742 |
743 | bool FindPoseEstimation(
744 | int working_view,
745 | cv::Mat_<double>& rvec,
746 | cv::Mat_<double>& t,
747 | cv::Mat_<double>& R,
748 | std::vector<cv::Point3f> ppcloud,
749 | std::vector<cv::Point2f> imgPoints
750 | )
751 | {
752 | if (ppcloud.size() <= 7 || imgPoints.size() <= 7 || ppcloud.size() != imgPoints.size()) {
753 | //something went wrong aligning 3D to 2D points..
754 | cerr << "couldn't find [enough] corresponding cloud points... (only " << ppcloud.size() << ")" << endl;
755 | return false;
756 | }
757 |
758 | vector<int> inliers;
759 | double minVal, maxVal; cv::minMaxIdx(imgPoints, &minVal, &maxVal);
760 | //"solvePnPRansac",
761 | cv::solvePnPRansac(ppcloud, imgPoints, K, distortion_coeff, rvec, t, true, 1000, 0.006 * maxVal, 0.25 * (double)(imgPoints.size()), inliers, CV_EPNP);
762 |
763 | vector<cv::Point2f> projected3D;
764 | cv::projectPoints(ppcloud, rvec, t, K, distortion_coeff, projected3D);
765 |
766 | if (inliers.size() == 0) { //get inliers
767 | for (int i = 0; i < projected3D.size(); i++) {
768 | if (norm(projected3D[i] - imgPoints[i]) < 10.0)
769 | inliers.push_back(i);
770 | }
771 | }
772 | //cv::Rodrigues(rvec, R);
773 | //visualizerShowCamera(R,t,0,255,0,0.1);
774 |
775 | if (inliers.size() < (double)(imgPoints.size()) / 5.0) {
776 | cerr << "not enough inliers to consider a good pose (" << inliers.size() << "/" << imgPoints.size() << ")" << endl;
777 | return false;
778 | }
779 |
780 | if (cv::norm(t) > 200.0) {
781 | // this is bad...
782 | cerr << "estimated camera movement is too big, skip this camera\r\n";
783 | return false;
784 | }
785 |
786 | cv::Rodrigues(rvec, R);
787 | if (!CheckCoherentRotation(R)) {
788 | cerr << "rotation is incoherent. we should try a different base view..." << endl;
789 | return false;
790 | }
791 |
792 | std::cout << "found t = " << t << "\nR = \n" << R << std::endl;
793 | return true;
794 | }
795 | void RecoverCamerasIncremental()
796 | {
797 | cv::Matx34d P1 = Pmats[m_second_view];
798 | cv::Mat_<double> t = (cv::Mat_<double>(1, 3) << P1(0, 3), P1(1, 3), P1(2, 3));
799 | cv::Mat_<double> R = (cv::Mat_<double>(3, 3) << P1(0, 0), P1(0, 1), P1(0, 2),
800 | P1(1, 0), P1(1, 1), P1(1, 2),
801 | P1(2, 0), P1(2, 1), P1(2, 2));
802 | cv::Mat_<double> rvec(1, 3); Rodrigues(R, rvec);
803 |
804 | done_views.insert(m_first_view);
805 | done_views.insert(m_second_view);
806 | good_views.insert(m_first_view);
807 | good_views.insert(m_second_view);
808 |
809 | //loop images to incrementally recover more cameras
810 | //for (unsigned int i=0; i < imgs.size(); i++)
811 | while (done_views.size() != grey_imgs.size())
812 | {
813 | //find image with highest 2d-3d correspondence [Snavely07 4.2]
814 | unsigned int max_2d3d_view = -1, max_2d3d_count = 0;
815 | vector<cv::Point3f> max_3d; vector<cv::Point2f> max_2d;
816 | for (unsigned int _i = 0; _i < grey_imgs.size(); _i++) {
817 | if (done_views.find(_i) != done_views.end()) continue; //already done with this view
818 |
819 | vector<cv::Point3f> tmp3d; vector<cv::Point2f> tmp2d;
820 | cout << images_names[_i] << ": ";
821 | Find2D3DCorrespondences(_i, tmp3d, tmp2d);
822 | if (tmp3d.size() > max_2d3d_count) {
823 | max_2d3d_count = tmp3d.size();
824 | max_2d3d_view = _i;
825 | max_3d = tmp3d; max_2d = tmp2d;
826 | }
827 | }
828 | int i = max_2d3d_view; //highest 2d3d matching view
829 |
830 | std::cout << "-------------------------- " << images_names[i] << " --------------------------\n";
831 | done_views.insert(i); // don't repeat it for now
832 |
833 | bool pose_estimated = FindPoseEstimation(i, rvec, t, R, max_3d, max_2d);
834 | if (!pose_estimated)
835 | continue;
836 |
837 | //store estimated pose
838 | Pmats[i] = cv::Matx34d(R(0, 0), R(0, 1), R(0, 2), t(0),
839 | R(1, 0), R(1, 1), R(1, 2), t(1),
840 | R(2, 0), R(2, 1), R(2, 2), t(2));
841 |
842 | // start triangulating with previous GOOD views
843 | for (set::iterator done_view = good_views.begin(); done_view != good_views.end(); ++done_view)
844 | {
845 | int view = *done_view;
846 | if (view == i) continue; //skip current...
847 |
848 | cout << " -> " << images_names[view] << endl;
849 |
850 | vector<CloudPoint> new_triangulated;
851 | vector<int> add_to_cloud;
852 | bool good_triangulation = TriangulatePointsBetweenViews(i, view, new_triangulated, add_to_cloud);
853 | if (!good_triangulation) continue;
854 |
855 | std::cout << "before triangulation: " << pcloud.size();
856 | for (int j = 0; j < add_to_cloud.size(); j++) {
857 | if (add_to_cloud[j] == 1)
858 | pcloud.push_back(new_triangulated[j]);
859 | }
860 | std::cout << " after " << pcloud.size() << std::endl;
861 | //break;
862 | }
863 | good_views.insert(i);
864 |
865 | AdjustCurrentBundle();
866 | update();
867 | }
868 |
869 | cout << "======================================================================\n";
870 | cout << "========================= Depth Recovery DONE ========================\n";
871 | cout << "======================================================================\n";
872 | }
873 |
874 | void AdjustCurrentBundle() {
875 | cout << "======================== Bundle Adjustment ==========================\n";
876 |
877 | pointcloud_beforeBA = pcloud;
878 | GetRGBForPointCloud(pointcloud_beforeBA, pointCloudRGB_beforeBA);
879 |
880 | cv::Mat _cam_matrix = K;
881 | adjustBundle(pcloud, _cam_matrix, imgpts, Pmats);
882 | cam_matrix = _cam_matrix; K = cam_matrix; //adopt the refined intrinsics from BA
883 | Kinv = K.inv();
884 |
885 | cout << "use new K " << endl << K << endl;
886 |
887 | GetRGBForPointCloud(pcloud, pointCloudRGB);
888 | }
889 |
890 | void GetRGBForPointCloud(
891 | const std::vector<CloudPoint>& _pcloud,
892 | std::vector<cv::Vec3b>& RGBforCloud
893 | )
894 | {
895 | RGBforCloud.resize(_pcloud.size());
896 | for (unsigned int i = 0; i < _pcloud.size(); i++) {
897 | unsigned int good_view = 0;
898 | std::vector<cv::Vec3b> point_colors;
899 | for (; good_view < imgs_orig.size(); good_view++) {
900 | if (_pcloud[i].imgpt_for_img[good_view] != -1) {
901 | int pt_idx = _pcloud[i].imgpt_for_img[good_view];
902 | if (pt_idx >= imgpts[good_view].size()) {
903 | std::cerr << "BUG: point id:" << pt_idx << " should not exist for img #" << good_view << " which has only " << imgpts[good_view].size() << std::endl;
904 | continue;
905 | }
906 | cv::Point _pt = imgpts[good_view][pt_idx].pt;
907 | assert(good_view < imgs_orig.size() && _pt.x < imgs_orig[good_view].cols && _pt.y < imgs_orig[good_view].rows);
908 |
909 | point_colors.push_back(imgs_orig[good_view].at<cv::Vec3b>(_pt));
910 |
911 | // std::stringstream ss; ss << "patch " << good_view;
912 | // imshow_250x250(ss.str(), imgs_orig[good_view](cv::Range(_pt.y-10,_pt.y+10),cv::Range(_pt.x-10,_pt.x+10)));
913 | }
914 | }
915 | // cv::waitKey(0);
916 | cv::Scalar res_color = cv::mean(point_colors);
917 | RGBforCloud[i] = (cv::Vec3b(res_color[0], res_color[1], res_color[2])); //bgr2rgb
918 | if (good_view == grey_imgs.size()) //nothing found.. put red dot
919 | RGBforCloud[i] = cv::Vec3b(255, 0, 0);
920 | }
921 | }
922 |
923 |
924 | bool sort_by_first(pair<int, pair<int, int> > a, pair<int, pair<int, int> > b) { return a.first < b.first; }
925 | /**
926 | * Get an initial 3D point cloud from 2 views only
927 | */
928 | void GetBaseLineTriangulation() {
929 | std::cout << "=========================== Baseline triangulation ===========================\n";
930 |
931 | cv::Matx34d P(1, 0, 0, 0,
932 | 0, 1, 0, 0,
933 | 0, 0, 1, 0),
934 | P1(1, 0, 0, 0,
935 | 0, 1, 0, 0,
936 | 0, 0, 1, 0);
937 |
938 | std::vector<CloudPoint> tmp_pcloud;
939 |
940 | //sort pairwise matches to find the lowest Homography inliers [Snavely07 4.2]
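//(rationale: a pair whose matches are poorly explained by a single homography has a wide
//baseline and non-planar structure, which makes the two-view triangulation well-conditioned)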
941 | cout << "Find highest match...";
942 | list<pair<int, pair<int, int> > > matches_sizes;
943 | //TODO: parallelize!
944 | for (std::map<std::pair<int, int>, std::vector<cv::DMatch> >::iterator i = matches_matrix.begin(); i != matches_matrix.end(); ++i) {
945 | if ((*i).second.size() < 100)
946 | matches_sizes.push_back(make_pair(100, (*i).first));
947 | else {
948 | int Hinliers = FindHomographyInliers2Views((*i).first.first, (*i).first.second);
949 | int percent = (int)(((double)Hinliers) / ((double)(*i).second.size()) * 100.0);
950 | cout << "[" << (*i).first.first << "," << (*i).first.second << " = " << percent << "] ";
951 | matches_sizes.push_back(make_pair((int)percent, (*i).first));
952 | }
953 | }
954 | cout << endl;
955 | matches_sizes.sort(sort_by_first);
956 |
957 | //Reconstruct from two views
958 | bool goodF = false;
959 | int highest_pair = 0;
960 | m_first_view = m_second_view = 0;
961 | //reverse iterate by number of matches
962 | for (list<pair<int, pair<int, int> > >::iterator highest_pair = matches_sizes.begin();
963 | highest_pair != matches_sizes.end() && !goodF;
964 | ++highest_pair)
965 | {
966 | m_second_view = (*highest_pair).second.second;
967 | m_first_view = (*highest_pair).second.first;
968 |
969 | std::cout << " -------- " << images_names[m_first_view] << " and " << images_names[m_second_view] << " -------- " << std::endl;
970 | //what if reconstrcution of first two views is bad? fallback to another pair
971 | //See if the Fundamental Matrix between these two views is good
972 | goodF = FindCameraMatrices(K, Kinv, distortion_coeff,
973 | imgpts[m_first_view],
974 | imgpts[m_second_view],
975 | imgpts_good[m_first_view],
976 | imgpts_good[m_second_view],
977 | P,
978 | P1,
979 | matches_matrix[std::make_pair(m_first_view, m_second_view)],
980 | tmp_pcloud
981 | #ifdef __SFM__DEBUG__
982 | , m_first_view, m_second_view
983 | #endif
984 | );
985 | if (goodF) {
986 | vector<CloudPoint> new_triangulated;
987 | vector<int> add_to_cloud;
988 |
989 | Pmats[m_first_view] = P;
990 | Pmats[m_second_view] = P1;
991 |
992 | bool good_triangulation = TriangulatePointsBetweenViews(m_second_view, m_first_view, new_triangulated, add_to_cloud);
993 | if (!good_triangulation || cv::countNonZero(add_to_cloud) < 10) {
994 | std::cout << "triangulation failed" << std::endl;
995 | goodF = false;
996 | Pmats[m_first_view] = 0;
997 | Pmats[m_second_view] = 0;
998 | m_second_view++;
999 | }
1000 | else {
1001 | std::cout << "before triangulation: " << pcloud.size();
1002 | for (unsigned int j = 0; j < add_to_cloud.size(); j++) {
1003 | if (add_to_cloud[j] == 1)
1004 | pcloud.push_back(new_triangulated[j]);
1005 | }
1006 | std::cout << " after " << pcloud.size() << std::endl;
1007 | }
1008 | }
1009 | }
1010 | 
1011 | if (!goodF) {
1012 | cerr << "Cannot find a good pair of images to obtain a baseline triangulation" << endl;
1013 | exit(0);
1014 | }
1015 | }
1016 | 
1017 | /**
1018 |  * Triangulate new points between two views and decide which ones to add to the cloud
1019 |  */
1020 | bool TriangulatePointsBetweenViews(int working_view,
1021 | int older_view,
1022 | vector<CloudPoint>& new_triangulated,
1023 | vector<int>& add_to_cloud
1026 | )
1027 | {
1028 | cout << " Triangulate " << images_names[working_view] << " and " << images_names[older_view] << endl;
1029 | //get the left camera matrix
1030 | //TODO: potential bug - the P mat for <older_view> may not exist? or does it...
1031 | cv::Matx34d P = Pmats[older_view];
1032 | cv::Matx34d P1 = Pmats[working_view];
1033 |
1034 | std::vector<cv::KeyPoint> pt_set1, pt_set2;
1035 | std::vector<cv::DMatch> matches = matches_matrix[std::make_pair(older_view, working_view)];
1036 | GetAlignedPointsFromMatch(imgpts[older_view], imgpts[working_view], matches, pt_set1, pt_set2);
1037 |
1038 |
1039 | //adding more triangulated points to general cloud
1040 | double reproj_error = TriangulatePoints(pt_set1, pt_set2, K, Kinv, distortion_coeff, P, P1, new_triangulated, correspImg1Pt);
1041 | std::cout << "triangulation reproj error " << reproj_error << std::endl;
1042 |
1043 | vector<uchar> trig_status;
1044 | if (!TestTriangulation(new_triangulated, P, trig_status) || !TestTriangulation(new_triangulated, P1, trig_status)) {
1045 | cerr << "Triangulation did not succeed" << endl;
1046 | return false;
1047 | }
1048 |
1049 | //filter out outlier points with high reprojection
1050 | vector<double> reprj_errors;
1051 | for (int i = 0; i < new_triangulated.size(); i++) { reprj_errors.push_back(new_triangulated[i].reprojection_error); }
1052 | std::sort(reprj_errors.begin(), reprj_errors.end());
1053 | //get the 80% percentile
1054 | double reprj_err_cutoff = reprj_errors[4 * reprj_errors.size() / 5] * 2.4; //threshold from Snavely07 4.2
1055 |
1056 | vector<CloudPoint> new_triangulated_filtered;
1057 | std::vector<cv::DMatch> new_matches;
1058 | for (int i = 0; i < new_triangulated.size(); i++) {
1059 | if (trig_status[i] == 0)
1060 | continue; //point was not in front of camera
1061 | if (new_triangulated[i].reprojection_error > 16.0) {
1062 | continue; //reject point
1063 | }
1064 | if (new_triangulated[i].reprojection_error < 4.0 ||
1065 | new_triangulated[i].reprojection_error < reprj_err_cutoff)
1066 | {
1067 | new_triangulated_filtered.push_back(new_triangulated[i]);
1068 | new_matches.push_back(matches[i]);
1069 | }
1070 | else
1071 | {
1072 | continue;
1073 | }
1074 | }
1075 |
1076 | cout << "filtered out " << (new_triangulated.size() - new_triangulated_filtered.size()) << " high-error points" << endl;
1077 |
1078 | //all points filtered?
1079 | if (new_triangulated_filtered.size() <= 0) return false;
1080 |
1081 | new_triangulated = new_triangulated_filtered;
1082 |
1083 | matches = new_matches;
1084 | matches_matrix[std::make_pair(older_view, working_view)] = new_matches; //just to make sure, remove if unnecessary
1085 | matches_matrix[std::make_pair(working_view, older_view)] = FlipMatches(new_matches);
1086 | add_to_cloud.clear();
1087 | add_to_cloud.resize(new_triangulated.size(), 1);
1088 | int found_other_views_count = 0;
1089 | int num_views = grey_imgs.size();
1090 |
1091 | //scan new triangulated points, if they were already triangulated before - strengthen cloud
1092 | //#pragma omp parallel for num_threads(1)
1093 | for (int j = 0; j < new_triangulated.size(); j++) {
1094 | new_triangulated[j].imgpt_for_img = std::vector<int>(grey_imgs.size(), -1);
1095 |
1096 | //matches[j] corresponds to new_triangulated[j]
1097 | //matches[j].queryIdx = point in <older_view>
1098 | //matches[j].trainIdx = point in <working_view>
1099 | new_triangulated[j].imgpt_for_img[older_view] = matches[j].queryIdx; //2D reference to <older_view>
1100 | new_triangulated[j].imgpt_for_img[working_view] = matches[j].trainIdx; //2D reference to <working_view>
1101 | bool found_in_other_view = false;
1102 | for (unsigned int view_ = 0; view_ < num_views; view_++) {
1103 | if (view_ != older_view) {
1104 | //Look for points in <view_> that match to points in <working_view>
1105 | std::vector<cv::DMatch> submatches = matches_matrix[std::make_pair(view_, working_view)];
1106 | for (unsigned int ii = 0; ii < submatches.size(); ii++) {
1107 | if (submatches[ii].trainIdx == matches[j].trainIdx &&
1108 | !found_in_other_view)
1109 | {
1110 | //Point was already found in <view_> - strengthen it in the known cloud, if it exists there
1111 |
1112 | //cout << "2d pt " << submatches[ii].queryIdx << " in img " << view_ << " matched 2d pt " << submatches[ii].trainIdx << " in img " << i << endl;
1113 | for (unsigned int pt3d = 0; pt3d < pcloud.size(); pt3d++) {
1114 | if (pcloud[pt3d].imgpt_for_img[view_] == submatches[ii].queryIdx)
1115 | {
1116 | //pcloud[pt3d] - a point that has 2d reference in <view_>
1117 |
1118 | //cout << "3d point "<& imgpts1,
1151 | const vector& imgpts2,
1152 | vector& imgpts1_good,
1153 | vector& imgpts2_good,
1154 | Matx34d& P,
1155 | Matx34d& P1,
1156 | vector& matches,
1157 | vector& outCloud
1158 | #ifdef __SFM__DEBUG__
1159 | , int older_view,
1160 | int working_view
1161 | #endif
1162 | )
1163 | {
1164 | //Find camera matrices
1165 | {
1166 | cout << "Find camera matrices...";
1167 | double t = getTickCount();
1168 |
1169 | Mat F = GetFundamentalMat(imgpts1,imgpts2,imgpts1_good,imgpts2_good,matches
1170 | #ifdef __SFM__DEBUG__
1171 | , older_view, working_view
1172 | #endif
1173 | );
1174 | if(matches.size() < 100) { // || ((double)imgpts1_good.size() / (double)imgpts1.size()) < 0.25
1175 | cerr << "not enough inliers after F matrix" << endl;
1176 | return false;
1177 | }
1178 |
1179 | //Essential matrix: compute then extract cameras [R|t]
1180 | Mat_<double> E = K.t() * F * K; //according to HZ (9.12)
1181 |
1182 | //according to http://en.wikipedia.org/wiki/Essential_matrix#Properties_of_the_essential_matrix
1183 | if(fabsf(determinant(E)) > 1e-07) {
1184 | cout << "det(E) != 0 : " << determinant(E) << "\n";
1185 | P1 = 0;
1186 | return false;
1187 | }
1188 |
1189 | Mat_<double> R1(3,3);
1190 | Mat_<double> R2(3,3);
1191 | Mat_<double> t1(1,3);
1192 | Mat_<double> t2(1,3);
1193 |
1194 | //decompose E to P' , HZ (9.19)
1195 | {
1196 | if (!DecomposeEtoRandT(E,R1,R2,t1,t2)) return false;
1197 |
1198 | if(determinant(R1)+1.0 < 1e-09) {
1199 | //according to http://en.wikipedia.org/wiki/Essential_matrix#Showing_that_it_is_valid
1200 | cout << "det(R) == -1 ["< pcloud,pcloud1; vector corresp;
1216 | double reproj_error1 = TriangulatePoints(imgpts1_good, imgpts2_good, K, Kinv, distcoeff, P, P1, pcloud, corresp);
1217 | double reproj_error2 = TriangulatePoints(imgpts2_good, imgpts1_good, K, Kinv, distcoeff, P1, P, pcloud1, corresp);
1218 | vector<uchar> tmp_status;
1219 | //check if points are triangulated --in front-- of cameras for all 4 ambiguities
1220 | if (!TestTriangulation(pcloud,P1,tmp_status) || !TestTriangulation(pcloud1,P,tmp_status) || reproj_error1 > 100.0 || reproj_error2 > 100.0) {
1221 | P1 = Matx34d(R1(0,0), R1(0,1), R1(0,2), t2(0),
1222 | R1(1,0), R1(1,1), R1(1,2), t2(1),
1223 | R1(2,0), R1(2,1), R1(2,2), t2(2));
1224 | cout << "Testing P1 "<< endl << Mat(P1) << endl;
1225 |
1226 | pcloud.clear(); pcloud1.clear(); corresp.clear();
1227 | reproj_error1 = TriangulatePoints(imgpts1_good, imgpts2_good, K, Kinv, distcoeff, P, P1, pcloud, corresp);
1228 | reproj_error2 = TriangulatePoints(imgpts2_good, imgpts1_good, K, Kinv, distcoeff, P1, P, pcloud1, corresp);
1229 |
1230 | if (!TestTriangulation(pcloud,P1,tmp_status) || !TestTriangulation(pcloud1,P,tmp_status) || reproj_error1 > 100.0 || reproj_error2 > 100.0) {
1231 | if (!CheckCoherentRotation(R2)) {
1232 | cout << "resulting rotation is not coherent\n";
1233 | P1 = 0;
1234 | return false;
1235 | }
1236 |
1237 | P1 = Matx34d(R2(0,0), R2(0,1), R2(0,2), t1(0),
1238 | R2(1,0), R2(1,1), R2(1,2), t1(1),
1239 | R2(2,0), R2(2,1), R2(2,2), t1(2));
1240 | cout << "Testing P1 "<< endl << Mat(P1) << endl;
1241 |
1242 | pcloud.clear(); pcloud1.clear(); corresp.clear();
1243 | reproj_error1 = TriangulatePoints(imgpts1_good, imgpts2_good, K, Kinv, distcoeff, P, P1, pcloud, corresp);
1244 | reproj_error2 = TriangulatePoints(imgpts2_good, imgpts1_good, K, Kinv, distcoeff, P1, P, pcloud1, corresp);
1245 |
1246 | if (!TestTriangulation(pcloud,P1,tmp_status) || !TestTriangulation(pcloud1,P,tmp_status) || reproj_error1 > 100.0 || reproj_error2 > 100.0) {
1247 | P1 = Matx34d(R2(0,0), R2(0,1), R2(0,2), t2(0),
1248 | R2(1,0), R2(1,1), R2(1,2), t2(1),
1249 | R2(2,0), R2(2,1), R2(2,2), t2(2));
1250 | cout << "Testing P1 "<< endl << Mat(P1) << endl;
1251 |
1252 | pcloud.clear(); pcloud1.clear(); corresp.clear();
1253 | reproj_error1 = TriangulatePoints(imgpts1_good, imgpts2_good, K, Kinv, distcoeff, P, P1, pcloud, corresp);
1254 | reproj_error2 = TriangulatePoints(imgpts2_good, imgpts1_good, K, Kinv, distcoeff, P1, P, pcloud1, corresp);
1255 |
1256 | if (!TestTriangulation(pcloud,P1,tmp_status) || !TestTriangulation(pcloud1,P,tmp_status) || reproj_error1 > 100.0 || reproj_error2 > 100.0) {
1257 | cout << "Shit." << endl;
1258 | return false;
1259 | }
1260 | }
1261 | }
1262 | }
1263 | for (unsigned int i=0; i<pcloud.size(); i++) {
1264 | outCloud.push_back(pcloud[i]);
1265 | }
1266 | }
1267 | 
1268 | t = ((double)getTickCount() - t) / getTickFrequency();
1269 | cout << "Done. (" << t << "s)" << endl;
1270 | }
1271 | return true;
1272 | }
1273 | 
1274 | 
1275 | std::vector<cv::Point3d> CloudPointsToPoints(const std::vector<CloudPoint> cpts) {
1276 | std::vector<cv::Point3d> out;
1277 | for (unsigned int i = 0; i < cpts.size(); i++) {
1278 | out.push_back(cpts[i].pt);
1279 | }
1280 | return out;
1281 | }
1282 |
1283 | bool TestTriangulation(const vector<CloudPoint>& pcloud, const Matx34d& P, vector<uchar>& status) {
1284 | vector<Point3d> pcloud_pt3d = CloudPointsToPoints(pcloud);
1285 | vector<Point3d> pcloud_pt3d_projected(pcloud_pt3d.size());
1286 |
1287 | Matx44d P4x4 = Matx44d::eye();
1288 | for (int i = 0; i < 12; i++) P4x4.val[i] = P.val[i];
1289 |
1290 | perspectiveTransform(pcloud_pt3d, pcloud_pt3d_projected, P4x4);
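// P4x4 maps the points into the camera frame; a positive z there means the point lies
// in front of the camera (cheirality test).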
1291 |
1292 | status.resize(pcloud.size(), 0);
1293 | for (int i = 0; i < pcloud.size(); i++) {
1294 | status[i] = (pcloud_pt3d_projected[i].z > 0) ? 1 : 0;
1295 | }
1296 | int count = countNonZero(status);
1297 |
1298 | double percentage = ((double)count / (double)pcloud.size());
1299 | cout << count << "/" << pcloud.size() << " = " << percentage*100.0 << "% are in front of camera" << endl;
1300 | if (percentage < 0.75)
1301 | return false; //less than 75% of the points are in front of the camera
1302 |
1303 | //check for coplanarity of points
1304 | if (false) //disabled: optional coplanarity check
1305 | {
1306 | cv::Mat_<double> cldm(pcloud.size(), 3);
1307 | for (unsigned int i = 0; i < pcloud.size(); i++) {
1308 | cldm.row(i)(0) = pcloud[i].pt.x;
1309 | cldm.row(i)(1) = pcloud[i].pt.y;
1310 | cldm.row(i)(2) = pcloud[i].pt.z;
1311 | }
1312 | cv::Mat_<double> mean;
1313 | cv::PCA pca(cldm, mean, CV_PCA_DATA_AS_ROW);
1314 |
1315 | int num_inliers = 0;
1316 | cv::Vec3d nrm = pca.eigenvectors.row(2); nrm = nrm / norm(nrm);
1317 | cv::Vec3d x0 = pca.mean;
1318 | double p_to_plane_thresh = sqrt(pca.eigenvalues.at<double>(2));
1319 |
1320 | for (int i = 0; i < pcloud.size(); i++) {
1321 | Vec3d w = Vec3d(pcloud[i].pt) - x0;
1322 | double D = fabs(nrm.dot(w));
1323 | if (D < p_to_plane_thresh) num_inliers++;
1324 | }
1325 |
1326 | cout << num_inliers << "/" << pcloud.size() << " are coplanar" << endl;
1327 | if ((double)num_inliers / (double)(pcloud.size()) > 0.85)
1328 | return false;
1329 | }
1330 |
1331 | return true;
1332 | }
1333 |
1334 | //Triangulate points
1335 | double TriangulatePoints(const vector<KeyPoint>& pt_set1,
1336 | const vector<KeyPoint>& pt_set2,
1337 | const Mat& K,
1338 | const Mat& Kinv,
1339 | const Mat& distcoeff,
1340 | const Matx34d& P,
1341 | const Matx34d& P1,
1342 | vector<CloudPoint>& pointcloud,
1343 | vector<KeyPoint>& correspImg1Pt)
1344 | {
1345 | #ifdef __SFM__DEBUG__
1346 | vector<double> depths;
1347 | #endif
1348 |
1349 | // pointcloud.clear();
1350 | correspImg1Pt.clear();
1351 |
1352 | Matx44d P1_(P1(0, 0), P1(0, 1), P1(0, 2), P1(0, 3),
1353 | P1(1, 0), P1(1, 1), P1(1, 2), P1(1, 3),
1354 | P1(2, 0), P1(2, 1), P1(2, 2), P1(2, 3),
1355 | 0, 0, 0, 1);
1356 | Matx44d P1inv(P1_.inv());
1357 |
1358 | cout << "Triangulating...";
1359 | double t = getTickCount();
1360 | vector<double> reproj_error;
1361 | unsigned int pts_size = pt_set1.size();
1362 |
1363 | Mat_<double> KP1 = K * Mat(P1);
1364 | #pragma omp parallel for num_threads(1)
1365 | for (int i = 0; i < pts_size; i++) {
1366 | Point2f kp = pt_set1[i].pt;
1367 | Point3d u(kp.x, kp.y, 1.0);
1368 | Mat_<double> um = Kinv * Mat_<double>(u);
1369 | u.x = um(0); u.y = um(1); u.z = um(2);
1370 |
1371 | Point2f kp1 = pt_set2[i].pt;
1372 | Point3d u1(kp1.x, kp1.y, 1.0);
1373 | Mat_<double> um1 = Kinv * Mat_<double>(u1);
1374 | u1.x = um1(0); u1.y = um1(1); u1.z = um1(2);
1375 |
1376 | Mat_<double> X = IterativeLinearLSTriangulation(u, P, u1, P1);
1377 | // cout << "3D Point: " << X << endl;
1378 | // Mat_<double> x = Mat(P1) * X;
1379 | // cout << "P1 * Point: " << x << endl;
1380 | // Mat_<double> xPt = (Mat_<double>(3,1) << x(0),x(1),x(2));
1381 | // cout << "Point: " << xPt << endl;
1382 | Mat_<double> xPt_img = KP1 * X; //reproject
1383 | // cout << "Point * K: " << xPt_img << endl;
1384 | Point2f xPt_img_(xPt_img(0) / xPt_img(2), xPt_img(1) / xPt_img(2));
1385 |
1386 | #pragma omp critical
1387 | {
1388 | double reprj_err = norm(xPt_img_ - kp1);
1389 | reproj_error.push_back(reprj_err);
1390 |
1391 | CloudPoint cp;
1392 | cp.pt = Point3d(X(0), X(1), X(2));
1393 | cp.reprojection_error = reprj_err;
1394 |
1395 | pointcloud.push_back(cp);
1396 | correspImg1Pt.push_back(pt_set1[i]);
1397 | }
1398 | }
1399 |
1400 | Scalar mse = mean(reproj_error);
1401 | t = ((double)getTickCount() - t) / getTickFrequency();
1402 | cout << "Done. (" << pointcloud.size() << "points, " << t << "s, mean reproj err = " << mse[0] << ")" << endl;
1403 |
1404 | return mse[0];
1405 | }
1406 |
1407 | /**
1408 | From "Triangulation", Hartley, R.I. and Sturm, P., Computer vision and image understanding, 1997
1409 | */
1410 | Mat_<double> LinearLSTriangulation(Point3d u, //homogeneous image point (u,v,1)
1411 | Matx34d P, //camera 1 matrix
1412 | Point3d u1, //homogeneous image point in 2nd camera
1413 | Matx34d P1 //camera 2 matrix
1414 | )
1415 | {
1416 |
1417 | //build matrix A for homogenous equation system Ax = 0
1418 | //assume X = (x,y,z,1), for Linear-LS method
1419 | //which turns it into a AX = B system, where A is 4x3, X is 3x1 and B is 4x1
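// Derivation sketch: each view contributes two rows of u x (PX) = 0, e.g. for camera 1:
//   u.x*(P.row(2) . X) - (P.row(0) . X) = 0
//   u.y*(P.row(2) . X) - (P.row(1) . X) = 0
// and with X = (x,y,z,1) the terms from the 4th column of P move to the right-hand side B.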
1420 | // cout << "u " << u <<", u1 " << u1 << endl;
1421 | // Matx<double,6,4> A; //this is for the AX=0 case, and with linear dependence..
1422 | // A(0) = u.x*P(2)-P(0);
1423 | // A(1) = u.y*P(2)-P(1);
1424 | // A(2) = u.x*P(1)-u.y*P(0);
1425 | // A(3) = u1.x*P1(2)-P1(0);
1426 | // A(4) = u1.y*P1(2)-P1(1);
1427 | // A(5) = u1.x*P(1)-u1.y*P1(0);
1428 | // Matx43d A; //not working for some reason...
1429 | // A(0) = u.x*P(2)-P(0);
1430 | // A(1) = u.y*P(2)-P(1);
1431 | // A(2) = u1.x*P1(2)-P1(0);
1432 | // A(3) = u1.y*P1(2)-P1(1);
1433 | Matx43d A(u.x*P(2, 0) - P(0, 0), u.x*P(2, 1) - P(0, 1), u.x*P(2, 2) - P(0, 2),
1434 | u.y*P(2, 0) - P(1, 0), u.y*P(2, 1) - P(1, 1), u.y*P(2, 2) - P(1, 2),
1435 | u1.x*P1(2, 0) - P1(0, 0), u1.x*P1(2, 1) - P1(0, 1), u1.x*P1(2, 2) - P1(0, 2),
1436 | u1.y*P1(2, 0) - P1(1, 0), u1.y*P1(2, 1) - P1(1, 1), u1.y*P1(2, 2) - P1(1, 2)
1437 | );
1438 | Matx41d B(-(u.x*P(2, 3) - P(0, 3)),
1439 | -(u.y*P(2, 3) - P(1, 3)),
1440 | -(u1.x*P1(2, 3) - P1(0, 3)),
1441 | -(u1.y*P1(2, 3) - P1(1, 3)));
1442 |
1443 | Mat_<double> X;
1444 | solve(A, B, X, DECOMP_SVD);
1445 |
1446 | return X;
1447 | }
1448 |
1449 |
1450 | /**
1451 | From "Triangulation", Hartley, R.I. and Sturm, P., Computer vision and image understanding, 1997
1452 | */
1453 | Mat_<double> IterativeLinearLSTriangulation(Point3d u, //homogeneous image point (u,v,1)
1454 | Matx34d P, //camera 1 matrix
1455 | Point3d u1, //homogeneous image point in 2nd camera
1456 | Matx34d P1 //camera 2 matrix
1457 | ) {
1458 | double wi = 1, wi1 = 1;
1459 | Mat_<double> X(4, 1);
1460 |
1461 | Mat_<double> X_ = LinearLSTriangulation(u, P, u1, P1);
1462 | X(0) = X_(0); X(1) = X_(1); X(2) = X_(2); X(3) = 1.0;
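// Each pass divides the equations by w = P.row(2)*X (the point's projective depth in that
// view), so the algebraic residual approximates the image reprojection error; iterate
// until the weights stop changing.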
1463 |
1464 | for (int i = 0; i<10; i++) { //Hartley suggests 10 iterations at most
1465 |
1466 | //recalculate weights
1467 | double p2x = Mat_<double>(Mat_<double>(P).row(2)*X)(0);
1468 | double p2x1 = Mat_<double>(Mat_<double>(P1).row(2)*X)(0);
1469 |
1470 | //breaking point
1471 | if (fabsf(wi - p2x) <= EPSILON && fabsf(wi1 - p2x1) <= EPSILON) break;
1472 |
1473 | wi = p2x;
1474 | wi1 = p2x1;
1475 |
1476 | //reweight equations and solve
1477 | Matx43d A((u.x*P(2, 0) - P(0, 0)) / wi, (u.x*P(2, 1) - P(0, 1)) / wi, (u.x*P(2, 2) - P(0, 2)) / wi,
1478 | (u.y*P(2, 0) - P(1, 0)) / wi, (u.y*P(2, 1) - P(1, 1)) / wi, (u.y*P(2, 2) - P(1, 2)) / wi,
1479 | (u1.x*P1(2, 0) - P1(0, 0)) / wi1, (u1.x*P1(2, 1) - P1(0, 1)) / wi1, (u1.x*P1(2, 2) - P1(0, 2)) / wi1,
1480 | (u1.y*P1(2, 0) - P1(1, 0)) / wi1, (u1.y*P1(2, 1) - P1(1, 1)) / wi1, (u1.y*P1(2, 2) - P1(1, 2)) / wi1
1481 | );
1482 | Mat_<double> B = (Mat_<double>(4, 1) << -(u.x*P(2, 3) - P(0, 3)) / wi,
1483 | -(u.y*P(2, 3) - P(1, 3)) / wi,
1484 | -(u1.x*P1(2, 3) - P1(0, 3)) / wi1,
1485 | -(u1.y*P1(2, 3) - P1(1, 3)) / wi1
1486 | );
1487 |
1488 | solve(A, B, X_, DECOMP_SVD);
1489 | X(0) = X_(0); X(1) = X_(1); X(2) = X_(2); X(3) = 1.0;
1490 | }
1491 | return X;
1492 | }
1493 |
1494 |
1495 | bool CheckCoherentRotation(cv::Mat_<double>& R) {
1496 | 
1497 | if (fabs(fabs(determinant(R)) - 1.0) > 1e-07) {
1498 | cerr << "det(R) != +-1.0, this is not a rotation matrix" << endl;
1499 | return false;
1500 | }
1501 |
1502 | return true;
1503 | }
1504 |
1505 | bool DecomposeEtoRandT(
1506 | Mat_<double>& E,
1507 | Mat_<double>& R1,
1508 | Mat_<double>& R2,
1509 | Mat_<double>& t1,
1510 | Mat_<double>& t2)
1511 | {
1512 | //Using HZ E decomposition
1513 | Mat svd_u, svd_vt, svd_w;
1514 | TakeSVDOfE(E, svd_u, svd_vt, svd_w);
1515 |
1516 | //check if first and second singular values are the same (as they should be)
1517 | double singular_values_ratio = fabsf(svd_w.at<double>(0) / svd_w.at<double>(1));
1518 | if (singular_values_ratio > 1.0) singular_values_ratio = 1.0 / singular_values_ratio; // flip ratio to keep it [0,1]
1519 | if (singular_values_ratio < 0.7) {
1520 | cout << "singular values are too far apart\n";
1521 | return false;
1522 | }
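// With E = U*diag(1,1,0)*Vt, HZ 9.19 gives two admissible rotations, U*W*Vt and U*Wt*Vt,
// and translation +/-u3 (the last column of U); the four combinations are the candidate
// poses that FindCameraMatrices tests with the cheirality check.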
1523 |
1524 | Matx33d W(0, -1, 0, //HZ 9.13
1525 | 1, 0, 0,
1526 | 0, 0, 1);
1527 | Matx33d Wt(0, 1, 0,
1528 | -1, 0, 0,
1529 | 0, 0, 1);
1530 | R1 = svd_u * Mat(W) * svd_vt; //HZ 9.19
1531 | R2 = svd_u * Mat(Wt) * svd_vt; //HZ 9.19
1532 | t1 = svd_u.col(2); //u3
1533 | t2 = -svd_u.col(2); //u3
1534 | return true;
1535 | }
1536 |
1537 |
1538 | void TakeSVDOfE(Mat_<double>& E, Mat& svd_u, Mat& svd_vt, Mat& svd_w) {
1539 | //Using OpenCV's SVD
1540 | cv::SVD svd(E, cv::SVD::MODIFY_A);
1541 | svd_u = svd.u;
1542 | svd_vt = svd.vt;
1543 | svd_w = svd.w;
1544 |
1545 | cout << "----------------------- SVD ------------------------\n";
1546 | cout << "U:\n" << svd_u << "\nW:\n" << svd_w << "\nVt:\n" << svd_vt << endl;
1547 | cout << "----------------------------------------------------\n";
1548 | }
1549 |
1550 | void KeyPointsToPoints(const vector<KeyPoint>& kps, vector<Point2f>& ps) {
1551 | ps.clear();
1552 | for (unsigned int i = 0; i < kps.size(); i++) ps.push_back(kps[i].pt);
1553 | }
1554 |
1555 | void PointsToKeyPoints(const vector<Point2f>& ps, vector<KeyPoint>& kps) {
1556 | kps.clear();
1557 | for (unsigned int i = 0; i < ps.size(); i++) kps.push_back(KeyPoint(ps[i], 1.0f));
1558 | }
1559 |
1560 |
1561 | #define intrpmnmx(val,min,max) (max==min ? 0.0 : ((val)-min)/(max-min))
1562 |
1563 | void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, const vector<float>& verror, const Scalar& _line_color)
1564 | {
1565 | double minVal, maxVal; minMaxIdx(verror, &minVal, &maxVal, 0, 0, status);
1566 | int line_thickness = 1;
1567 |
1568 | for (size_t i = 0; i < prevPts.size(); ++i)
1569 | {
1570 | if (status[i])
1571 | {
1572 | double alpha = intrpmnmx(verror[i], minVal, maxVal); alpha = 1.0 - alpha;
1573 | Scalar line_color(alpha*_line_color[0],
1574 | alpha*_line_color[1],
1575 | alpha*_line_color[2]);
1576 |
1577 | Point p = prevPts[i];
1578 | Point q = nextPts[i];
1579 |
1580 | double angle = atan2((double)p.y - q.y, (double)p.x - q.x);
1581 |
1582 | double hypotenuse = sqrt((double)(p.y - q.y)*(p.y - q.y) + (double)(p.x - q.x)*(p.x - q.x));
1583 |
1584 | if (hypotenuse < 1.0)
1585 | continue;
1586 |
1587 | // Here we lengthen the arrow by a factor of three.
1588 | q.x = (int)(p.x - 3 * hypotenuse * cos(angle));
1589 | q.y = (int)(p.y - 3 * hypotenuse * sin(angle));
1590 |
1591 | // Now we draw the main line of the arrow.
1592 | line(frame, p, q, line_color, line_thickness);
1593 |
1594 | // Now draw the tips of the arrow. I do some scaling so that the
1595 | // tips look proportional to the main line of the arrow.
1596 |
1597 | p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
1598 | p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
1599 | line(frame, p, q, line_color, line_thickness);
1600 |
1601 | p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
1602 | p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
1603 | line(frame, p, q, line_color, line_thickness);
1604 | }
1605 | }
1606 | }
1607 |
1608 |
1609 | std::vector<cv::DMatch> FlipMatches(const std::vector<cv::DMatch>& matches) {
1610 | std::vector<cv::DMatch> flip;
1611 | for (int i = 0; i < matches.size(); i++) {
1612 | flip.push_back(matches[i]);
1613 | swap(flip.back().queryIdx, flip.back().trainIdx);
1614 | }
1615 | return flip;
1616 | }
1617 |
1618 |
1619 | void GetAlignedPointsFromMatch(const std::vector<cv::KeyPoint>& imgpts1,
1620 | const std::vector<cv::KeyPoint>& imgpts2,
1621 | const std::vector<cv::DMatch>& matches,
1622 | std::vector<cv::KeyPoint>& pt_set1,
1623 | std::vector<cv::KeyPoint>& pt_set2)
1624 | {
1625 | for (unsigned int i = 0; i < matches.size(); i++) {
1626 | // cout << "matches[i].queryIdx " << matches[i].queryIdx << " matches[i].trainIdx " << matches[i].trainIdx << endl;
1627 | assert(matches[i].queryIdx < imgpts1.size());
1628 | pt_set1.push_back(imgpts1[matches[i].queryIdx]);
1629 | assert(matches[i].trainIdx < imgpts2.size());
1630 | pt_set2.push_back(imgpts2[matches[i].trainIdx]);
1631 | }
1632 | }
1633 |
1634 |
1635 | std::vector<cv::Point3d> getPointCloud() { return CloudPointsToPoints(pcloud); }
1636 | const cv::Mat& get_im_orig(int frame_num) { return imgs_orig[frame_num]; }
1637 | const std::vector<cv::KeyPoint>& getcorrespImg1Pt() { return correspImg1Pt; }
1638 | const std::vector<cv::Vec3b>& getPointCloudRGB() { if (pointCloudRGB.size() == 0) { GetRGBForPointCloud(pcloud, pointCloudRGB); } return pointCloudRGB; }
1639 | std::vector<cv::Matx34d> getCameras() {
1640 | std::vector<cv::Matx34d> v;
1641 | for (std::map<int, cv::Matx34d>::const_iterator it = Pmats.begin(); it != Pmats.end(); ++it) {
1642 | v.push_back(it->second);
1643 | }
1644 | return v;
1645 | }
1646 | std::vector<cv::Point3d> getPointCloudBeforeBA() { return CloudPointsToPoints(pointcloud_beforeBA); }
1647 | const std::vector<cv::Vec3b>& getPointCloudRGBBeforeBA() { return pointCloudRGB_beforeBA; }
1648 |
--------------------------------------------------------------------------------
/code/Common.h:
--------------------------------------------------------------------------------
1 | #ifndef _COMMON_H
2 | #define _COMMON_H
3 | #pragma once
4 | #pragma warning(disable: 4244 4018 4996 4800)
5 |
6 | #define __SFM__DEBUG__
7 | #define _SCL_SECURE_NO_WARNINGS
8 |
9 | #include <opencv2/core/core.hpp>
10 | #include <opencv2/features2d/features2d.hpp>
11 | #include <string>
12 | #include <vector>
13 | #include <map>
14 | #include <set>
15 | #include <list>
16 |
17 | #ifdef DEFINE_GLOBALS
18 | #define EXTERN
19 | #else
20 | #define EXTERN extern
21 | #endif
22 | //////////////////////////////////////////////////data structure
23 | struct CloudPoint {
24 | cv::Point3d pt;
25 | std::vector<int> imgpt_for_img;
26 | double reprojection_error;
27 | };
28 | //images
29 | EXTERN std::vector<cv::Mat> images;
30 | EXTERN std::vector<std::string> images_names;
31 | EXTERN std::vector<cv::Mat_<cv::Vec3b> > imgs_orig;
32 | EXTERN std::vector<cv::Mat> grey_imgs;
33 |
34 | //image keypoints
35 | EXTERN std::vector<std::vector<cv::KeyPoint> > imgpts;
36 | EXTERN std::vector<std::vector<cv::KeyPoint> > fullpts;
37 | EXTERN std::vector<std::vector<cv::KeyPoint> > imgpts_good;
38 |
39 | //match
40 | EXTERN std::map<std::pair<int, int>, std::vector<cv::DMatch> > matches_matrix;
41 |
42 | //calibration matrix
43 | EXTERN cv::Mat K;
44 | EXTERN cv::Mat_<double> Kinv;
45 | EXTERN cv::Mat cam_matrix, distortion_coeff;
46 | EXTERN cv::Mat distcoeff_32f;
47 | EXTERN cv::Mat K_32f;
48 |
49 | //cloud
50 | EXTERN std::vector<CloudPoint> pcloud;
51 | EXTERN std::vector<cv::Vec3b> pointCloudRGB;
52 | EXTERN std::vector<cv::KeyPoint> correspImg1Pt; //TODO: remove
53 | EXTERN std::vector<CloudPoint> pointcloud_beforeBA;
54 | EXTERN std::vector<cv::Vec3b> pointCloudRGB_beforeBA;
55 |
56 | EXTERN int m_first_view;
57 | EXTERN int m_second_view; //baseline's second view other than 0
58 | EXTERN std::set<int> done_views;
59 | EXTERN std::set<int> good_views;
60 |
61 | EXTERN std::map<int, cv::Matx34d> Pmats;
62 |
63 |
64 | class SfMUpdateListener
65 | {
66 | public:
67 | virtual void update(std::vector<cv::Point3d> pcld,
68 | std::vector<cv::Vec3b> pcldrgb,
69 | std::vector<cv::Point3d> pcld_alternate,
70 | std::vector<cv::Vec3b> pcldrgb_alternate,
71 | std::vector<cv::Matx34d> cameras) = 0;
72 | };
73 | EXTERN std::vector < SfMUpdateListener * > listeners;
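//Illustrative usage sketch (not part of the original sources): derive from the listener
//and register it before running the pipeline, e.g.
//
//  class PrintListener : public SfMUpdateListener {
//  public:
//      virtual void update(std::vector<cv::Point3d> pcld,
//                          std::vector<cv::Vec3b> pcldrgb,
//                          std::vector<cv::Point3d> pcld_alternate,
//                          std::vector<cv::Vec3b> pcldrgb_alternate,
//                          std::vector<cv::Matx34d> cameras)
//      { std::cout << pcld.size() << " points, " << cameras.size() << " cameras\n"; }
//  };
//
//  attach(new PrintListener()); //update() then fires after each newly added view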
74 |
75 | //////////////////////////////////////////////procedural programming
76 | void read_images_and_calibration_matrix(std::string dir_name, double downscale_factor);
77 | void optical_flow_feature_match();
78 | void PruneMatchesBasedOnF();
79 | void GetBaseLineTriangulation();
80 | void AdjustCurrentBundle();
81 | void attach(SfMUpdateListener *sul);
82 | void update();
83 | void RecoverCamerasIncremental();
84 | //////////////////////////////assist functions
85 | using namespace std;
86 | using namespace cv;
87 | void MatchFeatures(int idx_i, int idx_j, vector<DMatch>* matches);
88 | std::vector<DMatch> FlipMatches(const std::vector<DMatch>& matches);
89 | void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, const vector<float>& verror, const Scalar& line_color);