├── .gitignore
├── Duke
├── Duke.pro
├── Resource
│ ├── en.qm
│ └── zh.qm
├── Set.ui
├── baslercamera.cpp
├── baslercamera.h
├── blobdetector.cpp
├── blobdetector.h
├── camera.cpp
├── camera.h
├── cameracalibration.cpp
├── cameracalibration.h
├── dahengcamera.cpp
├── dahengcamera.h
├── dotmatch.cpp
├── dotmatch.h
├── en.ts
├── focusassistant.cpp
├── focusassistant.h
├── focusassistant.ui
├── glwidget.cpp
├── glwidget.h
├── graycodes.cpp
├── graycodes.h
├── imageviewer.cpp
├── imageviewer.h
├── imageviewer.ui
├── main.cpp
├── mainwindow.cpp
├── mainwindow.h
├── mainwindow.ui
├── manualmatch.cpp
├── manualmatch.h
├── manualmatch.ui
├── meshcreator.cpp
├── meshcreator.h
├── mfreconstruct.cpp
├── mfreconstruct.h
├── multifrequency.cpp
├── multifrequency.h
├── plyloader.cpp
├── plyloader.h
├── pointcloudimage.cpp
├── pointcloudimage.h
├── projector.cpp
├── projector.h
├── reconstruct.cpp
├── reconstruct.h
├── set.cpp
├── set.h
├── stereorect.cpp
├── stereorect.h
├── utilities.cpp
├── utilities.h
├── virtualcamera.cpp
├── virtualcamera.h
└── zh.ts
├── README.md
└── build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug
├── debug
├── en.qm
├── moc_dahengcamera.cpp
├── moc_dotmatch.cpp
├── moc_glwidget.cpp
├── moc_imageviewer.cpp
├── moc_mfreconstruct.cpp
├── moc_plyloader.cpp
├── moc_project.cpp
└── moc_stereorect.cpp
├── ui_Set.h
├── ui_imageviewer.h
├── ui_mainwindow.h
├── ui_manualmatch.h
└── ui_project.h
/.gitignore:
--------------------------------------------------------------------------------
1 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/pointcloudimage.obj
2 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/qrc_res.cpp
3 | *.obj
4 | *.manifest
5 | *.res
6 | *.pdb
7 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/Makefile
8 | *.Release
9 | *.ilk
10 | *.rc
11 | *.exe
12 | *.Debug
13 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_camera.cpp
14 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_mainwindow.cpp
15 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_set.cpp
16 | Duke/deployment.pri
17 | Duke/Duke.pro.user
18 | *.qrc
19 | *.png
20 | Duke/Duke.pro
21 | Duke/Duke.pro
22 | Duke/Duke.pro
23 | Duke/Duke.pro
24 | Duke/Duke.pro
25 | *.ini
26 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_mytransform.cpp
27 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_pregistration.cpp
28 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_registration.cpp
29 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_transform.cpp
30 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_transformwithhorn.cpp
31 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_multifrequency.cpp
32 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_mainwindow.h
33 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_Set.h
34 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_baslercamera.cpp
35 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_mainwindow.h
36 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_mainwindow.h
37 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_focusassistant.cpp
38 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_focusassistant.h
39 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/ui_mainwindow.h
40 | Duke/dotmatch.cpp.autosave
41 | build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug/debug/moc_manualmatch.cpp
--------------------------------------------------------------------------------
/Duke/Duke.pro:
--------------------------------------------------------------------------------
1 | TEMPLATE = app
2 |
3 | TARGET = Duke
4 |
5 | QT += quick widgets opengl
6 |
7 | SOURCES += \
8 | graycodes.cpp \
9 | main.cpp \
10 | mainwindow.cpp \
11 | meshcreator.cpp \
12 | pointcloudimage.cpp \
13 | projector.cpp \
14 | reconstruct.cpp \
15 | set.cpp \
16 | utilities.cpp \
17 | virtualcamera.cpp \
18 | plyloader.cpp \
19 | glwidget.cpp \
20 | cameracalibration.cpp \
21 | dotmatch.cpp \
22 | multifrequency.cpp \
23 | blobdetector.cpp \
24 | dahengcamera.cpp \
25 | baslercamera.cpp \
26 | focusassistant.cpp \
27 | manualmatch.cpp \
28 | imageviewer.cpp \
29 | stereorect.cpp \
30 | mfreconstruct.cpp
31 |
32 | RESOURCES += \
33 | Resource/res.qrc
34 |
35 | INCLUDEPATH += E:\opencv\build\include\
36 | D:\VC\inc\
37 | D:\mrpt\libs\base\include\
38 | D:\mrpt\libs\scanmatching\include\
39 | D:\mrpt\include\mrpt\mrpt-config\
40 | D:\glm\
41 | E:\freeglut-2.8.1\include\
42 | D:\genicam\library\cpp\include\
43 | D:\pylon\include\
44 |
45 | LIBS += -LD:\mrpt\lib\
46 | -LE:\freeglut-2.8.1\lib\x86\Debug\
47 | -LE:\opencv\build\x86\vc10\lib\
48 | -LD:\VC\lib\
49 | -LD:\genicam\library\cpp\lib\win32_i86\
50 | -LD:\pylon\lib\Win32\
51 | -lopencv_core249d\
52 | -lopencv_highgui249d\
53 | -lopencv_imgproc249d\
54 | -lopencv_features2d249d\
55 | -lopencv_calib3d249d\
56 | -lopencv_nonfree249d\
57 | -lopencv_flann249d\
58 | -lHVDAILT\
59 | -lHVExtend\
60 | -lHVUtil\
61 | -lRaw2Rgb\
62 | -llibmrpt-base122-dbg\
63 | -llibmrpt-scanmatching122-dbg\
64 | -lfreeglut\
65 | -lGCBase_MD_VC100_v2_3\
66 | -lPylonBase_MD_VC100\
67 | -lPylonBootstrapper\
68 | -lPylonGigE_MD_VC100_TL\
69 | -lPylonUsb_MD_VC100_TL\
70 | -lPylonUtility_MD_VC100\
71 |
72 |
73 | # Default rules for deployment.
74 | include(deployment.pri)
75 |
76 | HEADERS += \
77 | graycodes.h \
78 | mainwindow.h \
79 | meshcreator.h \
80 | pointcloudimage.h \
81 | projector.h \
82 | reconstruct.h \
83 | set.h \
84 | utilities.h \
85 | virtualcamera.h \
86 | plyloader.h \
87 | glwidget.h \
88 | cameracalibration.h \
89 | dotmatch.h \
90 | multifrequency.h \
91 | blobdetector.h \
92 | dahengcamera.h \
93 | baslercamera.h \
94 | focusassistant.h \
95 | manualmatch.h \
96 | imageviewer.h \
97 | stereorect.h \
98 | mfreconstruct.h
99 |
100 | FORMS += \
101 | mainwindow.ui \
102 | Set.ui \
103 | focusassistant.ui \
104 | manualmatch.ui \
105 | imageviewer.ui
106 |
107 | TRANSLATIONS += en.ts zh.ts
108 |
109 |
--------------------------------------------------------------------------------
/Duke/Resource/en.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DrawZeroPoint/Structure-Light-Reconstructor/4a79a80336fa52f06e4eb871e8dffe79e222a53e/Duke/Resource/en.qm
--------------------------------------------------------------------------------
/Duke/Resource/zh.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DrawZeroPoint/Structure-Light-Reconstructor/4a79a80336fa52f06e4eb871e8dffe79e222a53e/Duke/Resource/zh.qm
--------------------------------------------------------------------------------
/Duke/Set.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | SetDialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 455
10 | 356
11 |
12 |
13 |
14 | Settings
15 |
16 |
17 | font: 9pt "Calibri";
18 |
19 |
20 | -
21 |
22 |
-
23 |
24 |
25 | Qt::Horizontal
26 |
27 |
28 |
29 | 40
30 | 20
31 |
32 |
33 |
34 |
35 | -
36 |
37 |
38 |
39 | 0
40 | 0
41 |
42 |
43 |
44 | false
45 |
46 |
47 |
48 |
49 |
50 | Qt::Horizontal
51 |
52 |
53 | QDialogButtonBox::Apply|QDialogButtonBox::Cancel|QDialogButtonBox::Ok
54 |
55 |
56 |
57 |
58 |
59 | -
60 |
61 |
62 | 3
63 |
64 |
65 |
66 | Calibration Board
67 |
68 |
69 |
70 |
71 | 10
72 | 10
73 | 411
74 | 161
75 |
76 |
77 |
78 |
79 | 0
80 | 0
81 |
82 |
83 |
84 | Geometry
85 |
86 |
87 |
88 | 10
89 |
90 |
-
91 |
92 |
-
93 |
94 |
95 | Cell Width(mm):
96 |
97 |
98 |
99 | -
100 |
101 |
102 | 20
103 |
104 |
105 |
106 | -
107 |
108 |
109 | Cell Height(mm):
110 |
111 |
112 |
113 | -
114 |
115 |
116 | 20
117 |
118 |
119 |
120 |
121 |
122 | -
123 |
124 |
-
125 |
126 |
127 | Cell Number(Horizontal):
128 |
129 |
130 |
131 | -
132 |
133 |
134 | 10
135 |
136 |
137 |
138 | -
139 |
140 |
141 | Cell Number(Vertical):
142 |
143 |
144 | 0
145 |
146 |
147 |
148 | -
149 |
150 |
151 | 8
152 |
153 |
154 |
155 |
156 |
157 | -
158 |
159 |
160 | Qt::Horizontal
161 |
162 |
163 |
164 | 6
165 | 20
166 |
167 |
168 |
169 |
170 | -
171 |
172 |
-
173 |
174 |
175 | Board Width(mm):
176 |
177 |
178 |
179 | -
180 |
181 |
182 |
183 | 0
184 | 0
185 |
186 |
187 |
188 | 1000
189 |
190 |
191 | 200
192 |
193 |
194 |
195 | -
196 |
197 |
198 | Board Height(mm):
199 |
200 |
201 |
202 | -
203 |
204 |
205 |
206 | 0
207 | 0
208 |
209 |
210 |
211 | 1000
212 |
213 |
214 | 200
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 | Camera
226 |
227 |
228 |
229 |
230 | 10
231 | 9
232 | 411
233 | 121
234 |
235 |
236 |
237 |
238 | 0
239 | 0
240 |
241 |
242 |
243 | Camera Resolution
244 |
245 |
246 |
247 |
248 | 20
249 | 40
250 | 89
251 | 16
252 |
253 |
254 |
255 | 1280X1024
256 |
257 |
258 | true
259 |
260 |
261 |
262 |
263 |
264 |
265 | Projector
266 |
267 |
268 |
269 |
270 | 9
271 | 9
272 | 413
273 | 111
274 |
275 |
276 |
277 | Project Region
278 |
279 |
280 |
281 |
282 | 20
283 | 20
284 | 176
285 | 80
286 |
287 |
288 |
289 |
290 | 0
291 | 0
292 |
293 |
294 |
295 | Projector Resolution
296 |
297 |
298 | -
299 |
300 |
-
301 |
302 |
303 | Horizontal(pixel):
304 |
305 |
306 |
307 | -
308 |
309 |
310 | 1280
311 |
312 |
313 | 1280
314 |
315 |
316 |
317 | -
318 |
319 |
320 | Vertical(pixel):
321 |
322 |
323 |
324 | -
325 |
326 |
327 | 1024
328 |
329 |
330 | 1024
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 | 210
342 | 20
343 | 176
344 | 80
345 |
346 |
347 |
348 |
349 | 0
350 | 0
351 |
352 |
353 |
354 | Scan Resolution
355 |
356 |
357 | -
358 |
359 |
-
360 |
361 |
362 | Horizontal(pixel):
363 |
364 |
365 |
366 | -
367 |
368 |
369 | 1280
370 |
371 |
372 | 1280
373 |
374 |
375 |
376 | -
377 |
378 |
379 | Vertical(pixel):
380 |
381 |
382 |
383 | -
384 |
385 |
386 | 1024
387 |
388 |
389 | 1024
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 |
399 |
400 |
401 | Reconstruction
402 |
403 |
404 | -
405 |
406 |
407 |
408 | 0
409 | 0
410 |
411 |
412 |
413 | Threshold
414 |
415 |
416 |
-
417 |
418 |
419 | 10
420 |
421 |
-
422 |
423 |
424 | Black Threshold
425 |
426 |
427 |
428 | -
429 |
430 |
431 | 40
432 |
433 |
434 |
435 | -
436 |
437 |
438 | White Threshold
439 |
440 |
441 |
442 | -
443 |
444 |
445 |
446 |
447 | -
448 |
449 |
450 | Qt::Horizontal
451 |
452 |
453 |
454 | 244
455 | 20
456 |
457 |
458 |
459 |
460 |
461 |
462 |
463 | -
464 |
465 |
466 |
467 | 0
468 | 0
469 |
470 |
471 |
472 |
473 |
474 |
475 |
-
476 |
477 |
478 | Code Pattern
479 |
480 |
481 |
-
482 |
483 |
484 | Use Gray
485 |
486 |
487 | false
488 |
489 |
490 |
491 | -
492 |
493 |
494 | Use Gray with epipolar rectify
495 |
496 |
497 | true
498 |
499 |
500 |
501 | -
502 |
503 |
504 | Use MultiFrequency
505 |
506 |
507 | false
508 |
509 |
510 |
511 |
512 |
513 |
514 | -
515 |
516 |
517 | 10
518 |
519 |
-
520 |
521 |
522 | Auto Contrast
523 |
524 |
525 |
526 | -
527 |
528 |
529 | Ray Sampling
530 |
531 |
532 |
533 | -
534 |
535 |
536 | Export Obj
537 |
538 |
539 | false
540 |
541 |
542 | false
543 |
544 |
545 |
546 | -
547 |
548 |
549 | Export Ply
550 |
551 |
552 | true
553 |
554 |
555 |
556 | -
557 |
558 |
559 | Display Color
560 |
561 |
562 | false
563 |
564 |
565 |
566 |
567 |
568 | -
569 |
570 |
571 | Qt::Horizontal
572 |
573 |
574 |
575 | 221
576 | 20
577 |
578 |
579 |
580 |
581 |
582 |
583 |
584 |
585 |
586 |
587 |
588 |
589 |
590 |
591 |
592 |
593 | buttonBox
594 | accepted()
595 | SetDialog
596 | accept()
597 |
598 |
599 | 248
600 | 254
601 |
602 |
603 | 157
604 | 274
605 |
606 |
607 |
608 |
609 | buttonBox
610 | rejected()
611 | SetDialog
612 | reject()
613 |
614 |
615 | 316
616 | 260
617 |
618 |
619 | 286
620 | 274
621 |
622 |
623 |
624 |
625 |
626 |
--------------------------------------------------------------------------------
/Duke/baslercamera.cpp:
--------------------------------------------------------------------------------
1 | #include "baslercamera.h"
2 | #include
3 |
// Constructor.
// NOTE(review): PylonAutoInitTerm is a *local* variable here, so
// PylonInitialize() AND PylonTerminate() both run inside this constructor —
// the pylon runtime is torn down again the moment the constructor returns,
// contradicting the comment below. The guard object should probably be a
// class member so it lives as long as this object; confirm against the
// pylon API documentation.
BaslerCamera::BaslerCamera(QObject *parent) :
    QObject(parent)
{
    // Intended: automatically call PylonInitialize/PylonTerminate so the
    // pylon runtime system is initialized during the lifetime of this object.
    Pylon::PylonAutoInitTerm autoInitTerm;

}
12 |
// Enumerates the attached Basler devices, attaches up to c_maxCamerasToUse
// of them, starts free-running acquisition and stores a pointer to the most
// recently grabbed frame buffer in pImageBuffer.
void BaslerCamera::openCamera()
{
    CTlFactory& tlFactory = CTlFactory::GetInstance();

    // Get all attached devices and warn if no device is found.
    DeviceInfoList_t devices;
    if ( tlFactory.EnumerateDevices(devices) == 0 )
    {
        // NOTE(review): execution continues after this warning, so
        // cameras.Initialize(0) runs on an empty device list — an early
        // return here was probably intended.
        QMessageBox::warning(NULL,tr("Basler Camera"),tr("Basler cameras were not found."));
    }

    // Create an array of instant cameras for the found devices and avoid
    // exceeding a maximum number of devices.
    cameras.Initialize(min( devices.size(), c_maxCamerasToUse));

    // Create and attach all Pylon Devices.
    for ( size_t i = 0; i < cameras.GetSize(); ++i)
    {
        cameras[i].Attach( tlFactory.CreateDevice( devices[i]));
    }

    // Starts grabbing for all cameras starting with index 0. The grabbing
    // is started for one camera after the other, so the images of all
    // cameras are not taken at the same time. A hardware trigger setup can
    // be used to cause all cameras to grab synchronously. According to
    // their default configuration, the cameras are set up for free-running
    // continuous acquisition.
    cameras.StartGrabbing();

    // NOTE(review): IsGrabbing() returns a bool, so `i < cameras.IsGrabbing()`
    // runs this body at most once. A `while (cameras.IsGrabbing())` loop or
    // an image-count bound (c_countOfImagesToGrab) was probably intended —
    // confirm against the pylon samples this code is based on.
    for( int i = 0; i < cameras.IsGrabbing(); ++i)
    {
        cameras.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);

        // When the cameras in the array are created the camera context value
        // is set to the index of the camera in the array. The camera context
        // is a user-settable value attached to each grab result; it can be
        // used to determine which camera produced the result.
        // pImageBuffer stays valid only until the next RetrieveResult call.
        pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
    }
}
54 |
// Stops acquisition on all cameras in the array, then closes them.
void BaslerCamera::closeCamera()
{
    cameras.StopGrabbing();
    cameras.Close();
}
60 |
--------------------------------------------------------------------------------
/Duke/baslercamera.h:
--------------------------------------------------------------------------------
1 | #ifndef BASLERCAMERA_H
2 | #define BASLERCAMERA_H
3 |
4 | //Qt
5 | #include
6 | //Pylon
7 | #include
8 |
9 | // Namespace for using pylon objects.
10 | using namespace Pylon;
11 |
12 | // Namespace for using cout.
13 | using namespace std;
14 |
// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 10;

// Upper bound on how many physical cameras openCamera() will attach to.
static const size_t c_maxCamerasToUse = 2;


// Thin Qt wrapper around a pylon CInstantCameraArray: enumerates attached
// Basler devices, grabs frames, and exposes the latest raw frame buffer.
class BaslerCamera : public QObject
{
    Q_OBJECT
public:
    BaslerCamera(QObject *parent = 0);

    // Points into the buffer owned by ptrGrabResult — valid only until the
    // next RetrieveResult() call; not owned by this class.
    const uint8_t *pImageBuffer;
    // This smart pointer will receive the grab result data.
    CGrabResultPtr ptrGrabResult;

    // Enumerate/attach devices and start free-running acquisition.
    void openCamera();
    // Stop acquisition and close all attached devices.
    void closeCamera();

private:
    CInstantCameraArray cameras;

};
38 |
39 | #endif // BASLERCAMERA_H
40 |
--------------------------------------------------------------------------------
/Duke/blobdetector.cpp:
--------------------------------------------------------------------------------
1 | #include "blobdetector.h"
2 |
// Blob-detection tuning parameters (file scope so they can be tweaked in
// one place). They mirror the fields of cv::SimpleBlobDetector::Params.
// FIX: the `<float>` template argument of std::numeric_limits had been
// lost; without it these declarations do not compile.

// Threshold sweep used when binarizing the source image.
int thresholdStep = 10;
int minThreshold = 50;
int maxThreshold = 220;
// A blob must appear at this many consecutive thresholds to be kept.
int minRepeatability = 2;
int minDistBetweenBlobs = 10;

// Keep only blobs whose center pixel has this intensity (0 = dark blobs).
bool filterByColor = true;
int blobColor = 0;

// Contour area bounds, in pixels (moments m00).
bool filterByArea = true;
int minArea = 25;
int maxArea = 5000;

// Circularity = 4*pi*area/perimeter^2; equals 1 for a perfect circle.
bool filterByCircularity = false;
float minCircularity = 0.8f;
float maxCircularity = std::numeric_limits<float>::max();

// Ratio of the smaller to the larger principal moment of inertia.
bool filterByInertia = true;
//minInertiaRatio = 0.6;
float minInertiaRatio = 0.1f;
float maxInertiaRatio = std::numeric_limits<float>::max();

// Convexity = contour area / convex-hull area.
bool filterByConvexity = true;
//minConvexity = 0.8;
float minConvexity = 0.95f;
float maxConvexity = std::numeric_limits<float>::max();
29 |
// Default constructor — all tuning lives in the file-scope parameters above.
BlobDetector::BlobDetector()
{
}
33 |
34 | void BlobDetector::findBlobs(const cv::Mat &binaryImage, vector ¢ers) const
35 | {
36 | centers.clear();
37 |
38 | vector < vector > contours;
39 | Mat tmpBinaryImage = binaryImage.clone();
40 | findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
41 |
42 | for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
43 | {
44 | Point2d center;
45 | Moments moms = moments(Mat(contours[contourIdx]));
46 | if (filterByArea)
47 | {
48 | double area = moms.m00;
49 | if (area < minArea || area >= maxArea)
50 | continue;
51 | }
52 |
53 | if (filterByCircularity)
54 | {
55 | double area = moms.m00;
56 | double perimeter = arcLength(Mat(contours[contourIdx]), true);
57 | double ratio = 4 * CV_PI * area / (perimeter * perimeter);
58 | if (ratio < minCircularity || ratio >= maxCircularity)
59 | continue;
60 | }
61 |
62 | if (filterByInertia)
63 | {
64 | double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
65 | const double eps = 1e-2;
66 | double ratio;
67 | if (denominator > eps)
68 | {
69 | double cosmin = (moms.mu20 - moms.mu02) / denominator;
70 | double sinmin = 2 * moms.mu11 / denominator;
71 | double cosmax = -cosmin;
72 | double sinmax = -sinmin;
73 |
74 | double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
75 | double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
76 | ratio = imin / imax;
77 | }
78 | else
79 | {
80 | ratio = 1;
81 | }
82 |
83 | if (ratio < minInertiaRatio || ratio >= maxInertiaRatio)
84 | continue;
85 | }
86 |
87 | if (filterByConvexity)
88 | {
89 | vector < Point > hull;
90 | convexHull(Mat(contours[contourIdx]), hull);
91 | double area = contourArea(Mat(contours[contourIdx]));
92 | double hullArea = contourArea(Mat(hull));
93 | double ratio = area / hullArea;
94 | if (ratio < minConvexity || ratio >= maxConvexity)
95 | continue;
96 | }
97 |
98 | center = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
99 |
100 | if (filterByColor)
101 | {
102 | if (binaryImage.at (cvRound(center.y), cvRound(center.x)) != blobColor)
103 | continue;
104 | }
105 |
106 | centers.push_back(center);
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/Duke/blobdetector.h:
--------------------------------------------------------------------------------
1 | #ifndef BLOBDETECTOR_H
2 | #define BLOBDETECTOR_H
3 |
4 | #include
5 |
6 | using namespace cv;
7 |
8 | class BlobDetector
9 | {
10 | public:
11 | BlobDetector();
12 | void findBlobs(const cv::Mat &binaryImage, vector ¢ers) const;
13 | private:
14 |
15 | };
16 |
17 | #endif // BLOBDETECTOR_H
18 |
--------------------------------------------------------------------------------
/Duke/camera.cpp:
--------------------------------------------------------------------------------
1 | #include "camera.h"
2 | #include
3 | #include
4 |
// Lower bound for the exposure ratio in SetExposureTime (guards against 0).
#define MY_ZERO 0.000000001

// Fixed acquisition configuration applied to both HV cameras at startup.
const HV_RESOLUTION Resolution = RES_MODE0;
const HV_SNAP_MODE SnapMode = CONTINUATION;
const HV_BAYER_CONVERT_TYPE ConvertType = BAYER2RGB_NEIGHBOUR1;

const long Gain = 10;
// Exposure time is expressed as the fraction Upper/Lower seconds
// (see SetExposureTime).
const long ExposureTint_Upper = 60;
const long ExposureTint_Lower = 1000;
const long ShutterDelay = 0;
const long ADCLevel = ADC_LEVEL2;
// Acquisition window origin, in pixels.
const int XStart = 0;
const int YStart = 0;
const long lVBlanking = 0;
const HV_SNAP_SPEED SnapSpeed = HIGH_SPEED;
20 |
// Opens the two HV (Daheng) digital cameras, applies the fixed acquisition
// configuration above, prepares the BITMAPINFO header used for display,
// allocates the raw/RGB image buffers and starts a 30 ms preview timer.
// (Original comments were in Chinese; translated below.)
Camera::Camera()
{
    Width = 640;
    Height = 480;
    rawBuffer_1 = NULL;
    rawBuffer_2 = NULL;
    m_pImageBuffer = NULL;
    m_lHBlanking = 0;
    m_lVBlanking = 0;
    HVSTATUS status = STATUS_OK;

    status = BeginHVDevice(1, &camera_1);// Open digital camera 1
    if(status==STATUS_OK)
        m_bOpen = true;
    else
        return;// camera 1 missing: bail out, m_bOpen stays false
    status = BeginHVDevice(2, &camera_2);// Open digital camera 2

    HVSetResolution(camera_1, Resolution);// Set the camera resolution
    HVSetResolution(camera_2, Resolution);
    HVSetSnapMode(camera_1, SnapMode);// Snap mode: CONTINUATION (continuous) or TRIGGER (external trigger)
    HVSetSnapMode(camera_2, SnapMode);
    HVADCControl(camera_1, ADC_BITS, ADCLevel);// Set the ADC level
    HVADCControl(camera_2, ADC_BITS, ADCLevel);

    HVTYPE type = UNKNOWN_TYPE;// Query the device type (result unused here)
    int size = sizeof(HVTYPE);
    HVGetDeviceInfo(camera_1,DESC_DEVICE_TYPE, &type, &size);

    HVSetBlanking(camera_1, m_lHBlanking, m_lVBlanking);// Set horizontal/vertical blanking
    HVSetBlanking(camera_2, m_lHBlanking, m_lVBlanking);
    HVSetOutputWindow(camera_1, XStart, YStart, Width, Height);
    HVSetOutputWindow(camera_2, XStart, YStart, Width, Height);
    HVSetSnapSpeed(camera_1, SnapSpeed);// Set the acquisition speed
    HVSetSnapSpeed(camera_2, SnapSpeed);

    SetExposureTime(Width, ExposureTint_Upper, ExposureTint_Lower, m_lHBlanking, SnapSpeed, Resolution);// Set the exposure time

    // m_pBmpInfo points into the m_chBmpBuf buffer; the caller could also
    // allocate the BITMAPINFO buffer itself.
    m_pBmpInfo = (BITMAPINFO *)m_chBmpBuf;
    // Initialize the BITMAPINFO structure, used when saving BMP files and
    // when displaying captured images.
    m_pBmpInfo->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    // Image width, normally the output-window width
    m_pBmpInfo->bmiHeader.biWidth = Width;
    // Image height, normally the output-window height
    m_pBmpInfo->bmiHeader.biHeight = Height;

    m_pBmpInfo->bmiHeader.biPlanes = 1;
    m_pBmpInfo->bmiHeader.biBitCount = 24;
    m_pBmpInfo->bmiHeader.biCompression = BI_RGB;
    m_pBmpInfo->bmiHeader.biSizeImage = 0;
    m_pBmpInfo->bmiHeader.biXPelsPerMeter = 0;
    m_pBmpInfo->bmiHeader.biYPelsPerMeter = 0;
    m_pBmpInfo->bmiHeader.biClrUsed = 0;
    m_pBmpInfo->bmiHeader.biClrImportant = 0;

    // One byte/pixel raw (pre-Bayer-conversion) buffers; 3 bytes/pixel RGB.
    rawBuffer_1 = new BYTE[Width * Height];
    rawBuffer_2 = new BYTE[Width * Height];
    m_pImageBuffer = new BYTE[Width * Height * 3];

    // Refresh the preview QImages every 30 ms.
    // NOTE(review): the timer has no parent and is never deleted;
    // `new QTimer(this)` would tie its lifetime to this object.
    QTimer *timer = new QTimer();
    timer->start(30);
    connect(timer, SIGNAL(timeout()), this, SLOT(CaptureFrame()));
}
85 |
86 | Camera::~Camera()
87 | {
88 | HVSTATUS status = STATUS_OK;
89 | // 关闭数字摄像机,释放数字摄像机内部资源
90 | status = EndHVDevice(camera_1);
91 | status = EndHVDevice(camera_2);
92 | OnSnapexStop();
93 | OnSnapexClose();
94 | // 回收图像缓冲区
95 | delete []rawBuffer_1;
96 | delete []rawBuffer_2;
97 | delete []m_pImageBuffer;
98 | }
99 |
// Opens the snap (capture) channel on both cameras, registering the static
// per-frame callback with `this` as its context pointer.
void Camera::OnSnapexOpen()
{
    HVSTATUS status = STATUS_OK;
    status = HVOpenSnap(camera_1, SnapThreadCallback, this);
    status = HVOpenSnap(camera_2, SnapThreadCallback, this);
}
106 |
107 | void Camera::OnSnapexStart()
108 | {
109 | HVSTATUS status = STATUS_OK;
110 | BYTE *ppBuf_1[1];
111 | BYTE *ppBuf_2[1];
112 | ppBuf_1[0] = rawBuffer_1;
113 | ppBuf_2[0] = rawBuffer_2;
114 | status = HVStartSnap(camera_1, ppBuf_1,1);
115 | status = HVStartSnap(camera_2, ppBuf_2,1);
116 | }
117 |
// Stops acquisition on both cameras (the snap channel remains open).
void Camera::OnSnapexStop()
{
    HVSTATUS status = STATUS_OK;
    status = HVStopSnap(camera_1);
    status = HVStopSnap(camera_2);
}
124 |
// Closes the snap channel on both cameras (counterpart of OnSnapexOpen).
void Camera::OnSnapexClose()
{
    HVSTATUS status = STATUS_OK;
    status = HVCloseSnap(camera_1);
    status = HVCloseSnap(camera_2);
}
131 |
// Per-frame callback registered with HVOpenSnap; runs on the driver's snap
// thread. The frame data is consumed later from rawBuffer_1/2 by
// CaptureFrame(). Returning 1 presumably means "continue grabbing" —
// TODO confirm against the HV SDK documentation.
int CALLBACK Camera::SnapThreadCallback(HV_SNAP_INFO *pInfo)
{
    return 1;
}
136 |
// Timer slot (every 30 ms): wraps the current raw capture buffers in
// QImage objects for display. This QImage constructor does not copy the
// pixel data — it references rawBuffer_1/2 directly.
// NOTE(review): a new QImage is heap-allocated on every tick and the
// previous one is never deleted, so this leaks; image_1/image_2 are also
// never initialized in the constructor before the first tick. Consider
// value-type QImage members, or deleting the old objects here once they
// are guaranteed to be initialized.
void Camera::CaptureFrame()
{
    image_1 = new QImage(rawBuffer_1, Width, Height, QImage::Format_Indexed8);
    image_2 = new QImage(rawBuffer_2, Width, Height, QImage::Format_Indexed8);
}
142 |
// Snap-change hook; currently a no-op that reports success (1).
int Camera::OnSnapChange()
{
    return 1;
}
147 |
// Converts a requested exposure time, given as the fraction
// lTintUpper/lTintLower seconds, into the sensor's exposure register value
// and applies it to camera_1 via HVAECControl.
//
// nWindWidth : current output-window width in pixels (affects line time)
// HBlanking  : horizontal blanking, in pixel clocks
// SnapSpeed  : HIGH_SPEED selects a 24 MHz pixel clock, otherwise 12 MHz
//
// NOTE(review): only camera_1 is configured — camera_2 keeps its default
// exposure; confirm whether that is intentional. The magic constants
// (244, 180, 552, the +9/-19 blanking adjustment, the 16383 cap) appear to
// come from the sensor's line-timing formula — TODO confirm against the
// HV SDK documentation.
HVSTATUS Camera::SetExposureTime(int nWindWidth, long lTintUpper, long lTintLower, long HBlanking, HV_SNAP_SPEED SnapSpeed, HV_RESOLUTION Resolution)
{
    HVTYPE type = UNKNOWN_TYPE;
    int size = sizeof(HVTYPE);
    HVGetDeviceInfo(camera_1,DESC_DEVICE_TYPE, &type, &size);// type queried but not used below

    int nOutputWid = nWindWidth;

    double dExposure = 0.0;
    // Desired exposure in seconds, clamped away from zero by MY_ZERO.
    double dTint = max((double)lTintUpper/(double)lTintLower,MY_ZERO);

    double lClockFreq = 0.0;

    lClockFreq = (SnapSpeed == HIGH_SPEED)? 24000000:12000000;
    long lTb = HBlanking;
    lTb += 9;
    lTb -= 19;
    if(lTb <= 0) lTb =0;
    if(((double)nOutputWid + 244.0 + lTb ) > 552)
        dExposure = (dTint* lClockFreq + 180.0)/((double)nOutputWid + 244.0 + lTb);
    else
        dExposure = ((dTint * lClockFreq)+ 180.0) / 552 ;

    // Round to nearest and clamp to the register range [1, 16383].
    if((dExposure-(int)dExposure) >0.5)
        dExposure += 1.0;
    if(dExposure <= 0)
        dExposure = 1;
    else if(dExposure > 16383)
        dExposure = 16383;

    return HVAECControl(camera_1, AEC_EXPOSURE_TIME, (long)dExposure);
}
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
--------------------------------------------------------------------------------
/Duke/camera.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERA_H
2 | #define CAMERA_H
3 |
4 | #include
5 | #include
6 | #include "Windows.h"//加载此头文件以解决大恒相机头文件类型未定义问题
7 | #include
8 | #include
9 |
10 | #define WM_SNAP_CHANGE (WM_USER + 100)
11 |
// Dual HV (Daheng) digital-camera wrapper: opens two cameras in the
// constructor, manages their snap (capture) streams and exposes the raw
// frame buffers as QImages. (Original member comments translated from
// Chinese.)
class Camera : public QWidget
{
    Q_OBJECT
public:
    Camera();
    ~Camera();

    BYTE *rawBuffer_1;
    BYTE *rawBuffer_2;///< Raw capture-data buffers, one per camera
    QImage *image_1;
    QImage *image_2;

    int Width;
    int Height;
    BOOL m_bOpen; ///< Initialization flag (camera 1 opened successfully)

    void OnSnapexOpen();
    void OnSnapexStart();
    void OnSnapexStop();
    void OnSnapexClose();
    int OnSnapChange();
private slots:
    void CaptureFrame();
private:
    HHV camera_1;
    HHV camera_2;///< Digital-camera handles

    BOOL m_bStart; ///< Acquisition-started flag
    long m_lHBlanking; ///< Horizontal blanking
    long m_lVBlanking; ///< Vertical blanking
    BITMAPINFO *m_pBmpInfo; ///< BITMAPINFO pointer used when displaying images

    BYTE *m_pImageBuffer; ///< Buffer holding the Bayer-converted (RGB) image
    char m_chBmpBuf[2048]; ///< Storage for the BITMAPINFO; m_pBmpInfo points here
    /// Capture callback. Could also be a global function; as a class member
    /// it must be static.
    static int CALLBACK SnapThreadCallback(HV_SNAP_INFO *pInfo);
    /// Set the exposure time (applies to camera_1).
    HVSTATUS SetExposureTime(int nWindWidth, long lTintUpper, long lTintLower, long HBlanking, HV_SNAP_SPEED SnapSpeed, HV_RESOLUTION Resolution);
};
51 |
52 | #endif // CAMERA_H
53 |
--------------------------------------------------------------------------------
/Duke/cameracalibration.cpp:
--------------------------------------------------------------------------------
1 | #include "cameracalibration.h"
2 | #include "utilities.h"
3 |
4 | #include
5 |
6 | CameraCalibration::CameraCalibration()
7 | {
8 | squareSize.width = 0;
9 | squareSize.height = 0;
10 | numOfCamImgs = 14;
11 | camCalibrated = false;
12 | }
13 |
// Releases every loaded calibration image.
CameraCalibration::~CameraCalibration()
{
    unloadCameraImgs();
}
18 |
// Exports one calibration matrix to a text file via Utilities::exportMat.
//
// path              : destination file path
// CAMCALIB_OUT_PARAM: selector naming which matrix to export (one of the
//                     CAMCALIB_OUT_* / STEREOCALIB_OUT_* constants)
//
// NOTE(review): if the selector matches no case, `out` stays empty and an
// empty matrix is written.
void CameraCalibration::exportTxtFiles(const char *path, int CAMCALIB_OUT_PARAM)
{
    cv::Mat out;
    switch (CAMCALIB_OUT_PARAM)
    {
    case CAMCALIB_OUT_MATRIX:
        out = camMatrix;
        break;
    case CAMCALIB_OUT_DISTORTION:
        out = distortion;
        break;
    case CAMCALIB_OUT_ROTATION:
        out = rotationMatrix;
        break;
    case CAMCALIB_OUT_TRANSLATION:
        out = translationVector;
        break;
    case CAMCALIB_OUT_FUNDAMENTAL:
        out = fundamentalMatrix;
        break;
    case CAMCALIB_OUT_STATUS:
        out = statusMatrix;
        break;
    case CAMCALIB_OUT_H1:
        out = H1;
        break;
    case CAMCALIB_OUT_H2:
        out = H2;
        break;
#ifdef TEST_STEREO
    // Stereo-calibration outputs, only compiled in test builds.
    case STEREOCALIB_OUT_MATRIXL:
        out = camMatrixL;
        break;
    case STEREOCALIB_OUT_MATRIXR:
        out = camMatrixR;
        break;
    case STEREOCALIB_OUT_DISL:
        out = distortionL;
        break;
    case STEREOCALIB_OUT_DISR:
        out = distortionR;
        break;
    case STEREOCALIB_OUT_R:
        out = R;
        break;
    case STEREOCALIB_OUT_T:
        out = T;
        break;
    case STEREOCALIB_OUT_F:
        out = F;
        break;
#endif
    }
    Utilities::exportMat(path, out);
}
74 |
75 |
// Loads the per-camera calibration images "<fpath>1.png" .. "<fpath>13.png"
// plus the image used for extrinsic calibration, and records the image
// size. Shows a warning box and returns early if any image is missing.
// (Original comments translated from Chinese.)
void CameraCalibration::loadCameraImgs(QString fpath)
{
    if (calibImgs.size())
        calibImgs.clear();

    for(int i = 0; i < numOfCamImgs-1; i++)
    {
        // Assumes 13 calibration images per camera (numOfCamImgs-1);
        // fpath should already include the L/R camera prefix.
        QString path = fpath;
        path += QString::number(i+1) + ".png";
        cv::Mat img = cv::imread(path.toStdString());
        if(img.empty()){
            QMessageBox::warning(NULL, QObject::tr("Images Not Found"), QObject::tr("The camera calibration images are not found."));
            return;
        }
        calibImgs.push_back(img);
    }

    QString path = fpath;
    path += "1.png";// Use image 1 as the extrinsic-calibration image
    extrImg = cv::imread(path.toStdString());
    if(extrImg.empty()){
        QMessageBox::warning(NULL, QObject::tr("Image Not Found"), QObject::tr("The images for extrinsicts calibration are missing."));
        return;
    }
    if(!calibImgs[0].empty())
        camImageSize = calibImgs[0].size();
}
104 |
105 | void CameraCalibration::unloadCameraImgs()
106 | {
107 | for(int i = 0; i < calibImgs.size(); i++)
108 | calibImgs[i].release();
109 | extrImg.release();
110 | }
111 |
112 |
113 | void CameraCalibration::undistortCameraImgPoints(cv::vector points_in,cv::vector &points_out)
114 | {
115 | cv::undistortPoints(points_in,points_out,camMatrix,distortion);
116 | float fX = camMatrix.at(0,0);
117 | float fY = camMatrix.at(1,1);
118 | float cX = camMatrix.at(0,2);
119 | float cY = camMatrix.at(1,2);
120 |
121 | for(int j=0; j &camCorners, cv::vector *objCorners)
130 | {
131 | cv::Mat img_grey;
132 | cv::Mat img_copy;
133 | img.copyTo(img_copy);
134 |
135 | if(!useSymmetric){
136 | numOfCornersX = 4;
137 | numOfCornersY = 11;
138 | }
139 | else{
140 | numOfCornersX = 11;//这里按标准双目标定板确定横向和纵向方格数目,进一步应改为从set获取
141 | numOfCornersY = 9;
142 | }
143 |
144 | bool found = false;
145 | cv::cvtColor(img, img_grey, CV_RGB2GRAY);
146 | img.copyTo(img_copy);
147 |
148 | ///这里尝试采用opencv自带的找圆心功能
149 | cv::Size patternsize(numOfCornersX, numOfCornersY);
150 |
151 | cv::bitwise_not(img_grey, img_grey);//黑底白色圆圈的标定板需要反相处理
152 |
153 | if(!useSymmetric)
154 | found = cv::findCirclesGrid(img_grey, patternsize, camCorners,cv::CALIB_CB_ASYMMETRIC_GRID);
155 | else
156 | found = cv::findCirclesGrid(img_grey, patternsize, camCorners,cv::CALIB_CB_SYMMETRIC_GRID);
157 |
158 | if(!found){
159 | return false;
160 | }
161 | ///要实现全自动可以屏蔽下面的while循环
162 | #ifdef DEBUG
163 | cv::drawChessboardCorners(img_copy, patternsize, camCorners, found);
164 | int key = cv::waitKey(1);
165 | while(found)
166 | {
167 | cv::imshow("Calibration", img_copy);
168 | key = cv::waitKey(1);
169 | if(key==27)
170 | found=false;
171 | if(key==13)
172 | break;
173 | }
174 | #endif
175 |
176 | if(found){
177 | if(squareSize.height == 0){
178 | squareSize.height = 20;
179 | squareSize.width = 20;
180 | }
181 | if(!useSymmetric){
182 | for (int i = 0; i < numOfCornersY; i++){
183 | for (int j = 0; j < numOfCornersX; j++){
184 | objCorners->push_back(cv::Point3f(float((2*j + i % 2)*squareSize.width),float(i*squareSize.width),0));
185 | }
186 | }
187 | }
188 | else{
189 | for(int i = 0; ipush_back(p);
196 | }
197 | }
198 | }
199 | return true;
200 | }
201 | else
202 | return false;
203 | }
204 |
205 |
// Detects the calibration-board circle centers in every loaded image.
// Returns 0 on success; a value > 0 indicates failure (either the 1-based
// index of the image the user wants to recapture, or numOfCamImgs+1 to
// abort). (Original comments translated from Chinese.)
int CameraCalibration::extractImageCorners()// Return value > 0 means failure; 0 means success
{
    if(calibImgs.size() == 0)
        return numOfCamImgs+1;
    imgBoardCornersCam.clear();
    objBoardCornersCam.clear();

    for(size_t i = 0; i < calibImgs.size(); i++){
        cv::vector cCam;
        cv::vector cObj;
        bool found = findCornersInCamImg(calibImgs[i], cCam, &cObj );

        if(!found){
            QString cam = (isleft)?("L"):("R");
            if(QMessageBox::warning(NULL,NULL,tr("Couldn't find circles in image ") + cam + QString::number(i+1)
                                    + ", Recapture?",
                                    QMessageBox::Yes,
                                    QMessageBox::Cancel) == QMessageBox::Yes){
                return i+1;// 1-based index of the image to recapture
            }
            else
                return numOfCamImgs+1;// Abort: report the unreadable image sentinel
        }

        if(cCam.size()){
            imgBoardCornersCam.push_back(cCam);
            objBoardCornersCam.push_back(cObj);
            if(isleft)
                imgBoardCornersCamL.push_back(cCam);
            else
                imgBoardCornersCamR.push_back(cCam);
        }
    }

    /* For solving the fundamental matrix, sample points are taken from the
       corner data of image pair 12 (L12, R12).
       NOTE(review): index 11 is hard-coded — this requires at least 12
       successfully processed images; confirm numOfCamImgs is never reduced. */
    if (isleft){
        for (int i = 0; i < numOfCornersX*numOfCornersY; i++){
            findFunLeft.push_back(imgBoardCornersCam[11][i]);
        }
    }
    else{
        for (int i = 0; i < numOfCornersX*numOfCornersY; i++){
            findFunRight.push_back(imgBoardCornersCam[11][i]);
        }
    }
    return 0;
}
253 |
254 | int CameraCalibration::calibrateCamera()
255 | {
256 | //check if corners for camera calib has been extracted
257 | if(imgBoardCornersCam.size() != numOfCamImgs-1){
258 | if(!extractImageCorners()){
259 | return 0;
260 | }
261 | }
262 |
263 | cv::vector camRotationVectors;
264 | cv::vector camTranslationVectors;
265 |
266 | rms = cv::calibrateCamera(objBoardCornersCam, imgBoardCornersCam, camImageSize, camMatrix, distortion, camRotationVectors,camTranslationVectors,0);
267 | //rms = cv::calibrateCamera(objBoardCornersCam, imgBoardCornersCam, camImageSize, camMatrix,distortion, camRotationVectors, camTranslationVectors, CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
268 | if(isleft){
269 | undistortCameraImgPoints(findFunLeft,findFunLeft);
270 | camMatrixL = camMatrix;
271 | distortionL = distortion;
272 | }
273 | else{
274 | undistortCameraImgPoints(findFunRight,findFunRight);
275 | camMatrixR = camMatrix;
276 | distortionR = distortion;
277 | }
278 | camCalibrated = true;
279 | return 1;
280 | }
281 |
282 |
283 | bool CameraCalibration::findCameraExtrisics()
284 | {
285 | cv::vector imgPoints;
286 | cv::vector objPoints3D;
287 | findCornersInCamImg(extrImg, imgPoints, &objPoints3D);
288 | cv::Mat rVec;
289 | //find extrinsics rotation & translation
290 | bool r = cv::solvePnP(objPoints3D,imgPoints,camMatrix,distortion,rVec,translationVector);
291 | cv::Rodrigues(rVec,rotationMatrix);
292 | return r;
293 | }
294 |
// Estimates the fundamental matrix from the pre-collected left/right sample
// corners and computes the uncalibrated rectifying homographies H1/H2.
// The sample-point buffers are cleared afterwards.
void CameraCalibration::findFundamental()
{
    fundamentalMatrix = cv::findFundamentalMat(findFunLeft, findFunRight, statusMatrix, cv::FM_RANSAC);
    //cv::stereoRectifyUncalibrated(findFunLeft, findFunRight, fundamentalMatrix, camImageSize, H1, H2);
    cv::stereoRectifyUncalibrated(cv::Mat(findFunLeft), cv::Mat(findFunRight), fundamentalMatrix, camImageSize, H1, H2);
    findFunLeft.clear();
    findFunRight.clear();
#ifdef TEST_STEREO
    // Full stereo calibration; rms becomes the stereo reprojection error.
    rms = stereoCalibrate(objBoardCornersCam,imgBoardCornersCamL,imgBoardCornersCamR,camMatrixL,distortionL,camMatrixR,distortionR
                          ,camImageSize,R,T,E,F);
#endif
}
307 |
// Sets the calibration-board square (dot pitch) size, in millimetres.
void CameraCalibration::setSquareSize(cv::Size size_in_mm)
{
    squareSize = size_in_mm;
}
312 |
// Returns the configured calibration-board square size (millimetres).
cv::Size CameraCalibration::getSquareSize()
{
    return squareSize;
}
317 |
// Sets the expected number of calibration images per camera.
void CameraCalibration::setNumberOfCameraImgs(int num)
{
    numOfCamImgs = num;
}
322 |
// Returns the expected number of calibration images per camera.
int CameraCalibration::getNumberOfCameraImgs()
{
    return numOfCamImgs;
}
327 |
328 |
--------------------------------------------------------------------------------
/Duke/cameracalibration.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERACALIBRATION_H
2 | #define CAMERACALIBRATION_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | //#define DEBUG
10 | #define TEST_STEREO
11 |
12 | #define CAMCALIB_OUT_MATRIX 1
13 | #define CAMCALIB_OUT_DISTORTION 2
14 | #define CAMCALIB_OUT_ROTATION 3
15 | #define CAMCALIB_OUT_TRANSLATION 4
16 | #define CAMCALIB_OUT_FUNDAMENTAL 5
17 | #define CAMCALIB_OUT_STATUS 6
18 | #define CAMCALIB_OUT_H1 7
19 | #define CAMCALIB_OUT_H2 8
20 | #define STEREOCALIB_OUT_MATRIXL 9
21 | #define STEREOCALIB_OUT_MATRIXR 10
22 | #define STEREOCALIB_OUT_DISL 11
23 | #define STEREOCALIB_OUT_DISR 12
24 | #define STEREOCALIB_OUT_R 13
25 | #define STEREOCALIB_OUT_T 14
26 | #define STEREOCALIB_OUT_F 15
27 |
28 | using namespace cv;
29 |
// Intrinsic/extrinsic calibration of one camera of a stereo pair, plus the
// shared stereo results (fundamental matrix, rectifying homographies, and the
// stereoCalibrate outputs). One instance is reused for the left and right
// cameras, selected by the isleft flag.
class CameraCalibration :public QObject
{
public:
    CameraCalibration();
    ~CameraCalibration();

    int calibrateCamera();

    void loadCameraImgs(QString fpath);
    void unloadCameraImgs();

    bool findCameraExtrisics();
    void findFundamental();

    void setSquareSize(cv::Size size);
    cv::Size getSquareSize();

    void setNumberOfCameraImgs(int num);
    int getNumberOfCameraImgs();
    void exportTxtFiles(const char *pathNfileName, int CAMCALIB_OUT_PARAM);
    void printData();
    int extractImageCorners();

    // Per-camera calibration results.
    cv::Mat camMatrix;
    cv::Mat distortion;
    cv::Mat rotationMatrix;
    cv::Mat translationVector;
    cv::Mat fundamentalMatrix;
    cv::Mat H1;
    cv::Mat H2;
    cv::Mat statusMatrix;

    /****stereoCalib****/
    Mat camMatrixL;
    Mat camMatrixR;
    Mat distortionL;
    Mat distortionR;
    Mat R;
    Mat T;
    Mat E;
    Mat F;
    cv::vector> imgBoardCornersCamL;
    cv::vector> imgBoardCornersCamR;

    cv::vector> imgBoardCornersCam;

    cv::vector findFunLeft;// left-camera image corners used for fundamental-matrix estimation
    cv::vector findFunRight;

    bool isleft;
    bool useSymmetric;// symmetric vs. asymmetric calibration board
    double rms;

private:
    //bool findCornersInCamImg(cv::Mat camImg,cv::vector *camCorners,cv::vector *objCorners);
    bool findCornersInCamImg(cv::Mat camImg,cv::vector &camCorners,cv::vector *objCorners);
    /*******Disabled functionality
    void drawOutsideOfRectangle(cv::Mat img,cv::vector rectanglePoints, float color);
    cv::vector manualMarkCheckBoard(cv::Mat img);
    float markWhite(cv::Mat img);
    void manualMarkCalibBoardCorners(cv::Mat img,cv::vector &imgPoints_out, cv::vector &objPoints_out);
    void perspectiveTransformation(cv::vector corners_in,cv::Mat homoMatrix, cv::vector &points_out);
    ********/
    void undistortCameraImgPoints(cv::vector points_in,cv::vector &points_out);

    cv::vector> objBoardCornersCam;

    cv::Vector calibImgs;
    cv::Mat extrImg;

    cv::Size squareSize;
    int numOfCornersX;
    int numOfCornersY;
    int numOfCamImgs;
    cv::Size camImageSize;
    bool camCalibrated;
};
107 |
108 | #endif // CAMERACALIBRATION_H
109 |
--------------------------------------------------------------------------------
/Duke/dahengcamera.cpp:
--------------------------------------------------------------------------------
1 | #include "dahengcamera.h"
2 | #include "qmessagebox.h"
3 |
// Coordinates of the output window's top-left corner relative to the
// top-left of the camera's full 1280x1024 frame.
const int XStart = 0;
const int YStart = 0;
const HV_RESOLUTION Resolution = RES_MODE0;
const HV_SNAP_MODE SnapMode = CONTINUATION;
const HV_BAYER_CONVERT_TYPE ConvertType = BAYER2RGB_NEIGHBOUR1;
const HV_SNAP_SPEED SnapSpeed = HIGH_SPEED;
int ADCLevel = ADC_LEVEL2; // current ADC level; updated by daHengExposure()
11 |
// Constructs the wrapper; the cameras are not opened until openDaHengCamera().
DaHengCamera::DaHengCamera(QObject *parent) :
    QObject(parent)
{
    cameraOpened=false;
}
17 |
// Stops acquisition, closes both cameras and frees the raw image buffers —
// but only if the cameras were successfully opened.
DaHengCamera::~DaHengCamera()
{
    if(cameraOpened){
        OnSnapexStop();
        OnSnapexClose();
        HVSTATUS status = STATUS_OK;
        // Close the digital cameras and release their internal resources.
        status = EndHVDevice(m_hhv_1);
        status = EndHVDevice(m_hhv_2);
        // Reclaim the image buffers.
        delete []m_pRawBuffer_1;
        delete []m_pRawBuffer_2;
    }
}
32 |
33 | ///---------------------相机-----------------------///
34 | void DaHengCamera::daHengExposure(int leftexposure, int rightexposure)
35 | {
36 | switch (leftexposure) {
37 | case 0:
38 | ADCLevel = ADC_LEVEL3;
39 | break;
40 | case 1:
41 | ADCLevel = ADC_LEVEL2;
42 | break;
43 | case 2:
44 | ADCLevel = ADC_LEVEL1;
45 | break;
46 | case 3:
47 | ADCLevel = ADC_LEVEL0;
48 | break;
49 | }
50 | HVADCControl(m_hhv_1, ADC_BITS, ADCLevel);
51 | switch (rightexposure) {
52 | case 0:
53 | ADCLevel = ADC_LEVEL3;
54 | break;
55 | case 1:
56 | ADCLevel = ADC_LEVEL2;
57 | break;
58 | case 2:
59 | ADCLevel = ADC_LEVEL1;
60 | break;
61 | case 3:
62 | ADCLevel = ADC_LEVEL0;
63 | break;
64 | }
65 | HVADCControl(m_hhv_2, ADC_BITS, ADCLevel);
66 | }
67 |
// Opens both Daheng cameras at the requested output-window size, configures
// resolution / snap mode / ADC / acquisition speed, allocates the raw frame
// buffers, then opens the snap channels and starts continuous acquisition.
// Shows a warning box and returns early if either camera is missing.
void DaHengCamera::openDaHengCamera(int camerawidth, int cameraheight)
{
    cam_w = camerawidth;
    cam_h = cameraheight;
    if (!cameraOpened){
        HVSTATUS status_1 = STATUS_OK;
        HVSTATUS status_2 = STATUS_OK;
        m_pRawBuffer_1 = NULL;
        m_pRawBuffer_2 = NULL;

        status_1 = BeginHVDevice(1, &m_hhv_1);
        status_2 = BeginHVDevice(2, &m_hhv_2);
        if(status_1==STATUS_OK&&status_2==STATUS_OK)
            cameraOpened = true;
        else{
            cameraOpened = false;
            QMessageBox::warning(NULL, tr("Cameras not found"), tr("Make sure two Daheng cameras have connected to the computer."));
            return;
        }
        HVSetResolution(m_hhv_1, Resolution);//Set the resolution of cameras
        HVSetResolution(m_hhv_2, Resolution);

        HVSetSnapMode(m_hhv_1, SnapMode);//Snap modes include CONTINUATION / TRIGGER
        HVSetSnapMode(m_hhv_2, SnapMode);

        HVADCControl(m_hhv_1, ADC_BITS, ADCLevel);// set the ADC level
        HVADCControl(m_hhv_2, ADC_BITS, ADCLevel);

        HVTYPE type = UNKNOWN_TYPE;// query the device type
        int size = sizeof(HVTYPE);
        HVGetDeviceInfo(m_hhv_1,DESC_DEVICE_TYPE, &type, &size);// both cameras are the same model, so only one is queried

        HVSetOutputWindow(m_hhv_1, XStart, YStart, camerawidth, cameraheight);
        HVSetOutputWindow(m_hhv_2, XStart, YStart, camerawidth, cameraheight);

        HVSetSnapSpeed(m_hhv_1, SnapSpeed);// set the acquisition speed
        HVSetSnapSpeed(m_hhv_2, SnapSpeed);

        m_pRawBuffer_1 = new BYTE[camerawidth * cameraheight];
        m_pRawBuffer_2 = new BYTE[camerawidth * cameraheight];
    }
    OnSnapexOpen();
    OnSnapexStart();
}
112 |
113 | void DaHengCamera::daHengSnapShot(int camNo)
114 | {
115 | //m_pRawBuffer_1 = new BYTE[cam_w * cam_h];
116 | //m_pRawBuffer_2 = new BYTE[cam_w * cam_h];
117 | (camNo==1)?(HVSnapShot(m_hhv_1, &m_pRawBuffer_1, 1)):(HVSnapShot(m_hhv_2, &m_pRawBuffer_2, 1));
118 | }
119 |
// Opens the snap channels of both cameras, registering SnapThreadCallback.
// NOTE(review): the returned HVSTATUS is assigned but never checked.
void DaHengCamera::OnSnapexOpen()
{
    HVSTATUS status = STATUS_OK;
    status = HVOpenSnap(m_hhv_1, SnapThreadCallback, this);
    status = HVOpenSnap(m_hhv_2, SnapThreadCallback, this);
}
126 |
// Starts continuous acquisition on both cameras into the raw buffers.
// NOTE(review): the returned HVSTATUS is assigned but never checked.
void DaHengCamera::OnSnapexStart()
{
    HVSTATUS status = STATUS_OK;
    //ppBuf_1[0] = m_pRawBuffer_1;
    //ppBuf_2[0] = m_pRawBuffer_2;
    status = HVStartSnap(m_hhv_1, &m_pRawBuffer_1,1);
    status = HVStartSnap(m_hhv_2, &m_pRawBuffer_2,1);
}
135 |
// Stops continuous acquisition on both cameras.
// NOTE(review): the returned HVSTATUS is assigned but never checked.
void DaHengCamera::OnSnapexStop()
{
    HVSTATUS status = STATUS_OK;
    status = HVStopSnap(m_hhv_1);
    status = HVStopSnap(m_hhv_2);
}
142 |
// Closes the snap channels of both cameras.
// NOTE(review): the returned HVSTATUS is assigned but never checked.
void DaHengCamera::OnSnapexClose()
{
    HVSTATUS status = STATUS_OK;
    status = HVCloseSnap(m_hhv_1);
    status = HVCloseSnap(m_hhv_2);
}
149 |
// Stops acquisition and closes the snap channels, without ending the devices.
// NOTE(review): cameraOpened is not reset here, so the destructor will call
// stop/close again before EndHVDevice — confirm double stop/close is safe.
void DaHengCamera::closeCamera()
{
    OnSnapexStop();
    OnSnapexClose();
}
155 |
// Frame callback registered with HVOpenSnap. Does no processing itself and
// returns 1; appears to be a placeholder required by the SDK interface.
int CALLBACK DaHengCamera::SnapThreadCallback(HV_SNAP_INFO *pInfo)
{
    return 1;
}
160 |
--------------------------------------------------------------------------------
/Duke/dahengcamera.h:
--------------------------------------------------------------------------------
1 | #ifndef DAHENGCAMERA_H
2 | #define DAHENGCAMERA_H
3 |
4 | #include
5 |
6 | #include "Windows.h"//加载此头文件以解决大恒相机头文件类型未定义问题
7 | #include
8 | #include
9 |
// Wrapper around the Daheng HV camera SDK driving a stereo pair: opens,
// configures, snaps, and closes two cameras together.
class DaHengCamera : public QObject
{
    Q_OBJECT
public:
    DaHengCamera(QObject *parent = 0);
    ~DaHengCamera();

    bool cameraOpened; // true once both cameras opened successfully

    void daHengExposure(int leftexposure, int rightexposure);
    void openDaHengCamera(int camerawidth, int cameraheight);
    void daHengSnapShot(int camNo);
    void closeCamera();

    // Raw frame buffers, one per camera; allocated in openDaHengCamera().
    BYTE *m_pRawBuffer_1;
    BYTE *m_pRawBuffer_2;

private:
    HHV m_hhv_1;
    HHV m_hhv_2;

    int cam_w;
    int cam_h;

    static int CALLBACK SnapThreadCallback(HV_SNAP_INFO *pInfo);

    ///--------------- camera helper functions ---------------///
    void OnSnapexOpen();
    void OnSnapexStart();
    void OnSnapexStop();
    void OnSnapexClose();

};
43 |
44 | #endif // DAHENGCAMERA_H
45 |
--------------------------------------------------------------------------------
/Duke/dotmatch.h:
--------------------------------------------------------------------------------
1 | #ifndef DOTMATCH_H
2 | #define DOTMATCH_H
3 |
4 | //调试用宏定义
5 | //#define DEBUG
6 | //#define USE_ADAPTIVE_THRESHOLD
7 | #define USE_FOUR_POINT
8 | //#define TEST_SURF
9 |
10 | // Qt
11 | #include
12 |
13 | // OpenCV
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 |
23 | // STL
24 | #include
25 |
26 | // SLR
27 | #include "reconstruct.h"
28 | #include "virtualcamera.h"
29 | #include "utilities.h"
30 | #include "blobdetector.h"
31 | #include "manualmatch.h"
32 | #include "stereorect.h"
33 |
34 | // MRPT
35 | #include
36 |
37 | using namespace cv;
38 | using namespace std;
39 |
// Detects circular markers ("dots") in stereo image pairs, matches them across
// successive scans, and accumulates the rigid transform between scans.
class DotMatch : public QObject
{
    Q_OBJECT
public:

    DotMatch(QObject *parent = 0, QString projectPath = NULL, bool useManual = true, bool useepi = true);
    ~DotMatch();

    ManualMatch *mm;

    vector> findDot(Mat &image, bool isleft);
    bool matchDot(Mat &limage, Mat &rimage);
    void setUpManual(Mat LImage, Mat RImage);// initialise the manual-marking window
    void activeManual();// show the manual-marking window
    void finishMatch();
    int OSTU_Region(cv::Mat &image);

    vector> dotInOrder;

    // Correspondence between newly found dots and previously known dots.
    // For a dot that is actually a known one:
    //   x = its index in dotFeature
    //   y = its index in dotPosition (even or odd)
    cv::vector correspondPointEven;
    cv::vector correspondPointOdd;

    // Data needed to mark and display the dots. The six int values are:
    // left x, y, right x, y, known flag (0/1, 0 = unknown), and,
    // if known, the dot's unique id.
    vector> dotForMark;

    int bwThreshold;
    int blocksize;// binarisation parameter supplied by the user
    bool firstFind;
    int scanSN;

private:
    QString path;
    bool useManualMatch;
    bool useEpi;// whether epipolar rectification is used; if so, dot coordinates are computed via the Q-matrix method
    stereoRect *sr;

    bool triangleCalculate();
    cv::vector > calFeature(cv::vector dotP);
    bool dotClassify(cv::vector > featureTemp);
    bool FTTM(cv::vector &correspondPoint, cv::vector dotPositionCurrent, cv::vector dotPositionFormer);
    void updateDot(cv::vector &correspondPoint, cv::vector &dotPositionCurrent, cv::vector dotPositionFormer);
    vector calNeighbor(vector > input, int num);
    bool checkNeighbor(vector referance, vector needcheck);
    void calMatrix();
    //void hornTransform(double &data[], cv::vector target, cv::vector move);
    void markPoint();

    bool isBelongTo(size_t e, vector C);// whether C contains element e

    vector subPixel(Mat img, vector> vec);
    Reconstruct *rc;
    BlobDetector *bd;

    Mat fundMat;
    Mat Homo1;
    Mat Homo2;

    vector dotRemove;// indices (within the current scan) of dots that failed triangulation and must be removed

    Mat outR;// rotation from the later scan to the earlier one
    Mat outT;// translation from the later scan to the earlier one
    Mat outRInv;// rotation from the earlier scan to the later one
    Mat outTInv;// translation from the earlier scan to the later one (original comment said "rotation" — likely a typo)
    Mat matRotation;// accumulated (global) rotation matrix
    Mat matTransform;// accumulated (global) translation vector
    cv::vector dotPositionEven;// absolute coordinates of dots from even-numbered scans
    cv::vector dotPositionOdd;// absolute coordinates of dots from odd-numbered scans
    cv::vector> dotFeature;
    vector> neighborFeature;

private slots:
    void onfinishmanual();

signals:
    // Emitted by the onfinishmanual() slot to tell MainWindow to run its
    // finishmanualmatch() slot, which in turn calls finishMatch().
    void receivedmanualmatch();

};
122 |
// Triangle formed by three marker indices plus the pairwise distances between
// its vertices; used for feature-based dot matching between scans.
class Triangle{
public:
    Triangle(int Vertex_0, int Vertex_1, int Vertex_2, float distance_12, float distance_02, float distance_01);

    static bool copmareTriangle(Triangle tri_known, Triangle tri_unknown, vector &corr, float &error);// test whether two triangles are equal (within error)
    static float calDistance(Point3f point_1,Point3f point_2);

    int ver_0;
    int ver_1;
    int ver_2;
    float dis_0;
    float dis_1;
    float dis_2;
};
137 |
138 | #endif // DOTMATCH_H
139 |
--------------------------------------------------------------------------------
/Duke/focusassistant.cpp:
--------------------------------------------------------------------------------
1 | #include "focusassistant.h"
2 | #include "ui_focusassistant.h"
3 |
// Builds the focus-assistant window: tracks which camera is selected and
// hides the window (also emitting winhide()) when OK is clicked.
FocusAssistant::FocusAssistant(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::FocusAssistant)
{
    ui->setupUi(this);

    displayLeft = checkstate();
    connect(ui->leftCamera, SIGNAL(toggled(bool)), this, SLOT(checkchange()));
    connect(ui->okButton, SIGNAL(clicked()), SIGNAL(winhide()));
    connect(ui->okButton, SIGNAL(clicked()), this, SLOT(hide()));
    //connect(this, SIGNAL(destroyed()), SIGNAL(winhide()));
}
16 |
// Releases the generated UI.
FocusAssistant::~FocusAssistant()
{
    delete ui;
}
21 |
22 | bool FocusAssistant::checkstate()
23 | {
24 | return (ui->leftCamera->isChecked())?(true):(false);
25 | }
26 |
// Slot: keeps displayLeft in sync when the camera radio button toggles.
void FocusAssistant::checkchange()
{
    displayLeft = checkstate();
}
31 |
// Displays one live-view frame in the image label.
void FocusAssistant::playImage(QPixmap img)
{
    ui->imageDisplay->setPixmap(img);
}
36 |
--------------------------------------------------------------------------------
/Duke/focusassistant.h:
--------------------------------------------------------------------------------
1 | #ifndef FOCUSASSISTANT_H
2 | #define FOCUSASSISTANT_H
3 |
4 | #include
5 |
6 | namespace Ui {
7 | class FocusAssistant;
8 | }
9 |
// Small window showing a live camera feed to help focus the lenses;
// the user picks which camera (left/right) to display.
class FocusAssistant : public QWidget
{
    Q_OBJECT

public:
    explicit FocusAssistant(QWidget *parent = 0);
    ~FocusAssistant();

    bool displayLeft;            // true when the left camera is selected
    void playImage(QPixmap img); // show one frame in the view label

private:
    Ui::FocusAssistant *ui;
    bool checkstate();           // read the radio-button state
signals:
    void winhide();              // emitted when the user confirms and the window hides
private slots:
    void checkchange();          // keeps displayLeft in sync with the UI
};
29 |
30 | #endif // FOCUSASSISTANT_H
31 |
--------------------------------------------------------------------------------
/Duke/focusassistant.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | FocusAssistant
4 |
5 |
6 |
7 | 0
8 | 0
9 | 835
10 | 592
11 |
12 |
13 |
14 | Focus Assistant
15 |
16 |
17 |
18 | :/splash.png:/splash.png
19 |
20 |
21 | -
22 |
23 |
24 |
25 | 0
26 | 0
27 |
28 |
29 |
30 |
31 | 1024
32 | 800
33 |
34 |
35 |
36 | Live View
37 |
38 |
39 | true
40 |
41 |
42 |
43 | -
44 |
45 |
-
46 |
47 |
-
48 |
49 |
50 |
51 | 0
52 | 0
53 |
54 |
55 |
56 | Select Camera
57 |
58 |
59 |
-
60 |
61 |
62 |
63 | 0
64 | 0
65 |
66 |
67 |
68 | Left Camera
69 |
70 |
71 | true
72 |
73 |
74 |
75 | -
76 |
77 |
78 |
79 | 0
80 | 0
81 |
82 |
83 |
84 | Right Camera
85 |
86 |
87 |
88 |
89 |
90 |
91 | -
92 |
93 |
94 |
95 | 0
96 | 0
97 |
98 |
99 |
100 | Confirm
101 |
102 |
103 |
104 |
105 |
106 | -
107 |
108 |
109 | Qt::Horizontal
110 |
111 |
112 |
113 | 402
114 | 20
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/Duke/glwidget.cpp:
--------------------------------------------------------------------------------
1 | #include "glwidget.h"
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 |
// File-scope view state shared by the GL callbacks.
GLfloat scale = 1;      // zoom factor applied in draw()
GLfloat percent = 0.1;  // shrink factor applied to PLY vertex coordinates
GLfloat speed = 20;     // panning speed for middle-button drags
bool hasModel = false;  // true once a PLY model has been loaded
glm::mat4 transform_camera(1.0f); // camera pose: its position/orientation in world coordinates
glm::mat4 transform_model(1.0f);  // model transform: object coordinates to world coordinates
glm::mat4 model_view_matrix;

bool keyPressed[2] = {false, false};
17 |
// Creates a frameless, double-buffered GL view with an initial background
// colour, builds the background gradient, and instantiates the PLY loader.
GLWidget::GLWidget(QWidget *parent) :
    QGLWidget(QGLFormat(QGL::SampleBuffers), parent, 0, Qt::FramelessWindowHint)
{
    setFormat(QGLFormat(QGL::DoubleBuffer | QGL::DepthBuffer));
    rotationX = 0.0;
    rotationY = 0.0;
    rotationZ = 0.0;
    offsetX = 0.0;
    offsetY = 0.0;
    pointSize = 1;
    backColor = QColor::fromCmykF(0.5, 0.4, 0.4, 0.2);
    createGradient();
    plyloader = new PlyLoader(this);
}
32 |
// Releases the PLY loader.
GLWidget::~GLWidget()
{
    delete plyloader;
}
37 |
38 | void GLWidget::LoadModel(QString loadpath)
39 | {
40 | hasModel = plyloader->LoadModel(loadpath);
41 | if(hasModel)
42 | updateGL();
43 | else
44 | return;
45 | }
46 |
// One-time GL setup: projection, clear colour, lighting, depth test,
// blending, and the initial camera/model transforms.
void GLWidget::initializeGL()
{
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(30, float(this->width())/this->height(), 1.0, 1.0e10);

    qglClearColor(backColor);
    glShadeModel(GL_SMOOTH);
    glEnable(GL_CULL_FACE);
    SetupLights();
    glEnable(GL_DEPTH_TEST);

    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnable(GL_NORMALIZE);

    // Camera looks at the origin from (0,0,40) with an inverted up vector;
    // the model is shifted so the cloud sits roughly in view.
    transform_camera = glm::affineInverse(glm::lookAt(glm::vec3(0,0,40), glm::vec3(0,0,0), glm::vec3(0,-1,0)));
    transform_model = glm::translate(glm::vec3(-10,-10,0));
}
66 |
67 | void GLWidget::resizeGL(int width, int height)
68 | {
69 | glViewport(0, 0, this->width(), this->height());
70 | glMatrixMode(GL_PROJECTION);
71 | glLoadIdentity();
72 | gluPerspective(28.0,float(this->width())/this->height(), 1.0, 1.0e10);
73 | }
74 |
// Per-frame render: rebuilds the model-view matrix from the camera and model
// transforms, loads it, and draws the point cloud.
void GLWidget::paintGL()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    model_view_matrix = glm::affineInverse(transform_camera);
    glMatrixMode(GL_MODELVIEW);
    glLoadMatrixf(&model_view_matrix[0][0]);
    // NOTE(review): the matrix is loaded twice — the first load above is
    // immediately overwritten after the model transform is applied; confirm
    // whether the first glLoadMatrixf is intentional.
    model_view_matrix *= transform_model;
    glMatrixMode(GL_MODELVIEW);
    glLoadMatrixf(&model_view_matrix[0][0]);
    draw();
}
86 |
87 | void GLWidget::draw()
88 | {
89 | GLfloat sizes[]=
90 | {
91 | scale, scale, scale
92 | };
93 | glRotatef(rotationX, 1.0, 0.0, 0.0);
94 | glRotatef(rotationY, 0.0, 1.0, 0.0);
95 | glRotatef(rotationZ, 0.0, 0.0, 1.0);
96 | glScalef(sizes[0], sizes[1], sizes[2]);
97 |
98 | long int total = plyloader->m_totalConnectedPoints;
99 | for(int p = 0; p < total; ++p)
100 | {
101 | glPointSize(pointSize);
102 | glBegin(GL_POINTS);
103 | qglColor(QColor::fromCmyk(255,0,255,0));
104 | glVertex3f((plyloader->mp_vertexXYZ[p*3]) * percent,
105 | (plyloader->mp_vertexXYZ[p*3 + 1]) * percent,
106 | (plyloader->mp_vertexXYZ[p*3 + 2]) * percent);
107 | glEnd();
108 | }
109 | glFlush();
110 | }
111 |
// Remembers the press position so mouseMoveEvent can compute drag deltas.
void GLWidget::mousePressEvent(QMouseEvent *event)
{
    lastPos = event->pos();
}
116 |
117 | void GLWidget::mouseMoveEvent(QMouseEvent *event)
118 | {
119 |
120 | GLfloat dx = GLfloat(event->x() - lastPos.x()) / width();
121 | GLfloat dy = GLfloat(event->y() - lastPos.y()) / height();
122 |
123 | if(event->buttons() == Qt::LeftButton){
124 | rotationX += 180 * dy;
125 | rotationY += 180 * dx;
126 | updateGL();
127 | }
128 | else if(event->buttons() == Qt::RightButton){
129 | rotationX += 180 * dx;
130 | rotationZ += 180 * dy;
131 | updateGL();
132 | }
133 | else if(event->buttons()==Qt::MiddleButton) {
134 | transform_camera *= glm::translate(glm::vec3(-speed*dx,speed*dy,0));
135 | updateGL();
136 | }
137 | lastPos = event->pos();
138 | }
139 |
140 | void GLWidget::mouseDoubleClickEvent(QMouseEvent * /*event*/)
141 | {
142 | QColor color = QColorDialog::getColor(backColor, this);
143 | if(color.isValid())
144 | {
145 | backColor = color;
146 | qglClearColor(backColor);
147 | }
148 | }
149 |
// Mouse-wheel zoom: adjusts the global scale factor and repaints while the
// scale stays inside (0, 10).
void GLWidget::wheelEvent(QWheelEvent *event)
{
    double numDegrees = - event->delta() / 3600.0;
    scale += numDegrees;
    if(scale < 10 && scale > 0)
    {
        updateGL();
    }
    else
    {
        // NOTE(review): an out-of-range zoom snaps back to 1 without a
        // repaint — confirm this is the intended behaviour.
        scale = 1;
    }
}
163 |
164 |
// Configures one positional light (GL_LIGHT0) and colour-tracking material
// properties for the fixed-function pipeline.
void GLWidget::SetupLights()
{
    GLfloat ambientLight[] = {0.6f, 0.6f, 0.6f, 1.0f};// ambient light
    GLfloat diffuseLight[] = {0.7f, 0.7f, 0.7f, 1.0f};// diffuse light
    GLfloat specularLight[] = {0.9f, 0.9f, 0.9f, 1.0f};// specular light
    GLfloat lightPos[] = {50.0f, 80.0f, 60.0f, 1.0f};// light position

    glEnable(GL_LIGHTING);                              // enable lighting
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);     // ambient component
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);     // diffuse component
    glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);   // specular component
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos);        // light position
    glEnable(GL_LIGHT0);                                // turn on light 0

    glEnable(GL_COLOR_MATERIAL);                        // track material colour from glColor
    glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);  // faces affected by colour tracking
    glMaterialfv(GL_FRONT, GL_SPECULAR, specularLight); // material's specular response
    glMateriali(GL_FRONT, GL_SHININESS, 100);           // shininess exponent
}
184 |
185 |
// Prepares the radial background gradient used by drawBackground().
void GLWidget::createGradient()
{
    gradient.setCoordinateMode(QGradient::ObjectBoundingMode);
    gradient.setCenter(0.45, 0.50);
    gradient.setFocalPoint(0.40, 0.45);
    gradient.setColorAt(0.0, QColor(105, 146, 182));
    gradient.setColorAt(0.4, QColor(81, 113, 150));
    gradient.setColorAt(0.8, QColor(16, 56, 121));
}
195 |
196 |
// Fills the widget rect with the pre-built radial gradient.
void GLWidget::drawBackground(QPainter *painter)
{
    painter->setPen(Qt::NoPen);
    painter->setBrush(gradient);
    painter->drawRect(rect());
}
203 |
// Updates the rendered point size and repaints.
void GLWidget::setPoint(int psize)
{
    pointSize = psize;
    updateGL();
}
209 |
210 |
// Arcball-style rotation: projects the two screen positions (x1,y1) and
// (x2,y2) onto a virtual sphere of radius r centred on the widget, then
// rotates the model transform about the axis perpendicular to both
// projected points by the angle between them.
void GLWidget::drag_ball(int x1, int y1, int x2, int y2, glm::mat4& Tmodel, glm::mat4& Tcamera)
{
    float r = (float)std::min(this->height(), this->width())/3;
    float r2 = r*0.9f;
    // Screen coordinates relative to the widget centre.
    float ax = x1 - (float)this->width()/2;
    float ay = y1 - (float)this->height()/2;
    float bx = x2 - (float)this->width()/2;
    float by = y2 - (float)this->height()/2;
    float da = std::sqrt(ax*ax+ay*ay);
    float db = std::sqrt(bx*bx+by*by);
    // Pull both points inside radius r2 so the sqrt lifts below stay real.
    if(std::max(da,db)>r2){
        float dx, dy;
        if(da>db){
            dx = (r2/da-1)*ax;
            dy = (r2/da-1)*ay;
        }else{
            dx = (r2/db-1)*bx;
            dy = (r2/db-1)*by;
        }
        ax += dx; ay +=dy; bx += dx; by += dy;
    }
    // Lift the 2D points onto the sphere surface.
    float az = std::sqrt( r*r-(ax*ax+ay*ay) );
    float bz = std::sqrt( r*r-(bx*bx+by*by) );
    glm::vec3 a = glm::vec3(ax,ay,az);
    glm::vec3 b = glm::vec3(bx,by,bz);
    float theta = std::acos(glm::dot(a,b)/(r*r));
    glm::vec3 v2 = glm::cross(a,b);
    // v2 is a vector in view coordinates; v is v2 expressed in object coordinates.
    glm::vec3 v = glm::vec3(
        glm::affineInverse(Tmodel) * Tcamera
        * glm::vec4(v2[0],v2[1],v2[2],0) );
    Tmodel *= glm::rotate( theta*180/3.14f, v );
}
244 |
--------------------------------------------------------------------------------
/Duke/glwidget.h:
--------------------------------------------------------------------------------
1 | #ifndef GLWIDGET_H
2 | #define GLWIDGET_H
3 |
4 | #include
5 | #include "plyloader.h"
6 | #include
7 | #include
8 | #include
9 |
// OpenGL widget rendering a loaded PLY point cloud, with mouse-driven
// rotation (left/right drag), panning (middle drag), wheel zoom, and a
// double-click background-colour picker.
class GLWidget : public QGLWidget
{
    Q_OBJECT
public:
    explicit GLWidget(QWidget *parent = 0);
    ~GLWidget();

    int pointSize; // size used for glPointSize when drawing
    void LoadModel(QString loadpath);
    void setPoint(int psize);

protected:
    void initializeGL();
    void resizeGL(int width = 300, int height =300);
    void paintGL();

    void mousePressEvent(QMouseEvent *event);
    void mouseMoveEvent(QMouseEvent *event);
    void mouseDoubleClickEvent(QMouseEvent *event);
    void wheelEvent(QWheelEvent *event);

private:
    void draw();
    void SetupLights();
    void createGradient();
    void drawBackground(QPainter *painter);
    GLfloat rotationX;
    GLfloat rotationY;
    GLfloat rotationZ;
    GLfloat offsetX;
    GLfloat offsetY;
    QPoint lastPos; // last mouse position, used to compute drag deltas
    PlyLoader *plyloader;

    QRadialGradient gradient;
    QColor backColor;

    void drag_ball(int x1, int y1, int x2, int y2, glm::mat4& Tmodel, glm::mat4& Tcamera);
};
49 |
50 | #endif // GLWIDGET_H
51 |
--------------------------------------------------------------------------------
/Duke/graycodes.cpp:
--------------------------------------------------------------------------------
1 | #include "graycodes.h"
2 |
3 | GrayCodes::GrayCodes(int scanW, int scanH, bool useepi)
4 | {
5 | for (int i=0; i(i,j) = pixel_color;
77 | if(pixel_color > 0)
78 | pixel_color = 0;
79 | else
80 | pixel_color = 255;
81 | grayCodes[2*numOfColImgs-2*k+1].at(i,j) = pixel_color;
82 | }
83 | prevRem=rem;
84 | }
85 | }
86 |
87 | if (!useEpi){//如果不使用极限校正,则也生成行条纹
88 | for (int i=0;i < height;i++){
89 | int rem=0, num=i, prevRem=i%2;
90 | for (int k=0; k(i,j) = pixel_color;
102 | if(pixel_color > 0)
103 | pixel_color = 0;
104 | else
105 | pixel_color = 255;
106 | grayCodes[2*numOfRowImgs-2*k+2*numOfColImgs+1].at(i, j) = pixel_color;
107 | }
108 | prevRem=rem;
109 | }
110 | }
111 | }
112 | ///保存格雷码
113 | //saveGray();
114 | }
115 |
// Converts a Gray-code bit sequence (most significant bit first) to its
// decimal value.
int GrayCodes::grayToDec(cv::vector gray)//convert a gray code sequence to a decimal number
{
    int dec = 0;
    bool tmp = gray[0];
    if(tmp)
        dec += (int) pow((float)2, int(gray.size() - 1));
    // Each binary bit is the XOR of the previous binary bit and the current
    // Gray bit; accumulate its positional value when set.
    for(int i = 1; i < gray.size(); i++){
        tmp=Utilities::XOR(tmp,gray[i]);
        if(tmp)
            dec+= (int) pow((float)2,int (gray.size() - i - 1) );
    }
    return dec;
}
129 |
// Writes the generated column Gray-code patterns to <projectPath>/<i>.png.
// Only runs when epipolar rectification is enabled (no row patterns exist then).
void GrayCodes::saveGray()
{
    if (useEpi){
        for (size_t i = 0; i<2+2*numOfColImgs;i++){
            QString f = projectPath + "/" + QString::number(i) + ".png";
            cv::imwrite(f.toStdString(),grayCodes[i]);
        }
    }
}
139 |
--------------------------------------------------------------------------------
/Duke/graycodes.h:
--------------------------------------------------------------------------------
1 | #ifndef GRAYCODES_H
2 | #define GRAYCODES_H
3 |
4 | #include
5 | #include
6 | using std::ofstream;
7 | #include
8 | #include
9 | #include
10 | #include "utilities.h"
11 |
12 | #define GRAY_MAX_NUM 44
13 |
// Generates and stores the Gray-code stripe patterns projected during a scan.
class GrayCodes
{
public:
    GrayCodes(int scanW, int scanH, bool useepi);
    ~GrayCodes();

    cv::Mat grayCodes[GRAY_MAX_NUM]; // pattern images
    QString projectPath;
    bool useEpi;// whether epipolar rectification is applied (row patterns are skipped if so)

    int getNumOfImgs();

    void generateGrays();

    static int grayToDec(cv::vector gray);
    int getNumOfRowBits();
    int getNumOfColBits();

protected:

    void calNumOfImgs();
    void allocMemForImgs();
    void saveGray();

    bool imgsLoaded;
    int numOfImgs;
    int numOfRowImgs;
    int numOfColImgs;
    int currentImgNum;
    int height;
    int width;
};
46 |
47 | #endif // GRAYCODES_H
48 |
--------------------------------------------------------------------------------
/Duke/imageviewer.cpp:
--------------------------------------------------------------------------------
1 | #include "imageviewer.h"
2 | #include "ui_imageviewer.h"
3 |
4 | #include
5 | #include
6 |
bool imageseted = false; // set once showImage() has displayed a pixmap (guards saving)
8 |
// Builds the viewer window and wires the "Save Image" context-menu action.
ImageViewer::ImageViewer(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::ImageViewer)
{
    ui->setupUi(this);
    connect(ui->actionSave_Image,SIGNAL(triggered()),this,SLOT(saveimage()));
}
16 |
// Releases the generated UI.
ImageViewer::~ImageViewer()
{
    delete ui;
}
21 |
// Displays the given pixmap and records that an image is available to save.
void ImageViewer::showImage(QPixmap img)
{
    ui->imageLabel->setPixmap(img);
    imageseted=true;
}
27 |
28 | void ImageViewer::contextMenuEvent(QContextMenuEvent *)
29 | {
30 | QList actions;
31 | actions.push_back(ui->actionSave_Image);
32 | QCursor cur=this->cursor();
33 | QMenu *menu=new QMenu(this);
34 | menu->addActions(actions);
35 | menu->exec(cur.pos());
36 | }
37 |
38 | void ImageViewer::saveimage()
39 | {
40 | if (imageseted){
41 | QString dir = QFileDialog::getSaveFileName(this,tr("Save Image"),
42 | "/home/untitled.png",
43 | tr("Images (*.png *.jpg)"));
44 | ui->imageLabel->pixmap()->save(dir);
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/Duke/imageviewer.h:
--------------------------------------------------------------------------------
1 | #ifndef IMAGEVIEWER_H
2 | #define IMAGEVIEWER_H
3 |
4 | #include
5 |
6 | namespace Ui {
7 | class ImageViewer;
8 | }
9 |
// Simple widget that shows a pixmap and offers a right-click
// "Save Image" action.
class ImageViewer : public QWidget
{
    Q_OBJECT

public:
    explicit ImageViewer(QWidget *parent = 0);
    ~ImageViewer();

    void showImage(QPixmap img); // display the pixmap and enable saving

protected:
    void contextMenuEvent(QContextMenuEvent *);

private:
    Ui::ImageViewer *ui;

private slots:
    void saveimage(); // prompt for a path and save the current pixmap
};
29 |
30 | #endif // IMAGEVIEWER_H
31 |
--------------------------------------------------------------------------------
/Duke/imageviewer.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | ImageViewer
4 |
5 |
6 |
7 | 0
8 | 0
9 | 829
10 | 596
11 |
12 |
13 |
14 | Form
15 |
16 |
17 | -
18 |
19 |
20 | true
21 |
22 |
23 |
24 |
25 | 0
26 | 0
27 | 809
28 | 576
29 |
30 |
31 |
32 |
-
33 |
34 |
35 |
36 | 0
37 | 0
38 |
39 |
40 |
41 |
42 | 1280
43 | 1024
44 |
45 |
46 |
47 | Image
48 |
49 |
50 | true
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 | :/save.png:/save.png
63 |
64 |
65 | Save Image
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/Duke/main.cpp:
--------------------------------------------------------------------------------
1 | #include "mainwindow.h"
2 | #include
3 | #include
4 |
5 | int main(int argc, char *argv[])
6 | {
7 | QApplication a(argc, argv);
8 | QSplashScreen *splash = new QSplashScreen;
9 | splash->setPixmap(QPixmap(":/splash.png"));
10 | splash->show();
11 |
12 | MainWindow w;
13 | w.showMaximized();
14 | w.show();
15 | splash->finish(&w);
16 | delete splash;
17 | return a.exec();
18 | }
19 |
20 | /***************************************************
21 | * 截止到2014-10-12日以前的工作汇总:
22 | * 实现功能:新建工程(即选择一个空的文件夹作为图片的存放目录)
23 | * 打开摄像头,能够实时预览图像以及对在校准过程中采集的图像进行预览,保存图像到指定文件夹
24 | * 开关投影仪,其中关投影通过投射黑色图像实现
25 | * 投射条纹图像,投射窗口由OpenCV生成
26 | * 采集扫描图像并保存
27 | * 设置工作过程中可能用到的全局变量并写入xml文件
28 | * ***************************************************
29 | * 10-13
30 | * 对文件存放路径的生成进行了调整,新建工程直接生成calib、reconstruction、scan三个文件夹
31 | * 通过选择文件夹函数selectPath选择当前存储路径projChildPath
32 | * 对配置文件set.xml的存储进行了调整,储存在工程根目录下
33 | * 自定义类调用时,应防止头文件相互包含,可以采用前置声明方式
34 | * 目前将set作为参数储存库,有些参数不需要手工设置,应在初始化时自动设置
35 | * *****************************************************
36 | * 10-18
37 | * 完成了重建相关代码的添加,构建无错误
38 | * *****************************************************
39 | * 10-19
40 | * 优化reconstruct类代码
41 | * *****************************************************
42 | * 10-20
43 | * 成功将大恒相机移植到Qt环境,对rawbuffer的调取没有采用callback函数,而是通过定时器触发信号槽的形式
44 | * 对QImage*指针指向图片的操作,应首先将其转换为QPixmap,再进行缩放、变形等操作
45 | * ******************************************************
46 | * 10-25
47 | * 改进了扫描图像的采集方式,利用单帧采集函数采集,在投影和采集之间添加延时
48 | *
49 | *
50 | *
51 | *
52 | *
53 | *
54 | *
55 | *
56 | */
57 |
--------------------------------------------------------------------------------
/Duke/mainwindow.h:
--------------------------------------------------------------------------------
1 | #ifndef MAINWINDOW_H
2 | #define MAINWINDOW_H
3 |
4 | //Qt
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | //openCV
12 | #include
13 | #include
14 |
15 | //SLR
16 | #include "blobdetector.h"
17 | #include "cameracalibration.h"
18 | #include "dotmatch.h"
19 | #include "glwidget.h"
20 |
21 | #include "projector.h"
22 | #include "reconstruct.h"
23 | #include "mfreconstruct.h"
24 | #include "meshcreator.h"
25 |
26 | #include "set.h"
27 | #include "focusassistant.h"
28 |
29 | #include "graycodes.h"
30 | #include "multifrequency.h"
31 |
32 | #include "dahengcamera.h"
33 | #include "baslercamera.h"
34 |
35 | #include "imageviewer.h"
36 |
37 | #include "stereorect.h"
38 |
39 | #define DEBUG//用来观察标记点圆心定位精度
40 |
41 | #define WM_SNAP_CHANGE (WM_USER + 100)
42 |
43 | #define CALIBIMGNUM 14
44 |
45 | #define PATHCALIB 0
46 | #define PATHSCAN 1
47 | #define PATHRECON 2
48 |
49 | namespace Ui {
50 | class MainWindow;
51 | }
52 |
53 | class MainWindow : public QMainWindow
54 | {
55 | Q_OBJECT
56 |
57 | public:
58 | MainWindow(QWidget *parent = 0);
59 | ~MainWindow();
60 | FocusAssistant *fa;
61 | Set *setDialog;
62 | DotMatch *dm;
63 | GLWidget *displayModel;
64 |
65 | QString projectPath;
66 | QString projChildPath;
67 |
68 | int screenWidth;//主屏幕几何尺寸
69 | int screenHeight;
70 | int projectorWidth;//投影屏幕几何尺寸
71 | int projectorHeight;
72 | int scanWidth;//扫描区域尺寸
73 | int scanHeight;
74 | int cameraWidth;//相机分辨率
75 | int cameraHeight;
76 |
77 | int scanSN;//表示当前正在进行的扫描序列数,从0开始
78 |
79 | private:
80 | Ui::MainWindow *ui;
81 | CameraCalibration *calibrator;
82 | BlobDetector *blob;
83 | GrayCodes *grayCode;
84 | MultiFrequency *mf;
85 | Projector *pj;
86 | Reconstruct *reconstructor;
87 | MFReconstruct *mfr;
88 |
89 | DaHengCamera *DHC;
90 | BaslerCamera *BC;
91 | bool usebc;//是否使用Basler相机
92 | bool showFocus;//是否显示对焦辅助窗口
93 |
94 | ///表示编码及解码重建方法,依次为:经典横竖条纹格雷编码,竖条纹格雷码+极线校正,多频外差条纹+极线校正
95 | enum codePattern{GRAY_ONLY, GRAY_EPI, MULTIFREQ_EPI};
96 | codePattern codePatternUsed;
97 |
98 | void createConnections();
99 | void createCentralWindow(QWidget *parent);
100 | void captureImage(QString pref, int saveCount, bool dispaly);
101 | void findPoint();
102 | void paintPoints();
103 | void getScreenGeometry();
104 | void closeCamera();
105 | void generatePath(int type);
106 |
107 | ///---------------辅助功能---------------///
108 |
109 | void progressPop(int up);
110 | void drawCross(QPainter &p, int x, int y);
111 |
112 | QLabel *msgLabel;//show message in the bottom of the window
113 |
114 | QTimer *timer;
115 | QImage image_1;
116 | QImage image_2;
117 | cv::Mat matShow_1;
118 | cv::Mat matShow_2;
119 | QPixmap pimage_1;//由图像指针得到的.png格式图像
120 | QPixmap pimage_2;
121 |
122 | bool isProjectorOpened;
123 | bool isConfigured;
124 | int saveCount;//count the photo captured.
125 |
126 | QString path_1;
127 | QString path_2;
128 |
129 | ///与set对话框有关的变量
130 | int black_ ;
131 | int white_;
132 | bool isAutoContrast;
133 | bool isRaySampling;
134 | bool isExportObj;
135 | bool isExportPly;
136 | bool haveColor;
137 |
138 | private slots:
139 | void newproject();
140 | void openproject();
141 |
142 | void opencamera();
143 | void startfocusassistant();
144 | void closefocus();
145 | void setexposure();
146 | void readframe();
147 |
148 | void usebasler();
149 |
150 | void selectPath(int PATH);
151 |
152 | void capturecalib();
153 | void redocapture();
154 | void projectorcontrol();
155 |
156 | void calib();
157 | void calibration();
158 |
159 | void scan();
160 | void pointmatch();
161 | void refindmatch();
162 | void showhidemanual();
163 | void finishmanualmatch();
164 | void startscan();
165 | void test();
166 |
167 | void reconstruct();
168 | void startreconstruct();
169 |
170 | void set();
171 | void getSetInfo();
172 |
173 | void changePointSize(int psize);
174 | void loadTestModel();
175 | void switchlanguage();
176 |
177 | };
178 |
179 | #endif // MAINWINDOW_H
180 |
--------------------------------------------------------------------------------
/Duke/manualmatch.cpp:
--------------------------------------------------------------------------------
1 | #include "manualmatch.h"
2 | #include "ui_manualmatch.h"
3 |
4 | #include
5 | #include
6 | #include
7 |
8 |
// File-scope drawing resources shared by setImage(): the ID label font and
// pen colours for confirmed markers (green) and the current selection (red).
// NOTE(review): constructing a QFont during static initialization, before
// QApplication exists, is unsupported by Qt — verify initialization order.
QFont textfont("Calibri",50);
QColor gcolor(0,255,0);
QColor rcolor(255,0,0);

ManualMatch::ManualMatch(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::ManualMatch)
{
    ui->setupUi(this);

    // Wire the five push buttons to their slots.
    connect(ui->confirmButton,SIGNAL(clicked()),this,SLOT(confirmID()));
    connect(ui->finishButton,SIGNAL(clicked()),this,SLOT(finish()));
    connect(ui->cancelButton,SIGNAL(clicked()),this,SLOT(hide()));
    connect(ui->resetButton,SIGNAL(clicked()),this,SLOT(reset()));
    connect(ui->deleteButton,SIGNAL(clicked()),this,SLOT(deletepoint()));
    //connect(ui->idEdit,SIGNAL(textEdited(QString)),ui->confirmButton,SLOT(setEnabled(bool)));

    onMark = 0;  // start with the first dot selected
}

ManualMatch::~ManualMatch()
{
    delete ui;
}
33 |
34 | void ManualMatch::setImage()
35 | {
36 | QImage pimage_1 = QImage(leftImage.data,leftImage.cols,leftImage.rows,QImage::Format_Indexed8);
37 | QImage pimage_2 = QImage(rightImage.data,rightImage.cols,rightImage.rows,QImage::Format_Indexed8);
38 | QPixmap pcopy_1 = QPixmap::fromImage(pimage_1);
39 | QPixmap pcopy_2 = QPixmap::fromImage(pimage_2);
40 | QPainter pt_1(&pcopy_1);
41 | QPainter pt_2(&pcopy_2);
42 | pt_1.setFont(textfont);
43 | pt_2.setFont(textfont);
44 |
45 | for(size_t i = 0;i < dotInOrder.size();i++){
46 | pt_1.setPen(gcolor);
47 | pt_2.setPen(gcolor);
48 |
49 | drawCross(pt_1, dotInOrder[i][0].x ,dotInOrder[i][0].y);
50 | drawCross(pt_2, dotInOrder[i][1].x, dotInOrder[i][1].y);
51 |
52 | int ID;
53 | if (refinedCorr.size()>i){//根据refinedCorr中的数据(如果有)显示i点ID
54 | for (size_t r = 0; r < refinedCorr.size(); r++){
55 | if (i == refinedCorr[r].y)
56 | ID = refinedCorr[r].x;
57 | }
58 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,QString::number(ID));
59 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,QString::number(ID));
60 | }
61 | else{//若refinedPoint还未被赋予空间,根据correspond中的数据显示i点ID
62 | bool idexist = false;//表示ID点的对应点存在,只关系到ID的显示状态
63 | for (size_t c = 0; c < correspond.size(); c++){
64 | if (i == correspond[c].y){
65 | ID = correspond[c].x;
66 | idexist = true;
67 | }
68 | }
69 | if (idexist){
70 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,QString::number(ID));
71 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,QString::number(ID));
72 | }
73 | else{
74 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,"?");
75 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,"?");
76 | }
77 | }
78 | }
79 |
80 | ///用红色方框标记当前准备赋予编号的点
81 | pt_1.setPen(rcolor);
82 | pt_2.setPen(rcolor);
83 | pt_1.drawRect(dotInOrder[onMark][0].x-15,dotInOrder[onMark][0].y-15,30,30);
84 | pt_2.drawRect(dotInOrder[onMark][1].x-15,dotInOrder[onMark][1].y-15,30,30);
85 |
86 | ui->leftImage->setPixmap(pcopy_1);
87 | ui->rightImage->setPixmap(pcopy_2);
88 |
89 | int ID;
90 | if (!refinedCorr.empty()){
91 | if (refinedCorr[onMark].x >= 0){//如果onMark点已经被标记过,则在idEdit中显示ID值
92 | ID = refinedCorr[onMark].x;
93 | ui->idEdit->setText(QString::number(ID));
94 | }
95 | else
96 | ui->idEdit->clear();
97 | }
98 | }
99 |
100 | void ManualMatch::confirmID()
101 | {
102 | if (refinedCorr.size() == 0)
103 | refinedCorr.resize(dotInOrder.size(),cv::Point2i(-1,-1));
104 |
105 | int id = ui->idEdit->text().toInt();
106 | cv::Point2i corr;
107 | corr.x = id;
108 | corr.y = onMark;
109 | refinedCorr.at(onMark) = corr;
110 |
111 | if (onMark == dotInOrder.size() - 1)
112 | onMark = 0;
113 | else
114 | onMark++;
115 | ui->current->setText(QString::number(onMark));
116 |
117 | setImage();//根据新的信息重绘图像
118 | }
119 |
120 | void ManualMatch::deletepoint()
121 | {
122 | if (onMark < dotInOrder.size() - 1){
123 | dotInOrder.erase(dotInOrder.begin() + onMark);
124 | correspond.erase(correspond.begin() + onMark);
125 | //refinedCorr.erase(refinedCorr.begin() + onMark);
126 | }
127 | else{
128 | dotInOrder.erase(dotInOrder.end() - 1);
129 | correspond.erase(correspond.end() - 1);
130 | //refinedCorr.erase(refinedCorr.end());
131 | }
132 | setImage();
133 | }
134 |
135 | void ManualMatch::finish()
136 | {
137 | if (refinedCorr.size() == 0){
138 | for (size_t i = 0;i < correspond.size(); i++){
139 | refinedCorr.push_back(correspond[i]);
140 | }
141 | }
142 | else{
143 | for (size_t i = 0;i < refinedCorr.size(); i++){
144 | if (refinedCorr[i].x < 0)
145 | QMessageBox::warning(NULL,"Manual Match",tr("Point ") + QString::number(i) + tr(" hasn't been marked."));
146 | }
147 | }
148 | this->hide();
149 | emit outputdata();
150 | }
151 |
// Triggered by the Reset button: discard all manually assigned IDs, move the
// selection back to the first dot and redraw.
void ManualMatch::reset()
{
    refinedCorr.clear();
    onMark = 0;
    setImage();
}
158 |
159 | void ManualMatch::keyPressEvent(QKeyEvent *e)
160 | {
161 | if (e->key() == Qt::Key_Enter)
162 | confirmID();
163 | else if (e->key() == Qt::Key_Delete)
164 | deletepoint();
165 | }
166 |
167 | void ManualMatch::drawCross(QPainter &p, int x, int y)
168 | {
169 | int len = 25;
170 | p.drawLine(x - len, y, x + len, y);
171 | p.drawLine(x, y - len, x, y + len);
172 | }
173 |
174 |
175 |
176 |
--------------------------------------------------------------------------------
/Duke/manualmatch.h:
--------------------------------------------------------------------------------
1 | #ifndef MANUALMATCH_H
2 | #define MANUALMATCH_H
3 |
4 | #include
5 | #include
6 |
7 | // OpenCV
8 | #include
9 | #include
10 |
11 | namespace Ui {
12 | class ManualMatch;
13 | }
14 |
// Dialog for manually assigning IDs to marker-dot correspondences that the
// automatic matcher got wrong or missed.
class ManualMatch : public QWidget
{
    Q_OBJECT

public:
    explicit ManualMatch(QWidget *parent = 0);
    ~ManualMatch();

    cv::Mat leftImage;   //grayscale camera images being annotated
    cv::Mat rightImage;
    //NOTE(review): template arguments of the cv::vector members below were
    //lost in this listing — confirm against the original header.
    cv::vector correspond;   //automatic correspondences (ID, sequence number)
    cv::vector refinedCorr;//x is the point ID, y is the sequence number
    cv::vector> dotInOrder;//vector<vector<left-image dot coords, right-image dot coords>>

    // Redraw both labels with crosses, IDs and the current-selection marker.
    void setImage();

private:
    Ui::ManualMatch *ui;

    // Paint a "+" marker centred on (x, y).
    void drawCross(QPainter &p, int x, int y);

    size_t onMark;//index of the dot currently awaiting an ID

protected:
    void keyPressEvent(QKeyEvent *e);

private slots:
    void confirmID();
    void deletepoint();//delete mismatched point
    void finish();//triggered by the Finish button
    void reset();//triggered by the Reset button
signals:
    void outputdata();//emitted by finish(); tells DotMatch to process refinedCorr via its onfinishmanual() slot
};
49 |
50 | #endif // MANUALMATCH_H
51 |
--------------------------------------------------------------------------------
/Duke/manualmatch.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | ManualMatch
4 |
5 |
6 |
7 | 0
8 | 0
9 | 815
10 | 575
11 |
12 |
13 |
14 |
15 | 0
16 | 0
17 |
18 |
19 |
20 | Manual Match Assistant
21 |
22 |
23 |
24 | :/splash.png:/splash.png
25 |
26 |
27 | true
28 |
29 |
30 |
31 | 6
32 |
33 |
34 | 10
35 |
36 |
37 | 6
38 |
39 |
40 | 10
41 |
42 |
43 | 10
44 |
45 |
46 | 15
47 |
48 | -
49 |
50 |
51 | 20
52 |
53 |
-
54 |
55 |
56 |
57 | 0
58 | 0
59 |
60 |
61 |
62 |
63 | 640
64 | 512
65 |
66 |
67 |
68 | QFrame::Box
69 |
70 |
71 | Left Image
72 |
73 |
74 | true
75 |
76 |
77 |
78 | -
79 |
80 |
81 |
82 | 0
83 | 0
84 |
85 |
86 |
87 |
88 | 640
89 | 512
90 |
91 |
92 |
93 | QFrame::Box
94 |
95 |
96 | Right Image
97 |
98 |
99 | true
100 |
101 |
102 |
103 |
104 |
105 | -
106 |
107 |
108 | 0
109 |
110 |
111 | 100
112 |
113 |
114 | 100
115 |
116 |
-
117 |
118 |
119 | 10
120 |
121 |
-
122 |
123 |
124 |
125 | 0
126 | 0
127 |
128 |
129 |
130 | <html><head/><body><p>Current Point Num:</p></body></html>
131 |
132 |
133 |
134 | -
135 |
136 |
137 |
138 | 0
139 | 0
140 |
141 |
142 |
143 | 0
144 |
145 |
146 |
147 | -
148 |
149 |
150 |
151 | 0
152 | 0
153 |
154 |
155 |
156 | Set ID
157 |
158 |
159 |
160 | -
161 |
162 |
163 |
164 | 0
165 | 0
166 |
167 |
168 |
169 |
170 | 60
171 | 16777215
172 |
173 |
174 |
175 | false
176 |
177 |
178 |
179 | -
180 |
181 |
182 | Confirm
183 |
184 |
185 |
186 | -
187 |
188 |
189 | Delete
190 |
191 |
192 |
193 |
194 |
195 | -
196 |
197 |
198 | Qt::Horizontal
199 |
200 |
201 |
202 | 40
203 | 20
204 |
205 |
206 |
207 |
208 | -
209 |
210 |
211 | 10
212 |
213 |
-
214 |
215 |
216 | Finish
217 |
218 |
219 |
220 | -
221 |
222 |
223 | Reset
224 |
225 |
226 |
227 | -
228 |
229 |
230 | Cancel
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
--------------------------------------------------------------------------------
/Duke/meshcreator.cpp:
--------------------------------------------------------------------------------
1 | #include "meshcreator.h"
2 |
// Cache the cloud dimensions and allocate the per-pixel vertex-index table
// (one int per pixel; released in the destructor). The cloud itself is not
// owned by this class.
MeshCreator::MeshCreator(PointCloudImage *in)
{
    cloud = in;
    w = cloud->getWidth();
    h = cloud->getHeight();
    pixelNum = new int[w*h];
}
10 |
11 | MeshCreator::~MeshCreator(void)
12 | {
13 | delete pixelNum;
14 | }
15 |
// NOTE(review): this listing is garbled by extraction — several original
// lines (loop headers, stream expressions, the start of exportPlyMesh) were
// collapsed together and template/operator text was stripped. The comments
// below describe the apparent intent only; verify against the original file.
//
// exportObjMesh: write the point cloud as a Wavefront OBJ — first all "v"
// vertex lines (numbering vertices via pixelNum), then "f" face lines built
// from each pixel and its right / lower-right neighbours.
void MeshCreator::exportObjMesh(QString path)
{
    int count = 1;                 // OBJ vertex indices start at 1
    bool return_val;
    cv::Point3f point;
    std::ofstream out1;
    std::string cstr = path.toStdString();
    out1.open(cstr);

    // Vertex pass (garbled: loop headers and the getPoint call were merged).
    for(int i = 0; i getPoint(i, j, point);
    if(return_val){
    pixelNum[access(i, j)]=count;
    out1<<"v "<< point.x<< " "<< point.y<< " "< 0&&i < w - 1)
    v3 = pixelNum[access(i+1,j-1)];
    else
    v3=0;

    if(v1!=0 && v2!=0 && v3!=0)
    out1<<"f "<< v1<<"/"<getPoint(i, j, point)){
    pixelNum[access(i, j)] = vertexCount;
    vertexCount++;
    }
    else
    pixelNum[access(i, j)]=0;
    }
    }
    // exportPlyMesh continues here: count the faces so the PLY header can
    // declare them up front.
    int facesCount = 0;//find faces num
    for(int i=0; i 0) && (i < w-1))
    v3=pixelNum[access(i+1,j-1)];
    else
    v3=0;

    if(v1!=0 && v2!=0 && v3!=0)
    facesCount++;
    }
    }

    //ply headers
    out1<<"ply\n";
    out1<<"format ascii 1.0\n";
    out1<<"element vertex " << vertexCount << "\n";
    out1<<"property float x\n";
    out1<<"property float y\n";
    out1<<"property float z\n";
    out1<<"property uchar red\n";
    out1<<"property uchar green\n";
    out1<<"property uchar blue\n";
    out1<<"element face " << facesCount << "\n";
    out1<<"property list uchar int vertex_indices\n";
    out1<<"end_header\n";

    // Vertex body: position plus BGR colour re-ordered to RGB.
    for(int i=0; igetPoint(i, j, point, color);
    if(return_val){
    out1<< point.x << " " << point.y << " " << point.z << " "<< color[2] << " " << color[1] << " " << color[0] << "\n";
    //out1<< point.x << " " << point.y << " " << point.z << " "<< (int) color[2] << " " << (int) color[1] << " " << (int) color[0] << "\n";
    }
    else
    pixelNum[access(i, j)]=0;
    }
    }

    // Face body: one "3 v1 v3 v2" triangle per pixel whose neighbours exist.
    for(int i = 0; i < w;i++){
    for(int j = 0; j < h; j++){
    int v1 = pixelNum[access(i, j)], v2, v3;

    if(i < w - 1)
    v2 = pixelNum[access(i + 1, j)];
    else
    v2 = 0;

    if(j 0 && i< w - 1)
    v3 = pixelNum[access(i + 1, j - 1)];
    else
    v3 = 0;

    if(v1!=0 && v2!=0 && v3!=0)
    out1 << "3 " << v1 << " " << v3 << " " << v2 << "\n";
    }
    }
    out1.close();
}
167 |
168 | int MeshCreator::access(int i, int j)
169 | {
170 | return i * h + j;
171 | }
172 |
173 |
--------------------------------------------------------------------------------
/Duke/meshcreator.h:
--------------------------------------------------------------------------------
1 | #ifndef MESHCREATOR_H
2 | #define MESHCREATOR_H
3 |
4 | #include "pointcloudimage.h"
5 | #include
6 |
7 | class MeshCreator
8 | {
9 | public:
10 | MeshCreator(PointCloudImage *in);
11 | ~MeshCreator(void);
12 | void exportObjMesh(QString path);
13 | void exportPlyMesh(QString path);
14 | private:
15 | int *pixelNum;
16 | PointCloudImage *cloud;
17 | int MeshCreator::access(int i,int j);
18 | int MeshCreator::access(int i,int j, int z);
19 |
20 | int w;
21 | int h;
22 | };
23 |
24 | #endif // MESHCREATOR_H
25 |
--------------------------------------------------------------------------------
/Duke/mfreconstruct.cpp:
--------------------------------------------------------------------------------
1 | #include "mfreconstruct.h"
2 | #include
3 |
// File-scope state: processl alternates between left/right image loads in
// loadCamImgs(); PI is the circle constant used by the phase computations.
// NOTE(review): a mutable global named PI is fragile — confirm nothing
// reassigns it.
bool processl = true;
float PI = 3.1416;

MFReconstruct::MFReconstruct(QObject *parent) :
    QObject(parent)
{
    // Initialize pointers and the per-camera bookkeeping arrays
    // (index 0 = left camera, index 1 = right camera).
    mask = NULL;
    decRows = NULL;
    decCols = NULL;
    points3DProjView = NULL;
    cameras = new VirtualCamera[2];//two VirtualCamera instances, kept in cameras[2]
    camsPixels = new cv::vector*[2];
    calibFolder = new QString[2];
    scanFolder = new QString[2];
    imgPrefix = new QString[2];
    pathSet = false;
    imgSuffix = ".png";//image suffix is assumed to be .png for now
    numOfColBits = 12;
    numberOfImgs = 14;
    // NOTE(review): the new[] allocations above are never visibly released
    // (no destructor is declared in the header) — confirm the object's
    // lifetime spans the whole application run.
}
24 |
25 |
// Record the scan geometry and thresholds, derive the per-camera folder
// layout under savePath, create the stereo rectifier, and load the
// calibration data (warning the user if that fails).
void MFReconstruct::getParameters(int scansn, int scanw, int scanh,
                                  int camw, int camh, int blackt, int whitet, QString savePath)
{
    scanSN = scansn;
    scan_w = scanw;
    scan_h = scanh;
    cameraWidth = camw;
    cameraHeight = camh;
    blackThreshold = blackt;
    whiteThreshold = whitet;
    savePath_ = savePath;//equal to projectPath

    sr = new stereoRect(savePath, cv::Size(camw,camh));
    sr->getParameters();

    // Folder layout: <savePath>/scan/{left,right}/<scanSN>/{L,R}<i>.png and
    // <savePath>/calib/{left,right}/.
    for (int i = 0; i < 2; i++){
        camsPixels[i] = NULL;
        QString pathI;
        if (i == 0){
            pathI = savePath + "/scan/left/";//Load Images for reconstruction
        }
        else{
            pathI = savePath + "/scan/right/";
        }
        scanFolder[i] = pathI;
        if (i == 0){
            imgPrefix[i] = QString::number(scanSN) + "/L";
            calibFolder[i] = savePath + "/calib/left/";
        }
        else{
            imgPrefix[i] = QString::number(scanSN) +"/R";
            calibFolder[i] = savePath + "/calib/right/";
        }
    }
    pathSet = true;

    if (!loadCameras())
        QMessageBox::warning(NULL,tr("Get Param"),tr("Load Calibration files failed."));
}
65 |
66 |
// Load intrinsics, distortion, rotation, translation and the fundamental
// matrix for both cameras from their calibration folders. Returns false as
// soon as a camera matrix fails to load.
bool MFReconstruct::loadCameras()//Load calibration data into camera[i]
{
    bool loaded;
    for(int i = 0; i < 2; i++)
    {
        QString path;
        path = calibFolder[i];
#ifndef USE_STEREOCALIB_DATA
        path += "cam_matrix.txt";
        loaded = cameras[i].loadCameraMatrix(path);//defined in VirtualCamera
        if(!loaded)
            break;

        path = calibFolder[i];
        path += "cam_distortion.txt";
        cameras[i].loadDistortion(path);//note: loadDistortion reads a 5x1 matrix
#else
        path += "cam_stereo.txt";
        loaded = cameras[i].loadCameraMatrix(path);//defined in VirtualCamera
        if(!loaded)
            break;
        path = calibFolder[i];
        path += "distortion_stereo.txt";
        cameras[i].loadDistortion(path);
#endif
        path = calibFolder[i];
        path += "cam_rotation_matrix.txt";
        cameras[i].loadRotationMatrix(path);

        path = calibFolder[i];
        //NOTE(review): "vectror" is misspelled but must match the file name
        //written by the calibration code — do not "fix" it here alone.
        path += "cam_trans_vectror.txt";
        cameras[i].loadTranslationVector(path);

        path = savePath_;
        path += "/calib/fundamental_stereo.txt";//tests showed the stereo-calibrated F outperforms the individually calibrated one
        cameras[i].loadFundamentalMatrix(path);

        cameras[i].height = cameraHeight;
        cameras[i].width = cameraWidth;
    }
    return loaded;
}
109 |
110 |
111 | bool MFReconstruct::loadCamImgs(QString folder, QString prefix, QString suffix)//load camera images
112 | {
113 | cv::Mat tmp;
114 | if(!camImgs.empty())
115 | unloadCamImgs();
116 |
117 | sr->calParameters();
118 |
119 | for(int i = 0; i < numberOfImgs; i++){
120 | QString path;
121 | path = folder;//这里folder要达到left/right一层
122 | path += prefix + QString::number(i) + suffix;
123 | tmp.release();
124 |
125 | tmp = cv::imread(path.toStdString(),0);//flag=0 Return a grayscale image
126 |
127 | if (processl){//第一次调用loadImg时认为是加载左相机图像
128 | sr->doStereoRectify(tmp,true);
129 | //cv::imwrite(path.toStdString(),tmp);
130 | }
131 | else{
132 | sr->doStereoRectify(tmp,false);
133 | //cv::imwrite(path.toStdString(),tmp);
134 | }
135 |
136 | if(tmp.empty()){
137 | QMessageBox::warning(NULL,tr("Load Images"),tr("Scan Images not found!"));
138 | break;
139 | }
140 | else{
141 | camImgs.push_back(tmp);
142 | }
143 | }
144 | processl = !processl;//每调用一次加载图像都对是否处理左图像取反
145 | return !tmp.empty();
146 | }
147 |
148 |
// NOTE(review): this listing is garbled by extraction — the body of
// unloadCamImgs, the signature of runReconstruction and the camsPixels[i]
// allocation were collapsed into a single line below. Comments describe the
// apparent intent only; verify against the original file.
void MFReconstruct::unloadCamImgs()//unload camera images
{
    if(camImgs.size()){
    // (merged span: release each loaded image, clear camImgs, then
    // runReconstruction() begins — allocating, per camera, one phase vector
    // per pixel.)
    for(int i = 0; i[cameraHeight * cameraWidth];//record, for every camera pixel, its horizontal coordinate in the projection area

    ///camera is assigned while loadCamImgs runs
    camera = &cameras[i];
    runSucess = loadCamImgs(scanFolder[i], imgPrefix[i], imgSuffix);
    camPixels = camsPixels[i];

    ///By this point the camera instance's position/width/height have been set; camera corresponds to cameras[i]

    if(!runSucess)//abort if loading the images failed
    break;
    else{
    computeShadows();
    decodePatterns();
    unloadCamImgs();
    }
    }
    if(runSucess){
    points3DProjView = new PointCloudImage(scan_w, scan_h, false); //last bool flags whether to colour the cloud; changed to false here
    triangulation(camsPixels[0],cameras[0],camsPixels[1],cameras[1]);
    }
    return runSucess;
}
188 |
189 |
// Build the shadow mask: a pixel is usable (mask = 1) when its brightness
// under the all-white projection exceeds its brightness under the all-black
// projection by more than blackThreshold.
void MFReconstruct::computeShadows()
{
    int w = cameraWidth;
    int h = cameraHeight;
    mask.release();
    mask = cv::Mat(h, w, CV_8U,cv::Scalar(0));//note: h = number of rows, w = number of cols
    for(int col = 0; col < w; col++){
        for(int row = 0; row < h; row++){
            float blackVal, whiteVal;
            blackVal = (float) Utilities::matGet2D(camImgs[1], row, col);//camImgs[1] is the all-black image
            whiteVal = (float) Utilities::matGet2D(camImgs[0], row, col);//camImgs[0] is the all-white image
            if(whiteVal - blackVal > blackThreshold)//strong black/white contrast means the pixel is not in shadow
                Utilities::matSet2D(mask, row, col, 1);
            else
                Utilities::matSet2D(mask, row, col, 0);
        }
    }
}
208 |
209 |
// For every non-shadow pixel compute its absolute phase via getPhase() and
// append it to that pixel's entry in camPixels. A debug image of the phase
// map is written to <savePath_>/p.png.
void MFReconstruct::decodePatterns()
{
    int w = cameraWidth;
    int h = cameraHeight;
    float phase;//phase of pixel (col,row) in the unwrapped absolute-phase map
    cv::Mat out(h,w,CV_8U);
    for(int row = 0; row < h; row++){
        for(int col = 0; col < w; col++){
            ///mask was built from the camera images, so its size is exactly w*h
            if(mask.at(row, col)){//if the pixel is not shadow reconstruct
                //NOTE(review): the template arguments of .at() were lost in
                //this listing (likely at<uchar>) — verify against the original.
                getPhase(row, col, phase);
                camPixels[(row*cameraWidth+col)].push_back(phase);
                out.at(row,col) = phase;
            }
        }
    }
    //out.convertTo(out,CV_8UC1);
    cv::imwrite(savePath_.toStdString()+"/p.png",out);
}
229 |
230 |
231 | void MFReconstruct::getPhase(int row, int col, float &phase)
232 | {
233 | double P[3];//三组相对相位
234 | float P12, P23, P123;//展开相位
235 |
236 | ///prosses column images
237 | for(int count = 0; count < 3; count++){//3表示共进行3次4步相移
238 | int G1, G2, G3, G4;//点在四步相移图像中的灰度值
239 | G1 = Utilities::matGet2D(camImgs[4*count + 2], row, col);
240 | G2 = Utilities::matGet2D(camImgs[4*count + 3], row, col);
241 | G3 = Utilities::matGet2D(camImgs[4*count + 4], row, col);
242 | G4 = Utilities::matGet2D(camImgs[4*count + 5], row, col);
243 |
244 | ///计算相对相位,注意结果是弧度制还是角度制,加PI使取值范围为
245 | /// 0~2PI
246 | if (G4 == G2 && G1 > G3)
247 | P[count] = 0;
248 | else if (G4 == G2 && G1 < G3)
249 | P[count] = PI;
250 | else if (G1 == G3 && G4 > G2)
251 | P[count] = 3*PI/2;
252 | else if (G1 == G3 && G4 < G2)
253 | P[count] = PI/2;
254 | else if (G1 == G3 && G4 == G2)
255 | Utilities::matSet2D(mask, row, col, 0);
256 | else if (G1 < G3)
257 | P[count] = atan(float((G4-G2)/(G1-G3))) + PI;
258 | else if (G1 > G3 && G4 > G2)
259 | P[count] = atan(float((G4-G2)/(G1-G3))) + 2*PI;
260 | else
261 | P[count] = atan(float((G4-G2)/(G1-G3)));
262 | }
263 |
264 | ///将相对相位利用外差原理进行相位展开
265 | P12 = (P[0] > P[1])?(P[0] - P[1]):(P[0] - P[1] + 2*PI);
266 | P23 = (P[1] > P[2])?(P[1] - P[2]):(P[1] - P[2] + 2*PI);
267 | P123 = (P12 > P23)?(P12 - P23):(P12 - P23 + 2*PI);
268 | phase = P123/(2*PI)*255;
269 | }
270 |
271 |
// For every image row, match left and right pixels whose absolute phases
// agree (|difference| < 0.1) and triangulate each match through the
// rectification matrix Q into a 3-D point; for scans after the first, the
// point is transformed into the first scan's coordinate frame before being
// added to points3DProjView.
void MFReconstruct::triangulation(cv::vector *cam1Pixels, VirtualCamera camera1, cv::vector *cam2Pixels, VirtualCamera camera2)
{
    int width = cameraWidth;
    int height = cameraHeight;

    cv::Mat matCoordTrans(3,4,CV_32F);//rigid transform aligning this scan's coordinates to the first scan's
    if (scanSN > 0){
        ///load the rigid-body transform matrix
        QString loadPath = savePath_ + "/scan/transfer_mat" + QString::number(scanSN) + ".txt";
        camera1.loadMatrix(matCoordTrans, 3, 4, loadPath.toStdString());
    }

    for (int i = 0; i < height;i++){//walk the image rows
        for (int j = 0;j < width;j++){//walk the image columns
            cv::vector cam1Pix = cam1Pixels[i * width + j];//note: cam1Pix is a vector (template args lost in this listing)
            if (cam1Pix.size() == 0)
                continue;
            for (int k = 0;k < width;k++){
                cv::vector cam2Pix = cam2Pixels[i * width + k];

                if (cam2Pix.size() == 0)
                    continue;

                if (fabs(cam1Pix[0] - cam2Pix[0]) < 0.1){//left pixel (j,i) matches right pixel (k,i)
                    ///build a homogeneous coordinate from the left pixel's 2-D position and the disparity of the matched pair
                    cv::Point2f camPixelUDL = Utilities::undistortPoints(cv::Point2f(j, i),camera1);
                    cv::Point2f camPixelUDR = Utilities::undistortPoints(cv::Point2f(k, i),camera2);
                    double point2D[] = {camPixelUDL.x, camPixelUDL.y, camPixelUDL.x - camPixelUDR.x, 1};//2-D coordinate
                    cv::Mat p2D = cv::Mat(4,1,CV_64F,point2D);//coordinate matrix
                    cv::Mat p3D;
                    p3D = sr->Q * p2D;//inspect while debugging to confirm the computation
                    //NOTE(review): .at template arguments (likely at<double>
                    //here and at<float> below) were lost in this listing.
                    double x = p3D.at(0,0);
                    double y = p3D.at(1,0);
                    double z = p3D.at(2,0);
                    double w = p3D.at(3,0);
                    double ax = x/w;//dehomogenize
                    double ay = y/w;
                    double az = z/w;

                    cv::Point3f interPoint(ax,ay,az);
                    cv::Point3f refinedPoint;

                    ///the branch below prepares stitching of clouds from repeated scans
                    if (scanSN > 0){
                        float point[] = {interPoint.x, interPoint.y, interPoint.z, 1};
                        cv::Mat pointMat(4, 1, CV_32F, point);
                        cv::Mat refineMat(3, 1, CV_32F);
                        refineMat = matCoordTrans * pointMat;
                        refinedPoint.x = refineMat.at(0, 0);
                        refinedPoint.y = refineMat.at(1, 0);
                        refinedPoint.z = refineMat.at(2, 0);
                    }
                    else
                        refinedPoint = interPoint;
                    points3DProjView->addPoint(i, j, refinedPoint);
                    break;//once a left pixel has matched, stop scanning the remaining right pixels
                }
                else
                    continue;
            }
        }
    }
}
335 |
336 |
--------------------------------------------------------------------------------
/Duke/mfreconstruct.h:
--------------------------------------------------------------------------------
1 | #ifndef MFRECONSTRUCT_H
2 | #define MFRECONSTRUCT_H
3 |
4 | #include
5 |
6 | #include
7 |
8 | #include "virtualcamera.h"
9 | #include "pointcloudimage.h"
10 | #include "stereorect.h"
11 |
// Multi-frequency (heterodyne phase-shift) reconstruction pipeline: loads
// calibration and scan images for both cameras, decodes per-pixel phase,
// and triangulates matched pixels into a point cloud.
class MFReconstruct : public QObject
{
    Q_OBJECT
public:
    explicit MFReconstruct(QObject *parent = 0);
    void getParameters(int scansn, int scanw, int scanh, int camw, int camh, int blackt, int whitet, QString savePath);
    bool runReconstruction();

    PointCloudImage *points3DProjView;//output cloud, created by runReconstruction()

private:
    int scanSN;//serial number of the scan being reconstructed; also the output model's serial number

    QString savePath_;//same as projectPath
    QString *calibFolder;
    QString *scanFolder;
    QString *imgPrefix;
    QString imgSuffix;

    int numberOfImgs;
    int numOfColBits;

    int blackThreshold;
    int whiteThreshold;

    cv::Mat mask;//matrix with vals 0 and 1 , CV_8U , uchar
    cv::Mat decRows;
    cv::Mat decCols;
    //NOTE(review): template arguments of the cv::vector members below were
    //lost in this listing — confirm against the original header.
    cv::vector camImgs;

    cv::vector **camsPixels;//per camera: one phase vector per pixel
    cv::vector *camPixels;  //alias for the camera currently being decoded

    bool pathSet;
    int cameraWidth;
    int cameraHeight;
    int scan_w;
    int scan_h;

    VirtualCamera *camera;//general functions use this instead of camera1 or camera2
    VirtualCamera *cameras;
    stereoRect *sr;

    bool loadCameras();
    bool loadCamImgs(QString folder, QString prefix, QString suffix);
    void unloadCamImgs();
    void computeShadows();
    void decodePatterns();
    void getPhase(int row, int col, float &phase);
    void triangulation(cv::vector *cam1Pixels, VirtualCamera cameras1, cv::vector *cam2Pixels, VirtualCamera cameras2);

signals:

public slots:

};
68 |
69 | #endif // MFRECONSTRUCT_H
70 |
--------------------------------------------------------------------------------
/Duke/multifrequency.cpp:
--------------------------------------------------------------------------------
1 | #include "multifrequency.h"
2 |
// Fringe frequencies (periods across the projector width) of the three
// phase-shift groups used by the heterodyne method.
int frequency[3] = {70,64,59};
int testCount = 0;//used for testing the multi-frequency heterodyne projection

MultiFrequency::MultiFrequency(QObject *parent, int projwidth, int projheight) :
    QObject(parent)
{
    // Record the projector resolution at which the patterns are generated.
    projW = projwidth;
    projH = projheight;
    imgsLoaded = false;
}
13 |
// Generate the projection set: index 0 = all-white, index 1 = all-black,
// then MULTI_NUM sinusoidal fringe images (3 frequencies x 4 phase steps).
void MultiFrequency::generateMutiFreq()
{
    MultiFreqImages[0] = cv::Mat(projH,projW,CV_8U,cvScalar(255));
    MultiFreqImages[1] = cv::Mat(projH,projW,CV_8U,cvScalar(0));
    for (size_t i = 2; i < MULTI_NUM + 2; i++)//+2 accounts for the all-white/all-black images
    {
        MultiFreqImages[i] = cv::Mat(projH, projW, CV_8U);
    }
    for (size_t f = 0; f < 3; f++){
        for (size_t phi = 0; phi < 4; phi++){
            cv::Mat temp(projH,projW,CV_8U);
            for (size_t w = 0; w < projW; w++){
                for (size_t h = 0; h < projH; h++){
                    // intensity = 135 + 79*cos(2*PI*w*f/projW + phi*PI/2), range 56..214
                    // NOTE(review): the .at template argument was lost in
                    // this listing (likely at<uchar>) — verify the original.
                    temp.at(h,w) = 135+79*cos(float(PI*2*w*frequency[f]/projW+PI*phi/2));
                }
            }
            MultiFreqImages[4*f + phi + 2] = temp;//group f, phase step phi
        }
    }
}
34 |
35 |
36 | int MultiFrequency::getNumOfImgs()
37 | {
38 | return 14;
39 | }
40 |
--------------------------------------------------------------------------------
/Duke/multifrequency.h:
--------------------------------------------------------------------------------
1 | #ifndef MULTIFREQUENCY_H
2 | #define MULTIFREQUENCY_H
3 |
4 | #define MULTI_NUM 12
5 | #define PI 3.1416
6 |
7 | #include
8 | #include
9 | #include
10 |
// Generates the multi-frequency phase-shift fringe patterns displayed by
// the projector during scanning.
class MultiFrequency : public QObject
{
    Q_OBJECT
public:
    // projwidth/projheight default to the projector's native 1280x1024.
    MultiFrequency(QObject *parent = 0, int projwidth = 1280, int projheight = 1024);
    void generateMutiFreq();                  // fills MultiFreqImages: 12 fringes + white + black
    int getNumOfImgs();                       // number of images in MultiFreqImages
    cv::Mat MultiFreqImages[MULTI_NUM + 2];   // [0]=white, [1]=black, [2..13]=fringes

private:
    bool imgsLoaded;   // set false in the ctor; NOTE(review): never updated elsewhere in this file
    int projW;         // projector width  (pixels)
    int projH;         // projector height (pixels)
};
25 |
26 | #endif // MULTIFREQUENCY_H
27 |
--------------------------------------------------------------------------------
/Duke/plyloader.cpp:
--------------------------------------------------------------------------------
1 | #include "plyloader.h"
2 | #include
3 |
// Trivial constructor: only wires the loader into the QObject parent/child
// ownership tree; members are filled in by LoadModel().
PlyLoader::PlyLoader(QObject *parent) :
    QObject(parent)
{
}
8 |
9 | bool PlyLoader::LoadModel(QString filename)
10 | {
11 | if (filename != NULL)
12 | {
13 | /*
14 | QFile file(filename);
15 | if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
16 | return -1;
17 | */
18 | string name = filename.toStdString();
19 | FILE *file = NULL;
20 | file = fopen(name.data(),"r");
21 | fseek(file, 0, SEEK_END);//获取文件全部数据
22 | mp_vertexXYZ = (float*)malloc(10000000);//long int ftell (FILE *stream); Returns the current value of the position indicator of the stream.
23 | fseek(file, 0, SEEK_SET);//操作符指向文件流开头
24 |
25 | if (file)
26 | {
27 | int i = 0;
28 | char buffer[3000];
29 | fgets(buffer, 300, file);//char *fgets (char *str, int num, FILE *stream); Get string from stream
30 | // READ HEADER
31 | // Find number of vertexes
32 | while (strncmp("element vertex", buffer,strlen("element vertex")) != 0 )//int strncmp (const char *str1, const char *str2, size_t num); Compare characters of two strings
33 | {
34 | fgets(buffer, 300, file); //如果一直没有找到element vertex字符串,就一直从文件流中取出300个元素,直到找到为止
35 | }
36 | strcpy(buffer, buffer + strlen("element vertex"));//char *strcpy ( char *destination, const char *source ); Copy string
37 | sscanf(buffer, "%i", &this->m_totalConnectedPoints);//int sscanf ( const char * s, const char * format, ...); Read formatted data from string
38 | // go to end_header
39 | while (strncmp( "end_header", buffer,strlen("end_header")) != 0 )
40 | {
41 | fgets(buffer, 600, file);
42 | }
43 |
44 | // read vertices
45 | i =0;
46 | for (int iterator = 0; iterator < this->m_totalConnectedPoints; iterator++)
47 | {
48 | fgets(buffer, 600, file);
49 | sscanf(buffer,"%f %f %f", &mp_vertexXYZ[i], &mp_vertexXYZ[i+1], &mp_vertexXYZ[i+2]);
50 | i += 3;
51 | }
52 | fclose(file);
53 | }
54 | return true;
55 | }
56 | else
57 | return false;
58 | }
59 |
--------------------------------------------------------------------------------
/Duke/plyloader.h:
--------------------------------------------------------------------------------
1 | #ifndef PLYLOADER_H
2 | #define PLYLOADER_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | using namespace std;
11 |
// Minimal ASCII PLY reader: extracts only vertex positions.
class PlyLoader : public QObject
{
    Q_OBJECT
public:
    explicit PlyLoader(QObject *parent = 0);
    bool LoadModel(QString filename);   // parse the file; fills the two members below
    float* mp_vertexXYZ;                // x/y/z triples, 3 floats per vertex (malloc'd in LoadModel)
    int m_totalConnectedPoints;         // vertex count from the "element vertex" header line
};
21 |
22 | #endif // PLYLOADER_H
23 |
--------------------------------------------------------------------------------
/Duke/pointcloudimage.cpp:
--------------------------------------------------------------------------------
1 | #include "pointcloudimage.h"
2 |
3 | PointCloudImage::PointCloudImage(int imageW,int imageH, bool colorFlag)
4 | {
5 | w = imageW;
6 | h = imageH;
7 | points = cv::Mat(h, w, CV_32FC3);
8 | if(colorFlag == true)
9 | color = cv::Mat(h, w, CV_8UC3,cv::Scalar(0));//由于相机所摄为灰度图像,因此将32FC3改为8UC3
10 | else
11 | color = NULL;
12 | numOfPointsForPixel = cv::Mat(h, w, CV_8U, cv::Scalar(0));
13 | }
14 |
// Nothing to release explicitly: every member is a cv::Mat, which manages
// its own reference-counted storage.
PointCloudImage::~PointCloudImage(void)
{
}
18 |
19 | bool PointCloudImage::setPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3i colorgray)
20 | {
21 | if(i_w>=w || j_h>=h)
22 | return false;
23 | setPoint(i_w,j_h,point);
24 | Utilities::matSet3D(color,i_w,j_h,colorgray);
25 | return true;
26 | }
27 |
28 | bool PointCloudImage::setPoint(int i_w, int j_h, cv::Point3f point)
29 | {
30 | if(i_w>=w || j_h>=h)
31 | return false;
32 |
33 | Utilities::matSet3D(points, i_w, j_h, (cv::Vec3f)point);
34 | Utilities::matSet2D(numOfPointsForPixel, j_h, i_w, 1);
35 |
36 | return true;
37 | }
38 |
39 | bool PointCloudImage::getPoint(int i_w, int j_h, cv::Point3f &pointOut, cv::Vec3i &colorOut)
40 | {
41 | if(i_w>=w || j_h>=h)
42 | return false;
43 | uchar num = numOfPointsForPixel.at(j_h,i_w);
44 | if(num > 0){
45 | pointOut = (cv::Point3f) (Utilities::matGet3D(points,i_w,j_h) / (float) num);
46 | if(!color.empty())
47 | colorOut = (cv::Point3i) (Utilities::matGet3D(color,i_w,j_h) / (float) num);
48 | else
49 | colorOut = (cv::Point3i) (100,100,100);//如果color为空,则人为赋一个值
50 | return true;
51 | }
52 | else
53 | return false;
54 | }
55 |
56 | bool PointCloudImage::getPoint(int i_w, int j_h, cv::Point3f &pointOut)
57 | {
58 | if(i_w>=w || j_h>=h)
59 | return false;
60 | uchar num = numOfPointsForPixel.at(j_h,i_w);
61 | if(num > 0){
62 | pointOut = (cv::Point3f) (Utilities::matGet3D(points, i_w, j_h) / (float) num);
63 | return true;
64 | }
65 | else
66 | return false;
67 | }
68 |
// Accumulate a coloured sample at pixel (i_w, j_h). The first sample for a
// pixel is stored via setPoint(); later samples are summed (position via
// the 3-argument addPoint, colour in place) and averaged on read by
// getPoint().
bool PointCloudImage::addPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3i colorgray)
{
    if(i_w >= w || j_h >= h)//i_w ranges over [0, w), so a value equal to w is invalid: reject immediately
        return false;
    uchar num = numOfPointsForPixel.at(j_h, i_w);// NOTE(review): looks like this needs an explicit .at<uchar>(...) — possibly lost in transcription; confirm against the build
    if(num == 0)
        return setPoint(i_w, j_h, point, colorgray);
    addPoint(i_w, j_h, point);
    if(!color.empty()){
        cv::Vec3i c = Utilities::matGet3D(color, i_w, j_h);
        Utilities::matSet3D(color, i_w, j_h, colorgray + c);
    }
    else
        return false;// NOTE(review): the position was already accumulated above even though failure is reported here — confirm this asymmetry is intended
    return true;
}
85 |
86 | bool PointCloudImage::addPoint(int i_w, int j_h, cv::Point3f point)
87 | {
88 | if(i_w>=w || j_h>=h)
89 | return false;
90 | uchar num = numOfPointsForPixel.at(j_h,i_w);
91 | if(num == 0)
92 | return setPoint(i_w,j_h,point);
93 | cv::Point3f p = Utilities::matGet3D(points,i_w,j_h);
94 | Utilities::matSet3D(points,i_w,j_h,(cv::Vec3f)(point + p));
95 | numOfPointsForPixel.at(j_h,i_w) = num + 1;
96 | return true;
97 | }
98 |
// NOTE(review): the body of exportXYZ() below — and the start of
// exportNumOfPointsPerPixelImg() that follows it — was damaged in
// transcription: several statements (loop headers, stream-insertion
// chains, and the function boundary around original lines 116-132) are
// missing. Recover the original text from version control before editing
// this region; the code is left untouched here.
void PointCloudImage::exportXYZ(char path[], bool exportOffPixels, bool colorFlag)
{
    std::ofstream out;
    out.open(path);
    cv::Point3f p;
    cv::Vec3i c;
    for(int i = 0; i(j,i);
    if(!exportOffPixels && num == 0)
    continue;
    getPoint(i,j,p,c);
    if(exportOffPixels && num == 0){
    p = cv::Point3f(0,0,0);
    c = cv::Point3i(0,0,0);
    }
    out<(j,i);
    if(num > max){
    max = num;
    maxX=i;
    maxY=j;
    }
    }
    }

    for(int i=0; i(j,i);
    Utilities::matSet2D(projToCamRays, j, i, num/(float)(max * 255.0));
    }
    }
    cv::imwrite("reconstruction/projToCamRays.png",projToCamRays);
    std::ofstream out1;
    std::stringstream txt;
    txt<
5 | #include
6 | #include "utilities.h"
7 |
// Accumulates reconstructed 3-D points (and optional per-pixel colour) on a
// regular image grid; repeated samples per pixel are summed and averaged on
// read. Export helpers write the cloud/debug data to disk.
class PointCloudImage
{
public:
    // color == false skips allocating the colour buffer entirely.
    PointCloudImage(int imageW,int imageH, bool color);
    ~PointCloudImage(void);

    bool setPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3i colorgray);//grayscale colour; could become cv::Vec3f colorBGR (same below) — vec3b would be a 3-element uchar vector
    bool setPoint(int i_w, int j_h, cv::Point3f point);

    bool getPoint(int i_w, int j_h, cv::Point3f &pointOut);
    bool getPoint(int i_w, int j_h, cv::Point3f &pointOut, cv::Vec3i &colorgray);

    bool addPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3i colorgray);
    bool addPoint(int i_w, int j_h, cv::Point3f point);

    void exportNumOfPointsPerPixelImg(char path[]);   // debug image of per-pixel sample counts
    void exportXYZ(char *path,bool exportOffPixels=true, bool colorFlag=true);

    int getWidth();
    int getHeight();

private:
    int w;   // grid width  (camera image width)
    int h;   // grid height (camera image height)
    cv::Mat points;               // CV_32FC3: accumulated positions
    cv::Mat numOfPointsForPixel;  // CV_8U: sample count per pixel
    cv::Mat color;                // CV_8UC3: accumulated colour; empty when colour is disabled
};
36 |
37 | #endif // POINTCLOUDIMAGE_H
38 |
--------------------------------------------------------------------------------
/Duke/projector.cpp:
--------------------------------------------------------------------------------
1 | #include "projector.h"
2 | #include
3 | #include
4 | #include
5 |
// Projector native resolution, shared with paintEvent for centring the cross.
int proj_w;
int proj_h;
8 |
9 | Projector::Projector(QWidget *parent, int scanW, int scanH, int projW, int projH, int xos, int yos)
10 | : QWidget(parent)
11 | {
12 | width = scanW;
13 | height = scanH;
14 | proj_w = projW;
15 | proj_h = projH;
16 | xoffset = xos;
17 | yoffset = yos;
18 | crossVisible = true;
19 | label = new QLabel;
20 | QHBoxLayout *lo = new QHBoxLayout;
21 | lo->addWidget(label);
22 | //setLayout(lo);
23 | }
24 |
// Child QWidgets are released by the Qt parent/child mechanism; nothing
// else to clean up here.
Projector::~Projector()
{
}
28 |
// Paint the projection window: the optional yellow alignment cross first,
// then (once showImg has supplied one) the current pattern image on top.
void Projector::paintEvent(QPaintEvent *event)
{
    QPainter painter(this);
    if(crossVisible)//QEvent::User 1000
    {
        painter.setPen(QPen(Qt::yellow, 5));
        //else
        //painter.setPen(QPen(Qt::white, 5));
        // 120-pixel cross centred on the projector's native resolution.
        painter.drawLine(proj_w/2 - 60, proj_h/2, proj_w/2 + 60, proj_h/2);
        painter.drawLine(proj_w/2, proj_h/2 - 60, proj_w/2, proj_h/2 + 60);
    }
    if(imageAva){
        // Drawn after the cross, so a full-size pattern image covers it.
        painter.drawImage(0,0,pshow);
    }
}
44 |
// Create the auxiliary OpenCV display window ("w") and move it onto the
// projector screen at (xoffset, yoffset); counterpart of destoryWindow().
void Projector::opencvWindow()
{
    cv::namedWindow("w");
    //cvResizeWindow("w",width,height);
    cv::moveWindow("w", xoffset, yoffset);
}
51 |
// Display a pattern image through the OpenCV window created by opencvWindow().
void Projector::showMatImg(cv::Mat img)
{
    cv::imshow("w", img);
}
56 |
57 | void Projector::showImg(cv::Mat img)
58 | {
59 | pshow = QImage(img.data,img.cols,img.rows,QImage::Format_Indexed8);
60 | imageAva = true;
61 | this->update();
62 | //QPixmap p = QPixmap::fromImage(pshow);
63 | //label->setPixmap(p);
64 | }
65 |
66 | void Projector::destoryWindow()
67 | {
68 | cvDestroyWindow("w");
69 | }
70 |
71 | void Projector::displaySwitch(bool isWhite)
72 | {
73 | if(isWhite)
74 | this->setPalette(Qt::white);
75 | else
76 | this->setPalette(Qt::black);
77 | }
78 |
79 | void Projector::setCrossVisable(bool flag)
80 | {
81 | crossVisible = flag;
82 | this->update();
83 | }
84 |
--------------------------------------------------------------------------------
/Duke/projector.h:
--------------------------------------------------------------------------------
1 | #ifndef PROJECTOR_H
2 | #define PROJECTOR_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | #include
13 |
// Borderless widget shown on the projector's screen: displays either an
// alignment cross, a solid white/black fill, or a pattern image. Also
// wraps an auxiliary OpenCV window ("w") for Mat-based display.
class Projector : public QWidget
{
public:
    Projector(QWidget *parent, int scanW, int scanH, int projW, int projH, int xos, int yos);
    ~Projector();
    void showMatImg(cv::Mat img);       // show through the OpenCV window
    void showImg(cv::Mat img);//display a Qt image converted from a Mat directly in this widget

    void displaySwitch(bool isWhite);   // solid white or black background
    void opencvWindow();                // create + position the OpenCV window
    void destoryWindow();//delete the projector window created by cv after showImg

    void setCrossVisable(bool flag);    // toggle the centred alignment cross
    void paintEvent(QPaintEvent *event);

private:
    QLabel *label;        // created in the ctor; currently not installed in any layout

    QImage pshow;         // pattern currently painted by paintEvent
    bool crossVisible;    // draw the yellow cross?
    bool imageAva;        // has showImg supplied a valid pshow?
    int xoffset;          // projector screen offset on the desktop
    int yoffset;
    int height;           // scan-window resolution
    int width;
};
40 |
41 | #endif // PROJECTOR_H
42 |
--------------------------------------------------------------------------------
/Duke/reconstruct.h:
--------------------------------------------------------------------------------
1 | #ifndef RECONSTRUCT_H
2 | #define RECONSTRUCT_H
3 |
4 | #include
5 | #include
6 | #include "graycodes.h"
7 | #include "virtualcamera.h"
8 | #include "pointcloudimage.h"
9 | #include "set.h"
10 | #include "stereorect.h"
11 |
12 | //#define USE_STEREOCALIB_DATA
13 |
14 | class Reconstruct
15 | {
16 | public:
17 | Reconstruct(bool useEpi);
18 | ~Reconstruct();
19 |
20 | bool loadCameras();
21 |
22 | bool runReconstruction();
23 | bool runReconstruction_GE();
24 |
25 | VirtualCamera *cameras;
26 | QString *calibFolder;
27 | PointCloudImage *points3DProjView;
28 | void setBlackThreshold(int val);
29 | void setWhiteThreshold(int val);
30 | void setCalibPath(QString path1st, int cam_no );
31 | void saveShadowImg(const char path[]);
32 | void saveDecodedRowImg(const char path[]);
33 | void saveDecodedColImg(const char path[]);
34 |
35 | void enableRaySampling();
36 | void disableRaySampling();
37 |
38 | void cam2WorldSpace(VirtualCamera cam, cv::Point3f &p);
39 |
40 | void getParameters(int scanw, int scanh, int camw, int camh, bool autocontrast, bool havecolor, QString savePath);
41 | QString savePath_;//same as projectPath
42 | int scanSN;//表示当前重建的扫描数据序列号,也是输出模型的序列号
43 |
44 | private:
45 | bool EPI;//是否使用极线校正
46 | int numOfCams;
47 | VirtualCamera *camera;//general functions use this instead of camera1 or camera2
48 | stereoRect *sr;
49 |
50 | cv::vector **camsPixels;
51 | cv::vector **camsPixels_GE;
52 | cv::vector *camPixels; //general functions use this instead of cam1Pixels or cam2Pixels
53 | cv::vector *camPixels_GE;
54 |
55 | bool loadCamImgs(QString folder, QString prefix, QString suffix);
56 |
57 | void unloadCamImgs();//对不同编码模式都适用的卸载图像方法
58 | void computeShadows();
59 |
60 | ///不同编码模式对应的图像点身份获取方法
61 | bool getProjPixel(int row, int col, cv::Point &p_out);//GRAY_ONLY
62 | bool getProjPixel_GE(int row, int col, int &xDec);//GRAY_EPI
63 |
64 | void decodePaterns();
65 | void decodePatterns_GE();
66 |
67 | void triangulation(cv::vector *cam1Pixels, VirtualCamera cameras1, cv::vector *cam2Pixels, VirtualCamera cameras2);
68 | void triangulation_ge(cv::vector *cam1Pixels, VirtualCamera camera1, cv::vector *cam2Pixels, VirtualCamera camera2);
69 |
70 | QString *scanFolder;
71 | QString *imgPrefix;
72 | QString imgSuffix;
73 | int numberOfImgs;
74 | int numOfColBits;
75 | int numOfRowBits;
76 | int blackThreshold;
77 | int whiteThreshold;
78 |
79 | cv::vector camImgs;//用来存放条纹图像序列,不同编码方式通用
80 | cv::vector colorImgs;
81 |
82 | cv::Mat mask;//matrix with vals 0 and 1 , CV_8U , uchar
83 | cv::Mat color;
84 | cv::Mat decRows;
85 | cv::Mat decCols;
86 | bool pathSet;
87 | bool autoContrast_;
88 | bool raySampling_;
89 | bool haveColor;
90 | int cameraWidth;
91 | int cameraHeight;
92 |
93 | //access
94 | int Reconstruct::ac(int x,int y)
95 | {
96 | return x*scan_h + y;
97 | }
98 |
99 | int scan_w;
100 | int scan_h;
101 | };
102 |
103 | #endif // RECONSTRUCT_H
104 |
--------------------------------------------------------------------------------
/Duke/set.cpp:
--------------------------------------------------------------------------------
1 | #include "set.h"
2 | #include
3 | #include
4 | #include //对于connect函数是必要的,否则出现C2664类型转换错误
5 | #include
6 |
7 | #include "ui_Set.h"
8 |
9 |
// Settings dialog. Reads the .ui widget values once at startup and re-reads
// them on OK/Apply; the outputSet() signal notifies listeners of changes.
Set::Set(QMainWindow *parent) : QDialog(parent),
    set(new Ui::SetDialog)
{
    set->setupUi(this);
    createConfigurationFile();   // initialise members from the ui defaults
    // OK: re-read the settings, notify listeners, then hide the dialog.
    connect(set->buttonBox->button(QDialogButtonBox::Ok), SIGNAL(clicked()), this, SLOT(createConfigurationFile()));
    connect(set->buttonBox->button(QDialogButtonBox::Ok), SIGNAL(clicked()), SIGNAL(outputSet()));
    connect(set->buttonBox->button(QDialogButtonBox::Ok),SIGNAL(clicked()), this, SLOT(hide()));
    // Apply: same as OK but the dialog stays open.
    connect(set->buttonBox->button(QDialogButtonBox::Apply), SIGNAL(clicked()), this, SLOT(createConfigurationFile()));
    connect(set->buttonBox->button(QDialogButtonBox::Apply), SIGNAL(clicked()), SIGNAL(outputSet()));
    connect(set->buttonBox->button(QDialogButtonBox::Cancel),SIGNAL(clicked()),this,SLOT(hide()));
}
22 |
23 | void Set::test(bool flag)
24 | {
25 | if(flag == true)
26 | QMessageBox::information(NULL, tr("Test"), tr("Successed!"));
27 | else
28 | QMessageBox::warning(NULL, tr("Test"), tr("Failed!"));
29 | }
30 |
31 | void Set::createConfigurationFile()//如果是槽函数,那么void声明不可少
32 | {
33 | board_w = set->boardWidth->value();
34 | board_h = set->boardHeight->value();
35 | proj_w = set->projResH->value();
36 | proj_h = set->projResV->value();
37 | scan_w = set->scanResH->value();
38 | scan_h = set->scanResV->value();
39 | if (set->resMode0->isChecked()){
40 | cam_w = 1280;
41 | cam_h = 1024;
42 | }
43 | cell_w = set->cellWidth->value();
44 | cell_h = set->cellHeight->value();
45 | black_threshold = set->blackThresholdEdit->value();
46 | white_threshold = set->whiteThresholdEdit->value();
47 | if(set->autoContrastCheck->isChecked())
48 | autoContrast = true;
49 | else
50 | autoContrast = false;
51 | if(set->raySamplingCheck->isChecked())
52 | raySampling = true;
53 | else
54 | raySampling = false;
55 | if(set->exportObjCheck->isChecked())
56 | exportObj = 1;
57 | else
58 | exportObj = 0;
59 | if(set->exportPlyCheck->isChecked())
60 | exportPly = 1;
61 | else
62 | exportPly = 0;
63 | if (set->GrayOnly->isChecked())
64 | usedPattern = 0;
65 | else if (set->grayEpi->isChecked())
66 | usedPattern = 1;
67 | else
68 | usedPattern = 2;
69 | haveColor = (set->haveColorCheck->isChecked())?(true):(false);
70 | //createSetFile();
71 | }
72 |
73 | void Set::createSetFile()
74 | {
75 | int autoc, autocs, ray;
76 | autoc = boolToInt(autoContrast);
77 | ray = boolToInt(raySampling);
78 |
79 | const QString &fileName = saveSetPath +"/set.xml";
80 | QFile file(fileName);
81 | file.open(QIODevice::WriteOnly);
82 | QXmlStreamWriter xmlWriter(&file);
83 | xmlWriter.setAutoFormatting(true);
84 | xmlWriter.writeStartDocument();//写入
85 | xmlWriter.writeStartElement("Settings");
86 | xmlWriter.writeStartElement("ProjectorResolution");
87 | xmlWriter.writeTextElement("Width",QString::number(proj_w, 10));
88 | xmlWriter.writeTextElement("Height",QString::number(proj_h, 10));
89 | xmlWriter.writeEndElement();
90 | xmlWriter.writeStartElement("CalibrationBoard");
91 | xmlWriter.writeTextElement("BoardWidth",QString::number(board_w, 10));
92 | xmlWriter.writeTextElement("BoardHeight",QString::number(board_h, 10));
93 | xmlWriter.writeEndElement();
94 | xmlWriter.writeStartElement("ProjectorWindow");
95 | xmlWriter.writeStartElement("ScanResolution");
96 | xmlWriter.writeTextElement("Width",QString::number(scan_w, 10));
97 | xmlWriter.writeTextElement("Height",QString::number(scan_h, 10));
98 | xmlWriter.writeEndElement();//由于start两次所以end两次
99 | xmlWriter.writeEndElement();
100 | xmlWriter.writeStartElement("Reconstruction");
101 | xmlWriter.writeTextElement("AutoContrast",0);
102 | xmlWriter.writeTextElement("SaveAutoContrastImages",0);
103 | xmlWriter.writeTextElement("RaySampling",0);
104 | xmlWriter.writeTextElement("BlackThreshold",QString::number(black_threshold, 10));
105 | xmlWriter.writeTextElement("WhiteThreshold",QString::number(white_threshold, 10));
106 | xmlWriter.writeEndElement();
107 | xmlWriter.writeStartElement("Export");
108 | xmlWriter.writeTextElement("Obj",QString::number(exportObj, 10));
109 | xmlWriter.writeTextElement("Ply",QString::number(exportPly, 10));
110 | xmlWriter.writeEndElement();
111 | xmlWriter.writeEndDocument();//写入
112 | file.close();
113 | if(file.error()){
114 | test(false);
115 | }
116 | }
117 |
118 | int Set::boolToInt(bool input)
119 | {
120 | if(input)
121 | return 1;
122 | else
123 | return 0;
124 | }
125 |
// Re-apply translated strings after the application language changes.
void Set::switchLang()
{
    set->retranslateUi(this);
}
130 |
--------------------------------------------------------------------------------
/Duke/set.h:
--------------------------------------------------------------------------------
1 | #ifndef SET_H
2 | #define SET_H
3 | #include
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | namespace Ui {
14 | class SetDialog;
15 | }
16 |
// Settings dialog: exposes every scanner/calibration parameter as a public
// member, refreshed from the .ui widgets by createConfigurationFile().
class Set : public QDialog
{
    Q_OBJECT
public:
    Set(QMainWindow *parent = 0);

    QString saveSetPath;   // directory that receives set.xml
    int proj_h;            // projector resolution
    int proj_w;
    int scan_w;            // scan-window resolution
    int scan_h;
    int cam_w;             // camera resolution
    int cam_h;
    int cell_w;            // calibration-board cell size
    int cell_h;

    int black_threshold;   // gray-code decoding thresholds
    int white_threshold;
    int board_w;           // calibration-board dimensions
    int board_h;
    int projectorWinPos_x;
    int projectorWinPos_y;
    bool autoContrast;
    bool raySampling;
    bool haveColor;// whether the point cloud is displayed in its original colours
    int exportObj;         // 1 = export .obj
    int exportPly;         // 1 = export .ply
    int usedPattern;       // 0 = GrayOnly, 1 = grayEpi, 2 = other

    void switchLang();

private:
    Ui::SetDialog *set;
    int boolToInt(bool input);

signals:
    void outputSet();      // emitted after OK/Apply re-reads the settings

private slots:
    void test(bool flag);
    void createConfigurationFile();
    void createSetFile();
};
60 |
61 | #endif // SET_H
62 |
--------------------------------------------------------------------------------
/Duke/stereorect.cpp:
--------------------------------------------------------------------------------
1 | #include "stereorect.h"
2 |
3 | stereoRect::stereoRect(QString projectPath, cv::Size size)
4 | {
5 | ppath = projectPath;
6 | img_size = size;
7 | }
8 |
9 | void stereoRect::getParameters()
10 | {
11 | QString path;
12 | path = ppath + "/calib/left/cam_stereo.txt";
13 | loadMatrix(M1, 3, 3, path);
14 | path = ppath + "/calib/left/distortion_stereo.txt";
15 | loadMatrix(D1, 5, 1, path);
16 | path = ppath + "/calib/right/cam_stereo.txt";
17 | loadMatrix(M2, 3, 3, path);
18 | path = ppath + "/calib/right/distortion_stereo.txt";
19 | loadMatrix(D2, 5, 1, path);
20 | path = ppath + "/calib/R_stereo.txt";
21 | loadMatrix(R, 3, 3, path);
22 | path = ppath + "/calib/T_stereo.txt";
23 | loadMatrix(T, 3, 1, path);
24 | }
25 |
26 | void stereoRect::doStereoRectify(cv::Mat &img, bool isleft)
27 | {
28 | cv::Mat imgr;
29 | if (isleft)
30 | cv::remap(img, imgr, map11, map12, cv::INTER_LINEAR);
31 | else
32 | cv::remap(img, imgr, map21, map22, cv::INTER_LINEAR);
33 | img = imgr;
34 | }
35 |
/// Two caveats when using this rectification call (translated):
/// 1. do NOT pass CV_CALIB_ZERO_DISPARITY as the flag — pass 0 instead
/// 2. every input matrix must be CV_64F, otherwise a type-mismatch error occurs
void stereoRect::calParameters()
{
    // flags = 0, alpha = -1 (OpenCV's default free-scaling behaviour).
    cv::stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, 0, -1);
    // Precompute the per-camera remap tables consumed by doStereoRectify().
    cv::initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
    cv::initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
}
45 |
46 | void stereoRect::loadMatrix(cv::Mat &matrix, int rows, int cols, QString file)
47 | {
48 | std:: ifstream in1;
49 | in1.open(file.toStdString());
50 | if(!in1)
51 | return;
52 | if(!matrix.empty())
53 | matrix.release();
54 | matrix = cv::Mat(rows, cols, CV_64F);
55 | for(int i = 0; i < rows; i++){
56 | for(int j = 0; j < cols; j++){
57 | float val;
58 | in1>>val;
59 | Utilities::matSet2D(matrix, i, j, val);
60 | }
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/Duke/stereorect.h:
--------------------------------------------------------------------------------
1 | #ifndef STEREORECT_H
2 | #define STEREORECT_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | #include "utilities.h"
12 |
// Wraps OpenCV stereo rectification: loads stereo calibration results from
// text files, computes the rectification transforms, and remaps images.
class stereoRect : public QObject
{
    Q_OBJECT
public:
    stereoRect(QString projectPath, cv::Size size);
    void doStereoRectify(cv::Mat &img, bool isleft);   // remap one image in place
    void getParameters();                              // load M1/D1/M2/D2/R/T from disk
    void calParameters();                              // compute R1/R2/P1/P2/Q and the remap tables
    cv::Mat R1, P1, R2, P2, Q;// output parameters

private:
    QString ppath;            // project root containing the calib/ folder
    cv::Size img_size;
    cv::Mat M1, D1, M2, D2, R, T;// input parameters
    cv::Mat map11, map12, map21, map22;// intermediate remap tables
    void loadMatrix(cv::Mat &matrix, int rows, int cols, QString file);
};
30 | #endif // STEREORECT_H
31 |
--------------------------------------------------------------------------------
/Duke/utilities.cpp:
--------------------------------------------------------------------------------
1 | #include "utilities.h"
2 |
// Utilities is a stateless collection of static helpers; the constructor
// exists only so the class can be (needlessly) instantiated.
Utilities::Utilities(void)
{
}
6 |
// Nothing to release: Utilities holds no state.
Utilities::~Utilities(void)
{
}
10 |
11 | bool Utilities::XOR(bool val1, bool val2)
12 | {
13 | if(val1==val2)
14 | return 0;
15 | else
16 | return 1;
17 | }
18 |
19 | void Utilities::normalize(cv::Vec3f &vec)
20 | {
21 | double mag = sqrt( vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2]);
22 |
23 | vec[0] /= (float) max(0.000001, mag);
24 | vec[1] /= (float) max(0.000001, mag);
25 | vec[2] /= (float) max(0.000001, mag);
26 |
27 | return;
28 | }
29 |
30 | void Utilities::normalize3dtable(double vec[3])
31 | {
32 | double mag = sqrt( vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2]);
33 |
34 | vec[0] /= max(0.000001, mag);
35 | vec[1] /= max(0.000001, mag);
36 | vec[2] /= max(0.000001, mag);
37 | }
38 |
39 | //convert image pixel to image 3d space point
40 | void Utilities::pixelToImageSpace(double p[3], CvScalar fc, CvScalar cc)
41 | {
42 | p[0]=(p[0]-cc.val[0])/fc.val[0];
43 | p[1]=(p[1]-cc.val[1])/fc.val[1];
44 | p[2]=1;
45 | }
46 |
47 | cv::Point3f Utilities::pixelToImageSpace(cv::Point2f p, VirtualCamera cam)
48 | {
49 | cv::Point3f point;
50 |
51 | point.x = (p.x-cam.cc.x) / cam.fc.x;
52 | point.y = (p.y-cam.cc.y) / cam.fc.y;
53 | point.z = 1;
54 |
55 | return point;
56 | }
57 |
// Iteratively remove lens distortion from one pixel coordinate using a
// fixed-point iteration (same scheme as OpenCV's cvUndistortPoints).
// Returns the corrected coordinate in pixels.
// Distortion model: k[0], k[1] radial, k[2], k[3] tangential; k[4] (the
// third radial term) is forced to 0.
cv::Point2f Utilities::undistortPoints(cv::Point2f p, VirtualCamera cam)
{
    double k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    int iters = 1;
    // NOTE(review): the .at() calls below appear to be missing their
    // element-type template argument (e.g. .at<float>(0)) — possibly lost
    // in transcription; confirm against the build.
    k[0] = cam.distortion.at(0);
    k[1] = cam.distortion.at(1);
    k[2] = cam.distortion.at(2);
    k[3] = cam.distortion.at(3);
    k[4]=0;
    iters = 5;   // overrides the initial 1; five iterations suffice in practice
    fx = cam.fc.x;
    fy = cam.fc.y;
    ifx = 1./fx;
    ify = 1./fy;
    cx = cam.cc.x;
    cy = cam.cc.y;

    double x, y, x0, y0;

    x=p.x;
    y=p.y;

    // Normalize to image space; (x0, y0) keeps the distorted position.
    x0 = x = (x - cx)*ifx;
    y0 = y = (y - cy)*ify;

    for(int jj = 0; jj < iters; jj++ )
    {
        double r2 = x*x + y*y;
        double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);   // inverse radial factor
        double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);             // tangential correction, x
        double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;             // tangential correction, y
        x = (x0 - deltaX)*icdist;
        y = (y0 - deltaY)*icdist;
    }

    // Back to pixel coordinates.
    return cv::Point2f((float)(x*fx)+cx,(float)(y*fy)+cy);
}
95 |
96 | //calculate the intersection point of a ray and a plane, given the normal and a point of the plane, and a point and the vector of the ray
97 | CvScalar Utilities::planeRayInter(CvScalar planeNormal,CvScalar planePoint, CvScalar rayVector, CvScalar rayPoint )
98 | {
99 | double l;
100 | CvScalar point;
101 |
102 | CvScalar pSub;
103 |
104 | pSub.val[0] = - rayPoint.val[0] + planePoint.val[0];
105 | pSub.val[1] = - rayPoint.val[1] + planePoint.val[1];
106 | pSub.val[2] = - rayPoint.val[2] + planePoint.val[2];
107 |
108 | double dotProd1 = pSub.val[0] * planeNormal.val[0] + pSub.val[1] * planeNormal.val[1] + pSub.val[2] * planeNormal.val[2];
109 | double dotProd2 = rayVector.val[0] * planeNormal.val[0] + rayVector.val[1] * planeNormal.val[1] + rayVector.val[2] * planeNormal.val[2];
110 |
111 | if(fabs(dotProd2)<0.00001)
112 | {
113 | point.val[0]=0;
114 | point.val[1]=0;
115 | point.val[2]=0;
116 | return point;
117 | }
118 | l = dotProd1 / dotProd2;
119 | point.val[0] = rayPoint.val[0] + l * rayVector.val[0];
120 | point.val[1] = rayPoint.val[1] + l * rayVector.val[1];
121 | point.val[2] = rayPoint.val[2] + l * rayVector.val[2];
122 | return point;
123 | }
124 |
125 | double Utilities::matGet2D(cv::Mat m, int row, int col)//输入量先行后列,即先y后x,先h后w
126 | {
127 | int type = m.type();
128 | switch(type)
129 | {
130 | case CV_8U:
131 | return m.at(row,col);//opencv中的at函数是先行后列,即按照矩阵的一般顺序取值的
132 | break;
133 | case CV_8S:
134 | return m.at(row,col);
135 | break;
136 | case CV_16U:
137 | return m.at(row,col);
138 | break;
139 | case CV_16S:
140 | return m.at(row,col);
141 | break;
142 | case CV_32S:
143 | return m.at(row,col);
144 | break;
145 | case CV_32F:
146 | return m.at(row,col);
147 | break;
148 | case CV_64F:
149 | return m.at(row,col);
150 | break;
151 | }
152 | }
153 |
154 | double Utilities::matGet3D(cv::Mat m, int x, int y, int i)
155 | {
156 | int type = m.type();
157 | switch(type)
158 | {
159 | case CV_8U:
160 | case CV_MAKETYPE(CV_8U,3):
161 | return m.at(y,x,i);
162 | break;
163 | case CV_8S:
164 | case CV_MAKETYPE(CV_8S,3):
165 | return m.at(y,x,i);
166 | break;
167 | case CV_16U:
168 | case CV_MAKETYPE(CV_16U,3):
169 | return m.at(y,x,i);
170 | break;
171 | case CV_16S:
172 | case CV_MAKETYPE(CV_16S,3):
173 | return m.at(y,x,i);
174 | break;
175 | case CV_32S:
176 | case CV_MAKETYPE(CV_32S,3):
177 | return m.at(y,x,i);
178 | break;
179 | case CV_32F:
180 | case CV_MAKETYPE(CV_32F,3):
181 | return m.at(y,x,i);
182 | break;
183 | case CV_64F:
184 | case CV_MAKETYPE(CV_64F,3):
185 | return m.at(y,x,i);
186 | break;
187 | }
188 | }
189 |
190 | cv::Vec3d Utilities::matGet3D(cv::Mat m, int x, int y)
191 | {
192 | int type = m.type();
193 | switch(type)
194 | {
195 | case CV_8U:
196 | case CV_MAKETYPE(CV_8U,3):
197 | return m.at(y,x);
198 | break;
199 | case CV_8S:
200 | case CV_MAKETYPE(CV_8S,3):
201 | return m.at(y,x);
202 | break;
203 | case CV_16U:
204 | case CV_MAKETYPE(CV_16U,3):
205 | return m.at(y,x);
206 | break;
207 | case CV_16S:
208 | case CV_MAKETYPE(CV_16S,3):
209 | return m.at(y,x);
210 | break;
211 | case CV_32S:
212 | case CV_MAKETYPE(CV_32S,3):
213 | return m.at