├── .gitignore
├── Duke
├── Duke.pro
├── Duke.pro.user
├── Resource
│ ├── 1.png
│ ├── 10.png
│ ├── 11.png
│ ├── 12.png
│ ├── 13.png
│ ├── 14.png
│ ├── 2.png
│ ├── 3.png
│ ├── 4.png
│ ├── 5.png
│ ├── 6.png
│ ├── 7.png
│ ├── 8.png
│ ├── 9.png
│ ├── cal.png
│ ├── calib.png
│ ├── camera.png
│ ├── china.png
│ ├── close.png
│ ├── en.qm
│ ├── english.png
│ ├── new.png
│ ├── open.png
│ ├── projector.png
│ ├── projoff.png
│ ├── projon.png
│ ├── reconstruct.png
│ ├── res.qrc
│ ├── save.png
│ ├── scan.png
│ ├── set.png
│ ├── splash.png
│ ├── switch.png
│ └── zh.qm
├── Resourec.qrc
├── Set.ui
├── baslercamera.cpp
├── baslercamera.h
├── blobdetector.cpp
├── blobdetector.h
├── camera.cpp
├── camera.h
├── cameracalibration.cpp
├── cameracalibration.h
├── dahengcamera.cpp
├── dahengcamera.h
├── deployment.pri
├── dotmatch.cpp
├── dotmatch.h
├── en.ts
├── focusassistant.cpp
├── focusassistant.h
├── focusassistant.ui
├── glwidget.cpp
├── glwidget.h
├── graycodes.cpp
├── graycodes.h
├── imageviewer.cpp
├── imageviewer.h
├── imageviewer.ui
├── main.cpp
├── mainwindow.cpp
├── mainwindow.h
├── mainwindow.ui
├── manualmatch.cpp
├── manualmatch.h
├── manualmatch.ui
├── meshcreator.cpp
├── meshcreator.h
├── multifrequency.cpp
├── multifrequency.h
├── plyloader.cpp
├── plyloader.h
├── pointcloudimage.cpp
├── pointcloudimage.h
├── projector.cpp
├── projector.h
├── reconstruct.cpp
├── reconstruct.h
├── set.cpp
├── set.h
├── stereorect.cpp
├── stereorect.h
├── utilities.cpp
├── utilities.h
├── virtualcamera.cpp
├── virtualcamera.h
├── zh.ts
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | Duke/Duke.pro.user.cf61093
2 | Duke/Duke.pro.user.4967388
3 | Duke/Duke.pro.user
4 | Duke/Duke.pro.user
5 | *.user
6 | *.user
7 | Duke/Duke.pro.user
--------------------------------------------------------------------------------
/Duke/Duke.pro:
--------------------------------------------------------------------------------
1 | TEMPLATE = app
2 |
3 | TARGET = Duke
4 |
5 | QT += quick widgets opengl
6 |
7 | SOURCES += \
8 | graycodes.cpp \
9 | main.cpp \
10 | mainwindow.cpp \
11 | meshcreator.cpp \
12 | pointcloudimage.cpp \
13 | projector.cpp \
14 | reconstruct.cpp \
15 | set.cpp \
16 | utilities.cpp \
17 | virtualcamera.cpp \
18 | plyloader.cpp \
19 | glwidget.cpp \
20 | cameracalibration.cpp \
21 | dotmatch.cpp \
22 | multifrequency.cpp \
23 | blobdetector.cpp \
24 | dahengcamera.cpp \
25 | baslercamera.cpp \
26 | focusassistant.cpp \
27 | manualmatch.cpp \
28 | imageviewer.cpp \
29 | stereorect.cpp
30 |
31 | RESOURCES += \
32 | Resource/res.qrc
33 |
34 | INCLUDEPATH += E:\opencv\build\include\
35 | D:\VC\inc\
36 | D:\mrpt\libs\base\include\
37 | D:\mrpt\libs\scanmatching\include\
38 | D:\mrpt\include\mrpt\mrpt-config\
39 | D:\glm\
40 | E:\freeglut-2.8.1\include\
41 | D:\genicam\library\cpp\include\
42 | D:\pylon\include\
43 |
44 | LIBS += -LD:\mrpt\lib\
45 | -LE:\freeglut-2.8.1\lib\x86\Debug\
46 | -LE:\opencv\build\x86\vc10\lib\
47 | -LD:\VC\lib\
48 | -LD:\genicam\library\cpp\lib\win32_i86\
49 | -LD:\pylon\lib\Win32\
50 | -lopencv_core249d\
51 | -lopencv_highgui249d\
52 | -lopencv_imgproc249d\
53 | -lopencv_features2d249d\
54 | -lopencv_calib3d249d\
55 | -lopencv_nonfree249d\
56 | -lopencv_flann249d\
57 | -lHVDAILT\
58 | -lHVExtend\
59 | -lHVUtil\
60 | -lRaw2Rgb\
61 | -llibmrpt-base122-dbg\
62 | -llibmrpt-scanmatching122-dbg\
63 | -lfreeglut\
64 | -lGCBase_MD_VC100_v2_3\
65 | -lPylonBase_MD_VC100\
66 | -lPylonBootstrapper\
67 | -lPylonGigE_MD_VC100_TL\
68 | -lPylonUsb_MD_VC100_TL\
69 | -lPylonUtility_MD_VC100\
70 |
71 |
72 | # Default rules for deployment.
73 | include(deployment.pri)
74 |
75 | HEADERS += \
76 | graycodes.h \
77 | mainwindow.h \
78 | meshcreator.h \
79 | pointcloudimage.h \
80 | projector.h \
81 | reconstruct.h \
82 | set.h \
83 | utilities.h \
84 | virtualcamera.h \
85 | plyloader.h \
86 | glwidget.h \
87 | cameracalibration.h \
88 | dotmatch.h \
89 | multifrequency.h \
90 | blobdetector.h \
91 | dahengcamera.h \
92 | baslercamera.h \
93 | focusassistant.h \
94 | manualmatch.h \
95 | imageviewer.h \
96 | stereorect.h
97 |
98 | FORMS += \
99 | mainwindow.ui \
100 | Set.ui \
101 | focusassistant.ui \
102 | manualmatch.ui \
103 | imageviewer.ui
104 |
105 | TRANSLATIONS += en.ts zh.ts
106 |
107 |
--------------------------------------------------------------------------------
/Duke/Duke.pro.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | EnvironmentId
7 | {cf610932-2678-494f-bc46-28d2f9b1f964}
8 |
9 |
10 | ProjectExplorer.Project.ActiveTarget
11 | 0
12 |
13 |
14 | ProjectExplorer.Project.EditorSettings
15 |
16 | true
17 | false
18 | true
19 |
20 | Cpp
21 |
22 | CppGlobal
23 |
24 |
25 |
26 | QmlJS
27 |
28 | QmlJSGlobal
29 |
30 |
31 | 2
32 | UTF-8
33 | false
34 | 4
35 | false
36 | 80
37 | true
38 | true
39 | 1
40 | true
41 | false
42 | 0
43 | true
44 | 0
45 | 8
46 | true
47 | 1
48 | true
49 | true
50 | true
51 | false
52 |
53 |
54 |
55 | ProjectExplorer.Project.PluginSettings
56 |
57 |
58 |
59 | ProjectExplorer.Project.Target.0
60 |
61 | Desktop Qt 5.3 MSVC2010 OpenGL 32bit
62 | Desktop Qt 5.3 MSVC2010 OpenGL 32bit
63 | qt.53.win32_msvc2010_opengl_kit
64 | 0
65 | 0
66 | 0
67 |
68 | D:/Structure-Light-Reconstructor/build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Debug
69 |
70 |
71 | true
72 | qmake
73 |
74 | QtProjectManager.QMakeBuildStep
75 | false
76 | true
77 |
78 | false
79 |
80 |
81 | true
82 | Make
83 |
84 | Qt4ProjectManager.MakeStep
85 |
86 | false
87 |
88 |
89 |
90 | 2
91 | 构建
92 |
93 | ProjectExplorer.BuildSteps.Build
94 |
95 |
96 |
97 | true
98 | Make
99 |
100 | Qt4ProjectManager.MakeStep
101 |
102 | true
103 | clean
104 |
105 |
106 | 1
107 | 清理
108 |
109 | ProjectExplorer.BuildSteps.Clean
110 |
111 | 2
112 | false
113 |
114 | Debug
115 |
116 | Qt4ProjectManager.Qt4BuildConfiguration
117 | 2
118 | true
119 |
120 |
121 | D:/Duke/build-Duke-Desktop_Qt_5_3_MSVC2010_OpenGL_32bit-Release
122 |
123 |
124 | true
125 | qmake
126 |
127 | QtProjectManager.QMakeBuildStep
128 | false
129 | true
130 |
131 | false
132 |
133 |
134 | true
135 | Make
136 |
137 | Qt4ProjectManager.MakeStep
138 |
139 | false
140 |
141 |
142 |
143 | 2
144 | 构建
145 |
146 | ProjectExplorer.BuildSteps.Build
147 |
148 |
149 |
150 | true
151 | Make
152 |
153 | Qt4ProjectManager.MakeStep
154 |
155 | true
156 | clean
157 |
158 |
159 | 1
160 | 清理
161 |
162 | ProjectExplorer.BuildSteps.Clean
163 |
164 | 2
165 | false
166 |
167 | Release
168 |
169 | Qt4ProjectManager.Qt4BuildConfiguration
170 | 0
171 | true
172 |
173 | 2
174 |
175 |
176 | 0
177 | 部署
178 |
179 | ProjectExplorer.BuildSteps.Deploy
180 |
181 | 1
182 | 在本地部署
183 |
184 | ProjectExplorer.DefaultDeployConfiguration
185 |
186 | 1
187 |
188 |
189 |
190 | false
191 | false
192 | false
193 | false
194 | true
195 | 0.01
196 | 10
197 | true
198 | 1
199 | 25
200 |
201 | 1
202 | true
203 | false
204 | true
205 | valgrind
206 |
207 | 0
208 | 1
209 | 2
210 | 3
211 | 4
212 | 5
213 | 6
214 | 7
215 | 8
216 | 9
217 | 10
218 | 11
219 | 12
220 | 13
221 | 14
222 |
223 | 2
224 |
225 | Duke
226 | Duke2
227 | Qt4ProjectManager.Qt4RunConfiguration:D:/Structure-Light-Reconstructor/Duke/Duke.pro
228 |
229 | Duke.pro
230 | false
231 | false
232 |
233 | 3768
234 | false
235 | true
236 | false
237 | false
238 | true
239 |
240 | 1
241 |
242 |
243 |
244 | ProjectExplorer.Project.TargetCount
245 | 1
246 |
247 |
248 | ProjectExplorer.Project.Updater.FileVersion
249 | 16
250 |
251 |
252 | Version
253 | 16
254 |
255 |
256 |
--------------------------------------------------------------------------------
/Duke/Resource/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/1.png
--------------------------------------------------------------------------------
/Duke/Resource/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/10.png
--------------------------------------------------------------------------------
/Duke/Resource/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/11.png
--------------------------------------------------------------------------------
/Duke/Resource/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/12.png
--------------------------------------------------------------------------------
/Duke/Resource/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/13.png
--------------------------------------------------------------------------------
/Duke/Resource/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/14.png
--------------------------------------------------------------------------------
/Duke/Resource/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/2.png
--------------------------------------------------------------------------------
/Duke/Resource/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/3.png
--------------------------------------------------------------------------------
/Duke/Resource/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/4.png
--------------------------------------------------------------------------------
/Duke/Resource/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/5.png
--------------------------------------------------------------------------------
/Duke/Resource/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/6.png
--------------------------------------------------------------------------------
/Duke/Resource/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/7.png
--------------------------------------------------------------------------------
/Duke/Resource/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/8.png
--------------------------------------------------------------------------------
/Duke/Resource/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/9.png
--------------------------------------------------------------------------------
/Duke/Resource/cal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/cal.png
--------------------------------------------------------------------------------
/Duke/Resource/calib.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/calib.png
--------------------------------------------------------------------------------
/Duke/Resource/camera.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/camera.png
--------------------------------------------------------------------------------
/Duke/Resource/china.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/china.png
--------------------------------------------------------------------------------
/Duke/Resource/close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/close.png
--------------------------------------------------------------------------------
/Duke/Resource/en.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/en.qm
--------------------------------------------------------------------------------
/Duke/Resource/english.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/english.png
--------------------------------------------------------------------------------
/Duke/Resource/new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/new.png
--------------------------------------------------------------------------------
/Duke/Resource/open.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/open.png
--------------------------------------------------------------------------------
/Duke/Resource/projector.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/projector.png
--------------------------------------------------------------------------------
/Duke/Resource/projoff.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/projoff.png
--------------------------------------------------------------------------------
/Duke/Resource/projon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/projon.png
--------------------------------------------------------------------------------
/Duke/Resource/reconstruct.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/reconstruct.png
--------------------------------------------------------------------------------
/Duke/Resource/res.qrc:
--------------------------------------------------------------------------------
1 |
2 |
3 | calib.png
4 | close.png
5 | new.png
6 | open.png
7 | save.png
8 | set.png
9 | splash.png
10 | camera.png
11 | projector.png
12 | cal.png
13 | scan.png
14 | reconstruct.png
15 | 1.png
16 | 2.png
17 | 3.png
18 | 4.png
19 | 5.png
20 | 6.png
21 | 7.png
22 | 8.png
23 | 9.png
24 | 10.png
25 | 11.png
26 | 12.png
27 | china.png
28 | english.png
29 | projoff.png
30 | projon.png
31 | en.qm
32 | zh.qm
33 | 13.png
34 | 14.png
35 | switch.png
36 |
37 |
38 |
--------------------------------------------------------------------------------
/Duke/Resource/save.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/save.png
--------------------------------------------------------------------------------
/Duke/Resource/scan.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/scan.png
--------------------------------------------------------------------------------
/Duke/Resource/set.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/set.png
--------------------------------------------------------------------------------
/Duke/Resource/splash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/splash.png
--------------------------------------------------------------------------------
/Duke/Resource/switch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/switch.png
--------------------------------------------------------------------------------
/Duke/Resource/zh.qm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NEU-TEAM/Structure-Light-Reconstructor/da7e7d9776e0294432f53dbecef0cbd29f273f22/Duke/Resource/zh.qm
--------------------------------------------------------------------------------
/Duke/Resourec.qrc:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Duke/Set.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | SetDialog
4 |
5 |
6 |
7 | 0
8 | 0
9 | 455
10 | 356
11 |
12 |
13 |
14 | Settings
15 |
16 |
17 | font: 9pt "Calibri";
18 |
19 |
20 | -
21 |
22 |
-
23 |
24 |
25 | Qt::Horizontal
26 |
27 |
28 |
29 | 40
30 | 20
31 |
32 |
33 |
34 |
35 | -
36 |
37 |
38 |
39 | 0
40 | 0
41 |
42 |
43 |
44 | false
45 |
46 |
47 |
48 |
49 |
50 | Qt::Horizontal
51 |
52 |
53 | QDialogButtonBox::Apply|QDialogButtonBox::Cancel|QDialogButtonBox::Ok
54 |
55 |
56 |
57 |
58 |
59 | -
60 |
61 |
62 | 3
63 |
64 |
65 |
66 | Calibration Board
67 |
68 |
69 |
70 |
71 | 10
72 | 10
73 | 411
74 | 161
75 |
76 |
77 |
78 |
79 | 0
80 | 0
81 |
82 |
83 |
84 | Geometry
85 |
86 |
87 |
88 | 10
89 |
90 |
-
91 |
92 |
-
93 |
94 |
95 | Cell Width(mm):
96 |
97 |
98 |
99 | -
100 |
101 |
102 | 20
103 |
104 |
105 |
106 | -
107 |
108 |
109 | Cell Height(mm):
110 |
111 |
112 |
113 | -
114 |
115 |
116 | 20
117 |
118 |
119 |
120 |
121 |
122 | -
123 |
124 |
-
125 |
126 |
127 | Cell Number(Horizontal):
128 |
129 |
130 |
131 | -
132 |
133 |
134 | 10
135 |
136 |
137 |
138 | -
139 |
140 |
141 | Cell Number(Vertical):
142 |
143 |
144 | 0
145 |
146 |
147 |
148 | -
149 |
150 |
151 | 8
152 |
153 |
154 |
155 |
156 |
157 | -
158 |
159 |
160 | Qt::Horizontal
161 |
162 |
163 |
164 | 6
165 | 20
166 |
167 |
168 |
169 |
170 | -
171 |
172 |
-
173 |
174 |
175 | Board Width(mm):
176 |
177 |
178 |
179 | -
180 |
181 |
182 |
183 | 0
184 | 0
185 |
186 |
187 |
188 | 1000
189 |
190 |
191 | 200
192 |
193 |
194 |
195 | -
196 |
197 |
198 | Board Height(mm):
199 |
200 |
201 |
202 | -
203 |
204 |
205 |
206 | 0
207 | 0
208 |
209 |
210 |
211 | 1000
212 |
213 |
214 | 200
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 | Camera
226 |
227 |
228 |
229 |
230 | 10
231 | 9
232 | 411
233 | 121
234 |
235 |
236 |
237 |
238 | 0
239 | 0
240 |
241 |
242 |
243 | Camera Resolution
244 |
245 |
246 |
247 |
248 | 20
249 | 40
250 | 89
251 | 16
252 |
253 |
254 |
255 | 1280X1024
256 |
257 |
258 | true
259 |
260 |
261 |
262 |
263 |
264 |
265 | Projector
266 |
267 |
268 |
269 |
270 | 9
271 | 9
272 | 413
273 | 111
274 |
275 |
276 |
277 | Project Region
278 |
279 |
280 |
281 |
282 | 20
283 | 20
284 | 176
285 | 80
286 |
287 |
288 |
289 |
290 | 0
291 | 0
292 |
293 |
294 |
295 | Projector Resolution
296 |
297 |
298 | -
299 |
300 |
-
301 |
302 |
303 | Horizontal(pixel):
304 |
305 |
306 |
307 | -
308 |
309 |
310 | 1280
311 |
312 |
313 | 1280
314 |
315 |
316 |
317 | -
318 |
319 |
320 | Vertical(pixel):
321 |
322 |
323 |
324 | -
325 |
326 |
327 | 1024
328 |
329 |
330 | 1024
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 | 210
342 | 20
343 | 176
344 | 80
345 |
346 |
347 |
348 |
349 | 0
350 | 0
351 |
352 |
353 |
354 | Scan Resolution
355 |
356 |
357 | -
358 |
359 |
-
360 |
361 |
362 | Horizontal(pixel):
363 |
364 |
365 |
366 | -
367 |
368 |
369 | 1280
370 |
371 |
372 | 1280
373 |
374 |
375 |
376 | -
377 |
378 |
379 | Vertical(pixel):
380 |
381 |
382 |
383 | -
384 |
385 |
386 | 1024
387 |
388 |
389 | 1024
390 |
391 |
392 |
393 |
394 |
395 |
396 |
397 |
398 |
399 |
400 |
401 | Reconstruction
402 |
403 |
404 | -
405 |
406 |
407 |
408 | 0
409 | 0
410 |
411 |
412 |
413 | Threshold
414 |
415 |
416 |
-
417 |
418 |
419 | 10
420 |
421 |
-
422 |
423 |
424 | Black Threshold
425 |
426 |
427 |
428 | -
429 |
430 |
431 | 40
432 |
433 |
434 |
435 | -
436 |
437 |
438 | White Threshold
439 |
440 |
441 |
442 | -
443 |
444 |
445 |
446 |
447 | -
448 |
449 |
450 | Qt::Horizontal
451 |
452 |
453 |
454 | 244
455 | 20
456 |
457 |
458 |
459 |
460 |
461 |
462 |
463 | -
464 |
465 |
466 |
467 | 0
468 | 0
469 |
470 |
471 |
472 |
473 |
474 |
475 |
-
476 |
477 |
478 | Code Pattern
479 |
480 |
481 |
-
482 |
483 |
484 | Use Gray
485 |
486 |
487 | false
488 |
489 |
490 |
491 | -
492 |
493 |
494 | Use Gray with epipolar rectify
495 |
496 |
497 | true
498 |
499 |
500 |
501 | -
502 |
503 |
504 | Use MultiFrequency
505 |
506 |
507 |
508 |
509 |
510 |
511 | -
512 |
513 |
514 | 10
515 |
516 |
-
517 |
518 |
519 | Auto Contrast
520 |
521 |
522 |
523 | -
524 |
525 |
526 | Ray Sampling
527 |
528 |
529 |
530 | -
531 |
532 |
533 | Export Obj
534 |
535 |
536 | false
537 |
538 |
539 | false
540 |
541 |
542 |
543 | -
544 |
545 |
546 | Export Ply
547 |
548 |
549 | true
550 |
551 |
552 |
553 |
554 |
555 | -
556 |
557 |
558 | Qt::Horizontal
559 |
560 |
561 |
562 | 221
563 | 20
564 |
565 |
566 |
567 |
568 |
569 |
570 |
571 |
572 |
573 |
574 |
575 |
576 |
577 |
578 |
579 |
580 | buttonBox
581 | accepted()
582 | SetDialog
583 | accept()
584 |
585 |
586 | 248
587 | 254
588 |
589 |
590 | 157
591 | 274
592 |
593 |
594 |
595 |
596 | buttonBox
597 | rejected()
598 | SetDialog
599 | reject()
600 |
601 |
602 | 316
603 | 260
604 |
605 |
606 | 286
607 | 274
608 |
609 |
610 |
611 |
612 |
613 |
--------------------------------------------------------------------------------
/Duke/baslercamera.cpp:
--------------------------------------------------------------------------------
1 | #include "baslercamera.h"
2 | #include
3 |
4 | BaslerCamera::BaslerCamera(QObject *parent) :
5 | QObject(parent)
6 | {
7 | // Automagically call PylonInitialize and PylonTerminate to ensure the pylon runtime system.
8 | // is initialized during the lifetime of this object
9 | Pylon::PylonAutoInitTerm autoInitTerm;
10 |
11 | }
12 |
13 | void BaslerCamera::openCamera()
14 | {
15 | CTlFactory& tlFactory = CTlFactory::GetInstance();
16 |
17 | // Get all attached devices and exit application if no device is found.
18 | DeviceInfoList_t devices;
19 | if ( tlFactory.EnumerateDevices(devices) == 0 )
20 | {
21 | QMessageBox::warning(NULL,tr("Basler Camera"),tr("Basler cameras were not found."));
22 | }
23 |
24 | // Create an array of instant cameras for the found devices and avoid exceeding a maximum number of devices.
25 | cameras.Initialize(min( devices.size(), c_maxCamerasToUse));
26 |
27 | // Create and attach all Pylon Devices.
28 | for ( size_t i = 0; i < cameras.GetSize(); ++i)
29 | {
30 | cameras[i].Attach( tlFactory.CreateDevice( devices[i]));
31 | }
32 |
33 | // Starts grabbing for all cameras starting with index 0. The grabbing
34 | // is started for one camera after the other. That's why the images of all
35 | // cameras are not taken at the same time.
36 | // However, a hardware trigger setup can be used to cause all cameras to grab images synchronously.
37 | // According to their default configuration, the cameras are
38 | // set up for free-running continuous acquisition.
39 | cameras.StartGrabbing();
40 |
41 | for( int i = 0; i < cameras.IsGrabbing(); ++i)
42 | {
43 | cameras.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException);
44 |
45 | // When the cameras in the array are created the camera context value
46 | // is set to the index of the camera in the array.
47 | // The camera context is a user settable value.
48 | // This value is attached to each grab result and can be used
49 | // to determine the camera that produced the grab result.
50 | // Now, the image data can be processed.
51 | pImageBuffer = (uint8_t *) ptrGrabResult->GetBuffer();
52 | }
53 | }
54 |
55 | void BaslerCamera::closeCamera()
56 | {
57 | cameras.StopGrabbing();
58 | cameras.Close();
59 | }
60 |
--------------------------------------------------------------------------------
/Duke/baslercamera.h:
--------------------------------------------------------------------------------
1 | #ifndef BASLERCAMERA_H
2 | #define BASLERCAMERA_H
3 |
4 | //Qt
5 | #include
6 | //Pylon
7 | #include
8 |
9 | // Namespace for using pylon objects.
10 | using namespace Pylon;
11 |
12 | // Namespace for using cout.
13 | using namespace std;
14 |
15 | // Number of images to be grabbed.
16 | static const uint32_t c_countOfImagesToGrab = 10;
17 |
18 | static const size_t c_maxCamerasToUse = 2;
19 |
20 |
21 | class BaslerCamera : public QObject
22 | {
23 | Q_OBJECT
24 | public:
25 | BaslerCamera(QObject *parent = 0);
26 |
27 | const uint8_t *pImageBuffer;
28 | // This smart pointer will receive the grab result data.
29 | CGrabResultPtr ptrGrabResult;
30 |
31 | void openCamera();
32 | void closeCamera();
33 |
34 | private:
35 | CInstantCameraArray cameras;
36 |
37 | };
38 |
39 | #endif // BASLERCAMERA_H
40 |
--------------------------------------------------------------------------------
/Duke/blobdetector.cpp:
--------------------------------------------------------------------------------
1 | #include "blobdetector.h"
2 |
// ---- Blob-filter tuning parameters (file scope, mirror OpenCV's
// SimpleBlobDetector parameter set). An upper bound of
// std::numeric_limits<float>::max() means "no upper limit".
// NOTE: the export stripped the <float> template arguments below; they are
// restored from the declared float types.
int thresholdStep = 10;
int minThreshold = 50;
int maxThreshold = 220;
int minRepeatability = 2;
int minDistBetweenBlobs = 10;

// Keep only blobs whose center pixel has this value (0 = dark blobs).
bool filterByColor = true;
int blobColor = 0;

// Area limits in pixels.
bool filterByArea = true;
int minArea = 25;
int maxArea = 5000;

bool filterByCircularity = false;
float minCircularity = 0.8f;
float maxCircularity = std::numeric_limits<float>::max();

bool filterByInertia = true;
//minInertiaRatio = 0.6;
float minInertiaRatio = 0.1f;
float maxInertiaRatio = std::numeric_limits<float>::max();

bool filterByConvexity = true;
//minConvexity = 0.8;
float minConvexity = 0.95f;
float maxConvexity = std::numeric_limits<float>::max();
29 |
30 | BlobDetector::BlobDetector()
31 | {
32 | }
33 |
34 | void BlobDetector::findBlobs(const cv::Mat &binaryImage, vector ¢ers) const
35 | {
36 | centers.clear();
37 |
38 | vector < vector > contours;
39 | Mat tmpBinaryImage = binaryImage.clone();
40 | findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
41 |
42 | for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
43 | {
44 | Point2d center;
45 | Moments moms = moments(Mat(contours[contourIdx]));
46 | if (filterByArea)
47 | {
48 | double area = moms.m00;
49 | if (area < minArea || area >= maxArea)
50 | continue;
51 | }
52 |
53 | if (filterByCircularity)
54 | {
55 | double area = moms.m00;
56 | double perimeter = arcLength(Mat(contours[contourIdx]), true);
57 | double ratio = 4 * CV_PI * area / (perimeter * perimeter);
58 | if (ratio < minCircularity || ratio >= maxCircularity)
59 | continue;
60 | }
61 |
62 | if (filterByInertia)
63 | {
64 | double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
65 | const double eps = 1e-2;
66 | double ratio;
67 | if (denominator > eps)
68 | {
69 | double cosmin = (moms.mu20 - moms.mu02) / denominator;
70 | double sinmin = 2 * moms.mu11 / denominator;
71 | double cosmax = -cosmin;
72 | double sinmax = -sinmin;
73 |
74 | double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
75 | double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
76 | ratio = imin / imax;
77 | }
78 | else
79 | {
80 | ratio = 1;
81 | }
82 |
83 | if (ratio < minInertiaRatio || ratio >= maxInertiaRatio)
84 | continue;
85 | }
86 |
87 | if (filterByConvexity)
88 | {
89 | vector < Point > hull;
90 | convexHull(Mat(contours[contourIdx]), hull);
91 | double area = contourArea(Mat(contours[contourIdx]));
92 | double hullArea = contourArea(Mat(hull));
93 | double ratio = area / hullArea;
94 | if (ratio < minConvexity || ratio >= maxConvexity)
95 | continue;
96 | }
97 |
98 | center = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
99 |
100 | if (filterByColor)
101 | {
102 | if (binaryImage.at (cvRound(center.y), cvRound(center.x)) != blobColor)
103 | continue;
104 | }
105 |
106 | centers.push_back(center);
107 | }
108 | }
109 |
--------------------------------------------------------------------------------
/Duke/blobdetector.h:
--------------------------------------------------------------------------------
1 | #ifndef BLOBDETECTOR_H
2 | #define BLOBDETECTOR_H
3 |
4 | #include
5 |
6 | using namespace cv;
7 |
8 | class BlobDetector
9 | {
10 | public:
11 | BlobDetector();
12 | void findBlobs(const cv::Mat &binaryImage, vector ¢ers) const;
13 | private:
14 |
15 | };
16 |
17 | #endif // BLOBDETECTOR_H
18 |
--------------------------------------------------------------------------------
/Duke/camera.cpp:
--------------------------------------------------------------------------------
1 | #include "camera.h"
2 | #include
3 | #include
4 |
5 | #define MY_ZERO 0.000000001
6 |
7 | const HV_RESOLUTION Resolution = RES_MODE0;
8 | const HV_SNAP_MODE SnapMode = CONTINUATION;
9 | const HV_BAYER_CONVERT_TYPE ConvertType = BAYER2RGB_NEIGHBOUR1;
10 |
11 | const long Gain = 10;
12 | const long ExposureTint_Upper = 60;
13 | const long ExposureTint_Lower = 1000;
14 | const long ShutterDelay = 0;
15 | const long ADCLevel = ADC_LEVEL2;
16 | const int XStart = 0;
17 | const int YStart = 0;
18 | const long lVBlanking = 0;
19 | const HV_SNAP_SPEED SnapSpeed = HIGH_SPEED;
20 |
21 | Camera::Camera()
22 | {
23 | Width = 640;
24 | Height = 480;
25 | rawBuffer_1 = NULL;
26 | rawBuffer_2 = NULL;
27 | m_pImageBuffer = NULL;
28 | m_lHBlanking = 0;
29 | m_lVBlanking = 0;
30 | HVSTATUS status = STATUS_OK;
31 |
32 | status = BeginHVDevice(1, &camera_1);//打开数字摄像机 1
33 | if(status==STATUS_OK)
34 | m_bOpen = true;
35 | else
36 | return;
37 | status = BeginHVDevice(2, &camera_2);//打开数字摄像机 2
38 |
39 | HVSetResolution(camera_1, Resolution);// 设置数字摄像机分辨率
40 | HVSetResolution(camera_2, Resolution);
41 | HVSetSnapMode(camera_1, SnapMode);//采集模式,包括 CONTINUATION(连续)、TRIGGER(外触发)
42 | HVSetSnapMode(camera_2, SnapMode);
43 | HVADCControl(camera_1, ADC_BITS, ADCLevel);//设置ADC的级别
44 | HVADCControl(camera_2, ADC_BITS, ADCLevel);
45 |
46 | HVTYPE type = UNKNOWN_TYPE;//获取设备类型
47 | int size = sizeof(HVTYPE);
48 | HVGetDeviceInfo(camera_1,DESC_DEVICE_TYPE, &type, &size);
49 |
50 | HVSetBlanking(camera_1, m_lHBlanking, m_lVBlanking);//设置消隐
51 | HVSetBlanking(camera_2, m_lHBlanking, m_lVBlanking);
52 | HVSetOutputWindow(camera_1, XStart, YStart, Width, Height);
53 | HVSetOutputWindow(camera_2, XStart, YStart, Width, Height);
54 | HVSetSnapSpeed(camera_1, SnapSpeed);//设置采集速度
55 | HVSetSnapSpeed(camera_2, SnapSpeed);
56 |
57 | SetExposureTime(Width, ExposureTint_Upper, ExposureTint_Lower, m_lHBlanking, SnapSpeed, Resolution);//设置曝光时间
58 |
59 | // m_pBmpInfo即指向m_chBmpBuf缓冲区,用户可以自己分配BTIMAPINFO缓冲区
60 | m_pBmpInfo = (BITMAPINFO *)m_chBmpBuf;
61 | // 初始化BITMAPINFO 结构,此结构在保存bmp文件、显示采集图像时使用
62 | m_pBmpInfo->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
63 | // 图像宽度,一般为输出窗口宽度
64 | m_pBmpInfo->bmiHeader.biWidth = Width;
65 | // 图像宽度,一般为输出窗口高度
66 | m_pBmpInfo->bmiHeader.biHeight = Height;
67 |
68 | m_pBmpInfo->bmiHeader.biPlanes = 1;
69 | m_pBmpInfo->bmiHeader.biBitCount = 24;
70 | m_pBmpInfo->bmiHeader.biCompression = BI_RGB;
71 | m_pBmpInfo->bmiHeader.biSizeImage = 0;
72 | m_pBmpInfo->bmiHeader.biXPelsPerMeter = 0;
73 | m_pBmpInfo->bmiHeader.biYPelsPerMeter = 0;
74 | m_pBmpInfo->bmiHeader.biClrUsed = 0;
75 | m_pBmpInfo->bmiHeader.biClrImportant = 0;
76 |
77 | rawBuffer_1 = new BYTE[Width * Height];
78 | rawBuffer_2 = new BYTE[Width * Height];
79 | m_pImageBuffer = new BYTE[Width * Height * 3];
80 |
81 | QTimer *timer = new QTimer();
82 | timer->start(30);
83 | connect(timer, SIGNAL(timeout()), this, SLOT(CaptureFrame()));
84 | }
85 |
86 | Camera::~Camera()
87 | {
88 | HVSTATUS status = STATUS_OK;
89 | // 关闭数字摄像机,释放数字摄像机内部资源
90 | status = EndHVDevice(camera_1);
91 | status = EndHVDevice(camera_2);
92 | OnSnapexStop();
93 | OnSnapexClose();
94 | // 回收图像缓冲区
95 | delete []rawBuffer_1;
96 | delete []rawBuffer_2;
97 | delete []m_pImageBuffer;
98 | }
99 |
100 | void Camera::OnSnapexOpen()
101 | {
102 | HVSTATUS status = STATUS_OK;
103 | status = HVOpenSnap(camera_1, SnapThreadCallback, this);
104 | status = HVOpenSnap(camera_2, SnapThreadCallback, this);
105 | }
106 |
107 | void Camera::OnSnapexStart()
108 | {
109 | HVSTATUS status = STATUS_OK;
110 | BYTE *ppBuf_1[1];
111 | BYTE *ppBuf_2[1];
112 | ppBuf_1[0] = rawBuffer_1;
113 | ppBuf_2[0] = rawBuffer_2;
114 | status = HVStartSnap(camera_1, ppBuf_1,1);
115 | status = HVStartSnap(camera_2, ppBuf_2,1);
116 | }
117 |
118 | void Camera::OnSnapexStop()
119 | {
120 | HVSTATUS status = STATUS_OK;
121 | status = HVStopSnap(camera_1);
122 | status = HVStopSnap(camera_2);
123 | }
124 |
125 | void Camera::OnSnapexClose()
126 | {
127 | HVSTATUS status = STATUS_OK;
128 | status = HVCloseSnap(camera_1);
129 | status = HVCloseSnap(camera_2);
130 | }
131 |
// SDK per-frame callback (static, as required for a member callback).
// No per-frame work is done here; frames land directly in the raw buffers
// handed to HVStartSnap. Returning 1 tells the driver to keep snapping.
int CALLBACK Camera::SnapThreadCallback(HV_SNAP_INFO *pInfo)
{
    return 1;
}
136 |
// Timer slot (every 30 ms): wrap the raw capture buffers in 8-bit indexed
// QImage headers for display.
// NOTE(review): the previously allocated image_1/image_2 objects are never
// deleted, so this leaks two QImages per tick. A safe fix needs image_1 and
// image_2 initialized to NULL in the constructor first -- then delete the
// old objects here before reassigning.
void Camera::CaptureFrame()
{
    image_1 = new QImage(rawBuffer_1, Width, Height, QImage::Format_Indexed8);
    image_2 = new QImage(rawBuffer_2, Width, Height, QImage::Format_Indexed8);
}
142 |
// Hook for snap-state changes (see WM_SNAP_CHANGE in camera.h); currently a
// stub that always reports 1.
int Camera::OnSnapChange()
{
    return 1;
}
147 |
148 | HVSTATUS Camera::SetExposureTime(int nWindWidth, long lTintUpper, long lTintLower, long HBlanking, HV_SNAP_SPEED SnapSpeed, HV_RESOLUTION Resolution)
149 | {
150 | HVTYPE type = UNKNOWN_TYPE;
151 | int size = sizeof(HVTYPE);
152 | HVGetDeviceInfo(camera_1,DESC_DEVICE_TYPE, &type, &size);
153 |
154 | int nOutputWid = nWindWidth;
155 |
156 | double dExposure = 0.0;
157 | double dTint = max((double)lTintUpper/(double)lTintLower,MY_ZERO);
158 |
159 | double lClockFreq = 0.0;
160 |
161 | lClockFreq = (SnapSpeed == HIGH_SPEED)? 24000000:12000000;
162 | long lTb = HBlanking;
163 | lTb += 9;
164 | lTb -= 19;
165 | if(lTb <= 0) lTb =0;
166 | if(((double)nOutputWid + 244.0 + lTb ) > 552)
167 | dExposure = (dTint* lClockFreq + 180.0)/((double)nOutputWid + 244.0 + lTb);
168 | else
169 | dExposure = ((dTint * lClockFreq)+ 180.0) / 552 ;
170 |
171 | if((dExposure-(int)dExposure) >0.5)
172 | dExposure += 1.0;
173 | if(dExposure <= 0)
174 | dExposure = 1;
175 | else if(dExposure > 16383)
176 | dExposure = 16383;
177 |
178 | return HVAECControl(camera_1, AEC_EXPOSURE_TIME, (long)dExposure);
179 | }
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
--------------------------------------------------------------------------------
/Duke/camera.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERA_H
2 | #define CAMERA_H
3 |
4 | #include
5 | #include
6 | #include "Windows.h"//加载此头文件以解决大恒相机头文件类型未定义问题
7 | #include
8 | #include
9 |
10 | #define WM_SNAP_CHANGE (WM_USER + 100)
11 |
// Dual Daheng-camera wrapper: opens both devices in the constructor,
// configures them, and refreshes QImage views of the raw buffers on a timer.
class Camera : public QWidget
{
    Q_OBJECT
public:
    Camera();
    ~Camera();

    BYTE *rawBuffer_1;
    BYTE *rawBuffer_2;///< raw capture buffers for the two cameras
    QImage *image_1;
    QImage *image_2;

    int Width;
    int Height;
    BOOL m_bOpen; ///< initialization flag (device 1 opened successfully)

    void OnSnapexOpen();
    void OnSnapexStart();
    void OnSnapexStop();
    void OnSnapexClose();
    int OnSnapChange();
private slots:
    void CaptureFrame();
private:
    HHV camera_1;
    HHV camera_2;///< digital camera handles

    BOOL m_bStart; ///< capture-started flag
    long m_lHBlanking; ///< horizontal blanking
    long m_lVBlanking; ///< vertical blanking
    BITMAPINFO *m_pBmpInfo; ///< BITMAPINFO pointer used when displaying frames

    BYTE *m_pImageBuffer; ///< buffer for the Bayer-converted (RGB) image
    char m_chBmpBuf[2048]; ///< storage backing m_pBmpInfo
    /// Snap callback. It could also be a free function; as a class member it
    /// must be static.
    static int CALLBACK SnapThreadCallback(HV_SNAP_INFO *pInfo);
    /// Program the sensor exposure time.
    HVSTATUS SetExposureTime(int nWindWidth, long lTintUpper, long lTintLower, long HBlanking, HV_SNAP_SPEED SnapSpeed, HV_RESOLUTION Resolution);
};
51 |
52 | #endif // CAMERA_H
53 |
--------------------------------------------------------------------------------
/Duke/cameracalibration.cpp:
--------------------------------------------------------------------------------
1 | #include "cameracalibration.h"
2 | #include "utilities.h"
3 |
4 | #include
5 |
// Defaults: square size unset (findCornersInCamImg falls back to 20 mm),
// 14 calibration images per camera, not yet calibrated.
CameraCalibration::CameraCalibration()
{
    squareSize.width = 0;
    squareSize.height = 0;
    numOfCamImgs = 14;
    camCalibrated = false;
}
13 |
// Release all loaded calibration images (and the extrinsics image).
CameraCalibration::~CameraCalibration()
{
    unloadCameraImgs();
}
18 |
19 | void CameraCalibration::exportTxtFiles(const char *path, int CAMCALIB_OUT_PARAM)
20 | {
21 | cv::Mat out;
22 | switch (CAMCALIB_OUT_PARAM)
23 | {
24 | case CAMCALIB_OUT_MATRIX:
25 | out = camMatrix;
26 | break;
27 | case CAMCALIB_OUT_DISTORTION:
28 | out = distortion;
29 | break;
30 | case CAMCALIB_OUT_ROTATION:
31 | out = rotationMatrix;
32 | break;
33 | case CAMCALIB_OUT_TRANSLATION:
34 | out = translationVector;
35 | break;
36 | case CAMCALIB_OUT_FUNDAMENTAL:
37 | out = fundamentalMatrix;
38 | break;
39 | case CAMCALIB_OUT_STATUS:
40 | out = statusMatrix;
41 | break;
42 | case CAMCALIB_OUT_H1:
43 | out = H1;
44 | break;
45 | case CAMCALIB_OUT_H2:
46 | out = H2;
47 | break;
48 | #ifdef TEST_STEREO
49 | case STEREOCALIB_OUT_MATRIXL:
50 | out = camMatrixL;
51 | break;
52 | case STEREOCALIB_OUT_MATRIXR:
53 | out = camMatrixR;
54 | break;
55 | case STEREOCALIB_OUT_DISL:
56 | out = distortionL;
57 | break;
58 | case STEREOCALIB_OUT_DISR:
59 | out = distortionR;
60 | break;
61 | case STEREOCALIB_OUT_R:
62 | out = R;
63 | break;
64 | case STEREOCALIB_OUT_T:
65 | out = T;
66 | break;
67 | case STEREOCALIB_OUT_F:
68 | out = F;
69 | break;
70 | #endif
71 | }
72 | Utilities::exportMat(path, out);
73 | }
74 |
75 |
76 | void CameraCalibration::loadCameraImgs(QString fpath)
77 | {
78 | if (calibImgs.size())
79 | calibImgs.clear();
80 |
81 | for(int i = 0; i < numOfCamImgs-1; i++)
82 | {
83 | //这里假定每个相机的标定图片数为13,folderPath应包括前缀L、R
84 | QString path = fpath;
85 | path += QString::number(i+1) + ".png";
86 | cv::Mat img = cv::imread(path.toStdString());
87 | if(img.empty()){
88 | QMessageBox::warning(NULL, QObject::tr("Images Not Found"), QObject::tr("The camera calibration images are not found."));
89 | return;
90 | }
91 | calibImgs.push_back(img);
92 | }
93 |
94 | QString path = fpath;
95 | path += "1.png";//用第1幅图作为外部参数标定图像
96 | extrImg = cv::imread(path.toStdString());
97 | if(extrImg.empty()){
98 | QMessageBox::warning(NULL, QObject::tr("Image Not Found"), QObject::tr("The images for extrinsicts calibration are missing."));
99 | return;
100 | }
101 | if(!calibImgs[0].empty())
102 | camImageSize = calibImgs[0].size();
103 | }
104 |
105 | void CameraCalibration::unloadCameraImgs()
106 | {
107 | for(int i = 0; i < calibImgs.size(); i++)
108 | calibImgs[i].release();
109 | extrImg.release();
110 | }
111 |
112 |
// Undistort the sampled image points and map them back to pixel coordinates
// using the intrinsics (fx, fy, cx, cy).
// NOTE(review): element types below were stripped during extraction
// ('<...>' spans removed); restore the exact text from version control.
void CameraCalibration::undistortCameraImgPoints(cv::vector points_in,cv::vector &points_out)
{
    cv::undistortPoints(points_in,points_out,camMatrix,distortion);
    float fX = camMatrix.at(0,0);
    float fY = camMatrix.at(1,1);
    float cX = camMatrix.at(0,2);
    float cY = camMatrix.at(1,2);

    // NOTE(review): the next line is extraction residue -- the re-projection
    // loop of undistortCameraImgPoints (original lines 121-128) and the
    // signature of findCornersInCamImg (line 129) were fused when the text
    // between '<' and '>' was stripped. This region does not compile as-is;
    // recover it from version control before editing.
    for(int j=0; j &camCorners, cv::vector *objCorners)
{
    // Locate the circle-grid calibration pattern in one image and emit the
    // matching ideal 3-D board coordinates into *objCorners.
    cv::Mat img_grey;
    cv::Mat img_copy;
    img.copyTo(img_copy);

    if(!useSymmetric){
        numOfCornersX = 4;
        numOfCornersY = 11;
    }
    else{
        // Grid size of the standard symmetric stereo board; TODO: read these
        // from the settings instead of hard-coding.
        numOfCornersX = 11;
        numOfCornersY = 9;
    }

    bool found = false;
    cv::cvtColor(img, img_grey, CV_RGB2GRAY);
    img.copyTo(img_copy);

    // Use OpenCV's built-in circle-grid detector.
    cv::Size patternsize(numOfCornersX, numOfCornersY);

    cv::bitwise_not(img_grey, img_grey);// boards with white circles on black need inversion

    if(!useSymmetric)
        found = cv::findCirclesGrid(img_grey, patternsize, camCorners,cv::CALIB_CB_ASYMMETRIC_GRID);
    else
        found = cv::findCirclesGrid(img_grey, patternsize, camCorners,cv::CALIB_CB_SYMMETRIC_GRID);

    if(!found){
        return false;
    }
    // Disable the DEBUG block below for fully automatic operation.
#ifdef DEBUG
    cv::drawChessboardCorners(img_copy, patternsize, camCorners, found);
    int key = cv::waitKey(1);
    while(found)
    {
        cv::imshow("Calibration", img_copy);
        key = cv::waitKey(1);
        if(key==27)   // ESC rejects the detection
            found=false;
        if(key==13)   // ENTER accepts it
            break;
    }
#endif

    if(found){
        if(squareSize.height == 0){
            // Fall back to a 20 mm spacing when the caller never set one.
            squareSize.height = 20;
            squareSize.width = 20;
        }
        if(!useSymmetric){
            // Asymmetric grid: staggered columns, hence the (2*j + i%2) term.
            for (int i = 0; i < numOfCornersY; i++){
                for (int j = 0; j < numOfCornersX; j++){
                    objCorners->push_back(cv::Point3f(float((2*j + i % 2)*squareSize.width),float(i*squareSize.width),0));
                }
            }
        }
        else{
            // NOTE(review): the symmetric-grid loop body (original lines
            // 189-196) was lost in extraction; the surviving fragment below
            // must be restored from version control.
            for(int i = 0; ipush_back(p);
            }
        }
    }
    return true;
    }
    else
        return false;
}
204 |
205 |
// Detect board corners in every loaded calibration image.
// Returns 0 on success; a value > 0 indicates failure (either the 1-based
// index of the image to recapture, or numOfCamImgs+1 to abort).
int CameraCalibration::extractImageCorners()
{
    if(calibImgs.size() == 0)
        return numOfCamImgs+1;
    imgBoardCornersCam.clear();
    objBoardCornersCam.clear();

    for(size_t i = 0; i < calibImgs.size(); i++){
        // NOTE(review): the element types of the two vectors below were
        // stripped during extraction; restore from version control.
        cv::vector cCam;
        cv::vector cObj;
        bool found = findCornersInCamImg(calibImgs[i], cCam, &cObj );

        if(!found){
            // Ask the user whether to recapture the failing image (returning
            // its index) or to give up entirely.
            QString cam = (isleft)?("L"):("R");
            if(QMessageBox::warning(NULL,NULL,tr("Couldn't find circles in image ") + cam + QString::number(i+1)
                                    + ", Recapture?",
                                    QMessageBox::Yes,
                                    QMessageBox::Cancel) == QMessageBox::Yes){
                return i+1;
            }
            else
                return numOfCamImgs+1;// report the image that could not be read
        }

        if(cCam.size()){
            imgBoardCornersCam.push_back(cCam);
            objBoardCornersCam.push_back(cObj);
            if(isleft)
                imgBoardCornersCamL.push_back(cCam);
            else
                imgBoardCornersCamR.push_back(cCam);
        }
    }

    // For the fundamental-matrix estimation, sample the corner data of the
    // twelfth image pair (L12, R12) -- hence the hard-coded index 11.
    if (isleft){
        for (int i = 0; i < numOfCornersX*numOfCornersY; i++){
            findFunLeft.push_back(imgBoardCornersCam[11][i]);
        }
    }
    else{
        for (int i = 0; i < numOfCornersX*numOfCornersY; i++){
            findFunRight.push_back(imgBoardCornersCam[11][i]);
        }
    }
    return 0;
}
253 |
254 | int CameraCalibration::calibrateCamera()
255 | {
256 | //check if corners for camera calib has been extracted
257 | if(imgBoardCornersCam.size() != numOfCamImgs-1){
258 | if(!extractImageCorners()){
259 | return 0;
260 | }
261 | }
262 |
263 | cv::vector camRotationVectors;
264 | cv::vector camTranslationVectors;
265 |
266 | rms = cv::calibrateCamera(objBoardCornersCam, imgBoardCornersCam, camImageSize, camMatrix, distortion, camRotationVectors,camTranslationVectors,0,
267 | cv::TermCriteria( (cv::TermCriteria::COUNT)+(cv::TermCriteria::EPS), 30, DBL_EPSILON) );
268 | //rms = cv::calibrateCamera(objBoardCornersCam, imgBoardCornersCam, camImageSize, camMatrix,
269 | //distortion, camRotationVectors, camTranslationVectors, CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
270 | if(isleft){
271 | undistortCameraImgPoints(findFunLeft,findFunLeft);
272 | camMatrixL = camMatrix;
273 | distortionL = distortion;
274 | }
275 | else{
276 | undistortCameraImgPoints(findFunRight,findFunRight);
277 | camMatrixR = camMatrix;
278 | distortionR = distortion;
279 | }
280 | camCalibrated = true;
281 | return 1;
282 | }
283 |
284 |
// Solve the camera's extrinsic pose (rotationMatrix, translationVector) from
// the dedicated extrinsics image via solvePnP. Returns solvePnP's success flag.
// NOTE(review): the element types of the two vectors below were stripped
// during extraction; restore from version control.
bool CameraCalibration::findCameraExtrisics()
{
    cv::vector imgPoints;
    cv::vector objPoints3D;
    findCornersInCamImg(extrImg, imgPoints, &objPoints3D);
    cv::Mat rVec;
    //find extrinsics rotation & translation
    bool r = cv::solvePnP(objPoints3D,imgPoints,camMatrix,distortion,rVec,translationVector);
    // Convert the rotation vector into a 3x3 rotation matrix.
    cv::Rodrigues(rVec,rotationMatrix);
    return r;
}
296 |
// Estimate the fundamental matrix (RANSAC) from the matched left/right corner
// samples, then the uncalibrated rectification homographies H1/H2. The sample
// point sets are consumed (cleared) afterwards.
void CameraCalibration::findFundamental()
{
    fundamentalMatrix = cv::findFundamentalMat(findFunLeft, findFunRight, statusMatrix, cv::FM_RANSAC);
    //cv::stereoRectifyUncalibrated(findFunLeft, findFunRight, fundamentalMatrix, camImageSize, H1, H2);
    cv::stereoRectifyUncalibrated(cv::Mat(findFunLeft), cv::Mat(findFunRight), fundamentalMatrix, camImageSize, H1, H2);
    findFunLeft.clear();
    findFunRight.clear();
#ifdef TEST_STEREO
    // Full stereo calibration (R/T/E/F) from the per-side corner sets.
    rms = stereoCalibrate(objBoardCornersCam,imgBoardCornersCamL,imgBoardCornersCamR,camMatrixL,distortionL,camMatrixR,distortionR
                          ,camImageSize,R,T,E,F);
#endif
}
309 |
// Board square (circle-spacing) size in millimetres.
void CameraCalibration::setSquareSize(cv::Size size_in_mm)
{
    squareSize = size_in_mm;
}

cv::Size CameraCalibration::getSquareSize()
{
    return squareSize;
}

// Total calibration images per camera (loadCameraImgs reads num-1 of them).
void CameraCalibration::setNumberOfCameraImgs(int num)
{
    numOfCamImgs = num;
}

int CameraCalibration::getNumberOfCameraImgs()
{
    return numOfCamImgs;
}
329 |
330 |
--------------------------------------------------------------------------------
/Duke/cameracalibration.h:
--------------------------------------------------------------------------------
1 | #ifndef CAMERACALIBRATION_H
2 | #define CAMERACALIBRATION_H
3 |
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | //#define DEBUG
10 | #define TEST_STEREO
11 |
12 | #define CAMCALIB_OUT_MATRIX 1
13 | #define CAMCALIB_OUT_DISTORTION 2
14 | #define CAMCALIB_OUT_ROTATION 3
15 | #define CAMCALIB_OUT_TRANSLATION 4
16 | #define CAMCALIB_OUT_FUNDAMENTAL 5
17 | #define CAMCALIB_OUT_STATUS 6
18 | #define CAMCALIB_OUT_H1 7
19 | #define CAMCALIB_OUT_H2 8
20 | #define STEREOCALIB_OUT_MATRIXL 9
21 | #define STEREOCALIB_OUT_MATRIXR 10
22 | #define STEREOCALIB_OUT_DISL 11
23 | #define STEREOCALIB_OUT_DISR 12
24 | #define STEREOCALIB_OUT_R 13
25 | #define STEREOCALIB_OUT_T 14
26 | #define STEREOCALIB_OUT_F 15
27 |
28 | using namespace cv;
29 |
// Per-camera intrinsic/extrinsic calibration plus (TEST_STEREO) stereo
// calibration from circle-grid board images.
// NOTE(review): many declarations below lost their template arguments during
// extraction ('<...>' spans stripped); restore the exact text from VCS.
class CameraCalibration :public QObject
{
public:
    CameraCalibration();
    ~CameraCalibration();

    int calibrateCamera();

    void loadCameraImgs(QString fpath);
    void unloadCameraImgs();

    bool findCameraExtrisics();
    void findFundamental();

    void setSquareSize(cv::Size size);
    cv::Size getSquareSize();

    void setNumberOfCameraImgs(int num);
    int getNumberOfCameraImgs();
    void exportTxtFiles(const char *pathNfileName, int CAMCALIB_OUT_PARAM);
    void printData();
    int extractImageCorners();

    cv::Mat camMatrix;
    cv::Mat distortion;
    cv::Mat rotationMatrix;
    cv::Mat translationVector;
    cv::Mat fundamentalMatrix;
    cv::Mat H1;
    cv::Mat H2;
    cv::Mat statusMatrix;

    /****stereoCalib****/
    Mat camMatrixL;
    Mat camMatrixR;
    Mat distortionL;
    Mat distortionR;
    Mat R;
    Mat T;
    Mat E;
    Mat F;
    cv::vector> imgBoardCornersCamL;
    cv::vector> imgBoardCornersCamR;

    cv::vector> imgBoardCornersCam;

    cv::vector findFunLeft;// left-camera corner samples used to estimate the fundamental matrix
    cv::vector findFunRight;

    bool isleft;
    bool useSymmetric;// symmetric vs. asymmetric calibration board
    double rms;

private:
    //bool findCornersInCamImg(cv::Mat camImg,cv::vector *camCorners,cv::vector *objCorners);
    bool findCornersInCamImg(cv::Mat camImg,cv::vector &camCorners,cv::vector *objCorners);
    /*******disabled features
    void drawOutsideOfRectangle(cv::Mat img,cv::vector rectanglePoints, float color);
    cv::vector manualMarkCheckBoard(cv::Mat img);
    float markWhite(cv::Mat img);
    void manualMarkCalibBoardCorners(cv::Mat img,cv::vector &imgPoints_out, cv::vector &objPoints_out);
    void perspectiveTransformation(cv::vector corners_in,cv::Mat homoMatrix, cv::vector &points_out);
    ********/
    void undistortCameraImgPoints(cv::vector points_in,cv::vector &points_out);

    cv::vector> objBoardCornersCam;

    cv::Vector calibImgs;
    cv::Mat extrImg;

    cv::Size squareSize;
    int numOfCornersX;
    int numOfCornersY;
    int numOfCamImgs;
    cv::Size camImageSize;
    bool camCalibrated;
};
107 |
108 | #endif // CAMERACALIBRATION_H
109 |
--------------------------------------------------------------------------------
/Duke/dahengcamera.cpp:
--------------------------------------------------------------------------------
1 | #include "dahengcamera.h"
2 | #include "qmessagebox.h"
3 |
4 | const int XStart = 0;//图像左上角点在相机幅面1280X1024上相对于幅面左上角点坐标
5 | const int YStart = 0;
6 | const HV_RESOLUTION Resolution = RES_MODE0;
7 | const HV_SNAP_MODE SnapMode = CONTINUATION;
8 | const HV_BAYER_CONVERT_TYPE ConvertType = BAYER2RGB_NEIGHBOUR1;
9 | const HV_SNAP_SPEED SnapSpeed = HIGH_SPEED;
10 | int ADCLevel = ADC_LEVEL2;
11 |
// Devices are opened lazily in openDaHengCamera(), not here.
DaHengCamera::DaHengCamera(QObject *parent) :
    QObject(parent)
{
    cameraOpened=false;
}
17 |
// Tear down only if the devices were actually opened: stop and close the
// snap stream BEFORE releasing the device handles, then free the buffers.
DaHengCamera::~DaHengCamera()
{
    if(cameraOpened){
        OnSnapexStop();
        OnSnapexClose();
        HVSTATUS status = STATUS_OK;
        // Close the cameras and release the SDK's internal resources.
        status = EndHVDevice(m_hhv_1);
        status = EndHVDevice(m_hhv_2);
        // Reclaim the image buffers.
        delete []m_pRawBuffer_1;
        delete []m_pRawBuffer_2;
    }
}
32 |
33 | ///---------------------相机-----------------------///
34 | void DaHengCamera::daHengExposure(int leftexposure, int rightexposure)
35 | {
36 | switch (leftexposure) {
37 | case 0:
38 | ADCLevel = ADC_LEVEL3;
39 | break;
40 | case 1:
41 | ADCLevel = ADC_LEVEL2;
42 | break;
43 | case 2:
44 | ADCLevel = ADC_LEVEL1;
45 | break;
46 | case 3:
47 | ADCLevel = ADC_LEVEL0;
48 | break;
49 | }
50 | HVADCControl(m_hhv_1, ADC_BITS, ADCLevel);
51 | switch (rightexposure) {
52 | case 0:
53 | ADCLevel = ADC_LEVEL3;
54 | break;
55 | case 1:
56 | ADCLevel = ADC_LEVEL2;
57 | break;
58 | case 2:
59 | ADCLevel = ADC_LEVEL1;
60 | break;
61 | case 3:
62 | ADCLevel = ADC_LEVEL0;
63 | break;
64 | }
65 | HVADCControl(m_hhv_2, ADC_BITS, ADCLevel);
66 | }
67 |
68 | void DaHengCamera::openDaHengCamera(int camerawidth, int cameraheight)
69 | {
70 | cam_w = camerawidth;
71 | cam_h = cameraheight;
72 | if (!cameraOpened){
73 | HVSTATUS status_1 = STATUS_OK;
74 | HVSTATUS status_2 = STATUS_OK;
75 | m_pRawBuffer_1 = NULL;
76 | m_pRawBuffer_2 = NULL;
77 |
78 | status_1 = BeginHVDevice(1, &m_hhv_1);
79 | status_2 = BeginHVDevice(2, &m_hhv_2);
80 | if(status_1==STATUS_OK&&status_2==STATUS_OK)
81 | cameraOpened = true;
82 | else{
83 | cameraOpened = false;
84 | QMessageBox::warning(NULL, tr("Cameras not found"), tr("Make sure two Daheng cameras have connected to the computer."));
85 | return;
86 | }
87 | HVSetResolution(m_hhv_1, Resolution);//Set the resolution of cameras
88 | HVSetResolution(m_hhv_2, Resolution);
89 |
90 | HVSetSnapMode(m_hhv_1, SnapMode);//Snap mode include CONTINUATION、TRIGGER
91 | HVSetSnapMode(m_hhv_2, SnapMode);
92 |
93 | HVADCControl(m_hhv_1, ADC_BITS, ADCLevel);//设置ADC的级别
94 | HVADCControl(m_hhv_2, ADC_BITS, ADCLevel);
95 |
96 | HVTYPE type = UNKNOWN_TYPE;//获取设备类型
97 | int size = sizeof(HVTYPE);
98 | HVGetDeviceInfo(m_hhv_1,DESC_DEVICE_TYPE, &type, &size);//由于两相机型号相同,故只获取一个
99 |
100 | HVSetOutputWindow(m_hhv_1, XStart, YStart, camerawidth, cameraheight);
101 | HVSetOutputWindow(m_hhv_2, XStart, YStart, camerawidth, cameraheight);
102 |
103 | HVSetSnapSpeed(m_hhv_1, SnapSpeed);//设置采集速度
104 | HVSetSnapSpeed(m_hhv_2, SnapSpeed);
105 |
106 | m_pRawBuffer_1 = new BYTE[camerawidth * cameraheight];
107 | m_pRawBuffer_2 = new BYTE[camerawidth * cameraheight];
108 | }
109 | OnSnapexOpen();
110 | OnSnapexStart();
111 | }
112 |
113 | void DaHengCamera::daHengSnapShot(int camNo)
114 | {
115 | //m_pRawBuffer_1 = new BYTE[cam_w * cam_h];
116 | //m_pRawBuffer_2 = new BYTE[cam_w * cam_h];
117 | (camNo==1)?(HVSnapShot(m_hhv_1, &m_pRawBuffer_1, 1)):(HVSnapShot(m_hhv_2, &m_pRawBuffer_2, 1));
118 | }
119 |
120 | void DaHengCamera::OnSnapexOpen()
121 | {
122 | HVSTATUS status = STATUS_OK;
123 | status = HVOpenSnap(m_hhv_1, SnapThreadCallback, this);
124 | status = HVOpenSnap(m_hhv_2, SnapThreadCallback, this);
125 | }
126 |
127 | void DaHengCamera::OnSnapexStart()
128 | {
129 | HVSTATUS status = STATUS_OK;
130 | //ppBuf_1[0] = m_pRawBuffer_1;
131 | //ppBuf_2[0] = m_pRawBuffer_2;
132 | status = HVStartSnap(m_hhv_1, &m_pRawBuffer_1,1);
133 | status = HVStartSnap(m_hhv_2, &m_pRawBuffer_2,1);
134 | }
135 |
136 | void DaHengCamera::OnSnapexStop()
137 | {
138 | HVSTATUS status = STATUS_OK;
139 | status = HVStopSnap(m_hhv_1);
140 | status = HVStopSnap(m_hhv_2);
141 | }
142 |
143 | void DaHengCamera::OnSnapexClose()
144 | {
145 | HVSTATUS status = STATUS_OK;
146 | status = HVCloseSnap(m_hhv_1);
147 | status = HVCloseSnap(m_hhv_2);
148 | }
149 |
// Stop acquisition and close the snap channels; the device handles stay open
// (they are released in the destructor).
void DaHengCamera::closeCamera()
{
    OnSnapexStop();
    OnSnapexClose();
}
155 |
// SDK per-frame callback (static, as required for a member callback).
// No per-frame processing; returning 1 tells the driver to keep snapping.
int CALLBACK DaHengCamera::SnapThreadCallback(HV_SNAP_INFO *pInfo)
{
    return 1;
}
160 |
--------------------------------------------------------------------------------
/Duke/dahengcamera.h:
--------------------------------------------------------------------------------
1 | #ifndef DAHENGCAMERA_H
2 | #define DAHENGCAMERA_H
3 |
4 | #include
5 |
6 | #include "Windows.h"//加载此头文件以解决大恒相机头文件类型未定义问题
7 | #include
8 | #include
9 |
// Wrapper around a pair of Daheng industrial cameras: lazy open/configure,
// per-camera exposure (ADC level) control, and synchronous single-frame grabs.
class DaHengCamera : public QObject
{
    Q_OBJECT
public:
    DaHengCamera(QObject *parent = 0);
    ~DaHengCamera();

    bool cameraOpened;

    void daHengExposure(int leftexposure, int rightexposure);
    void openDaHengCamera(int camerawidth, int cameraheight);
    void daHengSnapShot(int camNo);
    void closeCamera();

    // Raw frame buffers, allocated in openDaHengCamera().
    BYTE *m_pRawBuffer_1;
    BYTE *m_pRawBuffer_2;

private:
    // Device handles for the two cameras.
    HHV m_hhv_1;
    HHV m_hhv_2;

    int cam_w;
    int cam_h;

    static int CALLBACK SnapThreadCallback(HV_SNAP_INFO *pInfo);

    /// Camera helper functions (snap channel lifecycle).
    void OnSnapexOpen();
    void OnSnapexStart();
    void OnSnapexStop();
    void OnSnapexClose();

};
43 |
44 | #endif // DAHENGCAMERA_H
45 |
--------------------------------------------------------------------------------
/Duke/deployment.pri:
--------------------------------------------------------------------------------
# Qt Creator deployment rules: pick the install path for the target binary per
# platform (embedded Android without SDK, Android APK lib dirs per ABI, QNX,
# and generic Unix) and register it with INSTALLS.
android-no-sdk {
    target.path = /data/user/qt
    export(target.path)
    INSTALLS += target
} else:android {
    # APK native-library directory depends on the ABI being built.
    x86 {
        target.path = /libs/x86
    } else: armeabi-v7a {
        target.path = /libs/armeabi-v7a
    } else {
        target.path = /libs/armeabi
    }
    export(target.path)
    INSTALLS += target
} else:unix {
    # Only set a path if the project did not already define one.
    isEmpty(target.path) {
        qnx {
            target.path = /tmp/$${TARGET}/bin
        } else {
            target.path = /opt/$${TARGET}/bin
        }
        export(target.path)
    }
    INSTALLS += target
}

export(INSTALLS)
--------------------------------------------------------------------------------
/Duke/dotmatch.h:
--------------------------------------------------------------------------------
1 | #ifndef DOTMATCH_H
2 | #define DOTMATCH_H
3 |
4 | //调试用宏定义
5 | //#define DEBUG
6 | #define USE_ADAPTIVE_THRESHOLD
7 | #define USE_FOUR_POINT
8 | //#define TEST_SURF
9 |
10 | // Qt
11 | #include
12 |
13 | // OpenCV
14 | #include
15 | #include
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 |
23 | // STL
24 | #include
25 |
26 | // SLR
27 | #include "reconstruct.h"
28 | #include "virtualcamera.h"
29 | #include "utilities.h"
30 | #include "blobdetector.h"
31 | #include "manualmatch.h"
32 |
33 | // MRPT
34 | #include
35 |
36 | using namespace cv;
37 | using namespace std;
38 |
// Feature-dot detection and cross-scan matching: finds marker dots in stereo
// image pairs, matches them between scans (triangle constellations, optional
// manual marking), and accumulates the scan-to-scan rigid transforms.
// NOTE(review): many declarations below lost their template arguments during
// extraction ('<...>' spans stripped); restore the exact text from VCS.
class DotMatch : public QObject
{
    Q_OBJECT
public:

    DotMatch(QObject *parent = 0, QString projectPath = NULL, bool useManual = true);
    ~DotMatch();

    ManualMatch *mm;

    vector> findDot(Mat image);
    bool matchDot(Mat limage, Mat rimage);
    void setUpManual(Mat LImage, Mat RImage);// initialize the manual-marking window
    void activeManual();// show the manual-marking window
    void finishMatch();
    int OSTU_Region(cv::Mat &image);

    vector> dotInOrder;

    // Maps newly found points onto already-known points; for a point that is
    // in fact a known one:
    //   x = its index in dotFeature
    //   y = its index in dotPosition (even or odd)
    cv::vector correspondPointEven;
    cv::vector correspondPointOdd;

    // Data needed to mark and display the dots; the six ints per entry are:
    // left x, y, right x, y, known flag (0,1,0 means unknown),
    // and, if known, the dot's unique id.
    vector> dotForMark;

    int bwThreshold;
    int blocksize;// binarization parameter supplied by the user
    bool firstFind;
    int scanSN;

private:
    QString path;
    bool useManualMatch;

    bool triangleCalculate();
    cv::vector > calFeature(cv::vector dotP);
    bool dotClassify(cv::vector > featureTemp);
    bool FTTM(cv::vector &correspondPoint, cv::vector dotPositionCurrent, cv::vector dotPositionFormer);
    void updateDot(cv::vector &correspondPoint, cv::vector &dotPositionCurrent, cv::vector dotPositionFormer);
    vector calNeighbor(vector > input, int num);
    bool checkNeighbor(vector referance, vector needcheck);
    void calMatrix();
    //void hornTransform(double &data[], cv::vector target, cv::vector move);
    void markPoint();

    bool isBelongTo(size_t e, vector C);// does C contain element e?

    vector subPixel(Mat img, vector> vec);
    Reconstruct *rc;
    BlobDetector *bd;

    Mat fundMat;
    Mat Homo1;
    Mat Homo2;

    // Points that fail triangulation must be removed; this stores their
    // indices within the current scan.
    vector dotRemove;

    Mat outR;// rotation from the later scan to the earlier one
    Mat outT;// translation from the later scan to the earlier one
    Mat outRInv;// rotation from the earlier scan to the later one
    Mat outTInv;// inverse transform of outT (labelled "rotation" in the original comment -- presumably a typo)
    Mat matRotation;// accumulated (generalized) rotation matrix
    Mat matTransform;// accumulated (generalized) translation vector
    cv::vector dotPositionEven;// absolute coordinates of dots from even-numbered scans
    cv::vector dotPositionOdd;// absolute coordinates of dots from odd-numbered scans
    cv::vector> dotFeature;
    vector> neighborFeature;

private slots:
    void onfinishmanual();

signals:
    // Emitted by onfinishmanual(); tells MainWindow to run its
    // finishmanualmatch() slot, which in turn calls finishMatch().
    void receivedmanualmatch();

};
119 |
// A triangle over three feature dots: vertex indices plus the three pairwise
// edge lengths; used to compare dot constellations between scans.
class Triangle{
public:
    Triangle(int Vertex_0, int Vertex_1, int Vertex_2, float distance_12, float distance_02, float distance_01);

    // Compare two triangles for (approximate) equality.
    // NOTE(review): the element type of `vector &corr` was stripped during
    // extraction; restore from version control.
    static bool copmareTriangle(Triangle tri_known, Triangle tri_unknown, vector &corr, float &error);
    static float calDistance(Point3f point_1,Point3f point_2);

    int ver_0;
    int ver_1;
    int ver_2;
    float dis_0;
    float dis_1;
    float dis_2;
};
134 |
135 | #endif // DOTMATCH_H
136 |
--------------------------------------------------------------------------------
/Duke/focusassistant.cpp:
--------------------------------------------------------------------------------
1 | #include "focusassistant.h"
2 | #include "ui_focusassistant.h"
3 |
// Build the focus-assistant window: wire the camera-selection radio button to
// displayLeft, and make the OK button both emit winhide() (for the owner) and
// hide this window.
FocusAssistant::FocusAssistant(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::FocusAssistant)
{
    ui->setupUi(this);

    // Initialize from the radio button's designer-set default.
    displayLeft = checkstate();
    connect(ui->leftCamera, SIGNAL(toggled(bool)), this, SLOT(checkchange()));
    connect(ui->okButton, SIGNAL(clicked()), SIGNAL(winhide()));
    connect(ui->okButton, SIGNAL(clicked()), this, SLOT(hide()));
    //connect(this, SIGNAL(destroyed()), SIGNAL(winhide()));
}
16 |
// Release the generated UI object.
FocusAssistant::~FocusAssistant()
{
    delete ui;
}
21 |
22 | bool FocusAssistant::checkstate()
23 | {
24 | return (ui->leftCamera->isChecked())?(true):(false);
25 | }
26 |
// Slot for the radio button's toggled() signal: keep displayLeft in sync.
void FocusAssistant::checkchange()
{
    displayLeft = checkstate();
}
31 |
// Show one frame in the live-view label.
void FocusAssistant::playImage(QPixmap img)
{
    ui->imageDisplay->setPixmap(img);
}
36 |
--------------------------------------------------------------------------------
/Duke/focusassistant.h:
--------------------------------------------------------------------------------
1 | #ifndef FOCUSASSISTANT_H
2 | #define FOCUSASSISTANT_H
3 |
4 | #include
5 |
6 | namespace Ui {
7 | class FocusAssistant;
8 | }
9 |
// Live-view window used while focusing the cameras: displays frames via
// playImage() and lets the user pick which camera feeds the view.
class FocusAssistant : public QWidget
{
    Q_OBJECT

public:
    explicit FocusAssistant(QWidget *parent = 0);
    ~FocusAssistant();

    // True while the left camera is selected as the live-view source.
    bool displayLeft;
    // Show one frame in the live-view label.
    void playImage(QPixmap img);

private:
    Ui::FocusAssistant *ui;
    // Current state of the "Left Camera" radio button.
    bool checkstate();
signals:
    // Emitted when the user clicks OK (the window also hides itself).
    void winhide();
private slots:
    // Keeps displayLeft in sync with the radio buttons.
    void checkchange();
};
29 |
30 | #endif // FOCUSASSISTANT_H
31 |
--------------------------------------------------------------------------------
/Duke/focusassistant.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | FocusAssistant
4 |
5 |
6 |
7 | 0
8 | 0
9 | 835
10 | 592
11 |
12 |
13 |
14 | Focus Assistant
15 |
16 |
17 |
18 | :/splash.png:/splash.png
19 |
20 |
21 | -
22 |
23 |
24 |
25 | 0
26 | 0
27 |
28 |
29 |
30 |
31 | 1024
32 | 800
33 |
34 |
35 |
36 | Live View
37 |
38 |
39 | true
40 |
41 |
42 |
43 | -
44 |
45 |
-
46 |
47 |
-
48 |
49 |
50 |
51 | 0
52 | 0
53 |
54 |
55 |
56 | Select Camera
57 |
58 |
59 |
-
60 |
61 |
62 |
63 | 0
64 | 0
65 |
66 |
67 |
68 | Left Camera
69 |
70 |
71 | true
72 |
73 |
74 |
75 | -
76 |
77 |
78 |
79 | 0
80 | 0
81 |
82 |
83 |
84 | Right Camera
85 |
86 |
87 |
88 |
89 |
90 |
91 | -
92 |
93 |
94 |
95 | 0
96 | 0
97 |
98 |
99 |
100 | Confirm
101 |
102 |
103 |
104 |
105 |
106 | -
107 |
108 |
109 | Qt::Horizontal
110 |
111 |
112 |
113 | 402
114 | 20
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/Duke/glwidget.cpp:
--------------------------------------------------------------------------------
1 | #include "glwidget.h"
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 |
GLfloat scale = 1;        // global zoom factor applied in draw(); reset to 1 when wheel zoom leaves (0, 10)
GLfloat percent = 0.1;    // shrink factor applied to every vertex coordinate in draw()
GLfloat speed = 20;       // panning speed for middle-button drags
bool hasModel = false;    // set once a PLY model has been loaded successfully
glm::mat4 transform_camera(1.0f); // camera pose: the camera's position/orientation in world coordinates
glm::mat4 transform_model(1.0f);  // model transform: object coordinates -> world coordinates
glm::mat4 model_view_matrix;

bool keyPressed[2] = {false, false};
17 |
// Create the viewer as a frameless, multisampled GL widget with double and
// depth buffering; state starts at no rotation/offset, 1-px points, and a
// bluish background. The PLY loader is owned by this widget.
GLWidget::GLWidget(QWidget *parent) :
    QGLWidget(QGLFormat(QGL::SampleBuffers), parent, 0, Qt::FramelessWindowHint)
{
    setFormat(QGLFormat(QGL::DoubleBuffer | QGL::DepthBuffer));
    rotationX = 0.0;
    rotationY = 0.0;
    rotationZ = 0.0;
    offsetX = 0.0;
    offsetY = 0.0;
    pointSize = 1;
    backColor = QColor::fromCmykF(0.5, 0.4, 0.4, 0.2);
    createGradient();
    plyloader = new PlyLoader(this);
}
32 |
GLWidget::~GLWidget()
{
    // Explicit delete of a child QObject is safe: it unregisters itself from
    // the parent, so Qt will not delete it a second time.
    delete plyloader;
}
37 |
38 | void GLWidget::LoadModel(QString loadpath)
39 | {
40 | hasModel = plyloader->LoadModel(loadpath);
41 | if(hasModel)
42 | updateGL();
43 | else
44 | return;
45 | }
46 |
// One-time GL setup: perspective projection, background color, smooth
// shading, back-face culling, lighting, depth test, alpha blending and
// normal renormalization. The camera starts 40 units back on +Z looking at
// the origin with Y flipped; the model is offset to (-10,-10,0).
void GLWidget::initializeGL()
{
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(30, float(this->width())/this->height(), 1.0, 1.0e10);

    qglClearColor(backColor);
    glShadeModel(GL_SMOOTH);
    glEnable(GL_CULL_FACE);
    SetupLights();
    glEnable(GL_DEPTH_TEST);

    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnable(GL_NORMALIZE);

    // transform_camera stores the camera pose, i.e. the inverse of the view
    // matrix produced by lookAt
    transform_camera = glm::affineInverse(glm::lookAt(glm::vec3(0,0,40), glm::vec3(0,0,0), glm::vec3(0,-1,0)));
    transform_model = glm::translate(glm::vec3(-10,-10,0));
}
66 |
67 | void GLWidget::resizeGL(int width, int height)
68 | {
69 | glViewport(0, 0, this->width(), this->height());
70 | glMatrixMode(GL_PROJECTION);
71 | glLoadIdentity();
72 | gluPerspective(28.0,float(this->width())/this->height(), 1.0, 1.0e10);
73 | }
74 |
75 | void GLWidget::paintGL()
76 | {
77 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
78 | model_view_matrix = glm::affineInverse(transform_camera);
79 | glMatrixMode(GL_MODELVIEW);
80 | glLoadMatrixf(&model_view_matrix[0][0]);
81 | model_view_matrix *= transform_model;
82 | glMatrixMode(GL_MODELVIEW);
83 | glLoadMatrixf(&model_view_matrix[0][0]);
84 | draw();
85 | }
86 |
87 | void GLWidget::draw()
88 | {
89 | GLfloat sizes[]=
90 | {
91 | scale, scale, scale
92 | };
93 | glRotatef(rotationX, 1.0, 0.0, 0.0);
94 | glRotatef(rotationY, 0.0, 1.0, 0.0);
95 | glRotatef(rotationZ, 0.0, 0.0, 1.0);
96 | glScalef(sizes[0], sizes[1], sizes[2]);
97 |
98 | long int total = plyloader->m_totalConnectedPoints;
99 | for(int p = 0; p < total; ++p)
100 | {
101 | glPointSize(pointSize);
102 | glBegin(GL_POINTS);
103 | qglColor(QColor::fromCmyk(255,0,255,0));
104 | glVertex3f((plyloader->mp_vertexXYZ[p*3]) * percent,
105 | (plyloader->mp_vertexXYZ[p*3 + 1]) * percent,
106 | (plyloader->mp_vertexXYZ[p*3 + 2]) * percent);
107 | glEnd();
108 | }
109 | glFlush();
110 | }
111 |
// Remember the press position as the anchor for subsequent drag deltas.
void GLWidget::mousePressEvent(QMouseEvent *event)
{
    lastPos = event->pos();
}
116 |
117 | void GLWidget::mouseMoveEvent(QMouseEvent *event)
118 | {
119 |
120 | GLfloat dx = GLfloat(event->x() - lastPos.x()) / width();
121 | GLfloat dy = GLfloat(event->y() - lastPos.y()) / height();
122 |
123 | if(event->buttons() == Qt::LeftButton){
124 | rotationX += 180 * dy;
125 | rotationY += 180 * dx;
126 | updateGL();
127 | }
128 | else if(event->buttons() == Qt::RightButton){
129 | rotationX += 180 * dx;
130 | rotationZ += 180 * dy;
131 | updateGL();
132 | }
133 | else if(event->buttons()==Qt::MiddleButton) {
134 | transform_camera *= glm::translate(glm::vec3(-speed*dx,speed*dy,0));
135 | updateGL();
136 | }
137 | lastPos = event->pos();
138 | }
139 |
140 | void GLWidget::mouseDoubleClickEvent(QMouseEvent * /*event*/)
141 | {
142 | QColor color = QColorDialog::getColor(backColor, this);
143 | if(color.isValid())
144 | {
145 | backColor = color;
146 | qglClearColor(backColor);
147 | }
148 | }
149 |
150 | void GLWidget::wheelEvent(QWheelEvent *event)
151 | {
152 | double numDegrees = - event->delta() / 3600.0;
153 | scale += numDegrees;
154 | if(scale < 10 && scale > 0)
155 | {
156 | updateGL();
157 | }
158 | else
159 | {
160 | scale = 1;
161 | }
162 | }
163 |
164 |
// Configure a single positional light (GL_LIGHT0) and color-tracked
// front-face material properties.
void GLWidget::SetupLights()
{
    GLfloat ambientLight[] = {0.6f, 0.6f, 0.6f, 1.0f};//ambient component
    GLfloat diffuseLight[] = {0.7f, 0.7f, 0.7f, 1.0f};//diffuse component
    GLfloat specularLight[] = {0.9f, 0.9f, 0.9f, 1.0f};//specular component
    GLfloat lightPos[] = {50.0f, 80.0f, 60.0f, 1.0f};//light position (w=1: positional)

    glEnable(GL_LIGHTING); //enable lighting
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); //set the ambient light
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); //set the diffuse light
    glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight); //set the specular light
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos); //set the light position
    glEnable(GL_LIGHT0); //turn on light 0

    glEnable(GL_COLOR_MATERIAL); //let glColor drive the material color
    glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE); //faces affected by color tracking
    glMaterialfv(GL_FRONT, GL_SPECULAR, specularLight); //material response to specular light
    glMateriali(GL_FRONT, GL_SHININESS, 100); //specular exponent
}
184 |
185 |
186 | void GLWidget::createGradient()
187 | {
188 | gradient.setCoordinateMode(QGradient::ObjectBoundingMode);
189 | gradient.setCenter(0.45, 0.50);
190 | gradient.setFocalPoint(0.40, 0.45);
191 | gradient.setColorAt(0.0, QColor(105, 146, 182));
192 | gradient.setColorAt(0.4, QColor(81, 113, 150));
193 | gradient.setColorAt(0.8, QColor(16, 56, 121));
194 | }
195 |
196 |
// Fill the widget rectangle with the pre-built radial gradient.
void GLWidget::drawBackground(QPainter *painter)
{
    painter->setPen(Qt::NoPen);
    painter->setBrush(gradient);
    painter->drawRect(rect());
}
203 |
// Change the rendered point size (in pixels) and repaint.
void GLWidget::setPoint(int psize)
{
    pointSize = psize;
    updateGL();
}
209 |
210 |
// Arcball ("virtual trackball") rotation: the screen drag from (x1,y1) to
// (x2,y2) is projected onto a sphere of radius r centered on the widget, and
// the rotation between the two projected points is applied to Tmodel.
void GLWidget::drag_ball(int x1, int y1, int x2, int y2, glm::mat4& Tmodel, glm::mat4& Tcamera)
{
    float r = (float)std::min(this->height(), this->width())/3;
    float r2 = r*0.9f;
    // drag endpoints relative to the widget center
    float ax = x1 - (float)this->width()/2;
    float ay = y1 - (float)this->height()/2;
    float bx = x2 - (float)this->width()/2;
    float by = y2 - (float)this->height()/2;
    float da = std::sqrt(ax*ax+ay*ay);
    float db = std::sqrt(bx*bx+by*by);
    // if either endpoint lies outside 90% of the radius, shift both inward so
    // the square roots in the sphere lift below stay real
    if(std::max(da,db)>r2){
        float dx, dy;
        if(da>db){
            dx = (r2/da-1)*ax;
            dy = (r2/da-1)*ay;
        }else{
            dx = (r2/db-1)*bx;
            dy = (r2/db-1)*by;
        }
        ax += dx; ay +=dy; bx += dx; by += dy;
    }
    // lift both 2-D points onto the sphere surface
    float az = std::sqrt( r*r-(ax*ax+ay*ay) );
    float bz = std::sqrt( r*r-(bx*bx+by*by) );
    glm::vec3 a = glm::vec3(ax,ay,az);
    glm::vec3 b = glm::vec3(bx,by,bz);
    // angle between the lifted points; the rotation axis is their cross product
    float theta = std::acos(glm::dot(a,b)/(r*r));
    glm::vec3 v2 = glm::cross(a,b);
    // v2 is a vector in the view (camera) frame; v is v2 expressed in object
    // coordinates
    glm::vec3 v = glm::vec3(
        glm::affineInverse(Tmodel) * Tcamera
        * glm::vec4(v2[0],v2[1],v2[2],0) );
    Tmodel *= glm::rotate( theta*180/3.14f, v );
}
244 |
--------------------------------------------------------------------------------
/Duke/glwidget.h:
--------------------------------------------------------------------------------
1 | #ifndef GLWIDGET_H
2 | #define GLWIDGET_H
3 |
4 | #include
5 | #include "plyloader.h"
6 | #include
7 | #include
8 | #include
9 |
// OpenGL viewer for PLY point clouds: left/right drag rotates, middle drag
// pans, the wheel zooms, and a double-click picks the background color.
class GLWidget : public QGLWidget
{
    Q_OBJECT
public:
    explicit GLWidget(QWidget *parent = 0);
    ~GLWidget();

    int pointSize;                        // rendered point size in pixels
    void LoadModel(QString loadpath);     // load a PLY file; repaints on success
    void setPoint(int psize);             // change pointSize and repaint

protected:
    void initializeGL();
    void resizeGL(int width = 300, int height =300);
    void paintGL();

    void mousePressEvent(QMouseEvent *event);
    void mouseMoveEvent(QMouseEvent *event);
    void mouseDoubleClickEvent(QMouseEvent *event);
    void wheelEvent(QWheelEvent *event);

private:
    void draw();                          // render the loaded point cloud
    void SetupLights();                   // configure GL_LIGHT0 and material
    void createGradient();                // build the background gradient
    void drawBackground(QPainter *painter);
    GLfloat rotationX;                    // accumulated rotations in degrees
    GLfloat rotationY;
    GLfloat rotationZ;
    GLfloat offsetX;                      // NOTE(review): initialized but never used in this file
    GLfloat offsetY;
    QPoint lastPos;                       // anchor point for drag deltas
    PlyLoader *plyloader;                 // owned; deleted in the destructor

    QRadialGradient gradient;
    QColor backColor;                     // GL clear color

    // arcball helper: apply the rotation implied by a drag (x1,y1)->(x2,y2)
    void drag_ball(int x1, int y1, int x2, int y2, glm::mat4& Tmodel, glm::mat4& Tcamera);
};
49 |
50 | #endif // GLWIDGET_H
51 |
--------------------------------------------------------------------------------
/Duke/graycodes.cpp:
--------------------------------------------------------------------------------
1 | #include "graycodes.h"
2 |
3 | GrayCodes::GrayCodes(int scanW, int scanH, bool useepi)
4 | {
5 | for (int i=0; i(i,j) = pixel_color;
77 | if(pixel_color > 0)
78 | pixel_color = 0;
79 | else
80 | pixel_color = 255;
81 | grayCodes[2*numOfColImgs-2*k+1].at(i,j) = pixel_color;
82 | }
83 | prevRem=rem;
84 | }
85 | }
86 |
87 | if (!useEpi){//如果不使用极限校正,则也生成行条纹
88 | for (int i=0;i < height;i++){
89 | int rem=0, num=i, prevRem=i%2;
90 | for (int k=0; k(i,j) = pixel_color;
102 | if(pixel_color > 0)
103 | pixel_color = 0;
104 | else
105 | pixel_color = 255;
106 | grayCodes[2*numOfRowImgs-2*k+2*numOfColImgs+1].at(i, j) = pixel_color;
107 | }
108 | prevRem=rem;
109 | }
110 | }
111 | }
112 | }
113 |
// Convert a Gray-code bit sequence (most significant bit first) to its
// decimal value using the standard decoding recurrence:
// binary[0] = gray[0]; binary[i] = binary[i-1] XOR gray[i].
// NOTE(review): the vector's element type was lost in extraction — presumably
// cv::vector<bool>; confirm against the header.
int GrayCodes::grayToDec(cv::vector gray)//convert a gray code sequence to a decimal number
{
    int dec = 0;
    bool tmp = gray[0];
    if(tmp)
        dec += (int) pow((float)2, int(gray.size() - 1));
    for(int i = 1; i < gray.size(); i++){
        tmp=Utilities::XOR(tmp,gray[i]);// running XOR converts Gray to binary bit by bit
        if(tmp)
            dec+= (int) pow((float)2,int (gray.size() - i - 1) );
    }
    return dec;
}
127 |
--------------------------------------------------------------------------------
/Duke/graycodes.h:
--------------------------------------------------------------------------------
1 | #ifndef GRAYCODES_H
2 | #define GRAYCODES_H
3 |
4 | #include
5 | #include
6 | using std::ofstream;
7 | #include
8 | #include
9 | #include
10 | #include "utilities.h"
11 |
12 | #define GRAY_MAX_NUM 44
13 |
// Generates the Gray-code stripe patterns used for structured-light scanning
// and decodes Gray-code bit sequences. Row stripes are generated only when
// epipolar rectification is not used.
class GrayCodes
{
public:
    GrayCodes(int scanW, int scanH, bool useepi);
    ~GrayCodes();

    cv::Mat grayCodes[GRAY_MAX_NUM];
    bool useEpi;//whether epipolar rectification is applied (if so, row stripes are skipped)

    int getNumOfImgs();

    void generateGrays();

    // NOTE(review): the vector's element type was lost in extraction —
    // presumably cv::vector<bool>; confirm against the implementation.
    static int grayToDec(cv::vector gray);
    int getNumOfRowBits();
    int getNumOfColBits();

protected:

    void calNumOfImgs();
    void allocMemForImgs();

    bool imgsLoaded;
    int numOfImgs;
    int numOfRowImgs;   // number of row-stripe images
    int numOfColImgs;   // number of column-stripe images
    int currentImgNum;
    int height;         // scan area height
    int width;          // scan area width
};
44 |
45 | #endif // GRAYCODES_H
46 |
--------------------------------------------------------------------------------
/Duke/imageviewer.cpp:
--------------------------------------------------------------------------------
1 | #include "imageviewer.h"
2 | #include "ui_imageviewer.h"
3 |
4 | #include
5 | #include
6 |
7 | bool imageseted = false;
8 |
// Build the UI and wire the context-menu "Save Image" action to saveimage().
ImageViewer::ImageViewer(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::ImageViewer)
{
    ui->setupUi(this);
    connect(ui->actionSave_Image,SIGNAL(triggered()),this,SLOT(saveimage()));
}
16 |
ImageViewer::~ImageViewer()
{
    delete ui; // release the generated UI tree
}
21 |
// Display the given image and remember that a pixmap is now available to save.
void ImageViewer::showImage(QPixmap img)
{
    ui->imageLabel->setPixmap(img);
    imageseted=true;
}
27 |
28 | void ImageViewer::contextMenuEvent(QContextMenuEvent *)
29 | {
30 | QList actions;
31 | actions.push_back(ui->actionSave_Image);
32 | QCursor cur=this->cursor();
33 | QMenu *menu=new QMenu(this);
34 | menu->addActions(actions);
35 | menu->exec(cur.pos());
36 | }
37 |
38 | void ImageViewer::saveimage()
39 | {
40 | if (imageseted){
41 | QString dir = QFileDialog::getSaveFileName(this,tr("Save Image"),
42 | "/home/untitled.png",
43 | tr("Images (*.png *.jpg)"));
44 | ui->imageLabel->pixmap()->save(dir);
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/Duke/imageviewer.h:
--------------------------------------------------------------------------------
1 | #ifndef IMAGEVIEWER_H
2 | #define IMAGEVIEWER_H
3 |
4 | #include
5 |
6 | namespace Ui {
7 | class ImageViewer;
8 | }
9 |
// Simple image display widget with a right-click "Save Image" context menu.
class ImageViewer : public QWidget
{
    Q_OBJECT

public:
    explicit ImageViewer(QWidget *parent = 0);
    ~ImageViewer();

    void showImage(QPixmap img); // display img and enable saving

protected:
    void contextMenuEvent(QContextMenuEvent *); // pops up the save menu

private:
    Ui::ImageViewer *ui;

private slots:
    void saveimage(); // prompt for a path and save the shown pixmap
};
29 |
30 | #endif // IMAGEVIEWER_H
31 |
--------------------------------------------------------------------------------
/Duke/imageviewer.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | ImageViewer
4 |
5 |
6 |
7 | 0
8 | 0
9 | 829
10 | 596
11 |
12 |
13 |
14 | Form
15 |
16 |
17 | -
18 |
19 |
20 | true
21 |
22 |
23 |
24 |
25 | 0
26 | 0
27 | 809
28 | 576
29 |
30 |
31 |
32 |
-
33 |
34 |
35 |
36 | 0
37 | 0
38 |
39 |
40 |
41 |
42 | 1280
43 | 1024
44 |
45 |
46 |
47 | Image
48 |
49 |
50 | true
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 | :/save.png:/save.png
63 |
64 |
65 | Save Image
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
--------------------------------------------------------------------------------
/Duke/main.cpp:
--------------------------------------------------------------------------------
1 | #include "mainwindow.h"
2 | #include
3 | #include
4 |
5 | int main(int argc, char *argv[])
6 | {
7 | QApplication a(argc, argv);
8 | QSplashScreen *splash = new QSplashScreen;
9 | splash->setPixmap(QPixmap(":/splash.png"));
10 | splash->show();
11 |
12 | MainWindow w;
13 | w.showMaximized();
14 | w.show();
15 | splash->finish(&w);
16 | delete splash;
17 | return a.exec();
18 | }
19 |
20 | /***************************************************
21 | * 截止到2014-10-12日以前的工作汇总:
22 | * 实现功能:新建工程(即选择一个空的文件夹作为图片的存放目录)
23 | * 打开摄像头,能够实时预览图像以及对在校准过程中采集的图像进行预览,保存图像到指定文件夹
24 | * 开关投影仪,其中关投影通过投射黑色图像实现
25 | * 投射条纹图像,投射窗口由OpenCV生成
26 | * 采集扫描图像并保存
27 | * 设置工作过程中可能用到的全局变量并写入xml文件
28 | * ***************************************************
29 | * 10-13
30 | * 对文件存放路径的生成进行了调整,新建工程直接生成calib、reconstruction、scan三个文件夹
31 | * 通过选择文件夹函数selectPath选择当前存储路径projChildPath
32 | * 对配置文件set.xml的存储进行了调整,储存在工程根目录下
33 | * 自定义类调用时,应防止头文件相互包含,可以采用前置声明方式
34 | * 目前将set作为参数储存库,有些参数不需要手工设置,应在初始化时自动设置
35 | * *****************************************************
36 | * 10-18
37 | * 完成了重建相关代码的添加,构建无错误
38 | * *****************************************************
39 | * 10-19
40 | * 优化reconstruct类代码
41 | * *****************************************************
42 | * 10-20
43 | * 成功将大恒相机移植到Qt环境,对rawbuffer的调取没有采用callback函数,而是通过定时器触发信号槽的形式
44 | * 对QImage*指针指向图片的操作,应首先将其转换为QPixmap,再进行缩放、变形等操作
45 | * ******************************************************
46 | * 10-25
47 | * 改进了扫描图像的采集方式,利用单帧采集函数采集,在投影和采集之间添加延时
48 | *
49 | *
50 | *
51 | *
52 | *
53 | *
54 | *
55 | *
56 | */
57 |
--------------------------------------------------------------------------------
/Duke/mainwindow.h:
--------------------------------------------------------------------------------
1 | #ifndef MAINWINDOW_H
2 | #define MAINWINDOW_H
3 |
4 | //Qt
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | //openCV
12 | #include
13 | #include
14 |
15 | //SLR
16 | #include "blobdetector.h"
17 | #include "cameracalibration.h"
18 | #include "dotmatch.h"
19 | #include "glwidget.h"
20 |
21 | #include "projector.h"
22 | #include "reconstruct.h"
23 | #include "meshcreator.h"
24 |
25 | #include "set.h"
26 | #include "focusassistant.h"
27 |
28 | #include "graycodes.h"
29 | #include "multifrequency.h"
30 |
31 | #include "dahengcamera.h"
32 | #include "baslercamera.h"
33 |
34 | #include "imageviewer.h"
35 |
36 | #include "stereorect.h"
37 |
38 | #define DEBUG//用来观察标记点圆心定位精度
39 |
40 | #define WM_SNAP_CHANGE (WM_USER + 100)
41 |
42 | #define CALIBIMGNUM 14
43 |
44 | #define PATHCALIB 0
45 | #define PATHSCAN 1
46 | #define PATHRECON 2
47 |
48 | namespace Ui {
49 | class MainWindow;
50 | }
51 |
// Main application window: owns the cameras, projector, calibration,
// scanning / dot-matching and reconstruction pipeline, plus the 3-D viewer.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = 0);
    ~MainWindow();
    FocusAssistant *fa;
    Set *setDialog;
    DotMatch *dm;
    GLWidget *displayModel;

    QString projectPath;
    QString projChildPath;

    int screenWidth;//primary screen geometry
    int screenHeight;
    int projectorWidth;//projector screen geometry
    int projectorHeight;
    int scanWidth;//scan area size
    int scanHeight;
    int cameraWidth;//camera resolution
    int cameraHeight;

    int scanSN;//index of the scan sequence currently in progress, counted from 0

private:
    Ui::MainWindow *ui;
    CameraCalibration *calibrator;
    BlobDetector *blob;
    GrayCodes *grayCode;
    MultiFrequency *mf;
    Projector *pj;
    Reconstruct *reconstructor;

    DaHengCamera *DHC;
    BaslerCamera *BC;
    bool usebc;//whether the Basler camera is used
    bool showFocus;//whether the focus-assistant window is shown

    ///coding/decoding reconstruction method, in order: classic row+column Gray code,
    ///column-only Gray code + epipolar rectification, multi-frequency heterodyne fringes + epipolar rectification
    enum codePattern{GRAY_ONLY, GRAY_EPI, MULTIFREQ_EPI};
    codePattern codePatternUsed;

    void createConnections();
    void createCentralWindow(QWidget *parent);
    void captureImage(QString pref, int saveCount, bool dispaly);
    void findPoint();
    void paintPoints();
    void getScreenGeometry();
    void closeCamera();
    void generatePath(int type);

    ///---------------helper utilities---------------///

    void progressPop(int up);
    void drawCross(QPainter &p, int x, int y);

    QLabel *msgLabel;//show message in the bottom of the window

    QTimer *timer;
    QImage image_1;
    QImage image_2;
    QPixmap pimage_1;//.png-format pixmap converted from the image pointer
    QPixmap pimage_2;

    bool isProjectorOpened;
    bool isConfigured;
    int saveCount;//count the photo captured.

    QString path_1;
    QString path_2;

    ///variables tied to the Set dialog
    int black_ ;
    int white_;
    bool isAutoContrast;
    bool isRaySampling;
    bool isExportObj;
    bool isExportPly;

private slots:
    void newproject();
    void openproject();

    void opencamera();
    void startfocusassistant();
    void closefocus();
    void setexposure();
    void readframe();

    void usebasler();

    void selectPath(int PATH);

    void capturecalib();
    void redocapture();
    void projectorcontrol();

    void calib();
    void calibration();

    void scan();
    void pointmatch();
    void refindmatch();
    void showhidemanual();
    void finishmanualmatch();
    void startscan();
    void testmulitfreq();
    void test();

    void reconstruct();
    void startreconstruct();

    void set();
    void getSetInfo();

    void changePointSize(int psize);
    void loadTestModel();
    void switchlanguage();

};
174 |
175 | #endif // MAINWINDOW_H
176 |
--------------------------------------------------------------------------------
/Duke/manualmatch.cpp:
--------------------------------------------------------------------------------
1 | #include "manualmatch.h"
2 | #include "ui_manualmatch.h"
3 |
4 | #include
5 | #include
6 | #include
7 |
8 |
// Shared overlay drawing resources: label font, green pen for matched dots,
// red pen for the dot currently being labeled.
QFont textfont("Calibri",50);
QColor gcolor(0,255,0);
QColor rcolor(255,0,0);
12 |
// Build the UI and wire the four buttons; labeling starts at the first dot.
ManualMatch::ManualMatch(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::ManualMatch)
{
    ui->setupUi(this);

    connect(ui->confirmButton,SIGNAL(clicked()),this,SLOT(confirmID()));
    connect(ui->finishButton,SIGNAL(clicked()),this,SLOT(finish()));
    connect(ui->cancelButton,SIGNAL(clicked()),this,SLOT(hide()));
    connect(ui->resetButton,SIGNAL(clicked()),this,SLOT(reset()));
    //connect(ui->idEdit,SIGNAL(textEdited(QString)),ui->confirmButton,SLOT(setEnabled(bool)));

    onMark = 0;
}
27 |
ManualMatch::~ManualMatch()
{
    delete ui; // release the generated UI tree
}
32 |
33 | void ManualMatch::setImage()
34 | {
35 | QImage pimage_1 = QImage(leftImage.data,leftImage.cols,leftImage.rows,QImage::Format_Indexed8);
36 | QImage pimage_2 = QImage(rightImage.data,rightImage.cols,rightImage.rows,QImage::Format_Indexed8);
37 | QPixmap pcopy_1 = QPixmap::fromImage(pimage_1);
38 | QPixmap pcopy_2 = QPixmap::fromImage(pimage_2);
39 | QPainter pt_1(&pcopy_1);
40 | QPainter pt_2(&pcopy_2);
41 | pt_1.setFont(textfont);
42 | pt_2.setFont(textfont);
43 |
44 | for(size_t i = 0;i < dotInOrder.size();i++)
45 | {
46 | pt_1.setPen(gcolor);
47 | pt_2.setPen(gcolor);
48 |
49 | drawCross(pt_1, dotInOrder[i][0].x ,dotInOrder[i][0].y);
50 | drawCross(pt_2, dotInOrder[i][1].x, dotInOrder[i][1].y);
51 |
52 | int ID;
53 | if (refinedCorr.size()){//根据refinedCorr中的数据(如果有)显示i点ID
54 | for (size_t r = 0; r < refinedCorr.size(); r++){
55 | if (i == refinedCorr[r].y)
56 | ID = refinedCorr[r].x;
57 | }
58 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,QString::number(ID));
59 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,QString::number(ID));
60 | }
61 | else{//若refinedPoint还未被赋予空间,根据correspond中的数据显示i点ID
62 | bool idexist = false;
63 | for (size_t c = 0; c < correspond.size(); c++){
64 | if (i == correspond[c].y){
65 | ID = correspond[c].x;
66 | idexist = true;//表示ID点的对应点存在,只关系到ID的显示状态
67 | }
68 | }
69 | if (idexist){
70 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,QString::number(ID));
71 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,QString::number(ID));
72 | }
73 | else{
74 | pt_1.drawText(dotInOrder[i][0].x,dotInOrder[i][0].y,"?");
75 | pt_2.drawText(dotInOrder[i][1].x,dotInOrder[i][1].y,"?");
76 | }
77 | }
78 | }
79 |
80 | ///用红色方框标记当前准备赋予编号的点
81 | pt_1.setPen(rcolor);
82 | pt_2.setPen(rcolor);
83 | pt_1.drawRect(dotInOrder[onMark][0].x-15,dotInOrder[onMark][0].y-15,30,30);
84 | pt_2.drawRect(dotInOrder[onMark][1].x-15,dotInOrder[onMark][1].y-15,30,30);
85 |
86 | ui->leftImage->setPixmap(pcopy_1);
87 | ui->rightImage->setPixmap(pcopy_2);
88 |
89 | int ID;
90 | if (!refinedCorr.empty()){
91 | if (refinedCorr[onMark].x >= 0){//如果onMark点已经被标记过,则在idEdit中显示ID值
92 | ID = refinedCorr[onMark].x;
93 | ui->idEdit->setText(QString::number(ID));
94 | }
95 | else
96 | ui->idEdit->clear();
97 | }
98 | }
99 |
// Assign the ID typed in idEdit to the currently selected dot, advance the
// selection (wrapping back to 0 after the last dot), and repaint.
void ManualMatch::confirmID()
{
    // lazily allocate on first use: (-1,-1) marks "not yet labeled"
    if (refinedCorr.size() == 0)
        refinedCorr.resize(dotInOrder.size(),cv::Point2i(-1,-1));

    int id = ui->idEdit->text().toInt();
    cv::Point2i corr;
    corr.x = id;     // x = point ID, y = sequence index (see header)
    corr.y = onMark;
    refinedCorr.at(onMark) = corr;

    if (onMark == dotInOrder.size()-1)
        onMark = 0;
    else
        onMark++;
    ui->current->setText(QString::number(onMark));

    setImage();//redraw the images with the updated labels
}
119 |
120 | void ManualMatch::finish()
121 | {
122 | if (refinedCorr.size() == 0){
123 | for (size_t i = 0;i < correspond.size(); i++){
124 | refinedCorr.push_back(correspond[i]);
125 | }
126 | }
127 | else{
128 | for (size_t i = 0;i < refinedCorr.size(); i++){
129 | if (!(refinedCorr[i].x >= 0))
130 | QMessageBox::warning(NULL,"Manual Match",tr("Point ") + QString::number(i) + tr("hasn't been marked."));
131 | }
132 | }
133 | this->hide();
134 | emit outputdata();
135 | }
136 |
// Discard all manual labels and restart labeling from the first dot.
void ManualMatch::reset()
{
    refinedCorr.clear();
    onMark = 0;
    setImage();
}
143 |
144 | void ManualMatch::keyPressEvent(QKeyEvent *e)
145 | {
146 | if (e->key() == Qt::Key_Enter)
147 | confirmID();
148 | }
149 |
150 | void ManualMatch::drawCross(QPainter &p, int x, int y)
151 | {
152 | int len = 25;
153 | p.drawLine(x - len, y, x + len, y);
154 | p.drawLine(x, y - len, x, y + len);
155 | }
156 |
157 |
158 |
159 |
--------------------------------------------------------------------------------
/Duke/manualmatch.h:
--------------------------------------------------------------------------------
1 | #ifndef MANUALMATCH_H
2 | #define MANUALMATCH_H
3 |
4 | #include
5 | #include
6 |
7 | // OpenCV
8 | #include
9 | #include
10 |
11 | namespace Ui {
12 | class ManualMatch;
13 | }
14 |
// Assistant window for manually assigning IDs to marker dots that the
// automatic matcher could not pair between the left and right images.
// NOTE(review): the cv::vector element types below were lost in extraction —
// confirm against the implementation (Point2i correspondences, paired dots).
class ManualMatch : public QWidget
{
    Q_OBJECT

public:
    explicit ManualMatch(QWidget *parent = 0);
    ~ManualMatch();

    cv::Mat leftImage;
    cv::Mat rightImage;
    cv::vector correspond;
    cv::vector refinedCorr;//x is the point ID, y is the sequence index
    cv::vector> dotInOrder;

    void setImage(); // redraw both images with crosses, labels and selection box

private:
    Ui::ManualMatch *ui;

    void drawCross(QPainter &p, int x, int y);

    size_t onMark;//index of the dot the cursor currently rests on, awaiting a label

protected:
    void keyPressEvent(QKeyEvent *e);

private slots:
    void confirmID();
    void finish();//triggered by the Finish button
    void reset();//triggered by the Reset button
signals:
    void outputdata();//emitted from finish(); tells DotMatch to process refinedCorr via its onfinishmanual() slot
};
48 |
49 | #endif // MANUALMATCH_H
50 |
--------------------------------------------------------------------------------
/Duke/manualmatch.ui:
--------------------------------------------------------------------------------
1 |
2 |
3 | ManualMatch
4 |
5 |
6 |
7 | 0
8 | 0
9 | 815
10 | 575
11 |
12 |
13 |
14 |
15 | 0
16 | 0
17 |
18 |
19 |
20 | Manual Match Assistant
21 |
22 |
23 |
24 | :/splash.png:/splash.png
25 |
26 |
27 | true
28 |
29 |
30 |
31 | 6
32 |
33 |
34 | 10
35 |
36 |
37 | 6
38 |
39 |
40 | 10
41 |
42 |
43 | 10
44 |
45 |
46 | 15
47 |
48 | -
49 |
50 |
51 | 20
52 |
53 |
-
54 |
55 |
56 |
57 | 0
58 | 0
59 |
60 |
61 |
62 |
63 | 640
64 | 512
65 |
66 |
67 |
68 | QFrame::Box
69 |
70 |
71 | Left Image
72 |
73 |
74 | true
75 |
76 |
77 |
78 | -
79 |
80 |
81 |
82 | 0
83 | 0
84 |
85 |
86 |
87 |
88 | 640
89 | 512
90 |
91 |
92 |
93 | QFrame::Box
94 |
95 |
96 | Right Image
97 |
98 |
99 | true
100 |
101 |
102 |
103 |
104 |
105 | -
106 |
107 |
108 | 0
109 |
110 |
111 | 100
112 |
113 |
114 | 100
115 |
116 |
-
117 |
118 |
119 | 10
120 |
121 |
-
122 |
123 |
124 |
125 | 0
126 | 0
127 |
128 |
129 |
130 | <html><head/><body><p>Current Point Num:</p></body></html>
131 |
132 |
133 |
134 | -
135 |
136 |
137 |
138 | 0
139 | 0
140 |
141 |
142 |
143 | 0
144 |
145 |
146 |
147 | -
148 |
149 |
150 |
151 | 0
152 | 0
153 |
154 |
155 |
156 | Set ID
157 |
158 |
159 |
160 | -
161 |
162 |
163 |
164 | 0
165 | 0
166 |
167 |
168 |
169 |
170 | 60
171 | 16777215
172 |
173 |
174 |
175 | false
176 |
177 |
178 |
179 | -
180 |
181 |
182 | Confirm
183 |
184 |
185 |
186 |
187 |
188 | -
189 |
190 |
191 | Qt::Horizontal
192 |
193 |
194 |
195 | 40
196 | 20
197 |
198 |
199 |
200 |
201 | -
202 |
203 |
204 | 10
205 |
206 |
-
207 |
208 |
209 | Finish
210 |
211 |
212 |
213 | -
214 |
215 |
216 | Reset
217 |
218 |
219 |
220 | -
221 |
222 |
223 | Cancel
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
--------------------------------------------------------------------------------
/Duke/meshcreator.cpp:
--------------------------------------------------------------------------------
1 | #include "meshcreator.h"
2 |
// Cache the cloud dimensions and allocate one vertex-index slot per pixel.
// The cloud pointer is borrowed, not owned.
MeshCreator::MeshCreator(PointCloudImage *in)
{
    cloud = in;
    w = cloud->getWidth();
    h = cloud->getHeight();
    pixelNum = new int[w*h];
}
10 |
11 | MeshCreator::~MeshCreator(void)
12 | {
13 | delete pixelNum;
14 | }
15 |
16 | void MeshCreator::exportObjMesh(QString path)
17 | {
18 | int count = 1;
19 | bool return_val;
20 | cv::Point3f point;
21 | std::ofstream out1;
22 | std::string cstr = path.toStdString();
23 | out1.open(cstr);
24 |
25 | for(int i = 0; i getPoint(i, j, point);
30 | if(return_val)
31 | {
32 | pixelNum[access(i, j)]=count;
33 | out1<<"v "<< point.x<< " "<< point.y<< " "< 0&&i < w - 1)
61 | v3 = pixelNum[access(i+1,j-1)];
62 | else
63 | v3=0;
64 |
65 | if(v1!=0 && v2!=0 && v3!=0)
66 | out1<<"f "<< v1<<"/"<getPoint(i, j, point))
87 | {
88 | pixelNum[access(i, j)] = vertexCount;
89 | vertexCount++;
90 | }
91 | else
92 | pixelNum[access(i, j)]=0;
93 | }
94 | }
95 | int facesCount = 0;//find faces num
96 | for(int i=0; i 0) && (i < w-1))
115 | v3=pixelNum[access(i+1,j-1)];
116 | else
117 | v3=0;
118 |
119 | if(v1!=0 && v2!=0 && v3!=0)
120 | facesCount++;
121 | }
122 | }
123 |
124 | //ply headers
125 | out1<<"ply\n";
126 | out1<<"format ascii 1.0\n";
127 | out1<<"element vertex " << vertexCount << "\n";
128 | out1<<"property float x\n";
129 | out1<<"property float y\n";
130 | out1<<"property float z\n";
131 | out1<<"property uchar red\n";
132 | out1<<"property uchar green\n";
133 | out1<<"property uchar blue\n";
134 | out1<<"element face " << facesCount << "\n";
135 | out1<<"property list uchar int vertex_indices\n";
136 | out1<<"end_header\n";
137 |
138 | for(int i=0; igetPoint(i, j, point);
143 | if(return_val)
144 | {
145 | out1<< point.x << " " << point.y << " " << point.z << " "<< 128 << " " << 128 << " " << 128 << "\n";
146 | //这里去掉了表示颜色的项,统一赋值为128
147 | //out1<< point.x << " " << point.y << " " << point.z << " "<< (int) color[2] << " " << (int) color[1] << " " << (int) color[0] << "\n";
148 | }
149 | else
150 | pixelNum[access(i, j)]=0;
151 | }
152 | }
153 |
154 | for(int i = 0; i < w;i++)
155 | {
156 | for(int j = 0; j < h; j++)
157 | {
158 | int v1 = pixelNum[access(i, j)], v2, v3;
159 |
160 | if(i < w - 1)
161 | v2 = pixelNum[access(i + 1, j)];
162 | else
163 | v2 = 0;
164 |
165 | if(j 0 && i< w - 1)
174 | v3 = pixelNum[access(i + 1, j - 1)];
175 | else
176 | v3 = 0;
177 |
178 | if(v1!=0 && v2!=0 && v3!=0)
179 | out1 << "3 " << v1 << " " << v3 << " " << v2 << "\n";
180 | }
181 | }
182 | out1.close();
183 | }
184 |
// Map the 2-D pixel coordinate (i, j) to its flat index in pixelNum
// (column-major layout: i * h + j).
int MeshCreator::access(int i, int j)
{
    return i * h + j;
}
189 |
190 |
--------------------------------------------------------------------------------
/Duke/meshcreator.h:
--------------------------------------------------------------------------------
1 | #ifndef MESHCREATOR_H
2 | #define MESHCREATOR_H
3 |
4 | #include "pointcloudimage.h"
5 | #include
6 |
7 | class MeshCreator
8 | {
9 | public:
10 | MeshCreator(PointCloudImage *in);
11 | ~MeshCreator(void);
12 | void exportObjMesh(QString path);
13 | void exportPlyMesh(QString path);
14 | private:
15 | int *pixelNum;
16 | PointCloudImage *cloud;
17 | int MeshCreator::access(int i,int j);
18 | int MeshCreator::access(int i,int j, int z);
19 |
20 | int w;
21 | int h;
22 | };
23 |
24 | #endif // MESHCREATOR_H
25 |
--------------------------------------------------------------------------------
/Duke/multifrequency.cpp:
--------------------------------------------------------------------------------
1 | #include "multifrequency.h"
2 |
int frequency[3] = {70,64,59}; // fringe counts of the three heterodyne frequencies
int testCount = 0;//cursor used when cycling the patterns during projection tests
5 |
// Record the projector resolution; patterns are created later by
// generateMutiFreq().
MultiFrequency::MultiFrequency(QObject *parent, int projwidth, int projheight) :
    QObject(parent)
{
    projW = projwidth;
    projH = projheight;
    imgsLoaded = false;
}
13 |
// Precompute all 12 fringe images: for each of the 3 frequencies, four
// sinusoidal patterns phase-shifted by 90 degrees (PI*phi/2). Pixel values
// lie in roughly [0.004, 1.0] (0.502 +/- 0.498).
void MultiFrequency::generateMutiFreq()
{
    for (size_t i = 0; i < MULTI_MAX_NUM; i++)
    {
        MultiFreqImages[i] = cv::Mat(projH, projW, CV_32F);
    }
    for (size_t f = 0; f < 3; f++)
    {
        for (size_t phi = 0; phi < 4; phi++)
        {
            cv::Mat temp(projH,projW,CV_32F);
            for (size_t w = 0; w < projW; w++)
            {
                for (size_t h = 0; h < projH; h++)
                {
                    // NOTE(review): at()'s template argument was lost in
                    // extraction — presumably temp.at<float>(h,w); confirm.
                    temp.at(h,w) = 0.502+0.498*sin(float(PI*2*w*frequency[f]/projW+PI*phi/2));
                }
            }
            MultiFreqImages[4*f+phi] = temp; // layout: 4 phase shifts per frequency
        }
    }
}
36 |
37 | cv::Mat MultiFrequency::getNextMultiFreq()
38 | {
39 | testCount++;
40 | if (testCount == 13)
41 | testCount = 1;
42 | return MultiFreqImages[testCount-1];
43 | }
44 |
45 | int MultiFrequency::getNumOfImgs()
46 | {
47 | return 12;
48 | }
49 |
--------------------------------------------------------------------------------
/Duke/multifrequency.h:
--------------------------------------------------------------------------------
1 | #ifndef MULTIFREQUENCY_H
2 | #define MULTIFREQUENCY_H
3 |
4 | #define MULTI_MAX_NUM 12
5 | #define PI 3.1416
6 |
7 | #include
8 | #include
9 | #include
10 |
// Generates phase-shifted sinusoidal fringe patterns at three different
// frequencies (see frequency[] in multifrequency.cpp) for structured-light
// projection.
class MultiFrequency : public QObject
{
    Q_OBJECT
public:
    // projwidth/projheight: projector resolution the patterns are rendered at.
    MultiFrequency(QObject *parent = 0, int projwidth = 1280, int projheight = 1024);
    void generateMutiFreq();        // fills MultiFreqImages with 3x4 patterns
    int getNumOfImgs();             // total number of pattern images
    cv::Mat getNextMultiFreq();     // cycles through the generated patterns

private:
    bool imgsLoaded;                // set false in ctor; not updated elsewhere in this file
    int projW;                      // projector width  (pixels)
    int projH;                      // projector height (pixels)
    cv::Mat MultiFreqImages[MULTI_MAX_NUM];
};
26 |
27 | #endif // MULTIFREQUENCY_H
28 |
--------------------------------------------------------------------------------
/Duke/plyloader.cpp:
--------------------------------------------------------------------------------
1 | #include "plyloader.h"
2 | #include
3 |
// Trivial constructor; mp_vertexXYZ / m_totalConnectedPoints are only
// assigned once LoadModel() is called.
PlyLoader::PlyLoader(QObject *parent) :
    QObject(parent)
{
}
8 |
9 | bool PlyLoader::LoadModel(QString filename)
10 | {
11 | if (filename != NULL)
12 | {
13 | /*
14 | QFile file(filename);
15 | if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
16 | return -1;
17 | */
18 | string name = filename.toStdString();
19 | FILE *file = NULL;
20 | file = fopen(name.data(),"r");
21 | fseek(file, 0, SEEK_END);//获取文件全部数据
22 | mp_vertexXYZ = (float*)malloc(10000000);//long int ftell (FILE *stream); Returns the current value of the position indicator of the stream.
23 | fseek(file, 0, SEEK_SET);//操作符指向文件流开头
24 |
25 | if (file)
26 | {
27 | int i = 0;
28 | char buffer[3000];
29 | fgets(buffer, 300, file);//char *fgets (char *str, int num, FILE *stream); Get string from stream
30 | // READ HEADER
31 | // Find number of vertexes
32 | while (strncmp("element vertex", buffer,strlen("element vertex")) != 0 )//int strncmp (const char *str1, const char *str2, size_t num); Compare characters of two strings
33 | {
34 | fgets(buffer, 300, file); //如果一直没有找到element vertex字符串,就一直从文件流中取出300个元素,直到找到为止
35 | }
36 | strcpy(buffer, buffer + strlen("element vertex"));//char *strcpy ( char *destination, const char *source ); Copy string
37 | sscanf(buffer, "%i", &this->m_totalConnectedPoints);//int sscanf ( const char * s, const char * format, ...); Read formatted data from string
38 | // go to end_header
39 | while (strncmp( "end_header", buffer,strlen("end_header")) != 0 )
40 | {
41 | fgets(buffer, 600, file);
42 | }
43 |
44 | // read vertices
45 | i =0;
46 | for (int iterator = 0; iterator < this->m_totalConnectedPoints; iterator++)
47 | {
48 | fgets(buffer, 600, file);
49 | sscanf(buffer,"%f %f %f", &mp_vertexXYZ[i], &mp_vertexXYZ[i+1], &mp_vertexXYZ[i+2]);
50 | i += 3;
51 | }
52 | fclose(file);
53 | }
54 | return true;
55 | }
56 | else
57 | return false;
58 | }
59 |
--------------------------------------------------------------------------------
/Duke/plyloader.h:
--------------------------------------------------------------------------------
1 | #ifndef PLYLOADER_H
2 | #define PLYLOADER_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 |
10 | using namespace std;
11 |
// Minimal ASCII PLY reader: parses the header for the vertex count and
// loads raw x/y/z vertex data (faces and colors are ignored).
class PlyLoader : public QObject
{
    Q_OBJECT
public:
    explicit PlyLoader(QObject *parent = 0);
    bool LoadModel(QString filename);
    float* mp_vertexXYZ;        // x,y,z triples, malloc'ed by LoadModel; caller frees
    int m_totalConnectedPoints; // number of vertices parsed from the header
};
21 |
22 | #endif // PLYLOADER_H
23 |
--------------------------------------------------------------------------------
/Duke/pointcloudimage.cpp:
--------------------------------------------------------------------------------
1 | #include "pointcloudimage.h"
2 |
3 | PointCloudImage::PointCloudImage(int imageW,int imageH, bool colorFlag)
4 | {
5 | w = imageW;
6 | h = imageH;
7 | points = cv::Mat(h, w, CV_32FC3);
8 | if(colorFlag == true)
9 | {
10 | color = cv::Mat(h, w, CV_32FC3,cv::Scalar(0));
11 | }
12 | else
13 | color = NULL;
14 | numOfPointsForPixel = cv::Mat(h, w, CV_8U, cv::Scalar(0));
15 | }
16 |
// cv::Mat members release their own storage; nothing to do here.
PointCloudImage::~PointCloudImage(void)
{
}
20 |
21 | bool PointCloudImage::setPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3f colorBGR)
22 | {
23 | if(i_w>w || j_h>h)
24 | return false;
25 | setPoint(i_w,j_h,point);
26 | //Utilities::matSet3D(color,i_w,j_h,colorBGR);//这里暂时注释掉
27 | return true;
28 | }
29 |
30 | bool PointCloudImage::setPoint(int i_w, int j_h, cv::Point3f point)
31 | {
32 | if(i_w>w || j_h>h)
33 | return false;
34 |
35 | Utilities::matSet3D(points, i_w, j_h, (cv::Vec3f)point);
36 | Utilities::matSet2D(numOfPointsForPixel, j_h, i_w, 1);
37 |
38 | return true;
39 | }
40 |
41 | bool PointCloudImage::getPoint(int i_w, int j_h, cv::Point3f &pointOut, cv::Vec3f &colorOut)
42 | {
43 | if(i_w>w || j_h>h)
44 | return false;
45 | uchar num = numOfPointsForPixel.at(j_h,i_w);
46 | if(num > 0)
47 | {
48 | pointOut = (cv::Point3f) (Utilities::matGet3D(points,i_w,j_h) / (float) num);
49 | if(!color.empty())
50 | {
51 | colorOut = (cv::Point3f) (Utilities::matGet3D(color,i_w,j_h) / (float) num);
52 | }
53 | else
54 | {
55 | return false;
56 | }
57 | return true;
58 | }
59 | else
60 | {
61 | return false;
62 | }
63 | }
64 |
65 | bool PointCloudImage::getPoint(int i_w, int j_h, cv::Point3f &pointOut)
66 | {
67 | if(i_w>w || j_h>h)
68 | return false;
69 | uchar num = numOfPointsForPixel.at(j_h,i_w);
70 | if(num > 0)
71 | {
72 | pointOut = (cv::Point3f) (Utilities::matGet3D(points, i_w, j_h) / (float) num);
73 | return true;
74 | }
75 | else
76 | {
77 | return false;
78 | }
79 | }
80 |
81 | bool PointCloudImage::addPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3f colorBGR)
82 | {
83 | if(i_w > w || j_h > h)
84 | return false;
85 | uchar num = numOfPointsForPixel.at(j_h, i_w);
86 | if(num == 0)
87 | return setPoint(i_w, j_h, point, colorBGR);
88 | addPoint(i_w, j_h, point);
89 | if(!color.empty())
90 | {
91 | cv::Vec3f c = Utilities::matGet3D(color, i_w, j_h);
92 | Utilities::matSet3D(color, i_w, j_h, colorBGR + c);
93 | }
94 | else
95 | {
96 | return false;
97 | }
98 | return true;
99 | }
100 |
101 | bool PointCloudImage::addPoint(int i_w, int j_h, cv::Point3f point)
102 | {
103 | if(i_w>w || j_h>h)
104 | return false;
105 | uchar num = numOfPointsForPixel.at(j_h,i_w);
106 | if(num == 0)
107 | return setPoint(i_w,j_h,point);
108 | cv::Point3f p = Utilities::matGet3D(points,i_w,j_h);
109 | Utilities::matSet3D(points,i_w,j_h,(cv::Vec3f)(point + p));
110 | numOfPointsForPixel.at(j_h,i_w) = num + 1;
111 | return true;
112 | }
113 |
114 | void PointCloudImage::exportXYZ(char path[], bool exportOffPixels, bool colorFlag)
115 | {
116 | std::ofstream out;
117 | out.open(path);
118 | int load;
119 | cv::Point3f p;
120 | cv::Vec3f c;
121 | for(int i = 0; i(j,i);
126 | if(!exportOffPixels && num == 0)
127 | continue;
128 | getPoint(i,j,p,c);
129 | if(exportOffPixels && num == 0)
130 | {
131 | p = cv::Point3f(0,0,0);
132 | c = cv::Point3f(0,0,0);
133 | }
134 | out<(j,i);
158 | if(num > max)
159 | {
160 | max = num;
161 | maxX=i;
162 | maxY=j;
163 | }
164 | }
165 | }
166 |
167 | for(int i=0; i(j,i);
172 | Utilities::matSet2D(projToCamRays, j, i, num/(float)(max * 255.0));
173 | }
174 | }
175 | cv::imwrite("reconstruction/projToCamRays.png",projToCamRays);
176 | std::ofstream out1;
177 | std::stringstream txt;
178 | txt<
5 | #include
6 | #include "utilities.h"
7 |
// Image-structured point cloud: a w x h grid where each pixel accumulates
// the sum of its 3-D samples (plus optional color) and a sample count,
// allowing averaged retrieval and export.
class PointCloudImage
{
public:
    // color: when true a per-pixel color accumulation plane is allocated.
    PointCloudImage(int imageW,int imageH, bool color);
    ~PointCloudImage(void);

    // Overwrite the pixel with a single sample (count becomes 1).
    bool setPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3f colorBGR);
    bool setPoint(int i_w, int j_h, cv::Point3f point);

    // Fetch the averaged point (sum / count); false if the pixel is empty.
    bool getPoint(int i_w, int j_h, cv::Point3f &pointOut);
    bool getPoint(int i_w, int j_h, cv::Point3f &pointOut, cv::Vec3f &colorOut);

    // Accumulate an extra sample into the pixel.
    bool addPoint(int i_w, int j_h, cv::Point3f point, cv::Vec3f color);
    bool addPoint(int i_w, int j_h, cv::Point3f point);

    void exportNumOfPointsPerPixelImg(char path[]);
    void exportXYZ(char *path,bool exportOffPixels=true, bool colorFlag=true);

    int getWidth();
    int getHeight();

private:
    int w;   // grid width  (pixels)
    int h;   // grid height (pixels)
    cv::Mat points;                // CV_32FC3: per-pixel sum of 3-D samples
    cv::Mat numOfPointsForPixel;   // CV_8U: per-pixel sample count
    cv::Mat color;                 // CV_32FC3 color sums; empty when color disabled
};
36 |
37 | #endif // POINTCLOUDIMAGE_H
38 |
--------------------------------------------------------------------------------
/Duke/projector.cpp:
--------------------------------------------------------------------------------
1 | #include "projector.h"
2 | #include
3 | #include
4 | #include
5 |
// Projector resolution in pixels, shared with paintEvent() for centering
// the crosshair.
int proj_w;
int proj_h;
8 |
9 | Projector::Projector(QWidget *parent, int scanW, int scanH, int projW, int projH, int xos, int yos)
10 | : QWidget(parent)
11 | {
12 | width = scanW;
13 | height = scanH;
14 | proj_w = projW;
15 | proj_h = projH;
16 | xoffset = xos;
17 | yoffset = yos;
18 | crossVisible = true;
19 | }
20 |
// No manually-owned resources; the OpenCV window is closed via destoryWindow().
Projector::~Projector()
{
}
24 |
25 | void Projector::paintEvent(QPaintEvent *event)
26 | {
27 | QPainter painter(this);
28 | if(crossVisible)//QEvent::User 1000
29 | {
30 | painter.setPen(QPen(Qt::yellow, 5));
31 | //else
32 | //painter.setPen(QPen(Qt::white, 5));
33 | painter.drawLine(proj_w/2 - 60, proj_h/2, proj_w/2 + 60, proj_h/2);
34 | painter.drawLine(proj_w/2, proj_h/2 - 60, proj_w/2, proj_h/2 + 60);
35 | }
36 | }
37 |
// Creates and places the OpenCV HighGUI window used to display pattern
// images on the projector.
void Projector::opencvWindow()
{
    cvNamedWindow("Projector Window",CV_WINDOW_AUTOSIZE|CV_WINDOW_KEEPRATIO|CV_GUI_NORMAL);
    cvResizeWindow("Projector Window",width,height);
    // Move onto the projector display (its desktop offset in pixels).
    cvMoveWindow("Projector Window", xoffset, yoffset);
}
44 |
// Shows one pattern image in the window created by opencvWindow().
void Projector::showMatImg(cv::Mat img)
{
    cv::imshow("Projector Window", img);
}
49 |
// Closes the OpenCV projector window. (The name keeps the historical
// "destory" spelling because callers depend on it.)
void Projector::destoryWindow()
{
    cvDestroyWindow("Projector Window");
}
54 |
55 | void Projector::displaySwitch(bool isWhite)
56 | {
57 | if(isWhite)
58 | this->setPalette(Qt::white);
59 | else
60 | this->setPalette(Qt::black);
61 | }
62 |
// Shows/hides the alignment crosshair and schedules a repaint.
// (The name keeps the historical "Visable" spelling for callers.)
void Projector::setCrossVisable(bool flag)
{
    crossVisible = flag;
    this->update();
}
68 |
--------------------------------------------------------------------------------
/Duke/projector.h:
--------------------------------------------------------------------------------
1 | #ifndef PROJECTOR_H
2 | #define PROJECTOR_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 |
12 | #include
13 |
// Full-screen widget shown on the projector display: paints an alignment
// crosshair and drives an OpenCV window used to project pattern images.
class Projector : public QWidget
{
public:
    Projector(QWidget *parent, int scanW, int scanH, int projW, int projH, int xos, int yos);
    ~Projector();
    void showMatImg(cv::Mat img);   // display one pattern in the OpenCV window

    void displaySwitch(bool isWhite);   // fill background all-white / all-black
    void opencvWindow();                // create + position the OpenCV window
    void destoryWindow();//delete the projector window created by cv after showImg

    void setCrossVisable(bool flag);    // toggle the crosshair
    void paintEvent(QPaintEvent *event);

private:
    bool crossVisible;   // whether paintEvent draws the crosshair
    int xoffset;         // window x position on the projector display
    int yoffset;         // window y position
    int height;          // OpenCV window height (scan resolution)
    int width;           // OpenCV window width
};
35 |
36 | #endif // PROJECTOR_H
37 |
--------------------------------------------------------------------------------
/Duke/reconstruct.cpp:
--------------------------------------------------------------------------------
1 | #include "reconstruct.h"
2 | #include
3 |
4 | bool processleft = true;
5 |
6 | Reconstruct::Reconstruct(bool useEpi)
7 | {
8 | numOfCams = 2;
9 | mask = NULL;
10 | decRows = NULL;
11 | decCols = NULL;
12 | points3DProjView = NULL;
13 | autoContrast_ = false;
14 | cameras = new VirtualCamera[2];//生成virtualcamera的两个实例,保存在数组cameras[2]
15 | camsPixels = new cv::vector*[2];
16 | camsPixels_GE = new cv::vector*[2];
17 | calibFolder = new QString[2];
18 | scanFolder = new QString[2];
19 | imgPrefix = new QString[2];
20 | pathSet = false;
21 | imgSuffix = ".png";//这里暂时认为图片后缀为.png
22 | EPI = useEpi;
23 | }
24 | Reconstruct::~Reconstruct()
25 | {
26 | unloadCamImgs();
27 | if (points3DProjView)
28 | delete points3DProjView ;
29 | if (EPI)
30 | delete sr;
31 | }
32 |
// Enable ray sampling during triangulation (flag only; consumers are not
// in this file).
void Reconstruct::enableRaySampling()
{
    raySampling_ = true;
}

// Disable ray sampling during triangulation.
void Reconstruct::disableRaySampling()
{
    raySampling_ = false;
}
42 |
// Shadow-mask threshold: minimum white/black intensity difference for a
// pixel to count as lit (see computeShadows()).
void Reconstruct::setBlackThreshold(int val)
{
    blackThreshold = val;
}

// Decoding threshold: minimum |pattern - inverse| difference for a Gray
// code bit to be trusted (see getProjPixel*()).
void Reconstruct::setWhiteThreshold(int val)
{
    whiteThreshold = val;
}
52 |
53 | ///
54 | /// \brief Reconstruct::decodePaterns 由runReconstruction内部调用,用来求解图像点格雷码对应的投影区域十进制坐标
55 | ///
56 | void Reconstruct::decodePaterns()
57 | {
58 | int w = cameraWidth;
59 | int h = cameraHeight;
60 | cv::Point projPixel;//这个变量储存了相片上(w,h)点在投影区域上的坐标projPixel.x,projPixel.y
61 | for(int col = 0; col < w; col++){
62 | for(int row = 0; row < h; row++){
63 | ///mask是根据相机拍摄的图片生成的,因此其大小就是w*h
64 | if(mask.at(row, col)){//if the pixel is not shadow reconstruct
65 | bool error = getProjPixel(row, col, projPixel);//get the projector pixel for camera (i,j) pixel
66 | if(error){
67 | mask.at(row, col) = 0;//进一步补充遮罩区域,相机视野内不属于投影区域的部分都被过滤掉
68 | continue;
69 | }
70 | camPixels[ac(projPixel.x, projPixel.y)].push_back(cv::Point(col, row));
71 | }
72 | }
73 | }
74 | }
75 |
76 | ///
77 | /// \brief Reconstruct::decodePatterns_GE 用于格雷码+极线校正解码,由runReconstruction_GE内部调用
78 | ///
79 | void Reconstruct::decodePatterns_GE()
80 | {
81 | int w = cameraWidth;
82 | int h = cameraHeight;
83 | int xDec;//这个变量储存了相片上(w,h)点在投影区域上的坐标projPixel.x,projPixel.y
84 | for(int row = 0; row < h; row++){
85 | for(int col = 0; col < w; col++){
86 | ///mask是根据相机拍摄的图片生成的,因此其大小就是w*h
87 | if(mask.at(row, col)){//if the pixel is not shadow reconstruct
88 | bool error = getProjPixel_GE(row, col, xDec);
89 | if(error){
90 | mask.at(row, col) = 0;//进一步补充遮罩区域,相机视野内不属于投影区域的部分都被过滤掉
91 | continue;
92 | }
93 | camPixels_GE[(row*cameraWidth+col)].push_back(xDec);
94 | }
95 | }
96 | }
97 | }
98 |
///
/// \brief Load each camera's calibration data (intrinsics, distortion,
/// extrinsics, fundamental matrix and rectification homographies) from
/// the calibration folders.
/// \return true when every camera matrix loaded successfully
///
bool Reconstruct::loadCameras()//Load calibration data into camera[i]
{
    bool loaded;
    for(int i = 0; i < 2; i++)//numOfCams is hard-coded to 2 for simplicity
    {
        QString path;
        path = calibFolder[i];
#ifndef USE_STEREOCALIB_DATA
        path += "cam_matrix.txt";
        loaded = cameras[i].loadCameraMatrix(path);//defined in virtualcamera
        if(!loaded)
            break;

        path = calibFolder[i];
        path += "cam_distortion.txt";
        cameras[i].loadDistortion(path);//loadDistortion expects a 5x1 matrix
#else
        path += "cam_stereo.txt";
        loaded = cameras[i].loadCameraMatrix(path);//defined in virtualcamera
        if(!loaded)
            break;
        path = calibFolder[i];
        path += "distortion_stereo.txt";
        cameras[i].loadDistortion(path);
#endif
        path = calibFolder[i];
        path += "cam_rotation_matrix.txt";
        cameras[i].loadRotationMatrix(path);

        path = calibFolder[i];
        // NOTE(review): "vectror" presumably matches the filename written by
        // the calibration stage - do not "fix" the spelling here alone.
        path += "cam_trans_vectror.txt";
        cameras[i].loadTranslationVector(path);

        path = savePath_;
        path += "/calib/fundamental_stereo.txt";//stereo-calibrated F works better than separately-calibrated F
        cameras[i].loadFundamentalMatrix(path);

        path = savePath_;
        path += "/calib/H1_mat.txt";
        cameras[i].loadHomoMatrix(path, 1);

        path = savePath_;
        path += "/calib/H2_mat.txt";
        cameras[i].loadHomoMatrix(path, 2);

        // Reset so loadCamImgs() re-detects the capture resolution.
        cameras[i].height = 0;
        cameras[i].width = 0;
    }
    return loaded;
}
149 |
150 | bool Reconstruct::loadCamImgs(QString folder, QString prefix, QString suffix)//load camera images
151 | {
152 | cv::Mat tmp;
153 | if(!camImgs.empty())
154 | unloadCamImgs();
155 |
156 | for(int i = 0; i < numberOfImgs; i++){
157 | QString path;
158 | path = folder;//这里folder要达到left/right一层
159 | path += prefix + QString::number(i) + suffix;
160 | tmp.release();
161 |
162 | tmp = cv::imread(path.toStdString(),0);//flag=0 Return a grayscale image
163 |
164 | if (EPI){
165 | if (processleft){//第一次调用loadImg时认为是加载左图像
166 | sr->doStereoRectify(tmp,true);
167 | //cv::imwrite(path.toStdString(),tmp);是否保存校正后的图像
168 | }
169 | else{
170 | sr->doStereoRectify(tmp,false);
171 | //cv::imwrite(path.toStdString(),tmp);
172 | }
173 | }
174 |
175 | if(tmp.empty()){
176 | QMessageBox::warning(NULL,"Warning","Images not found!");
177 | break;
178 | }
179 | else{
180 | if(autoContrast_){
181 | Utilities::autoContrast(tmp,tmp);
182 | }
183 | camImgs.push_back(tmp);
184 | }
185 |
186 | if(camera->width == 0){
187 | camera->height = camImgs[0].rows;
188 | camera->width = camImgs[0].cols;
189 | }
190 | }
191 | processleft = !processleft;//每调用一次加载图像都对是否处理左图像取反
192 | return !tmp.empty();
193 | }
194 |
195 |
196 | void Reconstruct::unloadCamImgs()//unload camera images
197 | {
198 | if(camImgs.size()){
199 | for(int i = 0; iwidth;
210 | int h = camera->height;
211 | mask.release();
212 | mask = cv::Mat(h, w, CV_8U,cv::Scalar(0));//注意h=行数rows,w=列数cols
213 | for(int col = 0; col < w; col++){
214 | for(int row = 0; row < h; row++){
215 | float blackVal, whiteVal;
216 | blackVal = (float) Utilities::matGet2D(camImgs[1], row, col);//camImgs[1]表示全黑图像
217 | whiteVal = (float) Utilities::matGet2D(camImgs[0], row, col);//camImgs[0]表示全白图像
218 | if(whiteVal - blackVal > blackThreshold)//同一像素点在全黑、全白投影下反差大于blackThreshold,说明该点不在阴影里
219 | Utilities::matSet2D(mask, row, col, 1);
220 | else
221 | Utilities::matSet2D(mask, row, col, 0);
222 | }
223 | }
224 | }
225 |
226 |
227 | bool Reconstruct::runReconstruction()
228 | {
229 | bool runSucess = false;
230 | GrayCodes grays(scan_w, scan_h, false);//scan_w scan_h get var getparameter
231 | numOfColBits = grays.getNumOfColBits();
232 | numOfRowBits = grays.getNumOfRowBits();
233 | numberOfImgs = grays.getNumOfImgs();
234 |
235 | for(int i = 0; i < numOfCams; i++){
236 | cameras[i].position = cv::Point3f(0,0,0);//findProjectorCenter();
237 | cam2WorldSpace(cameras[i],cameras[i].position);
238 | camera = &cameras[i];//将position属性已转化到世界坐标系的cameras[i]赋给camera
239 | //在此之前camera相当于一个temp,注意二者单复数有区别
240 | camsPixels[i] = new cv::vector[scan_h*scan_w];
241 | camPixels = camsPixels[i];
242 | runSucess = loadCamImgs(scanFolder[i], imgPrefix[i], imgSuffix);
243 | ///截至这一步,实例camera的position、width、height属性已被赋值,camera对应cameras[i]
244 |
245 | if(!runSucess)//如果加载图片失败,中断
246 | break;
247 | else{
248 | //colorImgs.push_back(cv::Mat());//这里暂时注释掉
249 | //colorImgs[i] = color;//在loadCamImgs中生成了color
250 | computeShadows();
251 | decodePaterns();
252 | unloadCamImgs();
253 | }
254 | }
255 | if(runSucess){
256 | points3DProjView = new PointCloudImage(scan_w, scan_h, false); //最后一个bool值代表是否上色,这里改为false
257 | triangulation(camsPixels[0],cameras[0],camsPixels[1],cameras[1]);
258 | }
259 | return runSucess;
260 | }
261 |
262 | ///
263 | /// \brief Reconstruct::runReconstruction_GE
264 | /// \return 重建是否成功
265 | ///
266 | bool Reconstruct::runReconstruction_GE()
267 | {
268 | bool runSucess = false;
269 | GrayCodes grays(scan_w, scan_h, true);//scan_w scan_h get var getparameter
270 | numOfColBits = grays.getNumOfColBits();
271 | numberOfImgs = grays.getNumOfImgs();
272 |
273 | for(int i = 0; i < numOfCams; i++){
274 | cameras[i].position = cv::Point3f(0,0,0);//findProjectorCenter();
275 | cam2WorldSpace(cameras[i],cameras[i].position);
276 | camsPixels_GE[i] = new cv::vector[cameraHeight * cameraWidth];//将每个相机图像中的每个像素在投影区域中的横坐标记录
277 |
278 | ///camera在loadCamImgs中进行了赋值
279 | camera = &cameras[i];
280 | runSucess = loadCamImgs(scanFolder[i], imgPrefix[i], imgSuffix);
281 | camPixels_GE = camsPixels_GE[i];
282 |
283 | ///截至这一步,实例camera的position、width、height属性已被赋值,camera对应cameras[i]
284 |
285 | if(!runSucess)//如果加载图片失败,中断
286 | break;
287 | else{
288 | computeShadows();
289 | decodePatterns_GE();//对camPixels_GE进行了赋值
290 | unloadCamImgs();
291 | }
292 | }
293 | if(runSucess){
294 | points3DProjView = new PointCloudImage(scan_w, scan_h, false); //最后一个bool值代表是否上色,这里改为false
295 | triangulation_ge(camsPixels_GE[0],cameras[0],camsPixels_GE[1],cameras[1]);
296 | }
297 | return runSucess;
298 | }
299 |
300 |
301 | void Reconstruct::cam2WorldSpace(VirtualCamera cam, cv::Point3f &p)//convert a point from camera to world space
302 | {
303 | cv::Mat tmp(3,1,CV_32F);
304 | cv::Mat tmpPoint(3,1,CV_32F);
305 | tmpPoint.at(0) = p.x;
306 | tmpPoint.at(1) = p.y;
307 | tmpPoint.at(2) = p.z;
308 | tmp = -cam.rotationMatrix.t() * cam.translationVector ;
309 | tmpPoint = cam.rotationMatrix.t() * tmpPoint;
310 | p.x = tmp.at(0) + tmpPoint.at(0);
311 | p.y = tmp.at(1) + tmpPoint.at(1);
312 | p.z = tmp.at(2) + tmpPoint.at(2);
313 | }
314 |
315 |
316 | bool Reconstruct::getProjPixel(int row, int col, cv::Point &p_out)//for a (x,y) pixel of the camera returns the corresponding projector pixel
317 | {
318 | cv::vector grayCol;
319 | cv::vector grayRow;
320 | bool error = false;
321 | int xDec, yDec;
322 |
323 | ///prosses column images
324 | for(int count = 0; count < numOfColBits; count++){
325 | ///get pixel intensity for regular pattern projection and it's inverse
326 | double val1, val2;
327 | val1 = Utilities::matGet2D(camImgs[count * 2 + 2], row, col);
328 | val2 = Utilities::matGet2D(camImgs[count * 2 + 2 +1], row, col);
329 | ///check if intensity deference is in a valid rage
330 | if(abs(val1 - val2) < whiteThreshold )
331 | error = true;
332 | ///determine if projection pixel is on or off
333 | if(val1 > val2)
334 | grayCol.push_back(1);
335 | else
336 | grayCol.push_back(0);
337 | }
338 | xDec = GrayCodes::grayToDec(grayCol);//由灰度序列grayCol求解其对应的十进制数xDec
339 | ///prosses row images
340 | for(int count=0; count < numOfRowBits; count++)
341 | {
342 | double val1, val2;
343 | val1 = Utilities::matGet2D(camImgs[count*2+2+numOfColBits*2], row, col);
344 | val2 = Utilities::matGet2D(camImgs[count*2+2+numOfColBits*2+1], row, col);
345 | if(abs(val1-val2) < whiteThreshold ) //check if the difference between the values of the normal and it's inverce projection image is valid
346 | error = true;
347 | if(val1 > val2)
348 | grayRow.push_back(1);
349 | else
350 | grayRow.push_back(0);
351 | }
352 | ///decode
353 | yDec = GrayCodes::grayToDec(grayRow);
354 |
355 | if((yDec > scan_h || xDec > scan_w)){
356 | error = true;//求出的xy坐标超出了投影范围,说明不是投影点,将其遮罩
357 | }
358 | p_out.x = xDec;//返回相机照片上像素点在投影仪投影范围内的对应十进制坐标
359 | p_out.y = yDec;
360 | return error;
361 | }
362 |
363 |
364 |
365 | ///
366 | /// \brief Reconstruct::getProjPixel_GE 格雷码+极线校正下的图像点身份确定算法
367 | /// \param row 图像点行号
368 | /// \param col 图像点列号
369 | /// \param xDec 图像点在投影区域内的横坐标
370 | /// \return 图像点能否被重建
371 | ///
372 | bool Reconstruct::getProjPixel_GE(int row, int col, int &xDec)
373 | {
374 | cv::vector grayCol;
375 | bool error = false;
376 |
377 | ///prosses column images
378 | for(int count = 0; count < numOfColBits; count++){
379 | ///get pixel intensity for regular pattern projection and it's inverse
380 | double val1, val2;
381 | val1 = Utilities::matGet2D(camImgs[count * 2 + 2], row, col);
382 | val2 = Utilities::matGet2D(camImgs[count * 2 + 2 +1], row, col);
383 | ///check if intensity deference is in a valid rage
384 | if(abs(val1 - val2) < whiteThreshold )
385 | error = true;
386 | ///determine if projection pixel is on or off
387 | if(val1 > val2)
388 | grayCol.push_back(1);
389 | else
390 | grayCol.push_back(0);
391 | }
392 | xDec = GrayCodes::grayToDec(grayCol);//由灰度序列grayCol求解其对应的十进制数xDec
393 |
394 | if(xDec > scan_w){
395 | error = true;//求出的x坐标超出了投影范围,说明不是投影点,将其遮罩
396 | }
397 | return error;
398 | }
399 |
400 |
// Records the calibration-data folder for camera cam_no (0 = left,
// 1 = right), e.g. projectPath + "/calib/left/".
void Reconstruct::setCalibPath(QString folder, int cam_no )
{
    calibFolder[cam_no] = folder;
    pathSet = true;
}
406 |
407 |
408 | void Reconstruct::triangulation(cv::vector *cam1Pixels, VirtualCamera camera1, cv::vector *cam2Pixels, VirtualCamera camera2)
409 | {
410 | int w = scan_w;
411 | int h = scan_h;
412 | cv::Mat matCoordTrans(3,4,CV_32F);//定义变换矩阵将当前次扫描坐标系对齐至首次扫描坐标系
413 | if (scanSN > 0){
414 | ///加载刚体变换矩阵
415 | QString loadPath = savePath_ + "/scan/transfer_mat" + QString::number(scanSN) + ".txt";
416 | camera1.loadMatrix(matCoordTrans, 3, 4, loadPath.toStdString());
417 | }
418 |
419 | for(int i = 0; i < w; i++){
420 | for(int j = 0; j < h; j++){
421 | cv::vector cam1Pixs,cam2Pixs;
422 | ///cam1Pixels和cam2Pixels是长度为scan_w*scan_h的向量,元素为各个投影点对应相机图像点在原图像上的坐标
423 | cam1Pixs = cam1Pixels[ac(i, j)];
424 | cam2Pixs = cam2Pixels[ac(i, j)];
425 |
426 | if( cam1Pixs.size() == 0 || cam2Pixs.size() == 0)//如果投影区域(i,j)处未对应原图像上点,说明其被遮罩
427 | continue;
428 |
429 | for(int c1 = 0; c1 < cam1Pixs.size(); c1++)
430 | {
431 | cv::Point2f camPixelUD = Utilities::undistortPoints(cv::Point2f(cam1Pixs[c1].x,cam1Pixs[c1].y),camera1);//camera 3d point p for (i,j) pixel
432 | cv::Point3f cam1Point = Utilities::pixelToImageSpace(camPixelUD,camera1); //convert camera pixel to image space
433 | cam2WorldSpace(camera1, cam1Point);
434 |
435 | cv::Vec3f ray1Vector = (cv::Vec3f) (camera1.position - cam1Point); //compute ray vector
436 | Utilities::normalize(ray1Vector);
437 |
438 | for(int c2 = 0; c2 < cam2Pixs.size(); c2++)
439 | {
440 | camPixelUD = Utilities::undistortPoints(cv::Point2f(cam2Pixs[c2].x,cam2Pixs[c2].y),camera2);//camera 3d point p for (i,j) pixel
441 | cv::Point3f cam2Point = Utilities::pixelToImageSpace(camPixelUD,camera2); //convert camera pixel to image space
442 | cam2WorldSpace(camera2, cam2Point);
443 |
444 | cv::Vec3f ray2Vector = (cv::Vec3f) (camera2.position - cam2Point); //compute ray vector
445 | Utilities::normalize(ray2Vector);
446 |
447 | cv::Point3f interPoint;
448 | cv::Point3f refinedPoint;
449 |
450 | bool ok = Utilities::line_lineIntersection(camera1.position,ray1Vector,camera2.position,ray2Vector,interPoint);
451 |
452 | if(!ok)
453 | continue;
454 | /*
455 | float X = interPoint.x;
456 | float Y = interPoint.y;
457 | float Z = interPoint.z;
458 | interPoint.z = -Z;
459 | interPoint.x = -Y;
460 | interPoint.y = -X;
461 | */
462 |
463 | ///以下判断为多次重建得到的点云拼接做准备
464 | if (scanSN > 0){
465 | float point[] = {interPoint.x, interPoint.y, interPoint.z, 1};
466 | cv::Mat pointMat(4, 1, CV_32F, point);
467 | cv::Mat refineMat(3, 1, CV_32F);
468 | refineMat = matCoordTrans * pointMat;
469 | refinedPoint.x = refineMat.at(0, 0);
470 | refinedPoint.y = refineMat.at(1, 0);
471 | refinedPoint.z = refineMat.at(2, 0);
472 | }
473 | else
474 | refinedPoint = interPoint;
475 | points3DProjView->addPoint(i, j, refinedPoint);
476 | }
477 | }
478 | }
479 | }
480 | }
481 |
482 |
483 | void Reconstruct::triangulation_ge(cv::vector *cam1Pixels, VirtualCamera camera1, cv::vector *cam2Pixels, VirtualCamera camera2)
484 | {
485 | int w = cameraWidth;
486 | int h = cameraHeight;
487 |
488 | cv::Mat matCoordTrans(3,4,CV_32F);//定义变换矩阵将当前次扫描坐标系对齐至首次扫描坐标系
489 | if (scanSN > 0){
490 | ///加载刚体变换矩阵
491 | QString loadPath = savePath_ + "/scan/transfer_mat" + QString::number(scanSN) + ".txt";
492 | camera1.loadMatrix(matCoordTrans, 3, 4, loadPath.toStdString());
493 | }
494 |
495 | for (int i = 0; i < h;i++){
496 | for (int j = 0;j < w;j++){
497 | cv::vector cam1Pix = cam1Pixels[i*cameraWidth + j];//注意这里cam1Pix是一个向量,若类型设为int则出错
498 | if (cam1Pix.size() == 0)
499 | continue;
500 | for (int k = 0;k < w;k++){
501 | cv::vector cam2Pix = cam2Pixels[i*cameraWidth + k];
502 |
503 | if (cam2Pix.size() == 0)
504 | continue;
505 |
506 | if (cam1Pix[0] == cam2Pix[0]){//说明左相机(j,i)点与右相机(k,i)点匹配
507 | cv::Point2f camPixelUD = Utilities::undistortPoints(cv::Point2f(j, i),camera1);
508 | cv::Point3f cam1Point = Utilities::pixelToImageSpace(camPixelUD,camera1);
509 | cam2WorldSpace(camera1, cam1Point);
510 |
511 | cv::Vec3f ray1Vector = (cv::Vec3f) (camera1.position - cam1Point);
512 | Utilities::normalize(ray1Vector);
513 |
514 | camPixelUD = Utilities::undistortPoints(cv::Point2f(k, i),camera2);
515 | cv::Point3f cam2Point = Utilities::pixelToImageSpace(camPixelUD,camera2);
516 | cam2WorldSpace(camera2, cam2Point);
517 |
518 | cv::Vec3f ray2Vector = (cv::Vec3f) (camera2.position - cam2Point);
519 | Utilities::normalize(ray2Vector);
520 |
521 | cv::Point3f interPoint;
522 | cv::Point3f refinedPoint;
523 |
524 | bool ok = Utilities::line_lineIntersection(camera1.position,ray1Vector,camera2.position,ray2Vector,interPoint);
525 |
526 | if(!ok)
527 | continue;
528 |
529 | ///以下判断为多次重建得到的点云拼接做准备
530 | if (scanSN > 0){
531 | float point[] = {interPoint.x, interPoint.y, interPoint.z, 1};
532 | cv::Mat pointMat(4, 1, CV_32F, point);
533 | cv::Mat refineMat(3, 1, CV_32F);
534 | refineMat = matCoordTrans * pointMat;
535 | refinedPoint.x = refineMat.at(0, 0);
536 | refinedPoint.y = refineMat.at(1, 0);
537 | refinedPoint.z = refineMat.at(2, 0);
538 | }
539 | else
540 | refinedPoint = interPoint;
541 | points3DProjView->addPoint(i, j, refinedPoint);
542 | break;//若左图像某点与右图像点已发生了匹配,则不再检索右图像其余点
543 | }
544 | else
545 | continue;
546 | }
547 | }
548 | }
549 | }
550 |
551 |
///
/// \brief Store scan/camera resolutions and per-scan paths before a
/// reconstruction run; allocates the rectifier when EPI is enabled.
/// \param savePath project root path (same as projectPath)
///
void Reconstruct::getParameters(int scanw, int scanh, int camw, int camh, bool autocontrast, QString savePath)
{
    scan_w = scanw;
    scan_h = scanh;
    cameraWidth = camw;
    cameraHeight = camh;
    autoContrast_ = autocontrast;
    savePath_ = savePath;//equal to projectPath

    if (EPI){
        sr = new stereoRect(savePath_);
        sr->getParameters();
    }

    for(int i = 0; i < 2; i++)
    {
        QString pathI;
        if(i==0){
            pathI = savePath + "/scan/left/";//Load Images for reconstruction
        }
        else{
            pathI = savePath + "/scan/right/";
        }
        camsPixels[i] = NULL;
        camsPixels_GE[i] = NULL;
        scanFolder[i] = pathI;
        // Image prefix e.g. "0/L" / "0/R": scan-serial folder + camera letter.
        if(i == 0)
            imgPrefix[i] = QString::number(scanSN) + "/L";
        else
            imgPrefix[i] = QString::number(scanSN) +"/R";
    }
}
584 |
--------------------------------------------------------------------------------
/Duke/reconstruct.h:
--------------------------------------------------------------------------------
1 | #ifndef RECONSTRUCT_H
2 | #define RECONSTRUCT_H
3 |
4 | #include
5 | #include
6 | #include "graycodes.h"
7 | #include "virtualcamera.h"
8 | #include "pointcloudimage.h"
9 | #include "set.h"
10 | #include "stereorect.h"
11 |
12 | //#define USE_STEREOCALIB_DATA
13 |
14 | class Reconstruct
15 | {
16 | public:
17 | Reconstruct(bool useEpi);
18 | ~Reconstruct();
19 |
20 | bool loadCameras();
21 |
22 | bool runReconstruction();
23 | bool runReconstruction_GE();
24 |
25 | VirtualCamera *cameras;
26 | QString *calibFolder;
27 | PointCloudImage *points3DProjView;
28 | void setBlackThreshold(int val);
29 | void setWhiteThreshold(int val);
30 | void setCalibPath(QString path1st, int cam_no );
31 | void saveShadowImg(const char path[]);
32 | void saveDecodedRowImg(const char path[]);
33 | void saveDecodedColImg(const char path[]);
34 |
35 | void enableRaySampling();
36 | void disableRaySampling();
37 |
38 | void cam2WorldSpace(VirtualCamera cam, cv::Point3f &p);
39 |
40 | void getParameters(int scanw, int scanh, int camw, int camh, bool autocontrast, QString savePath);
41 | QString savePath_;//same as projectPath
42 | int scanSN;//表示当前重建的扫描数据序列号,也是输出模型的序列号
43 |
44 | private:
45 | bool EPI;//是否使用极线校正
46 | int numOfCams;
47 | VirtualCamera *camera;//general functions use this instead of camera1 or camera2
48 | stereoRect *sr;
49 |
50 | cv::vector **camsPixels;
51 | cv::vector **camsPixels_GE;
52 | cv::vector *camPixels; //general functions use this instead of cam1Pixels or cam2Pixels
53 | cv::vector *camPixels_GE;
54 |
55 | bool loadCamImgs(QString folder, QString prefix, QString suffix);
56 |
57 | void unloadCamImgs();//对不同编码模式都适用的卸载图像方法
58 | void computeShadows();
59 |
60 | ///不同编码模式对应的图像点身份获取方法
61 | bool getProjPixel(int row, int col, cv::Point &p_out);//GRAY_ONLY
62 | bool getProjPixel_GE(int row, int col, int &xDec);//GRAY_EPI
63 |
64 | void decodePaterns();
65 | void decodePatterns_GE();
66 |
67 | void triangulation(cv::vector *cam1Pixels, VirtualCamera cameras1, cv::vector *cam2Pixels, VirtualCamera cameras2);
68 | void triangulation_ge(cv::vector *cam1Pixels, VirtualCamera camera1, cv::vector *cam2Pixels, VirtualCamera camera2);
69 |
70 | QString *scanFolder;
71 | QString *imgPrefix;
72 | QString imgSuffix;
73 | int numberOfImgs;
74 | int numOfColBits;
75 | int numOfRowBits;
76 | int blackThreshold;
77 | int whiteThreshold;
78 |
79 | cv::vector camImgs;//用来存放条纹图像序列,不同编码方式通用
80 |
81 | cv::Mat mask;//matrix with vals 0 and 1 , CV_8U , uchar
82 | cv::Mat decRows;
83 | cv::Mat decCols;
84 | bool pathSet;
85 | bool autoContrast_;
86 | bool raySampling_;
87 | int cameraWidth;
88 | int cameraHeight;
89 |
90 | //access
91 | int Reconstruct::ac(int x,int y)
92 | {
93 | return x*scan_h + y;
94 | }
95 |
96 | int scan_w;
97 | int scan_h;
98 | };
99 |
100 | #endif // RECONSTRUCT_H
101 |
--------------------------------------------------------------------------------
/Duke/set.cpp:
--------------------------------------------------------------------------------
1 | #include "set.h"
2 | #include
3 | #include
4 | #include //对于connect函数是必要的,否则出现C2664类型转换错误
5 | #include
6 |
7 | #include "ui_Set.h"
8 |
9 |
10 | Set::Set(QMainWindow *parent) : QDialog(parent),
11 | set(new Ui::SetDialog)
12 | {
13 | set->setupUi(this);
14 | createConfigurationFile();
15 | connect(set->buttonBox->button(QDialogButtonBox::Ok), SIGNAL(clicked()), this, SLOT(createConfigurationFile()));
16 | connect(set->buttonBox->button(QDialogButtonBox::Ok), SIGNAL(clicked()), SIGNAL(outputSet()));
17 | connect(set->buttonBox->button(QDialogButtonBox::Ok),SIGNAL(clicked()), this, SLOT(hide()));
18 | connect(set->buttonBox->button(QDialogButtonBox::Apply), SIGNAL(clicked()), this, SLOT(createConfigurationFile()));
19 | connect(set->buttonBox->button(QDialogButtonBox::Apply), SIGNAL(clicked()), SIGNAL(outputSet()));
20 | connect(set->buttonBox->button(QDialogButtonBox::Cancel),SIGNAL(clicked()),this,SLOT(hide()));
21 | }
22 |
23 | void Set::test(bool flag)
24 | {
25 | if(flag == true)
26 | QMessageBox::information(NULL, tr("Test"), tr("Successed!"));
27 | else
28 | QMessageBox::warning(NULL, tr("Test"), tr("Failed!"));
29 | }
30 |
31 | void Set::createConfigurationFile()//如果是槽函数,那么void声明不可少
32 | {
33 | board_w = set->boardWidth->value();
34 | board_h = set->boardHeight->value();
35 | proj_w = set->projResH->value();
36 | proj_h = set->projResV->value();
37 | scan_w = set->scanResH->value();
38 | scan_h = set->scanResV->value();
39 | if (set->resMode0->isChecked()){
40 | cam_w = 1280;
41 | cam_h = 1024;
42 | }
43 | cell_w = set->cellWidth->value();
44 | cell_h = set->cellHeight->value();
45 | black_threshold = set->blackThresholdEdit->value();
46 | white_threshold = set->whiteThresholdEdit->value();
47 | if(set->autoContrastCheck->isChecked())
48 | autoContrast = true;
49 | else
50 | autoContrast = false;
51 | if(set->raySamplingCheck->isChecked())
52 | raySampling = true;
53 | else
54 | raySampling = false;
55 | if(set->exportObjCheck->isChecked())
56 | exportObj = 1;
57 | else
58 | exportObj = 0;
59 | if(set->exportPlyCheck->isChecked())
60 | exportPly = 1;
61 | else
62 | exportPly = 0;
63 | if (set->GrayOnly->isChecked())
64 | usedPattern = 0;
65 | else if (set->grayEpi->isChecked())
66 | usedPattern = 1;
67 | else
68 | usedPattern = 2;
69 | //createSetFile();
70 | }
71 |
72 | void Set::createSetFile()
73 | {
74 | int autoc, autocs, ray;
75 | autoc = boolToInt(autoContrast);
76 | ray = boolToInt(raySampling);
77 |
78 | const QString &fileName = saveSetPath +"/set.xml";
79 | QFile file(fileName);
80 | file.open(QIODevice::WriteOnly);
81 | QXmlStreamWriter xmlWriter(&file);
82 | xmlWriter.setAutoFormatting(true);
83 | xmlWriter.writeStartDocument();//写入
84 | xmlWriter.writeStartElement("Settings");
85 | xmlWriter.writeStartElement("ProjectorResolution");
86 | xmlWriter.writeTextElement("Width",QString::number(proj_w, 10));
87 | xmlWriter.writeTextElement("Height",QString::number(proj_h, 10));
88 | xmlWriter.writeEndElement();
89 | xmlWriter.writeStartElement("CalibrationBoard");
90 | xmlWriter.writeTextElement("BoardWidth",QString::number(board_w, 10));
91 | xmlWriter.writeTextElement("BoardHeight",QString::number(board_h, 10));
92 | xmlWriter.writeEndElement();
93 | xmlWriter.writeStartElement("ProjectorWindow");
94 | xmlWriter.writeStartElement("ScanResolution");
95 | xmlWriter.writeTextElement("Width",QString::number(scan_w, 10));
96 | xmlWriter.writeTextElement("Height",QString::number(scan_h, 10));
97 | xmlWriter.writeEndElement();//由于start两次所以end两次
98 | xmlWriter.writeEndElement();
99 | xmlWriter.writeStartElement("Reconstruction");
100 | xmlWriter.writeTextElement("AutoContrast",0);
101 | xmlWriter.writeTextElement("SaveAutoContrastImages",0);
102 | xmlWriter.writeTextElement("RaySampling",0);
103 | xmlWriter.writeTextElement("BlackThreshold",QString::number(black_threshold, 10));
104 | xmlWriter.writeTextElement("WhiteThreshold",QString::number(white_threshold, 10));
105 | xmlWriter.writeEndElement();
106 | xmlWriter.writeStartElement("Export");
107 | xmlWriter.writeTextElement("Obj",QString::number(exportObj, 10));
108 | xmlWriter.writeTextElement("Ply",QString::number(exportPly, 10));
109 | xmlWriter.writeEndElement();
110 | xmlWriter.writeEndDocument();//写入
111 | file.close();
112 | if(file.error()){
113 | test(false);
114 | }
115 | }
116 |
117 | int Set::boolToInt(bool input)
118 | {
119 | if(input)
120 | return 1;
121 | else
122 | return 0;
123 | }
124 |
void Set::switchLang()
{
    // Re-apply all translatable strings of this dialog -- presumably called
    // after the application language (zh/en) is switched; confirm caller.
    set->retranslateUi(this);
}
129 |
--------------------------------------------------------------------------------
/Duke/set.h:
--------------------------------------------------------------------------------
1 | #ifndef SET_H
2 | #define SET_H
3 | #include
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | namespace Ui {
14 | class SetDialog;
15 | }
16 |
// Settings dialog: mirrors the widget values into public members on
// Ok/Apply, emits outputSet() so the rest of the application can re-read
// them, and can persist everything to set.xml (createSetFile).
class Set : public QDialog
{
    Q_OBJECT
public:
    Set(QMainWindow *parent = 0);

    QString saveSetPath;        // directory createSetFile() writes set.xml into
    int proj_h;                 // projector resolution, pixels
    int proj_w;
    int scan_w;                 // scan (projector window) resolution, pixels
    int scan_h;
    int cam_w;                  // camera resolution, pixels (1280x1024 when resMode0)
    int cam_h;
    int cell_w;                 // calibration-board cell size -- units not shown here, TODO confirm
    int cell_h;

    int black_threshold;        // decoding thresholds used by reconstruction
    int white_threshold;
    int board_w;                // calibration board corner counts -- TODO confirm
    int board_h;
    int projectorWinPos_x;      // NOTE(review): never written in set.cpp
    int projectorWinPos_y;
    bool autoContrast;          // from autoContrastCheck
    bool raySampling;           // from raySamplingCheck
    int exportObj;              // 1 = export .obj, 0 = don't
    int exportPly;              // 1 = export .ply, 0 = don't
    int usedPattern;            // 0 = GRAY_ONLY, 1 = GRAY_EPI, 2 = other mode

    void switchLang();          // re-translate the UI after a language switch

private:
    Ui::SetDialog *set;         // designer-generated UI
    int boolToInt(bool input);  // true -> 1, false -> 0

signals:
    void outputSet();           // emitted after Ok/Apply refreshed the members

private slots:
    void test(bool flag);       // message box reporting success/failure
    void createConfigurationFile(); // widgets -> public members
    void createSetFile();       // public members -> set.xml
};
59 |
60 | #endif // SET_H
61 |
--------------------------------------------------------------------------------
/Duke/stereorect.cpp:
--------------------------------------------------------------------------------
1 | #include "stereorect.h"
2 |
3 | stereoRect::stereoRect(QString projectPath)
4 | {
5 | ppath = projectPath;
6 | }
7 |
8 | void stereoRect::getParameters()
9 | {
10 | QString path;
11 | path = ppath + "/calib/left/cam_stereo.txt";
12 | loadMatrix(M1, 3, 3, path);
13 | path = ppath + "/calib/left/distortion_stereo.txt";
14 | loadMatrix(D1, 5, 1, path);
15 | path = ppath + "/calib/right/cam_stereo.txt";
16 | loadMatrix(M2, 3, 3, path);
17 | path = ppath + "/calib/right/distortion_stereo.txt";
18 | loadMatrix(D2, 5, 1, path);
19 | path = ppath + "/calib/R_stereo.txt";
20 | loadMatrix(R, 3, 3, path);
21 | path = ppath + "/calib/T_stereo.txt";
22 | loadMatrix(T, 3, 1, path);
23 | }
24 |
25 | void stereoRect::doStereoRectify(cv::Mat &img, bool isleft)
26 | {
27 | cv::Size img_size = img.size();
28 | cv::Mat R1, P1, R2, P2, Q;
29 |
30 | ///该矫正函数在使用时注意两点:
31 | /// 1、flag不要设为CV_CALIB_ZERO_DISPARITY,而是设为0
32 | /// 2、所有输入矩阵都采用CV_64F格式,否则出现类型不匹配错误
33 | cv::stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, 0, -1);
34 |
35 | cv::Mat map11, map12, map21, map22;
36 | if (isleft)
37 | cv::initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
38 | else
39 | cv::initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
40 |
41 | cv::Mat imgr;
42 | if (isleft)
43 | cv::remap(img, imgr, map11, map12, cv::INTER_LINEAR);
44 | else
45 | cv::remap(img, imgr, map21, map22, cv::INTER_LINEAR);
46 |
47 | img = imgr;
48 | }
49 |
50 |
51 | void stereoRect::loadMatrix(cv::Mat &matrix, int rows, int cols, QString file)
52 | {
53 | std:: ifstream in1;
54 | in1.open(file.toStdString());
55 | if(!in1)
56 | return;
57 | if(!matrix.empty())
58 | matrix.release();
59 | matrix = cv::Mat(rows, cols, CV_64F);
60 | for(int i = 0; i < rows; i++){
61 | for(int j = 0; j < cols; j++){
62 | float val;
63 | in1>>val;
64 | Utilities::matSet2D(matrix, i, j, val);
65 | }
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/Duke/stereorect.h:
--------------------------------------------------------------------------------
1 | #ifndef STEREORECT_H
2 | #define STEREORECT_H
3 |
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 |
11 | #include "utilities.h"
12 |
// Loads a project's stereo calibration from disk and applies epipolar
// rectification to individual left/right images.
class stereoRect : public QObject
{
    Q_OBJECT
public:
    stereoRect(QString projectPath);
    // Rectifies img in place; isleft selects which camera's mapping is used.
    // getParameters() must have populated the matrices first.
    void doStereoRectify(cv::Mat &img, bool isleft);
    // Loads M1/D1/M2/D2/R/T from text files under <projectPath>/calib/.
    void getParameters();

private:
    QString ppath;  // project root passed to the constructor
    // Camera matrices (M*), distortion vectors (D*), stereo rotation R and
    // translation T.  NOTE(review): R64 is never used in stereorect.cpp.
    cv::Mat M1, D1, M2, D2, R, R64, T;
    void loadMatrix(cv::Mat &matrix, int rows, int cols, QString file);
};
26 | #endif // STEREORECT_H
27 |
--------------------------------------------------------------------------------
/Duke/utilities.cpp:
--------------------------------------------------------------------------------
1 | #include "utilities.h"
2 |
// Utilities is a stateless collection of static helpers; the constructor
// has nothing to initialize.
Utilities::Utilities(void)
{
}
6 |
// No resources to release.
Utilities::~Utilities(void)
{
}
10 |
11 | bool Utilities::XOR(bool val1, bool val2)
12 | {
13 | if(val1==val2)
14 | return 0;
15 | else
16 | return 1;
17 | }
18 |
19 | void Utilities::normalize(cv::Vec3f &vec)
20 | {
21 | double mag = sqrt( vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2]);
22 |
23 | vec[0] /= (float) max(0.000001, mag);
24 | vec[1] /= (float) max(0.000001, mag);
25 | vec[2] /= (float) max(0.000001, mag);
26 |
27 | return;
28 | }
29 |
30 | void Utilities::normalize3dtable(double vec[3])
31 | {
32 | double mag = sqrt( vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2]);
33 |
34 | vec[0] /= max(0.000001, mag);
35 | vec[1] /= max(0.000001, mag);
36 | vec[2] /= max(0.000001, mag);
37 | }
38 |
//convert image pixel to image 3d space point
// In/out: p holds the pixel coordinates (u, v, -) on entry and the
// normalized camera-space ray (x, y, 1) on exit.
// fc = focal lengths, cc = principal point.
void Utilities::pixelToImageSpace(double p[3], CvScalar fc, CvScalar cc)
{
    p[0] = (p[0]-cc.val[0])/fc.val[0];
    p[1] = (p[1]-cc.val[1])/fc.val[1];
    p[2] = 1;  // point lies on the z = 1 plane in camera space
}
46 |
47 | cv::Point3f Utilities::pixelToImageSpace(cv::Point2f p, VirtualCamera cam)
48 | {
49 | cv::Point3f point;
50 |
51 | point.x = (p.x-cam.cc.x) / cam.fc.x;
52 | point.y = (p.y-cam.cc.y) / cam.fc.y;
53 | point.z = 1;
54 |
55 | return point;
56 | }
57 |
// Remove lens distortion from a single pixel coordinate and return the
// corrected pixel position.  Uses a fixed-point iteration: starting from
// the distorted normalized coordinates, repeatedly divide out the radial
// factor and subtract the tangential terms.
cv::Point2f Utilities::undistortPoints( cv::Point2f p, VirtualCamera cam)
{
    // k[0..1]: radial k1, k2; k[2..3]: tangential coefficients;
    // k[4]: radial k3, forced to 0 below.
    double k[5]={0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    int iters = 1;
    k[0] = cam.distortion.at(0);
    k[1] = cam.distortion.at(1);
    k[2] = cam.distortion.at(2);
    k[3] = cam.distortion.at(3);
    k[4]=0;
    iters = 5;   // 5 fixed-point iterations (overrides the 1 above)
    fx = cam.fc.x;   // focal lengths
    fy = cam.fc.y;
    ifx = 1./fx;
    ify = 1./fy;
    cx = cam.cc.x;   // principal point
    cy = cam.cc.y;

    double x, y, x0, y0;

    x=p.x;
    y=p.y;

    // Normalize to camera coordinates; (x0, y0) stays fixed as the
    // distorted reference point of the iteration.
    x0 = x = (x - cx)*ifx;
    y0 = y = (y - cy)*ify;

    for(int jj = 0; jj < iters; jj++ )
    {
        double r2 = x*x + y*y;
        // Inverse radial factor 1 / (1 + k1*r^2 + k2*r^4 + k3*r^6)
        double icdist = 1./(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
        // Tangential distortion components
        double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
        double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
        x = (x0 - deltaX)*icdist;
        y = (y0 - deltaY)*icdist;
    }

    // Map the undistorted normalized point back to pixel coordinates.
    return cv::Point2f((float)(x*fx)+cx,(float)(y*fy)+cy);
}
95 |
96 | //calculate the intersection point of a ray and a plane, given the normal and a point of the plane, and a point and the vector of the ray
97 | CvScalar Utilities::planeRayInter(CvScalar planeNormal,CvScalar planePoint, CvScalar rayVector, CvScalar rayPoint )
98 | {
99 | double l;
100 | CvScalar point;
101 |
102 | CvScalar pSub;
103 |
104 | pSub.val[0] = - rayPoint.val[0] + planePoint.val[0];
105 | pSub.val[1] = - rayPoint.val[1] + planePoint.val[1];
106 | pSub.val[2] = - rayPoint.val[2] + planePoint.val[2];
107 |
108 | double dotProd1 = pSub.val[0] * planeNormal.val[0] + pSub.val[1] * planeNormal.val[1] + pSub.val[2] * planeNormal.val[2];
109 | double dotProd2 = rayVector.val[0] * planeNormal.val[0] + rayVector.val[1] * planeNormal.val[1] + rayVector.val[2] * planeNormal.val[2];
110 |
111 | if(fabs(dotProd2)<0.00001)
112 | {
113 | point.val[0]=0;
114 | point.val[1]=0;
115 | point.val[2]=0;
116 | return point;
117 | }
118 | l = dotProd1 / dotProd2;
119 | point.val[0] = rayPoint.val[0] + l * rayVector.val[0];
120 | point.val[1] = rayPoint.val[1] + l * rayVector.val[1];
121 | point.val[2] = rayPoint.val[2] + l * rayVector.val[2];
122 | return point;
123 | }
124 |
125 | double Utilities::matGet2D(cv::Mat m, int row, int col)//输入量先行后列,即先y后x,先h后w
126 | {
127 | int type = m.type();
128 | switch(type)
129 | {
130 | case CV_8U:
131 | return m.at(row,col);//opencv中的at函数是先行后列,即按照矩阵的一般顺序取值的
132 | break;
133 | case CV_8S:
134 | return m.at(row,col);
135 | break;
136 | case CV_16U:
137 | return m.at(row,col);
138 | break;
139 | case CV_16S:
140 | return m.at(row,col);
141 | break;
142 | case CV_32S:
143 | return m.at(row,col);
144 | break;
145 | case CV_32F:
146 | return m.at(row,col);
147 | break;
148 | case CV_64F:
149 | return m.at(row,col);
150 | break;
151 | }
152 | }
153 |
154 | double Utilities::matGet3D(cv::Mat m, int x, int y, int i)
155 | {
156 | int type = m.type();
157 | switch(type)
158 | {
159 | case CV_8U:
160 | case CV_MAKETYPE(CV_8U,3):
161 | return m.at