├── BRDFNet ├── BRDFNetTraining.py ├── BRDF_Net_Config.ini ├── RenderBRDFNetData.py ├── TestBRDF.py ├── folderPath.txt ├── homogeneous_vis.ini ├── lvlist_1.txt └── template.html ├── Readme.md ├── Render ├── BRDFEvaluate.h ├── FastRendererCUDA.py ├── GeometryEvaluate.h ├── PixelShader.cu ├── exception.h ├── helper_cuda.h ├── helper_functions.h ├── helper_image.h ├── helper_math.h ├── helper_string.h ├── helper_timer.h ├── plane.obj ├── sphere.obj └── sphere_normal.pfm ├── SVBRDFNet ├── RenderSVBRDFDataset.py ├── SVBRDFNetTraining.py ├── SVBRDF_Net_Config.ini ├── TestSVBRDF.py ├── folderPath_SVBRDF.txt └── template_visSVBRDFNet_Real.html └── Utils ├── NetClass.py ├── solver_template.prototxt └── utils.py /BRDFNet/BRDF_Net_Config.ini: -------------------------------------------------------------------------------- 1 | [device] 2 | randomSeed = 23333 3 | 4 | [solver] 5 | SolverType = Adam 6 | 7 | ;learning rate 8 | lr = 0.001 9 | momentum = 0.9 10 | lrDecay = 0.0001 11 | batchSize = 64 12 | weightDecay = 0 13 | 14 | [stopping] 15 | ;enter -1 to disable certain stopping critiria 16 | nMaxEpoch = -1 17 | nMaxIter = 250000 18 | 19 | [loop] 20 | ;self-augment on/off 21 | renderLoop = 1 22 | 23 | ;automatic compute ratio between labeled data/unlabeled data usage 24 | autoLoopRatio = 0 25 | 26 | ;or manually set the ratio 27 | normalBatchLength = 1 28 | loopBatchLength = 3 29 | 30 | ;how many iter/epoch before self-augment 31 | loopStartEpoch = -1 32 | loopStartIteration = 20000 33 | 34 | [network] 35 | NetworkType = Ratio 36 | Channal = Full 37 | BN = 1 38 | color = 0 39 | 40 | [dataset] 41 | NormalizeInput = 0 42 | dataset = /media/v-xil/New Volume/BRDF_envlight_CUDA/train_envlight/train_full.txt 43 | unlabelDataset = /media/v-xil/New Volume/BRDF_envlight_CUDA/train_envlight/train_full.txt 44 | testDataset = /media/v-xil/New Volume/BRDF_envlight_CUDA/test_envlight/test_full.txt 45 | 46 | ;use of labeled data - defualt are corner data only 47 | ;rest are left as unlabeled 
data 48 | albedoRange = 0,9 49 | specRange = 0,9 50 | roughnessRange = 0,14 51 | 52 | unlabellightfile = lvlist_1.txt 53 | unlabellightcondition = 0 54 | 55 | ;test data, deafult is test on full test set 56 | testalbedoRange = 0,1,2,3,4,5,6,7,8 57 | testspecRange = 0,1,2,3,4,5,6,7,8 58 | testroughnessRange = 0,1,2,3,4,5,6,7,8,9,10,11,12,13 59 | 60 | ;deprecated 61 | thetaRange = 0,1,2,3,4,5,6,7,8,9,10 62 | phiRange = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 63 | envLighting = 1 64 | 65 | [display] 66 | displayStep = 1000 67 | loopdisplayStep = 500 68 | checkpointStepIteration = 20000 69 | checkpointStepEpoch = -1 70 | -------------------------------------------------------------------------------- /BRDFNet/RenderBRDFNetData.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | working_path = os.path.dirname(os.path.realpath(__file__)) 3 | root_path = os.path.dirname(working_path) 4 | sys.path.append(root_path + r'/Render') 5 | sys.path.append(root_path + r'/Utils') 6 | import numpy as np 7 | import math 8 | from utils import load_pfm, save_pfm, pfmFromBuffer, pfmToBuffer, make_dir, toLDR 9 | from operator import itemgetter 10 | import caffe 11 | 12 | from FastRendererCUDA import FastRenderEngine 13 | 14 | params = {} 15 | os.chdir(working_path) 16 | 17 | #dense sampled dataset, single channal 18 | albedoCnt = 10 19 | specCnt = 10 20 | roughnessCnt = 15 21 | thetaCnt = 15 22 | phiCnt = 15 23 | 24 | RenderOutput = r'' 25 | 26 | with open('folderPath.txt', 'r') as f: 27 | params['geometryPath'] = r'../Render/sphere.obj' 28 | params['scriptRoot'] = r'../Utils' 29 | params['outFolder'] = f.readline().strip() 30 | params['envMapFolder'] = f.readline().strip() 31 | 32 | def idToBRDFid(id, aCnt, sCnt, rCnt): 33 | aCnt = 10 34 | sCnt = 10 35 | rCnt = 15 36 | 37 | aid = id / (sCnt * rCnt) 38 | sid = (id % (sCnt * rCnt)) / rCnt 39 | rid = id - aid * (sCnt * rCnt) - sid * rCnt 40 | 41 | return aid, sid, rid 42 | 43 | def 
sampleCube(aid_list, sid_list, rid_list, full_list, dataset_name): 44 | out_list = [] 45 | for a in aid_list: 46 | for s in sid_list: 47 | for r in rid_list: 48 | for tid in range(0, thetaCnt): 49 | for pid in range(0, phiCnt): 50 | out_list.append('{}_{}_{}_{}_{}'.format(a, s, r, tid, pid)) 51 | unlabel_list = list(set(full_list) - set(out_list)) 52 | with open(RenderOutput + r'/train/train_{}.txt'.format(dataset_name), 'w') as f: 53 | for x in out_list: 54 | f.write(x) 55 | f.write('\n') 56 | 57 | with open(RenderOutput + r'/train/train_unlabel_{}.txt'.format(dataset_name), 'w') as f: 58 | for x in unlabel_list: 59 | f.write(x) 60 | f.write('\n') 61 | 62 | def getLightTransList(numX, numY): 63 | lightList = [] 64 | angleXList = [] 65 | angleYList = [] 66 | # np.random.seed(23333) 67 | for i in range(0, numX): 68 | for j in range(0, numY): 69 | angleY = np.random.uniform(0.0, 360.0) 70 | angleX = np.random.uniform(-30.0, 10.0) 71 | lightList.append('r,0,1,0,{}/r,1,0,0,{}/end'.format(angleY, angleX)) 72 | angleXList.append(angleX) 73 | angleYList.append(angleY) 74 | return lightList, angleXList, angleYList 75 | 76 | 77 | if __name__ == '__main__': 78 | gpuid = int(sys.argv[1]) 79 | out_root = sys.argv[2] 80 | 81 | make_dir(out_root + r'/train_envlight') 82 | make_dir(out_root + r'/test_envlight') 83 | 84 | trainAlbedo = np.linspace(0.05, 1.0, albedoCnt) 85 | trainSpec = np.linspace(0.05, 1.0, specCnt) 86 | trainRoughness = np.exp(np.linspace(math.log(0.02), math.log(1.0), roughnessCnt)) 87 | 88 | testAlbedo = np.linspace(0.1, 0.95, albedoCnt - 1) 89 | testSpec = np.linspace(0.1, 0.95, specCnt - 1) 90 | testRoughness = np.exp(np.linspace(math.log(0.03), math.log(0.87), roughnessCnt - 1)) 91 | 92 | imageCnt = albedoCnt*specCnt*roughnessCnt*thetaCnt*phiCnt 93 | 94 | envMapFolder = params['envMapFolder'] 95 | with open(envMapFolder + '/light.txt', 'r') as f: 96 | lightID = map(int, f.read().strip().split('\n')) 97 | lightID = list(np.array(lightID) - 1) 98 | 99 | 
np.random.seed(gpuid) 100 | OnlineRender = FastRenderEngine(gpuid) 101 | OnlineRender.SetGeometry('Sphere') 102 | OnlineRender.PreLoadAllLight(r'{}/light.txt'.format(envMapFolder)) 103 | 104 | fovRadian = 60.0 / 180.0 * math.pi 105 | cameraDist = 1.5 / (math.tan(fovRadian / 2.0)) 106 | OnlineRender.SetCamera(0, 0, cameraDist, 0, 0, 0, 0, 1, 0, fovRadian, 0.01, 100, 128, 128) 107 | OnlineRender.SetSampleCount(128, 1024) 108 | OnlineRender.SetRenderMode(0) 109 | 110 | albedoCnt = len(trainAlbedo) 111 | specCnt = len(trainSpec) 112 | roughnessCnt = len(trainRoughness) 113 | trainCube = np.zeros((albedoCnt, specCnt, roughnessCnt, 3)) 114 | testCube = np.zeros((albedoCnt-1, specCnt-1, roughnessCnt-1, 3)) 115 | 116 | print('Rendering Training data...\n') 117 | ftrain = open(out_root + r'/train_envlight/train_full.txt', 'w') 118 | for aid, a in enumerate(trainAlbedo): 119 | for sid, s in enumerate(trainSpec): 120 | for rid, r in enumerate(trainRoughness): 121 | lightMatrix = np.zeros((len(lightID), 9, 2)) 122 | print('...{}_{}_{}\n'.format(aid, sid, rid)) 123 | 124 | trainCube[aid,sid,rid] = [a,s,r] 125 | brdfFolder = out_root + r'/train_envlight/{}_{}_{}'.format(aid, sid, rid) 126 | make_dir(brdfFolder) 127 | 128 | OnlineRender.SetAlbedoValue([a, a, a]) 129 | OnlineRender.SetSpecValue([s, s, s]) 130 | OnlineRender.SetRoughnessValue(r) 131 | 132 | for lid, l in enumerate(lightID): 133 | OnlineRender.SetEnvLightByID(l+1) 134 | lightView, lightX, lightY = getLightTransList(3, 3) 135 | for vid, v in enumerate(lightView): 136 | OnlineRender.SetLightXform(lightX[vid], lightY[vid]) 137 | img = OnlineRender.Render() 138 | save_pfm(brdfFolder + r'/{}_{}.pfm'.format(lid, vid), img) 139 | ftrain.write('{}_{}_{}_{}_{}\n'.format(aid, sid, rid, lid, vid)) 140 | lightMatrix[lid, vid, 0] = lightX[vid] 141 | lightMatrix[lid, vid, 1] = lightY[vid] 142 | 143 | np.savetxt(out_root + r'/train_envlight/lightMatrix_{}_{}_{}.txt'.format(aid, sid, rid), lightMatrix.flatten()) 144 | 145 | 
ftrain.close() 146 | np.savetxt(out_root + r'/train_envlight/brdfcube.txt', trainCube.flatten()) 147 | print('Done.\n') 148 | 149 | print('Rendering Test data...\n') 150 | ftest = open(out_root + r'/test_envlight/test_full.txt', 'w') 151 | for aid, a in enumerate(testAlbedo): 152 | for sid, s in enumerate(testSpec): 153 | for rid, r in enumerate(testRoughness): 154 | print('...{}_{}_{}\n'.format(aid, sid, rid)) 155 | testCube[aid, sid, rid] = [a, s, r] 156 | brdfFolder = out_root + r'/test_envlight/{}_{}_{}'.format(aid, sid, rid)#+ offSetAlbedo[gpuid], sid + offSetSpec[gpuid], rid) 157 | make_dir(brdfFolder) 158 | 159 | OnlineRender.SetAlbedoValue([a, a, a]) 160 | OnlineRender.SetSpecValue([s, s, s]) 161 | OnlineRender.SetRoughnessValue(r) 162 | 163 | for lid, l in enumerate(lightID): 164 | OnlineRender.SetEnvLightByID(l+1)#(envMapFolder + r'\{:04d}.pfm'.format(l+1), '') 165 | img = OnlineRender.Render() 166 | save_pfm(brdfFolder + r'/{}_{}.pfm'.format(lid, 0), img) 167 | ftest.write('{}_{}_{}_{}_{}\n'.format(aid, sid, rid, lid, 0)) 168 | 169 | ftest.close() 170 | np.savetxt(out_root + r'/test_envlight/brdfcube.txt', testCube.flatten()) 171 | -------------------------------------------------------------------------------- /BRDFNet/folderPath.txt: -------------------------------------------------------------------------------- 1 | ../TrainedResult 2 | /home/v-xil/v-xil/envmap_pfm_cube 3 | -------------------------------------------------------------------------------- /BRDFNet/homogeneous_vis.ini: -------------------------------------------------------------------------------- 1 | [dataset] 2 | testset = /media/v-xil/New Volume/BRDF_envlight_CUDA/test_envlight/test_full.txt 3 | albedorange = 0,1,2,3,4,5,6,7,8 4 | specrange = 0,1,2,3,4,5,6,7,8 5 | roughnessrange = 0,1,2,3,4,5,6,7,8,9,10,11,12,13 6 | 7 | [sample] 8 | albedocnt = 10 9 | speccnt = 10 10 | roughnesscnt = 15 11 | resample = 0 12 | 13 | [light] 14 | envlighting = 1 15 | 16 | [visualList] 17 | 
diffusevisuallist = 18 | specvisuallist = 19 | roughnessvisuallist = 20 | 21 | [network] 22 | outchannals = Full 23 | ratio = 1 24 | 25 | [output] 26 | outtag = test 27 | 28 | -------------------------------------------------------------------------------- /BRDFNet/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ResultVisulization 5 | 14 | 15 | 16 | 17 | 18 |
19 |
20 | 21 | 22 |

ResultVisualization - {{experimentTag}}

23 | 85 |

#0: Training Curve

86 | 87 | 88 | {% if trainingCurvePath.albedo is defined %} 89 | 90 | {% endif %} 91 | {% if trainingCurvePath.spec is defined %} 92 | 93 | {% endif %} 94 | {% if trainingCurvePath.roughness is defined %} 95 | 96 | {% endif %} 97 | {% if trainingCurvePath.total is defined %} 98 | 99 | {% endif %} 100 | 101 | 102 | {% if trainingCurvePath.albedo is defined %} 103 | 108 | {% endif %} 109 | {% if trainingCurvePath.spec is defined %} 110 | 115 | {% endif %} 116 | {% if trainingCurvePath.roughness is defined %} 117 | 122 | {% endif %} 123 | {% if trainingCurvePath.total is defined %} 124 | 129 | {% endif %} 130 | 131 |
AlbedoSpecRoughnessTotal
104 |
105 | 106 |
107 |
111 |
112 | 113 |
114 |
118 |
119 | 120 |
121 |
125 |
126 | 127 |
128 |
132 |
133 | 134 |

#2: Error Analysis

135 |

#2-1: Spec-Roughness Slice

136 | {% if slice_S_R is defined %} 137 | 138 | 139 | 140 | {% if slice_S_R[0].albedo is defined %} 141 | 142 | {% endif %} 143 | {% if slice_S_R[0].spec is defined %} 144 | 145 | {% endif %} 146 | {% if slice_S_R[0].roughness is defined %} 147 | 148 | {% endif %} 149 | {% if slice_S_R[0].total is defined %} 150 | 151 | {% endif %} 152 | {% if slice_S_R[0].visual is defined %} 153 | 154 | {% endif %} 155 | {% if slice_S_R[0].ssim is defined %} 156 | 157 | {% endif %} 158 | 159 | {% for slice in slice_S_R %} 160 | 161 | 162 | {% if slice.albedo is defined %} 163 | 168 | {% endif %} 169 | {% if slice.spec is defined %} 170 | 175 | {% endif %} 176 | {% if slice.roughness is defined %} 177 | 182 | {% endif %} 183 | {% if slice.total is defined %} 184 | 189 | {% endif %} 190 | {% if slice.visual is defined %} 191 | 196 | {% endif %} 197 | {% if slice.ssim is defined %} 198 | 203 | {% endif %} 204 | 205 | {% endfor %} 206 |
Sliced PointAlbedoSpecRoughnessTotalVisualSSIM
"{{slice.value}}" 164 |
165 | 166 |
167 |
171 |
172 | 173 |
174 |
178 |
179 | 180 |
181 |
185 |
186 | 187 |
188 |
192 |
193 | 194 |
195 |
199 |
200 | 201 |
202 |
207 | {% endif %} 208 | 209 |

#2-2: Albedo-Spec Slice

210 | {% if slice_A_S is defined %} 211 | 212 | 213 | 214 | {% if slice_A_S[0].albedo is defined %} 215 | 216 | {% endif %} 217 | {% if slice_A_S[0].spec is defined %} 218 | 219 | {% endif %} 220 | {% if slice_A_S[0].roughness is defined %} 221 | 222 | {% endif %} 223 | {% if slice_A_S[0].total is defined %} 224 | 225 | {% endif %} 226 | {% if slice_A_S[0].visual is defined %} 227 | 228 | {% endif %} 229 | {% if slice_A_S[0].ssim is defined %} 230 | 231 | {% endif %} 232 | 233 | {% for slice in slice_A_S %} 234 | 235 | 236 | {% if slice.albedo is defined %} 237 | 242 | {% endif %} 243 | {% if slice.spec is defined %} 244 | 249 | {% endif %} 250 | {% if slice.roughness is defined %} 251 | 256 | {% endif %} 257 | {% if slice.total is defined %} 258 | 263 | {% endif %} 264 | {% if slice.visual is defined %} 265 | 270 | {% endif %} 271 | {% if slice.ssim is defined %} 272 | 277 | {% endif %} 278 | 279 | {% endfor %} 280 |
Sliced PointAlbedoSpecRoughnessTotalVisualSSIM
"{{slice.value}}" 238 |
239 | 240 |
241 |
245 |
246 | 247 |
248 |
252 |
253 | 254 |
255 |
259 |
260 | 261 |
262 |
266 |
267 | 268 |
269 |
273 |
274 | 275 |
276 |
281 | {% endif %} 282 | 283 |

#2-3: Albedo-Roughness Slice

284 | {% if slice_A_R is defined %} 285 | 286 | 287 | 288 | {% if slice_A_R[0].albedo is defined %} 289 | 290 | {% endif %} 291 | {% if slice_A_R[0].spec is defined %} 292 | 293 | {% endif %} 294 | {% if slice_A_R[0].roughness is defined %} 295 | 296 | {% endif %} 297 | {% if slice_A_R[0].total is defined %} 298 | 299 | {% endif %} 300 | {% if slice_A_R[0].visual is defined %} 301 | 302 | {% endif %} 303 | {% if slice_A_R[0].ssim is defined %} 304 | 305 | {% endif %} 306 | 307 | {% for slice in slice_A_R %} 308 | 309 | 310 | {% if slice.albedo is defined %} 311 | 316 | {% endif %} 317 | {% if slice.spec is defined %} 318 | 323 | {% endif %} 324 | {% if slice.roughness is defined %} 325 | 330 | {% endif %} 331 | {% if slice.total is defined %} 332 | 337 | {% endif %} 338 | {% if slice.visual is defined %} 339 | 344 | {% endif %} 345 | {% if slice.ssim is defined %} 346 | 351 | {% endif %} 352 | 353 | {% endfor %} 354 |
Sliced PointAlbedoSpecRoughnessTotalVisualSSIM
"{{slice.value}}" 312 |
313 | 314 |
315 |
319 |
320 | 321 |
322 |
326 |
327 | 328 |
329 |
333 |
334 | 335 |
336 |
340 |
341 | 342 |
343 |
347 |
348 | 349 |
350 |
355 | {% endif %} 356 | 357 |
358 |

#3: Loss Curves

359 | 360 | 361 | {% if albedoloss is defined %} 362 | 363 | {% endif %} 364 | {% if specloss is defined %} 365 | 366 | {% endif %} 367 | {% if roughnessloss is defined %} 368 | 369 | {% endif %} 370 | {% if totalloss is defined %} 371 | 372 | {% endif %} 373 | {% if visualloss is defined %} 374 | 375 | {% endif %} 376 | {% if ssimloss is defined %} 377 | 378 | {% endif %} 379 | 380 | 381 | {% if albedoloss is defined %} 382 | 383 | {% endif %} 384 | {% if specloss is defined %} 385 | 386 | {% endif %} 387 | {% if roughnessloss is defined %} 388 | 389 | {% endif %} 390 | {% if totalloss is defined %} 391 | 392 | {% endif %} 393 | {% if visualloss is defined %} 394 | 395 | {% endif %} 396 | {% if ssimloss is defined %} 397 | 398 | {% endif %} 399 | 400 |
avg.albedo lossavg.spec lossavg.roughness lossavg.total lossavg.visual lossavg.ssim loss
{{albedoloss}}{{specloss}}{{roughnessloss}}{{totalloss}}{{visualloss}}{{ssimloss}}
401 | 402 | 403 | 404 | {% if ((axisAlbedo is defined) and (axisAlbedo.albedo is defined)) or ((axisSpec is defined) and (axisSpec.albedo is defined)) or ((axisRoughness is defined) and (axisRoughness.albedo is defined)) %} 405 | 406 | {% endif %} 407 | {% if ((axisAlbedo is defined) and (axisAlbedo.spec is defined)) or ((axisSpec is defined) and (axisSpec.spec is defined)) or ((axisRoughness is defined) and (axisRoughness.spec is defined)) %} 408 | 409 | {% endif %} 410 | {% if ((axisAlbedo is defined) and (axisAlbedo.roughness is defined)) or ((axisSpec is defined) and (axisSpec.roughness is defined)) or ((axisRoughness is defined) and (axisRoughness.roughness is defined)) %} 411 | 412 | {% endif %} 413 | {% if ((axisAlbedo is defined) and (axisAlbedo.total is defined)) or ((axisSpec is defined) and (axisSpec.total is defined)) or ((axisRoughness is defined) and (axisRoughness.total is defined)) %} 414 | 415 | {% endif %} 416 | {% if ((axisAlbedo is defined) and (axisAlbedo.visual is defined)) or ((axisSpec is defined) and (axisSpec.visual is defined)) or ((axisRoughness is defined) and (axisRoughness.visual is defined)) %} 417 | 418 | {% endif %} 419 | {% if ((axisAlbedo is defined) and (axisAlbedo.ssim is defined)) or ((axisSpec is defined) and (axisSpec.ssim is defined)) or ((axisRoughness is defined) and (axisRoughness.ssim is defined)) %} 420 | 421 | {% endif %} 422 | 423 | {% if (axisAlbedo is defined) %} 424 | 425 | 426 | {% if (axisAlbedo.albedo is defined) %} 427 | 432 | {% endif %} 433 | {% if (axisAlbedo.spec is defined) %} 434 | 439 | {% endif %} 440 | {% if (axisAlbedo.roughness is defined) %} 441 | 446 | {% endif %} 447 | {% if (axisAlbedo.total is defined) %} 448 | 453 | {% endif %} 454 | {% if (axisAlbedo.visual is defined) %} 455 | 460 | {% endif %} 461 | {% if (axisAlbedo.ssim is defined) %} 462 | 467 | {% endif %} 468 | 469 | {% endif %} 470 | {% if (axisSpec is defined) %} 471 | 472 | 473 | {% if (axisSpec.albedo is defined) %} 474 | 479 | {% 
endif %} 480 | {% if (axisSpec.spec is defined) %} 481 | 486 | {% endif %} 487 | {% if (axisSpec.roughness is defined) %} 488 | 493 | {% endif %} 494 | {% if (axisSpec.total is defined) %} 495 | 500 | {% endif %} 501 | {% if (axisSpec.visual is defined) %} 502 | 507 | {% endif %} 508 | {% if (axisSpec.ssim is defined) %} 509 | 514 | {% endif %} 515 | 516 | {% endif %} 517 | {% if (axisRoughness is defined) %} 518 | 519 | 520 | {% if (axisRoughness.albedo is defined) %} 521 | 526 | {% endif %} 527 | {% if (axisRoughness.spec is defined) %} 528 | 533 | {% endif %} 534 | {% if (axisRoughness.roughness is defined) %} 535 | 540 | {% endif %} 541 | {% if (axisRoughness.total is defined) %} 542 | 547 | {% endif %} 548 | {% if (axisRoughness.visual is defined) %} 549 | 554 | {% endif %} 555 | {% if (axisRoughness.ssim is defined) %} 556 | 561 | {% endif %} 562 | 563 | {% endif %} 564 |
AxisAlbedoLossSpecLossRoughnessLossTotalLossVisualLossSSIMLoss
AlbedoAxis 428 |
429 | 430 |
431 |
435 |
436 | 437 |
438 |
442 |
443 | 444 |
445 |
449 |
450 | 451 |
452 |
456 |
457 | 458 |
459 |
463 |
464 | 465 |
466 |
SpecAxis 475 |
476 | 477 |
478 |
482 |
483 | 484 |
485 |
489 |
490 | 491 |
492 |
496 |
497 | 498 |
499 |
503 |
504 | 505 |
506 |
510 |
511 | 512 |
513 |
RoughnessAxis 522 |
523 | 524 |
525 |
529 |
530 | 531 |
532 |
536 |
537 | 538 |
539 |
543 |
544 | 545 |
546 |
550 |
551 | 552 |
553 |
557 |
558 | 559 |
560 |
565 |
566 |

#1: Rendering Compare

567 | 568 | {% for renderResultRow in renderResult %} 569 | 570 | {% for renderResult in renderResultRow %} 571 | 581 | {% endfor %} 582 | 583 | {% endfor %} 584 |
572 |
573 | 574 |
"Predict:{{renderResult.predictedBRDF}}"
575 |
576 |
577 | 578 |
"Truth:{{renderResult.gtBRDF}}"
579 |
580 |
585 |
586 | 587 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Modeling Surface Appearance from a Single Photograph using Self-augmented Convolutional Neural Networks 2 | 3 | The main contributors of this repository include [Xiao Li](http://home.ustc.edu.cn/~pableeto), [Yue Dong](http://yuedong.shading.me), [Pieter Peers](http://www.cs.wm.edu/~ppeers/) and [Xin Tong](https://www.microsoft.com/en-us/research/people/xtong/). 4 | 5 | ## Introduction 6 | 7 | This repository provides a reference implementation for the SIGGRAPH 2017 paper "Modeling Surface Appearance from a Single Photograph using Self-augmented Convolutional Neural Networks". 8 | 9 | More information (including a copy of the paper) can be found at http://msraig.info/~sanet/sanet.htm. 10 | 11 | ## Update 12 | 10/23/2018: If you are looking for a (improved) Tensorflow version of Self-Augmentation training, you may have a look at our latest Pacific Graphics 2018 project page: https://github.com/msraig/InexactSA. 13 | 14 | ## Citation 15 | If you use our code or models, please cite: 16 | 17 | ``` 18 | @article{Li:2017:MSA, 19 | author = {Li, Xiao and Dong, Yue and Peers, Pieter and Tong, Xin}, 20 | title = {Modeling Surface Appearance from a Single Photograph using Self-Augmented Convolutional Neural Networks}, 21 | month = {July}, 22 | year = {2017}, 23 | journal = {ACM Transactions on Graphics}, 24 | volume = {36}, 25 | number = {4}, 26 | article = {45}, 27 | } 28 | ``` 29 | 30 | ---------------------------------------------------------------- 31 | ## Usage: 32 | 33 | ### System Requirements 34 | - Windows or Linux system (validated on Windows 10 and Ubuntu 12.04. 
Mac OSX is currently not supported) 35 | - A NVidia GPU (tested on Titan X and GTX 1080) 36 | - Python 2.7 (Python 3.x is not supported) 37 | - Caffe with Python support (tested with both CUDA 7.5 + cuDNN 5.0 and CUDA 8.0 + cuDNN 5.1) 38 | - We strongly recommend to install Anaconda2 which includes many of the necessary external packages. The following packages are required: 39 | * NumPy (tested with version 1.10.4, however newer versions should work too) 40 | * OpenCV (tested with version 3.1.0) 41 | * PyCUDA (tested with version 2016-1-2) 42 | * Matplotlib (tested with version 1.5.1) 43 | * skimage 44 | * jinja2 45 | 46 | 47 | ### Installation 48 | After installing all the prerequisites listed above, download (or git clone) the code repository. Furthermore, to retrain the network, you may also need to download the datasets which includes training/test patches and collected lighting maps (see below). 49 | 50 | ### Preparing data 51 | We also provide both training and test datasets to quickly test or reproduce SA-BRDF-Net or SA-SVBRDF-Net. The dataset can be downloaded from the project website: http://msraig.info/~sanet/sanet.htm. Because the complete rendered training and test image patches are too large, the dataset needs to be generated from the original SVBRDFs and lighting maps using the provided python scripts. 52 | 53 | To generate from the downloaded data, first edit ./BRDFNet/folderPath.txt (for BRDF-Net) and/or ./SVBRDFNet/folderPath_SVBRDF.txt (for SVBRDF-Net). Next, to generate the training and test data for BRDF-Net, execute: 54 | 55 | python ./BRDFNet/RenderBRDFNetData.py $GPUID$ $BRDF_NET_DATA_FOLDER$ 56 | 57 | and/or to generate training and test data for SVBRDF-Net, execute: 58 | 59 | python ./SVBRDFNet/RenderSVBRDFDataset.py $SVBRDF_NET_DATA_FOLDER$ $CATEGORY_TAG$ $GPUID$ $RENDERTYPE$ -1 -1 $RENDERTEST$ 60 | 61 | with: 62 | 63 | **$BRDF_NET_DATA_FOLDER$:** output folder for the BRDF-Net dataset. 
64 | 65 | **$SVBRDF_NET_DATA_FOLDER$**: folder containing the provided SVBRDFs (downloaded from the project website). This is also the output folder for the SVBRDF-Net dataset. 66 | 67 | **$CATEGORY_TAG$**: set to "wood" "metal" or "plastic". 68 | 69 | **$RENDERTYPE$**: 0 - only render PFM images; 1 - only render JPG images; 2 - render both. (0 or 2 are preferred) 70 | 71 | **$RENDERTEST$**: set to "train" - only render training images; "test" - only render testing images; "all" - render both training and testing data. 72 | 73 | ### Testing the trained model 74 | We also provide the trained CNN model for SA-SVBRDF-Net on Wood, Metal and Plastic dataset, as well as the model for SA-BRDF-Net, on the project website: http://msraig.info/~sanet/sanet.htm. 75 | 76 | To test the models on the provided SVBRDF datasets, execute: 77 | 78 | python ./SVBRDFNet/TestSVBRDF.py $MODELFILE$ $TESTSET$ $GPUID$ 79 | 80 | with: 81 | 82 | **$MODELFILE$**: Caffe model to test. 83 | 84 | **$TESTSET$**: test dataset. Typically: **$SVBRDF_NET_DATA_FOLDER$\Test_Suppmental\$DATA_TAG$\list.txt** 85 | 86 | For BRDF-Net, by default, the training script will automatically generate test reports after finishing the training. To manually test the model on the BRDF dataset, run: 87 | 88 | python ./BRDFNet/TestBRDF.py $MODELFILE$ $TESTCONFIG$ $GPUID$ 89 | 90 | with: 91 | 92 | **$MODELFILE$**: Caffe model to test. 93 | 94 | **$TESTCONFIG$**: a config ini file for testing. This should be generated during training. 95 | 96 | The test results and a report (a HTML file) will be generated at the folder containing the trained model. 97 | 98 | **Note**: For to run this test you will need to download and generate the test data first (see section 2 above) 99 | 100 | Advice for testing on your own images: 101 | 102 | - All our models are provided in the Caffe format. 103 | - The input to SVBRDF-Net model is an image (in [0, 1] range) with size 256*256. 
The output of our model is an albedo map and a normal map with the same size as input, a 3-channel vector represents RGB specular albedo and a float value representing the roughness. 104 | 105 | 106 | ### Training from scratch 107 | #### Training SA-BRDF-Net: 108 | Edit the text file **./BRDFNet/BRDF_Net_Config.ini**, which contains all the relevant settings w.r.t. training. Change the following rows: 109 | 110 | dataset = $BRDF_NET_DATA_FOLDER$/train_envlight/train_full.txt 111 | unlabelDataset = $BRDF_NET_DATA_FOLDER$/train_envlight/train_full.txt 112 | testDataset = $BRDF_NET_DATA_FOLDER$/test_envlight/test_full.txt 113 | 114 | These rows setup the paths for the training/test data; **$BRDF_NET_DATA_FOLDER$** is the folder of the BRDF-Net data. 115 | 116 | By default, the training of SA-BRDF-Net is configured to only use the corners of the training space as labeled data, leaving rest as unlabeled data. This behavior is defined via **albedoRange**, **specRange** and **roughnessRange** parameters in the BRDF_Net_Config.ini. Changing these parameters change the distribution of labeled/unlabeled data. Please note that albedoRange and specRange are in the [0, 9] range, while roughnessRange is in the [0, 14] range. 117 | 118 | To train the SA-BRDF-Model, run: 119 | 120 | python ./BRDFNet/BRDFNetTraining.py BRDF_Net_Config.ini $OUT_TAG$ $RESTORE_TAG$ $GPUID$ $RENDERGPUID$ $AUTOTEST$ 121 | 122 | with: 123 | 124 | **$OUT_TAG$**: name of the training. 125 | 126 | **$RESTORE_TAG$**: 0 - training from scratch 127 | 128 | **$RENDERGPUID$**: must be the same as **$GPUID$** 129 | 130 | **$AUTOTEST$**: 1 - running a full test and generate reports after training. 131 | 132 | By default, the training snapshot and results are saved in **./TrainedResult/$OUT_TAG$** (relative to root of code folder). 133 | You can change this by editing the first line in **./BRDFNet/folderPath.txt**. 
134 | 135 | #### Training SA-SVBRDF-Net 136 | Open **./SVBRDFNet/SVBRDF_Net_Config.ini**, which contains all the settings w.r.t. the training, and change the following rows: 137 | 138 | dataset = $SVBRDF_NET_DATA_FOLDER$/$CATAGORY_TAG$/Labeled/trainingdata.txt 139 | unlabelDataset = $SVBRDF_NET_DATA_FOLDER$/$CATAGORY_TAG$/unlabeled.txt 140 | testDataset = $SVBRDF_NET_DATA_FOLDER$/$CATAGORY_TAG$/Test/test.txt 141 | 142 | These rows setup the path for the training/test data; **$SVBRDF_NET_DATA_FOLDER$** is the folder of the SVBRDF-Net data. 143 | **$CATAGORY_TAG$** should be either of "wood", "metal" or "plastic". 144 | 145 | lightPoolFile = lightPool_$CATAGORY_TAG$.dat 146 | autoExposureLUTFile = lightNormPool_$CATAGORY_TAG$.dat 147 | 148 | These rows setup the path for pre-defined lighting rotations and pre-computed auto-exposure factors. 149 | 150 | To train the SA-SVBRDF-Model, run: 151 | 152 | python ./SVBRDFNet/SVBRDFNetTraining.py SVBRDF_Net_Config.ini $OUT_TAG$ $RESTORE_TAG$ $GPUID$ $RENDERGPUID$ 153 | 154 | with: 155 | **$OUT_TAG$**: name of the training. 156 | 157 | **$RESTORE_TAG$**: 0 - training from scratch 158 | 159 | **$RENDERGPUID$**: must be the same as **$GPUID$** 160 | 161 | By default, the training snapshot and results are saved in **./TrainedResult/$OUT_TAG$** (relative to root of code folder). This can be changed by editing the first line in **./SVBRDFNet/folderPath_SVBRDF.txt**. 162 | . 
163 | -------------------------------------------------------------------------------- /Render/BRDFEvaluate.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #define MATH_PI 3.14159265358979f 8 | 9 | #define SPE_LOD_OFFSET 1.f 10 | #define DIFF_LOD_OFFSET 2.f 11 | 12 | // Hammersley sequence generator 13 | // http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html 14 | #define NUM_LIGHT_MIPMAP 9 15 | 16 | texture texCube0; 17 | texture texCube1; 18 | texture texCube2; 19 | texture texCube3; 20 | texture texCube4; 21 | texture texCube5; 22 | texture texCube6; 23 | texture texCube7; 24 | texture texCube8; 25 | 26 | 27 | __device__ float3 samplefloat3Fromfloat4(float4 input) 28 | { 29 | return make_float3(input.x, input.y, input.z); 30 | } 31 | 32 | __device__ float3 samplefloat3Fromfloat(float input) 33 | { 34 | return make_float3(input, input, input); 35 | } 36 | 37 | __device__ float4 texCubeMipmap(float x, float y, float z, float lod) 38 | { 39 | int lodLeft = int(lod); 40 | float wLeft = 1.0 - (lod - lodLeft); 41 | float wRight = 1.0 - wLeft; 42 | 43 | float4 sampled = make_float4(0.0f); 44 | switch(lodLeft) 45 | { 46 | case 0: 47 | sampled = wLeft * texCubemap(texCube0, x, y, z) + wRight * texCubemap(texCube1, x, y, z); 48 | break; 49 | case 1: 50 | sampled = wLeft * texCubemap(texCube1, x, y, z) + wRight * texCubemap(texCube2, x, y, z); 51 | break; 52 | case 2: 53 | sampled = wLeft * texCubemap(texCube2, x, y, z) + wRight * texCubemap(texCube3, x, y, z); 54 | break; 55 | case 3: 56 | sampled = wLeft * texCubemap(texCube3, x, y, z) + wRight * texCubemap(texCube4, x, y, z); 57 | break; 58 | case 4://case 5:case 6:case 7: case 8: 59 | sampled = wLeft * texCubemap(texCube4, x, y, z) + wRight * texCubemap(texCube5, x, y, z); 60 | break; 61 | case 5: 62 | sampled = wLeft * texCubemap(texCube5, x, y, z) + wRight * texCubemap(texCube6, x, 
y, z); 63 | break; 64 | case 6: 65 | sampled = wLeft * texCubemap(texCube6, x, y, z) + wRight * texCubemap(texCube7, x, y, z); 66 | break; 67 | case 7: 68 | sampled = wLeft * texCubemap(texCube7, x, y, z) + wRight * texCubemap(texCube8, x, y, z); 69 | break; 70 | case 8: 71 | sampled = texCubemap(texCube8, x, y, z);// + wRight * texCubemap(texCube9, x, y, z); 72 | break; 73 | } 74 | // sampled = texCubemap(texCube0, x, y, z); 75 | // sampled = texCubemap(texCube0, x, y, z); 76 | //ampled = wLeft * texCubemapLod(texCube[lodLeft], x, y, z, float(lodLeft)) + wRight * texCubemapLod(texCube[lodRight], x, y, z, float(lodRight)); 77 | return sampled; 78 | } 79 | 80 | 81 | __device__ float radicalInverse_VdC(uint bits) 82 | { 83 | bits = (bits << 16u) | (bits >> 16u); 84 | bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u); 85 | bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u); 86 | bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u); 87 | bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u); 88 | return float(bits) * 2.3283064365386963e-10; // / 0x100000000 89 | } 90 | 91 | __device__ float2 Hammersley(uint i, uint NumSamples) 92 | { 93 | return make_float2(float(i) / float(NumSamples), radicalInverse_VdC(i)); 94 | } 95 | 96 | // PDFs 97 | // Beckmann 98 | // http://blog.selfshadow.com/publications/s2012-shading-course/ 99 | // PDF_L = PDF_H /(4 * VoH) 100 | __device__ float PDF_Beckmann_H(float roughness, float NoH) 101 | { 102 | float e = exp((NoH * NoH - 1) / (NoH * NoH * roughness * roughness)); 103 | 104 | // beckmann * NoH 105 | return e / (MATH_PI * roughness * roughness * NoH * NoH * NoH); 106 | } 107 | 108 | // Diffuse 109 | __device__ float PDF_Diffuse(float NoL) 110 | { 111 | return NoL / MATH_PI; 112 | } 113 | 114 | //Ref: Microfacet Models for Refraction through Rough Surfaces, EGSR 2007 115 | //Beckmann 116 | __device__ float NDF_Beckmann(float3 M, float3 N, float roughness) 117 | { 118 | float 
MoN = dot(M, N); 119 | float CosThetaM = clamp(MoN, 0.0, 1.0); 120 | if (CosThetaM > 0) 121 | { 122 | float roughness_2 = roughness * roughness; 123 | float CosThetaM_2 = CosThetaM * CosThetaM; 124 | float CosThetaM_4 = CosThetaM_2 * CosThetaM_2; 125 | float TanThetaM_2 = (1.0 - CosThetaM_2) / CosThetaM_2; 126 | 127 | return exp(-TanThetaM_2 / roughness_2) / (MATH_PI * roughness_2 * CosThetaM_4); 128 | } 129 | else 130 | return 0; 131 | } 132 | 133 | // Importance Sampling Functions 134 | // Beckmann 135 | __device__ float3 ImportanceSampleBeckmann(float2 Xi, float roughness, float3 N) 136 | { 137 | float Phi = 2 * MATH_PI * Xi.x; 138 | float CosTheta = sqrt(1.f / (1 - roughness * roughness * log(1 - Xi.y))); 139 | float SinTheta = sqrt(1 - CosTheta * CosTheta); 140 | float3 H; 141 | H.x = SinTheta * cos(Phi); 142 | H.y = SinTheta * sin(Phi); 143 | H.z = CosTheta; 144 | float3 UpVector = (abs(N.z) < 0.5f) ? make_float3(0, 0, 1) : make_float3(1, 0, 0); 145 | float3 TangentX = normalize(cross(UpVector, N)); 146 | float3 TangentY = cross(N, TangentX); 147 | // Tangent to world space 148 | return TangentX * H.x + TangentY * H.y + N * H.z; 149 | } 150 | 151 | // Schlick-Smith Geometric term 152 | //http://blog.selfshadow.com/publications/s2012-shading-course/mcauley/s2012_pbs_farcry3_notes_v2.pdf 153 | __device__ float G_SchlickSmith(float roughness, float NoL, float NoV) 154 | { 155 | float a = roughness * sqrt(2.f / MATH_PI); 156 | float visInv = (NoL * (1.f - a) + a) * (NoV *(1.f - a) + a); 157 | return NoL * NoV / visInv; 158 | } 159 | //Cook-Torrance Geometric term 160 | __device__ float G_CookTorrance(float NoL, float NoV, float NoH, float VoH) 161 | { 162 | float shad1 = (2.0f * NoH * NoV) / VoH; 163 | float shad2 = (2.0f * NoH * NoL) / VoH; 164 | return min(1.0f, min(shad1, shad2)); 165 | } 166 | 167 | // Diffuse 168 | __device__ float3 ImportanceSampleDiffuse(float2 Xi, float3 N) 169 | { 170 | float Phi = 2 * MATH_PI * Xi.x; 171 | float CosTheta = sqrt(1 - 
Xi.y); 172 | float SinTheta = sqrt(1 - CosTheta * CosTheta); 173 | float3 H; 174 | H.x = SinTheta * cos(Phi); 175 | H.y = SinTheta * sin(Phi); 176 | H.z = CosTheta; 177 | 178 | float3 UpVector = (abs(N.z) < 0.5f) ? make_float3(0, 0, 1) : make_float3(1, 0, 0); 179 | //float3 UpVector = normalize((1 - abs(N.z + N.x + N.y)) * float3(0, 0, 1) + 0.5f * abs(N.z + N.x + N.y) * float3(1, 0, 0)); 180 | float3 TangentX = normalize(cross(UpVector, N)); 181 | float3 TangentY = normalize(cross(N, TangentX)); 182 | // Tangent to world space 183 | return TangentX * H.x + TangentY * H.y + N * H.z; 184 | } 185 | 186 | 187 | 188 | //Shading Eval functions 189 | __device__ float3 EvalDiffusePointLight(float3 L, float3 V, float3 N) 190 | { 191 | float d = clamp(dot(L, N), 0.0, 1.0) / MATH_PI; 192 | return make_float3(d); 193 | } 194 | 195 | __device__ float3 EvalSpecularPointLight(float3 L, float3 V, float3 N, float roughness) 196 | { 197 | float NoL = clamp(dot(L, N), 0.0, 1.0); 198 | float NoV = clamp(dot(N, V), 0.0, 1.0); 199 | if (NoL > 1e-6 && NoV > 1e-6) 200 | { 201 | float3 H = normalize(0.5 *(L + V)); 202 | float NoH = clamp(dot(N, H), 0.0, 1.0) + 1e-10f; 203 | float VoH = clamp(dot(V, H), 0.0, 1.0) + 1e-10f; 204 | 205 | float D = 0; 206 | D = NDF_Beckmann(H, N, roughness); 207 | float G = G_CookTorrance(NoL, NoV, NoH, VoH); 208 | return make_float3(G * D / (4.0 * NoV)); 209 | } 210 | else 211 | return make_float3(0.0); 212 | } 213 | 214 | 215 | 216 | __device__ float3 EvalDiffuseEnvLight(float3 N, uint nSamples, uint nCubeRes) 217 | { 218 | float3 DiffuseLighting = make_float3(0); 219 | 220 | // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch20.html 221 | // ignore distortion 222 | const float solidAng_pixel = 4 * MATH_PI / (nCubeRes * nCubeRes * 6); 223 | // nSamples = 128;// 224 | // float avg_l = make_float3(0); 225 | 226 | for (uint i = 0; i < nSamples; i++) 227 | { 228 | float2 Xi = Hammersley(i, nSamples); 229 | float3 L = ImportanceSampleDiffuse(Xi, N); 230 | 
// avg_l += L; 231 | float NoL = clamp(dot(N, L), 0.0, 1.0); 232 | if (NoL > 0.0f) 233 | { 234 | float solidAng_sample = 1.f / (nSamples * PDF_Diffuse(NoL)); 235 | float lod = min(NUM_LIGHT_MIPMAP - 1.0f, (max(0.0f, 0.5f * log2(solidAng_sample / solidAng_pixel)) + DIFF_LOD_OFFSET)); 236 | // lod = 0; 237 | float3 SampleColor = samplefloat3Fromfloat4(texCubeMipmap(L.x, L.y, L.z, lod)); 238 | //float3 SampleColor = samplefloat3Fromfloat4(texCubemapLod(texCube, L.x, L.y, L.z, lod)); 239 | DiffuseLighting += SampleColor; 240 | } 241 | } 242 | // avg_l /= nSamples; 243 | return DiffuseLighting / nSamples;//make_float3(avg_l.z, avg_l.y, avg_l.x);// 244 | } 245 | 246 | 247 | __device__ float3 EvalSpecularEnvLight(float roughness, float3 N, float3 V, uint nSamples, uint nCubeRes) 248 | { 249 | // float3 L = 2 * dot(V, N) * N - V; 250 | float3 SpecularLighting = make_float3(0); 251 | // float3 SampleColor = samplefloat3Fromfloat4(texCubeMipmap(L.x, L.y, L.z, 0)); 252 | // return SampleColor;//make_float3(L.z, L.y, L.x); 253 | 254 | // https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch20.html 255 | // ignore distortion 256 | const float solidAng_pixel = 4 * MATH_PI / (nCubeRes * nCubeRes * 6); 257 | 258 | 259 | for (uint i = 0; i < nSamples; i++) 260 | { 261 | float2 Xi = Hammersley(i, nSamples); 262 | //float3 H = make_float3(0.0f, 0.0f, 1.0f); 263 | float3 H = ImportanceSampleBeckmann(Xi, roughness, N); 264 | 265 | float3 L = 2 * dot(V, H) * H - V; 266 | float NoV = clamp(dot(N, V), 0.0, 1.0) + 1e-10f; 267 | float NoL = clamp(dot(N, L), 0.0, 1.0); 268 | float NoH = clamp(dot(N, H), 0.0, 1.0) + 1e-10f; 269 | float VoH = clamp(dot(V, H), 0.0, 1.0) + 1e-10f; 270 | if (NoL > 0) 271 | { 272 | // http://blog.selfshadow.com/publications/s2012-shading-course/ 273 | // PDF_L = PDF_H /(4 * VoH) 274 | float solidAng_sample = solidAng_pixel; 275 | solidAng_sample = 4.f * VoH / (nSamples * PDF_Beckmann_H(roughness, NoH)); 276 | float lod = min(NUM_LIGHT_MIPMAP - 1.0f, (max(0.0f, 
0.5f * log2(solidAng_sample / solidAng_pixel)) + SPE_LOD_OFFSET)); 277 | float3 SampleColor = samplefloat3Fromfloat4(texCubeMipmap(L.x, L.y, L.z, lod)); 278 | float G = G_CookTorrance(NoL, NoV, NoH, VoH);//G_SchlickSmith(roughness, NoL, NoV); 279 | SpecularLighting += SampleColor * G * VoH / (NoH * NoV); 280 | } 281 | } 282 | return SpecularLighting / nSamples;//make_float3(avgLod / nSamples);//SpecularLighting / nSamples;////SpecularLighting / nSamples;//make_float3(avgLod / nSamples);//make_float3(avgLod.z / nSamples, avgLod.y / nSamples, avgLod.x / nSamples);//make_float3(avgLod / nSamples); //SpecularLighting / nSamples; 283 | } -------------------------------------------------------------------------------- /Render/FastRendererCUDA.py: -------------------------------------------------------------------------------- 1 | # FastRendererCUDA.py 2 | # PyCUDA based renderer class 3 | 4 | import numpy as np 5 | import math 6 | import os, sys 7 | working_path = os.path.dirname(os.path.realpath(__file__)) 8 | root_path = os.path.dirname(working_path) 9 | sys.path.append(root_path + r'/Utils') 10 | import cv2 11 | 12 | import pycuda.driver as cuda 13 | from pycuda.compiler import SourceModule, compile 14 | 15 | from utils import load_pfm, save_pfm, getTexCube, pfmFromBuffer, pfmToBuffer, genMipMap 16 | 17 | import time 18 | 19 | os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' 20 | os.chdir(working_path) 21 | 22 | def tex2DToGPU(tex): 23 | nChannal = 1 if (len(tex.shape) == 2) else 3 24 | 25 | if(nChannal == 3): 26 | #Add padding channal 27 | tex = np.dstack((tex, np.ones((tex.shape[0], tex.shape[1])))) 28 | tex = np.ascontiguousarray(tex).astype(np.float32) 29 | texGPUArray = cuda.make_multichannel_2d_array(tex, 'C') 30 | else: 31 | texGPUArray = cuda.np_to_array(tex, 'C') 32 | 33 | return texGPUArray 34 | 35 | def texCubeToGPUMipmap(texCube): 36 | #assume width = height and is 2^x 37 | texMipMapList = genMipMap(texCube) 38 | texMipMapGPUArray = [] 39 | for k in range(0, 
len(texMipMapList)): 40 | texMipMapGPUArray.append(texCubeToGPU(texMipMapList[k])) 41 | 42 | return texMipMapGPUArray 43 | 44 | 45 | 46 | def texCubeToGPU(texCube): 47 | descr = cuda.ArrayDescriptor3D() 48 | descr.width = texCube.shape[2] 49 | descr.height = texCube.shape[1] 50 | descr.depth = 6 51 | descr.format = cuda.dtype_to_array_format(texCube.dtype) 52 | descr.num_channels = 4 53 | descr.flags = cuda.array3d_flags.CUBEMAP 54 | 55 | texCubeArray = cuda.Array(descr) 56 | copy = cuda.Memcpy3D() 57 | copy.set_src_host(texCube) 58 | copy.set_dst_array(texCubeArray) 59 | copy.width_in_bytes = copy.src_pitch = texCube.strides[1] #d*h*w*c 60 | copy.src_height = copy.height = texCube.shape[1] 61 | copy.depth = 6 62 | 63 | copy() 64 | 65 | return texCubeArray 66 | 67 | class FastRenderEngine(object): 68 | 69 | cudadevice = None 70 | cudacontext = None 71 | 72 | nDiffCount = 128 73 | nSpecCount = 512 74 | 75 | matrixView = np.identity(4) 76 | matrixProj = np.identity(4) 77 | matrixLight = np.identity(4) 78 | 79 | out_buffer = None 80 | out_buffer_gpu = None 81 | 82 | geoType = 'Sphere' 83 | lightMode = 'Point' 84 | 85 | lightPos = np.zeros((8, 4)) 86 | lightIntensity = np.zeros((8, 3)) 87 | lightStatus = np.zeros(8) 88 | 89 | texRef_Light = None 90 | texRef_Light_List = [] 91 | texRef_Albedo = None 92 | texRef_Spec = None 93 | texRef_Roughness = None 94 | texRef_Normal = None 95 | 96 | texCubeList_gpu = {} 97 | nCubeRes = 512 98 | texAlbedo_gpu = None 99 | texSpec_gpu = None 100 | texRoughness_gpu = None 101 | texNormal_gpu = None 102 | 103 | cuda_mod = None 104 | renderMode = 0 105 | 106 | sphere_normal_map = None 107 | 108 | def __init__(self, gpuid): 109 | cuda.init() 110 | self.cudadevice = cuda.Device(gpuid) 111 | self.cudacontext = self.cudadevice.make_context() 112 | 113 | dir_path = os.path.dirname(os.path.realpath(__file__)) 114 | with open(dir_path + r'/PixelShader.cu', 'r') as f: 115 | cudaCode = f.read() 116 | self.cuda_mod = SourceModule(cudaCode, 
include_dirs = [dir_path], no_extern_c = True, options = ['-O0']) 117 | 118 | for k in range(0, 9): 119 | self.texRef_Light_List.append(self.cuda_mod.get_texref('texCube{}'.format(k))) 120 | 121 | self.texRef_Albedo = self.cuda_mod.get_texref('albedo') 122 | self.texRef_Spec = self.cuda_mod.get_texref('spec') 123 | self.texRef_Roughness = self.cuda_mod.get_texref('roughness') 124 | self.texRef_Normal = self.cuda_mod.get_texref('normal') 125 | 126 | self.sphere_normal_map = load_pfm(dir_path + r'/sphere_normal.pfm') 127 | 128 | import atexit 129 | atexit.register(self.cudacontext.pop) 130 | 131 | 132 | def SetSampleCount(self, diffCount, specCount): 133 | self.nDiffCount = diffCount 134 | self.nSpecCount = specCount 135 | 136 | 137 | def SetLightXform(self, rotXAngle, rotYAngle): 138 | matRotX = np.array([[1,0,0,0], 139 | [0, math.cos(rotXAngle * math.pi / 180.0), -math.sin(rotXAngle * math.pi / 180.0),0], 140 | [0, math.sin(rotXAngle * math.pi / 180.0), math.cos(rotXAngle * math.pi / 180.0),0], 141 | [0,0,0,1]]).transpose() 142 | matRotY = np.array([[math.cos(rotYAngle * math.pi / 180.0),0, math.sin(rotYAngle * math.pi / 180.0), 0], 143 | [0, 1, 0, 0], 144 | [-math.sin(rotYAngle * math.pi / 180.0), 0, math.cos(rotYAngle * math.pi / 180.0),0], 145 | [0,0,0,1]]).transpose() 146 | 147 | self.matrixLight = (matRotY.dot(matRotX)).astype(np.float32) 148 | self.matrixLight = np.ascontiguousarray(self.matrixLight) 149 | 150 | 151 | 152 | def SetGeometry(self, type): 153 | self.geoType = type 154 | if(type == 'Plane'): 155 | normal_default = np.dstack((np.ones((256,256)), np.zeros((256,256)), np.zeros((256,256)))) 156 | else: 157 | normal_default = self.sphere_normal_map 158 | self.SetNormalMap(normal_default) 159 | 160 | def SetRenderMode(self, mode): 161 | self.renderMode = mode 162 | 163 | def PreLoadAllLight(self, lightFile): 164 | folderPath, name = os.path.split(lightFile) 165 | with open(lightFile, 'r') as f: 166 | lightIDs = map(int, f.read().strip().split('\n')) 
167 | for lid in lightIDs: 168 | crossImg = load_pfm(folderPath + r'/{:04d}.pfm'.format(lid)) 169 | self.nCubeRes = crossImg.shape[1] / 4 170 | self.texCubeList_gpu[lid] = texCubeToGPUMipmap(getTexCube(crossImg))# texCubeToGPU(getTexCube(crossImg)) 171 | 172 | def SetEnvLightByID(self, id, rotXAngle = 0, rotYAngle = 0): 173 | self.SetLightXform(rotXAngle, rotYAngle) 174 | 175 | for k in range(0, len(self.texCubeList_gpu[id])): 176 | self.texRef_Light_List[k].set_array(self.texCubeList_gpu[id][k]) 177 | self.texRef_Light_List[k].set_flags(cuda.TRSF_NORMALIZED_COORDINATES) 178 | self.texRef_Light_List[k].set_filter_mode(cuda.filter_mode.LINEAR) 179 | self.texRef_Light_List[k].set_address_mode(0, cuda.address_mode.WRAP) 180 | self.texRef_Light_List[k].set_address_mode(1, cuda.address_mode.WRAP) 181 | 182 | self.lightMode = 'Env' 183 | 184 | def SetPointLight(self, slot, x, y, z, w, r, g, b): 185 | self.lightPos[slot, :] = [x, y, z, w] 186 | self.lightIntensity[slot, :] = [b, g, r] 187 | self.lightStatus[slot] = 1 188 | self.lightMode = 'Point' 189 | 190 | def SetAlbedoMap(self, albedo): 191 | self.texAlbedo_gpu = tex2DToGPU(albedo.astype(np.float32)) 192 | self.texRef_Albedo.set_array(self.texAlbedo_gpu) 193 | self.texRef_Albedo.set_flags(cuda.TRSF_NORMALIZED_COORDINATES) 194 | self.texRef_Albedo.set_filter_mode(cuda.filter_mode.LINEAR) 195 | self.texRef_Albedo.set_address_mode(0, cuda.address_mode.WRAP) 196 | self.texRef_Albedo.set_address_mode(1, cuda.address_mode.WRAP) 197 | 198 | def SetAlbedoValue(self, albedo): 199 | self.SetAlbedoMap(albedo * np.ones((256,256,3))) 200 | 201 | def SetSpecMap(self, spec): 202 | self.texSpec_gpu = tex2DToGPU(spec.astype(np.float32)) 203 | self.texRef_Spec.set_array(self.texSpec_gpu) 204 | self.texRef_Spec.set_flags(cuda.TRSF_NORMALIZED_COORDINATES) 205 | self.texRef_Spec.set_filter_mode(cuda.filter_mode.LINEAR) 206 | self.texRef_Spec.set_address_mode(0, cuda.address_mode.WRAP) 207 | self.texRef_Spec.set_address_mode(1, 
cuda.address_mode.WRAP) 208 | 209 | def SetSpecValue(self, spec): 210 | self.SetSpecMap(spec * np.ones((256,256,3))) 211 | 212 | def SetRoughnessMap(self, roughness): 213 | if(len(roughness.shape) == 3): 214 | roughness = roughness[:,:,0] 215 | self.texRoughness_gpu = tex2DToGPU(roughness.astype(np.float32)) 216 | self.texRef_Roughness.set_array(self.texRoughness_gpu) 217 | self.texRef_Roughness.set_flags(cuda.TRSF_NORMALIZED_COORDINATES) 218 | self.texRef_Roughness.set_filter_mode(cuda.filter_mode.LINEAR) 219 | self.texRef_Roughness.set_address_mode(0, cuda.address_mode.WRAP) 220 | self.texRef_Roughness.set_address_mode(1, cuda.address_mode.WRAP) 221 | 222 | def SetRoughnessValue(self, roughness): 223 | self.SetRoughnessMap(roughness * np.ones((256,256))) 224 | 225 | def SetNormalMap(self, normal): 226 | self.texNormal_gpu = tex2DToGPU(normal.astype(np.float32)) 227 | self.texRef_Normal.set_array(self.texNormal_gpu) 228 | self.texRef_Normal.set_flags(cuda.TRSF_NORMALIZED_COORDINATES) 229 | self.texRef_Normal.set_filter_mode(cuda.filter_mode.LINEAR) 230 | self.texRef_Normal.set_address_mode(0, cuda.address_mode.WRAP) 231 | self.texRef_Normal.set_address_mode(1, cuda.address_mode.WRAP) 232 | 233 | def SetCamera(self, ox, oy, oz, lx, ly, lz, ux, uy, uz, fov, clipNear, clipFar, width, height): 234 | upDir = np.array([ux,uy,uz]) 235 | eyePos = np.array([ox,oy,oz]) 236 | eyeVec = np.array([lx-ox, ly-oy, lz-oz]) 237 | R2 = eyeVec / np.linalg.norm(eyeVec) 238 | R0 = np.cross(upDir, R2) 239 | R0 = R0 / np.linalg.norm(R0) 240 | R1 = np.cross(R2, R0) 241 | 242 | D0 = R0.dot(-eyePos) 243 | D1 = R1.dot(-eyePos) 244 | D2 = R2.dot(-eyePos) 245 | 246 | self.matrixView = np.array([[R0[0], R0[1], R0[2], D0], 247 | [R1[0], R1[1], R1[2], D1], 248 | [R2[0], R2[1], R2[2], D2], 249 | [0,0,0,1]]).transpose().astype(np.float32) 250 | self.matrixView = np.ascontiguousarray(self.matrixView) 251 | 252 | sinFov = math.sin(0.5*fov) 253 | cosFov = math.cos(0.5*fov) 254 | 255 | height1 = cosFov 
/ sinFov 256 | width1 = height1 / (float(width) / float(height)) 257 | fRange = clipFar / (clipFar - clipNear) 258 | 259 | self.matrixProj = np.array([[width1, 0, 0, 0], 260 | [0, height1, 0, 0], 261 | [0, 0, fRange, 1], 262 | [0, 0, -fRange * clipNear, 0]]).astype(np.float32) 263 | self.matrixProj = np.ascontiguousarray(self.matrixProj) 264 | 265 | self.matrixView_gpu = cuda.mem_alloc(self.matrixView.nbytes) 266 | cuda.memcpy_htod(self.matrixView_gpu, self.matrixView) 267 | self.matrixProj_gpu = cuda.mem_alloc(self.matrixProj.nbytes) 268 | cuda.memcpy_htod(self.matrixProj_gpu, self.matrixProj) 269 | 270 | self.out_buffer = np.zeros((height, width, 3)).astype(np.float32) 271 | self.out_buffer_gpu = cuda.mem_alloc(self.out_buffer.nbytes) 272 | 273 | def Render(self): 274 | renderFunc = self.cuda_mod.get_function('PS_Render_{}_{}'.format(self.geoType, self.lightMode)) 275 | 276 | grid_x = (self.out_buffer.shape[1] - 1) / 16 + 1 277 | grid_y = (self.out_buffer.shape[0] - 1) / 16 + 1 278 | 279 | if(self.lightMode == 'Env'): 280 | texrefList = [self.texRef_Albedo, self.texRef_Spec, self.texRef_Normal, self.texRef_Roughness, 281 | self.texRef_Light_List[0], self.texRef_Light_List[1], self.texRef_Light_List[2], 282 | self.texRef_Light_List[3], self.texRef_Light_List[4], self.texRef_Light_List[5], 283 | self.texRef_Light_List[6], self.texRef_Light_List[7], self.texRef_Light_List[8]] 284 | matWorldToLight = np.ascontiguousarray(np.linalg.inv(self.matrixLight).astype(np.float32)) 285 | matWorldToLight_gpu = cuda.mem_alloc(matWorldToLight.nbytes) 286 | cuda.memcpy_htod(matWorldToLight_gpu, matWorldToLight) 287 | renderFunc(self.out_buffer_gpu, np.int32(self.out_buffer.shape[1]), np.int32(self.out_buffer.shape[0]), 288 | self.matrixProj_gpu, self.matrixView_gpu, 289 | matWorldToLight_gpu, np.int32(self.nCubeRes), 290 | np.int32(self.nDiffCount), np.int32(self.nSpecCount), np.int32(self.renderMode), 291 | block = (16,16,1), grid = (grid_x,grid_y,1), texrefs=texrefList) 292 | 
elif(self.lightMode == 'Point'): 293 | lightStatus_gpu = cuda.mem_alloc(self.lightStatus.astype(np.int32).nbytes) 294 | cuda.memcpy_htod(lightStatus_gpu, self.lightStatus.astype(np.int32)) 295 | lightIntensity_gpu = cuda.mem_alloc(self.lightIntensity.astype(np.float32).nbytes) 296 | cuda.memcpy_htod(lightIntensity_gpu, self.lightIntensity.astype(np.float32)) 297 | lightPos_gpu = cuda.mem_alloc(self.lightPos.astype(np.float32).nbytes) 298 | cuda.memcpy_htod(lightPos_gpu, self.lightPos.astype(np.float32)) 299 | 300 | renderFunc(self.out_buffer_gpu, np.int32(self.out_buffer.shape[1]), np.int32(self.out_buffer.shape[0]), 301 | self.matrixProj_gpu, self.matrixView_gpu, 302 | lightStatus_gpu, lightIntensity_gpu, lightPos_gpu, np.int32(self.renderMode), 303 | block = (16,16,1), grid = (grid_x,grid_y,1), texrefs=[self.texRef_Albedo, self.texRef_Spec, self.texRef_Normal, self.texRef_Roughness]) 304 | 305 | cuda.memcpy_dtoh(self.out_buffer, self.out_buffer_gpu) 306 | return np.copy(self.out_buffer) -------------------------------------------------------------------------------- /Render/GeometryEvaluate.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | __device__ void vectorUnproj(float2 pointScreen, float screenWidth, float screenHeight, float* matProj, float* matView, float3* out_start_point, float3* out_end_point) 10 | { 11 | float mVPInv[16]; 12 | float mVP[16]; 13 | mat4Mul(matView, matProj, mVP); 14 | mat4Inv(mVP, mVPInv); 15 | 16 | float4 p0 = make_float4(2.0f * (pointScreen.x - screenWidth) / screenWidth - 1.0f, 1.0f - 2.0f * (pointScreen.y - screenHeight) / screenHeight, 0.0f, 1.0f); 17 | float4 p1 = make_float4(2.0f * (pointScreen.x - screenWidth) / screenWidth - 1.0f, 1.0f - 2.0f * (pointScreen.y - screenHeight) / screenHeight, 1.0f, 1.0f); 18 | 19 | //TODO: mul p0 and p1 20 | float4 p0_out = mul(p0, mVPInv); 21 | float4 p1_out = mul(p1, 
mVPInv); 22 | 23 | out_start_point->x = p0_out.x / p0_out.w; 24 | out_start_point->y = p0_out.y / p0_out.w; 25 | out_start_point->z = p0_out.z / p0_out.w; 26 | 27 | out_end_point->x = p1_out.x / p1_out.w; 28 | out_end_point->y = p1_out.y / p1_out.w; 29 | out_end_point->z = p1_out.z / p1_out.w; 30 | } 31 | 32 | 33 | __device__ void get_geometry_ball(float2* tex, float3* wNrm, float3* wPos, int px, int py, float screenWidth, float screenHeight, float* matProj, float* matView) 34 | { 35 | //tex->x = 0.5; tex->y = 0.5; 36 | wPos->x = ((px+0.5f) - 0.5 * screenWidth) / (0.5 * screenWidth); 37 | wPos->y = -(0.5 * screenHeight - (py+0.5f)) / (0.5 * screenHeight); 38 | float tag = 1.0 - (wPos->x * wPos->x + wPos->y * wPos->y); 39 | if(tag < 0) 40 | { 41 | tex->x = -1.0; tex->y = -1.0; 42 | wNrm->x = 0.0; wNrm->y = 0.0; wNrm->z = 1.0; 43 | wPos->x = 0.0; wPos->y = 0.0; wNrm->z = 0.0; 44 | } 45 | else 46 | { 47 | wPos->z = sqrt(tag); 48 | wNrm->x = wPos->x; wNrm->y = wPos->y; wNrm->z = wPos->z; 49 | tex->x = 0.5; tex->y = 0.5; 50 | } 51 | } 52 | 53 | __device__ void get_geometry_plane(float2* tex, float3* wNrm, float3* wPos, int px, int py, float screenWidth, float screenHeight, float* matProj, float* matView) 54 | { 55 | //Assume front view and every pixel is on plane. 
56 | tex->x = (px + 0.5f) / screenWidth; tex->y = (py + 0.5f) / screenHeight; 57 | wNrm->x = 0.0; wNrm->y = 0.0; wNrm->z = 1.0; 58 | wPos->x = ((px+0.5f) - 0.5 * screenWidth) / (0.5 * screenWidth); 59 | wPos->y = -(0.5 * screenHeight - (py+0.5f)) / (0.5 * screenHeight); 60 | wPos->z = 0.0; 61 | } 62 | 63 | -------------------------------------------------------------------------------- /Render/PixelShader.cu: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include "BRDFEvaluate.h" 6 | #include "GeometryEvaluate.h" 7 | 8 | #define MAX_LIGHT_COUNT 8 9 | #define RENDER_IMAGE 0 10 | #define RENDER_ALBEDO 1 11 | #define RENDER_SPECALBEDO 2 12 | #define RENDER_ROUGHNESS 3 13 | #define RENDER_NORMAL 4 14 | #define RENDER_MASK 5 15 | #define RENDER_POS 6 16 | #define RENDER_VIEW 7 17 | #define RENDER_UV 8 18 | #define RENDER_LIGHTDIR 9 19 | 20 | texture albedo; 21 | texture spec; 22 | texture roughness; 23 | texture normal; 24 | 25 | //First version: fixed object pos and viewpoint 26 | 27 | extern "C" { 28 | 29 | __device__ float3 reverse(float3 in) 30 | { 31 | return make_float3(in.z, in.y, in.x); 32 | } 33 | 34 | 35 | 36 | __global__ void PS_Render_Plane_Point(float* output, int imgwidth, int imgheight, 37 | float* matProj, float* matView, 38 | bool* lightStatus, float* lightIntensity, float* lightPos, int renderMode) 39 | { 40 | float mVInv[16]; 41 | mat4Inv(matView, mVInv); 42 | float4 tmpEye = mul(make_float4(0, 0, 0, 1), mVInv); 43 | float3 eyePos = make_float3(tmpEye.x, tmpEye.y, tmpEye.z); 44 | 45 | 46 | int px = threadIdx.x + blockIdx.x * blockDim.x; 47 | int py = threadIdx.y + blockIdx.y * blockDim.y; 48 | 49 | if(px < imgwidth && py < imgheight) 50 | { 51 | int imgindex = py * imgwidth + px; 52 | 53 | float2 tex = make_float2(0); 54 | float3 wPos = make_float3(0); 55 | float3 wNrm = make_float3(0); 56 | 57 | get_geometry_plane(&tex, &wNrm, &wPos, px, py, imgwidth, 
imgheight, matProj, matView); 58 | 59 | float3 diffColor = samplefloat3Fromfloat4(tex2D(albedo, tex.x, tex.y)); 60 | float3 speColor = samplefloat3Fromfloat4(tex2D(spec, tex.x, tex.y)); 61 | float roughnessValue = tex2D(roughness, tex.x, tex.y); 62 | wNrm = reverse(samplefloat3Fromfloat4(tex2D(normal, tex.x, tex.y))); 63 | 64 | if(wNrm.x > 2.0f) 65 | { 66 | output[3*imgindex] = 0.0f; 67 | output[3*imgindex+1] = 0.0f; 68 | output[3*imgindex+2] = 0.0f; 69 | return; 70 | } 71 | 72 | wNrm = normalize(wNrm); 73 | 74 | float3 V = normalize(eyePos - wPos); 75 | 76 | float3 color = make_float3(0); 77 | if(renderMode == RENDER_IMAGE) 78 | { 79 | float3 diffuse = make_float3(0); 80 | float3 specular = make_float3(0); 81 | for(int i=0; i 2.0f) 175 | { 176 | output[3*imgindex] = 0.0f; 177 | output[3*imgindex+1] = 0.0f; 178 | output[3*imgindex+2] = 0.0f; 179 | return; 180 | } 181 | 182 | wNrm = normalize(wNrm); 183 | wPos = wNrm; 184 | 185 | float3 V = normalize(eyePos - wPos); 186 | 187 | float3 color = make_float3(0); 188 | if(renderMode == RENDER_IMAGE) 189 | { 190 | float3 diffuse = make_float3(0); 191 | float3 specular = make_float3(0); 192 | for(int i=0; i 2.0f) 289 | { 290 | output[3*imgindex] = 0.0f; 291 | output[3*imgindex+1] = 0.0f; 292 | output[3*imgindex+2] = 0.0f; 293 | return; 294 | } 295 | wNrm = normalize(wNrm); 296 | 297 | float3 V = normalize(eyePos - wPos); 298 | float3 color = make_float3(0); 299 | 300 | float4 lNrm = mul(make_float4(wNrm.x, wNrm.y, wNrm.z, 0), matrixLight); 301 | float3 lightSpaceNrm = make_float3(lNrm.x, lNrm.y, lNrm.z); 302 | 303 | 304 | float4 lView = mul(make_float4(V.x, V.y, V.z, 0), matrixLight); 305 | float3 lightSpaceView = make_float3(lView.x, lView.y, lView.z); 306 | 307 | if(renderMode == RENDER_IMAGE) 308 | { 309 | float3 diffuse = EvalDiffuseEnvLight(lightSpaceNrm, nDiffuseSample, nCubeRes); 310 | float3 spec = EvalSpecularEnvLight(roughnessValue, lightSpaceNrm, lightSpaceView, nSpecSample, nCubeRes); 311 | color = diffuse * 
diffColor + spec * speColor; 312 | } 313 | else if(renderMode == RENDER_ALBEDO) 314 | { 315 | color = diffColor; 316 | } 317 | else if(renderMode == RENDER_SPECALBEDO) 318 | { 319 | color = speColor; 320 | } 321 | else if(renderMode == RENDER_ROUGHNESS) 322 | { 323 | color = make_float3(roughnessValue); 324 | } 325 | else if(renderMode == RENDER_NORMAL) 326 | { 327 | color = reverse(0.5*(wNrm+1.0f)); 328 | } 329 | else if(renderMode == RENDER_MASK) 330 | { 331 | color = make_float3(1.0f); 332 | } 333 | else if(renderMode == RENDER_POS) 334 | { 335 | color = reverse(wPos); 336 | } 337 | else if(renderMode == RENDER_VIEW) 338 | { 339 | color = reverse(lightSpaceView); 340 | // color = make_float3(matrixLight[6]); 341 | } 342 | else if(renderMode == RENDER_UV) 343 | { 344 | color = make_float3(0.0, tex.y, tex.x); 345 | } 346 | else if(renderMode == RENDER_LIGHTDIR) 347 | { 348 | float3 avgL = make_float3(0); 349 | for (uint i = 0; i < nSpecSample; i++) 350 | { 351 | float2 Xi = Hammersley(i, nSpecSample); 352 | float3 H = make_float3(0.0f, 0.0f, 1.0f); 353 | H = ImportanceSampleBeckmann(Xi, roughnessValue, lightSpaceNrm); 354 | 355 | float3 L = 2 * dot(lightSpaceView, H) * H - lightSpaceView; 356 | avgL += L; 357 | } 358 | color = reverse(avgL / nSpecSample); 359 | } 360 | output[3*imgindex] = color.x; 361 | output[3*imgindex+1] = color.y; 362 | output[3*imgindex+2] = color.z; 363 | } 364 | } 365 | 366 | __global__ void PS_Render_Sphere_Env(float* output, int imgwidth, int imgheight, 367 | float* matProj, float* matView, 368 | float* matrixLight, uint nCubeRes, 369 | uint nDiffuseSample, uint nSpecSample, int renderMode) 370 | { 371 | float mVInv[16]; 372 | mat4Inv(matView, mVInv); 373 | float4 tmpEye = mul(make_float4(0, 0, 0, 1), mVInv); 374 | float3 eyePos = make_float3(tmpEye.x, tmpEye.y, tmpEye.z); 375 | 376 | 377 | int px = threadIdx.x + blockIdx.x * blockDim.x; 378 | int py = threadIdx.y + blockIdx.y * blockDim.y; 379 | 380 | if(px < imgwidth && py < imgheight) 
381 | { 382 | int imgindex = py * imgwidth + px; 383 | 384 | float2 tex = make_float2(0); 385 | float3 wPos = make_float3(0); 386 | float3 wNrm = make_float3(0); 387 | 388 | get_geometry_plane(&tex, &wNrm, &wPos, px, py, imgwidth, imgheight, matProj, matView); 389 | 390 | 391 | float3 diffColor = samplefloat3Fromfloat4(tex2D(albedo, tex.x, tex.y)); 392 | float3 speColor = samplefloat3Fromfloat4(tex2D(spec, tex.x, tex.y)); 393 | float roughnessValue = tex2D(roughness, tex.x, tex.y); 394 | wNrm = reverse(samplefloat3Fromfloat4(tex2D(normal, tex.x, tex.y))); 395 | 396 | if(wNrm.x > 2.0f) 397 | { 398 | output[3*imgindex] = 0.0f; 399 | output[3*imgindex+1] = 0.0f; 400 | output[3*imgindex+2] = 0.0f; 401 | return; 402 | } 403 | wNrm = normalize(wNrm); 404 | wPos = wNrm; 405 | 406 | float3 V = normalize(eyePos - wPos); 407 | float3 color = make_float3(0); 408 | 409 | float4 lNrm = mul(make_float4(wNrm.x, wNrm.y, wNrm.z, 0), matrixLight); 410 | float3 lightSpaceNrm = make_float3(lNrm.x, lNrm.y, lNrm.z); 411 | 412 | float4 lView = mul(make_float4(V.x, V.y, V.z, 0), matrixLight); 413 | float3 lightSpaceView = make_float3(lView.x, lView.y, lView.z); 414 | 415 | if(renderMode == RENDER_IMAGE) 416 | { 417 | float3 diffuse = EvalDiffuseEnvLight(lightSpaceNrm, nDiffuseSample, nCubeRes); 418 | float3 spec = EvalSpecularEnvLight(roughnessValue, lightSpaceNrm, lightSpaceView, nSpecSample, nCubeRes); 419 | color = diffuse * diffColor + spec * speColor; 420 | } 421 | else if(renderMode == RENDER_ALBEDO) 422 | { 423 | color = diffColor; 424 | } 425 | else if(renderMode == RENDER_SPECALBEDO) 426 | { 427 | color = speColor; 428 | } 429 | else if(renderMode == RENDER_ROUGHNESS) 430 | { 431 | color = make_float3(roughnessValue); 432 | } 433 | else if(renderMode == RENDER_NORMAL) 434 | { 435 | color = reverse(0.5*(wNrm+1.0f)); 436 | } 437 | else if(renderMode == RENDER_MASK) 438 | { 439 | color = make_float3(1.0f); 440 | } 441 | else if(renderMode == RENDER_POS) 442 | { 443 | color = 
reverse(wPos); 444 | } 445 | else if(renderMode == RENDER_VIEW) 446 | { 447 | color = reverse(lightSpaceView); 448 | } 449 | else if(renderMode == RENDER_UV) 450 | { 451 | color = make_float3(0.0, tex.y, tex.x); 452 | } 453 | else if(renderMode == RENDER_LIGHTDIR) 454 | { 455 | float3 avgL = make_float3(0); 456 | for (uint i = 0; i < nSpecSample; i++) 457 | { 458 | float2 Xi = Hammersley(i, nSpecSample); 459 | float3 H = make_float3(0.0f, 0.0f, 1.0f); 460 | H = ImportanceSampleBeckmann(Xi, roughnessValue, lightSpaceNrm); 461 | 462 | float3 L = 2 * dot(lightSpaceView, H) * H - lightSpaceView; 463 | avgL += L; 464 | } 465 | color = reverse(avgL / nSpecSample); 466 | } 467 | output[3*imgindex] = color.x; 468 | output[3*imgindex+1] = color.y; 469 | output[3*imgindex+2] = color.z; 470 | } 471 | } 472 | 473 | 474 | } -------------------------------------------------------------------------------- /Render/exception.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. 3 | * 4 | * Please refer to the NVIDIA end user license agreement (EULA) associated 5 | * with this source code for terms and conditions that govern your use of 6 | * this software. Any use, reproduction, disclosure, or distribution of 7 | * this software and related documentation outside the terms of the EULA 8 | * is strictly prohibited. 9 | * 10 | */ 11 | 12 | /* CUda UTility Library */ 13 | #ifndef _EXCEPTION_H_ 14 | #define _EXCEPTION_H_ 15 | 16 | // includes, system 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | //! Exception wrapper. 23 | //! @param Std_Exception Exception out of namespace std for easy typing. 24 | template 25 | class Exception : public Std_Exception 26 | { 27 | public: 28 | 29 | //! @brief Static construction interface 30 | //! @return Alwayss throws ( Located_Exception) 31 | //! @param file file in which the Exception occurs 32 | //! 
@param line line in which the Exception occurs 33 | //! @param detailed details on the code fragment causing the Exception 34 | static void throw_it(const char *file, 35 | const int line, 36 | const char *detailed = "-"); 37 | 38 | //! Static construction interface 39 | //! @return Alwayss throws ( Located_Exception) 40 | //! @param file file in which the Exception occurs 41 | //! @param line line in which the Exception occurs 42 | //! @param detailed details on the code fragment causing the Exception 43 | static void throw_it(const char *file, 44 | const int line, 45 | const std::string &detailed); 46 | 47 | //! Destructor 48 | virtual ~Exception() throw(); 49 | 50 | private: 51 | 52 | //! Constructor, default (private) 53 | Exception(); 54 | 55 | //! Constructor, standard 56 | //! @param str string returned by what() 57 | Exception(const std::string &str); 58 | 59 | }; 60 | 61 | //////////////////////////////////////////////////////////////////////////////// 62 | //! Exception handler function for arbitrary exceptions 63 | //! @param ex exception to handle 64 | //////////////////////////////////////////////////////////////////////////////// 65 | template 66 | inline void 67 | handleException(const Exception_Typ &ex) 68 | { 69 | std::cerr << ex.what() << std::endl; 70 | 71 | exit(EXIT_FAILURE); 72 | } 73 | 74 | //! Convenience macros 75 | 76 | //! Exception caused by dynamic program behavior, e.g. file does not exist 77 | #define RUNTIME_EXCEPTION( msg) \ 78 | Exception::throw_it( __FILE__, __LINE__, msg) 79 | 80 | //! Logic exception in program, e.g. an assert failed 81 | #define LOGIC_EXCEPTION( msg) \ 82 | Exception::throw_it( __FILE__, __LINE__, msg) 83 | 84 | //! Out of range exception 85 | #define RANGE_EXCEPTION( msg) \ 86 | Exception::throw_it( __FILE__, __LINE__, msg) 87 | 88 | //////////////////////////////////////////////////////////////////////////////// 89 | //! 
Implementation 90 | 91 | // includes, system 92 | #include 93 | 94 | //////////////////////////////////////////////////////////////////////////////// 95 | //! Static construction interface. 96 | //! @param Exception causing code fragment (file and line) and detailed infos. 97 | //////////////////////////////////////////////////////////////////////////////// 98 | /*static*/ template 99 | void 100 | Exception:: 101 | throw_it(const char *file, const int line, const char *detailed) 102 | { 103 | std::stringstream s; 104 | 105 | // Quiet heavy-weight but exceptions are not for 106 | // performance / release versions 107 | s << "Exception in file '" << file << "' in line " << line << "\n" 108 | << "Detailed description: " << detailed << "\n"; 109 | 110 | throw Exception(s.str()); 111 | } 112 | 113 | //////////////////////////////////////////////////////////////////////////////// 114 | //! Static construction interface. 115 | //! @param Exception causing code fragment (file and line) and detailed infos. 116 | //////////////////////////////////////////////////////////////////////////////// 117 | /*static*/ template 118 | void 119 | Exception:: 120 | throw_it(const char *file, const int line, const std::string &msg) 121 | { 122 | throw_it(file, line, msg.c_str()); 123 | } 124 | 125 | //////////////////////////////////////////////////////////////////////////////// 126 | //! Constructor, default (private). 127 | //////////////////////////////////////////////////////////////////////////////// 128 | template 129 | Exception::Exception() : 130 | Std_Exception("Unknown Exception.\n") 131 | { } 132 | 133 | //////////////////////////////////////////////////////////////////////////////// 134 | //! Constructor, standard (private). 135 | //! String returned by what(). 
136 | //////////////////////////////////////////////////////////////////////////////// 137 | template 138 | Exception::Exception(const std::string &s) : 139 | Std_Exception(s) 140 | { } 141 | 142 | //////////////////////////////////////////////////////////////////////////////// 143 | //! Destructor 144 | //////////////////////////////////////////////////////////////////////////////// 145 | template 146 | Exception::~Exception() throw() { } 147 | 148 | // functions, exported 149 | 150 | #endif // #ifndef _EXCEPTION_H_ 151 | 152 | -------------------------------------------------------------------------------- /Render/helper_functions.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. 3 | * 4 | * Please refer to the NVIDIA end user license agreement (EULA) associated 5 | * with this source code for terms and conditions that govern your use of 6 | * this software. Any use, reproduction, disclosure, or distribution of 7 | * this software and related documentation outside the terms of the EULA 8 | * is strictly prohibited. 
9 | * 10 | */ 11 | 12 | // These are helper functions for the SDK samples (string parsing, timers, image helpers, etc) 13 | #ifndef HELPER_FUNCTIONS_H 14 | #define HELPER_FUNCTIONS_H 15 | 16 | #ifdef WIN32 17 | #pragma warning(disable:4996) 18 | #endif 19 | 20 | // includes, project 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | // includes, timer, string parsing, image helpers 34 | #include // helper functions for timers 35 | #include // helper functions for string parsing 36 | #include // helper functions for image compare, dump, data comparisons 37 | 38 | #ifndef EXIT_WAIVED 39 | #define EXIT_WAIVED 2 40 | #endif 41 | 42 | #endif // HELPER_FUNCTIONS_H 43 | -------------------------------------------------------------------------------- /Render/helper_image.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. 3 | * 4 | * Please refer to the NVIDIA end user license agreement (EULA) associated 5 | * with this source code for terms and conditions that govern your use of 6 | * this software. Any use, reproduction, disclosure, or distribution of 7 | * this software and related documentation outside the terms of the EULA 8 | * is strictly prohibited. 9 | * 10 | */ 11 | 12 | // These are helper functions for the SDK samples (image,bitmap) 13 | #ifndef HELPER_IMAGE_H 14 | #define HELPER_IMAGE_H 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | #include 23 | #include 24 | #include 25 | 26 | #ifndef MIN 27 | #define MIN(a,b) ((a < b) ? a : b) 28 | #endif 29 | #ifndef MAX 30 | #define MAX(a,b) ((a > b) ? a : b) 31 | #endif 32 | 33 | #ifndef EXIT_WAIVED 34 | #define EXIT_WAIVED 2 35 | #endif 36 | 37 | #include 38 | 39 | // namespace unnamed (internal) 40 | namespace 41 | { 42 | //! 
size of PGM file header 43 | const unsigned int PGMHeaderSize = 0x40; 44 | 45 | // types 46 | 47 | //! Data converter from unsigned char / unsigned byte to type T 48 | template 49 | struct ConverterFromUByte; 50 | 51 | //! Data converter from unsigned char / unsigned byte 52 | template<> 53 | struct ConverterFromUByte 54 | { 55 | //! Conversion operator 56 | //! @return converted value 57 | //! @param val value to convert 58 | float operator()(const unsigned char &val) 59 | { 60 | return static_cast(val); 61 | } 62 | }; 63 | 64 | //! Data converter from unsigned char / unsigned byte to float 65 | template<> 66 | struct ConverterFromUByte 67 | { 68 | //! Conversion operator 69 | //! @return converted value 70 | //! @param val value to convert 71 | float operator()(const unsigned char &val) 72 | { 73 | return static_cast(val) / 255.0f; 74 | } 75 | }; 76 | 77 | //! Data converter from unsigned char / unsigned byte to type T 78 | template 79 | struct ConverterToUByte; 80 | 81 | //! Data converter from unsigned char / unsigned byte to unsigned int 82 | template<> 83 | struct ConverterToUByte 84 | { 85 | //! Conversion operator (essentially a passthru 86 | //! @return converted value 87 | //! @param val value to convert 88 | unsigned char operator()(const unsigned char &val) 89 | { 90 | return val; 91 | } 92 | }; 93 | 94 | //! Data converter from unsigned char / unsigned byte to unsigned int 95 | template<> 96 | struct ConverterToUByte 97 | { 98 | //! Conversion operator 99 | //! @return converted value 100 | //! 
@param val value to convert 101 | unsigned char operator()(const float &val) 102 | { 103 | return static_cast(val * 255.0f); 104 | } 105 | }; 106 | } 107 | 108 | #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) 109 | #ifndef FOPEN 110 | #define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode) 111 | #endif 112 | #ifndef FOPEN_FAIL 113 | #define FOPEN_FAIL(result) (result != 0) 114 | #endif 115 | #ifndef SSCANF 116 | #define SSCANF sscanf_s 117 | #endif 118 | #else 119 | #ifndef FOPEN 120 | #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) 121 | #endif 122 | #ifndef FOPEN_FAIL 123 | #define FOPEN_FAIL(result) (result == NULL) 124 | #endif 125 | #ifndef SSCANF 126 | #define SSCANF sscanf 127 | #endif 128 | #endif 129 | 130 | inline bool 131 | __loadPPM(const char *file, unsigned char **data, 132 | unsigned int *w, unsigned int *h, unsigned int *channels) 133 | { 134 | FILE *fp = NULL; 135 | 136 | if (FOPEN_FAIL(FOPEN(fp, file, "rb"))) 137 | { 138 | std::cerr << "__LoadPPM() : Failed to open file: " << file << std::endl; 139 | return false; 140 | } 141 | 142 | // check header 143 | char header[PGMHeaderSize]; 144 | 145 | if (fgets(header, PGMHeaderSize, fp) == NULL) 146 | { 147 | std::cerr << "__LoadPPM() : reading PGM header returned NULL" << std::endl; 148 | return false; 149 | } 150 | 151 | if (strncmp(header, "P5", 2) == 0) 152 | { 153 | *channels = 1; 154 | } 155 | else if (strncmp(header, "P6", 2) == 0) 156 | { 157 | *channels = 3; 158 | } 159 | else 160 | { 161 | std::cerr << "__LoadPPM() : File is not a PPM or PGM image" << std::endl; 162 | *channels = 0; 163 | return false; 164 | } 165 | 166 | // parse header, read maxval, width and height 167 | unsigned int width = 0; 168 | unsigned int height = 0; 169 | unsigned int maxval = 0; 170 | unsigned int i = 0; 171 | 172 | while (i < 3) 173 | { 174 | if (fgets(header, PGMHeaderSize, fp) == NULL) 175 | { 176 | std::cerr << "__LoadPPM() : reading PGM header 
returned NULL" << std::endl; 177 | return false; 178 | } 179 | 180 | if (header[0] == '#') 181 | { 182 | continue; 183 | } 184 | 185 | if (i == 0) 186 | { 187 | i += SSCANF(header, "%u %u %u", &width, &height, &maxval); 188 | } 189 | else if (i == 1) 190 | { 191 | i += SSCANF(header, "%u %u", &height, &maxval); 192 | } 193 | else if (i == 2) 194 | { 195 | i += SSCANF(header, "%u", &maxval); 196 | } 197 | } 198 | 199 | // check if given handle for the data is initialized 200 | if (NULL != *data) 201 | { 202 | if (*w != width || *h != height) 203 | { 204 | std::cerr << "__LoadPPM() : Invalid image dimensions." << std::endl; 205 | } 206 | } 207 | else 208 | { 209 | *data = (unsigned char *) malloc(sizeof(unsigned char) * width * height **channels); 210 | *w = width; 211 | *h = height; 212 | } 213 | 214 | // read and close file 215 | if (fread(*data, sizeof(unsigned char), width * height **channels, fp) == 0) 216 | { 217 | std::cerr << "__LoadPPM() read data returned error." << std::endl; 218 | } 219 | 220 | fclose(fp); 221 | 222 | return true; 223 | } 224 | 225 | template 226 | inline bool 227 | sdkLoadPGM(const char *file, T **data, unsigned int *w, unsigned int *h) 228 | { 229 | unsigned char *idata = NULL; 230 | unsigned int channels; 231 | 232 | if (true != __loadPPM(file, &idata, w, h, &channels)) 233 | { 234 | return false; 235 | } 236 | 237 | unsigned int size = *w **h * channels; 238 | 239 | // initialize mem if necessary 240 | // the correct size is checked / set in loadPGMc() 241 | if (NULL == *data) 242 | { 243 | *data = (T *) malloc(sizeof(T) * size); 244 | } 245 | 246 | // copy and cast data 247 | std::transform(idata, idata + size, *data, ConverterFromUByte()); 248 | 249 | free(idata); 250 | 251 | return true; 252 | } 253 | 254 | template 255 | inline bool 256 | sdkLoadPPM4(const char *file, T **data, 257 | unsigned int *w,unsigned int *h) 258 | { 259 | unsigned char *idata = 0; 260 | unsigned int channels; 261 | 262 | if (__loadPPM(file, &idata, w, h, 
&channels)) 263 | { 264 | // pad 4th component 265 | int size = *w **h; 266 | // keep the original pointer 267 | unsigned char *idata_orig = idata; 268 | *data = (T *) malloc(sizeof(T) * size * 4); 269 | unsigned char *ptr = *data; 270 | 271 | for (int i=0; i 0); 295 | assert(h > 0); 296 | 297 | std::fstream fh(file, std::fstream::out | std::fstream::binary); 298 | 299 | if (fh.bad()) 300 | { 301 | std::cerr << "__savePPM() : Opening file failed." << std::endl; 302 | return false; 303 | } 304 | 305 | if (channels == 1) 306 | { 307 | fh << "P5\n"; 308 | } 309 | else if (channels == 3) 310 | { 311 | fh << "P6\n"; 312 | } 313 | else 314 | { 315 | std::cerr << "__savePPM() : Invalid number of channels." << std::endl; 316 | return false; 317 | } 318 | 319 | fh << w << "\n" << h << "\n" << 0xff << std::endl; 320 | 321 | for (unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i) 322 | { 323 | fh << data[i]; 324 | } 325 | 326 | fh.flush(); 327 | 328 | if (fh.bad()) 329 | { 330 | std::cerr << "__savePPM() : Writing data failed." 
<< std::endl; 331 | return false; 332 | } 333 | 334 | fh.close(); 335 | 336 | return true; 337 | } 338 | 339 | template 340 | inline bool 341 | sdkSavePGM(const char *file, T *data, unsigned int w, unsigned int h) 342 | { 343 | unsigned int size = w * h; 344 | unsigned char *idata = 345 | (unsigned char *) malloc(sizeof(unsigned char) * size); 346 | 347 | std::transform(data, data + size, idata, ConverterToUByte()); 348 | 349 | // write file 350 | bool result = __savePPM(file, idata, w, h, 1); 351 | 352 | // cleanup 353 | free(idata); 354 | 355 | return result; 356 | } 357 | 358 | inline bool 359 | sdkSavePPM4ub(const char *file, unsigned char *data, 360 | unsigned int w, unsigned int h) 361 | { 362 | // strip 4th component 363 | int size = w * h; 364 | unsigned char *ndata = (unsigned char *) malloc(sizeof(unsigned char) * size*3); 365 | unsigned char *ptr = ndata; 366 | 367 | for (int i=0; i 390 | inline bool 391 | sdkReadFile(const char *filename, T **data, unsigned int *len, bool verbose) 392 | { 393 | // check input arguments 394 | assert(NULL != filename); 395 | assert(NULL != len); 396 | 397 | // intermediate storage for the data read 398 | std::vector data_read; 399 | 400 | // open file for reading 401 | FILE *fh = NULL; 402 | 403 | // check if filestream is valid 404 | if (FOPEN_FAIL(FOPEN(fh, filename, "r"))) 405 | { 406 | printf("Unable to open input file: %s\n", filename); 407 | return false; 408 | } 409 | 410 | // read all data elements 411 | T token; 412 | 413 | while (!feof(fh)) 414 | { 415 | fscanf(fh, "%f", &token); 416 | data_read.push_back(token); 417 | } 418 | 419 | // the last element is read twice 420 | data_read.pop_back(); 421 | fclose(fh); 422 | 423 | // check if the given handle is already initialized 424 | if (NULL != *data) 425 | { 426 | if (*len != data_read.size()) 427 | { 428 | std::cerr << "sdkReadFile() : Initialized memory given but " 429 | << "size mismatch with signal read " 430 | << "(data read / data init = " << (unsigned 
int)data_read.size() 431 | << " / " << *len << ")" << std::endl; 432 | 433 | return false; 434 | } 435 | } 436 | else 437 | { 438 | // allocate storage for the data read 439 | *data = (T *) malloc(sizeof(T) * data_read.size()); 440 | // store signal size 441 | *len = static_cast(data_read.size()); 442 | } 443 | 444 | // copy data 445 | memcpy(*data, &data_read.front(), sizeof(T) * data_read.size()); 446 | 447 | return true; 448 | } 449 | 450 | ////////////////////////////////////////////////////////////////////////////// 451 | //! Read file \filename and return the data 452 | //! @return bool if reading the file succeeded, otherwise false 453 | //! @param filename name of the source file 454 | //! @param data uninitialized pointer, returned initialized and pointing to 455 | //! the data read 456 | //! @param len number of data elements in data, -1 on error 457 | ////////////////////////////////////////////////////////////////////////////// 458 | template 459 | inline bool 460 | sdkReadFileBlocks(const char *filename, T **data, unsigned int *len, unsigned int block_num, unsigned int block_size, bool verbose) 461 | { 462 | // check input arguments 463 | assert(NULL != filename); 464 | assert(NULL != len); 465 | 466 | // open file for reading 467 | FILE *fh = fopen(filename, "rb"); 468 | 469 | if (fh == NULL && verbose) 470 | { 471 | std::cerr << "sdkReadFile() : Opening file failed." << std::endl; 472 | return false; 473 | } 474 | 475 | // check if the given handle is already initialized 476 | // allocate storage for the data read 477 | data[block_num] = (T *) malloc(block_size); 478 | 479 | // read all data elements 480 | fseek(fh, block_num * block_size, SEEK_SET); 481 | *len = fread(data[block_num], sizeof(T), block_size/sizeof(T), fh); 482 | 483 | fclose(fh); 484 | 485 | return true; 486 | } 487 | 488 | ////////////////////////////////////////////////////////////////////////////// 489 | //! Write a data file \filename 490 | //! 
@return true if writing the file succeeded, otherwise false 491 | //! @param filename name of the source file 492 | //! @param data data to write 493 | //! @param len number of data elements in data, -1 on error 494 | //! @param epsilon epsilon for comparison 495 | ////////////////////////////////////////////////////////////////////////////// 496 | template 497 | inline bool 498 | sdkWriteFile(const char *filename, const T *data, unsigned int len, 499 | const S epsilon, bool verbose, bool append = false) 500 | { 501 | assert(NULL != filename); 502 | assert(NULL != data); 503 | 504 | // open file for writing 505 | // if (append) { 506 | std::fstream fh(filename, std::fstream::out | std::fstream::ate); 507 | 508 | if (verbose) 509 | { 510 | std::cerr << "sdkWriteFile() : Open file " << filename << " for write/append." << std::endl; 511 | } 512 | 513 | /* } else { 514 | std::fstream fh(filename, std::fstream::out); 515 | if (verbose) { 516 | std::cerr << "sdkWriteFile() : Open file " << filename << " for write." << std::endl; 517 | } 518 | } 519 | */ 520 | 521 | // check if filestream is valid 522 | if (! fh.good()) 523 | { 524 | if (verbose) 525 | { 526 | std::cerr << "sdkWriteFile() : Opening file failed." << std::endl; 527 | } 528 | 529 | return false; 530 | } 531 | 532 | // first write epsilon 533 | fh << "# " << epsilon << "\n"; 534 | 535 | // write data 536 | for (unsigned int i = 0; (i < len) && (fh.good()); ++i) 537 | { 538 | fh << data[i] << ' '; 539 | } 540 | 541 | // Check if writing succeeded 542 | if (! fh.good()) 543 | { 544 | if (verbose) 545 | { 546 | std::cerr << "sdkWriteFile() : Writing file failed." << std::endl; 547 | } 548 | 549 | return false; 550 | } 551 | 552 | // file ends with nl 553 | fh << std::endl; 554 | 555 | return true; 556 | } 557 | 558 | ////////////////////////////////////////////////////////////////////////////// 559 | //! Compare two arrays of arbitrary type 560 | //! 
@return true if \a reference and \a data are identical, otherwise false 561 | //! @param reference timer_interface to the reference data / gold image 562 | //! @param data handle to the computed data 563 | //! @param len number of elements in reference and data 564 | //! @param epsilon epsilon to use for the comparison 565 | ////////////////////////////////////////////////////////////////////////////// 566 | template 567 | inline bool 568 | compareData(const T *reference, const T *data, const unsigned int len, 569 | const S epsilon, const float threshold) 570 | { 571 | assert(epsilon >= 0); 572 | 573 | bool result = true; 574 | unsigned int error_count = 0; 575 | 576 | for (unsigned int i = 0; i < len; ++i) 577 | { 578 | float diff = (float)reference[i] - (float)data[i]; 579 | bool comp = (diff <= epsilon) && (diff >= -epsilon); 580 | result &= comp; 581 | 582 | error_count += !comp; 583 | 584 | #if 0 585 | 586 | if (! comp) 587 | { 588 | std::cerr << "ERROR, i = " << i << ",\t " 589 | << reference[i] << " / " 590 | << data[i] 591 | << " (reference / data)\n"; 592 | } 593 | 594 | #endif 595 | } 596 | 597 | if (threshold == 0.0f) 598 | { 599 | return (result) ? true : false; 600 | } 601 | else 602 | { 603 | if (error_count) 604 | { 605 | printf("%4.2f(%%) of bytes mismatched (count=%d)\n", (float)error_count*100/(float)len, error_count); 606 | } 607 | 608 | return (len*threshold > error_count) ? true : false; 609 | } 610 | } 611 | 612 | #ifndef __MIN_EPSILON_ERROR 613 | #define __MIN_EPSILON_ERROR 1e-3f 614 | #endif 615 | 616 | ////////////////////////////////////////////////////////////////////////////// 617 | //! Compare two arrays of arbitrary type 618 | //! @return true if \a reference and \a data are identical, otherwise false 619 | //! @param reference handle to the reference data / gold image 620 | //! @param data handle to the computed data 621 | //! @param len number of elements in reference and data 622 | //! 
@param epsilon epsilon to use for the comparison 623 | //! @param epsilon threshold % of (# of bytes) for pass/fail 624 | ////////////////////////////////////////////////////////////////////////////// 625 | template 626 | inline bool 627 | compareDataAsFloatThreshold(const T *reference, const T *data, const unsigned int len, 628 | const S epsilon, const float threshold) 629 | { 630 | assert(epsilon >= 0); 631 | 632 | // If we set epsilon to be 0, let's set a minimum threshold 633 | float max_error = MAX((float)epsilon, __MIN_EPSILON_ERROR); 634 | int error_count = 0; 635 | bool result = true; 636 | 637 | for (unsigned int i = 0; i < len; ++i) 638 | { 639 | float diff = fabs((float)reference[i] - (float)data[i]); 640 | bool comp = (diff < max_error); 641 | result &= comp; 642 | 643 | if (! comp) 644 | { 645 | error_count++; 646 | #if 0 647 | 648 | if (error_count < 50) 649 | { 650 | printf("\n ERROR(epsilon=%4.3f), i=%d, (ref)0x%02x / (data)0x%02x / (diff)%d\n", 651 | max_error, i, 652 | *(unsigned int *)&reference[i], 653 | *(unsigned int *)&data[i], 654 | (unsigned int)diff); 655 | } 656 | 657 | #endif 658 | } 659 | } 660 | 661 | if (threshold == 0.0f) 662 | { 663 | if (error_count) 664 | { 665 | printf("total # of errors = %d\n", error_count); 666 | } 667 | 668 | return (error_count == 0) ? true : false; 669 | } 670 | else 671 | { 672 | if (error_count) 673 | { 674 | printf("%4.2f(%%) of bytes mismatched (count=%d)\n", (float)error_count*100/(float)len, error_count); 675 | } 676 | 677 | return ((len*threshold > error_count) ? 
true : false); 678 | } 679 | } 680 | 681 | inline 682 | void sdkDumpBin(void *data, unsigned int bytes, const char *filename) 683 | { 684 | printf("sdkDumpBin: <%s>\n", filename); 685 | FILE *fp; 686 | FOPEN(fp, filename, "wb"); 687 | fwrite(data, bytes, 1, fp); 688 | fflush(fp); 689 | fclose(fp); 690 | } 691 | 692 | inline 693 | bool sdkCompareBin2BinUint(const char *src_file, const char *ref_file, unsigned int nelements, const float epsilon, const float threshold, char *exec_path) 694 | { 695 | unsigned int *src_buffer, *ref_buffer; 696 | FILE *src_fp = NULL, *ref_fp = NULL; 697 | 698 | unsigned long error_count = 0; 699 | size_t fsize = 0; 700 | 701 | if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) 702 | { 703 | printf("compareBin2Bin unable to open src_file: %s\n", src_file); 704 | error_count++; 705 | } 706 | 707 | char *ref_file_path = sdkFindFilePath(ref_file, exec_path); 708 | 709 | if (ref_file_path == NULL) 710 | { 711 | printf("compareBin2Bin unable to find <%s> in <%s>\n", ref_file, exec_path); 712 | printf(">>> Check info.xml and [project//data] folder <%s> <<<\n", ref_file); 713 | printf("Aborting comparison!\n"); 714 | printf(" FAILED\n"); 715 | error_count++; 716 | 717 | if (src_fp) 718 | { 719 | fclose(src_fp); 720 | } 721 | 722 | if (ref_fp) 723 | { 724 | fclose(ref_fp); 725 | } 726 | } 727 | else 728 | { 729 | if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) 730 | { 731 | printf("compareBin2Bin unable to open ref_file: %s\n", ref_file_path); 732 | error_count++; 733 | } 734 | 735 | if (src_fp && ref_fp) 736 | { 737 | src_buffer = (unsigned int *)malloc(nelements*sizeof(unsigned int)); 738 | ref_buffer = (unsigned int *)malloc(nelements*sizeof(unsigned int)); 739 | 740 | fsize = fread(src_buffer, nelements, sizeof(unsigned int), src_fp); 741 | fsize = fread(ref_buffer, nelements, sizeof(unsigned int), ref_fp); 742 | 743 | printf("> compareBin2Bin nelements=%d, epsilon=%4.2f, threshold=%4.2f\n", nelements, epsilon, threshold); 744 | printf(" 
src_file <%s>, size=%d bytes\n", src_file, (int)fsize); 745 | printf(" ref_file <%s>, size=%d bytes\n", ref_file_path, (int)fsize); 746 | 747 | if (!compareData(ref_buffer, src_buffer, nelements, epsilon, threshold)) 748 | { 749 | error_count++; 750 | } 751 | 752 | fclose(src_fp); 753 | fclose(ref_fp); 754 | 755 | free(src_buffer); 756 | free(ref_buffer); 757 | } 758 | else 759 | { 760 | if (src_fp) 761 | { 762 | fclose(src_fp); 763 | } 764 | 765 | if (ref_fp) 766 | { 767 | fclose(ref_fp); 768 | } 769 | } 770 | } 771 | 772 | if (error_count == 0) 773 | { 774 | printf(" OK\n"); 775 | } 776 | else 777 | { 778 | printf(" FAILURE: %d errors...\n", (unsigned int)error_count); 779 | } 780 | 781 | return (error_count == 0); // returns true if all pixels pass 782 | } 783 | 784 | inline 785 | bool sdkCompareBin2BinFloat(const char *src_file, const char *ref_file, unsigned int nelements, const float epsilon, const float threshold, char *exec_path) 786 | { 787 | float *src_buffer, *ref_buffer; 788 | FILE *src_fp = NULL, *ref_fp = NULL; 789 | size_t fsize = 0; 790 | 791 | unsigned long error_count = 0; 792 | 793 | if (FOPEN_FAIL(FOPEN(src_fp, src_file, "rb"))) 794 | { 795 | printf("compareBin2Bin unable to open src_file: %s\n", src_file); 796 | error_count = 1; 797 | } 798 | 799 | char *ref_file_path = sdkFindFilePath(ref_file, exec_path); 800 | 801 | if (ref_file_path == NULL) 802 | { 803 | printf("compareBin2Bin unable to find <%s> in <%s>\n", ref_file, exec_path); 804 | printf(">>> Check info.xml and [project//data] folder <%s> <<<\n", exec_path); 805 | printf("Aborting comparison!\n"); 806 | printf(" FAILED\n"); 807 | error_count++; 808 | 809 | if (src_fp) 810 | { 811 | fclose(src_fp); 812 | } 813 | 814 | if (ref_fp) 815 | { 816 | fclose(ref_fp); 817 | } 818 | } 819 | else 820 | { 821 | if (FOPEN_FAIL(FOPEN(ref_fp, ref_file_path, "rb"))) 822 | { 823 | printf("compareBin2Bin unable to open ref_file: %s\n", ref_file_path); 824 | error_count = 1; 825 | } 826 | 827 | if 
(src_fp && ref_fp) 828 | { 829 | src_buffer = (float *)malloc(nelements*sizeof(float)); 830 | ref_buffer = (float *)malloc(nelements*sizeof(float)); 831 | 832 | fsize = fread(src_buffer, nelements, sizeof(float), src_fp); 833 | fsize = fread(ref_buffer, nelements, sizeof(float), ref_fp); 834 | 835 | printf("> compareBin2Bin nelements=%d, epsilon=%4.2f, threshold=%4.2f\n", nelements, epsilon, threshold); 836 | printf(" src_file <%s>, size=%d bytes\n", src_file, (int)fsize); 837 | printf(" ref_file <%s>, size=%d bytes\n", ref_file_path, (int)fsize); 838 | 839 | if (!compareDataAsFloatThreshold(ref_buffer, src_buffer, nelements, epsilon, threshold)) 840 | { 841 | error_count++; 842 | } 843 | 844 | fclose(src_fp); 845 | fclose(ref_fp); 846 | 847 | free(src_buffer); 848 | free(ref_buffer); 849 | } 850 | else 851 | { 852 | if (src_fp) 853 | { 854 | fclose(src_fp); 855 | } 856 | 857 | if (ref_fp) 858 | { 859 | fclose(ref_fp); 860 | } 861 | } 862 | } 863 | 864 | if (error_count == 0) 865 | { 866 | printf(" OK\n"); 867 | } 868 | else 869 | { 870 | printf(" FAILURE: %d errors...\n", (unsigned int)error_count); 871 | } 872 | 873 | return (error_count == 0); // returns true if all pixels pass 874 | } 875 | 876 | inline bool 877 | sdkCompareL2fe(const float *reference, const float *data, 878 | const unsigned int len, const float epsilon) 879 | { 880 | assert(epsilon >= 0); 881 | 882 | float error = 0; 883 | float ref = 0; 884 | 885 | for (unsigned int i = 0; i < len; ++i) 886 | { 887 | 888 | float diff = reference[i] - data[i]; 889 | error += diff * diff; 890 | ref += reference[i] * reference[i]; 891 | } 892 | 893 | float normRef = sqrtf(ref); 894 | 895 | if (fabs(ref) < 1e-7) 896 | { 897 | #ifdef _DEBUG 898 | std::cerr << "ERROR, reference l2-norm is 0\n"; 899 | #endif 900 | return false; 901 | } 902 | 903 | float normError = sqrtf(error); 904 | error = normError / normRef; 905 | bool result = error < epsilon; 906 | #ifdef _DEBUG 907 | 908 | if (! 
result) 909 | { 910 | std::cerr << "ERROR, l2-norm error " 911 | << error << " is greater than epsilon " << epsilon << "\n"; 912 | } 913 | 914 | #endif 915 | 916 | return result; 917 | } 918 | 919 | inline bool 920 | sdkLoadPPMub(const char *file, unsigned char **data, 921 | unsigned int *w,unsigned int *h) 922 | { 923 | unsigned int channels; 924 | return __loadPPM(file, data, w, h, &channels); 925 | } 926 | 927 | inline bool 928 | sdkLoadPPM4ub(const char *file, unsigned char **data, 929 | unsigned int *w, unsigned int *h) 930 | { 931 | unsigned char *idata = 0; 932 | unsigned int channels; 933 | 934 | if (__loadPPM(file, &idata, w, h, &channels)) 935 | { 936 | // pad 4th component 937 | int size = *w **h; 938 | // keep the original pointer 939 | unsigned char *idata_orig = idata; 940 | *data = (unsigned char *) malloc(sizeof(unsigned char) * size * 4); 941 | unsigned char *ptr = *data; 942 | 943 | for (int i=0; i Compare (a)rendered: <" << src_file << ">\n"; 984 | std::cerr << "> (b)reference: <" << ref_file << ">\n"; 985 | } 986 | 987 | 988 | if (sdkLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != true) 989 | { 990 | if (verboseErrors) 991 | { 992 | std::cerr << "PPMvsPPM: unable to load ref image file: "<< ref_file << "\n"; 993 | } 994 | 995 | return false; 996 | } 997 | 998 | if (sdkLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != true) 999 | { 1000 | std::cerr << "PPMvsPPM: unable to load src image file: " << src_file << "\n"; 1001 | return false; 1002 | } 1003 | 1004 | if (src_height != ref_height || src_width != ref_width) 1005 | { 1006 | if (verboseErrors) std::cerr << "PPMvsPPM: source and ref size mismatch (" << src_width << 1007 | "," << src_height << ")vs(" << ref_width << "," << ref_height << ")\n"; 1008 | } 1009 | 1010 | if (verboseErrors) std::cerr << "PPMvsPPM: comparing images size (" << src_width << 1011 | "," << src_height << ") epsilon(" << epsilon << "), threshold(" << threshold*100 << "%)\n"; 1012 | 1013 | if 
(compareData(ref_data, src_data, src_width*src_height*4, epsilon, threshold) == false) 1014 | { 1015 | error_count=1; 1016 | } 1017 | 1018 | if (error_count == 0) 1019 | { 1020 | if (verboseErrors) 1021 | { 1022 | std::cerr << " OK\n\n"; 1023 | } 1024 | } 1025 | else 1026 | { 1027 | if (verboseErrors) 1028 | { 1029 | std::cerr << " FAILURE! "< Compare (a)rendered: <" << src_file << ">\n"; 1058 | std::cerr << "> (b)reference: <" << ref_file << ">\n"; 1059 | } 1060 | 1061 | 1062 | if (sdkLoadPPMub(ref_file, &ref_data, &ref_width, &ref_height) != true) 1063 | { 1064 | if (verboseErrors) 1065 | { 1066 | std::cerr << "PGMvsPGM: unable to load ref image file: "<< ref_file << "\n"; 1067 | } 1068 | 1069 | return false; 1070 | } 1071 | 1072 | if (sdkLoadPPMub(src_file, &src_data, &src_width, &src_height) != true) 1073 | { 1074 | std::cerr << "PGMvsPGM: unable to load src image file: " << src_file << "\n"; 1075 | return false; 1076 | } 1077 | 1078 | if (src_height != ref_height || src_width != ref_width) 1079 | { 1080 | if (verboseErrors) std::cerr << "PGMvsPGM: source and ref size mismatch (" << src_width << 1081 | "," << src_height << ")vs(" << ref_width << "," << ref_height << ")\n"; 1082 | } 1083 | 1084 | if (verboseErrors) std::cerr << "PGMvsPGM: comparing images size (" << src_width << 1085 | "," << src_height << ") epsilon(" << epsilon << "), threshold(" << threshold*100 << "%)\n"; 1086 | 1087 | if (compareData(ref_data, src_data, src_width*src_height, epsilon, threshold) == false) 1088 | { 1089 | error_count=1; 1090 | } 1091 | 1092 | if (error_count == 0) 1093 | { 1094 | if (verboseErrors) 1095 | { 1096 | std::cerr << " OK\n\n"; 1097 | } 1098 | } 1099 | else 1100 | { 1101 | if (verboseErrors) 1102 | { 1103 | std::cerr << " FAILURE! 
"< 17 | #include 18 | #include 19 | #include 20 | 21 | #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) 22 | #ifndef _CRT_SECURE_NO_DEPRECATE 23 | #define _CRT_SECURE_NO_DEPRECATE 24 | #endif 25 | #ifndef STRCASECMP 26 | #define STRCASECMP _stricmp 27 | #endif 28 | #ifndef STRNCASECMP 29 | #define STRNCASECMP _strnicmp 30 | #endif 31 | #ifndef STRCPY 32 | #define STRCPY(sFilePath, nLength, sPath) strcpy_s(sFilePath, nLength, sPath) 33 | #endif 34 | 35 | #ifndef FOPEN 36 | #define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode) 37 | #endif 38 | #ifndef FOPEN_FAIL 39 | #define FOPEN_FAIL(result) (result != 0) 40 | #endif 41 | #ifndef SSCANF 42 | #define SSCANF sscanf_s 43 | #endif 44 | #ifndef SPRINTF 45 | #define SPRINTF sprintf_s 46 | #endif 47 | #else // Linux Includes 48 | #include 49 | #include 50 | 51 | #ifndef STRCASECMP 52 | #define STRCASECMP strcasecmp 53 | #endif 54 | #ifndef STRNCASECMP 55 | #define STRNCASECMP strncasecmp 56 | #endif 57 | #ifndef STRCPY 58 | #define STRCPY(sFilePath, nLength, sPath) strcpy(sFilePath, sPath) 59 | #endif 60 | 61 | #ifndef FOPEN 62 | #define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode)) 63 | #endif 64 | #ifndef FOPEN_FAIL 65 | #define FOPEN_FAIL(result) (result == NULL) 66 | #endif 67 | #ifndef SSCANF 68 | #define SSCANF sscanf 69 | #endif 70 | #ifndef SPRINTF 71 | #define SPRINTF sprintf 72 | #endif 73 | #endif 74 | 75 | #ifndef EXIT_WAIVED 76 | #define EXIT_WAIVED 2 77 | #endif 78 | 79 | // CUDA Utility Helper Functions 80 | inline int stringRemoveDelimiter(char delimiter, const char *string) 81 | { 82 | int string_start = 0; 83 | 84 | while (string[string_start] == delimiter) 85 | { 86 | string_start++; 87 | } 88 | 89 | if (string_start >= (int)strlen(string)-1) 90 | { 91 | return 0; 92 | } 93 | 94 | return string_start; 95 | } 96 | 97 | inline int getFileExtension(char *filename, char **extension) 98 | { 99 | int string_length = (int)strlen(filename); 100 | 101 | 
while (filename[string_length--] != '.') 102 | { 103 | if (string_length == 0) 104 | break; 105 | } 106 | 107 | if (string_length > 0) string_length += 2; 108 | 109 | if (string_length == 0) 110 | *extension = NULL; 111 | else 112 | *extension = &filename[string_length]; 113 | 114 | return string_length; 115 | } 116 | 117 | 118 | inline bool checkCmdLineFlag(const int argc, const char **argv, const char *string_ref) 119 | { 120 | bool bFound = false; 121 | 122 | if (argc >= 1) 123 | { 124 | for (int i=1; i < argc; i++) 125 | { 126 | int string_start = stringRemoveDelimiter('-', argv[i]); 127 | const char *string_argv = &argv[i][string_start]; 128 | 129 | const char *equal_pos = strchr(string_argv, '='); 130 | int argv_length = (int)(equal_pos == 0 ? strlen(string_argv) : equal_pos - string_argv); 131 | 132 | int length = (int)strlen(string_ref); 133 | 134 | if (length == argv_length && !STRNCASECMP(string_argv, string_ref, length)) 135 | { 136 | bFound = true; 137 | continue; 138 | } 139 | } 140 | } 141 | 142 | return bFound; 143 | } 144 | 145 | // This function wraps the CUDA Driver API into a template function 146 | template 147 | inline bool getCmdLineArgumentValue(const int argc, const char **argv, const char *string_ref, T *value) 148 | { 149 | bool bFound = false; 150 | 151 | if (argc >= 1) 152 | { 153 | for (int i=1; i < argc; i++) 154 | { 155 | int string_start = stringRemoveDelimiter('-', argv[i]); 156 | const char *string_argv = &argv[i][string_start]; 157 | int length = (int)strlen(string_ref); 158 | 159 | if (!STRNCASECMP(string_argv, string_ref, length)) 160 | { 161 | if (length+1 <= (int)strlen(string_argv)) 162 | { 163 | int auto_inc = (string_argv[length] == '=') ? 
1 : 0; 164 | *value = (T)atoi(&string_argv[length + auto_inc]); 165 | } 166 | 167 | bFound = true; 168 | i=argc; 169 | } 170 | } 171 | } 172 | 173 | return bFound; 174 | } 175 | 176 | inline int getCmdLineArgumentInt(const int argc, const char **argv, const char *string_ref) 177 | { 178 | bool bFound = false; 179 | int value = -1; 180 | 181 | if (argc >= 1) 182 | { 183 | for (int i=1; i < argc; i++) 184 | { 185 | int string_start = stringRemoveDelimiter('-', argv[i]); 186 | const char *string_argv = &argv[i][string_start]; 187 | int length = (int)strlen(string_ref); 188 | 189 | if (!STRNCASECMP(string_argv, string_ref, length)) 190 | { 191 | if (length+1 <= (int)strlen(string_argv)) 192 | { 193 | int auto_inc = (string_argv[length] == '=') ? 1 : 0; 194 | value = atoi(&string_argv[length + auto_inc]); 195 | } 196 | else 197 | { 198 | value = 0; 199 | } 200 | 201 | bFound = true; 202 | continue; 203 | } 204 | } 205 | } 206 | 207 | if (bFound) 208 | { 209 | return value; 210 | } 211 | else 212 | { 213 | return 0; 214 | } 215 | } 216 | 217 | inline float getCmdLineArgumentFloat(const int argc, const char **argv, const char *string_ref) 218 | { 219 | bool bFound = false; 220 | float value = -1; 221 | 222 | if (argc >= 1) 223 | { 224 | for (int i=1; i < argc; i++) 225 | { 226 | int string_start = stringRemoveDelimiter('-', argv[i]); 227 | const char *string_argv = &argv[i][string_start]; 228 | int length = (int)strlen(string_ref); 229 | 230 | if (!STRNCASECMP(string_argv, string_ref, length)) 231 | { 232 | if (length+1 <= (int)strlen(string_argv)) 233 | { 234 | int auto_inc = (string_argv[length] == '=') ? 
1 : 0; 235 | value = (float)atof(&string_argv[length + auto_inc]); 236 | } 237 | else 238 | { 239 | value = 0.f; 240 | } 241 | 242 | bFound = true; 243 | continue; 244 | } 245 | } 246 | } 247 | 248 | if (bFound) 249 | { 250 | return value; 251 | } 252 | else 253 | { 254 | return 0; 255 | } 256 | } 257 | 258 | inline bool getCmdLineArgumentString(const int argc, const char **argv, 259 | const char *string_ref, char **string_retval) 260 | { 261 | bool bFound = false; 262 | 263 | if (argc >= 1) 264 | { 265 | for (int i=1; i < argc; i++) 266 | { 267 | int string_start = stringRemoveDelimiter('-', argv[i]); 268 | char *string_argv = (char *)&argv[i][string_start]; 269 | int length = (int)strlen(string_ref); 270 | 271 | if (!STRNCASECMP(string_argv, string_ref, length)) 272 | { 273 | *string_retval = &string_argv[length+1]; 274 | bFound = true; 275 | continue; 276 | } 277 | } 278 | } 279 | 280 | if (!bFound) 281 | { 282 | *string_retval = NULL; 283 | } 284 | 285 | return bFound; 286 | } 287 | 288 | ////////////////////////////////////////////////////////////////////////////// 289 | //! Find the path for a file assuming that 290 | //! files are found in the searchPath. 291 | //! 292 | //! @return the path if succeeded, otherwise 0 293 | //! @param filename name of the file 294 | //! @param executable_path optional absolute path of the executable 295 | ////////////////////////////////////////////////////////////////////////////// 296 | inline char *sdkFindFilePath(const char *filename, const char *executable_path) 297 | { 298 | // defines a variable that is replaced with the name of the executable 299 | 300 | // Typical relative search paths to locate needed companion files (e.g. 
sample input data, or JIT source files) 301 | // The origin for the relative search may be the .exe file, a .bat file launching an .exe, a browser .exe launching the .exe or .bat, etc 302 | const char *searchPath[] = 303 | { 304 | "./", // same dir 305 | "./common/", // "/common/" subdir 306 | "./common/data/", // "/common/data/" subdir 307 | "./data/", // "/data/" subdir 308 | "./src/", // "/src/" subdir 309 | "./src//data/", // "/src//data/" subdir 310 | "./inc/", // "/inc/" subdir 311 | "./0_Simple/", // "/0_Simple/" subdir 312 | "./1_Utilities/", // "/1_Utilities/" subdir 313 | "./2_Graphics/", // "/2_Graphics/" subdir 314 | "./3_Imaging/", // "/3_Imaging/" subdir 315 | "./4_Finance/", // "/4_Finance/" subdir 316 | "./5_Simulations/", // "/5_Simulations/" subdir 317 | "./6_Advanced/", // "/6_Advanced/" subdir 318 | "./7_CUDALibraries/", // "/7_CUDALibraries/" subdir 319 | "./8_Android/", // "/8_Android/" subdir 320 | "./samples/", // "/samples/" subdir 321 | 322 | "./0_Simple//data/", // "/0_Simple//data/" subdir 323 | "./1_Utilities//data/", // "/1_Utilities//data/" subdir 324 | "./2_Graphics//data/", // "/2_Graphics//data/" subdir 325 | "./3_Imaging//data/", // "/3_Imaging//data/" subdir 326 | "./4_Finance//data/", // "/4_Finance//data/" subdir 327 | "./5_Simulations//data/", // "/5_Simulations//data/" subdir 328 | "./6_Advanced//data/", // "/6_Advanced//data/" subdir 329 | "./7_CUDALibraries//", // "/7_CUDALibraries//" subdir 330 | "./7_CUDALibraries//data/", // "/7_CUDALibraries//data/" subdir 331 | 332 | "../", // up 1 in tree 333 | "../common/", // up 1 in tree, "/common/" subdir 334 | "../common/data/", // up 1 in tree, "/common/data/" subdir 335 | "../data/", // up 1 in tree, "/data/" subdir 336 | "../src/", // up 1 in tree, "/src/" subdir 337 | "../inc/", // up 1 in tree, "/inc/" subdir 338 | 339 | "../0_Simple//data/", // up 1 in tree, "/0_Simple//" subdir 340 | "../1_Utilities//data/", // up 1 in tree, "/1_Utilities//" subdir 341 | 
"../2_Graphics//data/", // up 1 in tree, "/2_Graphics//" subdir 342 | "../3_Imaging//data/", // up 1 in tree, "/3_Imaging//" subdir 343 | "../4_Finance//data/", // up 1 in tree, "/4_Finance//" subdir 344 | "../5_Simulations//data/", // up 1 in tree, "/5_Simulations//" subdir 345 | "../6_Advanced//data/", // up 1 in tree, "/6_Advanced//" subdir 346 | "../7_CUDALibraries//data/",// up 1 in tree, "/7_CUDALibraries//" subdir 347 | "../8_Android//data/", // up 1 in tree, "/8_Android//" subdir 348 | "../samples//data/", // up 1 in tree, "/samples//" subdir 349 | "../../", // up 2 in tree 350 | "../../common/", // up 2 in tree, "/common/" subdir 351 | "../../common/data/", // up 2 in tree, "/common/data/" subdir 352 | "../../data/", // up 2 in tree, "/data/" subdir 353 | "../../src/", // up 2 in tree, "/src/" subdir 354 | "../../inc/", // up 2 in tree, "/inc/" subdir 355 | "../../sandbox//data/", // up 2 in tree, "/sandbox//" subdir 356 | "../../0_Simple//data/", // up 2 in tree, "/0_Simple//" subdir 357 | "../../1_Utilities//data/", // up 2 in tree, "/1_Utilities//" subdir 358 | "../../2_Graphics//data/", // up 2 in tree, "/2_Graphics//" subdir 359 | "../../3_Imaging//data/", // up 2 in tree, "/3_Imaging//" subdir 360 | "../../4_Finance//data/", // up 2 in tree, "/4_Finance//" subdir 361 | "../../5_Simulations//data/", // up 2 in tree, "/5_Simulations//" subdir 362 | "../../6_Advanced//data/", // up 2 in tree, "/6_Advanced//" subdir 363 | "../../7_CUDALibraries//data/", // up 2 in tree, "/7_CUDALibraries//" subdir 364 | "../../8_Android//data/", // up 2 in tree, "/8_Android//" subdir 365 | "../../samples//data/", // up 2 in tree, "/samples//" subdir 366 | "../../../", // up 3 in tree 367 | "../../../src//", // up 3 in tree, "/src//" subdir 368 | "../../../src//data/", // up 3 in tree, "/src//data/" subdir 369 | "../../../src//src/", // up 3 in tree, "/src//src/" subdir 370 | "../../../src//inc/", // up 3 in tree, "/src//inc/" subdir 371 | "../../../sandbox//", // up 3 in 
tree, "/sandbox//" subdir 372 | "../../../sandbox//data/", // up 3 in tree, "/sandbox//data/" subdir 373 | "../../../sandbox//src/", // up 3 in tree, "/sandbox//src/" subdir 374 | "../../../sandbox//inc/", // up 3 in tree, "/sandbox//inc/" subdir 375 | "../../../0_Simple//data/", // up 3 in tree, "/0_Simple//" subdir 376 | "../../../1_Utilities//data/", // up 3 in tree, "/1_Utilities//" subdir 377 | "../../../2_Graphics//data/", // up 3 in tree, "/2_Graphics//" subdir 378 | "../../../3_Imaging//data/", // up 3 in tree, "/3_Imaging//" subdir 379 | "../../../4_Finance//data/", // up 3 in tree, "/4_Finance//" subdir 380 | "../../../5_Simulations//data/", // up 3 in tree, "/5_Simulations//" subdir 381 | "../../../6_Advanced//data/", // up 3 in tree, "/6_Advanced//" subdir 382 | "../../../7_CUDALibraries//data/", // up 3 in tree, "/7_CUDALibraries//" subdir 383 | "../../../8_Android//data/", // up 3 in tree, "/8_Android//" subdir 384 | "../../../0_Simple//", // up 3 in tree, "/0_Simple//" subdir 385 | "../../../1_Utilities//", // up 3 in tree, "/1_Utilities//" subdir 386 | "../../../2_Graphics//", // up 3 in tree, "/2_Graphics//" subdir 387 | "../../../3_Imaging//", // up 3 in tree, "/3_Imaging//" subdir 388 | "../../../4_Finance//", // up 3 in tree, "/4_Finance//" subdir 389 | "../../../5_Simulations//", // up 3 in tree, "/5_Simulations//" subdir 390 | "../../../6_Advanced//", // up 3 in tree, "/6_Advanced//" subdir 391 | "../../../7_CUDALibraries//", // up 3 in tree, "/7_CUDALibraries//" subdir 392 | "../../../8_Android//", // up 3 in tree, "/8_Android//" subdir 393 | "../../../samples//data/", // up 3 in tree, "/samples//" subdir 394 | "../../../common/", // up 3 in tree, "../../../common/" subdir 395 | "../../../common/data/", // up 3 in tree, "../../../common/data/" subdir 396 | "../../../data/", // up 3 in tree, "../../../data/" subdir 397 | "../../../../", // up 4 in tree 398 | "../../../../src//", // up 4 in tree, "/src//" subdir 399 | "../../../../src//data/", 
// up 4 in tree, "/src//data/" subdir 400 | "../../../../src//src/", // up 4 in tree, "/src//src/" subdir 401 | "../../../../src//inc/", // up 4 in tree, "/src//inc/" subdir 402 | "../../../../sandbox//", // up 4 in tree, "/sandbox//" subdir 403 | "../../../../sandbox//data/", // up 4 in tree, "/sandbox//data/" subdir 404 | "../../../../sandbox//src/", // up 4 in tree, "/sandbox//src/" subdir 405 | "../../../../sandbox//inc/", // up 4 in tree, "/sandbox//inc/" subdir 406 | "../../../../0_Simple//data/", // up 4 in tree, "/0_Simple//" subdir 407 | "../../../../1_Utilities//data/", // up 4 in tree, "/1_Utilities//" subdir 408 | "../../../../2_Graphics//data/", // up 4 in tree, "/2_Graphics//" subdir 409 | "../../../../3_Imaging//data/", // up 4 in tree, "/3_Imaging//" subdir 410 | "../../../../4_Finance//data/", // up 4 in tree, "/4_Finance//" subdir 411 | "../../../../5_Simulations//data/",// up 4 in tree, "/5_Simulations//" subdir 412 | "../../../../6_Advanced//data/", // up 4 in tree, "/6_Advanced//" subdir 413 | "../../../../7_CUDALibraries//data/", // up 4 in tree, "/7_CUDALibraries//" subdir 414 | "../../../../8_Android//data/", // up 4 in tree, "/8_Android//" subdir 415 | "../../../../0_Simple//", // up 4 in tree, "/0_Simple//" subdir 416 | "../../../../1_Utilities//", // up 4 in tree, "/1_Utilities//" subdir 417 | "../../../../2_Graphics//", // up 4 in tree, "/2_Graphics//" subdir 418 | "../../../../3_Imaging//", // up 4 in tree, "/3_Imaging//" subdir 419 | "../../../../4_Finance//", // up 4 in tree, "/4_Finance//" subdir 420 | "../../../../5_Simulations//",// up 4 in tree, "/5_Simulations//" subdir 421 | "../../../../6_Advanced//", // up 4 in tree, "/6_Advanced//" subdir 422 | "../../../../7_CUDALibraries//", // up 4 in tree, "/7_CUDALibraries//" subdir 423 | "../../../../8_Android//", // up 4 in tree, "/8_Android//" subdir 424 | "../../../../samples//data/", // up 4 in tree, "/samples//" subdir 425 | "../../../../common/", // up 4 in tree, 
"../../../common/" subdir 426 | "../../../../common/data/", // up 4 in tree, "../../../common/data/" subdir 427 | "../../../../data/", // up 4 in tree, "../../../data/" subdir 428 | "../../../../../", // up 5 in tree 429 | "../../../../../src//", // up 5 in tree, "/src//" subdir 430 | "../../../../../src//data/", // up 5 in tree, "/src//data/" subdir 431 | "../../../../../src//src/", // up 5 in tree, "/src//src/" subdir 432 | "../../../../../src//inc/", // up 5 in tree, "/src//inc/" subdir 433 | "../../../../../sandbox//", // up 5 in tree, "/sandbox//" subdir 434 | "../../../../../sandbox//data/", // up 5 in tree, "/sandbox//data/" subdir 435 | "../../../../../sandbox//src/", // up 5 in tree, "/sandbox//src/" subdir 436 | "../../../../../sandbox//inc/", // up 5 in tree, "/sandbox//inc/" subdir 437 | "../../../../../0_Simple//data/", // up 5 in tree, "/0_Simple//" subdir 438 | "../../../../../1_Utilities//data/", // up 5 in tree, "/1_Utilities//" subdir 439 | "../../../../../2_Graphics//data/", // up 5 in tree, "/2_Graphics//" subdir 440 | "../../../../../3_Imaging//data/", // up 5 in tree, "/3_Imaging//" subdir 441 | "../../../../../4_Finance//data/", // up 5 in tree, "/4_Finance//" subdir 442 | "../../../../../5_Simulations//data/",// up 5 in tree, "/5_Simulations//" subdir 443 | "../../../../../6_Advanced//data/", // up 5 in tree, "/6_Advanced//" subdir 444 | "../../../../../7_CUDALibraries//data/", // up 5 in tree, "/7_CUDALibraries//" subdir 445 | "../../../../../8_Android//data/", // up 5 in tree, "/8_Android//" subdir 446 | "../../../../../samples//data/", // up 5 in tree, "/samples//" subdir 447 | "../../../../../common/", // up 5 in tree, "../../../common/" subdir 448 | "../../../../../common/data/", // up 5 in tree, "../../../common/data/" subdir 449 | }; 450 | 451 | // Extract the executable name 452 | std::string executable_name; 453 | 454 | if (executable_path != 0) 455 | { 456 | executable_name = std::string(executable_path); 457 | 458 | #if 
defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) 459 | // Windows path delimiter 460 | size_t delimiter_pos = executable_name.find_last_of('\\'); 461 | executable_name.erase(0, delimiter_pos + 1); 462 | 463 | if (executable_name.rfind(".exe") != std::string::npos) 464 | { 465 | // we strip .exe, only if the .exe is found 466 | executable_name.resize(executable_name.size() - 4); 467 | } 468 | 469 | #else 470 | // Linux & OSX path delimiter 471 | size_t delimiter_pos = executable_name.find_last_of('/'); 472 | executable_name.erase(0,delimiter_pos+1); 473 | #endif 474 | } 475 | 476 | // Loop over all search paths and return the first hit 477 | for (unsigned int i = 0; i < sizeof(searchPath)/sizeof(char *); ++i) 478 | { 479 | std::string path(searchPath[i]); 480 | size_t executable_name_pos = path.find(""); 481 | 482 | // If there is executable_name variable in the searchPath 483 | // replace it with the value 484 | if (executable_name_pos != std::string::npos) 485 | { 486 | if (executable_path != 0) 487 | { 488 | path.replace(executable_name_pos, strlen(""), executable_name); 489 | } 490 | else 491 | { 492 | // Skip this path entry if no executable argument is given 493 | continue; 494 | } 495 | } 496 | 497 | #ifdef _DEBUG 498 | printf("sdkFindFilePath <%s> in %s\n", filename, path.c_str()); 499 | #endif 500 | 501 | // Test if the file exists 502 | path.append(filename); 503 | FILE *fp; 504 | FOPEN(fp, path.c_str(), "rb"); 505 | 506 | if (fp != NULL) 507 | { 508 | fclose(fp); 509 | // File found 510 | // returning an allocated array here for backwards compatibility reasons 511 | char *file_path = (char *) malloc(path.length() + 1); 512 | STRCPY(file_path, path.length() + 1, path.c_str()); 513 | return file_path; 514 | } 515 | 516 | if (fp) 517 | { 518 | fclose(fp); 519 | } 520 | } 521 | 522 | // File not found 523 | return 0; 524 | } 525 | 526 | #endif 527 | -------------------------------------------------------------------------------- 
/Render/helper_timer.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. 3 | * 4 | * Please refer to the NVIDIA end user license agreement (EULA) associated 5 | * with this source code for terms and conditions that govern your use of 6 | * this software. Any use, reproduction, disclosure, or distribution of 7 | * this software and related documentation outside the terms of the EULA 8 | * is strictly prohibited. 9 | * 10 | */ 11 | 12 | // Helper Timing Functions 13 | #ifndef HELPER_TIMER_H 14 | #define HELPER_TIMER_H 15 | 16 | #ifndef EXIT_WAIVED 17 | #define EXIT_WAIVED 2 18 | #endif 19 | 20 | // includes, system 21 | #include 22 | 23 | // includes, project 24 | #include 25 | 26 | // Definition of the StopWatch Interface, this is used if we don't want to use the CUT functions 27 | // But rather in a self contained class interface 28 | class StopWatchInterface 29 | { 30 | public: 31 | StopWatchInterface() {}; 32 | virtual ~StopWatchInterface() {}; 33 | 34 | public: 35 | //! Start time measurement 36 | virtual void start() = 0; 37 | 38 | //! Stop time measurement 39 | virtual void stop() = 0; 40 | 41 | //! Reset time counters to zero 42 | virtual void reset() = 0; 43 | 44 | //! Time in msec. after start. If the stop watch is still running (i.e. there 45 | //! was no call to stop()) then the elapsed time is returned, otherwise the 46 | //! time between the last start() and stop call is returned 47 | virtual float getTime() = 0; 48 | 49 | //! Mean time to date based on the number of times the stopwatch has been 50 | //! 
_stopped_ (ie finished sessions) and the current total time 51 | virtual float getAverageTime() = 0; 52 | }; 53 | 54 | 55 | ////////////////////////////////////////////////////////////////// 56 | // Begin Stopwatch timer class definitions for all OS platforms // 57 | ////////////////////////////////////////////////////////////////// 58 | #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) 59 | // includes, system 60 | #define WINDOWS_LEAN_AND_MEAN 61 | #include 62 | #undef min 63 | #undef max 64 | 65 | //! Windows specific implementation of StopWatch 66 | class StopWatchWin : public StopWatchInterface 67 | { 68 | public: 69 | //! Constructor, default 70 | StopWatchWin() : 71 | start_time(), end_time(), 72 | diff_time(0.0f), total_time(0.0f), 73 | running(false), clock_sessions(0), freq(0), freq_set(false) 74 | { 75 | if (! freq_set) 76 | { 77 | // helper variable 78 | LARGE_INTEGER temp; 79 | 80 | // get the tick frequency from the OS 81 | QueryPerformanceFrequency((LARGE_INTEGER *) &temp); 82 | 83 | // convert to type in which it is needed 84 | freq = ((double) temp.QuadPart) / 1000.0; 85 | 86 | // rememeber query 87 | freq_set = true; 88 | } 89 | }; 90 | 91 | // Destructor 92 | ~StopWatchWin() { }; 93 | 94 | public: 95 | //! Start time measurement 96 | inline void start(); 97 | 98 | //! Stop time measurement 99 | inline void stop(); 100 | 101 | //! Reset time counters to zero 102 | inline void reset(); 103 | 104 | //! Time in msec. after start. If the stop watch is still running (i.e. there 105 | //! was no call to stop()) then the elapsed time is returned, otherwise the 106 | //! time between the last start() and stop call is returned 107 | inline float getTime(); 108 | 109 | //! Mean time to date based on the number of times the stopwatch has been 110 | //! _stopped_ (ie finished sessions) and the current total time 111 | inline float getAverageTime(); 112 | 113 | private: 114 | // member variables 115 | 116 | //! 
Start of measurement 117 | LARGE_INTEGER start_time; 118 | //! End of measurement 119 | LARGE_INTEGER end_time; 120 | 121 | //! Time difference between the last start and stop 122 | float diff_time; 123 | 124 | //! TOTAL time difference between starts and stops 125 | float total_time; 126 | 127 | //! flag if the stop watch is running 128 | bool running; 129 | 130 | //! Number of times clock has been started 131 | //! and stopped to allow averaging 132 | int clock_sessions; 133 | 134 | //! tick frequency 135 | double freq; 136 | 137 | //! flag if the frequency has been set 138 | bool freq_set; 139 | }; 140 | 141 | // functions, inlined 142 | 143 | //////////////////////////////////////////////////////////////////////////////// 144 | //! Start time measurement 145 | //////////////////////////////////////////////////////////////////////////////// 146 | inline void 147 | StopWatchWin::start() 148 | { 149 | QueryPerformanceCounter((LARGE_INTEGER *) &start_time); 150 | running = true; 151 | } 152 | 153 | //////////////////////////////////////////////////////////////////////////////// 154 | //! Stop time measurement and increment add to the current diff_time summation 155 | //! variable. Also increment the number of times this clock has been run. 156 | //////////////////////////////////////////////////////////////////////////////// 157 | inline void 158 | StopWatchWin::stop() 159 | { 160 | QueryPerformanceCounter((LARGE_INTEGER *) &end_time); 161 | diff_time = (float) 162 | (((double) end_time.QuadPart - (double) start_time.QuadPart) / freq); 163 | 164 | total_time += diff_time; 165 | clock_sessions++; 166 | running = false; 167 | } 168 | 169 | //////////////////////////////////////////////////////////////////////////////// 170 | //! Reset the timer to 0. Does not change the timer running state but does 171 | //! recapture this point in time as the current start time if it is running. 
172 | //////////////////////////////////////////////////////////////////////////////// 173 | inline void 174 | StopWatchWin::reset() 175 | { 176 | diff_time = 0; 177 | total_time = 0; 178 | clock_sessions = 0; 179 | 180 | if (running) 181 | { 182 | QueryPerformanceCounter((LARGE_INTEGER *) &start_time); 183 | } 184 | } 185 | 186 | 187 | //////////////////////////////////////////////////////////////////////////////// 188 | //! Time in msec. after start. If the stop watch is still running (i.e. there 189 | //! was no call to stop()) then the elapsed time is returned added to the 190 | //! current diff_time sum, otherwise the current summed time difference alone 191 | //! is returned. 192 | //////////////////////////////////////////////////////////////////////////////// 193 | inline float 194 | StopWatchWin::getTime() 195 | { 196 | // Return the TOTAL time to date 197 | float retval = total_time; 198 | 199 | if (running) 200 | { 201 | LARGE_INTEGER temp; 202 | QueryPerformanceCounter((LARGE_INTEGER *) &temp); 203 | retval += (float) 204 | (((double)(temp.QuadPart - start_time.QuadPart)) / freq); 205 | } 206 | 207 | return retval; 208 | } 209 | 210 | //////////////////////////////////////////////////////////////////////////////// 211 | //! Time in msec. for a single run based on the total number of COMPLETED runs 212 | //! and the total time. 213 | //////////////////////////////////////////////////////////////////////////////// 214 | inline float 215 | StopWatchWin::getAverageTime() 216 | { 217 | return (clock_sessions > 0) ? (total_time/clock_sessions) : 0.0f; 218 | } 219 | #else 220 | // Declarations for Stopwatch on Linux and Mac OSX 221 | // includes, system 222 | #include 223 | #include 224 | 225 | //! Windows specific implementation of StopWatch 226 | class StopWatchLinux : public StopWatchInterface 227 | { 228 | public: 229 | //! 
Constructor, default 230 | StopWatchLinux() : 231 | start_time(), diff_time(0.0), total_time(0.0), 232 | running(false), clock_sessions(0) 233 | { }; 234 | 235 | // Destructor 236 | virtual ~StopWatchLinux() 237 | { }; 238 | 239 | public: 240 | //! Start time measurement 241 | inline void start(); 242 | 243 | //! Stop time measurement 244 | inline void stop(); 245 | 246 | //! Reset time counters to zero 247 | inline void reset(); 248 | 249 | //! Time in msec. after start. If the stop watch is still running (i.e. there 250 | //! was no call to stop()) then the elapsed time is returned, otherwise the 251 | //! time between the last start() and stop call is returned 252 | inline float getTime(); 253 | 254 | //! Mean time to date based on the number of times the stopwatch has been 255 | //! _stopped_ (ie finished sessions) and the current total time 256 | inline float getAverageTime(); 257 | 258 | private: 259 | 260 | // helper functions 261 | 262 | //! Get difference between start time and current time 263 | inline float getDiffTime(); 264 | 265 | private: 266 | 267 | // member variables 268 | 269 | //! Start of measurement 270 | struct timeval start_time; 271 | 272 | //! Time difference between the last start and stop 273 | float diff_time; 274 | 275 | //! TOTAL time difference between starts and stops 276 | float total_time; 277 | 278 | //! flag if the stop watch is running 279 | bool running; 280 | 281 | //! Number of times clock has been started 282 | //! and stopped to allow averaging 283 | int clock_sessions; 284 | }; 285 | 286 | // functions, inlined 287 | 288 | //////////////////////////////////////////////////////////////////////////////// 289 | //! 
Start time measurement 290 | //////////////////////////////////////////////////////////////////////////////// 291 | inline void 292 | StopWatchLinux::start() 293 | { 294 | gettimeofday(&start_time, 0); 295 | running = true; 296 | } 297 | 298 | //////////////////////////////////////////////////////////////////////////////// 299 | //! Stop time measurement and increment add to the current diff_time summation 300 | //! variable. Also increment the number of times this clock has been run. 301 | //////////////////////////////////////////////////////////////////////////////// 302 | inline void 303 | StopWatchLinux::stop() 304 | { 305 | diff_time = getDiffTime(); 306 | total_time += diff_time; 307 | running = false; 308 | clock_sessions++; 309 | } 310 | 311 | //////////////////////////////////////////////////////////////////////////////// 312 | //! Reset the timer to 0. Does not change the timer running state but does 313 | //! recapture this point in time as the current start time if it is running. 314 | //////////////////////////////////////////////////////////////////////////////// 315 | inline void 316 | StopWatchLinux::reset() 317 | { 318 | diff_time = 0; 319 | total_time = 0; 320 | clock_sessions = 0; 321 | 322 | if (running) 323 | { 324 | gettimeofday(&start_time, 0); 325 | } 326 | } 327 | 328 | //////////////////////////////////////////////////////////////////////////////// 329 | //! Time in msec. after start. If the stop watch is still running (i.e. there 330 | //! was no call to stop()) then the elapsed time is returned added to the 331 | //! current diff_time sum, otherwise the current summed time difference alone 332 | //! is returned. 
333 | //////////////////////////////////////////////////////////////////////////////// 334 | inline float 335 | StopWatchLinux::getTime() 336 | { 337 | // Return the TOTAL time to date 338 | float retval = total_time; 339 | 340 | if (running) 341 | { 342 | retval += getDiffTime(); 343 | } 344 | 345 | return retval; 346 | } 347 | 348 | //////////////////////////////////////////////////////////////////////////////// 349 | //! Time in msec. for a single run based on the total number of COMPLETED runs 350 | //! and the total time. 351 | //////////////////////////////////////////////////////////////////////////////// 352 | inline float 353 | StopWatchLinux::getAverageTime() 354 | { 355 | return (clock_sessions > 0) ? (total_time/clock_sessions) : 0.0f; 356 | } 357 | //////////////////////////////////////////////////////////////////////////////// 358 | 359 | //////////////////////////////////////////////////////////////////////////////// 360 | inline float 361 | StopWatchLinux::getDiffTime() 362 | { 363 | struct timeval t_time; 364 | gettimeofday(&t_time, 0); 365 | 366 | // time difference in milli-seconds 367 | return (float)(1000.0 * (t_time.tv_sec - start_time.tv_sec) 368 | + (0.001 * (t_time.tv_usec - start_time.tv_usec))); 369 | } 370 | #endif // WIN32 371 | 372 | //////////////////////////////////////////////////////////////////////////////// 373 | //! Timer functionality exported 374 | 375 | //////////////////////////////////////////////////////////////////////////////// 376 | //! Create a new timer 377 | //! @return true if a time has been created, otherwise false 378 | //! 
@param name of the new timer, 0 if the creation failed 379 | //////////////////////////////////////////////////////////////////////////////// 380 | inline bool 381 | sdkCreateTimer(StopWatchInterface **timer_interface) 382 | { 383 | //printf("sdkCreateTimer called object %08x\n", (void *)*timer_interface); 384 | #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) 385 | *timer_interface = (StopWatchInterface *)new StopWatchWin(); 386 | #else 387 | *timer_interface = (StopWatchInterface *)new StopWatchLinux(); 388 | #endif 389 | return (*timer_interface != NULL) ? true : false; 390 | } 391 | 392 | 393 | //////////////////////////////////////////////////////////////////////////////// 394 | //! Delete a timer 395 | //! @return true if a time has been deleted, otherwise false 396 | //! @param name of the timer to delete 397 | //////////////////////////////////////////////////////////////////////////////// 398 | inline bool 399 | sdkDeleteTimer(StopWatchInterface **timer_interface) 400 | { 401 | //printf("sdkDeleteTimer called object %08x\n", (void *)*timer_interface); 402 | if (*timer_interface) 403 | { 404 | delete *timer_interface; 405 | *timer_interface = NULL; 406 | } 407 | 408 | return true; 409 | } 410 | 411 | //////////////////////////////////////////////////////////////////////////////// 412 | //! Start the time with name \a name 413 | //! @param name name of the timer to start 414 | //////////////////////////////////////////////////////////////////////////////// 415 | inline bool 416 | sdkStartTimer(StopWatchInterface **timer_interface) 417 | { 418 | //printf("sdkStartTimer called object %08x\n", (void *)*timer_interface); 419 | if (*timer_interface) 420 | { 421 | (*timer_interface)->start(); 422 | } 423 | 424 | return true; 425 | } 426 | 427 | //////////////////////////////////////////////////////////////////////////////// 428 | //! Stop the time with name \a name. Does not reset. 429 | //! 
@param name name of the timer to stop 430 | //////////////////////////////////////////////////////////////////////////////// 431 | inline bool 432 | sdkStopTimer(StopWatchInterface **timer_interface) 433 | { 434 | // printf("sdkStopTimer called object %08x\n", (void *)*timer_interface); 435 | if (*timer_interface) 436 | { 437 | (*timer_interface)->stop(); 438 | } 439 | 440 | return true; 441 | } 442 | 443 | //////////////////////////////////////////////////////////////////////////////// 444 | //! Resets the timer's counter. 445 | //! @param name name of the timer to reset. 446 | //////////////////////////////////////////////////////////////////////////////// 447 | inline bool 448 | sdkResetTimer(StopWatchInterface **timer_interface) 449 | { 450 | // printf("sdkResetTimer called object %08x\n", (void *)*timer_interface); 451 | if (*timer_interface) 452 | { 453 | (*timer_interface)->reset(); 454 | } 455 | 456 | return true; 457 | } 458 | 459 | //////////////////////////////////////////////////////////////////////////////// 460 | //! Return the average time for timer execution as the total time 461 | //! for the timer dividied by the number of completed (stopped) runs the timer 462 | //! has made. 463 | //! Excludes the current running time if the timer is currently running. 464 | //! @param name name of the timer to return the time of 465 | //////////////////////////////////////////////////////////////////////////////// 466 | inline float 467 | sdkGetAverageTimerValue(StopWatchInterface **timer_interface) 468 | { 469 | // printf("sdkGetAverageTimerValue called object %08x\n", (void *)*timer_interface); 470 | if (*timer_interface) 471 | { 472 | return (*timer_interface)->getAverageTime(); 473 | } 474 | else 475 | { 476 | return 0.0f; 477 | } 478 | } 479 | 480 | //////////////////////////////////////////////////////////////////////////////// 481 | //! Total execution time for the timer over all runs since the last reset 482 | //! or timer creation. 483 | //! 
@param name name of the timer to obtain the value of. 484 | //////////////////////////////////////////////////////////////////////////////// 485 | inline float 486 | sdkGetTimerValue(StopWatchInterface **timer_interface) 487 | { 488 | // printf("sdkGetTimerValue called object %08x\n", (void *)*timer_interface); 489 | if (*timer_interface) 490 | { 491 | return (*timer_interface)->getTime(); 492 | } 493 | else 494 | { 495 | return 0.0f; 496 | } 497 | } 498 | 499 | #endif // HELPER_TIMER_H 500 | -------------------------------------------------------------------------------- /Render/plane.obj: -------------------------------------------------------------------------------- 1 | v 1.000000 -1.000000 0.000000 2 | vt 1.0 0.0 3 | v 1.000000 1.000000 0.000000 4 | vt 1.0 1.0 5 | v -1.000000 -1.000000 0.000000 6 | vt 0.0 0.0 7 | v -1.000000 1.000000 0.000000 8 | vt 0.0 1.0 9 | vn 0.0000 0.0000 1.0000 10 | f 2/2/1 3/3/1 1/1/1 11 | f 2/2/1 4/4/1 3/3/1 12 | -------------------------------------------------------------------------------- /Render/sphere_normal.pfm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/msraig/self-augmented-net/d41e1b0d1cfe80a855aeaee5b50d8f76cb223d62/Render/sphere_normal.pfm -------------------------------------------------------------------------------- /SVBRDFNet/RenderSVBRDFDataset.py: -------------------------------------------------------------------------------- 1 | # RenderSVBRDFDataset.py 2 | # Script for generate SVBRDF-Net training and testing data 3 | 4 | import random, os, sys, time, glob, math, shutil, pickle 5 | working_path = os.path.dirname(os.path.realpath(__file__)) 6 | root_path = os.path.dirname(working_path) 7 | os.chdir(working_path) 8 | sys.path.append(root_path + r'/Render') 9 | sys.path.append(root_path + r'/Utils') 10 | import caffe 11 | import numpy as np 12 | import cv2 13 | 14 | from utils import save_pfm, load_pfm, pfmFromBuffer, pfmToBuffer, toHDR, toLDR, 
renormalize, normalizeAlbedoSpec, normalBatchToThetaPhiBatch, thetaPhiBatchToNormalBatch, DataLoaderSVBRDF, RealDataLoaderSVBRDF, make_dir, autoExposure 15 | from FastRendererCUDA import FastRenderEngine 16 | 17 | os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' 18 | 19 | 20 | params_global = {} 21 | os.chdir(working_path) 22 | 23 | pixelCnt = 256 * 256 24 | 25 | lightID = [] 26 | 27 | with open('folderPath_SVBRDF.txt', 'r') as f: 28 | params_global['geometryPath'] = r'../Render/plane.obj' 29 | params_global['scriptRoot'] = r'../Utils' 30 | params_global['outFolder'] = f.readline().strip() 31 | params_global['envMapFolder'] = f.readline().strip() 32 | 33 | 34 | with open(params_global['envMapFolder'] + r'/light.txt', 'r') as f: 35 | lightID = map(int, f.read().strip().split('\n')) 36 | lightID = list(np.array(lightID) - 1) 37 | 38 | lightIDToEnumerateID = {} 39 | for id, lid in enumerate(lightID): 40 | lightIDToEnumerateID[lid] = id 41 | 42 | 43 | def renderOnlineEnvlight(brdfBatch, onlineRender, lightIDs = [], lightXforms = []): 44 | imgBatch = np.zeros((brdfBatch.shape[0], 3, 384, 384)) 45 | if(lightIDs == []): 46 | lightIDs = random.sample(lightID, brdfBatch.shape[0]) 47 | if(lightXforms == []): 48 | angle_y = np.random.uniform(0.0, 360.0, brdfBatch.shape[0]) 49 | angle_x = np.random.uniform(-45.0, 45.0, brdfBatch.shape[0]) 50 | else: 51 | angle_y = lightXforms[1] 52 | angle_x = lightXforms[0] 53 | 54 | for i in range(0, brdfBatch.shape[0]): 55 | 56 | onlineRender.SetEnvLightByID(lightIDs[i] + 1) 57 | onlineRender.SetLightXform(angle_x[i], angle_y[i]) 58 | 59 | 60 | onlineRender.SetAlbedoMap(brdfBatch[i,0:3,:,:].transpose((1,2,0))) 61 | onlineRender.SetSpecValue(brdfBatch[i,3:6,0,0]) 62 | onlineRender.SetRoughnessValue(brdfBatch[i,6,0,0]) 63 | onlineRender.SetNormalMap((2.0 * brdfBatch[i,7:10,:,:] - 1.0).transpose((1,2,0))) 64 | # onlineRender.SetRenderMode(1) 65 | img = onlineRender.Render() 66 | 67 | #Auto Exposure and white balance 68 | 
onlineRender.SetAlbedoValue([1.0, 1.0, 1.0]) 69 | onlineRender.SetSpecValue([0.0, 0.0, 0.0]) 70 | normal_one = np.dstack((np.ones((256,256)), np.zeros((256,256)), np.zeros((256,256)))) 71 | onlineRender.SetNormalMap(normal_one) 72 | img_norm = onlineRender.Render() 73 | normValue = np.mean(img_norm, axis = (0,1)) 74 | img = 0.5 * img / normValue 75 | #img = autoExposure(img) 76 | imgBatch[i, :, :, :] = img.transpose((2,0,1)) 77 | 78 | return imgBatch, normValue 79 | 80 | 81 | 82 | if __name__ == '__main__': 83 | data_root = sys.argv[1] 84 | data_tag = sys.argv[2] 85 | gpuid = int(sys.argv[3]) 86 | renderType = int(sys.argv[4]) 87 | startTag = int(sys.argv[5]) 88 | endTag = int(sys.argv[6]) 89 | renderTag = sys.argv[7] 90 | if(len(sys.argv) == 9): 91 | out_root = sys.argv[8] 92 | else: 93 | out_root = data_root 94 | 95 | labeled_file_in = data_root + r'/{}/Labeled/trainingdata.txt'.format(data_tag) 96 | test_file_in = data_root + r'/{}/Test/test.txt'.format(data_tag) 97 | 98 | rendered_labeled_out = out_root + r'/{}/Labeled'.format(data_tag) 99 | rendered_test_out = out_root + r'/{}/Test'.format(data_tag) 100 | 101 | specular_file = data_root + r'/{}/Labeled/specroughness.txt'.format(data_tag) 102 | lightpool_file = params_global['envMapFolder'] + r'/lightPool_{}.dat'.format(data_tag) 103 | 104 | AugmentRender = FastRenderEngine(gpuid) 105 | AugmentRender.SetGeometry('Plane') 106 | AugmentRender.PreLoadAllLight(r'{}/light.txt'.format(params_global['envMapFolder'])) 107 | AugmentRender.SetSampleCount(128, 1024) 108 | fovRadian = 60.0 / 180.0 * math.pi 109 | cameraDist = 1.0 / (math.tan(fovRadian / 2.0)) 110 | AugmentRender.SetCamera(0, 0, cameraDist, 0, 0, 0, 0, 1, 0, fovRadian, 0.01, 100, 384, 384) 111 | 112 | specList_final = {} 113 | roughnessList_final = {} 114 | with open(specular_file, 'r') as f: 115 | rawList = f.read().strip().split('\n') 116 | 117 | for t in rawList: 118 | mid = int(t.split(',')[0]) 119 | spec = float(t.split(',')[1]) 120 | roughness = 
float(t.split(',')[2]) 121 | specList_final[mid] = spec 122 | roughnessList_final[mid] = roughness 123 | 124 | lightPool = pickle.load(open(lightpool_file, 'rb')) 125 | lightNormPool = {} 126 | 127 | # precompute auto-exposure factor 128 | normal_one = np.dstack((np.ones((256,256)), np.ones((256,256)), np.ones((256,256)))) 129 | for m in specList_final.keys(): 130 | lightNormPool[m] = np.zeros((len(lightID), 10, 3)) 131 | for id, lid in enumerate(lightID): 132 | for v in range(0, lightPool[m].shape[1]): 133 | AugmentRender.SetEnvLightByID(lid + 1, lightPool[m][id, v, 0], lightPool[m][id, v, 1]) 134 | AugmentRender.SetAlbedoValue([1.0, 1.0, 1.0]) 135 | AugmentRender.SetSpecValue([0.0, 0.0, 0.0]) 136 | AugmentRender.SetRoughnessValue(0) 137 | AugmentRender.SetNormalMap(normal_one) 138 | img_diffuse = AugmentRender.Render() 139 | norm = np.mean(img_diffuse, axis = (0,1)) 140 | lightNormPool[m][id, v] = norm 141 | 142 | with open(params_global['envMapFolder'] + r'/lightNormPool_{}.dat'.format(data_tag), 'wb') as f: 143 | pickle.dump(lightNormPool, f) 144 | 145 | print('Factor done.\n') 146 | 147 | #render training 148 | if(renderTag == 'train' or renderTag == 'all'): 149 | make_dir(rendered_labeled_out) 150 | path, file = os.path.split(labeled_file_in) 151 | dataset = DataLoaderSVBRDF(path, file, 384, 384, True) 152 | begin = 0 if startTag == -1 else startTag 153 | end = dataset.dataSize if endTag == -1 else endTag 154 | 155 | for k in range(begin, end): 156 | if(k % 1000 == 0): 157 | print('{}/{}'.format(k, dataset.dataSize)) 158 | name = map(int, dataset.dataList[k].split('_')) 159 | m,l,v,o = name 160 | brdfbatch = np.ones((1, 10, 384, 384)) 161 | 162 | albedo = load_pfm(path + r'/m_{}/gt_{}_albedo.pfm'.format(m, o)) 163 | normal = load_pfm(path + r'/m_{}/gt_{}_normal.pfm'.format(m, o)) 164 | specvalue = specList_final[m] 165 | roughnessvalue = roughnessList_final[m] 166 | 167 | brdfbatch[0,0:3,:,:] = albedo.transpose(2,0,1) 168 | brdfbatch[0,3:6,:,:] = specvalue 
169 | brdfbatch[0,6,:,:] = roughnessvalue 170 | brdfbatch[0,7:10,:,:] = normal.transpose(2,0,1) 171 | 172 | lids = [l] 173 | rotX = lightPool[m][lightIDToEnumerateID[l], v, 0] 174 | rotY = lightPool[m][lightIDToEnumerateID[l], v, 1] 175 | lxforms = [[rotX],[rotY]] 176 | 177 | imgbatch, normValue = renderOnlineEnvlight(brdfbatch, AugmentRender, lids, lxforms) 178 | outfolder = rendered_labeled_out + r'/m_{}'.format(m) 179 | make_dir(outfolder) 180 | 181 | #0:HDR 1:LDR 2:BOTH 182 | 183 | if(renderType == 0 or renderType == 2): 184 | save_pfm(outfolder + r'/{}_{}_{}_{}_image.pfm'.format(m, l, v, o), imgbatch[0,:,:,:].transpose((1,2,0))) 185 | if(renderType == 1 or renderType == 2): 186 | cv2.imwrite(outfolder + r'/{}_{}_{}_{}_image.jpg'.format(m, l, v, o), toLDR(imgbatch[0,:,:,:].transpose((1,2,0)))) 187 | 188 | if(renderTag == 'test' or renderTag == 'all'): 189 | #render test 190 | make_dir(rendered_test_out) 191 | path, file = os.path.split(test_file_in) 192 | dataset = DataLoaderSVBRDF(path, file, 384, 384, False) 193 | for k in range(0, dataset.dataSize): 194 | if(k % 1000 == 0): 195 | print('{}/{}'.format(k, dataset.dataSize)) 196 | name = map(int, dataset.dataList[k].split('_')) 197 | m,l,v,o = name 198 | brdfbatch = np.ones((1, 10, 384, 384)) 199 | 200 | albedo = load_pfm(path + r'/m_{}/gt_{}_albedo.pfm'.format(m, o)) 201 | normal = load_pfm(path + r'/m_{}/gt_{}_normal.pfm'.format(m, o)) 202 | specvalue = specList_final[m] 203 | roughnessvalue = roughnessList_final[m] 204 | 205 | brdfbatch[0,0:3,:,:] = albedo.transpose(2,0,1) 206 | brdfbatch[0,3:6,:,:] = specvalue 207 | brdfbatch[0,6,:,:] = roughnessvalue 208 | brdfbatch[0,7:10,:,:] = normal.transpose(2,0,1) 209 | 210 | lids = [l] 211 | rotX = lightPool[m][lightIDToEnumerateID[l], v, 0] 212 | rotY = lightPool[m][lightIDToEnumerateID[l], v, 1] 213 | lxforms = [[rotX],[rotY]] 214 | 215 | imgbatch, normfactor = renderOnlineEnvlight(brdfbatch, AugmentRender, lids, lxforms) 216 | outfolder = rendered_test_out + 
r'/m_{}'.format(m) 217 | make_dir(outfolder) 218 | 219 | #0:HDR 1:LDR 2:BOTH 220 | if(renderType == 0 or renderType == 2): 221 | save_pfm(outfolder + r'/{}_{}_{}_{}_image.pfm'.format(m, l, v, o), imgbatch[0,:,:,:].transpose((1,2,0))) 222 | if(renderType == 1 or renderType == 2): 223 | cv2.imwrite(outfolder + r'/{}_{}_{}_{}_image.jpg'.format(m, l, v, o), toLDR(imgbatch[0,:,:,:].transpose((1,2,0)))) 224 | -------------------------------------------------------------------------------- /SVBRDFNet/SVBRDF_Net_Config.ini: -------------------------------------------------------------------------------- 1 | [device] 2 | randomSeed = 23333 3 | 4 | [solver] 5 | SolverType = Adam 6 | 7 | ;learning rate 8 | lr = 0.001 9 | momentum = 0.9 10 | batchSize = 8 11 | lrDecay = 0.0001 12 | weightDecay = 0 13 | 14 | ;use auto exposure 15 | autoExposure = 1 16 | 17 | [stopping] 18 | ;enter -1 to disable certain stopping critiria 19 | nMaxEpoch = 12000 20 | nMaxIter = 300000 21 | 22 | [loop] 23 | ;self-augment on/off 24 | renderLoop = 1 25 | 26 | ;automatic compute ratio between labeled data/unlabeled data usage 27 | autoLoopRatio = 0 28 | 29 | ;or manually set the ratio 30 | normalBatchLength = 1 31 | loopBatchLength = 1 32 | 33 | ;how many iter/epoch before self-augment 34 | loopStartEpoch = -1 35 | loopStartIteration = 30000 36 | 37 | loopLight = 1 38 | 39 | [network] 40 | NetworkType = HomogeneousSpec 41 | Channal = Full 42 | BN = 1 43 | DisableDecoder = 0 44 | nFirstFeatureMap = 16 45 | LogRoughness = 1 46 | LogSpec = 1 47 | 48 | [dataset] 49 | datasetID = 2 50 | 51 | ;labeled data 52 | dataset = /media/v-xil/New Volume/SA_SVBRDF_Net_Data/wood/Labeled/trainingdata.txt 53 | ;test data 54 | testDataset = /media/v-xil/New Volume/SA_SVBRDF_Net_Data/wood/Test/test.txt 55 | 56 | ;unlabeled data 57 | unlabelDataset = /media/v-xil/New Volume/SA_SVBRDF_Net_Data/wood/Unlabeled/unlabeled.txt 58 | 59 | grayLight = 0 60 | normalizeAlbedo = 1 61 | ;pre-computed LUT for lighting 62 | lightPoolFile 
= lightPool_metal.dat 63 | autoExposureLUTFile = lightNormPool_metal.dat 64 | LDR=0 65 | 66 | 67 | [display] 68 | displayStep = 1000 69 | loopdisplayStep = 100 70 | checkpointStepIteration = 10000 71 | checkpointStepEpoch = 3 72 | visulizeStep = 5000 73 | -------------------------------------------------------------------------------- /SVBRDFNet/TestSVBRDF.py: -------------------------------------------------------------------------------- 1 | # TestSVBRDF.py 2 | # Test script for SVBRDF-Net 3 | 4 | import random, os, time, sys 5 | working_path = os.path.dirname(os.path.realpath(__file__)) 6 | root_path = os.path.dirname(working_path) 7 | sys.path.append(root_path + r'/Render') 8 | sys.path.append(root_path + r'/Utils') 9 | import caffe 10 | from utils import save_pfm, load_pfm, pfmToBuffer, pfmFromBuffer, autoExposure 11 | import numpy as np 12 | import math 13 | import logging 14 | import matplotlib.pyplot as plt 15 | 16 | import itertools 17 | 18 | import glob 19 | 20 | from FastRendererCUDA import FastRenderEngine 21 | 22 | import sys 23 | import pickle, json 24 | import shutil 25 | 26 | from ConfigParser import ConfigParser, SafeConfigParser 27 | 28 | from multiprocessing import Process 29 | from multiprocessing import Queue as MultiQueue 30 | 31 | import cv2 32 | import jinja2 33 | os.chdir(working_path) 34 | os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' 35 | params_global = {} 36 | 37 | lightID = [] 38 | 39 | with open('folderPath_SVBRDF.txt', 'r') as f: 40 | params_global['geometryPath'] = r'../Render/plane.obj' 41 | params_global['scriptRoot'] = r'../Utils' 42 | params_global['outFolder'] = f.readline().strip() 43 | params_global['envMapFolder'] = f.readline().strip() 44 | 45 | with open(params_global['envMapFolder'] + r'/light.txt', 'r') as f: 46 | lightID = map(int, f.read().strip().split('\n')) 47 | lightID = list(np.array(lightID) - 1) 48 | 49 | def toLDR(img): 50 | img_out = img ** (1.0 / 2.2) 51 | img_out = np.minimum(255, img_out * 255) 52 | return 
img_out.astype(np.uint8) 53 | 54 | def toHDR(img): 55 | img = img / 255.0 56 | img_out = img ** 2.2 57 | return img_out.astype(np.float32) 58 | 59 | def test(testnet, img): 60 | testnet.blobs['Data_Image'].data[...] = img 61 | testnet.forward() 62 | 63 | albedo_p = testnet.blobs['ConvFinal_Albedo'].data[0,:,:,:].transpose((1,2,0)) 64 | spec_p = np.exp(testnet.blobs['ConvFinal_SpecAlbedo'].data.flatten()) 65 | 66 | roughness_p = np.exp(testnet.blobs['ConvFinal_Roughness'].data.flatten()) 67 | normal_p = testnet.blobs['ConvFinal_Normal'].data[0,:,:,:].transpose((1,2,0)) 68 | 69 | 70 | return albedo_p, spec_p, roughness_p, normal_p 71 | 72 | 73 | def renderSpecBall(renderer, spec, roughness): 74 | renderer.SetGeometry('Sphere') 75 | renderer.SetCamera(0, 0, cameraDist_1, 0, 0, 0, 0, 1, 0, fovRadian, 0.01, 100, 256, 256) 76 | renderer.SetSampleCount(128, 512) 77 | renderer.SetPointLight(0, 1, 1, 1, 0, 0.2, 0.2, 0.2) 78 | renderer.SetAlbedoValue([0.0, 0.0, 0.0]) 79 | renderer.SetSpecValue(spec) 80 | renderer.SetRoughnessValue(roughness) 81 | sphere = renderer.Render() 82 | #Mask 83 | renderer.SetPointLight(0, 1, 1, 1, 0, 0.2, 0.2, 0.2) 84 | renderer.SetAlbedoValue([1.0, 1.0, 1.0]) 85 | renderer.SetSpecValue([0.0,0.0,0.0]) 86 | renderer.SetRoughnessValue(roughness) 87 | renderer.SetRenderMode(1) 88 | mask = renderer.Render() 89 | renderer.SetRenderMode(0) 90 | sphere[mask == 0] = 1 91 | 92 | renderer.SetGeometry('Plane') 93 | renderer.SetCamera(0, 0, cameraDist, 0, 0, 0, 0, 1, 0, fovRadian, 0.01, 100, 256, 256) 94 | renderer.SetSampleCount(128, 512) 95 | 96 | return sphere 97 | 98 | def renderRelighting(renderer, albedo, spec, roughness, normal): 99 | renderer.SetPointLight(0, 0.27, -0.25, 1, 0, 0.6, 0.6, 0.6) 100 | renderer.SetAlbedoMap(albedo) 101 | renderer.SetSpecValue(spec) 102 | renderer.SetRoughnessValue(roughness) 103 | 104 | normal = normal * 2.0 - 1.0 105 | normal[0] = normal[0] * 2.5 106 | len = np.linalg.norm(normal, axis = 2) 107 | normal = normal / 
np.dstack((len, len, len)) 108 | normal = 0.5*(normal + 1.0) 109 | 110 | renderer.SetNormalMap(normal*2.0 - 1.0) 111 | img = renderer.Render() 112 | 113 | renderer.SetEnvLightByID(43, 30, -10.0) 114 | renderer.SetAlbedoMap(albedo) 115 | renderer.SetSpecValue(spec) 116 | renderer.SetRoughnessValue(roughness) 117 | renderer.SetNormalMap(normal*2.0 - 1.0) 118 | img_1 = renderer.Render() 119 | 120 | return 1.2 * img + 0.8 * img_1 121 | 122 | 123 | lightIDToEnumerateID = {} 124 | for id, lid in enumerate(lightID): 125 | lightIDToEnumerateID[lid] = id 126 | 127 | 128 | if __name__ == '__main__': 129 | modelFile = sys.argv[1] 130 | testSetPath = sys.argv[2] 131 | gpuid = int(sys.argv[3]) 132 | 133 | imgw = 256 134 | 135 | fovRadian = 60.0 / 180.0 * math.pi 136 | cameraDist = 1.0 / (math.tan(fovRadian / 2.0)) 137 | cameraDist_1 = 1.5 / (math.tan(fovRadian / 2.0)) 138 | 139 | RelightingRender = FastRenderEngine(gpuid) 140 | RelightingRender.SetGeometry('Plane') 141 | RelightingRender.SetCamera(0, 0, cameraDist, 0, 0, 0, 0, 1, 0, fovRadian, 0.01, 100, 256, 256) 142 | RelightingRender.SetSampleCount(128, 512) 143 | RelightingRender.PreLoadAllLight(r'{}/light.txt'.format(params_global['envMapFolder'])) 144 | 145 | caffe.set_mode_gpu() 146 | caffe.set_device(gpuid) 147 | path, file = os.path.split(modelFile) 148 | modelFolder = path 149 | testnet = caffe.Net(path + r'/net_test.prototxt', caffe.TEST) 150 | testnet.copy_from(modelFile) 151 | 152 | path, file = os.path.split(testSetPath) 153 | with open(testSetPath, 'r') as f: 154 | filenames = f.read().strip().split('\n') 155 | 156 | np.random.shuffle(filenames) 157 | 158 | pixelCnt = imgw*imgw 159 | 160 | tag_testSet = 'test' 161 | jinjiaEnv = jinja2.Environment(loader = jinja2.FileSystemLoader('./')).get_template('template_visSVBRDFNet_Real.html') 162 | renderContext = {} 163 | renderContext['networkTag'] = modelFolder 164 | renderContext['dataList'] = [] 165 | visualDir = modelFolder + r'/visualize_{}'.format(tag_testSet) 166 
| if(os.path.exists(visualDir) == False): 167 | os.makedirs(visualDir) 168 | 169 | for filename in filenames: 170 | fullpath = path + r'/{}.jpg'.format(filename.strip()) 171 | print('Test {}\n'.format(filename.strip())) 172 | img = toHDR(cv2.imread(fullpath)) 173 | 174 | img_in = np.zeros((1,3,256,256)) 175 | img_in[0,:,:,:] = img.transpose((2,0,1)) 176 | albedo_p, spec_p, roughness_p, normal_p = test(testnet, img_in) 177 | factor = 0.5 / np.mean(np.linalg.norm(albedo_p, axis = 2)) 178 | albedo_p = albedo_p * factor 179 | spec_p = spec_p * factor 180 | specball_fit = renderSpecBall(RelightingRender, spec_p, roughness_p) 181 | 182 | data = {} 183 | data_id = '{}'.format(filename.strip()) 184 | 185 | cv2.imwrite(visualDir + r'/{}_img.jpg'.format(data_id), toLDR(img)) 186 | cv2.imwrite(visualDir + r'/{}_albedo_fit.jpg'.format(data_id), toLDR(albedo_p)) 187 | cv2.imwrite(visualDir + r'/{}_specball_fit.jpg'.format(data_id), toLDR(specball_fit)) 188 | cv2.imwrite(visualDir + r'/{}_normal_fit.jpg'.format(data_id), toLDR(normal_p)) 189 | cv2.imwrite(visualDir + r'/{}_relighting_fit.jpg'.format(data_id), toLDR(renderRelighting(RelightingRender, albedo_p, spec_p, roughness_p, normal_p))) 190 | 191 | data['ID'] = data_id 192 | data['img'] = visualDir + r'/{}_img.jpg'.format(data_id) 193 | data['albedo_fit'] = visualDir + r'/{}_albedo_fit.jpg'.format(data_id) 194 | data['specball_fit'] = visualDir + r'/{}_specball_fit.jpg'.format(data_id) 195 | data['normal_fit'] = visualDir + r'/{}_normal_fit.jpg'.format(data_id) 196 | data['relighting_fit'] = visualDir + r'/{}_relighting_fit.jpg'.format(data_id) 197 | 198 | renderContext['dataList'].append(data) 199 | renderedHtml = jinjiaEnv.render(renderContext) 200 | with open(modelFolder + r'/visResult_{}.html'.format(tag_testSet), 'w') as f1: 201 | f1.write(renderedHtml) -------------------------------------------------------------------------------- /SVBRDFNet/folderPath_SVBRDF.txt: 
-------------------------------------------------------------------------------- 1 | ../TrainedResult 2 | /home/v-xil/v-xil/envmap_pfm_cube 3 | -------------------------------------------------------------------------------- /SVBRDFNet/template_visSVBRDFNet_Real.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | DatasetVisualization 5 | 26 | 39 | 40 | 41 | 42 |

Network - {{networkTag}}

43 |

Summary:

44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | {% for data in dataList %} 57 | 58 | 59 | 64 | 69 | 74 | 79 | 84 | 85 | {% endfor %} 86 |
IDInput ImageAlbedo FitSpecball FitNormal Fit
{{data.ID}} 60 |
61 | 62 |
63 |
65 |
66 | 67 |
68 |
70 |
71 | 72 |
73 |
75 |
76 | 77 |
78 |
80 |
81 | 82 |
83 |
87 | 88 | -------------------------------------------------------------------------------- /Utils/solver_template.prototxt: -------------------------------------------------------------------------------- 1 | net: "#netpath#" 2 | base_lr: #base_lr# 3 | lr_policy: "inv" 4 | gamma: #gamma# 5 | power: 1.0 6 | momentum: #momentum# 7 | weight_decay:#weightDecay# 8 | solver_mode: GPU 9 | snapshot_prefix: "#snapshotpath#" -------------------------------------------------------------------------------- /Utils/utils.py: -------------------------------------------------------------------------------- 1 | import sys, os, math, random, glob, re, cStringIO 2 | import numpy as np 3 | import cv2 4 | 5 | def make_dir(folder): 6 | if(os.path.exists(folder) == False): 7 | os.makedirs(folder) 8 | 9 | def autoExposure(img): 10 | maxValue = np.max(img) + 1e-6 11 | return img / maxValue 12 | 13 | def toHDR(img): 14 | img = img / 255.0 15 | img_out = img ** (2.2) 16 | return img_out.astype(np.float32) 17 | 18 | def toLDR(img): 19 | img_out = img ** (1.0 / 2.2) 20 | img_out = np.minimum(255, img_out * 255) 21 | return img_out.astype(np.uint8) 22 | 23 | #PFM load and write 24 | def pfmFromBuffer(buffer, reverse = 1): 25 | sStream = cStringIO.StringIO(buffer) 26 | 27 | color = None 28 | width = None 29 | height = None 30 | scale = None 31 | endian = None 32 | 33 | header = sStream.readline().rstrip() 34 | color = (header == 'PF') 35 | 36 | width, height = map(int, sStream.readline().strip().split(' ')) 37 | scale = float(sStream.readline().rstrip()) 38 | endian = '<' if(scale < 0) else '>' 39 | scale = abs(scale) 40 | 41 | 42 | rawdata = np.fromstring(sStream.read(), endian + 'f') 43 | shape = (height, width, 3) if color else (height, width) 44 | sStream.close() 45 | if(len(shape) == 3): 46 | return rawdata.reshape(shape).astype(np.float32)[:,:,::-1] 47 | else: 48 | return rawdata.reshape(shape).astype(np.float32) 49 | 50 | def pfmToBuffer(img, reverse = 1): 51 | color = None 52 | sStream = 
cStringIO.StringIO() 53 | img = np.ascontiguousarray(img) 54 | if(img.dtype.name != 'float32'): 55 | img = img.astype(np.float32) 56 | 57 | 58 | color = True if (len(img.shape) == 3) else False 59 | 60 | if(reverse and color): 61 | img = img[:,:,::-1] 62 | 63 | sStream.write('PF\n' if color else 'Pf\n') 64 | sStream.write('%d %d\n' % (img.shape[1], img.shape[0])) 65 | 66 | endian = img.dtype.byteorder 67 | scale = 1.0 68 | if endian == '<' or endian == '=' and sys.byteorder == 'little': 69 | scale = -scale 70 | 71 | sStream.write('%f\n' % scale) 72 | sStream.write(img.tobytes()) 73 | outBuffer = sStream.getvalue() 74 | sStream.close() 75 | return outBuffer 76 | 77 | def save_pfm(filepath, img, reverse = 1): 78 | color = None 79 | file = open(filepath, 'wb') 80 | if(img.dtype.name != 'float32'): 81 | img = img.astype(np.float32) 82 | 83 | color = True if (len(img.shape) == 3) else False 84 | 85 | if(reverse and color): 86 | img = img[:,:,::-1] 87 | 88 | file.write('PF\n' if color else 'Pf\n') 89 | file.write('%d %d\n' % (img.shape[1], img.shape[0])) 90 | 91 | endian = img.dtype.byteorder 92 | scale = 1.0 93 | if endian == '<' or endian == '=' and sys.byteorder == 'little': 94 | scale = -scale 95 | 96 | file.write('%f\n' % scale) 97 | img.tofile(file) 98 | file.close() 99 | 100 | def load_pfm(filepath, reverse = 1): 101 | file = open(filepath, 'rb') 102 | color = None 103 | width = None 104 | height = None 105 | scale = None 106 | endian = None 107 | 108 | header = file.readline().rstrip() 109 | color = (header == 'PF') 110 | 111 | width, height = map(int, file.readline().strip().split(' ')) 112 | scale = float(file.readline().rstrip()) 113 | endian = '<' if(scale < 0) else '>' 114 | scale = abs(scale) 115 | 116 | rawdata = np.fromfile(file, endian + 'f') 117 | shape = (height, width, 3) if color else (height, width) 118 | file.close() 119 | 120 | if(color): 121 | return rawdata.reshape(shape).astype(np.float32)[:,:,::-1] 122 | else: 123 | return 
rawdata.reshape(shape).astype(np.float32)


def load_and_clip(filepath, left, top, width, height, reverse = 1):
    """Load an image (PFM or LDR via cv2) and return the crop
    [top:top+height, left:left+width]."""
    _, extension = os.path.splitext(filepath)
    img = load_pfm(filepath) if extension == '.pfm' else toHDR(cv2.imread(filepath))
    rows = slice(top, top + height)
    cols = slice(left, left + width)
    if img.ndim == 3:
        return img[rows, cols, :]
    return img[rows, cols]


#utilities
def renormalize(normalMap):
    """Re-normalize a batch (n*3*h*w) of [0,1]-encoded normal maps to unit length."""
    result = np.zeros(normalMap.shape)
    for idx in range(normalMap.shape[0]):
        # decode to [-1,1], move channels last for per-pixel vector math
        vec = (2.0 * normalMap[idx, :, :, :] - 1).transpose(1, 2, 0)
        norm = np.linalg.norm(vec, axis = 2)
        vec = vec / np.dstack((norm, norm, norm))
        # re-encode to [0,1] and restore channels-first layout
        result[idx, :, :, :] = (0.5 * (vec + 1)).transpose(2, 0, 1)
    return result

def normalizeAlbedoSpec(brdfbatch):
    """Scale channels 0:6 (diffuse + spec) of each item IN PLACE so the mean
    per-pixel albedo norm becomes 0.5; returns the same batch object."""
    for idx in range(brdfbatch.shape[0]):
        scale = 0.5 / np.mean(np.linalg.norm(brdfbatch[idx, 0:3, :, :], axis = 0))
        brdfbatch[idx, 0:6, :, :] *= scale
    return brdfbatch

#normal map : [0,1] range!
def normalBatchToThetaPhiBatch(data):
    """Convert an n*3*h*w batch of [0,1]-encoded normals to n*2*h*w angles.

    After decoding to [-1,1]: theta = arccos(x) from channel 0,
    phi = arctan2(y, z) from channels 1 and 2.
    """
    outBatch = np.zeros((data.shape[0], 2, data.shape[2], data.shape[3]))
    data_1 = data * 2 - 1.0
    outBatch[:,0,:,:] = np.arccos(data_1[:,0,:,:])
    outBatch[:,1,:,:] = np.arctan2(data_1[:,1,:,:], data_1[:,2,:,:])
    return outBatch


def thetaPhiBatchToNormalBatch(data):
    """Inverse of normalBatchToThetaPhiBatch: (theta, phi) back to
    [0,1]-encoded normal maps (n*3*h*w)."""
    outBatch = np.zeros((data.shape[0], 3, data.shape[2], data.shape[3]))
    outBatch[:,0,:,:] = np.cos(data[:,0,:,:])
    outBatch[:,1,:,:] = np.sin(data[:,0,:,:]) * np.sin(data[:,1,:,:])
    outBatch[:,2,:,:] = np.sin(data[:,0,:,:]) * np.cos(data[:,1,:,:])

    # re-encode from [-1,1] to [0,1]
    outBatch = 0.5*(outBatch + 1.0)

    return outBatch
#n*3*256*256

def findIndex(query, pList):
    """For each value in query, return the index (as float) of the nearest
    value in pList."""
    out = np.zeros((len(query)))
    for i, p in enumerate(query):
        out[i] = np.argmin(np.abs(p - pList))

    return out

def listToStr(numlist):
    """Join numbers into a comma-separated string, e.g. [1, 2] -> '1,2'.

    BUG FIX: the original built the pieces but ended with a bare `return`,
    so it always returned None (and raised IndexError on an empty list).
    """
    return ','.join('{}'.format(x) for x in numlist)

def dssim(img1, img2):
    """Structural dissimilarity of two BGR images: 0.5 * (1 - SSIM) on grayscale.

    NOTE(review): `ssim` is not defined in this module or its visible imports
    (presumably skimage's SSIM) -- confirm before calling.
    """
    img1_g = cv2.cvtColor(img1.astype(np.float32), cv2.COLOR_BGR2GRAY)
    img2_g = cv2.cvtColor(img2.astype(np.float32), cv2.COLOR_BGR2GRAY)
    return 0.5 * (1.0 - ssim(img1_g, img2_g))


def meanDownsample(img):
    """2x downsample by averaging each 2x2 block (expects even dimensions)."""
    out = 0.25*(img[0::2, 0::2] + img[1::2, 0::2] + img[0::2, 1::2] + img[1::2, 1::2])
    return out

def genMipMap(texCube):
    """Build a mipmap chain for a 6-face cube texture.

    Assumes width == height and a power-of-two size; chain depth is capped
    at 11 levels. NOTE: this module targets Python 2 (see cStringIO import),
    so `/ 2` below is integer division on the int shape values.
    """
    nLevel = int(min(10, math.log(texCube.shape[1], 2))) + 1
    texMipMapList = []
    texMipMapList.append(texCube)
    for k in range(1, nLevel):
        prevCube = texMipMapList[k-1]
        if(len(prevCube.shape) == 3):
            newCube = np.ones((6, prevCube.shape[1] / 2, prevCube.shape[2] / 2))
        else:
            newCube = np.ones((6, prevCube.shape[1] / 2, prevCube.shape[2] / 2, 4))

        for f in range(0, 6):
            newCube[f] = meanDownsample(prevCube[f])#cv2.pyrDown(prevCube[f])

texMipMapList.append(newCube.astype(np.float32)) 217 | return texMipMapList 218 | 219 | def getTexCube(crossImg): 220 | #TOBGRA since cuda only accept float4 textures 221 | if(len(crossImg.shape) == 2): 222 | crossImg = np.dstack((crossImg, crossImg, crossImg)) 223 | faceRes = crossImg.shape[1] / 4 224 | width = height = faceRes 225 | if(len(crossImg.shape) == 3): 226 | texCube = np.ones((6, faceRes, faceRes, 4)) 227 | texCube[0, :, :, 0:3] = crossImg[faceRes:faceRes+height, 2*faceRes:2*faceRes+width,:] 228 | texCube[1, :, :, 0:3] = crossImg[faceRes:faceRes+height, 0:width,:] 229 | texCube[3, :, :, 0:3] = crossImg[0:height, faceRes:faceRes+width,:] 230 | texCube[2, :, :, 0:3] = crossImg[2*faceRes:2*faceRes+height, faceRes:faceRes+width,:] 231 | texCube[4, :, :, 0:3] = crossImg[faceRes:faceRes+height, faceRes:faceRes+width,:] 232 | texCube[5, :, :, 0:3] = crossImg[faceRes:faceRes+height, 3*faceRes:3*faceRes+width,:] 233 | else: 234 | texCube = np.ones((6, faceRes, faceRes)) 235 | texCube[0, :, :] = crossImg[faceRes:faceRes+height, 2*faceRes:2*faceRes+width] 236 | texCube[1, :, :] = crossImg[faceRes:faceRes+height, 0:width] 237 | texCube[3, :, :] = crossImg[0:height, faceRes:faceRes+width] 238 | texCube[2, :, :] = crossImg[2*faceRes:2*faceRes+height, faceRes:faceRes+width] 239 | texCube[4, :, :] = crossImg[faceRes:faceRes+height, faceRes:faceRes+width] 240 | texCube[5, :, :] = crossImg[faceRes:faceRes+height, 3*faceRes:3*faceRes+width] 241 | 242 | for i in range(0, 6): 243 | texCube[i, :, :] = texCube[i, ::-1, :] 244 | 245 | 246 | return np.ascontiguousarray(texCube, dtype=np.float32) 247 | 248 | 249 | 250 | 251 | #DataLoader class 252 | def checkVaild(root, mid, lid, vid, oid): 253 | imgpath = root + r'/m_{}/{}_{}_{}_{}_image.pfm'.format(mid, mid, lid, vid, oid) 254 | apath = root + r'/m_{}/gt_{}_albedo.pfm'.format(mid, oid) 255 | spath = root + r'/m_{}/gt_{}_specalbedo.pfm'.format(mid, oid) 256 | rpath = root + r'/m_{}/gt_{}_roughness.pfm'.format(mid, oid) 257 | 
258 | if(os.path.exists(imgpath) and os.path.exists(apath) and os.path.exists(spath) and os.path.exists(rpath)):# and os.path.exists(npath)): 259 | return True 260 | else: 261 | return False 262 | 263 | class RealDataLoaderSVBRDF(object): 264 | dataSize = 0 265 | rootPath = '' 266 | 267 | dataList = [] 268 | cursorPos = 0 269 | 270 | width = 256 271 | height = 256 272 | 273 | def __init__(self, rootPath, imgListFile): 274 | with open(rootPath + r'/{}'.format(imgListFile), 'r') as f: 275 | self.dataList = f.read().strip().split('\n') 276 | 277 | self.rootPath = rootPath 278 | self.dataSize = len(self.dataList) 279 | 280 | self.cursorPos = 0 281 | self.width = 256 282 | self.height = 256 283 | 284 | def shuffle(self, seed = []): 285 | if(seed == []): 286 | np.random.shuffle(self.dataList) 287 | else: 288 | np.random.seed(seed) 289 | np.random.shuffle(self.dataList) 290 | 291 | def GetImg(self, idx): 292 | path = r'{}/{}'.format(self.rootPath, self.dataList[idx]).strip() #for the FUCKING CRLF difference between WINDOWS and Linux 293 | img = toHDR(cv2.imread(path)).transpose(2,0,1)# / 255.0 294 | return img[np.newaxis, :, :, :] 295 | 296 | def GetImgWithName(self, idx): 297 | img = self.GetImg(idx) 298 | name = self.dataList[idx] 299 | return img, name 300 | 301 | def GetBatchWithName(self, start, n): 302 | dataBatch = np.zeros((n, 3, self.height, self.width)) 303 | nameList = [] 304 | 305 | tmpSize = self.dataSize 306 | for i in range(0, n): 307 | idx = (start + i) % tmpSize 308 | dataBatch[i, :, :, :], name = self.GetImgWithName(idx) 309 | nameList.append(name) 310 | 311 | return dataBatch, nameList 312 | 313 | def GetBatch(self, start, n): 314 | dataBatch = np.zeros((n, 3, self.height, self.width)) 315 | 316 | tmpSize = self.dataSize 317 | for i in range(0, n): 318 | idx = (start + i) % tmpSize 319 | dataBatch[i, :, :, :] = self.GetImg(idx) 320 | 321 | return dataBatch 322 | 323 | def GetNextBatch(self, n): 324 | dataBatch = self.GetBatch(self.cursorPos, n, unlabel) 
325 | self.cursorPos = (self.cursorPos + n) % self.dataSize 326 | 327 | return dataBatch 328 | 329 | class DataLoaderSVBRDF(object): 330 | dataSize = 0 331 | rootPath = '' 332 | 333 | dataList = [] 334 | cursorPos = 0 335 | 336 | width = 256 337 | height = 256 338 | 339 | nBRDFChannal = 10 340 | 341 | rawwidth = 0 342 | rawheight = 0 343 | 344 | randomClip = False 345 | 346 | #hack 347 | clipPos = [] 348 | 349 | ldr = False 350 | smallInput = False 351 | 352 | def __init__(self, rootPath, imgListFile, rawWidth = 256, rawHeight = 256, randomClip = False): 353 | self.mList = [] 354 | self.lList = [] 355 | self.vList = [] 356 | self.oList = [] 357 | self.clipPosList = [] 358 | self.specRoughnessList = {} 359 | 360 | with open(rootPath + r'/{}'.format(imgListFile), 'r') as f: 361 | self.dataList = f.read().strip().split('\n') 362 | 363 | with open(rootPath + r'/{}'.format(imgListFile), 'r') as f: 364 | self.fullDataList = f.read().strip().split('\n') 365 | 366 | with open(rootPath + r'/{}'.format('specroughness.txt'), 'r') as f: 367 | rawList = f.read().strip().split('\n') 368 | for t in rawList: 369 | mid = int(t.split(',')[0]) 370 | spec = float(t.split(',')[1]) 371 | roughness = float(t.split(',')[2]) 372 | self.specRoughnessList[mid] = (spec, roughness) 373 | if(os.path.exists(rootPath + r'/{}'.format('translatepos.txt'))): 374 | self.clipPosList = pickle.load(open(rootPath + r'/{}'.format('translatepos.dat'), 'rb')) 375 | 376 | self.rootPath = rootPath 377 | 378 | self.rawwidth = rawWidth 379 | self.rawheight = rawHeight 380 | self.randomClip = randomClip 381 | 382 | self.buildMLVOList() 383 | self.dataSize = len(self.dataList) 384 | self.fulldataSize = len(self.dataList) 385 | 386 | self.cursorPos = 0 387 | self.width = 256 388 | self.height = 256 389 | 390 | self.nBRDFChannal = 10 391 | 392 | def shuffle(self, seed = []): 393 | if(seed == []): 394 | np.random.shuffle(self.dataList) 395 | else: 396 | np.random.seed(seed) 397 | np.random.shuffle(self.dataList) 398 
| 399 | 400 | def checkList(self): 401 | newList = [] 402 | for item in self.dataList: 403 | m, l, v, o = map(int, item.split('_')) 404 | if(checkVaild(self.rootPath, m, l, v, o)): 405 | newList.append(item) 406 | 407 | self.dataList = newList 408 | 409 | def buildMLVOList(self): 410 | mList = set() 411 | lList = set() 412 | vList = set() 413 | oList = set() 414 | 415 | for data in self.dataList: 416 | m, l, v, o = map(int, data.split('_')) 417 | mList.add(m) 418 | lList.add(l) 419 | vList.add(v) 420 | oList.add(o) 421 | 422 | self.mList = sorted(list(mList)) 423 | self.lList = sorted(list(lList)) 424 | self.vList = sorted(list(vList)) 425 | self.oList = sorted(list(oList)) 426 | 427 | def buildSubDataset(self, mid_list, lid_list, vid_list, oid_list): 428 | dataList = [] 429 | for m in mid_list: 430 | for l in lid_list: 431 | for v in vid_list: 432 | dataList.append('{}_{}_{}_{}'.format(m, l ,v, o)) 433 | 434 | self.mList = sorted(mid_list) 435 | self.lList = sorted(lid_list) 436 | self.vList = sorted(vid_list) 437 | self.oList = sorted(oid_list) 438 | 439 | self.dataList = dataList 440 | self.dataSize = len(self.dataList) 441 | 442 | def GetItem(self, idx): 443 | mid, lid, vid, oid = map(int, self.dataList[idx].split('_')) 444 | img, brdf = self.GetItemByID(mid, lid, vid, oid) 445 | return img, brdf 446 | 447 | def GetImgOnly(self, idx): 448 | mid, lid, vid, oid = map(int, self.dataList[idx].split('_')) 449 | img = self.GetImgOnlyByID(mid, lid, vid, oid) 450 | 451 | return img 452 | 453 | def GetAlbedoAndNormal(self, idx): 454 | mid, lid, vid, oid = map(int, self.dataList[idx].split('_')) 455 | brdf = self.GetAlbedoAndNormalOnlyByID(mid, lid, vid, oid) 456 | return brdf 457 | 458 | 459 | def GetAlbedoAndNormalWithName(self, idx,): 460 | brdf = self.GetAlbedoAndNormal(idx) 461 | name = map(int, self.dataList[idx].split('_')) 462 | return brdf, name 463 | 464 | def GetItemWithName(self, idx): 465 | img, brdf = self.GetItem(idx) 466 | name = map(int, 
self.dataList[idx].split('_')) 467 | 468 | return img, brdf, name 469 | 470 | def GetBatchWithName(self, start, n): 471 | dataBatch = np.zeros((n, 3, self.height, self.width)) 472 | brdfBatch = np.zeros((n, self.nBRDFChannal, self.height, self.width)) 473 | nameList = [] 474 | 475 | tmpSize = self.dataSize 476 | 477 | for i in range(0, n): 478 | idx = (start + i) % tmpSize 479 | dataBatch[i, :, :, :], brdfBatch[i, :, :, :], name = self.GetItemWithName(idx) 480 | nameList.append(name) 481 | 482 | return dataBatch, brdfBatch, nameList 483 | 484 | def GetAlbedoAndNormalBatchWithName(self, start, n): 485 | brdfBatch = np.zeros((n, self.nBRDFChannal, self.height, self.width)) 486 | nameList = [] 487 | 488 | tmpSize = self.dataSize 489 | for i in range(0, n): 490 | idx = (start + i) % tmpSize 491 | brdfBatch[i, :, :, :], name = self.GetAlbedoAndNormalWithName(idx) 492 | nameList.append(name) 493 | 494 | return brdfBatch, nameList 495 | 496 | def GetImgOnlyBatch(self, start, n): 497 | dataBatch = np.zeros((n, 3, self.height, self.width)) 498 | 499 | tmpSize = self.dataSize 500 | for i in range(0, n): 501 | idx = (start + i) % tmpSize 502 | dataBatch[i, :, :, :] = self.GetImgOnly(idx) 503 | 504 | return dataBatch 505 | 506 | def GetBatch(self, start, n): 507 | dataBatch = np.zeros((n, 3, self.height, self.width)) 508 | brdfBatch = np.zeros((n, self.nBRDFChannal, self.height, self.width)) 509 | 510 | tmpSize = self.dataSize 511 | for i in range(0, n): 512 | idx = (start + i) % tmpSize 513 | dataBatch[i, :, :, :], brdfBatch[i, :, :, :] = self.GetItem(idx) 514 | 515 | return dataBatch, brdfBatch 516 | 517 | def GetImgOnlyNextBatch(self, n): 518 | dataBatch = self.GetImgOnlyBatch(self.cursorPos, n) 519 | self.cursorPos = (self.cursorPos + n) % self.dataSize 520 | 521 | return dataBatch 522 | 523 | def GetNextBatch(self, n): 524 | dataBatch, brdfBatch = self.GetBatch(self.cursorPos, n) 525 | self.cursorPos = (self.cursorPos + n) % self.dataSize 526 | 527 | return dataBatch, 
brdfBatch 528 | 529 | def GetAlbedoAndNormalOnlyByID(self, mid, lid, vid, oid): ##random give spec and roughness 530 | brdf = np.zeros((1, self.nBRDFChannal, self.height, self.width)) 531 | specValue = np.random.uniform(0.005, 0.15) 532 | roughnessValue = np.random.uniform(0.005, 0.15) 533 | if(self.randomClip): 534 | if(self.clipPosList != []): 535 | clip_left, clip_top = clipPos['{}_{}_{}_{}'.format(mid,lid,vid,oid)] 536 | else: 537 | clip_left = np.random.randint(0, self.rawwidth - 1 - self.width) 538 | clip_top = np.random.randint(0, self.rawheight - 1 - self.height) 539 | else: 540 | clip_left = self.rawwidth / 2 - self.width / 2 541 | clip_top = self.rawheight / 2 - self.height / 2 542 | 543 | self.clipPos = [clip_left, clip_top] 544 | brdf[0,0:3,:,:] = load_and_clip(self.rootPath + r'/m_{}/gt_{}_albedo.pfm'.format(mid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 545 | brdf[0,3:6,:,:] = specValue 546 | brdf[0,6,:,:] = roughnessValue 547 | brdf[0,7:10,:,:] = load_and_clip(self.rootPath + r'/m_{}/gt_{}_normal.pfm'.format(mid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 548 | brdf[0,7,:,:][np.isnan(brdf[0,7,:,:])] = 1.0 549 | brdf[0,8,:,:][np.isnan(brdf[0,8,:,:])] = 0.0 550 | brdf[0,9,:,:][np.isnan(brdf[0,9,:,:])] = 0.0 551 | 552 | return brdf 553 | 554 | def GetImgOnlyByID(self, mid, lid, vid, oid): 555 | if(self.randomClip): 556 | if(self.clipPosList != []): 557 | clip_left, clip_top = clipPos['{}_{}_{}_{}'.format(mid,lid,vid,oid)] 558 | else: 559 | clip_left = np.random.randint(0, self.rawwidth - 1 - self.width) 560 | clip_top = np.random.randint(0, self.rawheight - 1 - self.height) 561 | else: 562 | clip_left = self.rawwidth / 2 - self.width / 2 563 | clip_top = self.rawheight / 2 - self.height / 2 564 | self.clipPos = [clip_left, clip_top] 565 | if(self.ldr): 566 | img = load_and_clip(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.jpg'.format(mid, mid, lid, vid, oid), clip_left, clip_top, self.width, 
self.height).transpose((2,0,1)) 567 | else: 568 | img = load_and_clip(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.pfm'.format(mid, mid, lid, vid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 569 | return img[np.newaxis, :, :, :] 570 | 571 | def GetItemByID(self, mid, lid, vid, oid): 572 | brdf = np.zeros((1, self.nBRDFChannal, self.height, self.width)) 573 | 574 | if(self.randomClip): 575 | if(self.clipPosList != []): 576 | clip_left, clip_top = clipPos['{}_{}_{}_{}'.format(mid,lid,vid,oid)] 577 | else: 578 | clip_left = np.random.randint(0, self.rawwidth - 1 - self.width) 579 | clip_top = np.random.randint(0, self.rawheight - 1 - self.height) 580 | else: 581 | clip_left = self.rawwidth / 2 - self.width / 2 582 | clip_top = self.rawheight / 2 - self.height / 2 583 | self.clipPos = [clip_left, clip_top] 584 | 585 | if(os.path.exists(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.pfm'.format(mid, mid, lid, vid, oid)) == False or 586 | os.path.exists(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.jpg'.format(mid, mid, lid, vid, oid)) == False): 587 | img = -100*np.ones((3,self.height,self.width)) 588 | else: 589 | if(self.ldr): 590 | img = load_and_clip(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.jpg'.format(mid, mid, lid, vid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 591 | else: 592 | img = load_and_clip(self.rootPath + r'/m_{}/{}_{}_{}_{}_image.pfm'.format(mid, mid, lid, vid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 593 | brdf[0,0:3,:,:] = load_and_clip(self.rootPath + r'/m_{}/gt_{}_albedo.pfm'.format(mid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 594 | brdf[0,3:6,:,:] = self.specRoughnessList[mid][0] 595 | brdf[0,6,:,:] = self.specRoughnessList[mid][1] 596 | brdf[0,7:10,:,:] = load_and_clip(self.rootPath + r'/m_{}/gt_{}_normal.pfm'.format(mid, oid), clip_left, clip_top, self.width, self.height).transpose((2,0,1)) 597 | brdf[0,7,:,:][np.isnan(brdf[0,7,:,:])] = 1.0 
598 | brdf[0,8,:,:][np.isnan(brdf[0,8,:,:])] = 0.0 599 | brdf[0,9,:,:][np.isnan(brdf[0,9,:,:])] = 0.0 600 | 601 | return img[np.newaxis, :, :, :], brdf 602 | 603 | class DataLoaderSimple(object): 604 | fulldataSize = 0 605 | dataSize = 0 606 | 607 | brdfCube = [] 608 | fulldataList = [] 609 | dataList = [] 610 | rootPath = '' 611 | 612 | cursorPos = 0 613 | width = 0 614 | height = 0 615 | 616 | aCnt = 0 617 | sCnt = 0 618 | rCnt = 0 619 | 620 | tCnt = 15 621 | pCnt = 15 622 | 623 | def __init__(self, rootPath, imgListFile, aCnt, sCnt, rCnt, width, height): 624 | #load brdf cube 625 | if(os.path.exists(rootPath + r'/cubeResolution.txt')): 626 | with open(rootPath + r'/cubeResolution.txt', 'r') as f: 627 | aCnt, sCnt, rCnt = map(int, f.read().strip().split(',')) 628 | 629 | print(aCnt, sCnt, rCnt) 630 | self.brdfCube = np.loadtxt(rootPath + r'/brdfcube.txt').reshape((aCnt, sCnt, rCnt, 3)) 631 | self.brdfCube[:,:,:,2] = np.log(self.brdfCube[:,:,:,2]) 632 | self.brdfCube_Masked = np.ma.array(self.brdfCube) 633 | self.brdfCube_Masked.mask = False 634 | 635 | with open(rootPath + r'/{}'.format(imgListFile), 'r') as f: 636 | self.dataList = f.read().strip().split('\n') 637 | 638 | with open(rootPath + r'/{}'.format(imgListFile), 'r') as f: 639 | self.fulldataList = f.read().strip().split('\n') 640 | 641 | 642 | self.dataSize = len(self.dataList) 643 | self.fulldataSize = len(self.dataList) 644 | self.rootPath = rootPath 645 | self.cursorPos = 0 646 | self.width = 128 647 | self.height = 128 648 | self.aCnt = aCnt 649 | self.sCnt = sCnt 650 | self.rCnt = rCnt 651 | self.lightCount = 15*15 652 | 653 | 654 | def buildSubDataset_2(self, brdf_light_list, inverse = 0): 655 | dataList = [] 656 | self.lightCount = -1 657 | self.brdfCube_Masked.mask = True 658 | 659 | for brdf_light in brdf_light_list: 660 | a, s, r, l, v = brdf_light 661 | self.brdfCube_Masked.mask[a, s, r, :] = False 662 | dataList.append('{}_{}_{}_{}_{}'.format(a, s, r, l, v)) 663 | 664 | if(inverse): 665 | 
self.dataList = list(set(self.fulldataList) - set(dataList)) 666 | self.dataSize = len(self.dataList) 667 | else: 668 | self.dataList = dataList 669 | self.dataSize = len(self.dataList) 670 | 671 | def buildSubDataset_1(self, brdf_list, tid_list, pid_list, inverse = 0): 672 | dataList = [] 673 | 674 | self.lightCount = len(tid_list) * len(pid_list) 675 | self.brdfCube_Masked.mask = True 676 | 677 | for brdf in brdf_list: 678 | a, s, r = brdf 679 | self.brdfCube_Masked.mask[a, s, r, :] = False 680 | for t in tid_list: 681 | for p in pid_list: 682 | dataList.append('{}_{}_{}_{}_{}'.format(a, s, r, t, p)) 683 | 684 | if(inverse): 685 | self.dataList = list(set(self.fulldataList) - set(dataList)) 686 | self.dataSize = len(self.dataList) 687 | else: 688 | self.dataList = dataList 689 | self.dataSize = len(self.dataList) 690 | 691 | 692 | def buildSubDataset(self, aid_list, sid_list, rid_list, tid_list, pid_list, inverse = 0): 693 | dataList = [] 694 | a_min, a_max = min(aid_list), max(aid_list) 695 | s_min, s_max = min(sid_list), max(sid_list) 696 | r_min, r_max = min(rid_list), max(rid_list) 697 | if(a_max >= self.brdfCube.shape[0]): 698 | print('a error') 699 | aid_list = range(a_min, self.brdfCube.shape[0]) 700 | if(s_max >= self.brdfCube.shape[1]): 701 | print('s error') 702 | sid_list = range(s_min, self.brdfCube.shape[1]) 703 | if(r_max >= self.brdfCube.shape[2]): 704 | rint('r error') 705 | rid_list = range(r_min, self.brdfCube.shape[2]) 706 | 707 | self.lightCount = len(tid_list) * len(pid_list) 708 | self.brdfCube_Masked.mask = True 709 | for a in aid_list: 710 | for s in sid_list: 711 | for r in rid_list: 712 | self.brdfCube_Masked.mask[a, s, r, :] = False 713 | for t in tid_list: 714 | for p in pid_list: 715 | dataList.append('{}_{}_{}_{}_{}'.format(a, s, r, t, p)) 716 | 717 | if(inverse): 718 | self.dataList = list(set(self.fulldataList) - set(dataList)) 719 | self.dataSize = len(self.dataList) 720 | else: 721 | self.dataList = dataList 722 | self.dataSize = 
len(self.dataList) 723 | 724 | 725 | def normalizeDataSet(self): 726 | self.amean, self.astd = np.ma.mean(self.brdfCube_Masked[:,:,:,0]), np.ma.std(self.brdfCube_Masked[:,:,:,0]) 727 | self.smean, self.sstd = np.ma.mean(self.brdfCube_Masked[:,:,:,0]), np.ma.std(self.brdfCube_Masked[:,:,:,0]) 728 | self.rmean, self.rstd = np.ma.mean(self.brdfCube_Masked[:,:,:,0]), np.ma.std(self.brdfCube_Masked[:,:,:,0]) 729 | 730 | self.astd = 1 if self.astd == 0 else self.astd 731 | self.sstd = 1 if self.sstd == 0 else self.sstd 732 | self.rstd = 1 if self.rstd == 0 else self.rstd 733 | 734 | self.brdfCube_Masked[:,:,:,0] = (self.brdfCube_Masked[:,:,:,0] - self.amean) / self.astd 735 | self.brdfCube_Masked[:,:,:,1] = (self.brdfCube_Masked[:,:,:,1] - self.smean) / self.sstd 736 | self.brdfCube_Masked[:,:,:,2] = (self.brdfCube_Masked[:,:,:,2] - self.rmean) / self.rstd 737 | 738 | self.brdfCube[:,:,:,0] = (self.brdfCube[:,:,:,0] - self.amean) / self.astd 739 | self.brdfCube[:,:,:,1] = (self.brdfCube[:,:,:,1] - self.smean) / self.sstd 740 | self.brdfCube[:,:,:,2] = (self.brdfCube[:,:,:,2] - self.rmean) / self.rstd 741 | 742 | def shuffle(self, seed = []): 743 | if(seed == []): 744 | np.random.shuffle(self.dataList) 745 | else: 746 | np.random.seed(seed) 747 | np.random.shuffle(self.dataList) 748 | 749 | 750 | def GetItem(self, idx, color = False): 751 | aid, sid, rid, tid, pid = map(int, self.dataList[idx].split('_')) 752 | return self.GetItemByID(aid, sid, rid, tid, pid, color)#img, brdf 753 | 754 | def GetItemByID(self, aid, sid, rid, tid, pid, color = False): 755 | if(color): 756 | img = load_pfm(self.rootPath + r'/{}_{}_{}/{}_{}.pfm'.format(aid, sid, rid, tid, pid)).transpose((2,0,1)) 757 | else: 758 | img = load_pfm(self.rootPath + r'/{}_{}_{}/{}_{}.pfm'.format(aid, sid, rid, tid, pid)) 759 | if(len(img.shape) == 3): 760 | img = img[:,:,0] 761 | img = img[np.newaxis,:,:] 762 | brdf = self.brdfCube[aid, sid, rid].reshape((1,3,1,1))#np.array(map(float, 
self.gtlist[idx].strip().split(','))).reshape((1, 3, 1, 1)) 763 | return img, brdf 764 | 765 | def GetItemWithName(self, idx, color = False): 766 | img, brdf = self.GetItem(idx, color) 767 | name = map(int, self.dataList[idx].split('_')) 768 | 769 | return img, brdf, name 770 | 771 | def GetBatchWithName(self, start, n, color = False): 772 | if(color): 773 | dataBatch = np.zeros((n, 3, self.height, self.width)) 774 | else: 775 | dataBatch = np.zeros((n, 1, self.height, self.width)) 776 | brdfBatch = np.zeros((n, 3, 1, 1)) 777 | nameList = [] 778 | tmpSize = self.dataSize 779 | for i in range(0, n): 780 | idx = (start + i) % tmpSize 781 | dataBatch[i, :, :, :], brdfBatch[i, :, :, :], name = self.GetItemWithName(idx, color) 782 | nameList.append(name) 783 | 784 | return dataBatch, brdfBatch, nameList 785 | 786 | def GetBatch(self, start, n, color = False): 787 | if(color): 788 | dataBatch = np.zeros((n, 3, self.height, self.width)) 789 | else: 790 | dataBatch = np.zeros((n, 1, self.height, self.width)) 791 | brdfBatch = np.zeros((n, 3, 1, 1)) 792 | tmpSize = self.dataSize 793 | for i in range(0, n): 794 | idx = (start + i) % tmpSize 795 | dataBatch[i, :, :, :], brdfBatch[i, :, :, :] = self.GetItem(idx, color) 796 | 797 | return dataBatch, brdfBatch 798 | 799 | def GetNextBatch(self, n, color = False): 800 | dataBatch, brdfBatch = self.GetBatch(self.cursorPos, n, color) 801 | self.cursorPos = (self.cursorPos + n) % self.dataSize 802 | 803 | return dataBatch, brdfBatch --------------------------------------------------------------------------------