├── dog.png
├── FINAL.png
├── NOISY.png
├── basicdog.png
├── finaldog.png
├── noisydog.png
├── README.md
└── BM3D.py

/dog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/dog.png
--------------------------------------------------------------------------------
/FINAL.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/FINAL.png
--------------------------------------------------------------------------------
/NOISY.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/NOISY.png
--------------------------------------------------------------------------------
/basicdog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/basicdog.png
--------------------------------------------------------------------------------
/finaldog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/finaldog.png
--------------------------------------------------------------------------------
/noisydog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChihaoZhang/BM3D/HEAD/noisydog.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# BM3D
BM3D denoising method implementation in Python

*BM3D.py* is an implementation based on my understanding of the method proposed by K. Dabov *et al.* in 2007. For more information, please see
[Image denoising by sparse 3D transform-domain collaborative filtering](http://www.cs.tut.fi/~foi/GCF-BM3D/)
and
[An Analysis and Implementation of the BM3D Image Denoising Method](https://www.ipol.im/pub/art/2012/l-bm3d/).

## Result
The output images of my code and of the official MATLAB software are provided as PNG files for comparison, with PSNR used as the comparison criterion. My result is not as good as the official one, most likely because some steps of the method, such as the Wiener filtering, may not be implemented exactly as intended.

## Running time
The whole process takes about 25 minutes, which is much longer than the official code. Further work is needed to reduce the computational complexity.

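For reference, the PSNR used as the comparison criterion is the standard 8-bit definition; below is a minimal sketch mirroring `ComputePSNR` in *BM3D.py*:

```python
import numpy as np

def psnr(img1, img2):
    # peak signal-to-noise ratio between two same-sized 8-bit images, in dB
    rmse = np.sqrt(np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2))
    return 20 * np.log10(255.0 / rmse)
```
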
**Any suggestions on improving speed or final performance are welcome.** Feel free to contact me:

email: zhangchihao@zju.edu.cn

--------------------------------------------------------------------------------
/BM3D.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 09:28:17 2019

@author: Amos

Reference:
[1] Image denoising by sparse 3D transform-domain collaborative filtering
[2] An Analysis and Implementation of the BM3D Image Denoising Method
"""


import cv2
import time
import sys
from scipy.fftpack import dct, idct
import numpy as np


# ==================================================================================================
# Preprocessing
# ==================================================================================================

def AddNoise(Img, sigma):
    """
    Add white Gaussian noise with standard deviation *sigma* to an image.

    Return:
        noisy image (float array)
    """
    GaussNoise = np.random.normal(0, sigma, Img.shape)
    noisyImg = Img + GaussNoise  # float-type noisy image

    # Optionally, normalize the noisy image to [0, 255] and save it for inspection:
    # cv2.normalize(noisyImg, noisyImg, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    # cv2.imwrite('noisydog.png', noisyImg.astype(np.uint8))

    return noisyImg


def Initialization(Img, BlockSize, Kaiser_Window_beta):
    """
    Initialize the estimate image, the weight map and the Kaiser window.

    Return:
        InitImg & InitWeight: zero-valued arrays of Img.shape
        InitKaiser: (BlockSize x BlockSize) 2D Kaiser window
    """
    InitImg = np.zeros(Img.shape, dtype=float)
    InitWeight = np.zeros(Img.shape, dtype=float)
    Window = np.matrix(np.kaiser(BlockSize, Kaiser_Window_beta))
    InitKaiser = np.array(Window.T * Window)

    return InitImg, InitWeight, InitKaiser


def SearchWindow(Img, RefPoint, BlockSize, WindowSize):
    """
    Find the search window associated with the reference block in *Img*.

    Note that the reference block is not always at the centre of the search window because of
    the image border.

    Return:
        (2 x 2) array holding the left-top and right-bottom coordinates of the search window
    """
    if BlockSize >= WindowSize:
        print('ERROR: BlockSize must be smaller than WindowSize.\n')
        sys.exit()

    Margin = np.zeros((2, 2), dtype=int)
    Margin[0, 0] = max(0, RefPoint[0] + int((BlockSize - WindowSize) / 2))  # left-top x
    Margin[0, 1] = max(0, RefPoint[1] + int((BlockSize - WindowSize) / 2))  # left-top y
    Margin[1, 0] = Margin[0, 0] + WindowSize  # right-bottom x
    Margin[1, 1] = Margin[0, 1] + WindowSize  # right-bottom y

    if Margin[1, 0] >= Img.shape[0]:
        Margin[1, 0] = Img.shape[0] - 1
        Margin[0, 0] = Margin[1, 0] - WindowSize

    if Margin[1, 1] >= Img.shape[1]:
        Margin[1, 1] = Img.shape[1] - 1
        Margin[0, 1] = Margin[1, 1] - WindowSize

    return Margin

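# --------------------------------------------------------------------------------------------------
# Illustrative example (added for clarity, not part of the original code): how SearchWindow clamps
# the window at the image border. The 64 x 64 dummy image and the reference points are assumptions
# chosen only for this example.
#
#   >>> dummy = np.zeros((64, 64))
#   >>> SearchWindow(dummy, [0, 0], BlockSize=8, WindowSize=39)
#   array([[ 0,  0],
#          [39, 39]])    # window pushed inside the image, reference block sits at its corner
#   >>> SearchWindow(dummy, [30, 30], BlockSize=8, WindowSize=39)
#   array([[15, 15],
#          [54, 54]])    # reference block roughly centred in the 39 x 39 window
# --------------------------------------------------------------------------------------------------
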
def dct2D(A):
    """
    2D discrete cosine transform (DCT)
    """
    return dct(dct(A, axis=0, norm='ortho'), axis=1, norm='ortho')


def idct2D(A):
    """
    Inverse 2D discrete cosine transform
    """
    return idct(idct(A, axis=0, norm='ortho'), axis=1, norm='ortho')


def PreDCT(Img, BlockSize):
    """
    Apply the 2D DCT to every block of *Img* once in advance, so that the 2D transform does not
    have to be recomputed for every group.

    Return:
        BlockDCT_all: 4-dimensional array whose first two dimensions index the block position
                      (left-top corner) and whose last two hold the DCT coefficients of the block
    """
    BlockDCT_all = np.zeros((Img.shape[0] - BlockSize, Img.shape[1] - BlockSize, BlockSize, BlockSize),
                            dtype=float)

    for i in range(BlockDCT_all.shape[0]):
        for j in range(BlockDCT_all.shape[1]):
            Block = Img[i:i + BlockSize, j:j + BlockSize]
            BlockDCT_all[i, j, :, :] = dct2D(Block.astype(np.float64))

    return BlockDCT_all

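# --------------------------------------------------------------------------------------------------
# Illustrative note (added for clarity, not part of the original code): for an assumed 256 x 256
# input and the default BlockSize of 8, PreDCT returns a float64 cache of shape (248, 248, 8, 8),
# roughly 30 MB, which is the price paid for computing every block DCT only once.
#
#   >>> PreDCT(np.zeros((256, 256)), 8).shape
#   (248, 248, 8, 8)
# --------------------------------------------------------------------------------------------------
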
def ComputePSNR(Img1, Img2):
    """
    Compute the Peak Signal-to-Noise Ratio (PSNR) in decibels (dB).
    """
    if Img1.shape != Img2.shape:
        print('ERROR: the two images must have the same shape when computing PSNR.\n')
        sys.exit()

    Img1 = Img1.astype(np.float64)
    Img2 = Img2.astype(np.float64)
    RMSE = np.sqrt(np.sum((Img1 - Img2) ** 2) / Img1.size)

    return 20 * np.log10(255. / RMSE)


# ==================================================================================================
# Basic estimate
# ==================================================================================================

def Step1_Grouping(noisyImg, RefPoint, BlockDCT_all, BlockSize, ThreDist, MaxMatch, WindowSize):
    """
    Find the blocks similar to the reference one in *noisyImg*, using the pre-computed DCTs in
    *BlockDCT_all*.

    Note that the distance is computed as in the original paper rather than as in the analysis
    paper.

    Return:
        BlockPos: array of block positions (left-top points)
        BlockGroup: 3-dimensional array whose last two dimensions hold the DCT coefficients of
                    each grouped block
    """
    # initialization
    WindowLoc = SearchWindow(noisyImg, RefPoint, BlockSize, WindowSize)
    Block_Num_Searched = (WindowSize - BlockSize + 1) ** 2  # number of searched blocks
    BlockPos = np.zeros((Block_Num_Searched, 2), dtype=int)
    BlockGroup = np.zeros((Block_Num_Searched, BlockSize, BlockSize), dtype=float)
    Dist = np.zeros(Block_Num_Searched, dtype=float)
    RefDCT = BlockDCT_all[RefPoint[0], RefPoint[1], :, :]
    match_cnt = 0

    # block searching and similarity (distance) computing
    for i in range(WindowSize - BlockSize + 1):
        for j in range(WindowSize - BlockSize + 1):
            SearchedDCT = BlockDCT_all[WindowLoc[0, 0] + i, WindowLoc[0, 1] + j, :, :]
            dist = Step1_ComputeDist(RefDCT, SearchedDCT)

            if dist < ThreDist:
                BlockPos[match_cnt, :] = [WindowLoc[0, 0] + i, WindowLoc[0, 1] + j]
                BlockGroup[match_cnt, :, :] = SearchedDCT
                Dist[match_cnt] = dist
                match_cnt += 1

    # if match_cnt == 1:
    #     print('WARNING: no similar blocks found for the reference block {} in basic estimate.\n'
    #           .format(RefPoint))

    if match_cnt <= MaxMatch:
        # no more than MaxMatch similar blocks found: return all of them
        BlockPos = BlockPos[:match_cnt, :]
        BlockGroup = BlockGroup[:match_cnt, :, :]
    else:
        # more than MaxMatch similar blocks found: keep the MaxMatch most similar ones
        idx = np.argpartition(Dist[:match_cnt], MaxMatch)  # indices of the MaxMatch smallest distances
        BlockPos = BlockPos[idx[:MaxMatch], :]
        BlockGroup = BlockGroup[idx[:MaxMatch], :]

    return BlockPos, BlockGroup


def Step1_ComputeDist(BlockDCT1, BlockDCT2):
    """
    Compute the distance between two DCT coefficient arrays *BlockDCT1* and *BlockDCT2*.
    """
    if BlockDCT1.shape != BlockDCT2.shape:
        print('ERROR: the two DCT blocks must have the same shape in step 1 distance computing.\n')
        sys.exit()
    elif BlockDCT1.shape[0] != BlockDCT1.shape[1]:
        print('ERROR: the DCT block must be square in step 1 distance computing.\n')
        sys.exit()

    BlockSize = BlockDCT1.shape[0]

    if sigma > 40:
        # coarse pre-thresholding of the coefficients for strong noise, as in the original paper
        ThreValue = lamb2d * sigma
        BlockDCT1 = np.where(abs(BlockDCT1) < ThreValue, 0, BlockDCT1)
        BlockDCT2 = np.where(abs(BlockDCT2) < ThreValue, 0, BlockDCT2)

    return np.linalg.norm(BlockDCT1 - BlockDCT2) ** 2 / (BlockSize ** 2)

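# --------------------------------------------------------------------------------------------------
# Illustrative example (added for clarity, not part of the original code): with the default
# sigma = 25 set in the __main__ block, the Step-1 distance is simply the squared L2 norm of the
# coefficient difference normalized by the block area, e.g.
#
#   >>> Step1_ComputeDist(np.ones((8, 8)), np.zeros((8, 8)))
#   1.0
#
# Only for sigma > 40 are both blocks hard-thresholded with lamb2d * sigma before the distance is
# taken.
# --------------------------------------------------------------------------------------------------
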
def Step1_3DFiltering(BlockGroup):
    """
    Collaborative hard-thresholding: 3D transform, noise attenuation through hard-thresholding,
    and inverse 3D transform.

    Return:
        BlockGroup and the number of retained (non-zero) coefficients
    """
    ThreValue = lamb3d * sigma
    nonzero_cnt = 0

    # Since the 2D transform has already been applied (PreDCT), only the 1D transform along the
    # stacking dimension, the hard-thresholding and the inverse 1D transform are done here; the
    # inverse 2D transform is left to the aggregation step.
    for i in range(BlockGroup.shape[1]):
        for j in range(BlockGroup.shape[2]):
            ThirdVector = dct(BlockGroup[:, i, j], norm='ortho')  # 1D DCT along the group
            ThirdVector[abs(ThirdVector[:]) < ThreValue] = 0.
            nonzero_cnt += np.nonzero(ThirdVector)[0].size
            BlockGroup[:, i, j] = list(idct(ThirdVector, norm='ortho'))

    return BlockGroup, nonzero_cnt


def Step1_Aggregation(BlockGroup, BlockPos, basicImg, basicWeight, basicKaiser, nonzero_cnt):
    """
    Accumulate the basic estimate of the true image by weighted averaging of all the overlapping
    block-wise estimates.

    Note that the weight is set according to the original paper rather than the BM3D analysis one.
    """
    if nonzero_cnt < 1:
        BlockWeight = 1.0 * basicKaiser
    else:
        BlockWeight = (1. / (sigma ** 2 * nonzero_cnt)) * basicKaiser

    for i in range(BlockPos.shape[0]):
        basicImg[BlockPos[i, 0]:BlockPos[i, 0] + BlockGroup.shape[1],
                 BlockPos[i, 1]:BlockPos[i, 1] + BlockGroup.shape[2]] \
            += BlockWeight * idct2D(BlockGroup[i, :, :])
        basicWeight[BlockPos[i, 0]:BlockPos[i, 0] + BlockGroup.shape[1],
                    BlockPos[i, 1]:BlockPos[i, 1] + BlockGroup.shape[2]] += BlockWeight


def BM3D_Step1(noisyImg):
    """
    Produce the basic estimate through grouping, collaborative hard-thresholding and aggregation.

    Return:
        basic estimate basicImg
    """
    # parameters setting
    BlockSize = Step1_BlockSize
    ThreDist = Step1_ThreDist
    MaxMatch = Step1_MaxMatch
    WindowSize = Step1_WindowSize
    spdup_factor = Step1_spdup_factor

    basicImg, basicWeight, basicKaiser = Initialization(noisyImg, BlockSize, Kaiser_Window_beta)
    BlockDCT_all = PreDCT(noisyImg, BlockSize)

    # block-wise estimate with the speed-up factor
    for i in range(int((noisyImg.shape[0] - BlockSize) / spdup_factor) + 2):
        for j in range(int((noisyImg.shape[1] - BlockSize) / spdup_factor) + 2):
            RefPoint = [min(spdup_factor * i, noisyImg.shape[0] - BlockSize - 1),
                        min(spdup_factor * j, noisyImg.shape[1] - BlockSize - 1)]
            BlockPos, BlockGroup = Step1_Grouping(noisyImg, RefPoint, BlockDCT_all, BlockSize,
                                                  ThreDist, MaxMatch, WindowSize)
            BlockGroup, nonzero_cnt = Step1_3DFiltering(BlockGroup)
            Step1_Aggregation(BlockGroup, BlockPos, basicImg, basicWeight, basicKaiser, nonzero_cnt)

    basicWeight = np.where(basicWeight == 0, 1, basicWeight)
    basicImg[:, :] /= basicWeight[:, :]

    return basicImg

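# --------------------------------------------------------------------------------------------------
# Summary note (added for clarity, not part of the original code): after Step 1 every pixel x of the
# basic estimate is a weighted average of all block-wise estimates that cover it,
#
#     basicImg[x] = sum_groups( w_group * kaiser * est_group[x] ) / sum_groups( w_group * kaiser )
#
# where w_group = 1 / (sigma^2 * nonzero_cnt) as in the original paper, so groups whose 3D spectrum
# is sparser after hard-thresholding (fewer retained coefficients) contribute with a larger weight.
# --------------------------------------------------------------------------------------------------
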
# ==================================================================================================
# Final estimate
# ==================================================================================================

def Step2_Grouping(basicImg, noisyImg, RefPoint, BlockSize, ThreDist, MaxMatch, WindowSize,
                   BlockDCT_basic, BlockDCT_noisy):
    """
    Similar to Step1_Grouping, but the blocks similar to the reference one are searched in
    *basicImg*.

    Return:
        BlockPos: array of the similar blocks' positions (left-top points)
        BlockGroup_basic: 3-dimensional array holding the 2D DCTs of the blocks from *basicImg*
                          that are similar to the reference one
        BlockGroup_noisy: the corresponding stacked DCT blocks from *noisyImg*
    """
    # initialization (same as Step 1)
    WindowLoc = SearchWindow(basicImg, RefPoint, BlockSize, WindowSize)
    Block_Num_Searched = (WindowSize - BlockSize + 1) ** 2
    BlockPos = np.zeros((Block_Num_Searched, 2), dtype=int)
    BlockGroup_basic = np.zeros((Block_Num_Searched, BlockSize, BlockSize), dtype=float)
    BlockGroup_noisy = np.zeros((Block_Num_Searched, BlockSize, BlockSize), dtype=float)
    Dist = np.zeros(Block_Num_Searched, dtype=float)
    match_cnt = 0

    # block searching and similarity (distance) computing
    # Note that the distance is computed differently from Step 1: in the pixel domain of basicImg
    for i in range(WindowSize - BlockSize + 1):
        for j in range(WindowSize - BlockSize + 1):
            SearchedPoint = [WindowLoc[0, 0] + i, WindowLoc[0, 1] + j]
            dist = Step2_ComputeDist(basicImg, RefPoint, SearchedPoint, BlockSize)

            if dist < ThreDist:
                BlockPos[match_cnt, :] = SearchedPoint
                Dist[match_cnt] = dist
                match_cnt += 1

    # if match_cnt == 1:
    #     print('WARNING: no similar blocks found for the reference block {} in final estimate.\n'
    #           .format(RefPoint))

    if match_cnt <= MaxMatch:
        # no more than MaxMatch similar blocks found: return all of them
        BlockPos = BlockPos[:match_cnt, :]
    else:
        # more than MaxMatch similar blocks found: keep the MaxMatch most similar ones
        idx = np.argpartition(Dist[:match_cnt], MaxMatch)  # indices of the MaxMatch smallest distances
        BlockPos = BlockPos[idx[:MaxMatch], :]

    for i in range(BlockPos.shape[0]):
        SimilarPoint = BlockPos[i, :]
        BlockGroup_basic[i, :, :] = BlockDCT_basic[SimilarPoint[0], SimilarPoint[1], :, :]
        BlockGroup_noisy[i, :, :] = BlockDCT_noisy[SimilarPoint[0], SimilarPoint[1], :, :]

    BlockGroup_basic = BlockGroup_basic[:BlockPos.shape[0], :, :]
    BlockGroup_noisy = BlockGroup_noisy[:BlockPos.shape[0], :, :]

    return BlockPos, BlockGroup_basic, BlockGroup_noisy


def Step2_ComputeDist(img, Point1, Point2, BlockSize):
    """
    Compute the distance between the two blocks of *img* whose left-top corners are *Point1* and
    *Point2*.
    """
    Block1 = (img[Point1[0]:Point1[0] + BlockSize, Point1[1]:Point1[1] + BlockSize]).astype(np.float64)
    Block2 = (img[Point2[0]:Point2[0] + BlockSize, Point2[1]:Point2[1] + BlockSize]).astype(np.float64)

    return np.linalg.norm(Block1 - Block2) ** 2 / (BlockSize ** 2)

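# --------------------------------------------------------------------------------------------------
# Illustrative note (added for clarity, not part of the original code): Step2_3DFiltering below
# shrinks every coefficient of the noisy group's 3D spectrum by the empirical Wiener factor
#
#     W = (B^2 / N) / (B^2 / N + sigma^2)
#
# where B is the corresponding coefficient of the basic estimate's spectrum and N the number of
# blocks in the group. For example, with a single block in the group and a basic coefficient equal
# to 3 * sigma, the noisy coefficient is kept at 9*sigma^2 / (9*sigma^2 + sigma^2) = 90 % of its
# value.
# --------------------------------------------------------------------------------------------------
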
def Step2_3DFiltering(BlockGroup_basic, BlockGroup_noisy):
    """
    Collaborative Wiener filtering. The 3D transform is again 2D DCT + 1D DCT, the same as in the
    hard-thresholding step.

    Note that the Wiener weight is set according to the BM3D analysis paper rather than the
    original one.

    Return:
        BlockGroup_noisy & WienerWeight
    """
    Weight = 0
    coef = 1.0 / BlockGroup_noisy.shape[0]

    for i in range(BlockGroup_noisy.shape[1]):
        for j in range(BlockGroup_noisy.shape[2]):
            Vec_basic = dct(BlockGroup_basic[:, i, j], norm='ortho')
            Vec_noisy = dct(BlockGroup_noisy[:, i, j], norm='ortho')

            Vec_value = Vec_basic ** 2 * coef
            Vec_value /= (Vec_value + sigma ** 2)  # per-coefficient Wiener shrinkage factor
            Vec_noisy *= Vec_value
            Weight += np.sum(Vec_value)

            # element-wise version of the vectorized code above:
            # for k in range(BlockGroup_noisy.shape[0]):
            #     Value = Vec_basic[k]**2 * coef
            #     Value /= (Value + sigma**2)  # pixel weight
            #     Vec_noisy[k] = Vec_noisy[k] * Value
            #     Weight += Value

            BlockGroup_noisy[:, i, j] = list(idct(Vec_noisy, norm='ortho'))

    if Weight > 0:
        WienerWeight = 1. / (sigma ** 2 * Weight)
    else:
        WienerWeight = 1.0

    return BlockGroup_noisy, WienerWeight


def Step2_Aggregation(BlockGroup_noisy, WienerWeight, BlockPos, finalImg, finalWeight, finalKaiser):
    """
    Accumulate the final estimate of the true image by aggregating all the obtained local
    estimates with a weighted average.
    """
    BlockWeight = WienerWeight * finalKaiser

    for i in range(BlockPos.shape[0]):
        finalImg[BlockPos[i, 0]:BlockPos[i, 0] + BlockGroup_noisy.shape[1],
                 BlockPos[i, 1]:BlockPos[i, 1] + BlockGroup_noisy.shape[2]] \
            += BlockWeight * idct2D(BlockGroup_noisy[i, :, :])
        finalWeight[BlockPos[i, 0]:BlockPos[i, 0] + BlockGroup_noisy.shape[1],
                    BlockPos[i, 1]:BlockPos[i, 1] + BlockGroup_noisy.shape[2]] += BlockWeight


def BM3D_Step2(basicImg, noisyImg):
    """
    Produce the final estimate through grouping, collaborative Wiener filtering and aggregation.

    Return:
        final estimate finalImg
    """
    # parameters setting
    BlockSize = Step2_BlockSize
    ThreDist = Step2_ThreDist
    MaxMatch = Step2_MaxMatch
    WindowSize = Step2_WindowSize
    spdup_factor = Step2_spdup_factor

    finalImg, finalWeight, finalKaiser = Initialization(basicImg, BlockSize, Kaiser_Window_beta)
    BlockDCT_noisy = PreDCT(noisyImg, BlockSize)
    BlockDCT_basic = PreDCT(basicImg, BlockSize)

    # block-wise estimate with the speed-up factor
    for i in range(int((basicImg.shape[0] - BlockSize) / spdup_factor) + 2):
        for j in range(int((basicImg.shape[1] - BlockSize) / spdup_factor) + 2):
            RefPoint = [min(spdup_factor * i, basicImg.shape[0] - BlockSize - 1),
                        min(spdup_factor * j, basicImg.shape[1] - BlockSize - 1)]
            BlockPos, BlockGroup_basic, BlockGroup_noisy = Step2_Grouping(basicImg, noisyImg,
                                                                          RefPoint, BlockSize,
                                                                          ThreDist, MaxMatch,
                                                                          WindowSize,
                                                                          BlockDCT_basic,
                                                                          BlockDCT_noisy)
            BlockGroup_noisy, WienerWeight = Step2_3DFiltering(BlockGroup_basic, BlockGroup_noisy)
            Step2_Aggregation(BlockGroup_noisy, WienerWeight, BlockPos, finalImg, finalWeight,
                              finalKaiser)

    finalWeight = np.where(finalWeight == 0, 1, finalWeight)
    finalImg[:, :] /= finalWeight[:, :]

    return finalImg

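# --------------------------------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original code): the functions above read
# sigma, lamb2d, lamb3d, Kaiser_Window_beta and the Step1_* / Step2_* parameters as module-level
# globals, which the __main__ block below defines. To reuse the pipeline from another script one
# could set those globals first; a hypothetical snippet:
#
#   import BM3D
#   BM3D.sigma, BM3D.lamb2d, BM3D.lamb3d, BM3D.Kaiser_Window_beta = 25, 2.0, 2.7, 2.0
#   BM3D.Step1_ThreDist, BM3D.Step1_MaxMatch, BM3D.Step1_BlockSize = 2500, 16, 8
#   BM3D.Step1_spdup_factor, BM3D.Step1_WindowSize = 3, 39
#   BM3D.Step2_ThreDist, BM3D.Step2_MaxMatch, BM3D.Step2_BlockSize = 400, 32, 8
#   BM3D.Step2_spdup_factor, BM3D.Step2_WindowSize = 3, 39
#   basic = BM3D.BM3D_Step1(noisy)            # noisy: 2-D grayscale float array
#   final = BM3D.BM3D_Step2(basic, noisy)
#
# Importing the module does not execute the __main__ block, so all parameters must be provided
# explicitly in this case.
# --------------------------------------------------------------------------------------------------
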
# ==================================================================================================
# main
# ==================================================================================================

if __name__ == '__main__':

    cv2.setUseOptimized(True)

    img = cv2.imread('dog.png')

    if img is None:
        print('ERROR: dog.png cannot be read.\n')
        sys.exit()

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # ================================= Parameters initialization =================================

    sigma = 25                 # standard deviation of the added Gaussian noise

    lamb2d = 2.0
    lamb3d = 2.7

    Step1_ThreDist = 2500      # threshold distance
    Step1_MaxMatch = 16        # maximum number of matched blocks
    Step1_BlockSize = 8
    Step1_spdup_factor = 3     # pixel jump between consecutive reference blocks
    Step1_WindowSize = 39      # search window size

    Step2_ThreDist = 400
    Step2_MaxMatch = 32
    Step2_BlockSize = 8
    Step2_spdup_factor = 3
    Step2_WindowSize = 39

    Kaiser_Window_beta = 2.0
    # ==============================================================================================

    # =========================================== BM3D ============================================

    noisy_img = AddNoise(img, sigma)

    start_time = time.time()

    basic_img = BM3D_Step1(noisy_img)
    basic_PSNR = ComputePSNR(img, basic_img)
    print('The PSNR of the basic estimate is {} dB.\n'.format(basic_PSNR))

    basic_img_uint = np.zeros(img.shape)
    cv2.normalize(basic_img, basic_img_uint, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    basic_img_uint = basic_img_uint.astype(np.uint8)

    if cv2.imwrite('basicdog.png', basic_img_uint):
        print('Basic estimate has been saved successfully.\n')
        step1_time = time.time()
        print('The running time of the basic estimate is', step1_time - start_time, 'seconds.\n')
    else:
        print('ERROR: the basic estimate could not be saved.\n')
        sys.exit()

    final_img = BM3D_Step2(basic_img, noisy_img)
    final_PSNR = ComputePSNR(img, final_img)
    print('The PSNR of the final estimate is {} dB.\n'.format(final_PSNR))

    cv2.normalize(final_img, final_img, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    final_img = final_img.astype(np.uint8)

    if cv2.imwrite('finaldog.png', final_img):
        print('Final estimate has been saved successfully.\n')
        step2_time = time.time()
        print('The running time of the final estimate is', step2_time - step1_time, 'seconds.\n')
    else:
        print('ERROR: the final estimate could not be saved.\n')
        sys.exit()
--------------------------------------------------------------------------------