├── README.md
├── landweber.py
├── landweber_torch_opt.py
├── richardsonlucy.py
└── richardsonlucy_tv.py

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Image deconvolution algorithms
Implementation of image deconvolution algorithms using `torch`.

Input image values must lie in the range [0, 1], and the PSF elements must sum to 1 (see the Usage section below).

## Overview

☑️ Richardson-Lucy iteration [1,2]
☑️ Richardson-Lucy iteration with Total Variation (TV) [7]
☑️ Landweber iteration [3]
❌ Wiener filter [4]
❌ Split Bregman method [5]
❌ ADMM [6]
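
## Usage

A minimal sketch of the expected inputs, assuming the repository files are importable as modules (the toy shapes and the box PSF are illustrative, not part of the repository; `device` is the module-level variable defined in `richardsonlucy.py`):

```python
import torch
from richardsonlucy import device, torch_richardson_lucy

# Blurred input in NCHW format with values in [0, 1].
image = torch.rand(1, 1, 64, 64).to(device)

# 5x5 box PSF in NCHW format; its elements sum to 1.
psf = (torch.ones(1, 1, 5, 5) / 25.0).to(device)

deconvolved = torch_richardson_lucy(image, psf, num_iter=50)
```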

## Future updates

⚠️ Remove `device` variable
⚠️ Remove additional padding

## References

[1] William Hadley Richardson, "Bayesian-Based Iterative Method of Image Restoration," J. Opt. Soc. Am. 62, 55-59 (1972).

[2] Lucy, L. B., "An iterative technique for the rectification of observed distributions," The Astronomical Journal, vol. 79, p. 745 (1974). doi:10.1086/111605.

[3] Landweber, L., "An iteration formula for Fredholm integral equations of the first kind," Amer. J. Math. 73, 615–624 (1951).

[4] Wiener, Norbert, et al., "Extrapolation, Interpolation, and Smoothing of Stationary Time Series: With Engineering Applications," Vol. 113, No. 21. Cambridge, MA: MIT Press (1949).

[5] Goldstein, Tom, and Stanley Osher. "The split Bregman method for L1-regularized problems." SIAM Journal on Imaging Sciences 2.2 (2009): 323-343.

[6] Afonso, Manya V., José M. Bioucas-Dias, and Mário A. T. Figueiredo. "An augmented Lagrangian approach to the constrained optimization formulation of imaging inverse problems." IEEE Transactions on Image Processing 20.3 (2010): 681-695.

[7] Dey, Nicolas, et al. "Richardson–Lucy algorithm with total variation regularization for 3D confocal microscope deconvolution." Microscopy Research and Technique 69.4 (2006): 260-266.
--------------------------------------------------------------------------------
/landweber.py:
--------------------------------------------------------------------------------
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def torch_landweber(image, psf, num_iter=50, lam=0.7):
    """
    image: 4-dimensional input, NCHW format
    psf: 4-dimensional input, NCHW format
    """

    # Reflect-pad the image so the iterations do not wrap around the borders.
    pad = psf.shape[-1]//2 + 1
    image = torch.nn.functional.pad(image, (pad, pad, pad, pad), mode='reflect')

    im_deconv = torch.full(image.shape, 0.5).to(device)

    for _ in range(num_iter):
        # Residual between the observation and the current estimate blurred by the PSF.
        conv = torch.conv2d(im_deconv, psf, stride=1, padding=psf.shape[-1]//2)
        res = image - conv
        # Correlate the residual with the spatially flipped PSF (the adjoint of the blur).
        conv2 = torch.conv2d(res, torch.flip(psf, (-2, -1)), stride=1, padding=psf.shape[-1]//2)
        # Landweber update: step towards the data along the adjoint of the residual.
        im_deconv = im_deconv + lam * conv2
        im_deconv = torch.clip(im_deconv, -1, 1)

    return im_deconv[:, :, pad:-pad, pad:-pad]

--------------------------------------------------------------------------------
/landweber_torch_opt.py:
--------------------------------------------------------------------------------
import torch

def landweber_torch_opt(y, k, num_iter=50, lam=0.7):
    """
    y: blurred image, 4-dimensional input, NCHW format
    k: PSF, 4-dimensional input, NCHW format
    """

    xi = torch.nn.Parameter(torch.full_like(y, 0.5))

    # Plain SGD with a fixed learning rate on the least-squares loss
    # reproduces the Landweber update.
    optimizer = torch.optim.SGD([xi], lr=lam)

    for _ in range(num_iter):
        optimizer.zero_grad()

        y_out = torch.conv2d(xi, k, stride=1, padding='same')
        loss = torch.sum(torch.pow(y_out - y, 2)) / 2

        loss.backward()
        optimizer.step()

    return xi.detach()
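
`landweber_torch_opt` arrives at the same update as the explicit loop in `landweber.py`: the gradient of the least-squares loss `0.5 * ||conv2d(x, k) - y||^2` is the residual correlated with the spatially flipped PSF, so one SGD step with learning rate `lam` is exactly one Landweber step. A small self-contained check of that gradient identity (the toy tensors below are illustrative assumptions, not repository code):

```python
import torch

torch.manual_seed(0)

# Toy "blurred" observation y and a 5x5 box PSF k whose elements sum to 1.
y = torch.rand(1, 1, 32, 32)
k = torch.ones(1, 1, 5, 5) / 25.0

x = torch.full((1, 1, 32, 32), 0.5, requires_grad=True)

# Loss minimised by landweber_torch_opt: 1/2 * ||k * x - y||^2
loss = 0.5 * torch.sum((torch.conv2d(x, k, padding='same') - y) ** 2)
loss.backward()

# Explicit Landweber direction: the residual correlated with the flipped PSF,
# i.e. the adjoint of the blur applied to (k * x - y).
residual = torch.conv2d(x.detach(), k, padding='same') - y
explicit_grad = torch.conv2d(residual, torch.flip(k, (-2, -1)), padding='same')

print(torch.allclose(x.grad, explicit_grad, atol=1e-5))  # expected: True
```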
--------------------------------------------------------------------------------
/richardsonlucy.py:
--------------------------------------------------------------------------------
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def torch_richardson_lucy(image, psf, num_iter=50):
    """
    image: 4-dimensional input, NCHW format
    psf: 4-dimensional input, NCHW format
    """

    # Reflect-pad the image so the iterations do not wrap around the borders.
    pad = psf.shape[-1]//2 + 1
    image = torch.nn.functional.pad(image, (pad, pad, pad, pad), mode='reflect')

    im_deconv = torch.full(image.shape, 0.5).to(device)
    psf_mirror = torch.flip(psf, (-2, -1))

    eps = 1e-12

    for _ in range(num_iter):
        # Blur the current estimate and compare it with the observation.
        conv = torch.conv2d(im_deconv, psf, stride=1, padding=psf.shape[-1]//2) + eps
        relative_blur = image / conv
        # Multiplicative Richardson-Lucy update.
        im_deconv *= torch.conv2d(relative_blur, psf_mirror, stride=1, padding=psf.shape[-1]//2) + eps
        im_deconv = torch.clip(im_deconv, -1, 1)

    return im_deconv[:, :, pad:-pad, pad:-pad]

--------------------------------------------------------------------------------
/richardsonlucy_tv.py:
--------------------------------------------------------------------------------
import torch

# Richardson-Lucy with total variation (TV) regularisation (Dey et al. [7])
def torch_richardson_lucy_tv(image, psf, num_iter=10, lam=2e-2):
    """
    image: 4-dimensional input, NCHW format (assumes batch size 1 and a single channel)
    psf: 4-dimensional input, NCHW format
    """

    im_deconv = torch.full(image.shape, 0.5)
    psf_mirror = torch.flip(psf, (-2, -1))

    eps = 1e-12
    reg = 1

    for _ in range(num_iter):
        # TV regularisation term: 1 / (1 - lam * div(grad f / |grad f|))
        if lam > 0:
            grad_torch = torch.gradient(im_deconv[0, 0], dim=(0, 1))
            norm_torch = torch.sqrt(torch.square(grad_torch[0]) + torch.square(grad_torch[1])) + eps
            grad_torch = torch.stack(grad_torch) / norm_torch
            div_torch = torch.gradient(grad_torch[0], dim=0)[0] + torch.gradient(grad_torch[1], dim=1)[0]
            reg = 1 / (1 - div_torch * lam)

        # Multiplicative Richardson-Lucy update, scaled by the TV term.
        conv = torch.conv2d(im_deconv, psf, stride=1, padding='same') + eps
        relative_blur = image / conv
        im_deconv *= (torch.conv2d(relative_blur, psf_mirror, stride=1, padding='same') + eps) * reg
        im_deconv = torch.clip(im_deconv, -1, 1)

    return im_deconv
--------------------------------------------------------------------------------
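
For completeness, a minimal sketch of calling the TV-regularised routine (the Gaussian PSF construction and the image shape below are illustrative assumptions, not repository code):

```python
import torch
from richardsonlucy_tv import torch_richardson_lucy_tv

# Illustrative 1x1xHxW input with values in [0, 1].
image = torch.rand(1, 1, 64, 64)

# Separable 9x9 Gaussian PSF, normalised so its elements sum to 1.
coords = torch.arange(9, dtype=torch.float32) - 4
g = torch.exp(-coords**2 / (2 * 1.5**2))
psf = (g[:, None] * g[None, :]).reshape(1, 1, 9, 9)
psf /= psf.sum()

restored = torch_richardson_lucy_tv(image, psf, num_iter=10, lam=2e-2)
print(restored.shape)  # torch.Size([1, 1, 64, 64])
```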