├── bin
│   ├── PETproj3d.p
│   ├── vcomp100.dll
│   ├── vcomp100d.dll
│   ├── PETbackproj3d.p
│   ├── projectionC3d.mexa64
│   ├── projectionC3d.mexw64
│   ├── backprojectionC3d.mexa64
│   ├── backprojectionC3d.mexw64
│   ├── quadratic3D.m
│   ├── DnCNN_prior.m
│   ├── LLF3d.m
│   ├── FAST_NLM_3d.m
│   └── DnCNN_prior_grad.m
├── Data
│   └── Download_files.txt
├── DnCNN_6ds_iter_100000.caffemodel
├── DnCNN_6ds_iter_100000.solverstate
├── Demo_OPOSEM.m
├── ParamSetting.m
├── Demo_OSSART_QuadraticPenalty.m
├── Demo_OS_SQS_Nonlocalmeans.m
├── Important_Readme.txt
├── Demo_OS_SQS_DnCNN_LLF_Proposed.m
├── README.md
└── DnCNN_deploy_test.prototxt

/bin/PETproj3d.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/PETproj3d.p
--------------------------------------------------------------------------------
/bin/vcomp100.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/vcomp100.dll
--------------------------------------------------------------------------------
/bin/vcomp100d.dll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/vcomp100d.dll
--------------------------------------------------------------------------------
/bin/PETbackproj3d.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/PETbackproj3d.p
--------------------------------------------------------------------------------
/Data/Download_files.txt:
--------------------------------------------------------------------------------
1 | https://www.dropbox.com/sh/33kqnvbbclhvscr/AACAj0_qmCZby_yjKZjuCdLia?dl=0
--------------------------------------------------------------------------------
/bin/projectionC3d.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/projectionC3d.mexa64
--------------------------------------------------------------------------------
/bin/projectionC3d.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/projectionC3d.mexw64
--------------------------------------------------------------------------------
/bin/backprojectionC3d.mexa64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/backprojectionC3d.mexa64
--------------------------------------------------------------------------------
/bin/backprojectionC3d.mexw64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/bin/backprojectionC3d.mexw64
--------------------------------------------------------------------------------
/DnCNN_6ds_iter_100000.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/DnCNN_6ds_iter_100000.caffemodel
--------------------------------------------------------------------------------
/DnCNN_6ds_iter_100000.solverstate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kssigari/LLF/HEAD/DnCNN_6ds_iter_100000.solverstate
--------------------------------------------------------------------------------
/bin/quadratic3D.m:
-------------------------------------------------------------------------------- 1 | function [ Nuimg, Deimg ] = quadratic3D( img, Param, k ) 2 | 3 | 4 | img = (single(img)); 5 | 6 | [sx, sy, sz] = size(img); 7 | 8 | tmp = (zeros(sx+2*k, sy+2*k, sz+2*k,'single')); 9 | tmp(k+1:sx+k,k+1:sy+k,k+1:sz+k) = img; 10 | 11 | Nuimg = (zeros(sx,sy,sz,'single')); 12 | Deimg = (zeros(sx,sy,sz,'single')); 13 | 14 | for iz = -k:k 15 | for iy = -k:k 16 | for ix = -k:k 17 | if (ix ~=0 || iy~=0 || iz~=0) 18 | Nuimg = Nuimg + (img - tmp(k+1+ix:k+sx+ix,k+1+iy:k+sy+iy,k+1+iz:k+sz+iz))/(sqrt(ix*Param.dx*ix*Param.dx + iy*Param.dy*iy*Param.dy + iz*Param.dz*iz*Param.dz)); 19 | Deimg = Deimg + 2/(sqrt(ix*Param.dx*ix*Param.dx + iy*Param.dy*iy*Param.dy + iz*Param.dz*iz*Param.dz)); 20 | end 21 | end 22 | end 23 | end 24 | 25 | Nuimg = (Nuimg)./((2*k+1)^3-1); 26 | Deimg = (Deimg)./((2*k+1)^3-1); 27 | 28 | 29 | 30 | end 31 | 32 | -------------------------------------------------------------------------------- /bin/DnCNN_prior.m: -------------------------------------------------------------------------------- 1 | function [ img_denoised ] = DnCNN_prior( img ) 2 | %DnCNN Summary of this function goes here 3 | % Detailed explanation goes here 4 | addpath(genpath('C:/caffe/caffe-master/matlab')); 5 | 6 | % Please change if you use CPU 7 | caffe.set_mode_gpu(); 8 | gpu_id = 0; 9 | caffe.set_device(gpu_id); 10 | 11 | net_weights = ['DnCNN_6ds_iter_100000.caffemodel']; 12 | net_model = ['DnCNN_deploy_test.prototxt']; 13 | net = caffe.Net(net_model, net_weights, 'test'); 14 | 15 | [nx, ny, nz] = size(img); 16 | 17 | nslice = 5; 18 | hslice = round((nslice-1)/2); 19 | 20 | img_proposed3d = zeros(nx, ny, nz, 'single'); 21 | weight_overlap = zeros(1, nz, 'single'); 22 | 23 | img_denoised = zeros(nx, ny, nz, 'single'); 24 | 25 | Scaling = 100./mean(img(img>0)); % we have trained with this scale 26 | 27 | img = img*Scaling; 28 | 29 | for iz = 1+hslice:1:nz-hslice 30 | 31 | img_noise = img(:,:,iz-hslice:iz+hslice); 32 | testpatch(:,:,1:5) = img_noise; 33 | tmp = net.forward({testpatch}); 34 | 35 | denoisedimg = tmp{1}(:,:,:); 36 | img_proposed3d(:,:,iz-hslice:iz+hslice) = img_proposed3d(:,:,iz-hslice:iz+hslice) + max(denoisedimg,0); 37 | weight_overlap(iz-hslice:iz+hslice) = weight_overlap(iz-hslice:iz+hslice)+1; 38 | end 39 | 40 | for iz = 1:nz 41 | img_denoised(:,:,iz) = img_proposed3d(:,:,iz)./weight_overlap(iz); 42 | end 43 | img_denoised(isnan(img_denoised)) = 0; 44 | 45 | img_denoised = img_denoised./Scaling; 46 | 47 | end 48 | 49 | -------------------------------------------------------------------------------- /Demo_OPOSEM.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear all; 3 | addpath('bin'); 4 | ParamSetting; 5 | 6 | %% 7 | 8 | load('Data/SinoFDGfull.mat'); 9 | Downsamplingfactor = 1; % 1 (full), 4, 6, 8, 10 10 | 11 | if Downsamplingfactor>1 12 | load(['Data/SinoDS',num2str(Downsamplingfactor),'.mat']); 13 | Sino = SinoDS; 14 | else 15 | Sino = PromptSino.*AttSino; 16 | end 17 | 18 | BG = ScSino; 19 | 20 | %% initial reconstruction 21 | 22 | img = ones(param.nx,param.ny,param.nz,'single'); 23 | sino = zeros(param.nR, param.nA/param.nsubset, param.nSinogram,'single'); 24 | 25 | EPS = 1e-8; 26 | 27 | for iter = 1:param.niter 28 | for sub = 0:param.nsubset-1 29 | 30 | RatioSino = (Sino(:,sub+1:param.nsubset:param.nA,:)+EPS)./(PETproj3d( img, param, sub )+BG(:,sub+1:param.nsubset:param.nA,:)+EPS); 31 | RatioSino(isnan(RatioSino)) = 0; 32 | RatioSino(isinf(RatioSino)) = 0; 
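% Added comments on the step that follows: the two backprojections and the
% multiplicative update below form the standard OP-OSEM update for this subset,
%   img_new = img .* PETbackproj3d(RatioSino) ./ PETbackproj3d(ones)
% where RatioSino is the measured sinogram divided by the current estimate
% (forward projection of img plus the scatter/random background BG).
% Norimg is the subset sensitivity image that normalizes the correction, and the
% max(...,0) together with the NaN/Inf guards keeps voxels valid where the
% sensitivity is zero (e.g. outside the mask or in detector-gap bins).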
33 | 34 | Norimg = PETbackproj3d( ones(param.nR,param.nA/param.nsubset,param.nSinogram,'single'), mask, param, sub); 35 | Ratioimg = PETbackproj3d( RatioSino , mask, param, sub); 36 | 37 | img = max(img.*Ratioimg./Norimg ,0); 38 | img(isnan(img)) = 0; 39 | img(isinf(img)) = 0; 40 | 41 | figure(11); imagesc(img(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar; title(['Iter:',num2str(iter),' / subset:',num2str(sub)]) 42 | pause(0.01); 43 | end 44 | end 45 | 46 | img_osem = img; 47 | save('-v7.3',['Data/OPOSEM_DS',num2str(Downsamplingfactor),'.mat'],'img_osem'); 48 | -------------------------------------------------------------------------------- /ParamSetting.m: -------------------------------------------------------------------------------- 1 | 2 | 3 | %% Parameter setting %% 4 | % Sinogram domain 5 | param.nR = 256; % radial bin 6 | param.nA = 288; % azimuthal bin 7 | 8 | param.dr = 1.218750; % mm 9 | param.da = 3.141592/param.nA; % radian 10 | 11 | % Image domain 12 | param.nx = 256; % number of pixels 13 | param.ny = 256; 14 | param.nz = 207; 15 | 16 | param.dx = param.dr; % mm 17 | param.dy = param.dr; 18 | param.dz = param.dr; 19 | 20 | % % filter option: 'ram-lak'(no lowpass), 'hamming', 'hann', ... 21 | param.filter='hamming'; % 22 | 23 | % % 3D sinogram setting: If you don't know "segment", please study "michelogram" 24 | param.Span = 9; 25 | param.Segment = 3; 26 | 27 | % % Geometric parameter 28 | param.diameter = 469; % mm 29 | param.radius = param.diameter/2; 30 | 31 | 32 | %% Related variables for calculation (Auto calculation) 33 | param.sumSino = zeros(param.Segment+1,1); 34 | param.nSino = zeros(param.Segment+1,1); 35 | 36 | param.nSinogram = param.nz; 37 | param.sumSino(1) = param.nz; 38 | param.nSino(1) = param.nz; 39 | for i=1:param.Segment 40 | param.nSino(i+1) = ((param.nz - (param.Span+1))-(i-1)*2*param.Span); 41 | param.nSinogram = param.nSinogram + 2*param.nSino(i+1); 42 | param.sumSino(i+1) = param.nSinogram; 43 | end 44 | 45 | param.tan_seg = zeros(param.Segment+1,1); 46 | param.cos_seg = zeros(param.Segment+1,1); 47 | param.tan_seg(1) = 0; 48 | param.cos_seg(1) = 1; 49 | for i=1:param.Segment 50 | param.tan_seg(i+1) = param.Span*i*param.dz/param.radius; 51 | param.cos_seg(i+1) = 1/sqrt(1+param.tan_seg(i+1)^2); 52 | end 53 | 54 | %% iteration setting 55 | param.niter =6; % iteration number 56 | param.nsubset = 16; % subsets 57 | -------------------------------------------------------------------------------- /bin/LLF3d.m: -------------------------------------------------------------------------------- 1 | function [filtered, a, b] = LLF3d(input, prior, win_size) 2 | % Local linear fitting function 3 | 4 | input = (single(input)); 5 | prior = (single(prior)); 6 | 7 | half = floor(win_size / 2); 8 | pad_x = (padarray(input, [half, half, half], 'both')); 9 | pad_p = (padarray(single(prior), [half, half, half], 'both')); 10 | 11 | b = (zeros(size(input),'single')); 12 | sigmai = b; 13 | cross = b; 14 | 15 | mu_x = b; 16 | mu_p = b; 17 | 18 | 19 | %constructing denominator image; 20 | initial_denom = (ones(size(pad_p),'single')); 21 | denom = b; 22 | 23 | paddedpsquare = pad_p.^2; 24 | paddedxdp = pad_p.*pad_x; 25 | 26 | for i = -half : half 27 | for j = -half : half 28 | for k = -half : half 29 | mu_x = mu_x + pad_x(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k); 30 | mu_p = mu_p + pad_p(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k); 31 | denom = denom + 
initial_denom(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k); 32 | end 33 | end 34 | end 35 | 36 | mu_x = mu_x ./ denom; 37 | mu_p = mu_p ./ denom; 38 | 39 | % calculating sum over each window by shifting and adding 40 | for i = -half : half 41 | for j = -half : half 42 | for k = -half : half 43 | sigmai = sigmai + paddedpsquare(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k) - b.*pad_p(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k) ; 44 | cross = cross + paddedxdp(half+1+i:end-half+i,half+1+j:end-half+j,half+1+k:end-half+k); 45 | end 46 | end 47 | end 48 | EPS = 0.0; 49 | % % calculating the linear coefficients a and b 50 | a = (cross ) ./ (sigmai +EPS); 51 | a(isinf(a)) = 1; 52 | a(isnan(a)) = 1; 53 | b = mu_x - a .* mu_p; 54 | 55 | % the filtered image 56 | filtered = a.* prior + b; 57 | 58 | 59 | end 60 | 61 | -------------------------------------------------------------------------------- /Demo_OSSART_QuadraticPenalty.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | addpath('bin'); 3 | 4 | load Data/SinoDS10.mat 5 | ParamSetting; 6 | 7 | 8 | % % % sinogram gap mask 9 | % mask = ones(param.nR, param.nA, 'uint8'); % if gap is interpolated 10 | mask = uint8(mask); 11 | 12 | 13 | %% SART 14 | param.nsubset = 1; 15 | Norimg = PETbackproj3d(PETproj3d(ones(param.nx,param.ny,param.nz,'single'),param,0),mask,param,0); 16 | 17 | param.nsubset = 16; 18 | BG = ScSino; % Scatter + random 19 | img = zeros(param.nx, param.ny, param.nz, 'single'); 20 | 21 | %% 22 | beta = 0.2; 23 | 24 | for iter = 1:10 25 | 26 | for isubset = 0:param.nsubset-1 27 | 28 | tic; 29 | % diff back projection 30 | sino_diff = PETproj3d(img,param,isubset) + BG(:,isubset+1:param.nsubset:end,1:param.nSinogram) - SinoDS(:,isubset+1:param.nsubset:end,1:param.nSinogram); 31 | Diffimg = PETbackproj3d(sino_diff,mask,param,isubset); 32 | 33 | % Quadratic penalty (img, param, # of neighbor pixels: more pixel more blurr) 34 | [NU, DE] = quadratic3D(img, param, 1); 35 | 36 | % update 37 | img = max(img- (Diffimg+beta.*NU)./(Norimg+beta.*DE),0); 38 | img(isnan(img))=0; 39 | 40 | figure(11); 41 | subplot(1,3,1); imagesc(max(img(:,:,round(end/2)),0)); axis off; axis equal; colormap gray; colorbar; title(['iter - ',num2str(iter),' || sub - ',num2str(isubset)]); 42 | subplot(1,3,2); imagesc(max(squeeze(img(:,round(end/2),:))',0)); axis off; axis equal; colormap gray; colorbar; title(['iter - ',num2str(iter),' || sub - ',num2str(isubset)]); 43 | subplot(1,3,3); imagesc(max(squeeze(img(round(end/2),:,:))',0)); axis off; axis equal; colormap gray; colorbar; title(['iter - ',num2str(iter),' || sub - ',num2str(isubset)]); 44 | pause(0.01); 45 | exetime = toc; 46 | 47 | disp([num2str(iter),' - iteration done.. ','/ Exe. 
time (sec) : ', num2str(exetime)]);
48 | end
49 | end
50 | 
--------------------------------------------------------------------------------
/Demo_OS_SQS_Nonlocalmeans.m:
--------------------------------------------------------------------------------
1 | clear all;
2 | 
3 | %% Geometric parameter setting
4 | addpath('bin'); % function add
5 | ParamSetting;
6 | 
7 | %% Downsampling study for 4, 6, 8, 10
8 | Downsamplingfactor = 10;
9 | 
10 | load(['Data/SinoDS',num2str(Downsamplingfactor),'.mat']);
11 | load('Data/SinoFDGfull.mat');
12 | load(['Data/OPOSEM_DS',num2str(Downsamplingfactor),'.mat']); % for initialization
13 | 
14 | Sino = SinoDS;
15 | clear SinoDS;
16 | BG = ScSino;
17 | clear ScSino;
18 | 
19 | sino = zeros(param.nR, param.nA/param.nsubset, param.nSinogram,'single');
20 | 
21 | %% Hyper-parameter
22 | beta = 0.04;
23 | 
24 | %% initialization
25 | img= img_osem;
26 | EPS = 1e-8;
27 | 
28 | patchsize_half = 1;
29 | windowsize_half = 1;
30 | sigma = 1;
31 | 
32 | %% Iteration
33 | for iter = 1:4
34 | 
35 | for sub = 0:param.nsubset-1
36 | 
37 | % Calculating NLM
38 | img_nlm = FAST_NLM_3d(img,patchsize_half,windowsize_half,sigma);
39 | 
40 | % SQS calculation
41 | RatioSino = 1 - (Sino(:,sub+1:param.nsubset:param.nA,:)+EPS)./(PETproj3d( img, param, sub )+BG(:,sub+1:param.nsubset:param.nA,:) + EPS);
42 | Norimg = PETbackproj3d( PETproj3d(ones(param.nx,param.ny,param.nz,'single'),param,sub) ./ (Sino(:,sub+1:param.nsubset:param.nA,:) + EPS), mask, param, sub);
43 | Ratioimg = PETbackproj3d( RatioSino , mask, param, sub);
44 | 
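% Added comments on the step that follows: the update below is the ordered-subsets
% SQS step with the NLM penalty,
%   img_new = max( img - ( Ratioimg./Norimg + beta*(img - img_nlm) ) ./ (1 + beta), 0 )
% Ratioimg (backprojection of 1 - measured/estimated) is the gradient of the Poisson
% data term for this subset, Norimg plays the role of the precomputed SQS curvature
% (the denominator), and beta*(img - img_nlm) pulls the estimate toward its
% NLM-filtered version; dividing by (1 + beta) combines the normalized data
% curvature with the penalty curvature.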
45 | % Update image
46 | img = max(img- (Ratioimg./Norimg + beta.*(img-img_nlm))./(1 + beta),0);
47 | img(isnan(img)) = 0;
48 | img(isinf(img)) = 0;
49 | 
50 | figure(31);
51 | subplot(1,2,1); imagesc(img(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar;
52 | subplot(1,2,2); imagesc(img_nlm(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar;
53 | title(['Outer: ',num2str(iter),' / Subset: ',num2str(sub)]);
54 | pause(0.01);
55 | 
56 | end
57 | 
58 | end
59 | 
60 | img_nlm = img;
61 | save('-v7.3',['Data/OSSQS_nlm_DS',num2str(Downsamplingfactor),'.mat'],'img_nlm');
--------------------------------------------------------------------------------
/Important_Readme.txt:
--------------------------------------------------------------------------------
1 | 
2 | Hello,
3 | 
4 | I am Kyungsang Kim. (kssigari@gmail.com, kkim24@mgh.harvard.edu)
5 | 
6 | I share the code used for the paper "Penalized PET reconstruction using deep learning prior and local linear fitting",
7 | IEEE Transactions on Medical Imaging.
8 | 
9 | 0. Linux/Windows compatible (sorry, Mac users: linking OpenMP there is more difficult)
10 | 
11 | 1. We provide a pre-compiled Siemens-type projector and backprojector.
12 | Parallel computing based on OpenMP is used, so "libomp" must be linked.
13 | First try Demo_OPOSEM.m; if you see errors, please check this:
14 | 
15 | 1.1) Linux
16 | https://www.mathworks.com/matlabcentral/answers/125117-openmp-mex-files-static-tls-problem
17 | 
18 | 1.2) Windows
19 | I have not seen errors yet, but if you do, please let me know.
20 | 
21 | +extra) The geometric parameters are in ParamSetting.m.
22 | This code can be used for the HR+ and Biograph as well;
23 | just change ParamSetting.m.
24 | The sinogram should be arc-corrected!
25 | (Please study: michelogram and arc-correction)
26 | 
27 | 
28 | 2. Sinograms in the Data folder
29 | We provide one clinical dataset for testing.
30 | The scanner is the Siemens high-resolution research tomograph (HRRT), dedicated to brain studies.
31 | We provide the full data (4800 sec) and downsampled data at 4x, 6x, 8x, and 10x.
32 | 
33 | 
34 | 3. Demo examples
35 | 
36 | 3.1 OPOSEM (ordinary Poisson ordered-subsets expectation maximization)
37 | 3.2 OS-SQS + non-local means penalty (ordered-subsets separable quadratic surrogates)
38 | The non-local means implementation is clearly explained in this paper:
39 | "Low-dose CT reconstruction using spatially encoded nonlocal penalty", Medical Physics.
40 | 
41 | 3.3 Proposed method: OS-SQS + DnCNN + local linear fitting (LLF)
42 | 
43 | +3.4 OS-SART + quadratic penalty (for researchers)
44 | 
45 | 
46 | 4. Install Caffe version 1
47 | Please install Caffe with the MATLAB interface enabled.
48 | First install the CPU version; if it works, then try the GPU version.
49 | The GPU version is more complicated to set up. If you just want to compare against your own results and you are not a Caffe user,
50 | I highly recommend installing the CPU version, although the computation will be much slower.
51 | 
52 | These are the pre-trained outputs:
53 | "DnCNN_6ds_iter_100000.caffemodel"
54 | "DnCNN_6ds_iter_100000.solverstate"
55 | 
56 | The network is:
57 | "DnCNN_deploy_test.prototxt"
58 | 
59 | After installing Caffe v1,
60 | please open "bin/DnCNN_prior.m" and "bin/DnCNN_prior_grad.m"
61 | and change this option:
62 | 
63 | -------------
64 | caffe.set_mode_gpu();
65 | gpu_id = 0;
66 | caffe.set_device(gpu_id);
67 | --------------
68 | 
69 | If you use the CPU or a different GPU number, change it accordingly:
70 | ex) caffe.set_mode_cpu();
71 | or gpu_id = 2;
72 | 
73 | Enjoy,
74 | 
75 | Kyungsang
76 | 
--------------------------------------------------------------------------------
/bin/FAST_NLM_3d.m:
--------------------------------------------------------------------------------
1 | function DenoisedImg = FAST_NLM_3d(NoisyImg,PatchSizeHalf,WindowSizeHalf,Sigma)
2 | 
3 | NoisyImg = (single(NoisyImg));
4 | [nx,ny,nz] = size(NoisyImg);
5 | 
6 | % Initialize the denoised image
7 | u = (zeros(nx,ny,nz,'single'));
8 | % Initialize the weight max
9 | M = u;
10 | % Initialize the accumulated weights
11 | Z = M;
12 | 
13 | PaddedImg = padarray(NoisyImg,[PatchSizeHalf,PatchSizeHalf,PatchSizeHalf],0);
14 | PaddedV = padarray(NoisyImg,[WindowSizeHalf,WindowSizeHalf,WindowSizeHalf],0);
15 | % Main loop
16 | for dz = -WindowSizeHalf:WindowSizeHalf
17 | for dx = -WindowSizeHalf:WindowSizeHalf
18 | for dy = -WindowSizeHalf:WindowSizeHalf
19 | if dx ~= 0 || dy ~= 0 || dz ~= 0
20 | % Compute the integral image
21 | Sd = integralImgSqDiff(PaddedImg,dx,dy,dz);
22 | % Obtain the squared difference for every pair of pixels
23 | SqDist = -Sd(1:end-2*PatchSizeHalf,1:end-2*PatchSizeHalf,1:end-2*PatchSizeHalf) + Sd(2*PatchSizeHalf+1:end,1:end-2*PatchSizeHalf,1:end-2*PatchSizeHalf) ...
24 | + Sd(1:end-2*PatchSizeHalf,2*PatchSizeHalf+1:end,1:end-2*PatchSizeHalf) + Sd(1:end-2*PatchSizeHalf,1:end-2*PatchSizeHalf,2*PatchSizeHalf+1:end) ...
25 | + Sd(2*PatchSizeHalf+1:end,2*PatchSizeHalf+1:end,2*PatchSizeHalf+1:end) ...
26 | - Sd(1:end-2*PatchSizeHalf,2*PatchSizeHalf+1:end,2*PatchSizeHalf+1:end) - Sd(2*PatchSizeHalf+1:end,1:end-2*PatchSizeHalf,2*PatchSizeHalf+1:end)...
27 | - Sd(2*PatchSizeHalf+1:end,2*PatchSizeHalf+1:end,1:end-2*PatchSizeHalf);
28 | % Compute the weights for every pixel
29 | SqDist = max(SqDist,0);
30 | w = exp(-SqDist/(2*Sigma^2));
31 | 
32 | % Obtain the corresponding noisy pixels
33 | v = PaddedV((WindowSizeHalf+1+dx):(WindowSizeHalf+dx+nx),(WindowSizeHalf+1+dy):(WindowSizeHalf+dy+ny),(WindowSizeHalf+1+dz):(WindowSizeHalf+dz+nz));
34 | 
35 | % Compute and accumulate denoised pixels
36 | u = u+w.*v;
37 | M = max(M,w);
38 | Z = Z+w;
39 | end
40 | end
41 | end
42 | end
43 | % Special controls to accumulate the contribution of the noisy pixels to be denoised
44 | u = (u)./(Z);
45 | u(isnan(u)) = NoisyImg( isnan(u));
46 | u(isinf(u)) = NoisyImg(isinf(u));
47 | % Output denoised image
48 | DenoisedImg = u;
49 | 
50 | 
51 | function Sd = integralImgSqDiff(v,dx,dy,dz)
52 | % FUNCTION integralImgSqDiff: Compute the integral image of squared differences
53 | % Decide shift type, tx = vx+dx; ty = vy+dy
54 | t = img3DShift(v,dx,dy,dz);
55 | % Create squared difference image
56 | diff = abs(v-t).^2;
57 | % Construct integral image along x
58 | Sd = cumsum(diff,1);
59 | % Construct integral image along y
60 | Sd = cumsum(Sd,2);
61 | % Construct integral image along z
62 | Sd = cumsum(Sd,3);
63 | 
64 | function t = img3DShift(v,dx,dy,dz)
65 | % FUNCTION img3DShift: Shift image with respect to the x, y, and z coordinates
66 | t = (zeros(size(v),'single'));
67 | 
68 | [nx, ny, nz] = size(v);
69 | 
70 | x = max(1,1+dx):min(nx,nx+dx);
71 | y = max(1,1+dy):min(ny,ny+dy);
72 | z = max(1,1+dz):min(nz,nz+dz);
73 | 
74 | t(x-dx,y-dy,z-dz) = v(x,y,z);
75 | % -------------------------------------------------------------------------
76 | 
--------------------------------------------------------------------------------
/Demo_OS_SQS_DnCNN_LLF_Proposed.m:
--------------------------------------------------------------------------------
1 | clear all;
2 | 
3 | %% Geometric parameter setting
4 | addpath('bin'); % function add
5 | ParamSetting;
6 | 
7 | % % The gradient of DnCNN with respect to the image takes a long time to compute,
8 | % % and its value is very small compared to the other terms.
9 | % % You can skip the DnCNN gradient to get results faster.
10 | % % 0 - Skip, 1 - using Gradient
11 | Opt = 0;
12 | 
13 | %% Downsampling study for 4, 6, 8, 10
14 | Downsamplingfactor = 10;
15 | 
16 | load(['Data/SinoDS',num2str(Downsamplingfactor),'.mat']);
17 | load('Data/SinoFDGfull.mat');
18 | load(['Data/OPOSEM_DS',num2str(Downsamplingfactor),'.mat']); % for initialization
19 | 
20 | Sino = SinoDS;
21 | clear SinoDS;
22 | BG = ScSino;
23 | clear ScSino;
24 | 
25 | sino = zeros(param.nR, param.nA/param.nsubset, param.nSinogram,'single');
26 | 
27 | %% Hyper-parameter
28 | beta = 0.04;
29 | gamma = 0.1;
30 | alpha = 1;
31 | 
32 | %% initialization
33 | img= img_osem;
34 | img_DnCNN = img;
35 | q = ones(size(img),'single');
36 | b = zeros(size(img),'single');
37 | z = img*2;
38 | eta = zeros(size(img),'single');
39 | EPS = 1e-8;
40 | 
41 | %% Iterative reconstruction
42 | 
43 | for iter = 1:4
44 | 
45 | % caffe in matlab is unstable sometimes.
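% (hence the gpuDevice reset right below, which clears GPU memory between outer iterations).
% Added comments for reference, the quantities updated inside the subset loop are:
%   img        - current reconstruction x
%   img_DnCNN  - DnCNN output f_w(x), used as the network prior
%   imgD       - auxiliary denoised variable x^D
%   q, b       - local-linear-fitting coefficients from LLF3d, so that q.*imgD + b tracks img
%   z          - combined target (img + q.*imgD + b) used in the SQS image update
%   eta        - Lagrangian variable for the constraint imgD = f_w(img)
%   beta, gamma, alpha - the hyper-parameters set above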
46 | reset(gpuDevice(1)); 47 | 48 | for sub = 0:param.nsubset-1 49 | 50 | % Calculating DnCNN f_w(x) 51 | img_DnCNN = max(DnCNN_prior(img) ,0); 52 | 53 | % Update x^D 54 | imgD = (beta.*q.*img - beta.*q.*b + gamma.*img_DnCNN + gamma.*eta)./(beta.*q.*q + gamma); 55 | 56 | % Update z 57 | z = (img + q.*imgD + b); 58 | 59 | % Update q, b 60 | [img_D_LLF, q, b] = LLF3d(img, imgD , 5); % img_D_LLF = q.*imgD + b; 61 | 62 | % Update gradient of DnCNN with respect to image 63 | if Opt == 1 64 | if mod(sub,4) == 0 65 | img_grad = DnCNN_prior_grad(img, imgD - img_DnCNN - eta); 66 | end 67 | else 68 | img_grad = 0; 69 | end 70 | 71 | % SQS calculation 72 | RatioSino = 1 - (Sino(:,sub+1:param.nsubset:param.nA,:)+EPS)./(PETproj3d( img, param, sub )+BG(:,sub+1:param.nsubset:param.nA,:) + EPS); 73 | Norimg = PETbackproj3d( PETproj3d(ones(param.nx,param.ny,param.nz,'single'),param,sub) ./ (Sino(:,sub+1:param.nsubset:param.nA,:) + EPS), mask, param, sub); 74 | Ratioimg = PETbackproj3d( RatioSino , mask, param, sub); 75 | 76 | % Update image 77 | img = max(img- (Ratioimg./Norimg + beta.*(2*img-z)-gamma.*img_grad)./(1 + 2*beta + alpha*gamma),0); 78 | img(isnan(img)) = 0; 79 | img(isinf(img)) = 0; 80 | 81 | % Update Lagrangian parameter 82 | eta = eta - (imgD - img_DnCNN); 83 | 84 | figure(21); 85 | subplot(2,2,1); imagesc(img(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar; 86 | subplot(2,2,2); imagesc(img_DnCNN(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar; 87 | subplot(2,2,3); imagesc(z(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar; 88 | subplot(2,2,4); imagesc(img_D_LLF(:,:,round(end/2))); axis off; axis equal; colormap gray; colorbar; 89 | title(['Outer: ',num2str(iter),' / Subset: ',num2str(sub)]); 90 | pause(0.01); 91 | 92 | end 93 | 94 | end 95 | 96 | 97 | -------------------------------------------------------------------------------- /bin/DnCNN_prior_grad.m: -------------------------------------------------------------------------------- 1 | function [ img_grad ] = DnCNN_prior_grad( img, res ) 2 | %DnCNN Summary of this function goes here 3 | % Detailed explanation goes here 4 | % addpath(genpath('C:/caffe/caffe-master/matlab')); 5 | 6 | %% 7 | % Please change if you use CPU 8 | caffe.set_mode_gpu(); 9 | gpu_id = 0; 10 | caffe.set_device(gpu_id); 11 | 12 | net_weights = ['DnCNN_6ds_iter_100000.caffemodel']; 13 | net_model = ['DnCNN_deploy_test.prototxt']; 14 | net = caffe.Net(net_model, net_weights, 'test'); 15 | 16 | [nx, ny, nz] = size(img); 17 | 18 | nslice = 5; 19 | hslice = round((nslice-1)/2); 20 | 21 | img_proposed3d = zeros(nx, ny, nz, 'single'); 22 | weight_overlap = zeros(1, nz, 'single'); 23 | 24 | img_grad = zeros(nx, ny, nz, 'single'); 25 | 26 | Scaling = 100./mean(img(img>0)); % we have trained with this scale. 27 | 28 | img = img*Scaling; 29 | 30 | % % Gradients of CNN about image have values only at near voxels. 31 | % % Thus we can calculate garadients of voxels in parallel. 
32 | % % Voxels for the same gradient calculation should have a certain distance 33 | % % to avoid affecting gradient to each other, here, we use "parallel_term" 34 | parallel_term = 5; 35 | stepsize = 0.1; 36 | 37 | for iz = 1+hslice:1:nz-hslice 38 | 39 | img_noise = img(:,:,iz-hslice:iz+hslice); 40 | img_res = res(:,:,iz-hslice:iz+hslice); 41 | 42 | grad_img = zeros(nx, ny, 'single'); 43 | 44 | % Numerical gradient: grad f(x)/x_j = (f(x_j+a) - f(x_j-a))/(2a) 45 | % a - stepsize 46 | for islice = hslice 47 | for idx =1:parallel_term 48 | for idy = 1:parallel_term 49 | 50 | img_tmp = img_noise; 51 | img_tmp(idx:parallel_term:end, idy:parallel_term:end,islice) = img_tmp(idx:parallel_term:end, idy:parallel_term:end,islice) + stepsize; 52 | testpatch(:,:,1:5) = img_tmp; 53 | tmp_plus = net.forward({testpatch}); 54 | tmp_plus = tmp_plus{1}(:,:,:); 55 | 56 | img_tmp = img_noise; 57 | img_tmp(idx:parallel_term:end, idy:parallel_term:end,islice) = img_tmp(idx:parallel_term:end, idy:parallel_term:end,islice) - stepsize; 58 | testpatch(:,:,1:5) = img_tmp; 59 | tmp_minus = net.forward({testpatch}); 60 | tmp_minus = tmp_minus{1}(:,:,:); 61 | 62 | half = ceil((parallel_term-1)/2); 63 | 64 | img_diff = (tmp_plus - tmp_minus)/(2*stepsize).*img_res; 65 | img_diff_pad = padarray(img_diff,[half,half,0],0); 66 | tmp_sum = zeros(size(img_noise(idx:parallel_term:end, idy:parallel_term:end, islice)),'single'); 67 | 68 | for ii = -half:half 69 | for jj = -half:half 70 | tmp = sum(img_diff_pad(idx+ii+half:parallel_term:end, idy+jj+half:parallel_term:end,:),3); 71 | tmp_sum = tmp_sum + tmp(1:size(tmp_sum,1),1:size(tmp_sum,2)); 72 | end 73 | end 74 | 75 | grad_img(idx:parallel_term:end, idy:parallel_term:end) = tmp_sum; 76 | 77 | end 78 | end 79 | end 80 | img_proposed3d(:,:,iz-hslice:iz+hslice) = img_proposed3d(:,:,iz-hslice:iz+hslice) + repmat(grad_img,[1,1,nslice]); 81 | weight_overlap(iz-hslice:iz+hslice) = weight_overlap(iz-hslice:iz+hslice)+1; 82 | end 83 | 84 | for iz = 1:nz 85 | img_grad(:,:,iz) = img_proposed3d(:,:,iz)./weight_overlap(iz); 86 | end 87 | img_grad(isnan(img_grad)) = 0; 88 | 89 | img_grad = img_grad./Scaling; 90 | 91 | 92 | end 93 | 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Penalized PET reconstruction using deep learning prior and local linear fitting 2 | 3 | Hello, 4 | 5 | I am Kyungsang Kim. (kssigari(at)gmail.com, kkim24(at)mgh.harvard.edu) 6 | 7 | The code was used for the following paper: 8 | Kyungsang Kim et al. "Penalized PET reconstruction using deep learning prior and local linear fitting", IEEE Transactions on Medical Imaging. (https://ieeexplore.ieee.org/document/8354909/) 9 | 10 | Please read below carefully because data files are provided with link. 11 | 12 | Abstract 13 | 14 | Motivated by the great potential of deep learning in medical imaging, we propose an iterative positron emission tomography (PET) reconstruction framework using a deep learning-based prior. We utilized the denoising convolutional neural network (DnCNN) method and trained the network using full-dose images as the ground truth and low dose images reconstructed from downsampled data by Poisson thinning as input. Since most published deep networks are trained at a predetermined noise level, the noise level disparity of training and testing data is a major problem for their applicability as a generalized prior. 
In particular, the noise level significantly changes in each iteration, which can potentially degrade the overall performance of iterative reconstruction. Due to insufficient existing studies, we conducted simulations and evaluated the degradation of performance at various noise conditions. Our findings indicated that DnCNN produces additional bias induced by the disparity of noise levels. To address this issue, we propose a local linear fitting (LLF) function incorporated with the DnCNN prior to improve the image quality by preventing unwanted bias. We demonstrate that the resultant method is robust against noise level disparities despite the network being trained at a predetermined noise level. By means of bias and standard deviation studies via both simulations and clinical experiments, we show that the proposed method outperforms conventional methods based on total variation (TV) and non-local means (NLM) penalties. We thereby confirm that the proposed method improves the reconstruction result both quantitatively and qualitatively.
15 | 
16 | Details:
17 | 
18 | 0. Linux/Windows compatible (sorry, Mac users: linking OpenMP there is difficult)
19 | 
20 | 1. We provide a pre-compiled Siemens-type projector and backprojector.
21 | Parallel computing based on OpenMP is used, so "libomp" must be linked.
22 | First try Demo_OPOSEM.m; if you see errors, please check this:
23 | 
24 | 1.1) Linux:
25 | https://www.mathworks.com/matlabcentral/answers/125117-openmp-mex-files-static-tls-problem
26 | 
27 | 1.2) Windows:
28 | I have not seen errors yet, but if you do, please let me know.
29 | 
30 | +extra) The geometric parameters are in ParamSetting.m.
31 | This code can be used for the HR+ and Biograph as well;
32 | just change ParamSetting.m.
33 | The sinogram should be arc-corrected!
34 | (Please study: michelogram and arc-correction)
35 | 
36 | 
37 | 2. Sinograms in the Data folder
38 | We provide one clinical dataset for testing.
39 | The scanner is the Siemens high-resolution research tomograph (HRRT), dedicated to brain studies.
40 | We provide the full data (4800 sec) and downsampled data at 4x, 6x, 8x, and 10x.
41 | 
42 | Please download the "Data" folder: https://www.dropbox.com/sh/33kqnvbbclhvscr/AACAj0_qmCZby_yjKZjuCdLia?dl=0
43 | 
44 | 
45 | 3. Demo examples
46 | 
47 | 3.1 OPOSEM (ordinary Poisson ordered-subsets expectation maximization)
48 | 
49 | 3.2 OS-SQS + non-local means penalty (ordered-subsets separable quadratic surrogates):
50 | The non-local means implementation is clearly explained in this paper:
51 | Kim et al. "Low-dose CT reconstruction using spatially encoded nonlocal penalty", Medical Physics.
52 | 
53 | 3.3 Proposed method: OS-SQS + DnCNN + local linear fitting (LLF)
54 | 
55 | +3.4 OS-SART + quadratic penalty (for researchers)
56 | 
57 | 
58 | 4. Install Caffe version 1
59 | Please install Caffe with the MATLAB interface enabled.
60 | First install the CPU version; if it works, then try the GPU version.
61 | The GPU version is more complicated to set up. If you just want to compare against your own results and you are not a Caffe user,
62 | I highly recommend installing the CPU version, although the computation will be much slower.
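Once the MATLAB interface works, a quick check along these lines (a minimal sketch; the
caffe path below is the one hard-coded in bin/DnCNN_prior.m, so adjust it to your install)
confirms that the pre-trained model listed below loads and runs:

    addpath(genpath('C:/caffe/caffe-master/matlab'));  % path to your caffe/matlab folder
    caffe.set_mode_cpu();                              % or caffe.set_mode_gpu(); caffe.set_device(0);
    net = caffe.Net('DnCNN_deploy_test.prototxt', 'DnCNN_6ds_iter_100000.caffemodel', 'test');
    out = net.forward({zeros(256,256,5,'single')});    % dummy 5-slice input, as in DnCNN_prior.m
    size(out{1})                                       % expected: 256 x 256 x 5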
63 | 64 | These are pre-trained outputs: 65 | "DnCNN_6ds_iter_100000.caffemodel" 66 | "DnCNN_6ds_iter_100000.solverstate" 67 | 68 | The network is: 69 | "DnCNN_deploy_test.prototxt" 70 | 71 | After installation Caffe v1, 72 | please open "bin/DnCNN_prior.m" and "bin/DnCNN_prior_grad.m" 73 | and then change this option: 74 | 75 | ////////////////////// 76 | 77 | caffe.set_mode_gpu(); 78 | 79 | gpu_id = 0; 80 | 81 | caffe.set_device(gpu_id); 82 | 83 | ////////////////////// 84 | 85 | if you use CPU or another GPU number, change this: 86 | ex) caffe.set_mode_cpu(); 87 | or gpu_id = 2; 88 | 89 | Enjoy, 90 | 91 | Kyungsang 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | -------------------------------------------------------------------------------- /DnCNN_deploy_test.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "HDF5Data" 4 | top: "data" 5 | include { 6 | phase: TRAIN 7 | } 8 | hdf5_data_param { 9 | source: "trainingFileList_oneinput.txt" 10 | batch_size: 50 11 | } 12 | input_param { 13 | shape { 14 | dim: 80000 15 | dim: 10 16 | dim: 32 17 | dim: 32 18 | } 19 | } 20 | } 21 | layer { 22 | name: "data" 23 | type: "Input" 24 | top: "dataSrc" 25 | include { 26 | phase: TEST 27 | } 28 | input_param { 29 | shape { 30 | dim: 1 31 | dim: 5 32 | dim: 256 33 | dim: 256 34 | } 35 | } 36 | } 37 | layer { 38 | name: "0_conv" 39 | type: "Convolution" 40 | bottom: "dataSrc" 41 | top: "0_conv" 42 | convolution_param { 43 | num_output: 64 44 | pad: 1 45 | kernel_size: 3 46 | stride: 1 47 | weight_filler { 48 | type: "xavier" 49 | } 50 | bias_filler { 51 | type: "constant" 52 | value: 0 53 | } 54 | } 55 | } 56 | layer { 57 | name: "0_conv_relu" 58 | type: "ReLU" 59 | bottom: "0_conv" 60 | top: "0_conv_relu" 61 | } 62 | layer { 63 | name: "1_conv" 64 | type: "Convolution" 65 | bottom: "0_conv_relu" 66 | top: "1_conv" 67 | convolution_param { 68 | num_output: 64 69 | pad: 1 70 | kernel_size: 3 71 | stride: 1 72 | weight_filler { 73 | type: "xavier" 74 | } 75 | bias_filler { 76 | type: "constant" 77 | value: 0 78 | } 79 | } 80 | } 81 | layer { 82 | name: "1_conv_bnorm" 83 | type: "BatchNorm" 84 | bottom: "1_conv" 85 | top: "1_conv_bnorm" 86 | param { 87 | lr_mult: 0 88 | } 89 | param { 90 | lr_mult: 0 91 | } 92 | param { 93 | lr_mult: 0 94 | } 95 | batch_norm_param { 96 | use_global_stats: true 97 | } 98 | } 99 | layer { 100 | name: "1_conv_scale" 101 | type: "Scale" 102 | bottom: "1_conv_bnorm" 103 | top: "1_conv_scale" 104 | scale_param { 105 | bias_term: true 106 | } 107 | } 108 | layer { 109 | name: "1_conv_relu" 110 | type: "ReLU" 111 | bottom: "1_conv_scale" 112 | top: "1_conv_relu" 113 | } 114 | layer { 115 | name: "2_conv" 116 | type: "Convolution" 117 | bottom: "1_conv_relu" 118 | top: "2_conv" 119 | convolution_param { 120 | num_output: 64 121 | pad: 1 122 | kernel_size: 3 123 | stride: 1 124 | weight_filler { 125 | type: "xavier" 126 | } 127 | bias_filler { 128 | type: "constant" 129 | value: 0 130 | } 131 | } 132 | } 133 | layer { 134 | name: "2_conv_bnorm" 135 | type: "BatchNorm" 136 | bottom: "2_conv" 137 | top: "2_conv_bnorm" 138 | param { 139 | lr_mult: 0 140 | } 141 | param { 142 | lr_mult: 0 143 | } 144 | param { 145 | lr_mult: 0 146 | } 147 | batch_norm_param { 148 | use_global_stats: true 149 | } 150 | } 151 | layer { 152 | name: "2_conv_scale" 153 | type: "Scale" 154 | bottom: "2_conv_bnorm" 155 | top: "2_conv_scale" 156 | 
scale_param { 157 | bias_term: true 158 | } 159 | } 160 | layer { 161 | name: "2_conv_relu" 162 | type: "ReLU" 163 | bottom: "2_conv_scale" 164 | top: "2_conv_relu" 165 | } 166 | layer { 167 | name: "3_conv" 168 | type: "Convolution" 169 | bottom: "2_conv_relu" 170 | top: "3_conv" 171 | convolution_param { 172 | num_output: 64 173 | pad: 1 174 | kernel_size: 3 175 | stride: 1 176 | weight_filler { 177 | type: "xavier" 178 | } 179 | bias_filler { 180 | type: "constant" 181 | value: 0 182 | } 183 | } 184 | } 185 | layer { 186 | name: "3_conv_bnorm" 187 | type: "BatchNorm" 188 | bottom: "3_conv" 189 | top: "3_conv_bnorm" 190 | param { 191 | lr_mult: 0 192 | } 193 | param { 194 | lr_mult: 0 195 | } 196 | param { 197 | lr_mult: 0 198 | } 199 | batch_norm_param { 200 | use_global_stats: true 201 | } 202 | } 203 | layer { 204 | name: "3_conv_scale" 205 | type: "Scale" 206 | bottom: "3_conv_bnorm" 207 | top: "3_conv_scale" 208 | scale_param { 209 | bias_term: true 210 | } 211 | } 212 | layer { 213 | name: "3_conv_relu" 214 | type: "ReLU" 215 | bottom: "3_conv_scale" 216 | top: "3_conv_relu" 217 | } 218 | layer { 219 | name: "4_conv" 220 | type: "Convolution" 221 | bottom: "3_conv_relu" 222 | top: "4_conv" 223 | convolution_param { 224 | num_output: 64 225 | pad: 1 226 | kernel_size: 3 227 | stride: 1 228 | weight_filler { 229 | type: "xavier" 230 | } 231 | bias_filler { 232 | type: "constant" 233 | value: 0 234 | } 235 | } 236 | } 237 | layer { 238 | name: "4_conv_bnorm" 239 | type: "BatchNorm" 240 | bottom: "4_conv" 241 | top: "4_conv_bnorm" 242 | param { 243 | lr_mult: 0 244 | } 245 | param { 246 | lr_mult: 0 247 | } 248 | param { 249 | lr_mult: 0 250 | } 251 | batch_norm_param { 252 | use_global_stats: true 253 | } 254 | } 255 | layer { 256 | name: "4_conv_scale" 257 | type: "Scale" 258 | bottom: "4_conv_bnorm" 259 | top: "4_conv_scale" 260 | scale_param { 261 | bias_term: true 262 | } 263 | } 264 | layer { 265 | name: "4_conv_relu" 266 | type: "ReLU" 267 | bottom: "4_conv_scale" 268 | top: "4_conv_relu" 269 | } 270 | layer { 271 | name: "5_conv" 272 | type: "Convolution" 273 | bottom: "4_conv_relu" 274 | top: "5_conv" 275 | convolution_param { 276 | num_output: 64 277 | pad: 1 278 | kernel_size: 3 279 | stride: 1 280 | weight_filler { 281 | type: "xavier" 282 | } 283 | bias_filler { 284 | type: "constant" 285 | value: 0 286 | } 287 | } 288 | } 289 | layer { 290 | name: "5_conv_bnorm" 291 | type: "BatchNorm" 292 | bottom: "5_conv" 293 | top: "5_conv_bnorm" 294 | param { 295 | lr_mult: 0 296 | } 297 | param { 298 | lr_mult: 0 299 | } 300 | param { 301 | lr_mult: 0 302 | } 303 | batch_norm_param { 304 | use_global_stats: true 305 | } 306 | } 307 | layer { 308 | name: "5_conv_scale" 309 | type: "Scale" 310 | bottom: "5_conv_bnorm" 311 | top: "5_conv_scale" 312 | scale_param { 313 | bias_term: true 314 | } 315 | } 316 | layer { 317 | name: "5_conv_relu" 318 | type: "ReLU" 319 | bottom: "5_conv_scale" 320 | top: "5_conv_relu" 321 | } 322 | layer { 323 | name: "6_conv" 324 | type: "Convolution" 325 | bottom: "5_conv_relu" 326 | top: "6_conv" 327 | convolution_param { 328 | num_output: 5 329 | pad: 1 330 | kernel_size: 3 331 | stride: 1 332 | weight_filler { 333 | type: "xavier" 334 | } 335 | bias_filler { 336 | type: "constant" 337 | value: 0 338 | } 339 | } 340 | } 341 | layer { 342 | name: "loss" 343 | type: "EuclideanLoss" 344 | bottom: "6_conv" 345 | bottom: "dataRef" 346 | top: "loss" 347 | include { 348 | phase: TRAIN 349 | } 350 | } 351 | 352 | 
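# Test-phase usage (see bin/DnCNN_prior.m): the Input layer above takes a 1x5x256x256 blob,
# i.e. five adjacent transaxial slices scaled so that the mean of the positive voxels is 100,
# and the final 6_conv layer returns the five denoised slices. DnCNN_prior.m slides this
# 5-slice window along z and averages the overlapping network outputs.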
--------------------------------------------------------------------------------