├── data
│   └── data_url.txt
├── compared_methods
│   ├── rctv-main
│   │   ├── README.md
│   │   ├── TV_operator
│   │   │   ├── diff_y.m
│   │   │   ├── diff_z.m
│   │   │   ├── diff_x.m
│   │   │   ├── diff_yT.m
│   │   │   ├── diff_zT.m
│   │   │   ├── diff_xT.m
│   │   │   ├── myPCG_ss.m
│   │   │   ├── diff2.m
│   │   │   ├── myPCG_sstv.m
│   │   │   ├── diffT2.m
│   │   │   ├── diff3.m
│   │   │   ├── diffT3.m
│   │   │   ├── diff_dT2.m
│   │   │   └── diff_d2.m
│   │   ├── Normalize.m
│   │   ├── demo.m
│   │   └── MoG_RBTV.m
│   ├── AWLP
│   │   ├── AWLP.m
│   │   └── indwt2_working.m
│   ├── LMHTV
│   │   ├── LMHTV.m
│   │   ├── LMHTV_Lq.m
│   │   ├── LRTV_accelerate.m
│   │   ├── PROPACK
│   │   │   ├── helio.mat
│   │   │   ├── bdsqr.mexsg
│   │   │   ├── tqlb.mexglx
│   │   │   ├── tqlb.mexsg
│   │   │   ├── tqlb.mexsol
│   │   │   ├── bdsqr.mexglx
│   │   │   ├── bdsqr.mexsg64
│   │   │   ├── bdsqr.mexsol
│   │   │   ├── bdsqr.mexw32
│   │   │   ├── reorth.mexglx
│   │   │   ├── reorth.mexsg
│   │   │   ├── reorth.mexsg64
│   │   │   ├── reorth.mexsol
│   │   │   ├── tqlb.mexsg64
│   │   │   ├── Afunc.m
│   │   │   ├── AtAfunc.m
│   │   │   ├── Atransfunc.m
│   │   │   ├── Cfunc.m
│   │   │   ├── dbdqr.f
│   │   │   ├── pythag.m
│   │   │   ├── tqlb.m
│   │   │   ├── bdsqr.m
│   │   │   ├── refinebounds.m
│   │   │   ├── tqlb_mex.c
│   │   │   ├── compute_int.m
│   │   │   ├── testtqlb.m
│   │   │   ├── bdsqr_mex.c
│   │   │   ├── lansvd.doc
│   │   │   ├── lansvd.txt
│   │   │   ├── laneig.doc
│   │   │   ├── laneig.txt
│   │   │   ├── update_gbound.m
│   │   │   ├── reorth_mex.c
│   │   │   ├── reorth.m
│   │   │   ├── lanpro.doc
│   │   │   ├── lanpro.txt
│   │   │   ├── mminfo.m
│   │   │   ├── lanbpro.doc
│   │   │   ├── lanbpro.txt
│   │   │   └── reorth.f
│   │   ├── prox_operators
│   │   │   ├── TV_norm.m
│   │   │   ├── gradient_op.m
│   │   │   ├── div_op.m
│   │   │   └── prox_TV.m
│   │   └── choosvd.m
│   ├── tensor_dl.zip
│   ├── LTHTV
│   │   ├── funrand.m
│   │   ├── mylib
│   │   │   ├── Unfold.m
│   │   │   ├── softthre.m
│   │   │   ├── tensorSVD.m
│   │   │   ├── Fold.m
│   │   │   ├── my_ttm.m
│   │   │   ├── tucker_hooi.m
│   │   │   └── tucker_hooi_s.m
│   │   ├── hardthre.m
│   │   ├── prox_operators
│   │   │   ├── diffT3_weight.m
│   │   │   ├── myPCG1.m
│   │   │   ├── myPCG1_w.m
│   │   │   ├── diff3.m
│   │   │   ├── diff3_weight.m
│   │   │   └── diffT3.m
│   │   ├── LRTDTV.m
│   │   ├── LTHTV.m
│   │   └── LTHTV_Lq.m
│   ├── WNLRATV
│   │   ├── softthre.m
│   │   ├── TV_operator.rar
│   │   ├── readme.txt
│   │   ├── optimMCL2.m
│   │   ├── Weight_NMoG.m
│   │   ├── NMoG2Weight.m
│   │   ├── R_initialization.m
│   │   ├── Block_matching.m
│   │   ├── logsumexp.m
│   │   ├── WNNM_yang.m
│   │   ├── Patch2Cub.m
│   │   ├── InitialPara.m
│   │   ├── NLPatEstimation_yang.m
│   │   ├── Wn_est_initial.m
│   │   ├── Cub2Patch_yang.m
│   │   ├── VariatInf_NoiseDist.m
│   │   ├── EfficientMCL2.m
│   │   ├── NeighborIndex.m
│   │   ├── TVFast_fft.m
│   │   └── SetParam_NWT.m
│   ├── TCTV
│   │   ├── high-order tensor-SVD Toolbox
│   │   │   ├── htprod_U.m
│   │   │   ├── htprod_fft.m
│   │   │   ├── foldi.m
│   │   │   ├── unfoldi.m
│   │   │   ├── PSNR.m
│   │   │   ├── nc.m
│   │   │   ├── htran.m
│   │   │   ├── README.txt
│   │   │   ├── inverselineartransform.m
│   │   │   ├── lineartransform.m
│   │   │   ├── htsvd.m
│   │   │   ├── prox_htnn_U.m
│   │   │   ├── ht_svd_fft.m
│   │   │   ├── htsvd_fft.m
│   │   │   ├── RandOrthMat.m
│   │   │   ├── prox_htnn_F.m
│   │   │   ├── nmodeproduct.m
│   │   │   ├── HTNN_FFT.m
│   │   │   ├── HTNN_U.m
│   │   │   └── tmprod.m
│   │   ├── differential operators
│   │   │   ├── diff_2.m
│   │   │   ├── diff_2T.m
│   │   │   ├── diff_3T.m
│   │   │   ├── diff_1.m
│   │   │   ├── diff_3.m
│   │   │   ├── diff_1T.m
│   │   │   ├── diff_matrix.m
│   │   │   ├── diff_element.m
│   │   │   ├── diff_element_order2.m
│   │   │   ├── porder_diff.m
│   │   │   └── porder_diff_T.m
│   │   └── TCTV_TRPCA.m
│   ├── NGmeet
│   │   ├── Block_matching.m
│   │   ├── Patch2Cub.m
│   │   ├── NLPatEstimation.m
│   │   ├── Cub2Patch.m
│   │   ├── NeighborIndex.m
│   │   └── ParSetH.m
│   ├── CTV
│   │   └── ctv_rpca.m
│   └── LRTV
│       └── LRTV.m
├── utils
│   ├── visualization
│   │   ├── imamp.m
│   │   ├── myshow.m
│   │   └── rsshow.m
│   ├── TV_operator
│   │   ├── diff_y.m
│   │   ├── diff_z.m
│   │   ├── diff_x.m
│   │   ├── diff_yT.m
│   │   ├── diff_zT.m
│   │   ├── diff_xT.m
│   │   ├── myPCG_ss.m
│   │   ├── diff2.m
│   │   ├── myPCG_sstv.m
│   │   ├── diffT2.m
│   │   ├── diff3.m
│   │   ├── diffT3.m
│   │   ├── diff_dT2.m
│   │   └── diff_d2.m
│   ├── prox_nn.m
│   ├── clamp.m
│   ├── svdecon.m
│   ├── svdsecon.m
│   └── imcorrfilter.m
├── Table5_Fig10.m
├── PWRCTV.m
├── README.md
└── Table_4_step1.m /data/data_url.txt: -------------------------------------------------------------------------------- 1 | https://www.researchgate.net/publication/383825518_data_PWRCTVzip 2 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/README.md: -------------------------------------------------------------------------------- 1 | # representative_coefficient_total_variation 2 | The code of RCTV 3 | -------------------------------------------------------------------------------- /utils/visualization/imamp.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/utils/visualization/imamp.m -------------------------------------------------------------------------------- /compared_methods/AWLP/AWLP.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/AWLP/AWLP.m -------------------------------------------------------------------------------- /compared_methods/LMHTV/LMHTV.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/LMHTV.m -------------------------------------------------------------------------------- /compared_methods/tensor_dl.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/tensor_dl.zip -------------------------------------------------------------------------------- /compared_methods/LTHTV/funrand.m: -------------------------------------------------------------------------------- 1 | function an=funrand(a,n) 2 | for i=1:n 3 | an(i)=randperm(a,1); 4 | end 5 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/Unfold.m: -------------------------------------------------------------------------------- 1 | function [X] = Unfold( X, dim, i ) 2 | X = reshape(shiftdim(X,i-1), dim(i), []); -------------------------------------------------------------------------------- /compared_methods/LMHTV/LMHTV_Lq.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/LMHTV_Lq.m -------------------------------------------------------------------------------- /compared_methods/LTHTV/hardthre.m: -------------------------------------------------------------------------------- 1 | function x = hardthre(a, tau) 2 | x = a; 3 | x(find(abs(a)<=tau)) =0; 4 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/softthre.m: -------------------------------------------------------------------------------- 1 | function x = softthre(a, tau) 2 | 3 | x = sign(a).* max( abs(a) - tau, 0); 4 | end -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/softthre.m: -------------------------------------------------------------------------------- 1 | function x = softthre(a, tau) 2 | 3 | x = sign(a).* max( abs(a) - tau, 0); 4 | end -------------------------------------------------------------------------------- /compared_methods/LMHTV/LRTV_accelerate.m: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/LRTV_accelerate.m -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/helio.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/helio.mat -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/tensorSVD.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LTHTV/mylib/tensorSVD.m -------------------------------------------------------------------------------- /compared_methods/WNLRATV/TV_operator.rar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/WNLRATV/TV_operator.rar -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.mexsg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/bdsqr.mexsg -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/tqlb.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/tqlb.mexglx -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/tqlb.mexsg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/tqlb.mexsg -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/tqlb.mexsol: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/tqlb.mexsol -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/bdsqr.mexglx -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.mexsg64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/bdsqr.mexsg64 -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.mexsol: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/bdsqr.mexsol -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/bdsqr.mexw32 -------------------------------------------------------------------------------- 
/compared_methods/LMHTV/PROPACK/reorth.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/reorth.mexglx -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth.mexsg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/reorth.mexsg -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth.mexsg64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/reorth.mexsg64 -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth.mexsol: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/reorth.mexsol -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/tqlb.mexsg64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LMHTV/PROPACK/tqlb.mexsg64 -------------------------------------------------------------------------------- /compared_methods/WNLRATV/readme.txt: -------------------------------------------------------------------------------- 1 | Hyperspectral Image Denoising with Weighted nonlocal Low-rank Model and Adaptive Total Variation Regularization -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/Fold.m: -------------------------------------------------------------------------------- 1 | function [X] = Fold(X, dim, i) 2 | dim = circshift(dim, [1-i, 1-i]); 3 | X = shiftdim(reshape(X, dim), length(dim)+1-i); -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/diffT3_weight.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/LTHTV/prox_operators/diffT3_weight.m -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/htprod_U.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/TCTV/high-order tensor-SVD Toolbox/htprod_U.m -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/htprod_fft.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shuangxu96/PWRCTV/HEAD/compared_methods/TCTV/high-order tensor-SVD Toolbox/htprod_fft.m -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_2.m: -------------------------------------------------------------------------------- 1 | function [ DX_2 ] = diff_2(X) 2 | %DIFF_2 3 | slice = X(:,1,:)- X(:,end,:); 4 | DX_2 = diff(X,1,2); 5 | DX_2 = cat(2,DX_2,slice); 6 | 7 | 
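The TCTV operators diff_1/diff_2/diff_3 and their *T counterparts (listed next) are circular forward differences along each tensor mode together with their adjoints. A minimal MATLAB sketch, not part of the repository, that numerically checks the adjoint identity <diff_2(X), Y> = <X, diff_2T(Y)> on random data (the tensor size below is arbitrary):

% Hypothetical adjoint check for the circular difference pair diff_2 / diff_2T.
X = randn(8, 9, 5);                      % arbitrary test tensor
Y = randn(8, 9, 5);
lhs = sum(sum(sum(diff_2(X) .* Y)));     % <D2*X, Y>
rhs = sum(sum(sum(X .* diff_2T(Y))));    % <X, D2'*Y>
fprintf('adjoint mismatch: %.2e\n', abs(lhs - rhs));  % should be at roundoff level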
-------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_2T.m: -------------------------------------------------------------------------------- 1 | function [ DX_2T ] = diff_2T(X) 2 | %DIFF_2T 3 | slice = X(:,1,:)- X(:,end,:); 4 | DX_2T = diff(X,1,2); 5 | DX_2T = cat(2,-slice,-DX_2T); -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_3T.m: -------------------------------------------------------------------------------- 1 | function [ DX_3T ] = diff_3T(X) 2 | %DIFF_3T 3 | slice = X(:,:,1)- X(:,:,end); 4 | DX_3T = diff(X,1,3); 5 | DX_3T = cat(3,-slice,-DX_3T); -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_1.m: -------------------------------------------------------------------------------- 1 | function [ DX_1 ] = diff_1(X) 2 | %DIFF_1 3 | slice = X(1,:,:)- X(end,:,:); 4 | DX_1 = diff(X,1,1); 5 | DX_1 = cat(1,DX_1,slice); 6 | 7 | -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_3.m: -------------------------------------------------------------------------------- 1 | function [ DX_3 ] = diff_3(X) 2 | %DIFF_3 3 | slice = X(:,:,1)- X(:,:,end); 4 | DX_3 = diff(X,1,3); 5 | DX_3 = cat(3,DX_3,slice); 6 | 7 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/prox_operators/TV_norm.m: -------------------------------------------------------------------------------- 1 | function y = TV_norm(u) 2 | 3 | 4 | [dx, dy] = gradient_op(u); 5 | 6 | temp = sqrt(abs(dx).^2 + abs(dy).^2); 7 | y = sum(temp(:)); 8 | 9 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_1T.m: -------------------------------------------------------------------------------- 1 | function [ DX_1T ] = diff_1T(X) 2 | %DIFF_1T 3 | slice = X(1,:,:)- X(end,:,:); 4 | DX_1T = diff(X,1,1); 5 | DX_1T = cat(1,-slice,-DX_1T); 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/optimMCL2.m: -------------------------------------------------------------------------------- 1 | function v = optimMCL2(Matrix,W,u) 2 | [d n] = size(Matrix); 3 | TX = W.*Matrix; 4 | U = u*ones(1,n); 5 | U = W.*U; 6 | up = sum(TX.*U); 7 | down = sum(U.*U); 8 | v = up./down; -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/foldi.m: -------------------------------------------------------------------------------- 1 | function i = foldi(n,j,L) 2 | 3 | % Written by Wenjin Qin (qinwenjin2021@163.com) 4 | 5 | i = 1; 6 | for t = j:-1:3 7 | i = i + (n(t)-1)*L(t-1); 8 | end 9 | -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_matrix.m: -------------------------------------------------------------------------------- 1 | function D = diff_matrix(n) 2 | D = zeros(n,n); 3 | d = zeros(1,n); 4 | d(1) = -1; 5 | d(2) = 1; 6 | for j=1:n 7 | D(j,:)=circshift(d,[0,j-1]); 8 | end 9 | end 10 | 11 | 12 | -------------------------------------------------------------------------------- /utils/TV_operator/diff_y.m: -------------------------------------------------------------------------------- 1 | function 
tv_y = diff_y(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfy1 = diff(tenX, 1, 2); 5 | dfy = zeros(sizeD); 6 | dfy(:,1:end-1,:) = dfy1; 7 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 8 | tv_y=dfy(:); 9 | -------------------------------------------------------------------------------- /utils/TV_operator/diff_z.m: -------------------------------------------------------------------------------- 1 | function tv_z = diff_z(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfz1 = diff(tenX, 1, 3); 5 | dfz = zeros(sizeD); 6 | dfz(:,:,1:end-1) = dfz1; 7 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 8 | tv_z=dfz(:); 9 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_y.m: -------------------------------------------------------------------------------- 1 | function tv_y = diff_y(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfy1 = diff(tenX, 1, 2); 5 | dfy = zeros(sizeD); 6 | dfy(:,1:end-1,:) = dfy1; 7 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 8 | tv_y=dfy(:); 9 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_z.m: -------------------------------------------------------------------------------- 1 | function tv_z = diff_z(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfz1 = diff(tenX, 1, 3); 5 | dfz = zeros(sizeD); 6 | dfz(:,:,1:end-1) = dfz1; 7 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 8 | tv_z=dfz(:); 9 | -------------------------------------------------------------------------------- /utils/TV_operator/diff_x.m: -------------------------------------------------------------------------------- 1 | function tv_x = diff_x(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfx1 = diff(tenX, 1, 1); 5 | dfx = zeros(sizeD); 6 | dfx(1:end-1,:,:) = dfx1; 7 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 8 | tv_x=dfx(:); 9 | 10 | -------------------------------------------------------------------------------- /utils/TV_operator/diff_yT.m: -------------------------------------------------------------------------------- 1 | function tv_yT = diff_yT(a, sizeD) 2 | n = prod(sizeD); 3 | tenY = reshape(a(1:n), sizeD); 4 | dfy = diff(tenY, 1, 2); 5 | dfyT = zeros(sizeD); 6 | dfyT(:,1,:) = tenY(:,end,:) - tenY(:,1,:); 7 | dfyT(:,2:end,:) = -dfy; 8 | tv_yT=dfyT(:); -------------------------------------------------------------------------------- /utils/TV_operator/diff_zT.m: -------------------------------------------------------------------------------- 1 | function tv_zT = diff_zT(a, sizeD) 2 | n = prod(sizeD); 3 | tenZ = reshape(a(1:n), sizeD); 4 | dfz = diff(tenZ, 1, 3); 5 | dfzT = zeros(sizeD); 6 | dfzT(:,:,1) = tenZ(:,:,end) - tenZ(:,:,1); 7 | dfzT(:,:,2:end) = -dfz; 8 | tv_zT=dfzT(:); -------------------------------------------------------------------------------- /utils/TV_operator/diff_xT.m: -------------------------------------------------------------------------------- 1 | function tv_xT = diff_xT(a, sizeD) 2 | n = prod(sizeD); 3 | tenX = reshape(a(1: n), sizeD); 4 | dfx = diff(tenX, 1, 1); 5 | dfxT = zeros(sizeD); 6 | dfxT(1,:,:) = tenX(end, :, :) - tenX(1, :, :); % 7 | dfxT(2:end,:,:) = -dfx; 8 | tv_xT=dfxT(:); 9 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/Afunc.m: -------------------------------------------------------------------------------- 1 | function y=Afunc(x) 2 | % y=Afunc(x) 3 | % Testfunction returning a linear operator applied to x. 4 | % Used for testing lansvd. 
5 | % 6 | % y = A*x 7 | 8 | % Rasmus Munk Larsen, DAIMI, 1998 9 | 10 | global A MxV 11 | y = A*x; 12 | MxV = MxV + 1; -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_x.m: -------------------------------------------------------------------------------- 1 | function tv_x = diff_x(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | dfx1 = diff(tenX, 1, 1); 5 | dfx = zeros(sizeD); 6 | dfx(1:end-1,:,:) = dfx1; 7 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 8 | tv_x=dfx(:); 9 | 10 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_yT.m: -------------------------------------------------------------------------------- 1 | function tv_yT = diff_yT(a, sizeD) 2 | n = prod(sizeD); 3 | tenY = reshape(a(1:n), sizeD); 4 | dfy = diff(tenY, 1, 2); 5 | dfyT = zeros(sizeD); 6 | dfyT(:,1,:) = tenY(:,end,:) - tenY(:,1,:); 7 | dfyT(:,2:end,:) = -dfy; 8 | tv_yT=dfyT(:); -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_zT.m: -------------------------------------------------------------------------------- 1 | function tv_zT = diff_zT(a, sizeD) 2 | n = prod(sizeD); 3 | tenZ = reshape(a(1:n), sizeD); 4 | dfz = diff(tenZ, 1, 3); 5 | dfzT = zeros(sizeD); 6 | dfzT(:,:,1) = tenZ(:,:,end) - tenZ(:,:,1); 7 | dfzT(:,:,2:end) = -dfz; 8 | tv_zT=dfzT(:); -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_element.m: -------------------------------------------------------------------------------- 1 | function Eny = diff_element(dim,direction) 2 | d = length(dim); 3 | e = ones(1,d); 4 | element1 = ones(e); 5 | element2 = -1*ones(e); 6 | element = cat(direction, element1, element2); 7 | Eny = ( abs(psf2otf(element, dim)) ).^2 ; 8 | end 9 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/unfoldi.m: -------------------------------------------------------------------------------- 1 | function I = unfoldi(i,j,L) 2 | 3 | % Written by Wenjin Qin (qinwenjin2021@163.com) 4 | 5 | I = ones(1,j); 6 | for t = j:-1:3 7 | I(t) = ceil(i/L(t-1)); 8 | i = i-(I(t)-1)*L(t-1); 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_xT.m: -------------------------------------------------------------------------------- 1 | function tv_xT = diff_xT(a, sizeD) 2 | n = prod(sizeD); 3 | tenX = reshape(a(1: n), sizeD); 4 | dfx = diff(tenX, 1, 1); 5 | dfxT = zeros(sizeD); 6 | dfxT(1,:,:) = tenX(end, :, :) - tenX(1, :, :); % 7 | dfxT(2:end,:,:) = -dfx; 8 | tv_xT=dfxT(:); 9 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/AtAfunc.m: -------------------------------------------------------------------------------- 1 | function y=AtAfunc(x) 2 | % y=AtAfunc(x) 3 | % Testfunction defining a linear operator applied to x. 4 | % Used for testing laneig.
5 | % 6 | % y = A'*(A*x) 7 | 8 | % Rasmus Munk Larsen, DAIMI, 1998 9 | 10 | 11 | global A MxV 12 | y = A'*(A*x); 13 | MxV = MxV + 2; 14 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/Atransfunc.m: -------------------------------------------------------------------------------- 1 | function y=Atransfunc(x) 2 | % y=Atransfunc(x) 3 | % Testfunction returning the transpose of a linear operator applied to x. 4 | % Used for testing lansvd. 5 | % 6 | % y = A'*x 7 | 8 | % Rasmus Munk Larsen, DAIMI, 1998 9 | 10 | global A MxV 11 | y = A'*x; 12 | MxV = MxV + 1; 13 | -------------------------------------------------------------------------------- /utils/TV_operator/myPCG_ss.m: -------------------------------------------------------------------------------- 1 | function x= myPCG_ss(x,X,M2,M3,F,beta,dim) 2 | temp=beta*X(:)+beta*diffT3(F,dim)+M2(:)-diffT3(M3,dim); 3 | [x, ~] = pcg(@(x) Fun(x), temp, 1e-4,1000,[],[],x); 4 | function y = Fun(x) 5 | y=beta*x+beta*diffT3(diff3(x,dim),dim); 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/prox_operators/gradient_op.m: -------------------------------------------------------------------------------- 1 | function [dx, dy] = gradient_op(I, weights_dx, weights_dy) 2 | 3 | dx = [I(2:end, :)-I(1:end-1, :) ; zeros(1, size(I, 2))]; 4 | dy = [I(:, 2:end)-I(:, 1:end-1) , zeros(size(I, 1), 1)]; 5 | 6 | if nargin>1 7 | dx = dx .* weights_dx; 8 | dy = dy .* weights_dy; 9 | end 10 | 11 | end -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/myPCG_ss.m: -------------------------------------------------------------------------------- 1 | function x= myPCG_ss(x,X,M2,M3,F,beta,dim) 2 | temp=beta*X(:)+beta*diffT3(F,dim)+M2(:)-diffT3(M3,dim); 3 | [x, ~] = pcg(@(x) Fun(x), temp, 1e-4,1000,[],[],x); 4 | function y = Fun(x) 5 | y=beta*x+beta*diffT3(diff3(x,dim),dim); 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/prox_operators/div_op.m: -------------------------------------------------------------------------------- 1 | function I = div_op(dx, dy, weights_dx, weights_dy) 2 | 3 | if nargin > 2 4 | dx = dx .* conj(weights_dx); 5 | dy = dy .* conj(weights_dy); 6 | end 7 | 8 | I = [dx(1, :) ; dx(2:end-1, :)-dx(1:end-2, :) ; -dx(end-1, :)]; 9 | I = I + [dy(:, 1) , dy(:, 2:end-1)-dy(:, 1:end-2) , -dy(:, end-1)]; 10 | 11 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/diff_element_order2.m: -------------------------------------------------------------------------------- 1 | function Eny = diff_element_order2(dim,direction) 2 | d = length(dim); 3 | e = ones(1,d); 4 | element1 = ones(e); 5 | element2 = -2*ones(e); 6 | element3 = 1*ones(e); 7 | element = cat(direction, element1, element2, element3); 8 | Eny = ( abs(psf2otf(element, dim)) ).^2 ; 9 | end 10 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/myPCG1.m: -------------------------------------------------------------------------------- 1 | function x= myPCG1(x,Ysum,M2,F,Gamma,beta,dim) 2 | temp=beta*Ysum(:)+M2(:)+beta*diffT3(F,dim)-diffT3(Gamma,dim); 3 | [x, ~] = pcg(@(x) Fun(x), temp, 1e-4,1000,[],[],x); 4 | function y = Fun(x) 5 | y=beta*x+beta*diffT3(diff3(x,dim),dim); 6 | end 7 | end 8 | 
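myPCG1 above solves the normal equations (beta*I + beta*D'*D) x = beta*Ysum(:) + M2(:) + beta*D'*F - D'*Gamma with MATLAB's pcg, where D = diff3 and D' = diffT3 from the same folder. A hedged usage sketch follows; all sizes and values are invented for illustration and are not taken from the LTHTV/LRTDTV solvers:

% Hypothetical call showing the expected argument shapes; not a repository script.
dim   = [20 20 10];              % height x width x bands of a test cube
Ysum  = randn(dim);              % data-fidelity tensor
M2    = randn(dim);              % multiplier attached to the low-rank term
F     = randn(3*prod(dim), 1);   % auxiliary gradient variable, stacked as [Dx; Dy; Dz]
Gamma = randn(3*prod(dim), 1);   % multiplier attached to F
beta  = 0.1;                     % ADMM penalty parameter
x0    = zeros(prod(dim), 1);     % warm start passed to pcg
x     = myPCG1(x0, Ysum, M2, F, Gamma, beta, dim);
X     = reshape(x, dim);         % estimate back in tensor form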
-------------------------------------------------------------------------------- /utils/prox_nn.m: -------------------------------------------------------------------------------- 1 | function x = prox_nn(input, gamma, rank) 2 | if nargin<=2 3 | rank=inf; 4 | end 5 | [uu,sigma,vv] = svdecon(input); 6 | sigma = diag(sigma); 7 | svp = min(length(find(sigma>gamma)),rank); 8 | if svp>=1 9 | sigma = sigma(1:svp)-gamma; 10 | else 11 | svp = 1; 12 | sigma = 0; 13 | end 14 | x = uu(:,1:svp)*diag(sigma)*vv(:,1:svp)'; 15 | end -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/Cfunc.m: -------------------------------------------------------------------------------- 1 | function y = Cfunc(x) 2 | % y=Cfunc(x) 3 | % Testfunction defining a linear operator applied to x. 4 | % Used for testing laneig. 5 | % 6 | % y = [ 0 A ] * x 7 | % [ A' 0 ] 8 | 9 | % Rasmus Munk Larsen, DAIMI, 1998 10 | 11 | 12 | global A MxV 13 | [m n] = size(A); 14 | y = [A*x(m+1:end,:); A'*x(1:m,:)]; 15 | MxV = MxV + 2; -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/my_ttm.m: -------------------------------------------------------------------------------- 1 | function X = my_ttm(X,V,dims,sizeOld,sizeNew,Ndim) 2 | % tensor times matrix,V is cell 3 | for n = dims(1:end) 4 | order = [n,1:n-1,n+1:Ndim]; 5 | temp = reshape(permute(X,order), sizeOld(n), []); 6 | temp = V{n}*temp; 7 | sizeOld(n) = sizeNew(n); 8 | X = ipermute(reshape(temp, [sizeOld(n),sizeOld(1:n-1),sizeOld(n+1:Ndim)]),order); 9 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/PSNR.m: -------------------------------------------------------------------------------- 1 | function psnr = PSNR(Xfull,Xrecover,maxP) 2 | % 3 | % Written by Wenjin Qin (qinwenjin2021@163.com) 4 | % 5 | 6 | Xrecover = max(0,Xrecover); 7 | Xrecover = min(maxP,Xrecover); 8 | 9 | Xfull = max(0,Xfull); 10 | Xfull = min(maxP,Xfull); 11 | 12 | MSE = (norm(Xfull(:)-Xrecover(:))^2)/ numel(Xrecover); 13 | psnr = 10*log10(maxP^2/MSE); -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/myPCG1_w.m: -------------------------------------------------------------------------------- 1 | function x= myPCG1_w(x,Ysum,M2,F,Gamma,weight,beta,dim) 2 | temp=beta*Ysum(:)+M2(:)-diffT3_weight(Gamma,dim,weight)+beta*diffT3_weight(F,dim,weight); 3 | [x, ~] = pcg(@(x) Fun(x),temp,1e-4,1000,[],[],x); 4 | function y = Fun(x) 5 | y=beta*x+beta*diffT3_weight(diff3_weight(x,dim,weight),dim,weight); 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/Weight_NMoG.m: -------------------------------------------------------------------------------- 1 | function W = Weight_NMoG(model,sizeD) 2 | c = model.c; 3 | d = model.d; 4 | tau = c./d; 5 | R = model.R; 6 | [k,B] = size(tau); 7 | W = 0.5*bsxfun(@times,tau(1,:),squeeze(R(:,1,:))); 8 | if k>1 9 | for j = 2:k 10 | W = W + 0.5*bsxfun(@times,tau(j,:),squeeze(R(:,j,:))); 11 | end 12 | end 13 | W = reshape(W,sizeD); 14 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/NMoG2Weight.m: -------------------------------------------------------------------------------- 1 | 2 | function W = NMoG2Weight(model) 3 | c = model.c; 4 | d = model.d; 5 | tau = c./d; 6 | R = model.R; 7 | [N, k, B] 
= size(R); 8 | 9 | tau = reshape(tau,k,B); 10 | W = zeros(N,B); 11 | for j = 1:k 12 | W = W + 0.5*bsxfun(@times,tau(j,:),squeeze(R(:,j,:))); 13 | end 14 | 15 | [v,~] = sort(W(:)); 16 | Wmax = v(ceil(0.9*N*B)); 17 | W = min(W,Wmax); -------------------------------------------------------------------------------- /utils/clamp.m: -------------------------------------------------------------------------------- 1 | function y = clamp(x,a,b) 2 | 3 | % clamp - clamp a value 4 | % 5 | % y = clamp(x,a,b); 6 | % 7 | % Default is [a,b]=[0,1]. 8 | % 9 | % Copyright (c) 2004 Gabriel Peyr� 10 | 11 | if nargin<2 12 | a = 0; 13 | end 14 | if nargin<3 15 | b = 1; 16 | end 17 | 18 | if iscell(x) 19 | for i=1:length(x) 20 | y{i} = clamp(x{i},a,b); 21 | end 22 | return; 23 | end 24 | 25 | y = max(x,a); 26 | y = min(y,b); -------------------------------------------------------------------------------- /utils/visualization/myshow.m: -------------------------------------------------------------------------------- 1 | function myshow(X, scale, band) 2 | 3 | if nargin==1 4 | band = NaN; 5 | scale = 0.01; 6 | end 7 | if nargin==2 8 | band = NaN; 9 | end 10 | 11 | C = size(X,3); 12 | 13 | % figure() 14 | if C == 3 || C==1 15 | imshow(X) 16 | else 17 | if isnan(band) 18 | band = [C, uint8(C*0.5), uint8(C*0.1)]; 19 | band(band<1) = 1; 20 | end 21 | rsshow(X(:,:,band), scale) 22 | end 23 | -------------------------------------------------------------------------------- /utils/TV_operator/diff2.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff2(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | 8 | dfx = zeros(sizeD); 9 | dfy = zeros(sizeD); 10 | dfx(1:end-1,:,:) = dfx1; 11 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 12 | dfy(:,1:end-1,:) = dfy1; 13 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 14 | 15 | diff_x = [dfx(:); dfy(:)]; 16 | 17 | end 18 | 19 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/R_initialization.m: -------------------------------------------------------------------------------- 1 | function R = R_initialization(X, k) 2 | n = size(X, 2); 3 | idx = randsample(n,k); 4 | m = X(:,idx); 5 | [~,label] = max(bsxfun(@minus,m'*X,dot(m,m,1)'/2),[],1); 6 | [u,~,label] = unique(label); 7 | while k ~= length(u) 8 | idx = randsample(n,k); 9 | m = X(:,idx); 10 | [~,label] = max(bsxfun(@minus,m'*X,dot(m,m,1)'/2),[],1); 11 | [u,~,label] = unique(label); 12 | end 13 | R = full(sparse(1:n,label,1,n,k,n)); -------------------------------------------------------------------------------- /compared_methods/rctv-main/Normalize.m: -------------------------------------------------------------------------------- 1 | function norm_tensor = Normalize(orginal_tensor) 2 | [m,n,p] = size(orginal_tensor); 3 | norm_tensor = zeros([m,n,p]); 4 | for band =1:p 5 | tmp = orginal_tensor(:,:,band); 6 | tmp = (tmp-min(tmp(:)))/(max(tmp(:)) - min(tmp(:))); 7 | norm_tensor(:,:,band) = tmp; 8 | end 9 | 10 | % max_value = max(orginal_tensor(:)); 11 | % min_value = min(orginal_tensor(:)); 12 | % norm_tensor = (max_value-orginal_tensor)/(max_value-min_value); 13 | end -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff2.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff2(x,sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = 
diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | 8 | dfx = zeros(sizeD); 9 | dfy = zeros(sizeD); 10 | dfx(1:end-1,:,:) = dfx1; 11 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 12 | dfy(:,1:end-1,:) = dfy1; 13 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 14 | 15 | diff_x = [dfx(:); dfy(:)]; 16 | 17 | end 18 | 19 | -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/porder_diff.m: -------------------------------------------------------------------------------- 1 | function DX = porder_diff(X, direction) 2 | %compute the difference tensor (gradient map) along cerrtain direction 3 | dim = size(X); 4 | index_first = repmat({':'},1,ndims(X)); 5 | index_first(direction) = {1}; 6 | index_end = repmat({':'},1,ndims(X)); 7 | index_end(direction) = {dim(direction)}; 8 | 9 | slice = X(index_first{:}) - X(index_end{:}); 10 | DX = diff(X,1,direction); 11 | DX = cat(direction,DX,slice); 12 | 13 | 14 | -------------------------------------------------------------------------------- /compared_methods/NGmeet/Block_matching.m: -------------------------------------------------------------------------------- 1 | function [Init_Index] = Block_matching(X, Par,Neighbor_arr,Num_arr, SelfIndex_arr) 2 | L = length(Num_arr); 3 | Init_Index = zeros(Par.patnum,L); 4 | 5 | for i = 1 : L 6 | Patch = X(:,SelfIndex_arr(i)); 7 | Neighbors = X(:,Neighbor_arr(1:Num_arr(i),i)); 8 | Dist = sum((repmat(Patch,1,size(Neighbors,2))-Neighbors).^2); 9 | [val, index] = sort(Dist); 10 | Init_Index(:,i)=Neighbor_arr(index(1:Par.patnum),i); 11 | end 12 | -------------------------------------------------------------------------------- /compared_methods/TCTV/differential operators/porder_diff_T.m: -------------------------------------------------------------------------------- 1 | function DX_T = porder_diff_T(X, direction) 2 | %compute the difference tensor (gradient map) along cerrtain direction 3 | dim = size(X); 4 | index_first = repmat({':'},1,ndims(X)); 5 | index_first(direction) = {1}; 6 | index_end = repmat({':'},1,ndims(X)); 7 | index_end(direction) = {dim(direction)}; 8 | 9 | slice = X(index_first{:}) - X(index_end{:}); 10 | DX = diff(X,1,direction); 11 | DX_T = cat(direction,-slice,-DX); 12 | 13 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/Block_matching.m: -------------------------------------------------------------------------------- 1 | function [Init_Index] = Block_matching(X, Par,Neighbor_arr,Num_arr, SelfIndex_arr) 2 | L = length(Num_arr); 3 | Init_Index = zeros(Par.patnum,L); 4 | 5 | for i = 1 : L 6 | Patch = X(:,SelfIndex_arr(i)); 7 | Neighbors = X(:,Neighbor_arr(1:Num_arr(i),i)); 8 | Dist = sum((repmat(Patch,1,size(Neighbors,2))-Neighbors).^2); 9 | [val, index] = sort(Dist); 10 | Init_Index(:,i)=Neighbor_arr(index(1:Par.patnum),i); 11 | end 12 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/nc.m: -------------------------------------------------------------------------------- 1 | function N =nc(I,j,n) 2 | 3 | % Written by Wenjin Qin (qinwenjin2021@163.com) 4 | 5 | N = zeros(1,length(n)); 6 | if j >= 3 7 | for t = 3 : j-1 8 | if I(t) == 1 9 | N(t) = 1; 10 | else 11 | N(t) = n(t)-I(t)+2; 12 | end 13 | end 14 | end 15 | 16 | N(j) = n(j)-I(j)+2; 17 | 18 | if j3) tensor 4 | % Written by Wenjin Qin (qinwenjin2021@163.com) 5 | 6 | 7 | p = length(size(A)); 8 | n = size(A); m= size(A); 9 | m(2)=n(1); 
m(1)=n(2); 10 | L=1; 11 | for i = 3:p 12 | L = L * n(i); 13 | m(i)=n(i); 14 | end 15 | A_Linear = lineartransform(A,Mat_L); 16 | A_Tran=zeros(m); 17 | for j = 1 : L 18 | A_Tran(:,:,j) = (A_Linear(:,:,j))'; 19 | end 20 | 21 | B=inverselineartransform(A_Tran,Mat_L); 22 | 23 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/logsumexp.m: -------------------------------------------------------------------------------- 1 | function s = logsumexp(x, dim) 2 | % Compute log(sum(exp(x),dim)) while avoiding numerical underflow. 3 | % By default dim = 1 (columns). 4 | % Written by Michael Chen (sth4nth@gmail.com). 5 | if nargin == 1, 6 | % Determine which dimension sum will use 7 | dim = find(size(x)~=1,1); 8 | if isempty(dim), dim = 1; end 9 | end 10 | 11 | % subtract the largest in each column 12 | y = max(x,[],dim); 13 | x = bsxfun(@minus,x,y); 14 | s = y + log(sum(exp(x),dim)); 15 | i = find(~isfinite(y)); 16 | if ~isempty(i) 17 | s(i) = y(i); 18 | end 19 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/README.txt: -------------------------------------------------------------------------------- 1 | The high-order tensor SVD toolbox with any linear invertible transform L is attributed by: 2 | Wenjin, Qin and Hailin, Wang and Feng, Zhang and Jianjun, Wang et.al., 3 | If you use any of these tools, please cite: 4 | @ARTICLE{9730793, 5 | author={Qin, Wenjin and Wang, Hailin and Zhang, Feng and Wang, Jianjun and Luo, Xin and Huang, Tingwen}, 6 | journal={IEEE Transactions on Image Processing}, 7 | title={Low-Rank High-Order Tensor Completion With Applications in Visual Data}, 8 | year={2022}, 9 | volume={31}, 10 | number={}, 11 | pages={2433-2448}, 12 | doi={10.1109/TIP.2022.3155949}} -------------------------------------------------------------------------------- /utils/TV_operator/diff3.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff3(x, sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | dfz1 = diff(tenX, 1, 3); 8 | 9 | dfx = zeros(sizeD); 10 | dfy = zeros(sizeD); 11 | dfz = zeros(sizeD); 12 | dfx(1:end-1,:,:) = dfx1; 13 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 14 | dfy(:,1:end-1,:) = dfy1; 15 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 16 | dfz(:,:,1:end-1) = dfz1; 17 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 18 | 19 | diff_x = [dfx(:); dfy(:);dfz(:)]; 20 | % diff_x = [abs(dfx(:)); abs(dfy(:));abs(dfz(:))]; 21 | 22 | end 23 | 24 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/diff3.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff3(x, sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | dfz1 = diff(tenX, 1, 3); 8 | 9 | dfx = zeros(sizeD); 10 | dfy = zeros(sizeD); 11 | dfz = zeros(sizeD); 12 | dfx(1:end-1,:,:) = dfx1; 13 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 14 | dfy(:,1:end-1,:) = dfy1; 15 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 16 | dfz(:,:,1:end-1) = dfz1; 17 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 18 | 19 | diff_x = [dfx(:); dfy(:);dfz(:)]; 20 | % diff_x = [abs(dfx(:)); abs(dfy(:));abs(dfz(:))]; 21 | 22 | end 23 | 24 | -------------------------------------------------------------------------------- 
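Because diff3/diffT3 (and the weighted variants above) use periodic boundary differences, the composite operator D'*D they implement is diagonalized by the 3-D FFT; this is the same property TCTV's diff_element.m encodes through psf2otf, and it is what keeps these gradient subproblems cheap. A small sketch, assuming diff3, diffT3 and psf2otf (Image Processing Toolbox) are on the path, that verifies fftn(D'*D*X) = (Ex+Ey+Ez).*fftn(X); it is an illustration, not code from the repository:

% Hypothetical verification of the FFT diagonalization of D'*D.
dim = [16 16 8];
X   = randn(dim);
Ex  = abs(psf2otf([1; -1], dim)).^2;                    % |frequency response|^2 along dim 1
Ey  = abs(psf2otf([1, -1], dim)).^2;                    % along dim 2
Ez  = abs(psf2otf(reshape([1 -1], 1, 1, 2), dim)).^2;   % along dim 3
lhs = fftn(reshape(diffT3(diff3(X(:), dim), dim), dim));
rhs = (Ex + Ey + Ez) .* fftn(X);
fprintf('max diagonalization error: %.2e\n', max(abs(lhs(:) - rhs(:))));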
/compared_methods/rctv-main/TV_operator/diff3.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff3(x, sizeD) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | dfz1 = diff(tenX, 1, 3); 8 | 9 | dfx = zeros(sizeD); 10 | dfy = zeros(sizeD); 11 | dfz = zeros(sizeD); 12 | dfx(1:end-1,:,:) = dfx1; 13 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 14 | dfy(:,1:end-1,:) = dfy1; 15 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 16 | dfz(:,:,1:end-1) = dfz1; 17 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 18 | 19 | diff_x = [dfx(:); dfy(:);dfz(:)]; 20 | % diff_x = [abs(dfx(:)); abs(dfy(:));abs(dfz(:))]; 21 | 22 | end 23 | 24 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/inverselineartransform.m: -------------------------------------------------------------------------------- 1 | function A = inverselineartransform(A,L) 2 | 3 | % Compute the result of inverse linear transform L on tensor A 4 | % Written by Wenjin Qin (qinwenjin2021@163.com) 5 | 6 | 7 | p = length(size(A)); 8 | n = zeros(1,p); 9 | for i = 1:p 10 | n(i) = size(A,i); 11 | end 12 | 13 | 14 | if strcmp(L,'fft') 15 | for i = p:-1:3 16 | A = ifft(A,[],i); 17 | end 18 | elseif strcmp(L,'dct') 19 | for i = p:-1:3 20 | A = idct(A,[],i); 21 | end 22 | elseif iscell(L) 23 | for i = p:-1:3 24 | Mat_L=L{i-2}; 25 | A = tmprod(A,inv(Mat_L),i); 26 | end 27 | end -------------------------------------------------------------------------------- /compared_methods/rctv-main/demo.m: -------------------------------------------------------------------------------- 1 | clear all;clc; 2 | load('Simu_indian.mat') 3 | %load('pure_DCmall.mat') 4 | Ohsi = Normalize(Ori_H); 5 | Nhsi = Ohsi; 6 | [M,N,p] = size(Ohsi); 7 | 8 | noiselevel = 0.075*rand(p,1); 9 | ratio = 0.15*rand(p,1); 10 | %% Gaussian noise 11 | for i = 1:p 12 | Nhsi(:,:,i)=Ohsi(:,:,i) + noiselevel(i)*randn(M,N); 13 | end 14 | for i = 1:p 15 | Nhsi(:,:,i)=imnoise(Nhsi(:,:,i),'salt & pepper',ratio(i)); 16 | end 17 | 18 | r=13; 19 | beta = 50; 20 | lambda = 1;% 5,0.5 21 | tau = [0.8,0.8];% need to fine tune 22 | k=4; 23 | tic; 24 | output_image = MoG_RBTV(Nhsi, beta,lambda, tau, r, k); 25 | time = toc; 26 | [mpsnr,mssim,ergas]=msqia(Ohsi, output_image) -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/diff3_weight.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff3_weight(x,sizeD,weight) 2 | 3 | tenX = reshape(x, sizeD); 4 | 5 | dfx1 = diff(tenX, 1, 1); 6 | dfy1 = diff(tenX, 1, 2); 7 | dfz1 = diff(tenX, 1, 3); 8 | 9 | dfx = zeros(sizeD); 10 | dfy = zeros(sizeD); 11 | dfz = zeros(sizeD); 12 | dfx(1:end-1,:,:) = dfx1; 13 | dfx(end,:,:) = tenX(1,:,:) - tenX(end,:,:); 14 | dfy(:,1:end-1,:) = dfy1; 15 | dfy(:,end,:) = tenX(:,1,:) - tenX(:,end,:); 16 | if weight(3)~=0 17 | dfz(:,:,1:end-1) = dfz1; 18 | dfz(:,:,end) = tenX(:,:,1) - tenX(:,:,end); 19 | end 20 | 21 | diff_x = [weight(1)*dfx(:);weight(2)*dfy(:);weight(3)*dfz(:)]; 22 | 23 | end 24 | 25 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/choosvd.m: -------------------------------------------------------------------------------- 1 | function y = choosvd( n, d) 2 | 3 | if n <= 100 4 | if d / n <= 0.02 5 | y = 1; 6 | else 7 | y = 0; 8 | end 9 | elseif n <= 200 10 | if d / n <= 
0.06 11 | y = 1; 12 | else 13 | y = 0; 14 | end 15 | elseif n <= 300 16 | if d / n <= 0.26 17 | y = 1; 18 | else 19 | y = 0; 20 | end 21 | elseif n <= 400 22 | if d / n <= 0.28 23 | y = 1; 24 | else 25 | y = 0; 26 | end 27 | elseif n <= 500 28 | if d / n <= 0.34 29 | y = 1; 30 | else 31 | y = 0; 32 | end 33 | else 34 | if d / n <= 0.38 35 | y = 1; 36 | else 37 | y = 0; 38 | end 39 | end -------------------------------------------------------------------------------- /utils/TV_operator/diffT3.m: -------------------------------------------------------------------------------- 1 | function diffT_a = diffT3(a, sizeD) 2 | 3 | N = prod(sizeD); 4 | tenX = reshape(a(1: N), sizeD); 5 | tenY = reshape(a((N+1):2*N), sizeD); 6 | tenZ = reshape(a((2*N+1):3*N), sizeD); 7 | dfx = diff(tenX, 1, 1); 8 | dfy = diff(tenY, 1, 2); 9 | dfz = diff(tenZ, 1, 3); 10 | 11 | dfxT = zeros(sizeD); 12 | dfyT = zeros(sizeD); 13 | dfzT = zeros(sizeD); 14 | dfxT(1,:,:) = tenX(end, :, :) - tenX(1, :, :); % 15 | dfxT(2:end,:,:) = -dfx; 16 | dfyT(:,1,:) = tenY(:,end,:) - tenY(:,1,:); 17 | dfyT(:,2:end,:) = -dfy; 18 | dfzT(:,:,1) = tenZ(:,:,end) - tenZ(:,:,1); 19 | dfzT(:,:,2:end) = -dfz; 20 | 21 | diffT_a = dfxT + dfyT+dfzT ; 22 | % diffT_a = abs(dfxT) + abs(dfyT)+abs(dfzT) ; 23 | diffT_a = diffT_a(:); 24 | end -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/pythag.m: -------------------------------------------------------------------------------- 1 | function x = pythag(y,z) 2 | %PYTHAG Computes sqrt( y^2 + z^2 ). 3 | % 4 | % x = pythag(y,z) 5 | % 6 | % Returns sqrt(y^2 + z^2) but is careful to scale to avoid overflow. 7 | 8 | % Christian H. Bischof, Argonne National Laboratory, 03/31/89. 9 | 10 | [m n] = size(y); 11 | if m>1 | n>1 12 | y = y(:); z=z(:); 13 | rmax = max(abs([y';z']))'; 14 | id=find(rmax==0); 15 | if length(id)>0 16 | rmax(id) = 1; 17 | x = rmax.*sqrt((y./rmax).^2 + (z./rmax).^2); 18 | x(id)=0; 19 | else 20 | x = rmax.*sqrt((y./rmax).^2 + (z./rmax).^2); 21 | end 22 | x = reshape(x,m,n); 23 | else 24 | rmax = max(abs([y;z])); 25 | if (rmax==0) 26 | x = 0; 27 | else 28 | x = rmax*sqrt((y/rmax)^2 + (z/rmax)^2); 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /utils/TV_operator/diff_dT2.m: -------------------------------------------------------------------------------- 1 | function diffT_a = diff_dT2(a, sizeD) 2 | 3 | n = prod(sizeD); 4 | Dxx = reshape(a(1: n), sizeD); 5 | Dyx = reshape(a((n+1):2*n), sizeD); 6 | Dyy = reshape(a((2*n+1):3*n), sizeD); 7 | 8 | dfxx = diff(Dxx, 1, 1); 9 | dfyx = diff(Dyx, 1, 1); 10 | dfyy = diff(Dyy, 1, 2); 11 | 12 | dfxxT = zeros(sizeD); 13 | dfyxT = zeros(sizeD); 14 | dfyyT = zeros(sizeD); 15 | dfxxT(1,:,:) = Dxx(end, :, :) - Dxx(1, :, :); % 16 | dfxxT(2:end,:,:) = -dfxx; 17 | dfyxT(1,:,:) = Dyx(end, :, :) - Dyx(1, :, :); % 18 | dfyxT(2:end,:,:) = -dfyx; 19 | dfyyT(:,1,:) = Dyy(:,end,:) - Dyy(:,1,:); 20 | dfyyT(:,2:end,:) = -dfyy; 21 | 22 | 23 | diffT_a1 = dfxxT + dfyxT; 24 | diffT_a2 = dfyxT + dfyyT; 25 | diffT_a = [diffT_a1(:);diffT_a2(:)]; 26 | end -------------------------------------------------------------------------------- /compared_methods/LTHTV/prox_operators/diffT3.m: -------------------------------------------------------------------------------- 1 | function diffT_a = diffT3(a, sizeD) 2 | 3 | N = prod(sizeD); 4 | tenX = reshape(a(1: N), sizeD); 5 | tenY = reshape(a((N+1):2*N), sizeD); 6 | tenZ = reshape(a((2*N+1):3*N), sizeD); 7 | dfx = diff(tenX, 1, 
1); 8 | dfy = diff(tenY, 1, 2); 9 | dfz = diff(tenZ, 1, 3); 10 | 11 | dfxT = zeros(sizeD); 12 | dfyT = zeros(sizeD); 13 | dfzT = zeros(sizeD); 14 | dfxT(1,:,:) = tenX(end, :, :) - tenX(1, :, :); % 15 | dfxT(2:end,:,:) = -dfx; 16 | dfyT(:,1,:) = tenY(:,end,:) - tenY(:,1,:); 17 | dfyT(:,2:end,:) = -dfy; 18 | dfzT(:,:,1) = tenZ(:,:,end) - tenZ(:,:,1); 19 | dfzT(:,:,2:end) = -dfz; 20 | 21 | diffT_a = dfxT + dfyT+dfzT ; 22 | % diffT_a = abs(dfxT) + abs(dfyT)+abs(dfzT) ; 23 | diffT_a = diffT_a(:); 24 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/WNNM_yang.m: -------------------------------------------------------------------------------- 1 | 2 | 3 | function [X] = WNNM_yang( Y, C, NSig) 4 | % solve min 1/Nsig|| y-x ||_F^2 + ||x||_w* 5 | % ||x||_w* = sum_i {w_i*sigma_i(x)} 6 | % sigma_i(x) is the singular value of x 7 | [U,SigmaY,V] = svd(full(Y),'econ'); 8 | PatNum = numel(Y)/size(U,2); 9 | TempC = C*sqrt(PatNum)*2*NSig^2; 10 | [SigmaX,svp] = ClosedWNNM(SigmaY,TempC,eps); 11 | X = U(:,1:svp)*diag(SigmaX)*V(:,1:svp)'; 12 | 13 | 14 | function [SigmaX,svp]=ClosedWNNM(SigmaY,C,oureps) 15 | temp=(SigmaY-oureps).^2 - 4*(C-oureps*SigmaY); 16 | ind=find (temp>0); 17 | svp=length(ind); 18 | SigmaX=max(SigmaY(ind)-oureps+sqrt(temp(ind)),0)/2; 19 | if svp ==0 20 | svp = 1; 21 | SigmaX(1) = SigmaY(1)-oureps; 22 | end 23 | 24 | 25 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diffT3.m: -------------------------------------------------------------------------------- 1 | function diffT_a = diffT3(a, sizeD) 2 | 3 | N = prod(sizeD); 4 | tenX = reshape(a(1: N), sizeD); 5 | tenY = reshape(a((N+1):2*N), sizeD); 6 | tenZ = reshape(a((2*N+1):3*N), sizeD); 7 | dfx = diff(tenX, 1, 1); 8 | dfy = diff(tenY, 1, 2); 9 | dfz = diff(tenZ, 1, 3); 10 | 11 | dfxT = zeros(sizeD); 12 | dfyT = zeros(sizeD); 13 | dfzT = zeros(sizeD); 14 | dfxT(1,:,:) = tenX(end, :, :) - tenX(1, :, :); % 15 | dfxT(2:end,:,:) = -dfx; 16 | dfyT(:,1,:) = tenY(:,end,:) - tenY(:,1,:); 17 | dfyT(:,2:end,:) = -dfy; 18 | dfzT(:,:,1) = tenZ(:,:,end) - tenZ(:,:,1); 19 | dfzT(:,:,2:end) = -dfz; 20 | 21 | diffT_a = dfxT + dfyT+dfzT ; 22 | % diffT_a = abs(dfxT) + abs(dfyT)+abs(dfzT) ; 23 | diffT_a = diffT_a(:); 24 | end -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_dT2.m: -------------------------------------------------------------------------------- 1 | function diffT_a = diff_dT2(a, sizeD) 2 | 3 | n = prod(sizeD); 4 | Dxx = reshape(a(1: n), sizeD); 5 | Dyx = reshape(a((n+1):2*n), sizeD); 6 | Dyy = reshape(a((2*n+1):3*n), sizeD); 7 | 8 | dfxx = diff(Dxx, 1, 1); 9 | dfyx = diff(Dyx, 1, 1); 10 | dfyy = diff(Dyy, 1, 2); 11 | 12 | dfxxT = zeros(sizeD); 13 | dfyxT = zeros(sizeD); 14 | dfyyT = zeros(sizeD); 15 | dfxxT(1,:,:) = Dxx(end, :, :) - Dxx(1, :, :); % 16 | dfxxT(2:end,:,:) = -dfxx; 17 | dfyxT(1,:,:) = Dyx(end, :, :) - Dyx(1, :, :); % 18 | dfyxT(2:end,:,:) = -dfyx; 19 | dfyyT(:,1,:) = Dyy(:,end,:) - Dyy(:,1,:); 20 | dfyyT(:,2:end,:) = -dfyy; 21 | 22 | 23 | diffT_a1 = dfxxT + dfyxT; 24 | diffT_a2 = dfyxT + dfyyT; 25 | diffT_a = [diffT_a1(:);diffT_a2(:)]; 26 | end -------------------------------------------------------------------------------- /compared_methods/NGmeet/Patch2Cub.m: -------------------------------------------------------------------------------- 1 | function [E_Img,W_Img] = Patch2Cub( ImPat, WPat, PatSize, ImageH, 
ImageW,ImageB ) 2 | TempR = ImageH-PatSize+1; 3 | TempC = ImageW-PatSize+1; 4 | TempOffsetR = 1:TempR; 5 | TempOffsetC = 1:TempC; 6 | 7 | E_Img = zeros(ImageH,ImageW,ImageB); 8 | W_Img = zeros(ImageH,ImageW,ImageB); 9 | k = 0; 10 | for o = 1:ImageB 11 | for i = 1:PatSize 12 | for j = 1:PatSize 13 | k = k+1; 14 | E_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) = E_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) + reshape( ImPat(k,:)', [TempR TempC]); 15 | W_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) = W_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) + reshape( WPat(k,:)', [TempR TempC]); 16 | end 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/Patch2Cub.m: -------------------------------------------------------------------------------- 1 | function [E_Img,W_Img] = Patch2Cub( ImPat, WPat, PatSize, ImageH, ImageW,ImageB ) 2 | TempR = ImageH-PatSize+1; 3 | TempC = ImageW-PatSize+1; 4 | TempOffsetR = 1:TempR; 5 | TempOffsetC = 1:TempC; 6 | 7 | E_Img = zeros(ImageH,ImageW,ImageB); 8 | W_Img = zeros(ImageH,ImageW,ImageB); 9 | k = 0; 10 | for o = 1:ImageB 11 | for i = 1:PatSize 12 | for j = 1:PatSize 13 | k = k+1; 14 | E_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) = E_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) + reshape( ImPat(k,:)', [TempR TempC]); 15 | W_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) = W_Img(TempOffsetR-1+i,TempOffsetC-1+j,o) + reshape( WPat(k,:)', [TempR TempC]); 16 | end 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/lineartransform.m: -------------------------------------------------------------------------------- 1 | function A = lineartransform(A,L) 2 | 3 | % Compute the result of invertible linear transform L on tensor A 4 | 5 | % Written by Wenjin Qin (qinwenjin2021@163.com) 6 | 7 | 8 | 9 | 10 | p = length(size(A)); 11 | n = zeros(1,p); 12 | for i = 1:p 13 | n(i) = size(A,i); 14 | end 15 | 16 | 17 | 18 | if strcmp(L,'fft') 19 | for i = 3:p 20 | A = fft(A,[],i); 21 | end 22 | elseif strcmp(L,'dct') 23 | for i = 3:p 24 | A = dct(A,[],i); 25 | end 26 | elseif iscell(L) 27 | for i=1:(p-2) 28 | [l1,l2] = size(L{i}); 29 | if l1 ~= l2 || l1 ~= n(i+2) 30 | error('Inner tensor dimensions must agree.'); 31 | else 32 | A = tmprod(A,L{i},i+2); 33 | end 34 | end 35 | 36 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/htsvd.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = htsvd(X,UU) 2 | 3 | 4 | % Order-d tensors Singular Value Decomposition under generalized invertible linear transform 5 | % Written by Wenjin Qin (qinwenjin2021@163.com) 6 | 7 | Ndim = length(size(X)); 8 | Nway= size(X); 9 | 10 | s1 = Nway; s1(2) = Nway(1); 11 | s2 = Nway; s2(1) = Nway(2); 12 | 13 | U=zeros(s1);S=zeros(Nway);V=zeros(s2); 14 | L = ones(1,Ndim); 15 | for i = 3:Ndim 16 | % X = nmodeproduct(X,UU{i-2},i); 17 | X = tmprod(X,UU{i-2},i); 18 | L(i) = L(i-1) * Nway(i); 19 | end 20 | 21 | for i = 1 : L(Ndim) 22 | [U(:,:,i),S(:,:,i),V(:,:,i)] = svd(X(:,:,i)); 23 | end 24 | 25 | 26 | for j = Ndim:-1:3 27 | U = tmprod(U,inv(UU{j-2}),j); 28 | S = tmprod(S,inv(UU{j-2}),j); 29 | V = tmprod(V,inv(UU{j-2}),j); 30 | end 31 | 32 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/InitialPara.m: 
-------------------------------------------------------------------------------- 1 | function [prior, model] = InitialPara(param,muOn,B) 2 | % Description: initialize prior parameters and model parameters of NMoG_RPCA model. 3 | % Input: 4 | % muOn ---- The switch for updating mu. 5 | % muOn = 1 : update mu; 6 | % muOn = 0 : set mu as 0 without updating. 7 | 8 | prior.alpha0 = 1e-3; prior.beta0 = 1e3; prior.mu0 = 1e-4; 9 | prior.c0 = 1e-3; prior.eta0 = 1e-3; 10 | prior.lambda0 = 1e-3; prior.a0 = 1e-6; prior.b0 = 1e-6; 11 | 12 | model.alpha = 1e-3*ones(param.mog_k,B); model.beta = 1e-3*ones(param.mog_k,B); 13 | model.mu = 0*ones(param.mog_k,B); model.c = 1e-3*ones(param.mog_k,B); 14 | model.d = 1e-3*ones(param.mog_k,B); model.eta = 1e-3; 15 | model.lambda = 1e-3; model.R = []; 16 | model.muOn = muOn; -------------------------------------------------------------------------------- /utils/TV_operator/diff_d2.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff_d2(tv,sizeD) 2 | %% tv's size is 2*prod(sizeD),containing [dx(:);dy(:)]; 3 | % dx(:)is total variation along the x_axis; 4 | % dy(:)is total variation along the y_axis; 5 | % sizeD is size of original image; 6 | n = prod(sizeD); 7 | DX = reshape(tv(1:n), sizeD); 8 | DY = reshape(tv(n+1:2*n),sizeD); 9 | dfxx1 = diff(DX, 1, 1);% construct the dfxx; 10 | dfxx = zeros(sizeD); 11 | dfxx(1:end-1,:,:) = dfxx1; 12 | dfxx(end,:,:) = DX(1,:,:) - DX(end,:,:); 13 | 14 | dfyx1 = diff(DY, 1, 1);% construct the dfyx 15 | dfyx = zeros(sizeD); 16 | dfyx(1:end-1,:,:) = dfyx1; 17 | dfyx(end,:,:) = DY(1,:,:) - DY(end,:,:); 18 | 19 | dfyy1 = diff(DY, 1, 2);% construct the dfyy 20 | dfyy = zeros(sizeD); 21 | dfyy(:,1:end-1,:) = dfyy1; 22 | dfyy(:,end,:) = DY(:,1,:) - DY(:,end,:); 23 | 24 | diff_x = [dfxx(:); dfyx(:);dfyy(:)]; 25 | 26 | end 27 | 28 | -------------------------------------------------------------------------------- /compared_methods/rctv-main/TV_operator/diff_d2.m: -------------------------------------------------------------------------------- 1 | function diff_x = diff_d2(tv,sizeD) 2 | %% tv's size is 2*prod(sizeD),containing [dx(:);dy(:)]; 3 | % dx(:)is total variation along the x_axis; 4 | % dy(:)is total variation along the y_axis; 5 | % sizeD is size of original image; 6 | n = prod(sizeD); 7 | DX = reshape(tv(1:n), sizeD); 8 | DY = reshape(tv(n+1:2*n),sizeD); 9 | dfxx1 = diff(DX, 1, 1);% construct the dfxx; 10 | dfxx = zeros(sizeD); 11 | dfxx(1:end-1,:,:) = dfxx1; 12 | dfxx(end,:,:) = DX(1,:,:) - DX(end,:,:); 13 | 14 | dfyx1 = diff(DY, 1, 1);% construct the dfyx 15 | dfyx = zeros(sizeD); 16 | dfyx(1:end-1,:,:) = dfyx1; 17 | dfyx(end,:,:) = DY(1,:,:) - DY(end,:,:); 18 | 19 | dfyy1 = diff(DY, 1, 2);% construct the dfyy 20 | dfyy = zeros(sizeD); 21 | dfyy(:,1:end-1,:) = dfyy1; 22 | dfyy(:,end,:) = DY(:,1,:) - DY(:,end,:); 23 | 24 | diff_x = [dfxx(:); dfyx(:);dfyy(:)]; 25 | 26 | end 27 | 28 | -------------------------------------------------------------------------------- /utils/svdecon.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = svdecon(X) 2 | % Input: 3 | % X : m x n matrix 4 | % 5 | % Output: 6 | % X = U*S*V' 7 | % 8 | % Description: 9 | % Does equivalent to svd(X,'econ') but faster 10 | % 11 | % Vipin Vijayan (2014) 12 | 13 | %X = bsxfun(@minus,X,mean(X,2)); 14 | [m,n] = size(X); 15 | 16 | if m <= n 17 | C = X*X'; 18 | [U,D] = eig(C); 19 | clear C; 20 | 21 | [d,ix] = sort(abs(diag(D)),'descend'); 22 | U = U(:,ix); 23 |
24 | if nargout > 2 25 | V = X'*U; 26 | s = sqrt(d); 27 | V = bsxfun(@(x,c)x./c, V, s'); 28 | S = diag(s); 29 | end 30 | else 31 | C = X'*X; 32 | [V,D] = eig(C); 33 | clear C; 34 | 35 | [d,ix] = sort(abs(diag(D)),'descend'); 36 | V = V(:,ix); 37 | 38 | U = X*V; % convert evecs from X'*X to X*X'. the evals are the same. 39 | %s = sqrt(sum(U.^2,1))'; 40 | s = sqrt(d); 41 | U = bsxfun(@(x,c)x./c, U, s'); 42 | S = diag(s); 43 | end 44 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/tqlb.m: -------------------------------------------------------------------------------- 1 | function [lambda,top,bot,err] = tqlb(alpha,beta) 2 | 3 | % TQLB: Compute eigenvalues and top and bottom elements of 4 | % eigenvectors of a symmetric tridiagonal matrix T. 5 | % 6 | % [lambda,top,bot,err] = tqlb(alpha,beta) 7 | % 8 | % Input parameters: 9 | % alpha(1:n) : Diagonal elements. 10 | % beta(2:n) : Off-diagonal elements. 11 | % Output parameters: 12 | % lambda(1:n) : Computed eigenvalues. 13 | % top(1:n) : Top elements in eigenvectors. 14 | % bot(1:n) : Bottom elements in eigenvectors. 15 | % err : dummy argument. 16 | 17 | 18 | % Rasmus Munk Larsen, DAIMI, 1998 19 | 20 | 21 | % 22 | % This is a slow Matlab substitute for the 23 | % TQLB MEX-file. 24 | % 25 | 26 | warning('PROPACK:NotUsingMex','Using slow matlab code for tqlb.') 27 | n = length(alpha); 28 | T = spdiags([[beta(2:n);0] alpha(1:n) beta(1:n)],-1:1,n,n); 29 | 30 | [V,lambda] = eig(full(T)); lambda = diag(lambda); 31 | bot = V(end,:)'; 32 | top = V(1,:)'; 33 | err=0; 34 | 35 | 36 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/tucker_hooi.m: -------------------------------------------------------------------------------- 1 | function X =tucker_hooi(X_all,rank) 2 | [Core, U] = tensorSVD(X_all,rank);% initial with HOSVD 3 | ilastX = X_all; 4 | ndim = length(size(Core)); 5 | sizeD = size(X_all); 6 | normD = norm(X_all(:)); 7 | for j=1:ndim 8 | Ut{j} = U{j}'; 9 | end 10 | for Initer =1:6 11 | for j=1:ndim 12 | RRank =rank; 13 | tempC = my_ttm(X_all,Ut,[1:j-1,j+1:ndim],sizeD,rank,ndim); 14 | RRank(j) =sizeD(j); 15 | UnfoldC = Unfold(tempC,RRank,j); 16 | [V1,~,~]= svd(UnfoldC,'econ'); 17 | U{j} = V1(:,1:rank(j)); 18 | Ut{j} = U{j}'; 19 | end 20 | Core = my_ttm(X_all,Ut,1:ndim,sizeD,rank,ndim); 21 | X = my_ttm(Core,U,1:ndim,rank,sizeD,ndim); 22 | % stop criterion 23 | errT = ilastX-X; 24 | err = norm(errT(:))/normD; 25 | if err<=1e-3 26 | % fprintf(' ++++ Innerloop iterations = %d+++ \n',Initer); 27 | break; 28 | else 29 | ilastX=X; 30 | end 31 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/NLPatEstimation_yang.m: -------------------------------------------------------------------------------- 1 | function [ EPat, W ] = NLPatEstimation_yang( NL_mat, Self_arr, CurPat, Par) 2 | if nargin<5 3 | lam=1; 4 | end 5 | EPat = zeros(size(CurPat)); 6 | W = zeros(size(CurPat)); 7 | for i = 1 : length(Self_arr) % For each keypatch group 8 | Temp = CurPat(:, NL_mat(1:Par.patnum,i)); % Non-local similar patches to the keypatch 9 | M_Temp = repmat(mean( Temp, 2 ),1,Par.patnum); 10 | Temp = Temp - M_Temp; 11 | Sigma = std(Temp(:)); 12 | xx = WNNM_yang(Temp, Par.lambda*Par.c1, Sigma); 13 | % xx = WNNM_yang(Temp, lam*Par.c1, Sigma_arr(Self_arr(i))); 14 | % xxrank(i) = rank(xx); 15 | E_Temp = xx + M_Temp; % WNNM Estimation 16 | EPat(:,NL_mat(1:Par.patnum,i)) = 
EPat(:,NL_mat(1:Par.patnum,i)) + E_Temp; 17 | W(:,NL_mat(1:Par.patnum,i)) = W(:,NL_mat(1:Par.patnum,i)) + ones(size(CurPat,1),size(NL_mat(1:Par.patnum,i),1)); 18 | end 19 | % Rank = [mean(xxrank),min(xxrank),max(xxrank)] 20 | end 21 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/mylib/tucker_hooi_s.m: -------------------------------------------------------------------------------- 1 | function X =tucker_hooi_s(X_all,rank,delta) 2 | [Core, U] = tensorSVD(X_all,rank);% initial with HOSVD 3 | ilastX = X_all; 4 | ndim = length(size(Core)); 5 | sizeD = size(X_all); 6 | normD = norm(X_all(:)); 7 | for j=1:ndim 8 | Ut{j} = U{j}'; 9 | end 10 | for Initer =1:6 11 | for j=1:ndim 12 | RRank =rank; 13 | tempC = my_ttm(X_all,Ut,[1:j-1,j+1:ndim],sizeD,rank,ndim); 14 | RRank(j) =sizeD(j); 15 | UnfoldC = Unfold(tempC,RRank,j); 16 | [V1,~,~]= svd(UnfoldC,'econ'); 17 | U{j} = V1(:,1:rank(j)); 18 | Ut{j} = U{j}'; 19 | end 20 | Core = softthre(my_ttm(X_all,Ut,1:ndim,sizeD,rank,ndim),delta); 21 | X = my_ttm(Core,U,1:ndim,rank,sizeD,ndim); 22 | % stop criterion 23 | errT = ilastX-X; 24 | err = norm(errT(:))/normD; 25 | if err<=1e-3 26 | % fprintf(' ++++ Innerloop iterations = %d+++ \n',Initer); 27 | break; 28 | else 29 | ilastX=X; 30 | end 31 | end -------------------------------------------------------------------------------- /utils/svdsecon.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = svdsecon(X,k) 2 | % Input: 3 | % X : m x n matrix 4 | % k : extracts the first k singular values 5 | % 6 | % Output: 7 | % X = U*S*V' approximately (up to k) 8 | % 9 | % Description: 10 | % Does equivalent to svds(X,k) but faster 11 | % Requires that k < min(m,n) where [m,n] = size(X) 12 | % This function is useful if k is much smaller than m and n 13 | % or if X is sparse (see doc eigs) 14 | % 15 | % Vipin Vijayan (2014) 16 | 17 | %X = bsxfun(@minus,X,mean(X,2)); 18 | [m,n] = size(X); 19 | assert(k <= m && k <= n, 'k needs to be smaller than size(X,1) and size(X,2)'); 20 | 21 | if m <= n 22 | C = X*X'; 23 | [U,D] = eigs(C,k); 24 | clear C; 25 | if nargout > 2 26 | V = X'*U; 27 | s = sqrt(abs(diag(D))); 28 | V = bsxfun(@(x,c)x./c, V, s'); 29 | S = diag(s); 30 | end 31 | else 32 | C = X'*X; 33 | [V,D] = eigs(C,k); 34 | clear C; 35 | U = X*V; % convert evecs from X'*X to X*X'. the evals are the same. 36 | %s = sqrt(sum(U.^2,1))'; 37 | s = sqrt(abs(diag(D))); 38 | U = bsxfun(@(x,c)x./c, U, s'); 39 | S = diag(s); 40 | end 41 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr.m: -------------------------------------------------------------------------------- 1 | function [sigma,bnd] = bdsqr(alpha,beta) 2 | 3 | % BDSQR: Compute the singular values and bottom element of 4 | % the left singular vectors of a (k+1) x k lower bidiagonal 5 | % matrix with diagonal alpha(1:k) and lower bidiagonal beta(1:k), 6 | % where length(alpha) = length(beta) = k. 7 | % 8 | % [sigma,bnd] = bdsqr(alpha,beta) 9 | % 10 | % Input parameters: 11 | % alpha(1:k) : Diagonal elements. 12 | % beta(1:k) : Sub-diagonal elements. 13 | % Output parameters: 14 | % sigma(1:k) : Computed eigenvalues. 15 | % bnd(1:k) : Bottom elements in left singular vectors. 16 | 17 | % Below is a very slow replacement for the BDSQR MEX-file. 
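% (annotation) Usage sketch, assuming alpha and beta are the k x 1 diagonal and
% sub-diagonal vectors produced by a Lanczos bidiagonalization step:
%   [sigma, bnd] = bdsqr(alpha, beta);
%   % sigma(1:k): singular values of the (k+1) x k lower bidiagonal matrix
%   % bnd(1:k):   bottom elements of its left singular vectors (the quantities
%   %             typically used to form error bounds in the Lanczos routines)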
18 | 19 | % warning('PROPACK:NotUsingMex','Using slow matlab code for bdsqr.') 20 | k = length(alpha); 21 | if min(size(alpha)') ~= 1 | min(size(beta)') ~= 1 22 | error('alpha and beta must be vectors') 23 | elseif length(beta) ~= k 24 | error('alpha and beta must have the same lenght') 25 | end 26 | B = spdiags([alpha(:),beta(:)],[0,-1],k+1,k); 27 | [U,S,V] = svd(full(B),0); 28 | sigma = diag(S); 29 | bnd = U(end,1:k)'; 30 | 31 | 32 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/refinebounds.m: -------------------------------------------------------------------------------- 1 | function [bnd,gap] = refinebounds(D,bnd,tol1) 2 | %REFINEBONDS Refines error bounds for Ritz values based on gap-structure 3 | % 4 | % bnd = refinebounds(lambda,bnd,tol1) 5 | % 6 | % Treat eigenvalues closer than tol1 as a cluster. 7 | 8 | % Rasmus Munk Larsen, DAIMI, 1998 9 | 10 | j = length(D); 11 | 12 | if j<=1 13 | return 14 | end 15 | % Sort eigenvalues to use interlacing theorem correctly 16 | [D,PERM] = sort(D); 17 | bnd = bnd(PERM); 18 | 19 | 20 | % Massage error bounds for very close Ritz values 21 | eps34 = sqrt(eps*sqrt(eps)); 22 | [y,mid] = max(bnd); 23 | for l=[-1,1] 24 | for i=((j+1)-l*(j-1))/2:l:mid-l 25 | if abs(D(i+l)-D(i)) < eps34*abs(D(i)) 26 | if bnd(i)>tol1 & bnd(i+l)>tol1 27 | bnd(i+l) = pythag(bnd(i),bnd(i+l)); 28 | bnd(i) = 0; 29 | end 30 | end 31 | end 32 | end 33 | % Refine error bounds 34 | gap = inf*ones(1,j); 35 | gap(1:j-1) = min([gap(1:j-1);[D(2:j)-bnd(2:j)-D(1:j-1)]']); 36 | gap(2:j) = min([gap(2:j);[D(2:j)-D(1:j-1)-bnd(1:j-1)]']); 37 | gap = gap(:); 38 | I = find(gap>bnd); 39 | bnd(I) = bnd(I).*(bnd(I)./gap(I)); 40 | 41 | bnd(PERM) = bnd; -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/prox_htnn_U.m: -------------------------------------------------------------------------------- 1 | function [X,htnn,tsvd_rank] = prox_htnn_U(M,Y,rho) 2 | 3 | %The proximal operator for the order-D tensor nuclear norm under generalized invertible linear transform 4 | % 5 | % Written by Wenjin Qin (qinwenjin2021@163.com) 6 | % 7 | 8 | p = length(size(Y)); 9 | n = zeros(1,p); 10 | for i = 1:p 11 | n(i) = size(Y,i); 12 | end 13 | X = zeros(n); 14 | 15 | L = ones(1,p); 16 | for i = 3:p 17 | Y = tmprod(Y,M{i-2},i); 18 | L(i) = L(i-1) * n(i); 19 | end 20 | 21 | htnn = 0; 22 | tsvd_rank = 0; 23 | 24 | for i=1:L(p) 25 | [U,S,V] = svd(Y(:,:,i),'econ'); 26 | S = diag(S); 27 | r = length(find(S>rho)); 28 | if r>=1 29 | S =max( S(1:r)-rho,0); 30 | X(:,:,i) = U(:,1:r)*diag(S)*V(:,1:r)'; 31 | htnn = htnn+sum(S); 32 | tsvd_rank = max(tsvd_rank,r); 33 | end 34 | end 35 | 36 | rho=1; 37 | for j=3:p 38 | Tran_M=M{j-2}; 39 | a=sum(diag(Tran_M*(Tran_M)'))/n(j); 40 | rho=rho*a; 41 | end 42 | 43 | htnn = htnn/rho; 44 | 45 | for i = p:-1:3 46 | X = tmprod(X,inv(M{i-2}),i); 47 | end 48 | 49 | X = real(X); 50 | 51 | 52 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/ht_svd_fft.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = ht_svd_fft(X) 2 | 3 | Ndim = length(size(X)); 4 | Nway= zeros(1,Ndim); 5 | for i = 1:Ndim 6 | Nway(i) = size(X,i); 7 | end 8 | 9 | s1 = Nway; s1(2) = Nway(1); 10 | s2 = Nway; s2(1) = Nway(2); 11 | U=zeros(s1);S=zeros(Nway);V=zeros(s2); 12 | L = ones(1,Ndim); 13 | for ii = 3:Ndim 14 | X = fft(X,[],ii); 15 | 
L(ii) = L(ii-1) * Nway(ii); 16 | end 17 | 18 | 19 | [U(:,:,1),S(:,:,1),V(:,:,1)] = svd(X(:,:,1)); 20 | for j = 3 : Ndim 21 | for i = L(j-1)+1 : L(j) 22 | I = unfoldi(i,j,L); 23 | halfnj = floor(Nway(j)/2)+1; 24 | if I(j) <= halfnj && I(j) >= 2 25 | [U(:,:,i),S(:,:,i),V(:,:,i)] = svd(X(:,:,i)); 26 | elseif I(j) > halfnj 27 | n_ = nc(I,j,Nway); 28 | i_ = foldi(n_,j,L); 29 | U(:,:,i) = conj(U(:,:,i_)); 30 | V(:,:,i) = conj(V(:,:,i_)); 31 | S(:,:,i) = conj(S(:,:,i_)); 32 | end 33 | end 34 | end 35 | 36 | 37 | for jj = Ndim:-1:3 38 | U = ifft(U,[],jj); 39 | S = ifft(S,[],jj); 40 | V = ifft(V,[],jj); 41 | end 42 | U =real( U ); 43 | S =real( S ); 44 | V =real( V ); 45 | 46 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/Wn_est_initial.m: -------------------------------------------------------------------------------- 1 | function Wn = Wn_est_initial(Noi_H,sng,param,model,prior) 2 | if (~isfield(param,'initial_rank')) 3 | initial_rank = 10; 4 | else 5 | initial_rank = param.initial_rank; 6 | end 7 | [m,n,b] = size(Noi_H); 8 | sizeD = size(Noi_H); 9 | Y = reshape(Noi_H,m*n,b); 10 | [N,B] = size(Y); 11 | k = param.mog_k; 12 | 13 | % Initial low-rank component 14 | [u, s, v] = svd(Y, 'econ'); 15 | r = initial_rank; 16 | U = u(:,1:r)*(s(1:r,1:r)).^(0.6); 17 | V = (s(1:r,1:r)).^(0.4)*v(:,1:r)'; 18 | V = V'; 19 | 20 | %%%%%%%%% Initial model parameters %%%%%%%%%%%%%%% 21 | E = Y-U*V'; 22 | for i=1:B 23 | model.R(:,:,i) = R_initialization(E(:,i)', k); 24 | alpha0 = prior.alpha0; 25 | c0 = prior.c0; 26 | nxbar = reshape(E(:,i), 1,N)*model.R(:,:,i); 27 | nxbar = nxbar'; 28 | nk = sum(model.R(:,:,i),1)'; 29 | model.alpha(:,i) = alpha0+nk; 30 | model.c(:,i) = c0 + nk/2; 31 | temp = reshape(E(:,i).^2, 1, N)*model.R(:,:,i); 32 | model.d = model.eta/model.lambda + 0.5*temp ; 33 | end 34 | 35 | for j = 1:10 36 | model = VariatInf_NoiseDist(model,prior,E); 37 | end 38 | 39 | % W_n 40 | Wn = Weight_NMoG(model,sizeD); 41 | 42 | Var = var(E(:)); 43 | sng - Var 44 | 45 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/Cub2Patch_yang.m: -------------------------------------------------------------------------------- 1 | function [Y, Mat] = Cub2Patch_yang( E_Img,N_Img, Average, par ) 2 | TotalPatNum = (size(E_Img,1)-par.patsize+1)*(size(E_Img,2)-par.patsize+1); %Total Patch Number in the image 3 | Y = zeros(par.patsize*par.patsize*size(E_Img,3), TotalPatNum, 'single'); %Current Patches 4 | N_Y = zeros(par.patsize*par.patsize*size(E_Img,3), TotalPatNum, 'single'); %Patches in the original noisy image 5 | Mat = zeros(par.patsize*par.patsize, TotalPatNum, 'single'); %Patches in the original noisy image 6 | k = 0; 7 | for o = 1:size(E_Img,3) 8 | for i = 1:par.patsize 9 | for j = 1:par.patsize 10 | k = k+1; 11 | E_patch = E_Img(i:end-par.patsize+i,j:end-par.patsize+j,o); 12 | N_patch = N_Img(i:end-par.patsize+i,j:end-par.patsize+j,o); 13 | Y(k,:) = E_patch(:)'; 14 | N_Y(k,:) = N_patch(:)'; 15 | end 16 | end 17 | end 18 | 19 | k = 0; 20 | for i = 1:par.patsize 21 | for j = 1:par.patsize 22 | k = k+1; 23 | Mat_patch = Average(i:end-par.patsize+i,j:end-par.patsize+j); 24 | Mat(k,:) = Mat_patch(:)'; 25 | end 26 | end 27 | 28 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/VariatInf_NoiseDist.m: -------------------------------------------------------------------------------- 1 | function model = VariatInf_NoiseDist(model,prior,E) 2 | % 
Variational inference of NMoG_RPCA. 3 | E2 = E.^2; 4 | model = nmog_vmax(model, prior, E2); 5 | model = nmog_vexp(model, E2); 6 | 7 | 8 | function model = nmog_vmax(model,prior,E2) 9 | [m,n] = size(E2); 10 | alpha0 = prior.alpha0; 11 | c0 = prior.c0; 12 | R = permute(model.R,[1,3,2]); 13 | k = size(R,3); 14 | for i=1:k 15 | temp(i,:) = diag(E2'*R(:,:,i)); 16 | end 17 | nk = reshape(sum(R,1),n,k); 18 | model.alpha = alpha0 + nk'; 19 | model.c = c0 + nk'/2; 20 | model.d = model.eta/model.lambda + 0.5*temp ; 21 | model.eta = prior.eta0 + k*n*prior.c0; 22 | model.lambda = prior.lambda0 + sum(sum(model.c ./ model.d)); 23 | 24 | function model = nmog_vexp(model, E2) 25 | alpha = model.alpha; 26 | c = model.c; 27 | d = model.d; 28 | k = size(c,1); 29 | tau = c./d; 30 | Elogtau = psi(0, c) - log(d); 31 | Elogpi = psi(0, alpha) - psi(0, sum(alpha(:))); 32 | for i=1:k 33 | temp = bsxfun(@times,tau(i,:),E2) ; 34 | logRho(:,:,i) = (bsxfun(@minus,temp,2*Elogpi(i,:) + Elogtau(i,:) - log(2*pi)))/(-2); 35 | end 36 | logR = bsxfun(@minus,logRho,logsumexp(logRho,3)); 37 | R = exp(logR); 38 | model.logR = logR; 39 | model.R = permute(R,[1,3,2]); 40 | 41 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/htsvd_fft.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = htsvd_fft(X) 2 | 3 | 4 | % Order-d tensors Singular Value Decomposition under FFT 5 | % Written by Wenjin Qin (qinwenjin2021@163.com) 6 | 7 | 8 | Ndim = length(size(X)); 9 | Nway= zeros(1,Ndim); 10 | for ii = 1:Ndim 11 | Nway(ii) = size(X,ii); 12 | end 13 | 14 | s1 = Nway; s1(2) = Nway(1); 15 | s2 = Nway; s2(1) = Nway(2); 16 | 17 | U=zeros(s1);S=zeros(Nway);V=zeros(s2); 18 | L = ones(1,Ndim); 19 | for ii_ = 3:Ndim 20 | X = fft(X,[],ii_); 21 | L(ii_) = L(ii_-1) * Nway(ii_); 22 | end 23 | 24 | [U(:,:,1),S(:,:,1),V(:,:,1)] = svd(X(:,:,1)); 25 | 26 | for j = 3 : Ndim 27 | for i = L(j-1)+1 : L(j) 28 | 29 | I = unfoldi(i,j,L); 30 | halfnj = floor(Nway(j)/2)+1; 31 | if I(j) <= halfnj && I(j) >= 2 32 | [U(:,:,i),S(:,:,i),V(:,:,i)] = svd(X(:,:,i)); 33 | elseif I(j) > halfnj 34 | n_ = nc(I,j,Nway); 35 | i_ = foldi(n_,j,L); 36 | U(:,:,i) = conj(U(:,:,i_)); 37 | V(:,:,i) = conj(V(:,:,i_)); 38 | S(:,:,i) = conj(S(:,:,i_)); 39 | end 40 | end 41 | end 42 | 43 | 44 | for jj = Ndim:-1:3 45 | U = ifft(U,[],jj); 46 | S = ifft(S,[],jj); 47 | V = ifft(V,[],jj); 48 | end 49 | 50 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/EfficientMCL2.m: -------------------------------------------------------------------------------- 1 | %%%%% Weighted L2 norm factorization %%%%% 2 | function [OutU,OutV,t] = EfficientMCL2(Matrix, W, InU,InV, MaxIt, tol) 3 | %%%%% Matrix - data matrix for factorization (d*n) %%%%% 4 | %%%%% W - weight matrix (d*n) %%%%% 5 | %%%%% InU,InV - initialization of U (d*k dimensional) and V (n*k dimensional) %%%%% 6 | %%%%% MaxIt - Maximal iteration number %%%%% 7 | %%%%% tol - Tolerance for RobustL1 algorithm %%%%% 8 | 9 | [d n] = size(Matrix); 10 | [k] = size(InU,2); 11 | OutU = InU; 12 | OutV = InV; 13 | t = sum(sum((W.*Matrix).^2)); 14 | 15 | for i = 1:MaxIt 16 | ind = randperm(k); 17 | % ind = 1:k; 18 | for j = ind 19 | TX = Matrix - OutU*OutV' + OutU(:,j)*OutV(:,j)'; 20 | u = InU(:,j); 21 | OutV(:,j) = optimMCL2(TX,W,u); 22 | OutU(:,j) = optimMCL2(TX',W',OutV(:,j)); 23 | end 24 | t = [t sum(sum(((W.*(Matrix-OutU*OutV')).^2)))]; 25 | 
sum(sum(((W.*(Matrix-OutU*OutV')).^2)))/sum(sum(((W.*(Matrix)).^2))); 26 | 27 | if norm(InU - OutU) < tol 28 | break; 29 | else 30 | InU = OutU; 31 | end 32 | end 33 | 34 | Nu = sqrt(sum(OutU.^2))'; 35 | Nv = sqrt(sum(OutV.^2))'; 36 | No = diag(Nu.*Nv); 37 | OutU = OutU*diag(1./Nu)*sqrt(No); 38 | OutV = OutV*diag(1./Nv)*sqrt(No); 39 | 40 | -------------------------------------------------------------------------------- /compared_methods/NGmeet/NLPatEstimation.m: -------------------------------------------------------------------------------- 1 | function [ EPat, W ] = NLPatEstimation( NL_mat, Self_arr, Sigma_arr, CurPat, Par) 2 | EPat = zeros(size(CurPat)); 3 | W = zeros(size(CurPat)); 4 | for i = 1 : length(Self_arr) % For each keypatch group 5 | Temp = CurPat(:, NL_mat(1:Par.patnum,i)); % Non-local similar patches to the keypatch 6 | M_Temp = repmat(mean( Temp, 2 ),1,Par.patnum); 7 | Temp = Temp - M_Temp; 8 | E_Temp = WNNM(Temp, Par.c1, Sigma_arr(Self_arr(i))) + M_Temp; % WNNM Estimation 9 | EPat(:,NL_mat(1:Par.patnum,i)) = EPat(:,NL_mat(1:Par.patnum,i)) + E_Temp; 10 | W(:,NL_mat(1:Par.patnum,i)) = W(:,NL_mat(1:Par.patnum,i)) + ones(size(CurPat,1),size(NL_mat(1:Par.patnum,i),1)); 11 | end 12 | end 13 | 14 | function [X] = WNNM( Y, C, NSig) 15 | [U,SigmaY,V] = svd(full(Y),'econ'); 16 | PatNum = size(Y,2); 17 | TempC = C*sqrt(PatNum)*2*NSig^2; 18 | [SigmaX,svp] = ClosedWNNM(SigmaY,TempC,eps); 19 | X = U(:,1:svp)*diag(SigmaX)*V(:,1:svp)'; 20 | end 21 | 22 | function [SigmaX,svp]=ClosedWNNM(SigmaY,C,oureps) 23 | temp=(SigmaY-oureps).^2-4*(C-oureps*SigmaY); 24 | ind=find (temp>0); 25 | svp=length(ind); 26 | SigmaX=max(SigmaY(ind)-oureps+sqrt(temp(ind)),0)/2; 27 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/RandOrthMat.m: -------------------------------------------------------------------------------- 1 | function M=RandOrthMat(n, tol) 2 | % M = RANDORTHMAT(n) 3 | % generates a random n x n orthogonal real matrix. 4 | % 5 | % M = RANDORTHMAT(n,tol) 6 | % explicitly specifies a thresh value that measures linear dependence 7 | % of a newly formed column with the existing columns. Defaults to 1e-6. 8 | % 9 | % In this version the generated matrix distribution *is* uniform over the manifold 10 | % O(n) w.r.t. the induced R^(n^2) Lebesgue measure, at a slight computational 11 | % overhead (randn + normalization, as opposed to rand ). 12 | % 13 | % (c) Ofek Shilon , 2006. 14 | 15 | 16 | if nargin==1 17 | tol=1e-6; 18 | end 19 | 20 | M = zeros(n); % prealloc 21 | 22 | % gram-schmidt on random column vectors 23 | 24 | vi = randn(n,1); 25 | % the n-dimensional normal distribution has spherical symmetry, which implies 26 | % that after normalization the drawn vectors would be uniformly distributed on the 27 | % n-dimensional unit sphere. 
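% (annotation) A common alternative sketch, not part of this routine: a
% sign-corrected QR factorization of a Gaussian matrix is also uniform over
% O(n) and avoids the explicit Gram-Schmidt loop below:
%   [Q,R] = qr(randn(n));
%   Q = Q*diag(sign(diag(R)));   % fix column signs so the distribution stays uniform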
28 | 29 | M(:,1) = vi ./ norm(vi); 30 | 31 | for i=2:n 32 | nrm = 0; 33 | while nrm 8 | #include "mex.h" 9 | 10 | /* Template for tqlb: */ 11 | void tqlb_(int *n, double *d__, double *e, double *bnd, 12 | double *bnd2, int *ierr); 13 | 14 | /* Here comes the gateway function to be called by Matlab: */ 15 | void mexFunction(int nlhs, mxArray *plhs[], 16 | int nrhs, const mxArray *prhs[]) 17 | { 18 | int m, n,i, ierr; 19 | double x, *tmp; 20 | 21 | if (nrhs != 2) 22 | mexErrMsgTxt("tqlb requires two input arguments"); 23 | else if (nlhs != 4) 24 | mexErrMsgTxt("tqlb requires four output arguments"); 25 | 26 | for (i=0; i<2; i++) { 27 | m = mxGetM(prhs[i]); /* get the dimensions of the input */ 28 | n = mxGetN(prhs[i]); 29 | 30 | /* make sure input is m x 1 */ 31 | if (n != 1) 32 | mexErrMsgTxt("Input must be a m x 1 vectors"); 33 | } 34 | 35 | /* Create/allocate return argument, a 1x1 real-valued Matrix */ 36 | for (i=0; i<3; i++) { 37 | plhs[i]=mxCreateDoubleMatrix(m,1,mxREAL); 38 | } 39 | plhs[3] = mxCreateDoubleMatrix(1,1,mxREAL); 40 | tmp = mxCalloc(m,sizeof(double)); 41 | 42 | memcpy(mxGetPr(plhs[0]), mxGetPr(prhs[0]),m*sizeof(double)); 43 | memcpy(tmp,mxGetPr(prhs[1]), m*sizeof(double)); 44 | tqlb_(&m,mxGetPr(plhs[0]),tmp,mxGetPr(plhs[1]), 45 | mxGetPr(plhs[2]),&ierr); 46 | 47 | *(mxGetPr(plhs[3])) = (double) ierr; 48 | mxFree(tmp); 49 | } 50 | -------------------------------------------------------------------------------- /utils/visualization/rsshow.m: -------------------------------------------------------------------------------- 1 | function A = rsshow(I, scale, ignore_value) 2 | % Remote sensing image enhancement for visualization. 3 | % 4 | % Usage: 5 | % display an image: rsshow(I, 0.05) 6 | % write an image: A = rsshow(I, 0.05); imwrite(A, 'output.jpg') 7 | % 8 | % If the code is used in your scientific research, please cite the paper. 9 | % [1] Shuang Xu, Xiangyong Cao, Jiangjun Peng, Qiao Ke, Cong Ma and Deyu 10 | % Meng. Hyperspectral Image Denoising by Asymmetric Noise Modeling. IEEE 11 | % TGRS, 2023. 12 | 13 | if nargin==1 14 | scale = 0.005; 15 | ignore_value = NaN; 16 | elseif nargin==2 17 | ignore_value = NaN; 18 | end 19 | I = double(I); 20 | 21 | if size(I,3)>=3 22 | C = size(I,3); 23 | band = [C, uint8(C*0.5), uint8(C*0.1)]; 24 | band(band<1) = 1; 25 | I = I(:,:,band); 26 | end 27 | 28 | Iq = I; 29 | if ~isnan(ignore_value) 30 | Iq(Iq==ignore_value) = nan; 31 | end 32 | 33 | if ismatrix(I) 34 | q = quantile(Iq(:),[scale, 1-scale]); 35 | [low, high] = deal(q(1),q(2)); 36 | I(I>high) = high; 37 | I(Ihigh) = high; 46 | temp(tempeta, where v_{i} are the vectors with mu>delta. 8 | % Strategy 1: Orthogonalize all vectors v_{r-extra},...,v_{s+extra} where 9 | % v_{r} is the first and v_{s} the last Lanczos vector with 10 | % mu > eta. 11 | % Strategy 2: Orthogonalize all vectors with mu > eta. 12 | % 13 | % Notice: The first LL vectors are excluded since the new Lanczos 14 | % vector is already orthogonalized against them in the main iteration. 15 | 16 | % Rasmus Munk Larsen, DAIMI, 1998. 
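% (annotation) Call sketch, assuming mu(1:j) holds the running orthogonality
% estimates maintained by the Lanczos routine and delta >= eta (the values of
% strategy and extra below are illustrative only):
%   int = compute_int(mu, j, delta, eta, LL, 0, 2);
%   % int lists the indices of the previously computed Lanczos vectors that
%   % the new vector should be reorthogonalized against.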
17 | 18 | if (delta= ETA.') 20 | end 21 | switch strategy 22 | case 0 23 | I0 = find(abs(mu(1:j))>=delta); 24 | if length(I0)==0 25 | [mm,I0] = max(abs(mu(1:j))); 26 | end 27 | int = zeros(j,1); 28 | for i = 1:length(I0) 29 | for r=I0(i):-1:1 30 | if abs(mu(r))0 47 | int(1:LL) = 0; 48 | end 49 | int = find(int); 50 | case 1 51 | int=find(abs(mu(1:j))>eta); 52 | int = max(LL+1,min(int)-extra):min(max(int)+extra,j); 53 | case 2 54 | int=find(abs(mu(1:j))>=eta); 55 | end 56 | int = int(:); -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/prox_htnn_F.m: -------------------------------------------------------------------------------- 1 | function [X,htnn,tsvd_rank] = prox_htnn_F(Y,rho) 2 | 3 | % The proximal operator for the order-D tensor nuclear norm under FFT 4 | % 5 | % Written by Wenjin Qin (qinwenjin2021@163.com) 6 | % 7 | 8 | p = length(size(Y)); 9 | n = zeros(1,p); 10 | for i = 1:p 11 | n(i) = size(Y,i); 12 | end 13 | 14 | X = zeros(n); 15 | L = ones(1,p); 16 | for i = 3:p 17 | Y = fft(Y,[],i); 18 | L(i) = L(i-1) * n(i); 19 | end 20 | 21 | htnn = 0; 22 | tsvd_rank = 0; 23 | 24 | [U,S,V] = svd(Y(:,:,1),'econ'); 25 | S = diag(S); 26 | r = length(find(S>rho)); 27 | if r>=1 28 | S = max(S(1:r)-rho,0); 29 | X(:,:,1) = U(:,1:r)*diag(S)*V(:,1:r)'; 30 | htnn = htnn+sum(S); 31 | tsvd_rank = max(tsvd_rank,r); 32 | end 33 | 34 | for j = 3 : p 35 | for i = L(j-1)+1 : L(j) 36 | % 37 | I = unfoldi(i,j,L); 38 | halfnj = floor(n(j)/2)+1; 39 | % 40 | if I(j) <= halfnj && I(j) >= 2 41 | [U,S,V] = svd(Y(:,:,i),'econ'); 42 | S = diag(S); 43 | r = length(find(S>rho)); 44 | if r>=1 45 | S = max(S(1:r)-rho,0); 46 | X(:,:,i) = U(:,1:r)*diag(S)*V(:,1:r)'; 47 | htnn = htnn+sum(S)*2; 48 | tsvd_rank = max(tsvd_rank,r); 49 | end 50 | 51 | %Conjugation property 52 | elseif I(j) > halfnj 53 | % 54 | n_ = nc(I,j,n); 55 | % 56 | i_ = foldi(n_,j,L); 57 | X(:,:,i) = conj( X(:,:,i_)); 58 | 59 | end 60 | end 61 | end 62 | 63 | htnn = htnn/prod(n(3:end)); 64 | 65 | for i = p:-1:3 66 | X = (ifft(X,[],i)); 67 | end 68 | X = real(X); 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/testtqlb.m: -------------------------------------------------------------------------------- 1 | % Script for comparing speed an accuracy of original TQLB, optimized TQLB 2 | % and builtin EIG command. 3 | 4 | % Rasmus Munk Larsen, DAIMI, 1998. 5 | 6 | n=1000; 7 | 8 | % Use 2. order difference matrix as testproblem. 9 | e = ones(n,1); 10 | T = spdiags([-e 2*e -e], -1:1, n, n); 11 | true = 4*cos(pi/2*[n:-1:1]'./(n+1)).^2; 12 | alpha = 2*ones(n,1); 13 | beta = -ones(n,1); 14 | 15 | fprintf('-----------------------------------------------------------------\n') 16 | disp('Modified tqlb:') 17 | fprintf('\n') 18 | tic, flops(0) 19 | [lambda,top,bot,err] = tqlb(alpha,beta); 20 | fprintf('Elapsed time = %f\n',toc); 21 | fprintf('Number of flops = %f\n',flops); 22 | fprintf('Max rel. error = %e\n',max(abs((lambda-true)./true))) 23 | 24 | 25 | fprintf('-----------------------------------------------------------------\n') 26 | disp('Original tqlb:') 27 | fprintf('\n') 28 | tic, flops(0); 29 | [lambda2,top,bot,err2] = tqlb_orig(alpha,beta); 30 | fprintf('Elapsed time = %f\n',toc); 31 | fprintf('Number of flops = %f\n',flops); 32 | fprintf('Max rel. 
error = %e\n',max(abs((lambda2-true)./true))) 33 | 34 | 35 | fprintf('-----------------------------------------------------------------\n') 36 | disp('eig:') 37 | fprintf('\n') 38 | tic, flops(0); 39 | lambda1 = eig(T); 40 | lambda1 =sort(lambda1); 41 | fprintf('Elapsed time = %f\n',toc); 42 | fprintf('Number of flops = %f\n',flops); 43 | fprintf('Max rel. error = %e\n',max(abs((lambda1-true)./true))) 44 | 45 | fprintf('-----------------------------------------------------------------\n') 46 | disp('eig:') 47 | fprintf('\n') 48 | tic, flops(0); 49 | lambda1 = eig(full(T)); 50 | lambda1 =sort(lambda1); 51 | fprintf('Elapsed time = %f\n',toc); 52 | fprintf('Number of flops = %f\n',flops); 53 | fprintf('Max rel. error = %e\n',max(abs((lambda1-true)./true))) 54 | 55 | 56 | -------------------------------------------------------------------------------- /compared_methods/NGmeet/NeighborIndex.m: -------------------------------------------------------------------------------- 1 | function [Neighbor_arr, Num_arr, SelfIndex_arr] = NeighborIndex(im, par) 2 | % This Function Precompute the all the patch indexes in the Searching window 3 | % -Neighbor_arr is the array of neighbor patch indexes for each keypatch 4 | % -Num_arr is array of the effective neighbor patch numbers for each keypatch 5 | % -SelfIndex_arr is the index of keypatches in the total patch index array 6 | SW = par.SearchWin; 7 | s = par.step; 8 | TempR = size(im,1)-par.patsize+1; 9 | TempC = size(im,2)-par.patsize+1; 10 | R_GridIdx = [1:s:TempR]; 11 | R_GridIdx = [R_GridIdx R_GridIdx(end)+1:TempR]; 12 | C_GridIdx = [1:s:TempC]; 13 | C_GridIdx = [C_GridIdx C_GridIdx(end)+1:TempC]; 14 | 15 | Idx = (1:TempR*TempC); 16 | Idx = reshape(Idx, TempR, TempC); 17 | R_GridH = length(R_GridIdx); 18 | C_GridW = length(C_GridIdx); 19 | 20 | Neighbor_arr = int32(zeros(4*SW*SW,R_GridH*C_GridW)); 21 | Num_arr = int32(zeros(1,R_GridH*C_GridW)); 22 | SelfIndex_arr = int32(zeros(1,R_GridH*C_GridW)); 23 | 24 | for i = 1 : R_GridH 25 | for j = 1 : C_GridW 26 | OffsetR = R_GridIdx(i); 27 | OffsetC = C_GridIdx(j); 28 | Offset1 = (OffsetC-1)*TempR + OffsetR; 29 | Offset2 = (j-1)*R_GridH + i; 30 | 31 | top = max( OffsetR-SW, 1 ); 32 | button = min( OffsetR+SW, TempR ); 33 | left = max( OffsetC-SW, 1 ); 34 | right = min( OffsetC+SW, TempC ); 35 | 36 | NL_Idx = Idx(top:button, left:right); 37 | NL_Idx = NL_Idx(:); 38 | 39 | Num_arr(Offset2) = length(NL_Idx); 40 | Neighbor_arr(1:Num_arr(Offset2),Offset2) = NL_Idx; 41 | SelfIndex_arr(Offset2) = Offset1; 42 | end 43 | end -------------------------------------------------------------------------------- /compared_methods/WNLRATV/NeighborIndex.m: -------------------------------------------------------------------------------- 1 | function [Neighbor_arr, Num_arr, SelfIndex_arr] = NeighborIndex(im, par) 2 | % This Function Precompute the all the patch indexes in the Searching window 3 | % -Neighbor_arr is the array of neighbor patch indexes for each keypatch 4 | % -Num_arr is array of the effective neighbor patch numbers for each keypatch 5 | % -SelfIndex_arr is the index of keypatches in the total patch index array 6 | SW = par.SearchWin; 7 | s = par.step; 8 | TempR = size(im,1)-par.patsize+1; 9 | TempC = size(im,2)-par.patsize+1; 10 | R_GridIdx = [1:s:TempR]; 11 | R_GridIdx = [R_GridIdx R_GridIdx(end)+1:TempR]; 12 | C_GridIdx = [1:s:TempC]; 13 | C_GridIdx = [C_GridIdx C_GridIdx(end)+1:TempC]; 14 | 15 | Idx = (1:TempR*TempC); 16 | Idx = reshape(Idx, TempR, TempC); 17 | R_GridH = length(R_GridIdx); 18 | C_GridW = 
length(C_GridIdx); 19 | 20 | Neighbor_arr = int32(zeros(4*SW*SW,R_GridH*C_GridW)); 21 | Num_arr = int32(zeros(1,R_GridH*C_GridW)); 22 | SelfIndex_arr = int32(zeros(1,R_GridH*C_GridW)); 23 | 24 | for i = 1 : R_GridH 25 | for j = 1 : C_GridW 26 | OffsetR = R_GridIdx(i); 27 | OffsetC = C_GridIdx(j); 28 | Offset1 = (OffsetC-1)*TempR + OffsetR; 29 | Offset2 = (j-1)*R_GridH + i; 30 | 31 | top = max( OffsetR-SW, 1 ); 32 | button = min( OffsetR+SW, TempR ); 33 | left = max( OffsetC-SW, 1 ); 34 | right = min( OffsetC+SW, TempC ); 35 | 36 | NL_Idx = Idx(top:button, left:right); 37 | NL_Idx = NL_Idx(:); 38 | 39 | Num_arr(Offset2) = length(NL_Idx); 40 | Neighbor_arr(1:Num_arr(Offset2),Offset2) = NL_Idx; 41 | SelfIndex_arr(Offset2) = Offset1; 42 | end 43 | end -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/nmodeproduct.m: -------------------------------------------------------------------------------- 1 | function B = nmodeproduct(A,M,n) 2 | % Calculates the n-Mode Product of a Tensor A and a Matrix M 3 | % 4 | % B = nmodeproduct(A,M,n) 5 | % 6 | % B = A (x)_n M .. According to the Definition in De Lathauwer (2000) 7 | % 8 | % with: 9 | % A: (I_1 x I_2 x .. I_n x .. I_N) .. -> n is in [1..N] 10 | % M: (J x I_n) 11 | % B: (I_1 x I_2 x .. J x .. I_N) 12 | % 13 | % note: "(x)_n" is the operator between the tensor and the matrix 14 | % 15 | % v0.001 2009 by Fabian Schneiter 16 | % 17 | 18 | % check inputs: 19 | dimvec = size(A); 20 | n = fix(n); 21 | if (length(dimvec)1) = 1; 13 | I_NHSI_LR(I_NHSI_LR<0) = 0; 14 | I_PAN = im2double(pan); 15 | 16 | ratio = 6; 17 | metrics = zeros(3,4); 18 | 19 | % pan-sharpening with noisy LR-HSI (Fig. 10c) 20 | I_Fus_wN = AWLP(imresize(I_NHSI_LR,ratio), I_PAN,ratio); 21 | [mpsnr,mssim,ergas,sam] = pwrctv_msqia(I_GT, I_Fus_wN); 22 | metrics(1,1:4) = [mpsnr,mssim,ergas,sam]; 23 | 24 | % denoised LR-HSI + pan-sharpening (Fig. 10d) 25 | beta = 100; 26 | lambda = 1; 27 | tau = 0.4*[1,1]; 28 | q = 10; 29 | r = 4; 30 | I_DNHSI_LR = PWRCTV(I_NHSI_LR, imresize(I_PAN,1/ratio), beta, lambda, tau, r, q); 31 | I_Fus_DP = AWLP(imresize(I_DNHSI_LR,ratio), I_PAN,ratio); 32 | [mpsnr,mssim,ergas,sam] = pwrctv_msqia(I_GT, I_Fus_DP); 33 | metrics(2,1:4) = [mpsnr,mssim,ergas,sam]; 34 | 35 | % pan-sharpening + denoised LR-HSI (Fig. 
10e) 36 | beta = 100; 37 | lambda = 1; 38 | tau = 0.4*[1,1]; 39 | q = 10; 40 | r = 4; 41 | I_Fus_PD = PWRCTV(I_Fus_wN, I_PAN, beta, lambda, tau, r, q); 42 | [mpsnr,mssim,ergas,sam] = pwrctv_msqia(I_GT, I_Fus_PD); 43 | metrics(3,1:4) = [mpsnr,mssim,ergas,sam]; 44 | 45 | % save result 46 | savepath = 'result\pansharpening'; 47 | mkdir(savepath) 48 | save(fullfile(savepath,'Milan_iidGauss.mat'), 'I_NHSI_LR', 'I_Fus_DP', 'I_Fus_PD','I_Fus_wN') 49 | 50 | rgb_index = [58,47,36]; 51 | imwrite(rsshow(I_GT(:,:,rgb_index)), fullfile(savepath,'Milan_Pansharpening_GT.jpg')) 52 | imwrite(rsshow(I_NHSI_LR(:,:,rgb_index)), fullfile(savepath,'Milan_Pansharpening_LR_NHSI.jpg')) 53 | imwrite(rsshow(I_Fus_wN(:,:,rgb_index)), fullfile(savepath,'Milan_Pansharpening_Fus_noisy.jpg')) 54 | imwrite(rsshow(I_Fus_DP(:,:,rgb_index)), fullfile(savepath,'Milan_Pansharpening_Fus_D+P.jpg')) 55 | imwrite(rsshow(I_Fus_PD(:,:,rgb_index)), fullfile(savepath,'Milan_Pansharpening_Fus_P+D.jpg')) 56 | imwrite(rsshow(I_PAN), fullfile(savepath,'Milan_Pansharpening_PAN.jpg')) -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/bdsqr_mex.c: -------------------------------------------------------------------------------- 1 | /* 2 | MEX interface for LAPACK routine bdsqr. 3 | Matlab calling sequence: 4 | [sigma,bnd] = bdsqr(alpha,beta) 5 | */ 6 | 7 | #include 8 | #include 9 | #include "mex.h" 10 | 11 | /* Templates for FORTRAN routines: */ 12 | void dbdqr_(int *n, double *d, double *e, double *c1, double *c2); 13 | void dbdsqr_(char *uplo, int *n, int *ncvt, int *nru, int *ncc, 14 | double *d, double *e, double *vt, int *ldt, double *u, 15 | int *ldu, double *c, int *ldc, double *work, int *info); 16 | 17 | /* Here comes the gateway function to be called by Matlab: */ 18 | void mexFunction(int nlhs, mxArray *plhs[], 19 | int nrhs, const mxArray *prhs[]) 20 | { 21 | int m, n, i, info, zero=0, one=1; 22 | double *d,*e,dummy, *wrk, *bnd; 23 | 24 | if (nrhs != 2) 25 | mexErrMsgTxt("bdsqr requires two input arguments"); 26 | else if (nlhs != 2) 27 | mexErrMsgTxt("bdsqr requires two output arguments"); 28 | 29 | m = mxGetM(prhs[0]); /* get the dimensions of the input */ 30 | n = mxGetN(prhs[0]); 31 | /* make sure input input vectors are same length */ 32 | if (m != mxGetM(prhs[1]) ) 33 | mexErrMsgTxt("alpha and beta must have the same size"); 34 | /* make sure input is m x 1 */ 35 | if ( n != 1 || mxGetN(prhs[1]) != 1 || n != mxGetN(prhs[1])) 36 | mexErrMsgTxt("alpha and beta must be a m x 1 vectors"); 37 | 38 | /* Create/allocate return arguments */ 39 | for (i=0; i<2; i++) { 40 | plhs[i]=mxCreateDoubleMatrix(m,1,mxREAL); 41 | } 42 | 43 | e = mxCalloc(m,sizeof(double)); 44 | wrk = mxCalloc(4*m-4,sizeof(double)); 45 | d = mxGetPr(plhs[0]); 46 | memcpy(d,mxGetPr(prhs[0]), m*sizeof(double)); 47 | memcpy(e,mxGetPr(prhs[1]), m*sizeof(double)); 48 | bnd = mxGetPr(plhs[1]); 49 | for (i=0; i 0) 63 | mexWarnMsgTxt("DBDSQR: singular values did not converge"); 64 | 65 | /* Free work arrays */ 66 | mxFree(e); 67 | mxFree(wrk); 68 | } 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lansvd.doc: -------------------------------------------------------------------------------- 1 | LANSVD Compute a few singular values and singular vectors. 2 | LANSVD computes singular triplets (u,v,sigma) such that 3 | A*u = sigma*v and A'*v = sigma*u. 
Only a few singular values 4 | and singular vectors are computed using the Lanczos 5 | bidiagonalization algorithm with partial reorthogonalization (BPRO). 6 | 7 | S = LANSVD(A) 8 | S = LANSVD('Afun','Atransfun',M,N) 9 | 10 | The first input argument is either a matrix or a 11 | string containing the name of an M-file which applies a linear 12 | operator to the columns of a given matrix. In the latter case, 13 | the second input must be the name of an M-file which applies the 14 | transpose of the same operator to the columns of a given matrix, 15 | and the third and fourth arguments must be M and N, the dimensions 16 | of the problem. 17 | 18 | [U,S,V] = LANSVD(A,K,'L',...) computes the K largest singular values. 19 | 20 | [U,S,V] = LANSVD(A,K,'S',...) computes the K smallest singular values. 21 | 22 | The full calling sequence is 23 | 24 | [U,S,V] = LANSVD(A,K,SIGMA,OPTIONS) 25 | [U,S,V] = LANSVD('Afun','Atransfun',M,N,K,SIGMA,OPTIONS) 26 | 27 | where K is the number of singular values desired and 28 | SIGMA is 'L' or 'S'. 29 | 30 | The OPTIONS structure specifies certain parameters in the algorithm. 31 | Field name Parameter Default 32 | 33 | OPTIONS.tol Convergence tolerance 16*eps 34 | OPTIONS.lanmax Dimension of the Lanczos basis. 35 | OPTIONS.p0 Starting vector for the Lanczos rand(n,1)-0.5 36 | iteration. 37 | OPTIONS.delta Level of orthogonality among the sqrt(eps/K) 38 | Lanczos vectors. 39 | OPTIONS.eta Level of orthogonality after 10*eps^(3/4) 40 | reorthogonalization. 41 | OPTIONS.cgs reorthogonalization method used 0 42 | '0' : iterated modified Gram-Schmidt 43 | '1' : iterated classical Gram-Schmidt 44 | OPTIONS.elr If equal to 1 then extended local 1 45 | reorthogonalization is enforced. 46 | 47 | See also LANBPRO, SVDS, SVD 48 | 49 | References: 50 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 51 | 52 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 53 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 54 | 55 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 56 | Math. Comp. 42 (1984), no. 165, 115--142. 57 | 58 | Rasmus Munk Larsen, DAIMI, 1998 59 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lansvd.txt: -------------------------------------------------------------------------------- 1 | LANSVD Compute a few singular values and singular vectors. 2 | LANSVD computes singular triplets (u,v,sigma) such that 3 | A*u = sigma*v and A'*v = sigma*u. Only a few singular values 4 | and singular vectors are computed using the Lanczos 5 | bidiagonalization algorithm with partial reorthogonalization (BPRO). 6 | 7 | S = LANSVD(A) 8 | S = LANSVD('Afun','Atransfun',M,N) 9 | 10 | The first input argument is either a matrix or a 11 | string containing the name of an M-file which applies a linear 12 | operator to the columns of a given matrix. In the latter case, 13 | the second input must be the name of an M-file which applies the 14 | transpose of the same operator to the columns of a given matrix, 15 | and the third and fourth arguments must be M and N, the dimensions 16 | of the problem. 17 | 18 | [U,S,V] = LANSVD(A,K,'L',...) computes the K largest singular values. 19 | 20 | [U,S,V] = LANSVD(A,K,'S',...) computes the K smallest singular values. 21 | 22 | The full calling sequence is 23 | 24 | [U,S,V] = LANSVD(A,K,SIGMA,OPTIONS) 25 | [U,S,V] = LANSVD('Afun','Atransfun',M,N,K,SIGMA,OPTIONS) 26 | 27 | where K is the number of singular values desired and 28 | SIGMA is 'L' or 'S'. 
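   (annotation) Minimal usage sketch, assuming A is a large sparse matrix and
   the PROPACK m-files are on the MATLAB path; the only option set here is the
   documented OPTIONS.tol field:

      opts.tol = 1e-8;
      [U,S,V] = lansvd(A, 6, 'L', opts);   % six largest singular triplets
      % U*S*V' then approximates A restricted to its six leading triplets.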
29 | 30 | The OPTIONS structure specifies certain parameters in the algorithm. 31 | Field name Parameter Default 32 | 33 | OPTIONS.tol Convergence tolerance 16*eps 34 | OPTIONS.lanmax Dimension of the Lanczos basis. 35 | OPTIONS.p0 Starting vector for the Lanczos rand(n,1)-0.5 36 | iteration. 37 | OPTIONS.delta Level of orthogonality among the sqrt(eps/K) 38 | Lanczos vectors. 39 | OPTIONS.eta Level of orthogonality after 10*eps^(3/4) 40 | reorthogonalization. 41 | OPTIONS.cgs reorthogonalization method used 0 42 | '0' : iterated modified Gram-Schmidt 43 | '1' : iterated classical Gram-Schmidt 44 | OPTIONS.elr If equal to 1 then extended local 1 45 | reorthogonalization is enforced. 46 | 47 | See also LANBPRO, SVDS, SVD 48 | 49 | References: 50 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 51 | 52 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 53 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 54 | 55 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 56 | Math. Comp. 42 (1984), no. 165, 115--142. 57 | 58 | Rasmus Munk Larsen, DAIMI, 1998 59 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/laneig.doc: -------------------------------------------------------------------------------- 1 | LANEIG Compute a few eigenvalues and eigenvectors. 2 | LANEIG solves the eigenvalue problem A*v=lambda*v, when A is 3 | real and symmetric using the Lanczos algorithm with partial 4 | reorthogonalization (PRO). 5 | 6 | [V,D] = LANEIG(A) 7 | [V,D] = LANEIG('Afun',N) 8 | 9 | The first input argument is either a real symmetric matrix, or a 10 | string containing the name of an M-file which applies a linear 11 | operator to the columns of a given matrix. In the latter case, 12 | the second input argument must be N, the order of the problem. 13 | 14 | The full calling sequence is 15 | 16 | [V,D,ERR] = LANEIG(A,K,SIGMA,OPTIONS) 17 | [V,D,ERR] = LANEIG('Afun',N,K,SIGMA,OPTIONS) 18 | 19 | On exit ERR contains the computed error bounds. K is the number of 20 | eigenvalues desired and SIGMA is numerical shift or a two letter string 21 | which specifies which part of the spectrum should be computed: 22 | 23 | SIGMA Specified eigenvalues 24 | 25 | 'AL' Algebraically Largest 26 | 'AS' Algebraically Smallest 27 | 'LM' Largest Magnitude (default) 28 | 'SM' Smallest Magnitude (does not work when A is an m-file) 29 | 'BE' Both Ends. Computes k/2 eigenvalues 30 | from each end of the spectrum (one more 31 | from the high end if k is odd.) 32 | 33 | The OPTIONS structure specifies certain parameters in the algorithm. 34 | 35 | Field name Parameter Default 36 | 37 | OPTIONS.tol Convergence tolerance 16*eps 38 | OPTIONS.lanmax Dimension of the Lanczos basis. 39 | OPTIONS.v0 Starting vector for the Lanczos rand(n,1)-0.5 40 | iteration. 41 | OPTIONS.delta Level of orthogonality among the sqrt(eps/K) 42 | Lanczos vectors. 43 | OPTIONS.eta Level of orthogonality after 10*eps^(3/4) 44 | reorthogonalization. 45 | OPTIONS.cgs reorthogonalization method used 0 46 | '0' : iterated modified Gram-Schmidt 47 | '1' : iterated classical Gram-Schmidt 48 | OPTIONS.elr If equal to 1 then extended local 1 49 | reorthogonalization is enforced. 50 | 51 | See also LANPRO, EIGS, EIG. 52 | 53 | References: 54 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 55 | 56 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 57 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 58 | 59 | H. D. 
Simon, ``The Lanczos algorithm with partial reorthogonalization'', 60 | Math. Comp. 42 (1984), no. 165, 115--142. 61 | 62 | Rasmus Munk Larsen, DAIMI, 1998 63 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/laneig.txt: -------------------------------------------------------------------------------- 1 | LANEIG Compute a few eigenvalues and eigenvectors. 2 | LANEIG solves the eigenvalue problem A*v=lambda*v, when A is 3 | real and symmetric using the Lanczos algorithm with partial 4 | reorthogonalization (PRO). 5 | 6 | [V,D] = LANEIG(A) 7 | [V,D] = LANEIG('Afun',N) 8 | 9 | The first input argument is either a real symmetric matrix, or a 10 | string containing the name of an M-file which applies a linear 11 | operator to the columns of a given matrix. In the latter case, 12 | the second input argument must be N, the order of the problem. 13 | 14 | The full calling sequence is 15 | 16 | [V,D,ERR] = LANEIG(A,K,SIGMA,OPTIONS) 17 | [V,D,ERR] = LANEIG('Afun',N,K,SIGMA,OPTIONS) 18 | 19 | On exit ERR contains the computed error bounds. K is the number of 20 | eigenvalues desired and SIGMA is numerical shift or a two letter string 21 | which specifies which part of the spectrum should be computed: 22 | 23 | SIGMA Specified eigenvalues 24 | 25 | 'AL' Algebraically Largest 26 | 'AS' Algebraically Smallest 27 | 'LM' Largest Magnitude (default) 28 | 'SM' Smallest Magnitude (does not work when A is an m-file) 29 | 'BE' Both Ends. Computes k/2 eigenvalues 30 | from each end of the spectrum (one more 31 | from the high end if k is odd.) 32 | 33 | The OPTIONS structure specifies certain parameters in the algorithm. 34 | 35 | Field name Parameter Default 36 | 37 | OPTIONS.tol Convergence tolerance 16*eps 38 | OPTIONS.lanmax Dimension of the Lanczos basis. 39 | OPTIONS.v0 Starting vector for the Lanczos rand(n,1)-0.5 40 | iteration. 41 | OPTIONS.delta Level of orthogonality among the sqrt(eps/K) 42 | Lanczos vectors. 43 | OPTIONS.eta Level of orthogonality after 10*eps^(3/4) 44 | reorthogonalization. 45 | OPTIONS.cgs reorthogonalization method used 0 46 | '0' : iterated modified Gram-Schmidt 47 | '1' : iterated classical Gram-Schmidt 48 | OPTIONS.elr If equal to 1 then extended local 1 49 | reorthogonalization is enforced. 50 | 51 | See also LANPRO, EIGS, EIG. 52 | 53 | References: 54 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 55 | 56 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 57 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 58 | 59 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 60 | Math. Comp. 42 (1984), no. 165, 115--142. 61 | 62 | Rasmus Munk Larsen, DAIMI, 1998 63 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/prox_operators/prox_TV.m: -------------------------------------------------------------------------------- 1 | function sol = prox_TV(b, lambda, param) 2 | % PROX_TV - Total variation proximal operator 3 | % 4 | % sol = prox_TV(y, lambda, param) solves: 5 | % 6 | % min_{x} ||y - x||_2^2 + lambda * ||x||_{TV} 7 | % 8 | % The input argument param contains the following fields: 9 | % 10 | % - max_iter: max. nb. of iterations (default: 200) 11 | % 12 | % - rel_obj: minimum relative change of the objective value (default: 13 | % 1e-4) 14 | % The algorithm stops if 15 | % | f(x(t))-f(x(t-1)) | / f(x(t)) < rel_obj, 16 | % where x(t) is the estimate of the solution at iteration t. 
17 | % 18 | % - verbose: 0 no log, 1 a summary log at convergence, 2 print main 19 | % steps (default: 1) 20 | % 21 | % 22 | % 23 | % Reference: 24 | % [1] A. Beck and M. Teboulle, "Fast gradient-based algorithms for 25 | % constrained Total Variation Image Denoising and Deblurring Problems", 26 | % IEEE Transactions on Image Processing, VOL. 18, NO. 11, 2419-2434, 27 | % November 2009. 28 | % 29 | % 30 | % 31 | % Author: Gilles Puy 32 | % E-mail: gilles.puy@epfl.ch 33 | % Date: October 15, 2010 34 | % 35 | 36 | % Optional input arguments 37 | 38 | if nargin<3, param=struct; end 39 | 40 | if ~isfield(param, 'rel_obj'), param.rel_obj = 1e-4; end 41 | if ~isfield(param, 'verbose'), param.verbose = 1; end 42 | if ~isfield(param, 'max_iter'), param.max_iter = 200; end 43 | 44 | % Initializations 45 | [r, s] = gradient_op(b*0); 46 | pold = r; qold = s; 47 | told = 1; prev_obj = 0; 48 | 49 | % Main iterations 50 | if param.verbose > 1 51 | fprintf(' Proximal TV operator:\n'); 52 | end 53 | for iter = 1:param.max_iter 54 | 55 | % Current solution 56 | sol = b - lambda*div_op(r, s); 57 | 58 | % Objective function value 59 | obj = .5*norm(b(:)-sol(:), 2)^2 + lambda * TV_norm(sol); 60 | rel_obj = abs(obj-prev_obj)/obj; 61 | prev_obj = obj; 62 | 63 | % Stopping criterion 64 | if param.verbose>1 65 | fprintf(' Iter %i, obj = %e, rel_obj = %e\n', ... 66 | iter, obj, rel_obj); 67 | end 68 | if rel_obj < param.rel_obj 69 | crit_TV = 'TOL_EPS'; break; 70 | end 71 | 72 | % Udpate divergence vectors and project 73 | [dx, dy] = gradient_op(sol); 74 | r = r - 1/(8*lambda) * dx; s = s - 1/(8*lambda) * dy; 75 | weights = max(1, sqrt(abs(r).^2+abs(s).^2)); 76 | p = r./weights; q = s./weights; 77 | 78 | % FISTA update 79 | t = (1+sqrt(4*told^2))/2; 80 | r = p + (told-1)/t * (p - pold); pold = p; 81 | s = q + (told-1)/t * (q - qold); qold = q; 82 | told = t; 83 | 84 | end 85 | 86 | % Log after the minimization 87 | if ~exist('crit_TV', 'var'), crit_TV = 'MAX_IT'; end 88 | if param.verbose >= 1 89 | fprintf([' Prox_TV: obj = %e, rel_obj = %e,' ... 90 | ' %s, iter = %i\n'], obj, rel_obj, crit_TV, iter); 91 | end 92 | 93 | end -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/update_gbound.m: -------------------------------------------------------------------------------- 1 | function anorm = update_gbound(anorm,alpha,beta,j) 2 | %UPDATE_GBOUND Update Gerscgorin estimate of 2-norm 3 | % ANORM = UPDATE_GBOUND(ANORM,ALPHA,BETA,J) updates the Gersgorin bound 4 | % for the tridiagonal in the Lanczos process after the J'th step. 5 | % Applies Gerscgorins circles to T_K'*T_k instead of T_k itself 6 | % since this gives a tighter bound. 7 | 8 | if j==1 % Apply Gerscgorin circles to T_k'*T_k to estimate || A ||_2 9 | i=j; 10 | % scale to avoid overflow 11 | scale = max(abs(alpha(i)),abs(beta(i+1))); 12 | alpha(i) = alpha(i)/scale; 13 | beta(i) = beta(i)/scale; 14 | anorm = 1.01*scale*sqrt(alpha(i)^2+beta(i+1)^2 + abs(alpha(i)*beta(i+1))); 15 | elseif j==2 16 | i=1; 17 | % scale to avoid overflow 18 | scale = max(max(abs(alpha(1:2)),max(abs(beta(2:3))))); 19 | alpha(1:2) = alpha(1:2)/scale; 20 | beta(2:3) = beta(2:3)/scale; 21 | 22 | anorm = max(anorm, scale*sqrt(alpha(i)^2+beta(i+1)^2 + ... 23 | abs(alpha(i)*beta(i+1) + alpha(i+1)*beta(i+1)) + ... 24 | abs(beta(i+1)*beta(i+2)))); 25 | i=2; 26 | anorm = max(anorm,scale*sqrt(abs(beta(i)*alpha(i-1) + alpha(i)*beta(i)) + ... 27 | beta(i)^2+alpha(i)^2+beta(i+1)^2 + ... 
28 | abs(alpha(i)*beta(i+1))) ); 29 | elseif j==3 30 | % scale to avoid overflow 31 | scale = max(max(abs(alpha(1:3)),max(abs(beta(2:4))))); 32 | alpha(1:3) = alpha(1:3)/scale; 33 | beta(2:4) = beta(2:4)/scale; 34 | i=2; 35 | anorm = max(anorm,scale*sqrt(abs(beta(i)*alpha(i-1) + alpha(i)*beta(i)) + ... 36 | beta(i)^2+alpha(i)^2+beta(i+1)^2 + ... 37 | abs(alpha(i)*beta(i+1) + alpha(i+1)*beta(i+1)) + ... 38 | abs(beta(i+1)*beta(i+2))) ); 39 | i=3; 40 | anorm = max(anorm,scale*sqrt(abs(beta(i)*beta(i-1)) + ... 41 | abs(beta(i)*alpha(i-1) + alpha(i)*beta(i)) + ... 42 | beta(i)^2+alpha(i)^2+beta(i+1)^2 + ... 43 | abs(alpha(i)*beta(i+1))) ); 44 | else 45 | % scale to avoid overflow 46 | % scale = max(max(abs(alpha(j-2:j)),max(abs(beta(j-2:j+1))))); 47 | % alpha(j-2:j) = alpha(j-2:j)/scale; 48 | % beta(j-2:j+1) = beta(j-2:j+1)/scale; 49 | 50 | % Avoid scaling, which is slow. At j>3 the estimate is usually quite good 51 | % so just make sure that anorm is not made infinite by overflow. 52 | i = j-1; 53 | anorm1 = sqrt(abs(beta(i)*beta(i-1)) + ... 54 | abs(beta(i)*alpha(i-1) + alpha(i)*beta(i)) + ... 55 | beta(i)^2+alpha(i)^2+beta(i+1)^2 + ... 56 | abs(alpha(i)*beta(i+1) + alpha(i+1)*beta(i+1)) + ... 57 | abs(beta(i+1)*beta(i+2))); 58 | if isfinite(anorm1) 59 | anorm = max(anorm,anorm1); 60 | end 61 | i = j; 62 | anorm1 = sqrt(abs(beta(i)*beta(i-1)) + ... 63 | abs(beta(i)*alpha(i-1) + alpha(i)*beta(i)) + ... 64 | beta(i)^2+alpha(i)^2+beta(i+1)^2 + ... 65 | abs(alpha(i)*beta(i+1))); 66 | if isfinite(anorm1) 67 | anorm = max(anorm,anorm1); 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/HTNN_FFT.m: -------------------------------------------------------------------------------- 1 | function [L,obj,tsvd_rank] =HTNN_FFT(X,Omega,opts) 2 | 3 | %Low-Rank Order-D(D>3) Tensor Completion under FFT 4 | 5 | % -------------------------------------------- 6 | % Input: 7 | % X - Order-D tensor 8 | % Omega - index of the observed entries 9 | % opts - Structure value in Matlab. 
The fields are 10 | % opts.tol - termination tolerance 11 | % opts.max_iter - maximum number of iterations 12 | % opts.mu - stepsize for dual variable updating in ADMM 13 | % opts.max_mu - maximum stepsize 14 | % opts.rho - rho>=1, ratio used to increase mu 15 | % opts.DEBUG - 0 or 1 16 | % 17 | % Output: 18 | % L - Order-D tensor 19 | % obj - Objective function value 20 | % tsvd_rank - T_svd rank 21 | % 22 | % Written by Wenjin Qin (qinwenjin2021@163.com) 23 | % 24 | 25 | mu = 1e-3; 26 | max_mu = 1e8; 27 | tol = 1e-6; 28 | max_iter = 500; 29 | rho = 1.2; 30 | DEBUG = 1; 31 | 32 | if ~exist('opts', 'var') 33 | opts = []; 34 | end 35 | if isfield(opts, 'tol'); tol = opts.tol; end 36 | if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end 37 | if isfield(opts, 'rho'); rho = opts.rho; end 38 | if isfield(opts, 'mu'); mu = opts.mu; end 39 | if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end 40 | if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end 41 | 42 | 43 | dim = size(X); 44 | p = length(size(X)); 45 | n = zeros(1,p); 46 | for i = 1:p 47 | n(i) = size(X,i); 48 | end 49 | 50 | BarOmega = ones(dim) - Omega; 51 | E = zeros(dim); 52 | Y = E; 53 | L = E; 54 | 55 | for iter = 1 : max_iter 56 | Lk = L; 57 | Ek = E; 58 | 59 | % updata Mbar 60 | Mbar = L + E + Y/mu; 61 | Mbar = X.*Omega + Mbar.*BarOmega; 62 | 63 | % update L 64 | [L,tnnX,trank] =prox_htnn_F(Mbar-E-Y/mu,1/mu); 65 | 66 | % updata M 67 | M = L + E + Y/mu; 68 | M = X.*Omega + M.*BarOmega; 69 | 70 | % update E 71 | E = (M-L-Y/mu).*BarOmega; 72 | 73 | 74 | dY = L+E-M; 75 | chgX = max(abs(Lk(:)-L(:))); 76 | chgE = max(abs(Ek(:)-E(:))); 77 | chg = max([chgX chgE max(abs(dY(:)))]); 78 | if DEBUG 79 | if iter == 1 || mod(iter, 10) == 0 80 | obj = tnnX; 81 | err = chg; 82 | disp(['iter ' num2str(iter) ', mu=' num2str(mu) ... 83 | ', obj=' num2str(obj) ', err=' num2str(err)]); 84 | end 85 | end 86 | 87 | if chg < tol 88 | break; 89 | end 90 | Y = Y + mu*dY; 91 | mu = min(rho*mu,max_mu); 92 | end 93 | obj = tnnX; 94 | tsvd_rank=trank; -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/HTNN_U.m: -------------------------------------------------------------------------------- 1 | function [L,obj,tsvd_rank] =HTNN_U(ModeX,X,Omega,opts) 2 | 3 | %Low-Rank Order-D(D>3) Tensor Completion under generalized invertible linear transform 4 | 5 | % -------------------------------------------- 6 | % Input: 7 | % ModeX - invertible linear transform 8 | % X - Order-D tensor 9 | % Omega - index of the observed entries 10 | % opts - Structure value in Matlab. 
The fields are 11 | % opts.tol - termination tolerance 12 | % opts.max_iter - maximum number of iterations 13 | % opts.mu - stepsize for dual variable updating in ADMM 14 | % opts.max_mu - maximum stepsize 15 | % opts.rho - rho>=1, ratio used to increase mu 16 | % opts.DEBUG - 0 or 1 17 | % 18 | % Output: 19 | % L - Order-D tensor 20 | % obj - Objective function value 21 | % tsvd_rank - T_svd rank 22 | % 23 | % Written by Wenjin Qin (qinwenjin2021@163.com) 24 | % 25 | 26 | mu = 1e-3; 27 | max_mu = 1e8; 28 | tol = 1e-6; 29 | max_iter = 500; 30 | rho = 1.2; 31 | DEBUG = 1; 32 | 33 | if ~exist('opts', 'var') 34 | opts = []; 35 | end 36 | if isfield(opts, 'tol'); tol = opts.tol; end 37 | if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end 38 | if isfield(opts, 'rho'); rho = opts.rho; end 39 | if isfield(opts, 'mu'); mu = opts.mu; end 40 | if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end 41 | if isfield(opts, 'DEBUG'); DEBUG = opts.DEBUG; end 42 | 43 | 44 | dim = size(X); 45 | p = length(size(X)); 46 | n = zeros(1,p); 47 | for i = 1:p 48 | n(i) = size(X,i); 49 | end 50 | 51 | BarOmega = ones(dim) - Omega; 52 | E = zeros(dim); 53 | Y = E; 54 | L = E; 55 | 56 | for iter = 1 : max_iter 57 | Lk = L; 58 | Ek = E; 59 | % updata Mbar 60 | Mbar = L + E + Y/mu; 61 | Mbar = X.*Omega + Mbar.*BarOmega; 62 | 63 | % update L 64 | [L,tnnX,trank] =prox_htnn_U(ModeX,Mbar-E-Y/mu,1/mu); 65 | 66 | % updata M 67 | M = L + E + Y/mu; 68 | M = X.*Omega + M.*BarOmega; 69 | 70 | % update E 71 | E = (M-L-Y/mu).*BarOmega; 72 | 73 | 74 | dY =L+E- M; 75 | chgX = max(abs(Lk(:)-L(:))); 76 | chgE = max(abs(Ek(:)-E(:))); 77 | chg = max([chgX chgE max(abs(dY(:)))]); 78 | if DEBUG 79 | if iter == 1 || mod(iter, 10) == 0 80 | obj = tnnX; 81 | err = chg; 82 | disp(['iter ' num2str(iter) ', mu=' num2str(mu) ... 83 | ', obj=' num2str(obj) ', err=' num2str(err)]); 84 | end 85 | end 86 | 87 | if chg < tol 88 | break; 89 | end 90 | Y = Y + mu*dY; 91 | mu = min(rho*mu,max_mu); 92 | end 93 | obj = tnnX; 94 | tsvd_rank=trank; -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth_mex.c: -------------------------------------------------------------------------------- 1 | /* 2 | ------------------------------------------------------------------------- 3 | GATEWAY ROUTINE FOR CALLING REORTH FROM MATLAB. 4 | 5 | REORTH Reorthogonalize a vector using iterated Gram-Schmidt 6 | 7 | [R_NEW,NORMR_NEW,NRE] = reorth(Q,R,NORMR,INDEX,ALPHA,METHOD) 8 | reorthogonalizes R against the subset of columns of Q given by INDEX. 9 | If INDEX==[] then R is reorthogonalized all columns of Q. 10 | If the result R_NEW has a small norm, i.e. if norm(R_NEW) < ALPHA*NORMR, 11 | then a second reorthogonalization is performed. If the norm of R_NEW 12 | is once more decreased by more than a factor of ALPHA then R is 13 | numerically in span(Q(:,INDEX)) and a zero-vector is returned for R_NEW. 14 | 15 | If method==0 then iterated modified Gram-Schmidt is used. 16 | If method==1 then iterated classical Gram-Schmidt is used. 17 | 18 | The default value for ALPHA is 0.5. 19 | NRE is the number of reorthogonalizations performed (1 or 2). 20 | 21 | References: 22 | Aake Bjorck, "Numerical Methods for Least Squares Problems", 23 | SIAM, Philadelphia, 1996, pp. 68-69. 24 | 25 | J.~W. Daniel, W.~B. Gragg, L. Kaufman and G.~W. Stewart, 26 | ``Reorthogonalization and Stable Algorithms Updating the 27 | Gram-Schmidt QR Factorization'', Math. Comp., 30 (1976), no. 28 | 136, pp. 772-795. 29 | 30 | B. N. 
Parlett, ``The Symmetric Eigenvalue Problem'', 31 | Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109 32 | 33 | Rasmus Munk Larsen, DAIMI, 1998. 34 | ------------------------------------------------------------------------- 35 | */ 36 | 37 | #include 38 | #include "mex.h" 39 | 40 | /* Template for reorth: */ 41 | 42 | void reorth_(int *n, int *k, double *V, int *ldv, double *vnew, 43 | double *normvnew, double *index, double *alpha, double *work, 44 | int *iflag, int *nre); 45 | 46 | /* Here comes the gateway function to be called by Matlab: */ 47 | void mexFunction(int nlhs, mxArray *plhs[], 48 | int nrhs, const mxArray *prhs[]) 49 | { 50 | int n, k1, k, imethod, inre; 51 | double *work; 52 | 53 | if (nrhs != 6) 54 | mexErrMsgTxt("reorth requires 6 input arguments"); 55 | else if (nlhs < 2) 56 | mexErrMsgTxt("reorth requires at least 2 output arguments"); 57 | 58 | n = mxGetM(prhs[0]); /* get the dimensions of the input */ 59 | k1 = mxGetN(prhs[0]); 60 | k = mxGetM(prhs[3]) * mxGetN(prhs[3]); 61 | 62 | /* Create/allocate return argument, a 1x1 real-valued Matrix */ 63 | plhs[0]=mxCreateDoubleMatrix(n,1,mxREAL); 64 | plhs[1]=mxCreateDoubleMatrix(1,1,mxREAL); 65 | if (nlhs>2) 66 | plhs[2]=mxCreateDoubleMatrix(1,1,mxREAL); 67 | 68 | work = mxCalloc(k,sizeof(double)); 69 | 70 | memcpy(mxGetPr(plhs[0]),mxGetPr(prhs[1]), n*sizeof(double)); 71 | memcpy(mxGetPr(plhs[1]),mxGetPr(prhs[2]), sizeof(double)); 72 | imethod = (int) mxGetScalar(prhs[5]); 73 | 74 | reorth_(&n, &k, mxGetPr(prhs[0]), &n, mxGetPr(plhs[0]), 75 | mxGetPr(plhs[1]), mxGetPr(prhs[3]), mxGetPr(prhs[4]), 76 | work,&imethod,&inre); 77 | if (nlhs>2) 78 | *(mxGetPr(plhs[2])) = (double) inre*k; 79 | 80 | mxFree(work); 81 | } 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth.m: -------------------------------------------------------------------------------- 1 | function [r,normr,nre,s] = reorth(Q,r,normr,index,alpha,method) 2 | 3 | %REORTH Reorthogonalize a vector using iterated Gram-Schmidt 4 | % 5 | % [R_NEW,NORMR_NEW,NRE] = reorth(Q,R,NORMR,INDEX,ALPHA,METHOD) 6 | % reorthogonalizes R against the subset of columns of Q given by INDEX. 7 | % If INDEX==[] then R is reorthogonalized all columns of Q. 8 | % If the result R_NEW has a small norm, i.e. if norm(R_NEW) < ALPHA*NORMR, 9 | % then a second reorthogonalization is performed. If the norm of R_NEW 10 | % is once more decreased by more than a factor of ALPHA then R is 11 | % numerically in span(Q(:,INDEX)) and a zero-vector is returned for R_NEW. 12 | % 13 | % If method==0 then iterated modified Gram-Schmidt is used. 14 | % If method==1 then iterated classical Gram-Schmidt is used. 15 | % 16 | % The default value for ALPHA is 0.5. 17 | % NRE is the number of reorthogonalizations performed (1 or 2). 18 | 19 | % References: 20 | % Aake Bjorck, "Numerical Methods for Least Squares Problems", 21 | % SIAM, Philadelphia, 1996, pp. 68-69. 22 | % 23 | % J.~W. Daniel, W.~B. Gragg, L. Kaufman and G.~W. Stewart, 24 | % ``Reorthogonalization and Stable Algorithms Updating the 25 | % Gram-Schmidt QR Factorization'', Math. Comp., 30 (1976), no. 26 | % 136, pp. 772-795. 27 | % 28 | % B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 29 | % Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109 30 | 31 | % Rasmus Munk Larsen, DAIMI, 1998. 32 | 33 | % Check input arguments. 
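% Example (illustrative sketch only; Q and r below are arbitrary test data,
% and the call mirrors the signature documented above):
%
%   Q = orth(randn(100,10));                 % basis with orthonormal columns
%   r = randn(100,1);                        % vector to reorthogonalize
%   [r_new,normr_new,nre] = reorth(Q,r,norm(r),[],0.5,0);   % MGS, default alpha
%   % afterwards norm(Q'*r_new) is at machine-precision level, and nre
%   % reports whether one or two Gram-Schmidt sweeps were needed.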
34 | % warning('PROPACK:NotUsingMex','Using slow matlab code for reorth.') 35 | if nargin<2 36 | error('Not enough input arguments.') 37 | end 38 | [n k1] = size(Q); 39 | if nargin<3 | isempty(normr) 40 | % normr = norm(r); 41 | normr = sqrt(r'*r); 42 | end 43 | if nargin<4 | isempty(index) 44 | k=k1; 45 | index = [1:k]'; 46 | simple = 1; 47 | else 48 | k = length(index); 49 | if k==k1 & index(:)==[1:k]' 50 | simple = 1; 51 | else 52 | simple = 0; 53 | end 54 | end 55 | if nargin<5 | isempty(alpha) 56 | alpha=0.5; % This choice garanties that 57 | % || Q^T*r_new - e_{k+1} ||_2 <= 2*eps*||r_new||_2, 58 | % cf. Kahans ``twice is enough'' statement proved in 59 | % Parletts book. 60 | end 61 | if nargin<6 | isempty(method) 62 | method = 0; 63 | end 64 | if k==0 | n==0 65 | return 66 | end 67 | if nargout>3 68 | s = zeros(k,1); 69 | end 70 | 71 | 72 | normr_old = 0; 73 | nre = 0; 74 | while normr < alpha*normr_old | nre==0 75 | if method==1 76 | if simple 77 | t = Q'*r; 78 | r = r - Q*t; 79 | else 80 | t = Q(:,index)'*r; 81 | r = r - Q(:,index)*t; 82 | end 83 | else 84 | for i=index, 85 | t = Q(:,i)'*r; 86 | r = r - Q(:,i)*t; 87 | end 88 | end 89 | if nargout>3 90 | s = s + t; 91 | end 92 | normr_old = normr; 93 | % normr = norm(r); 94 | normr = sqrt(r'*r); 95 | nre = nre + 1; 96 | if nre > 4 97 | % r is in span(Q) to full accuracy => accept r = 0 as the new vector. 98 | r = zeros(n,1); 99 | normr = 0; 100 | return 101 | end 102 | end 103 | -------------------------------------------------------------------------------- /compared_methods/NGmeet/ParSetH.m: -------------------------------------------------------------------------------- 1 | function [par]=ParSetH(nSig,band) 2 | %Pavia with 0.1 noise : par.SearchWin 40, par.c1=5*sqrt(2); par.Iter= 5; 3 | %par.rho = 10; par.lambda = 0.1; par.patnum = 200; 4 | % toy data par.SearchWin = 30; par.patnum =120; Other, 5 | % WDC the same as Pavia data but with par.patnum 120 6 | % the setting of par.SearchWin, par.c1 and par.step, par.patsize par.Iter 7 | % par.lamada par.patnum are from LLRT, par.k_subspace is estimated by 8 | % HySime 9 | %% Patch-based Iteration Parameters 10 | par.nSig = nSig; % Variance of the noise image 11 | par.SearchWin = 30; % Non-local patch searching window 12 | par.c1 = 5*sqrt(2); % Constant num for HSI 13 | par.step = 4; % stepsize 14 | %% Patch and noise Parameters 15 | if band<=50 16 | if nSig<=10.1 17 | par.patsize = 5; 18 | par.patnum = 150; 19 | par.Iter = 5; 20 | par.lamada = 0.54; 21 | par.k_subspace = 8; 22 | elseif nSig <= 30.1 23 | par.patsize = 5; 24 | par.patnum = 150; 25 | par.Iter = 5; 26 | par.lamada = 0.56; 27 | par.k_subspace = 6; 28 | elseif nSig <= 50.1 29 | par.patsize = 5; 30 | par.patnum = 150; 31 | par.Iter = 5; 32 | par.lamada = 0.56; 33 | par.k_subspace = 5; 34 | else 35 | par.patsize = 5; 36 | par.patnum = 200; 37 | par.Iter = 5; 38 | par.lamada = 0.58; 39 | par.k_subspace = 4; 40 | end 41 | %%%%%%%%%%%%%%%%%% 42 | elseif band<=100 43 | if nSig<=10.1 44 | par.patsize = 5; 45 | par.patnum = 150; 46 | par.Iter = 5; 47 | par.lamada = 0.54; 48 | par.k_subspace = 6; 49 | elseif nSig <= 30.1 50 | par.patsize = 5; 51 | par.patnum = 150; 52 | par.Iter = 5; 53 | par.lamada = 0.56; 54 | par.k_subspace = 6; 55 | elseif nSig <= 50.1 56 | par.patsize = 5; 57 | par.patnum = 150; 58 | par.Iter = 5; 59 | par.lamada = 0.58; 60 | par.k_subspace = 5; 61 | else 62 | par.patsize = 5; 63 | par.patnum = 200; 64 | par.Iter = 5; 65 | par.lamada = 0.58; 66 | par.k_subspace = 4; 67 | end 68 | %%%%%%%%%%%%%%%%%% 69 | elseif 
band<=250 70 | par.c1 = 8*sqrt(2); 71 | if nSig<=10.1 72 | par.patsize = 5; 73 | par.patnum = 150; 74 | par.Iter = 5; 75 | par.lamada = 0.54; 76 | par.k_subspace = 5; 77 | elseif nSig <= 30.1 78 | par.patsize = 5; 79 | par.patnum = 150; 80 | par.Iter = 5; 81 | par.lamada = 0.56; 82 | par.k_subspace = 6; 83 | elseif nSig <= 50.1 84 | par.patsize = 5; 85 | par.patnum = 150; 86 | par.Iter = 5; 87 | par.lamada = 0.58; 88 | par.k_subspace = 5; 89 | else 90 | par.patsize = 5; 91 | par.patnum = 200; 92 | par.Iter = 5; 93 | par.lamada = 0.58; 94 | par.k_subspace = 4; 95 | end 96 | 97 | end 98 | 99 | 100 | -------------------------------------------------------------------------------- /compared_methods/TCTV/high-order tensor-SVD Toolbox/tmprod.m: -------------------------------------------------------------------------------- 1 | function [S,iperm] = tmprod(T,U,mode,transpose,saveperm) 2 | %TMPROD Mode-n tensor-matrix product. 3 | % S = tmprod(T,U,mode) computes the tensor-matrix product of the tensor T 4 | % with the matrices U{1}, ..., U{N} along the modes mode(1), ..., 5 | % mode(N), respectively. Note that in this implementation, the vector 6 | % mode should contain distinct integers. The mode-n tensor-matrix 7 | % products are computed sequentially in a heuristically determined order. 8 | % A mode-n tensor-matrix product results in a new tensor S in which the 9 | % mode-n vectors of a given tensor T are premultiplied by a given matrix 10 | % U{n}, i.e., tens2mat(S,mode(n)) = U{n}*tens2mat(T,mode(n)). 11 | % 12 | % S = tmprod(T,U,mode,'T') and tmprod(T,U,mode,'H') apply the mode-n 13 | % tensor-matrix product using the transposed matrices U{n}.' and 14 | % conjugate transposed matrices U{n}' respectively along mode(n). 15 | % 16 | % [S,iperm] = tmprod(T,U,mode) and S = tmprod(T,U,mode,'saveperm') save 17 | % one permutation operation. In the former case, the tensor-matrix 18 | % product can then be recovered by permute(S,iperm). 19 | % 20 | % See also tens2mat, mat2tens, contract. 21 | 22 | % Authors: Laurent Sorber (Laurent.Sorber@cs.kuleuven.be), 23 | % Nick Vannieuwenhoven (Nick.Vannieuwenhoven@cs.kuleuven.be) 24 | % Marc Van Barel (Marc.VanBarel@cs.kuleuven.be) 25 | % Lieven De Lathauwer (Lieven.DeLathauwer@kuleuven-kulak.be) 26 | 27 | % Check arguments. 28 | if nargin < 4, transpose = 0; end 29 | if nargin < 5, saveperm = false; end 30 | if ischar(saveperm), saveperm = strcmpi(saveperm,'saveperm'); end 31 | switch transpose 32 | case {'T','H'}, m = [2 1]; 33 | otherwise, m = [1 2]; if nargin < 5, saveperm = transpose; end 34 | end 35 | if ~iscell(U), U = {U}; end 36 | if length(U) ~= length(mode) 37 | error('tmprod:NumberOfProducts','length(U) should be length(mode).'); 38 | end 39 | U = U(:)'; mode = mode(:)'; 40 | size_tens = ones(1,max(mode)); 41 | size_tens(1:ndims(T)) = size(T); 42 | if any(cellfun('size',U,m(2)) ~= size_tens(mode)) 43 | error('tmprod:U','size(T,mode(n)) should be size(U{n},%i).',m(2)); 44 | end 45 | 46 | % Sort the order of the mode-n products. 47 | [~,idx] = sort(size_tens(mode)./cellfun('size',U,m(1))); 48 | mode = mode(idx); 49 | U = U(idx); 50 | 51 | % Compute the complement of the set of modes. 52 | n = length(mode); 53 | N = length(size_tens); 54 | bits = ones(1,N); 55 | bits(mode) = 0; 56 | modec = 1:N; 57 | modec = modec(logical(bits(modec))); 58 | 59 | % Prepermute the tensor. 60 | perm = [mode modec]; 61 | size_tens = size_tens(perm); 62 | S = T; if any(mode ~= 1:n), S = permute(S,perm); end 63 | 64 | % Cycle through the n-mode products. 
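% Illustrative note (a sketch with arbitrary sizes): for a 3x4x5 tensor T and a
% 2x3 matrix A, S = tmprod(T,A,1) has size 2x4x5 and satisfies
% tens2mat(S,1) == A*tens2mat(T,1), i.e. each mode-1 fiber of T is
% premultiplied by A, as stated in the help text above.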
65 | for i = 1:n 66 | size_tens(1) = size(U{i},m(1)); 67 | switch transpose 68 | case 'T' 69 | S = reshape(U{i}.'*reshape(S,size(S,1),[]),size_tens); 70 | case 'H' 71 | S = reshape(U{i}'*reshape(S,size(S,1),[]),size_tens); 72 | otherwise 73 | S = reshape(U{i}*reshape(S,size(S,1),[]),size_tens); 74 | end 75 | if i < n 76 | S = permute(S,[2:N 1]); 77 | size_tens = size_tens([2:N 1]); 78 | end 79 | end 80 | 81 | % Inverse permute the tensor, unless the user intends to do so himself. 82 | iperm(perm([n:N 1:n-1])) = 1:N; 83 | if nargout <= 1 && ~saveperm, S = permute(S,iperm); end 84 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lanpro.doc: -------------------------------------------------------------------------------- 1 | LANPRO Lanczos tridiagonalization with partial reorthogonalization 2 | LANPRO computes the Lanczos tridiagonalization of a real symmetric 3 | matrix using the symmetric Lanczos algorithm with partial 4 | reorthogonalization. 5 | 6 | [Q_K,T_K,R,ANORM,IERR,WORK] = LANPRO(A,K,R0,OPTIONS,Q_old,T_old) 7 | [Q_K,T_K,R,ANORM,IERR,WORK] = LANPRO('Afun',N,K,R0,OPTIONS,Q_old,T_old) 8 | 9 | Computes K steps of the Lanczos algorithm with starting vector R0, 10 | and returns the K x K tridiagonal T_K, the N x K matrix Q_K 11 | with semiorthonormal columns and the residual vector R such that 12 | 13 | A*Q_K = Q_K*T_K + R . 14 | 15 | Partial reorthogonalization is used to keep the columns of Q_K 16 | semiorthogonal: 17 | MAX(DIAG((eye(k) - Q_K'*Q_K))) <= OPTIONS.delta. 18 | 19 | 20 | The first input argument is either a real symmetric matrix, a struct with 21 | components A.L and A.U or a string containing the name of an M-file which 22 | applies a linear operator to the columns of a given matrix. In the latter 23 | case, the second input argument must be N, the order of the problem. 24 | 25 | If A is a struct with components A.L and A.U, such that 26 | L*U = (A - sigma*I), a shift-and-invert Lanczos iteration is performed 27 | 28 | The OPTIONS structure is used to control the reorthogonalization: 29 | OPTIONS.delta: Desired level of orthogonality 30 | (default = sqrt(eps/K)). 31 | OPTIONS.eta : Level of orthogonality after reorthogonalization 32 | (default = eps^(3/4)/sqrt(K)). 33 | OPTIONS.cgs : Flag for switching between different reorthogonalization 34 | algorithms: 35 | 0 = iterated modified Gram-Schmidt (default) 36 | 1 = iterated classical Gram-Schmidt 37 | OPTIONS.elr : If OPTIONS.elr = 1 (default) then extended local 38 | reorthogonalization is enforced. 39 | OPTIONS.Y : The lanczos vectors are reorthogonalized against 40 | the columns of the matrix OPTIONS.Y. 41 | 42 | If both R0, Q_old and T_old are provided, they must contain 43 | a partial Lanczos tridiagonalization of A on the form 44 | 45 | A Q_old = Q_old T_old + R0 . 46 | 47 | In this case the factorization is extended to dimension K x K by 48 | continuing the Lanczos algorithm with R0 as starting vector. 49 | 50 | On exit ANORM contains an approximation to ||A||_2. 51 | IERR = 0 : K steps were performed succesfully. 52 | IERR > 0 : K steps were performed succesfully, but the algorithm 53 | switched to full reorthogonalization after IERR steps. 54 | IERR < 0 : Iteration was terminated after -IERR steps because an 55 | invariant subspace was found, and 3 deflation attempts 56 | were unsuccessful. 
57 | On exit WORK(1) contains the number of reorthogonalizations performed, and 58 | WORK(2) contains the number of inner products performed in the 59 | reorthogonalizations. 60 | 61 | See also LANEIG, REORTH, COMPUTE_INT 62 | 63 | References: 64 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 65 | 66 | G. H. Golub & C. F. Van Loan, "Matrix Computations", 67 | 3. Ed., Johns Hopkins, 1996. Chapter 9. 68 | 69 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 70 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 71 | 72 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 73 | Math. Comp. 42 (1984), no. 165, 115--142. 74 | 75 | Rasmus Munk Larsen, DAIMI, 1998 76 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lanpro.txt: -------------------------------------------------------------------------------- 1 | LANPRO Lanczos tridiagonalization with partial reorthogonalization 2 | LANPRO computes the Lanczos tridiagonalization of a real symmetric 3 | matrix using the symmetric Lanczos algorithm with partial 4 | reorthogonalization. 5 | 6 | [Q_K,T_K,R,ANORM,IERR,WORK] = LANPRO(A,K,R0,OPTIONS,Q_old,T_old) 7 | [Q_K,T_K,R,ANORM,IERR,WORK] = LANPRO('Afun',N,K,R0,OPTIONS,Q_old,T_old) 8 | 9 | Computes K steps of the Lanczos algorithm with starting vector R0, 10 | and returns the K x K tridiagonal T_K, the N x K matrix Q_K 11 | with semiorthonormal columns and the residual vector R such that 12 | 13 | A*Q_K = Q_K*T_K + R . 14 | 15 | Partial reorthogonalization is used to keep the columns of Q_K 16 | semiorthogonal: 17 | MAX(DIAG((eye(k) - Q_K'*Q_K))) <= OPTIONS.delta. 18 | 19 | 20 | The first input argument is either a real symmetric matrix, a struct with 21 | components A.L and A.U or a string containing the name of an M-file which 22 | applies a linear operator to the columns of a given matrix. In the latter 23 | case, the second input argument must be N, the order of the problem. 24 | 25 | If A is a struct with components A.L and A.U, such that 26 | L*U = (A - sigma*I), a shift-and-invert Lanczos iteration is performed 27 | 28 | The OPTIONS structure is used to control the reorthogonalization: 29 | OPTIONS.delta: Desired level of orthogonality 30 | (default = sqrt(eps/K)). 31 | OPTIONS.eta : Level of orthogonality after reorthogonalization 32 | (default = eps^(3/4)/sqrt(K)). 33 | OPTIONS.cgs : Flag for switching between different reorthogonalization 34 | algorithms: 35 | 0 = iterated modified Gram-Schmidt (default) 36 | 1 = iterated classical Gram-Schmidt 37 | OPTIONS.elr : If OPTIONS.elr = 1 (default) then extended local 38 | reorthogonalization is enforced. 39 | OPTIONS.Y : The lanczos vectors are reorthogonalized against 40 | the columns of the matrix OPTIONS.Y. 41 | 42 | If both R0, Q_old and T_old are provided, they must contain 43 | a partial Lanczos tridiagonalization of A on the form 44 | 45 | A Q_old = Q_old T_old + R0 . 46 | 47 | In this case the factorization is extended to dimension K x K by 48 | continuing the Lanczos algorithm with R0 as starting vector. 49 | 50 | On exit ANORM contains an approximation to ||A||_2. 51 | IERR = 0 : K steps were performed succesfully. 52 | IERR > 0 : K steps were performed succesfully, but the algorithm 53 | switched to full reorthogonalization after IERR steps. 54 | IERR < 0 : Iteration was terminated after -IERR steps because an 55 | invariant subspace was found, and 3 deflation attempts 56 | were unsuccessful. 
57 | On exit WORK(1) contains the number of reorthogonalizations performed, and 58 | WORK(2) contains the number of inner products performed in the 59 | reorthogonalizations. 60 | 61 | See also LANEIG, REORTH, COMPUTE_INT 62 | 63 | References: 64 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 65 | 66 | G. H. Golub & C. F. Van Loan, "Matrix Computations", 67 | 3. Ed., Johns Hopkins, 1996. Chapter 9. 68 | 69 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 70 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 71 | 72 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 73 | Math. Comp. 42 (1984), no. 165, 115--142. 74 | 75 | Rasmus Munk Larsen, DAIMI, 1998 76 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/mminfo.m: -------------------------------------------------------------------------------- 1 | function [rows, cols, entries, rep, field, symm] = mminfo(filename) 2 | % 3 | % function [rows, cols, entries, rep, field, symmetry] = mminfo(filename) 4 | % 5 | % Reads the contents of the Matrix Market file 'filename' 6 | % and extracts size and storage information. 7 | % 8 | % In the case of coordinate matrices, entries refers to the 9 | % number of coordinate entries stored in the file. The number 10 | % of non-zero entries in the final matrix cannot be determined 11 | % until the data is read (and symmetrized, if necessary). 12 | % 13 | % In the case of array matrices, entries is the product 14 | % rows*cols, regardless of whether symmetry was used to 15 | % store the matrix efficiently. 16 | % 17 | % 18 | 19 | mmfile = fopen(filename,'r'); 20 | if ( mmfile == -1 ) 21 | disp(filename); 22 | error('File not found'); 23 | end; 24 | 25 | header = fgets(mmfile); 26 | if (header == -1 ) 27 | error('Empty file.') 28 | end 29 | 30 | % NOTE: If using a version of Matlab for which strtok is not 31 | % defined, substitute 'gettok' for 'strtok' in the 32 | % following lines, and download gettok.m from the 33 | % Matrix Market site. 
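% Illustrative example (a sketch; the file name is hypothetical):
%   [rows,cols,entries,rep,field,symm] = mminfo('example.mtx');
% For a file whose header line is
%   %%MatrixMarket matrix coordinate real general
% followed by the size line '5 5 8', this returns rows=5, cols=5, entries=8,
% rep='coordinate', field='real' and symm='general'.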
34 | [head0,header] = strtok(header); % see note above 35 | [head1,header] = strtok(header); 36 | [rep,header] = strtok(header); 37 | [field,header] = strtok(header); 38 | [symm,header] = strtok(header); 39 | head1 = lower(head1); 40 | rep = lower(rep); 41 | field = lower(field); 42 | symm = lower(symm); 43 | if ( length(symm) == 0 ) 44 | disp('Not enough words in header line.') 45 | disp('Recognized format: ') 46 | disp('%%MatrixMarket matrix representation field symmetry') 47 | error('Check header line.') 48 | end 49 | if ( ~ strcmp(head0,'%%MatrixMarket') ) 50 | error('Not a valid MatrixMarket header.') 51 | end 52 | if ( ~ strcmp(head1,'matrix') ) 53 | disp(['This seems to be a MatrixMarket ',head1,' file.']); 54 | disp('This function only knows how to read MatrixMarket matrix files.'); 55 | disp(' '); 56 | error(' '); 57 | end 58 | 59 | % Read through comments, ignoring them 60 | 61 | commentline = fgets(mmfile); 62 | while length(commentline) > 0 & commentline(1) == '%', 63 | commentline = fgets(mmfile); 64 | end 65 | 66 | % Read size information, then branch according to 67 | % sparse or dense format 68 | 69 | if ( strcmp(rep,'coordinate')) % read matrix given in sparse 70 | % coordinate matrix format 71 | 72 | [sizeinfo,count] = sscanf(commentline,'%d%d%d'); 73 | while ( count == 0 ) 74 | commentline = fgets(mmfile); 75 | if (commentline == -1 ) 76 | error('End-of-file reached before size information was found.') 77 | end 78 | [sizeinfo,count] = sscanf(commentline,'%d%d%d'); 79 | if ( count > 0 & count ~= 3 ) 80 | error('Invalid size specification line.') 81 | end 82 | end 83 | rows = sizeinfo(1); 84 | cols = sizeinfo(2); 85 | entries = sizeinfo(3); 86 | 87 | elseif ( strcmp(rep,'array') ) % read matrix given in dense 88 | % array (column major) format 89 | 90 | [sizeinfo,count] = sscanf(commentline,'%d%d'); 91 | while ( count == 0 ) 92 | commentline = fgets(mmfile); 93 | if (commentline == -1 ) 94 | error('End-of-file reached before size information was found.') 95 | end 96 | [sizeinfo,count] = sscanf(commentline,'%d%d'); 97 | if ( count > 0 & count ~= 2 ) 98 | error('Invalid size specification line.') 99 | end 100 | end 101 | rows = sizeinfo(1); 102 | cols = sizeinfo(2); 103 | entries = rows*cols; 104 | end 105 | 106 | fclose(mmfile); 107 | % Done. 108 | 109 | -------------------------------------------------------------------------------- /compared_methods/AWLP/indwt2_working.m: -------------------------------------------------------------------------------- 1 | function X = indwt2_working(W,varargin) 2 | %INDWT2 Inverse nondecimated 2-D wavelet transform. 3 | % INDWT2 will be removed in a future release of MATLAB. 
Use the 4 | % following function instead: 5 | % iswt2 6 | 7 | % Error in R2015a 8 | % error(message('Wavelet:warnobsolete:ErrorReplaceINDWT2')); 9 | nbIN = nargin-1; 10 | idxCFS = -1; 11 | cfsFLAG = false; 12 | if nbIN>0 13 | nbCELL = numel(W.dec); 14 | type = varargin{1}; 15 | if ~ischar(type) 16 | error(message('Wavelet:FunctionArgVal:Invalid_ArgTyp')) 17 | end 18 | type = upper(type); 19 | cfsFLAG = isequal(upper(type(1)),'C'); 20 | if cfsFLAG , type = type(2:end); end 21 | switch type 22 | case {'D','H'} , idxCFS = 0; 23 | case {'AA','LL','A','L'} , idxCFS = 1; 24 | case {'AD','LH'} , idxCFS = 2; 25 | case {'DA','HL'} , idxCFS = 3; 26 | case {'DD','HH'} , idxCFS = 4; 27 | end 28 | if nbIN>1 , levREC = varargin{2}; else levREC = W.level; end 29 | 30 | if idxCFS>1 31 | idxCFS = idxCFS + 3*(W.level-levREC); 32 | if ~cfsFLAG 33 | for j=1:nbCELL 34 | if ~isequal(j,idxCFS); 35 | W.dec{j} = zeros(size(W.dec{j})); 36 | end 37 | end 38 | else 39 | X = W.dec{idxCFS}; % Coefficients 40 | return 41 | end 42 | 43 | elseif idxCFS==1 % Approximations (AA or LL) 44 | if cfsFLAG && levREC==W.level 45 | X = W.dec{1}; 46 | return; % Coefficients of Approximation at level MAX 47 | end 48 | idxMinToKill = 1 + 3*(W.level-levREC)+1; 49 | for j=idxMinToKill:nbCELL 50 | W.dec{j} = zeros(size(W.dec{j})); 51 | end 52 | 53 | elseif idxCFS==0 54 | idxMaxToKill = 1 + 3*(W.level-levREC); 55 | for j=1:idxMaxToKill 56 | W.dec{j} = zeros(size(W.dec{j})); 57 | end 58 | 59 | else 60 | 61 | end 62 | end 63 | 64 | % Initialization. 65 | Lo = W.filters.LoR; 66 | Hi = W.filters.HiR; 67 | dwtEXTM = W.mode; 68 | perFLAG = isequal(dwtEXTM,'per'); 69 | cfs = W.dec; 70 | sizes = W.sizes; 71 | level = W.level; 72 | 73 | maxloop = level; 74 | if idxCFS==1 && cfsFLAG , maxloop = (level-levREC); end 75 | 76 | idxBeg = 1; 77 | for k=1:maxloop 78 | idxEnd = idxBeg+3; 79 | dec = reshape(cfs(idxBeg:idxEnd),2,2); 80 | sizerec = sizes(k+1,:); 81 | X = recFUNC(dec,sizerec,Lo,Hi,perFLAG); 82 | cfs(1:idxEnd-1) = {[]}; 83 | cfs{idxEnd} = X; 84 | idxBeg = idxEnd; 85 | end 86 | 87 | if abs(idxCFS)==1 && ~cfsFLAG && length(W.sizeINI)==3 88 | % X = uint8(X); 89 | end 90 | %-----------------------------------------------------------------------% 91 | function X = recFUNC(dec,sINI,Lo,Hi,perFLAG) 92 | 93 | % Reconstruction. 94 | perm = [2,1,3]; 95 | W = cell(1,2); 96 | for i = 1:2 97 | W{i} = wrec1D(dec{i,1},Lo{2},perm,perFLAG) + ... 
98 | wrec1D(dec{i,2},Hi{2},perm,perFLAG); 99 | end 100 | X = (wrec1D(W{1},Lo{1},[],perFLAG) + wrec1D(W{2},Hi{1},[],perFLAG))/4; 101 | 102 | % Extraction of central part 103 | sREC = size(X); 104 | F = floor((sREC-sINI)/2); 105 | C = ceil((sREC-sINI)/2); 106 | X = X(1+F(1):end-C(1),1+F(2):end-C(2),:); 107 | %-----------------------------------------------------------------------% 108 | function X = wrec1D(X,F,perm,perFLAG) 109 | 110 | if ~isempty(perm) , X = permute(X,perm); end 111 | if perFLAG 112 | nb = length(F)-1; 113 | X = [X X(:,1:nb,:)]; 114 | end 115 | X = convn(X,F); 116 | if ~isempty(perm) , X = permute(X,perm); end 117 | %-----------------------------------------------------------------------% 118 | -------------------------------------------------------------------------------- /compared_methods/WNLRATV/SetParam_NWT.m: -------------------------------------------------------------------------------- 1 | function param = SetParam_NWT(Noi_H,sigma_noise) 2 | [M, N, B] = size(Noi_H); 3 | param.rankDeRate = 0; % the number of rank reduced in each iteration 4 | param.mog_k = 3; % the number of component reduced in each band 5 | param.lr_init = 'SVD'; 6 | param.maxiter = 20; 7 | param.tol = 1e-4; 8 | param.display = 1; 9 | 10 | % patch setting 11 | param.bandnum = ceil(B/50); 12 | param.nSig = sigma_noise; 13 | param.patsize = 5; % patch size 14 | patch_num = param.patsize*param.patsize; 15 | param.SearchWin = 50; 16 | param.step = 4; 17 | param.RankSelection = 0.01; 18 | param.NumIter = 50; % Iteration to compute U&V 19 | param.lamada = 0.56; 20 | param.c1 = 1*sqrt(2); % Constant num for HSI 21 | 22 | 23 | % nSig = sigma_noise*255; 24 | nSig = sigma_noise; 25 | if B<=50 26 | param.initial_rank = ceil(B/3); % initial rank of low rank component 27 | param.bandnum = ceil(B/8); % number of clean band for patch matching 28 | param.SearchWin = 50; % Non-local patch searching window 29 | param.patnum = 200; % patch number in each group 30 | if nSig<=10.1 31 | param.patsize = 5; 32 | param.patnum = 200; % Increase the patch number and iterations could further improve the performance, at the cost of running time. 33 | elseif nSig <= 30.1 34 | param.patsize = 6; 35 | param.patnum = 300; 36 | param.lamada = 0.56; 37 | else 38 | param.patsize = 7; 39 | param.patnum = 300; 40 | param.lamada = 0.54; 41 | end 42 | %%%% 43 | elseif B<=100 44 | param.initial_rank = ceil(B/5); % initial rank of low rank component 45 | param.bandnum = ceil(B/10); 46 | param.lamada = 0.7; 47 | param.patsize = 5; 48 | param.SearchWin = 50; % Non-local patch searching window 49 | param.patnum = 300; 50 | if nSig<=10.1 51 | param.patnum = 300; % Increase the patch number and iterations could further improve the performance, at the cost of running time. 52 | param.lamada = 0.56; 53 | elseif nSig <= 30.1 54 | param.patsize = 5; 55 | param.patnum = 400; 56 | param.lamada = 0.54; 57 | else 58 | param.patsize = 6; 59 | param.patnum = 400; 60 | param.lamada = 0.53; 61 | end 62 | %%%% 63 | elseif B<=200 64 | param.initial_rank = ceil(B/6); % initial rank of low rank component 65 | param.bandnum = ceil(B/15); 66 | param.lamada = 0.7; 67 | param.patnum = 300; 68 | param.SearchWin = 50; % Non-local patch searching window 69 | param.step = 4; 70 | if nSig<=10.1 71 | param.patsize = 5; 72 | param.patnum = 300; % Increase the patch number and iterations could further improve the performance, at the cost of running time. 
73 | param.lamada = 0.56; 74 | elseif nSig <= 30.1 75 | param.patsize = 5; 76 | param.patnum = 400; 77 | param.lamada = 0.56; 78 | else 79 | param.patsize = 6; 80 | param.patnum = 400; 81 | param.lamada = 0.54; 82 | end 83 | else 84 | param.initial_rank = ceil(B/10); % initial rank of low rank component 85 | param.patsize = 5; 86 | param.patnum = 400; 87 | param.lamada = 0.5; 88 | end 89 | 90 | 91 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lanbpro.doc: -------------------------------------------------------------------------------- 1 | LANBPRO Lanczos bidiagonalization with partial reorthogonalization. 2 | 3 | LANBPRO computes the Lanczos bidiagonalization of a real 4 | matrix using the with partial reorthogonalization. 5 | 6 | [U_k,B_k,V_k,R,ierr,work] = LANBPRO(A,K,R0,OPTIONS,U_old,B_old,V_old) 7 | [U_k,B_k,V_k,R,ierr,work] = LANBPRO('Afun','Atransfun',M,N,K,R0, ... 8 | OPTIONS,U_old,B_old,V_old) 9 | 10 | Computes K steps of the Lanczos bidiagonalization algorithm with partial 11 | reorthogonalization (BPRO) with M-by-1 starting vector R0, producing a 12 | lower bidiagonal K-by-K matrix B_k, an N-by-K matrix V_k, an M-by-K 13 | matrix U_k and a M-by-1 vector such that 14 | A*V_k = U_k*B_k + R 15 | Partial reorthogonalization is used to keep the columns of V_K and U_k 16 | semiorthogonal: 17 | MAX(DIAG((EYE(K) - V_K'*V_K))) <= OPTIONS.delta 18 | and 19 | MAX(DIAG((EYE(K) - U_K'*U_K))) <= OPTIONS.delta. 20 | 21 | B_k = LANBPRO(...) returns the bidiagonal matrix only. 22 | 23 | The first input argument is either a real matrix, or a string 24 | containing the name of an M-file which applies a linear operator 25 | to the columns of a given matrix. In the latter case, the second 26 | input must be the name of an M-file which applies the transpose of 27 | the same linear operator to the columns of a given matrix, 28 | and the third and fourth arguments must be M and N, the dimensions 29 | of then problem. 30 | 31 | The OPTIONS structure is used to control the reorthogonalization: 32 | OPTIONS.delta: Desired level of orthogonality 33 | (default = sqrt(eps/K)). 34 | OPTIONS.eta : Level of orthogonality after reorthogonalization 35 | (default = eps^(3/4)/sqrt(K)). 36 | OPTIONS.cgs : Flag for switching between different reorthogonalization 37 | algorithms: 38 | 0 = iterated modified Gram-Schmidt (default) 39 | 1 = iterated classical Gram-Schmidt 40 | OPTIONS.elr : If OPTIONS.elr = 1 (default) then extended local 41 | reorthogonalization is enforced. 42 | OPTIONS.onesided 43 | : If OPTIONS.onesided = 0 (default) then both the left 44 | (U) and right (V) Lanczos vectors are kept 45 | semiorthogonal. 46 | OPTIONS.onesided = 1 then only the columns of U are 47 | are reorthogonalized. 48 | OPTIONS.onesided = -1 then only the columns of V are 49 | are reorthogonalized. 50 | OPTIONS.waitbar 51 | : The progress of the algorithm is display graphically. 52 | 53 | If both R0, U_old, B_old, and V_old are provided, they must 54 | contain a partial Lanczos bidiagonalization of A on the form 55 | 56 | A V_old = U_old B_old + R0 . 57 | 58 | In this case the factorization is extended to dimension K x K by 59 | continuing the Lanczos bidiagonalization algorithm with R0 as a 60 | starting vector. 61 | 62 | The output array work contains information about the work used in 63 | reorthogonalizing the u- and v-vectors. 64 | work = [ RU PU ] 65 | [ RV PV ] 66 | where 67 | RU = Number of reorthogonalizations of U. 
68 | PU = Number of inner products used in reorthogonalizing U. 69 | RV = Number of reorthogonalizations of V. 70 | PV = Number of inner products used in reorthogonalizing V. 71 | 72 | References: 73 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 74 | 75 | G. H. Golub & C. F. Van Loan, "Matrix Computations", 76 | 3. Ed., Johns Hopkins, 1996. Section 9.3.4. 77 | 78 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 79 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 80 | 81 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 82 | Math. Comp. 42 (1984), no. 165, 115--142. 83 | 84 | 85 | Rasmus Munk Larsen, DAIMI, 1998. 86 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/lanbpro.txt: -------------------------------------------------------------------------------- 1 | LANBPRO Lanczos bidiagonalization with partial reorthogonalization. 2 | 3 | LANBPRO computes the Lanczos bidiagonalization of a real 4 | matrix using the with partial reorthogonalization. 5 | 6 | [U_k,B_k,V_k,R,ierr,work] = LANBPRO(A,K,R0,OPTIONS,U_old,B_old,V_old) 7 | [U_k,B_k,V_k,R,ierr,work] = LANBPRO('Afun','Atransfun',M,N,K,R0, ... 8 | OPTIONS,U_old,B_old,V_old) 9 | 10 | Computes K steps of the Lanczos bidiagonalization algorithm with partial 11 | reorthogonalization (BPRO) with M-by-1 starting vector R0, producing a 12 | lower bidiagonal K-by-K matrix B_k, an N-by-K matrix V_k, an M-by-K 13 | matrix U_k and a M-by-1 vector such that 14 | A*V_k = U_k*B_k + R 15 | Partial reorthogonalization is used to keep the columns of V_K and U_k 16 | semiorthogonal: 17 | MAX(DIAG((EYE(K) - V_K'*V_K))) <= OPTIONS.delta 18 | and 19 | MAX(DIAG((EYE(K) - U_K'*U_K))) <= OPTIONS.delta. 20 | 21 | B_k = LANBPRO(...) returns the bidiagonal matrix only. 22 | 23 | The first input argument is either a real matrix, or a string 24 | containing the name of an M-file which applies a linear operator 25 | to the columns of a given matrix. In the latter case, the second 26 | input must be the name of an M-file which applies the transpose of 27 | the same linear operator to the columns of a given matrix, 28 | and the third and fourth arguments must be M and N, the dimensions 29 | of then problem. 30 | 31 | The OPTIONS structure is used to control the reorthogonalization: 32 | OPTIONS.delta: Desired level of orthogonality 33 | (default = sqrt(eps/K)). 34 | OPTIONS.eta : Level of orthogonality after reorthogonalization 35 | (default = eps^(3/4)/sqrt(K)). 36 | OPTIONS.cgs : Flag for switching between different reorthogonalization 37 | algorithms: 38 | 0 = iterated modified Gram-Schmidt (default) 39 | 1 = iterated classical Gram-Schmidt 40 | OPTIONS.elr : If OPTIONS.elr = 1 (default) then extended local 41 | reorthogonalization is enforced. 42 | OPTIONS.onesided 43 | : If OPTIONS.onesided = 0 (default) then both the left 44 | (U) and right (V) Lanczos vectors are kept 45 | semiorthogonal. 46 | OPTIONS.onesided = 1 then only the columns of U are 47 | are reorthogonalized. 48 | OPTIONS.onesided = -1 then only the columns of V are 49 | are reorthogonalized. 50 | OPTIONS.waitbar 51 | : The progress of the algorithm is display graphically. 52 | 53 | If both R0, U_old, B_old, and V_old are provided, they must 54 | contain a partial Lanczos bidiagonalization of A on the form 55 | 56 | A V_old = U_old B_old + R0 . 
57 | 58 | In this case the factorization is extended to dimension K x K by 59 | continuing the Lanczos bidiagonalization algorithm with R0 as a 60 | starting vector. 61 | 62 | The output array work contains information about the work used in 63 | reorthogonalizing the u- and v-vectors. 64 | work = [ RU PU ] 65 | [ RV PV ] 66 | where 67 | RU = Number of reorthogonalizations of U. 68 | PU = Number of inner products used in reorthogonalizing U. 69 | RV = Number of reorthogonalizations of V. 70 | PV = Number of inner products used in reorthogonalizing V. 71 | 72 | References: 73 | R.M. Larsen, Ph.D. Thesis, Aarhus University, 1998. 74 | 75 | G. H. Golub & C. F. Van Loan, "Matrix Computations", 76 | 3. Ed., Johns Hopkins, 1996. Section 9.3.4. 77 | 78 | B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 79 | Prentice-Hall, Englewood Cliffs, NJ, 1980. 80 | 81 | H. D. Simon, ``The Lanczos algorithm with partial reorthogonalization'', 82 | Math. Comp. 42 (1984), no. 165, 115--142. 83 | 84 | 85 | Rasmus Munk Larsen, DAIMI, 1998. 86 | -------------------------------------------------------------------------------- /compared_methods/LMHTV/PROPACK/reorth.f: -------------------------------------------------------------------------------- 1 | subroutine reorth(n,k,V,ldv,vnew,normv,index,alpha,work, 2 | c iflag,nre) 3 | c 4 | c FORTRAN 77 version of MATLAB routine REORTH: 5 | c 6 | c REORTH Reorthogonalize a vector using iterated Gram-Schmidt 7 | c 8 | c [R_NEW,NORMR_NEW,NRE] = reorth(Q,R,NORMR,INDEX,ALPHA,METHOD) 9 | c reorthogonalizes R against the subset of columns of Q given by INDEX. 10 | c If INDEX==[] then R is reorthogonalized all columns of Q. 11 | c If the result R_NEW has a small norm, i.e. if norm(R_NEW) < ALPHA*NORMR, 12 | c then a second reorthogonalization is performed. If the norm of R_NEW 13 | c is once more decreased by more than a factor of ALPHA then R is 14 | c numerically in span(Q(:,INDEX)) and a zero-vector is returned for R_NEW. 15 | c 16 | c If method==0 then iterated modified Gram-Schmidt is used. 17 | c If method==1 then iterated classical Gram-Schmidt is used. 18 | c 19 | c The default value for ALPHA is 0.5. 20 | c NRE is the number of reorthogonalizations performed (1 or 2). 21 | 22 | c References: 23 | c Aake Bjorck, "Numerical Methods for Least Squares Problems", 24 | c SIAM, Philadelphia, 1996, pp. 68-69. 25 | c 26 | c J.~W. Daniel, W.~B. Gragg, L. Kaufman and G.~W. Stewart, 27 | c ``Reorthogonalization and Stable Algorithms Updating the 28 | c Gram-Schmidt QR Factorization'', Math. Comp., 30 (1976), no. 29 | c 136, pp. 772-795. 30 | c 31 | c B. N. Parlett, ``The Symmetric Eigenvalue Problem'', 32 | c Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109 33 | 34 | c Rasmus Munk Larsen, DAIMI, 1998. 35 | implicit none 36 | integer n,k,ldv,i,iflag,nre 37 | double precision V(ldv,*),vnew(*),normv,index(*),work(*) 38 | double precision alpha,normv_old,dnrm2 39 | integer MAXTRY 40 | parameter (MAXTRY=4) 41 | external dgemv,dnrm2 42 | 43 | c Hack: If index .ne. 1:k we do MGS to avoid reshuffling. 44 | if (iflag.eq.1) then 45 | do i=1,k 46 | if (int(index(i)).ne.i) then 47 | iflag=0 48 | goto 100 49 | endif 50 | enddo 51 | endif 52 | 100 normv_old = 0 53 | nre = 0 54 | normv = dnrm2(n,vnew,1) 55 | do while ((normv.lt.alpha*normv_old .or. 
nre.eq.0)) 56 | if (iflag.eq.1) then 57 | c CGS: 58 | call dgemv('T',n,k,1D0,V,ldv,vnew,1,0D0,work,1) 59 | call dgemv('N',n,k,-1D0,V,ldv,work,1,1D0,vnew,1) 60 | else 61 | c MGS: 62 | call MGS(n,k,V,ldv,vnew,index) 63 | endif 64 | normv_old = normv 65 | normv = dnrm2(n,vnew,1) 66 | nre = nre + 1 67 | 68 | if ( nre.gt.MAXTRY ) then 69 | c 70 | c vnew is numerically in span(V) => return vnew = (0,0,...,0)^T 71 | normv = 0d0 72 | do i=1,n 73 | vnew(i) = 0d0 74 | enddo 75 | return 76 | endif 77 | enddo 78 | end 79 | c 80 | c**************************************************************************** 81 | c 82 | 83 | subroutine MGS(n,k,V,ldv,vnew,index) 84 | implicit none 85 | integer n,k,ldv 86 | double precision V(ldv,*),vnew(*),index(*) 87 | integer i,j,idx 88 | double precision s 89 | 90 | c 91 | c Modified Gram-Schmidt orthogonalization: 92 | c Orthogalizes vnew against the k vectors in V by the 93 | c iterative process 94 | c 95 | c FOR i=1...k DO 96 | c vnew = vnew - DOT( V(:,i), vnew ) * V(:,i) 97 | c 98 | 99 | c This simple version is faster on Pentium machines. 100 | c Compile with "g77 -O6 -funroll-all-loops -fomit-frame-pointer" 101 | 102 | do i=1,k 103 | idx = int(index(i)) 104 | s = 0 105 | do j=1,n 106 | s = s + V(j,idx)*vnew(j) 107 | enddo 108 | do j=1,n 109 | vnew(j) = vnew(j) - s*V(j,idx) 110 | enddo 111 | enddo 112 | end 113 | c 114 | c**************************************************************************** 115 | c 116 | 117 | -------------------------------------------------------------------------------- /compared_methods/LTHTV/LRTDTV.m: -------------------------------------------------------------------------------- 1 | %% =========================== Frist part notes =========================== 2 | % ADMM algorithm: tensor denosing 3 | % solve the following problem 4 | % (SSTV regularized low rank tucker decomposition problem) 5 | % 6 | % The approximate model: 7 | % min tau*||F||_1 + lambda*||E||_1 8 | % s.t. 
D = X+E, X = Z,DZ = F 9 | % X = Core*U1*U2*U3,rank(Core_i)=ri 10 | % where,D is SSTV difference operator in the above model 11 | % 12 | % The lagrange function is : 13 | % tau*||F||_1 + lambda*||E||_1 + + + 14 | % + beta/2*( ||D-X-E||_F^2 + ||X-Z||_F^2 + ||DZ - F||_F^2 ) 15 | % 16 | %% =========================== Second part notes=========================== 17 | 18 | % Reference paper: Hyperspectral Image Restoration via Total Variation 19 | % Regularized Low-rank Tensor Decomposition 20 | % Author: Yao Wang, Jiangjun Peng 21 | % E-mail addresses: andrew.pengjj@gmail.com 22 | % ------------------------------------------------------------------------- 23 | 24 | %% =========================== Thrid part notes =========================== 25 | % INPUT: 26 | % Noi: noisy 3-D image of size M*N*p normalized to [0,1] band by band 27 | % tau: the trade-off parameter (recommended value 1) 28 | % lambda: sparse noise coefficient 29 | % rank: rank constraint,[0.8*M,0.8*N,r] 30 | % OUTPUT: 31 | % clean_iamge: 3-D denoised image 32 | % S: The noise term 33 | % out_value: MPSNR and MSSIM and ERGAS valuses of each iteration 34 | % ======================================================================== 35 | function [clean_image,S,out_value,time] = LRTDTV(Noi, tau,lambda,rank) 36 | tic 37 | sizeD = size(Noi); 38 | 39 | normD = norm(Noi(:)); 40 | n = prod(sizeD); 41 | maxIter = 40; 42 | epsilon = 1e-6; 43 | beta = 0.01; % The ascending multiplier value 44 | 45 | out_value = []; 46 | out_value.SSIM = []; 47 | out_value.PSNR = []; 48 | out_value.ERGAS = []; 49 | 50 | h = sizeD(1); 51 | w = sizeD(2); 52 | d = sizeD(3); 53 | %% 54 | Eny_x = ( abs(psf2otf([+1; -1], [h,w,d])) ).^2 ; 55 | Eny_y = ( abs(psf2otf([+1, -1], [h,w,d])) ).^2 ; 56 | Eny_z = ( abs(psf2otf([+1, -1], [w,d,h])) ).^2 ; 57 | Eny_z = permute(Eny_z, [3, 1 2]); 58 | determ = Eny_x + Eny_y + Eny_z; 59 | 60 | %% Initialization 61 | X = rand(sizeD); % X : The clean image 62 | Z = X; % Z : auxiliary variable for X 63 | S = zeros(sizeD); % S : sparse noise 64 | F = zeros(3*n,1); % F : auxiliary variable for tv 65 | Gamma = F; % The multiplier for DZ-F 66 | M1 = zeros(size(Noi)); % The multiplier for 67 | M2 = M1; 68 | 69 | %% main loop 70 | 71 | for iter = 1: maxIter 72 | preX = X; 73 | %% - update Core and U_i and X 74 | temp = 1/2*(Noi-S+Z+(M1-M2)/beta); 75 | X = tucker_hooi(temp,rank); 76 | %% - update Z 77 | 78 | 79 | diffT_p = diffT3( beta*F - Gamma, sizeD ); 80 | numer1 = reshape( diffT_p + beta*X(:) + M2(:), sizeD); 81 | z = real( ifftn( fftn(numer1) ./ (beta*determ + beta) ) ); 82 | Z = reshape(z,sizeD); 83 | % z = Z(:); 84 | % z = myPCG1(z,X,M2,F,Gamma,beta,sizeD); 85 | % Z = reshape(z,sizeD); 86 | %% - update F 87 | diff_Z = diff3(Z(:), sizeD); 88 | F = softthre(diff_Z+ Gamma/beta, tau/beta ); 89 | %% - update S 90 | S = softthre(Noi-X+M1/beta,lambda/beta);% sparse 91 | % S = beta*(Noi-X+M1/beta)/(beta+2*lambda); 92 | %% - update M 93 | M1 = M1 + beta*(Noi-X-S); 94 | M2 = M2 + beta*(X-Z); 95 | %% - update Gamma 96 | Gamma = Gamma+beta*(diff_Z-F); 97 | beta = min(beta *1.5,1e5); 98 | %% compute the error 99 | errList = norm(X(:)-preX(:)) / normD; 100 | fprintf('LRTDTV: iterations = %d difference=%f\n', iter, errList); 101 | if errList < epsilon 102 | break; 103 | end 104 | %% output SSIM and PSNR values of each step 105 | % load simu_indian 106 | % OriData3 = simu_indian; 107 | % [out_value.PSNR(iter),out_value.SSIM(iter),out_value.ERGAS(iter)]=msqia(OriData3,X); 108 | end 109 | %% the final clean image 110 | clean_image = X; 111 | 
fprintf('LRTDTV ends: total iterations = %d,difference=%f\n\n', iter, errList); 112 | toc 113 | time=toc; 114 | end 115 | 116 | -------------------------------------------------------------------------------- /PWRCTV.m: -------------------------------------------------------------------------------- 1 | function [output_image,U,V,MPSNR,MSSIM,ERGAS,SAM] = PWRCTV(Nhsi, Pan, beta, lambda, tau, r, q) 2 | %% Initializing admm variables 3 | 4 | if nargin < 2 5 | beta = 0.5; 6 | end 7 | if nargin < 3 8 | lambda = 1; 9 | end 10 | if nargin < 4 11 | tau = [0.01,0.01]; 12 | end 13 | if nargin < 5 14 | r = 6; 15 | end 16 | 17 | 18 | 19 | tol = 1e-5; 20 | tol_rho = 100; 21 | maxIter = 200; 22 | % rho = 1.2; 23 | % mu0 = 0.09; 24 | rho = 1.5; mu0 = 1; 25 | max_mu = 1e6; 26 | eps = 1e-4; 27 | 28 | [height,width,nband] = size(Nhsi); 29 | sizeU = [height,width,r]; 30 | D = reshape(Nhsi, [height*width,nband]); 31 | 32 | [~,norm_two,~] = svdsecon(D,1); 33 | mu = mu0/norm_two; 34 | normD = norm(D,'fro'); 35 | 36 | %% FFT setting 37 | Eny_x = ( abs(psf2otf([+1; -1], [height,width,r])) ).^2 ; 38 | Eny_y = ( abs(psf2otf([+1, -1], [height,width,r])) ).^2 ; 39 | determ = Eny_x + Eny_y; 40 | 41 | %% Initializing optimization variables 42 | [u,s,v]= svd(D,'econ'); 43 | U = u(:,1:r)*s(1:r,1:r); 44 | V = v(:,1:r); 45 | 46 | S = zeros(height*width,nband);%Sparse 47 | E = zeros(height*width,nband);%Gaussian 48 | 49 | M1 = zeros(height*width*r,1); % multiplier for Dx_U-F1 50 | M2 = zeros(height*width*r,1); % multiplier for Dy_U-F2 51 | M3 = zeros([height*width,nband]); % multiplier for D-UV^T-E 52 | W3 = 1; 53 | 54 | G1 = diff_x(Pan, size(Pan)); G1=reshape(G1,size(Pan)); 55 | G2 = diff_y(Pan, size(Pan)); G2=reshape(G2,size(Pan)); 56 | 57 | W1 = repmat(G1, [1,1,r]); W1 = abs(W1); 58 | W2 = repmat(G2, [1,1,r]); W2 = abs(W2); 59 | W1 = (1-abs(W1)).^q; 60 | W2 = (1-abs(W2)).^q; 61 | 62 | 63 | rho_XY1 = ones(height,width,r); 64 | rho_XY2 = ones(height,width,r); 65 | update_rho = 0; 66 | 67 | MPSNR = zeros(maxIter,1); 68 | MSSIM = zeros(maxIter,1); 69 | 70 | 71 | %% main loop 72 | 73 | disp('Stage 1') 74 | iter = 0; 75 | while iter100 115 | % 忽略掉高斯噪音影响 116 | S = 0; 117 | else 118 | S = softthre_s(D-U*V'-E+M3/mu,lambda/mu,W3); 119 | end 120 | 121 | % output_image = reshape(U*V',[height,width,nband]); 122 | % [MPSNR(iter), MSSIM(iter),ERGAS(iter),SAM(iter)] = pwrctv_msqia(Ohsi,output_image); 123 | 124 | 125 | %% -Update Multiplier 126 | leq1 = diff_x(U,sizeU)-F1; 127 | leq2 = diff_y(U,sizeU)-F2; 128 | leq3 = D-U*V'-E-S; 129 | stopC1 = norm(leq1,'fro')/normD; 130 | stopC2 = norm(leq2,'fro')/normD; 131 | stopC3 = norm(leq3,'fro')/normD; 132 | if mod(iter,10)==0 133 | disp(['iter ' num2str(iter) ',mu=' num2str(mu,'%2.1e') ... 134 | ',U_x rele = ' num2str(stopC1,'%2.3e') ',U_y rele = ' num2str(stopC2,'%2.3e')... 
135 | ',X-UV = ' num2str(stopC3,'%2.3e')]); 136 | end 137 | if stopC15 88 | % tmp_max = max(abs(G1(:))); 89 | % W1 = tmp_max./(abs(G1)+delta*tmp_max); 90 | % tmp_max = max(abs(G2(:))); 91 | % W2 = tmp_max./(abs(G2)+delta*tmp_max); 92 | % end 93 | %% -Update U 94 | diffT_p = diff_xT(G1-M1/mu,sizeU)+diff_yT(G2-M2/mu,sizeU); 95 | temp = (D-E-S+M3/mu)*V; 96 | numer1 = reshape( diffT_p +temp(:), sizeU); 97 | x = real( ifftn( fftn(numer1) ./ (determ + 1+eps) ) ); 98 | U = reshape(x,[M*N,r]); 99 | %% -Update V 100 | [u,~,v] = svd((D-E-S+M3/mu)'*U,'econ'); 101 | V = u*v'; 102 | %% -Update E 103 | E = mu*(D-U*V'-S+M3/mu)/(2*beta+mu); 104 | % if beta>100 105 | % E = 0; 106 | % else 107 | % E = mu*(D-U*V'-S+M3/mu)/(2*beta+mu); 108 | % % if mod(iter,4)==1 109 | % % [Weight,~,Sigma,~,Pk] = MoGWeight(D-S,U,V,K,Pk,Sigma); 110 | % % end 111 | % % E = mu*(D-U*V'-S+M3/mu)./(2*beta*Weight+mu); 112 | % end 113 | %% -Update S 114 | if lambda >100 115 | % 忽略掉高斯噪音影响 116 | S = 0; 117 | else 118 | S = softthre_s(D-U*V'-E+M3/mu,lambda/mu,W3); 119 | end 120 | % [psnr,ssim,~,~,~] = evaluate(clean_data,output_image,M,N); 121 | % MPSNR(iter) = mean(psnr); 122 | % MSSIM(iter) = mean(ssim); 123 | %% -Update Multiplier 124 | leq1 = diff_x(U,sizeU)-G1; 125 | leq2 = diff_y(U,sizeU)-G2; 126 | leq3 = D-U*V'-E-S; 127 | stopC1 = norm(leq1,'fro')/normD; 128 | stopC2 = norm(leq2,'fro')/normD; 129 | stopC3 = norm(leq3,'fro')/normD; 130 | if mod(iter,10)==0 131 | disp(['iter ' num2str(iter) ',mu=' num2str(mu,'%2.1e') ... 132 | ',U_x rele = ' num2str(stopC1,'%2.3e') ',U_y rele = ' num2str(stopC2,'%2.3e')... 133 | ',X-UV = ' num2str(stopC3,'%2.3e')]); 134 | end 135 | if stopC1=0.995 132 | % break; 133 | % end 134 | % end 135 | % X = u(:,1:svp)*diag(diags(1:svp))*v(:,1:svp)'; 136 | output_image = reshape(X,[M,N,p]); 137 | end -------------------------------------------------------------------------------- /compared_methods/LRTV/LRTV.m: -------------------------------------------------------------------------------- 1 | function [ output_image output_sparse ] = LRTV_accelerate(Y, tau,lambda,r) 2 | % Solve problem 3 | % solve the following problem (TV regularized LR and MC problem) 4 | % argmin ||L||_nuclear + tao * ||X||_TV+lanbda*||E||_1 5 | % s.t. X = L D = L + E and rank(L)<=r; 6 | % via IALM 7 | % ------------------------------------------------------------------------------------------------------------- 8 | % Reference paper: W. He, H. Zhang, L. Zhang, and H. Shen, “Total-Variation-Regularized 9 | % Low-Rank Matrix Factorization for Hyperspectral Image Restoration,” IEEE Trans. Geosci. Remote Sens., 10 | % vol. 54, pp. 178-188, Jan. 2016. 11 | % Author: Wei He (November, 2014) 12 | % E-mail addresses:(weihe1990@whu.edu.cn) 13 | % --------------------------------------------------INPUT----------------------------------------------------- 14 | % oriData3_noise noisy 3-D image of size sizeY(1)*N*p normalized to [0,1] band by band 15 | % tao (recommended value 0.01) 16 | % lambda 17 | % G1(omega) all one matrix of size (sizeY(1)*N)*p 18 | % G0(omega~) all zero matrix of size (sizeY(1)*N)*p 19 | % r rank constraint 20 | % --------------------------------------------------OUTPUT----------------------------------------------------- 21 | % output_iamge 3-D denoised image 22 | % out_value MPSNR and MSSIM valuses of each iteration 23 | % ------------------------------------------------------------------------------------------------------------- 24 | % Note: the parameters G0 and G1 do not have any effect. 
these two 25 | % parameters are used to solve the impainting problem with the location of 26 | % missing pixels to be known. 27 | % ------------------------------------------------------------------------------------------------------------- 28 | sizeY = size(Y); 29 | sizeD = [sizeY(1)*sizeY(2), sizeY(3)]; 30 | D = reshape(Y, sizeD); clear Y 31 | 32 | tol = 1e-5; 33 | maxIter = 100; 34 | rho = 1.25; 35 | 36 | % Initialize mu 37 | normD = norm(D,'fro'); 38 | [~,norm_two,~] = svdsecon(D, 1); 39 | norm_inf = max(abs(D(:)))/lambda; 40 | dual_norm = max(norm_two, norm_inf); 41 | mu = 1.25/dual_norm; % this one can be tuned 42 | max_mu1 = mu * 1e8; 43 | mu1 = mu; mu2 = mu; mu3 = mu; 44 | %% 45 | debug=1; 46 | %% Initializing optimization variables 47 | % intialize 48 | % L = rand(sizeD); 49 | [uu,ss,vv] = svdsecon(D,6); 50 | L = uu*ss*vv'; 51 | f = reshape(L, sizeY); 52 | S = zeros(sizeD); 53 | 54 | M1 = zeros(sizeD); 55 | M2 = zeros(sizeD); 56 | 57 | % F1 = zeros(sizeY); F2 = zeros(sizeY); 58 | M31 = zeros(sizeY); M32 = zeros(sizeY); 59 | 60 | % for the 3D-TV norm 61 | % define operators 62 | Eny_x = ( abs(psf2otf([+1; -1], sizeY)) ).^2 ; 63 | Eny_y = ( abs(psf2otf([+1, -1], sizeY)) ).^2 ; 64 | eigDtD = Eny_x + Eny_y; 65 | 66 | [diff,diff_t] = defDDt(); 67 | 68 | 69 | % main loop 70 | iter = 0; 71 | tic 72 | while iter + + 14 | % + beta/2*( ||D-X-E||_F^2 + ||X-Z||_F^2 + ||DZ - F||_F^2 ) 15 | % 16 | %% =========================== Second part notes=========================== 17 | 18 | % Reference paper: Hyperspectral Image Restoration via Total Variation 19 | % Regularized Low-rank Tensor Decomposition 20 | % Author: Yao Wang, Jiangjun Peng 21 | % E-mail addresses: andrew.pengjj@gmail.com 22 | % ------------------------------------------------------------------------- 23 | 24 | %% =========================== Thrid part notes =========================== 25 | % INPUT: 26 | % Noi: noisy 3-D image of size M*N*p normalized to [0,1] band by band 27 | % tau: the trade-off parameter (recommended value 1) 28 | % lambda: sparse noise coefficient 29 | % rank: rank constraint,[0.8*M,0.8*N,r] 30 | % OUTPUT: 31 | % clean_iamge: 3-D denoised image 32 | % S: The noise term 33 | % out_value: MPSNR and MSSIM and ERGAS valuses of each iteration 34 | % ======================================================================== 35 | function [clean_image,S,out_value,time] = LTNTV(Noi, tau,lambda,rank,beta) 36 | tic 37 | sizeD = size(Noi); 38 | 39 | normD = norm(Noi(:)); 40 | n = prod(sizeD); 41 | maxIter = 40; 42 | epsilon = 1e-6; 43 | mu1 = 0.01; % The ascending multiplier value 44 | mu2 = 0.01; 45 | mu3 = 0.01; 46 | 47 | out_value = []; 48 | out_value.SSIM = []; 49 | out_value.PSNR = []; 50 | out_value.ERGAS = []; 51 | 52 | h = sizeD(1); 53 | w = sizeD(2); 54 | d = sizeD(3); 55 | %% 56 | % Eny_x = ( abs(psf2otf([+1; -1], [h,w,d])) ).^2 ; 57 | % Eny_y = ( abs(psf2otf([+1, -1], [h,w,d])) ).^2 ; 58 | % Eny_z = ( abs(psf2otf([+1, -1], [w,d,h])) ).^2 ; 59 | Eny_x = beta(1)^2*( abs(psf2otf([+1; -1], [h,w,d])) ).^2 ; 60 | Eny_y = beta(2)^2*( abs(psf2otf([+1, -1], [h,w,d])) ).^2 ; 61 | Eny_z = beta(3)^2*( abs(psf2otf([+1, -1], [w,d,h])) ).^2 ; 62 | Eny_z = permute(Eny_z, [3, 1 2]); 63 | determ = Eny_x + Eny_y + Eny_z; 64 | 65 | %% Initialization 66 | X = zeros(sizeD); % X : The clean image 67 | Z = X; % Z : auxiliary variable for X 68 | S = zeros(sizeD); % S : sparse noise 69 | F = zeros(3*n,1); % F : auxiliary variable for tv 70 | C = F; % The multiplier for DZ-F 71 | B = zeros(size(Noi)); % The multiplier for 72 | A = B; 73 
| 74 | %% main loop 75 | 76 | for iter = 1: maxIter 77 | preX = X; 78 | %% - update Core and U_i and X 79 | temp = (mu1*(Z-A/mu1)+mu2*(Noi-S+B/mu2))/(mu1+mu2); 80 | X = tucker_hooi(temp,rank); 81 | 82 | %% - update Z 83 | % diffT_p = diffT3( mu3*F - C, sizeD ); 84 | diffT_p = diffT3_weight( mu3*F - C, sizeD,beta ); 85 | numer1 = reshape( diffT_p + mu1*X(:) + A(:), sizeD); 86 | z = real( ifftn( fftn(numer1) ./ (mu3*determ + mu1) ) ); 87 | Z = reshape(z,sizeD); 88 | 89 | %% - update F 90 | % diff_Z = diff3(Z(:), sizeD); 91 | diff_Z = diff3_weight(Z(:), sizeD,beta); 92 | F = prox_half(diff_Z+ C/mu3, tau/mu3 ); 93 | 94 | %% - update S 95 | S = softthre(Noi-X+B/mu2,lambda/mu2);% sparse 96 | 97 | %% - update M 98 | B = B + mu2*(Noi-X-S); 99 | A = A + mu1*(X-Z); 100 | C = C + mu3*(diff_Z-F); 101 | mu1 = min(mu1 * 1.5,1e6); 102 | mu2 = min(mu2 * 1.5,1e6); 103 | mu3 = min(mu3 * 1.5,1e6); 104 | 105 | %% compute the error 106 | errList = norm(X(:)-preX(:)) / normD; 107 | fprintf('LRTDTV: iterations = %d difference=%f\n', iter, errList); 108 | if errList < epsilon 109 | break; 110 | end 111 | %% output SSIM and PSNR values of each step 112 | % load simu_indian 113 | % OriData3 = simu_indian; 114 | % [out_value.PSNR(iter),out_value.SSIM(iter),out_value.ERGAS(iter)]=msqia(OriData3,X); 115 | end 116 | %% the final clean image 117 | clean_image = X; 118 | fprintf('LRTDTV ends: total iterations = %d,difference=%f\n\n', iter, errList); 119 | toc 120 | time=toc; 121 | end 122 | 123 | function z = prox_half(x,gamma) 124 | z = (2/3)*x.*(abs(x)>(gamma^(2/3).*54^(1/3)/4)).*(1+cos(2*pi/3-2*acos((abs(x)/3).^(-1.5)*gamma/8)/3)); 125 | end -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hyperspectral Pan-denoising by PWRCTV 2 | 3 | Pan-denoising: Guided Hyperspectral Image Denoising via Weighted Represent Coefficient Total Variation. IEEE Trans. Geosci. Remote. Sens. 4 | 5 | [Shuang Xu](https://shuangxu96.github.io/), [Qiao Ke](https://teacher.nwpu.edu.cn/qiaoke.html), [Jiangjun Peng](https://teacher.nwpu.edu.cn/pengjj), [Xiangyong Cao](https://gr.xjtu.edu.cn/en/web/caoxiangyong), [Zixiang Zhao](https://zhaozixiang1228.github.io/) 6 | 7 | Northwestern Polytechnical University, and Xi'an Jiaotong University 8 | 9 | ------------------------------------------- 10 | [![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/pdf/2407.06064) 11 | [![Free](https://img.shields.io/badge/free_for_non_commercial_use-brightgreen)](#-license) 12 | 13 | This repository contains the MATLAB code for the paper "Pan-denoising: Guided Hyperspectral Image Denoising via Weighted Represent Coefficient Total Variation". The code implements the proposed PWRCTV method for hyperspectral image denoising using panchromatic image guidance. 14 | 15 | ## Pan-denoising 16 | With the recent launch of satellites equipped with both hyperspectral and PAN sensors, such as PRISMA (PRecursore IperSpettrale della Missione Applicativa) and XG3 (XIGUANG-003), a new opportunity has emerged. PAN images, due to their imaging mechanism, are less noisy than HSI but still exhibit similar textures. As depicted in following figure (b), this paper therefore aims to investigate PAN image-guided HSI denoising, which is referred to as _pan-denoising_. 
This problem arises from two primary aspects: 17 | - Despite the significant advancements in hyperspectral imaging techniques, the HSIs captured by recent satellite sensors still suffer from noticeable noise. _Pan-denoising_ presents an important and novel approach to enhance HSI quality. 18 | - Substantial research has been conducted on hyperspectral pan-sharpening, which assumes that HSIs are noise-free. However, this assumption does not hold in practice. Following pan-sharpening, a denoising step is still required. _Pan-denoising_ would lead to a more robust image preprocessing result. 19 | 20 | Compared with the traditional HSI denoising paradigm, _pan-denoising_ incorporates an additional regularization term derived from external prior knowledge: 21 | 22 | $$\min_{\mathcal{X}}\, \mathscr{L}\left( \mathcal{X},\mathcal{Y} \right) + \lambda \mathscr{R}\left( \mathcal{X} \right) +\tau \mathscr{E}\left( \mathcal{X},\mathbf{P} \right),$$ 23 | 24 | where $\mathscr{E}\left( \mathcal{X},\mathbf{P} \right)$ characterizes the external prior knowledge, $\mathbf{P}\in\mathbb{R}^{M\times N}$ is the PAN image, and $\tau$ controls the regularization strength. Nevertheless, designing an appropriate regularization term to effectively utilize the complementary information from PAN images remains a significant challenge. 25 | 26 | 27 |
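One simple way to instantiate the external prior term $\mathscr{E}\left( \mathcal{X},\mathbf{P} \right)$ is a PAN-weighted total variation, where per-pixel TV weights are derived from the PAN gradients so that smoothing is relaxed across edges that are visible in $\mathbf{P}$. The snippet below is only a minimal sketch of this idea; the file name `pan.png`, the constant `sigma`, and the exponential weighting are illustrative assumptions for the example, not the exact weights built inside `PWRCTV.m`.

```matlab
% Illustrative sketch: derive TV weights from a PAN image P in [0,1].
% Weights close to 1 on flat regions encourage smoothing; weights close to 0
% near PAN edges preserve the textures shared with the PAN image.
P        = im2double(imread('pan.png'));   % assumed PAN image file, M-by-N
[Gx, Gy] = gradient(P);                    % horizontal / vertical gradients
g        = sqrt(Gx.^2 + Gy.^2);            % gradient magnitude
g        = g / (max(g(:)) + eps);          % normalize to [0,1]
sigma    = 0.1;                            % edge-sensitivity parameter (assumed)
W        = exp(-g / sigma);                % per-pixel TV weight map
```

In PWRCTV, weights of this flavor modulate the total variation of the representative coefficient images obtained from the low-rank decomposition; see `PWRCTV.m` and the paper for the exact construction.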
28 | 29 | ## Contents 30 | * `PWRCTV.m`: The implementation of the PWRCTV denoising algorithm. 31 | * `Table2_3_Fig5_6_PRISMA.m`: Functions for evaluating the denoising performance on Florence and Milan datasets to reproduce Tables 2-3 and Figs. 5-6 of the manuscript. 32 | * `Fig7_8_XG3.m`: Functions for evaluating the denoising performance on Beijing and Yulin datasets to reproduce Figs. 7-8 of the manuscript. 33 | * `Table_4_step1.m`: Functions for denoising on the Urban dataset. 34 | * `Table_4_step2.py`: Functions for classification on the denoised Urban dataset. 35 | * `Table5_Fig10.m`: Functions for hyperspectral pan-sharpening on the Milan dataset. 36 | * `data/`: Dataset download links. 37 | 38 | ## Usage 39 | 1. **Data Download**: Download the datasets from the provided links and put them in the `data/` folder. 40 | 2. **Experiments on simulated datasets**: Run `Table2_3_Fig5_6_PRISMA.m`. 41 | 3. **Experiments on real-world datasets**: Run `Fig7_8_XG3.m`. 42 | 4. **Application on HSI classification**: Run `Table_4_step1.m` and `Table_4_step2.py`. 43 | 5. **Application on HSI pan-sharpening**: Run `Table5_Fig10.m`. 44 | 6. **Visualize results**: The denoised images and corresponding metrics will be saved in the `results/` folder. 45 | 46 | ## Dependencies 47 | * MATLAB 48 | 49 | ## Our other works 50 | - **HSI denoising**: [BALMF](https://github.com/shuangxu96/BALMF)(TGRS'22), [STDTV](https://github.com/shuangxu96/STDTV), [RCTV](https://github.com/andrew-pengjj/rctv)(TGRS'22) 51 | - **Guided RGB image denoising**: [MN](https://github.com/shuangxu96/MN-A-model-driven-network-for-guided-image-denoising)(Inf Fus'22) 52 | - **Pan-sharpening**: [GPPNN](https://github.com/shuangxu96/GPPNN)(CVPR21) 53 | 54 | ## Citation 55 | If you use this code in your research, please cite the corresponding paper: 56 | ``` 57 | @article{PWRCTV, 58 | author = {Shuang Xu and 59 | Qiao Ke and 60 | Jiangjun Peng and 61 | Xiangyong Cao and 62 | Zixiang Zhao}, 63 | title = {Pan-denoising: Guided Hyperspectral Image Denoising via Weighted Represent Coefficient Total Variation}, 64 | journal = {{IEEE} Trans. Geosci. Remote. Sens.}, 65 | volume = {62}, 66 | pages = {Art. no. 5528714}, 67 | year = {2024}, 68 | doi = {https://doi.org/10.1109/TGRS.2024.3450888}, 69 | } 70 | ``` 71 | 72 | 73 | 74 | ## Contact 75 | If you have any questions or need further assistance, please contact Shuang Xu at xs@nwpu.edu.cn 76 | 77 | ## Acknowledgement 78 | The codes are based on [RCTV](https://github.com/andrew-pengjj/rctv). Thanks for their awesome work.
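## Quick example

The experiment scripts call the denoiser through a single function. A minimal call, using the parameter values applied to the Urban dataset in `Table_4_step1.m`, looks as follows (`Nhsi` is the noisy HSI and `pan` the co-registered PAN image, both normalized to [0,1]):

```matlab
% Minimal PWRCTV call (parameter values taken from Table_4_step1.m).
% Nhsi : noisy hyperspectral image, M x N x B, normalized to [0,1]
% pan  : co-registered panchromatic image, M x N, normalized to [0,1]
beta   = 100;
lambda = 1;
tau    = 0.5*[1,1];
r      = 3;
q      = 2;
Ohsi   = PWRCTV(Nhsi, pan, beta, lambda, tau, r, q);  % denoised HSI, same size as Nhsi
```

The roles of `beta`, `lambda`, `tau`, `r`, and `q` are described in the paper; the values above are the ones used for the Urban experiment and may need tuning for other datasets.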
79 | 80 | 81 | -------------------------------------------------------------------------------- /Table_4_step1.m: -------------------------------------------------------------------------------- 1 | %% load data 2 | load('data\Urban\Urban_F210.mat') 3 | load('data\Urban\ikonos_spec_resp.mat') 4 | wvl = load('data\Urban\URBAN.wvl'); 5 | 6 | Nhsi = reshape(Y, [nBand,nRow,nCol]); 7 | Nhsi = permute(Nhsi, [2,3,1]); 8 | 9 | Chsi = Nhsi(:,:,wvl(:,4)==1 & wvl(:,2)>=350 & wvl(:,2)<=1035); 10 | working_wvl = wvl(wvl(:,4)==1 & wvl(:,2)>=350 & wvl(:,2)<=1035,2); 11 | srf = zeros(size(Chsi,3),1); 12 | pan = zeros(size(Chsi,1),size(Chsi,2)); 13 | for i=1:length(working_wvl) 14 | temp_wvl = working_wvl(i); 15 | [~,index] = min(abs(temp_wvl - ikonos_sp(:,1))); 16 | srf(i) = ikonos_sp(index,2); 17 | pan = pan + srf(i)*Chsi(:,:,i); 18 | end 19 | pan = pan/sum(srf); 20 | plot(working_wvl,srf) 21 | title('Spectral Response Function') 22 | 23 | %% Preprocess 24 | Nhsi = Nhsi/max(Nhsi(:)); 25 | pan = pan/max(pan(:)); 26 | [M,N,B] = size(Nhsi); 27 | 28 | %% set savepath 29 | savepath = fullfile('result','urban'); 30 | mkdir(savepath) 31 | 32 | %% save simulations 33 | save([savepath,'\Noisy.mat'],'Nhsi','pan') 34 | 35 | %% TCTV 36 | % parameters 37 | opts = []; 38 | opts.rho = 1.25; 39 | opts.directions = [1,2,3]; 40 | % run algorithm 41 | disp('--------------- Run TCTV --------------- ') 42 | tic 43 | output = TCTV_TRPCA(Nhsi, opts); 44 | elapsed_time = toc; 45 | % save data 46 | output = uint8(255*output); 47 | save([savepath,'\TCTV.mat'],'output') 48 | 49 | %% LMHTV 50 | % parameters 51 | r = 4; 52 | tau = 0.0005; 53 | lambda = 10/sqrt(M*N); 54 | % run algorithm 55 | disp('--------------- Run LMHTV --------------- ') 56 | tic 57 | output = LMHTV(Nhsi, tau, lambda, r, [1,1,0]); 58 | elapsed_time = toc; 59 | % save data 60 | output = uint8(255*output); 61 | save([savepath,'\LMHTV.mat'],'output') 62 | 63 | %% LTHTV 64 | % parameters 65 | tau = 0.04; 66 | lambda = 1000/sqrt(M*N); 67 | ten_rank = [ceil(M*0.7),ceil(N*0.7),r]; 68 | % run algorithm 69 | disp('--------------- Run LTHTV --------------- ') 70 | tic 71 | output = LTHTV(Nhsi, tau,lambda,ten_rank,[1,1,0]); 72 | elapsed_time = toc; 73 | % save data 74 | output = uint8(255*output); 75 | save([savepath,'\LTHTV.mat'],'output') 76 | 77 | %% LRTV 78 | % parameters 79 | if size(Nhsi,3) > 100 80 | tau = 0.015; 81 | lambda = 20/sqrt(M*N); 82 | rank = 10; 83 | else 84 | tau = 0.01; 85 | lambda = 10/sqrt(M*N); 86 | rank = 5; 87 | end 88 | % run algorithm 89 | disp('--------------- Run LRTV --------------- ') 90 | tic 91 | output = LRTV(Nhsi, tau, lambda, rank); 92 | elapsed_time = toc; 93 | % save data 94 | output = uint8(255*output); 95 | save([savepath,'\LRTV.mat'],'output') 96 | 97 | 98 | 99 | %% RCTV 100 | % parameters 101 | beta = 200; 102 | lambda = 1; 103 | tau = 0.8*[1,1]; 104 | r = 4; 105 | q = 10; 106 | % run algorithm 107 | disp('--------------- Run RCTV --------------- ') 108 | tic 109 | output = MoG_RBTV(Nhsi, beta, lambda, tau, r); 110 | elapsed_time = toc; 111 | % save data 112 | output = uint8(255*output); 113 | save([savepath,'\RCTV.mat'],'output') 114 | 115 | %% BALMF 116 | % parameters 117 | r = 4; 118 | % run algorithm 119 | disp('--------------- Run BALMF --------------- ') 120 | tic 121 | output = BALMF(Nhsi, r); 122 | elapsed_time = toc; 123 | % save data 124 | output = uint8(255*output); 125 | save([savepath,'\BALMF.mat'],'output') 126 | 127 | 128 | 129 | %% CTV 130 | % parameters 131 | opts = []; 132 | opts.rho = 1.5; 133 | % run algorithm 
134 | disp('--------------- Run CTV --------------- ') 135 | tic 136 | output = ctv_rpca(Nhsi, opts); 137 | elapsed_time = toc; 138 | % save data 139 | output = uint8(255*output); 140 | save([savepath,'\CTV.mat'],'output') 141 | 142 | 143 | %% PWRCTV 144 | % parameters 145 | beta = 100; 146 | lambda = 1; 147 | tau = 0.5*[1,1]; 148 | r = 3; 149 | q = 2; 150 | % run algorithm 151 | disp('--------------- Run PWRCTV --------------- ') 152 | tic 153 | output = PWRCTV(Nhsi, pan, beta, lambda, tau, r, q); 154 | elapsed_time = toc; 155 | % save data 156 | output = uint8(255*output); 157 | save([savepath,'\PWRCTV.mat'],'output') 158 | 159 | %% TDL 160 | Ohsi = im2double(output); 161 | % parameters 162 | noiselevel = std(reshape(Ohsi-Nhsi, [M*N,B])); 163 | vstbmtf_params.peak_value = 1; 164 | vstbmtf_params.nsigma = mean(noiselevel); 165 | % run algorithm 166 | disp('--------------- Run TDL --------------- ') 167 | tic 168 | output = TensorDL(Nhsi, vstbmtf_params); 169 | elapsed_time = toc; 170 | % save data 171 | output = uint8(255*output); 172 | save([savepath,'\TDL.mat'],'output') 173 | 174 | %% NGMeet 175 | % parameters 176 | noiselevel = std(reshape(Ohsi-Nhsi, [M*N,B])); 177 | Par = ParSetH(255*mean(noiselevel),B); 178 | % run algorithm 179 | disp('--------------- Run NGMeet --------------- ') 180 | tic 181 | output = NGmeet_DeNoising( 255*Nhsi, 255*Ohsi, Par); % NGmeet denoising function 182 | elapsed_time = toc; 183 | % save data 184 | output = uint8(output); 185 | save([savepath,'\NGMeet.mat'],'output') 186 | 187 | %% WNLRATV 188 | % parameters 189 | noise = reshape(Nhsi - Ohsi, M*N,B); 190 | Sigma_ratio = std(noise(:)); 191 | initial_rank = 3; 192 | Rank = 6; 193 | ModelPar.alpha = 30; 194 | ModelPar.belta = 1; 195 | ModelPar.gamma = 0.08; 196 | param = SetParam_NWT(Nhsi, Sigma_ratio); 197 | param.initial_rank = initial_rank; 198 | param.maxiter = 15; 199 | param.patnum = 200; 200 | param.lambda = 2e-1; 201 | [prior, model] = InitialPara( param,0,B); 202 | % run algorithm 203 | disp('--------------- Run WNLRATV --------------- ') 204 | tic 205 | output = WNLRATV2(Nhsi,Ohsi, Rank,ModelPar, param, model, prior); 206 | elapsed_time = toc; 207 | % save data 208 | output = uint8(255*output); 209 | save([savepath,'\WNLRATV.mat'],'output') 210 | 211 | -------------------------------------------------------------------------------- /compared_methods/TCTV/TCTV_TRPCA.m: -------------------------------------------------------------------------------- 1 | %----- Correlated Total Variation based Tensor Robust Principal Component Analysis -----% 2 | function [X, E, obj, err, iter] = TCTV_TRPCA(M, opts) 3 | % Solve the p-order Tensor Robust Principal Component Analysis via Tensor Correlated Total Variation (TCTV) norm minimization by ADMM 4 | % the transform in high-order TSVD uses DFT (default) 5 | % 6 | % min_{X \in R^{n_1*n_2*...*n_d}} ||X||_tctv + lambda*||E||_1 s.t. M = X + E 7 | % 8 | % --------------------------------------------- 9 | % Input: 10 | % M - any p-order observed tensor 11 | % opts - Structure value in Matlab.
The fields are 12 | % opts.directions - considered local smoothness along certain directions 13 | % opts.transform - the transform case of TSVD, DFT, DCT and other invertible linear transform 14 | % opts.transform_matrices - the transform matrices of TSVD for generalized invertible linear transform 15 | % opts.tol - termination tolerance 16 | % opts.max_iter - maximum number of iterations 17 | % opts.mu - stepsize for dual variable updating in ADMM 18 | % opts.max_mu - maximum stepsize 19 | % opts.rho - rho>=1, ratio that is used to increase mu 20 | % opts.detail - 0 or 1, show the update details or not 21 | % 22 | % Output: 23 | % X - recovered order-p tensor 24 | % E - corresponding sparse tensor 25 | % obj - objective function value 26 | % err - residual 27 | % iter - number of iterations 28 | % 29 | % version 1.0 - 10/22/2022 30 | % 31 | % Written by Hailin Wang(wanghailin97@163.com) 32 | % 33 | 34 | %% default parameters setting 35 | dim = size(M); 36 | d = ndims(M); 37 | 38 | transform = 'DFT'; 39 | for i = 3:d 40 | transform_matrices{i-2} = dftmtx(dim(i)); 41 | end 42 | lambda = 1/sqrt(prod(dim)/min(dim(1),dim(2))); 43 | directions = 1:d; 44 | tol = 1e-8; 45 | max_iter = 500; 46 | rho = 1.1; 47 | mu = 1e-4; 48 | max_mu = 1e10; 49 | detail = 1; 50 | 51 | if ~exist('opts', 'var') 52 | opts = []; 53 | end 54 | if isfield(opts, 'transform'); transform = opts.transform; end 55 | if isfield(opts, 'transform_matrices'); transform_matrices = opts.transform_matrices; end 56 | if isfield(opts, 'directions'); directions = opts.directions; end 57 | if isfield(opts, 'tol'); tol = opts.tol; end 58 | if isfield(opts, 'max_iter'); max_iter = opts.max_iter; end 59 | if isfield(opts, 'rho'); rho = opts.rho; end 60 | if isfield(opts, 'mu'); mu = opts.mu; end 61 | if isfield(opts, 'max_mu'); max_mu = opts.max_mu; end 62 | if isfield(opts, 'detail'); detail = opts.detail; end 63 | 64 | %% variables initialization 65 | n = length(directions); 66 | X = randn(dim); 67 | E = zeros(dim); 68 | Lambda = zeros(dim); 69 | for i = 1:n 70 | index = directions(i); 71 | G{index} = porder_diff(X,index); 72 | Gamma{index} = zeros(dim); 73 | end 74 | 75 | %% FFT setting 76 | T = zeros(dim); 77 | for i = 1:n 78 | Eny = diff_element(dim,directions(i)); 79 | T = T + Eny; 80 | end 81 | 82 | %% main loop 83 | iter = 0; 84 | while iter(gamma^(2/3).*54^(1/3)/4)).*(1+cos(2*pi/3-2*acos((abs(x)/3).^(-1.5)*gamma/8)/3)); 152 | end 153 | 154 | function x = prox_L0(x,gamma) 155 | x(abs(x)