├── README.md
├── Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization.pdf
├── SparseLowRankRec.m
└── main.m

/README.md:
--------------------------------------------------------------------------------
# Simultaneously-Sparse-and-Low-Rank-Matrix-Reconstruction-via-Nonconvex-and-Nonseparable-Regularization
Matlab code for the paper:
Wei Chen, "Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization," IEEE Transactions on Signal Processing (TSP), 66(20), pp.5313-5323, 2018

--------------------------------------------------------------------------------
/Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wc253/SSLR/31961952ae00dcd5523f8488ad7d185addead428/Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization.pdf

--------------------------------------------------------------------------------
/SparseLowRankRec.m:
--------------------------------------------------------------------------------
function [X] = SparseLowRankRec(y,A,n,m,para,X0)
% ***********************************************************************
% Code for the paper:
% Wei Chen, "Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization", IEEE Transactions on Signal Processing (TSP), 66(20), pp.5313-5323, 2018.
%
% Inputs:
% y: observation vector y \in R^p
% A: linear mapping matrix A \in R^{p*nm}
% n,m : dimension of X, X \in R^(n*m)
% para.iters : maximum number of iterations
% para.delta : convergence criteria
% para.alpha: sparsity-rank trade-off parameter
% para.lambda : controls variance of Gaussian errors
%               lambda can be set to 1e-10 for noiseless problems.
% X0: (optional) the ground truth matrix. Use it for early stopping in
%     simulations only; omit it for real reconstruction problems.
%
% Output:
% X: the reconstructed n-by-m matrix
% ***********************************************************************

lambda = para.lambda;
iters = para.iters;
delta = para.delta;
alpha = para.alpha;

% Initialization
Vx = zeros(n*m,1);
p = length(y);
Psi = eye(n);              % shared n-by-n covariance factor (low-rank part)
X = reshape(Vx,[n,m]);
Cov = sparse(n*m,n*m);     % block-diagonal covariance, one n-by-n block per column
gamma = ones(n,m);         % per-entry variance parameters (sparsity part)

% prepare for the loop
X_old = X;
norml = norm(y); % to normalize when checking for stop
check = 0;
if nargin == 6
    check = 1;
    normx = norm(X0,'fro');
end

% run
for k = 1:iters

    % update X: build the per-column covariance blocks from Psi and gamma,
    % then compute the posterior-mean estimate of vec(X)
    for itr = 1:m
        [a,b,c] = svd(Psi+diag(gamma(:,itr)));
        % BUG FIX: the original code used length(diag(b)>1e-16), which is
        % the length of a logical vector and therefore always n, so the
        % truncation of negligible singular values never took effect.
        % nnz(...) counts the singular values actually above the threshold.
        temp = nnz(diag(b) > 1e-16);
        temp_inv = c(:,1:temp)*diag(1./(diag(b(1:temp,1:temp))))*a(:,1:temp)';
        temp = diag(gamma(:,itr));
        Cov(((itr-1)*n+1):(itr*n),((itr-1)*n+1):(itr*n)) = temp - temp*temp_inv*temp;
    end
    Vx = Cov*A'*((lambda*sparse(eye(p))+A*Cov*A')\y);
    X = reshape(Vx,[n,m]);

    % update Psi: average the per-column posterior updates, then add the
    % data-driven term weighted by the sparsity-rank trade-off alpha
    bar_Psi = sparse(kron(sparse(eye(m)),Psi));
    T = (lambda*sparse(eye(p))+A*bar_Psi*A')\A;
    Psi_sum = 0;
    for i = 1:m
        Ai = A(:,(i-1)*n+1:i*n);
        Ti = T(:,(i-1)*n+1:i*n);
        Ui = Psi - Psi*Ai'*Ti*Psi;
        Psi_sum = Psi_sum + Ui;
    end
    Psi = Psi_sum/m;
    Psi = Psi + X*(X')./(m - m*alpha);

    % update gamma via the economy-size SVD of the scaled sensing matrix;
    % the small constant 1e-16 guards against division by zero
    G = repmat(sqrt(gamma(:))',p,1);
    PhiG = A.*G;
    [U,S,V] = svd(PhiG,'econ');

    diag_S = diag(S);
    U_scaled = U(:,1:p).*repmat((diag_S./(diag_S.^2 + lambda + 1e-16))',p,1);
    Xi = G'.*(V*U_scaled');

    PhiGsqr = PhiG.*G;
    Sigma_w_diag = real( gamma(:) - ( sum(Xi.'.*PhiGsqr) ).' );
    gamma = X(:).*X(:)./alpha + Sigma_w_diag;
    gamma = reshape(gamma,[n,m]);

    % check for stop: relative change of the iterate
    d = norm(X_old-X,'fro')/norml;
    if d < delta
        break;
    end
    X_old = X;

    if check
        % early stopping against the ground truth (simulations only)
        d = norm(X-X0,'fro')/normx;
        if d < 1e-3
            break;
        end
    end
end

end

--------------------------------------------------------------------------------
/main.m:
--------------------------------------------------------------------------------
% ***********************************************************************
% Code for the paper:
% Wei Chen, "Simultaneously Sparse and Low-Rank Matrix Reconstruction via Nonconvex and Nonseparable Regularization", IEEE Transactions on Signal Processing (TSP), 66(20), pp.5313-5323, 2018.
%
% Main function (demo script)
% y: observation vector y \in R^p
% A: linear mapping matrix A \in R^{p*nm}
% n,m : dimension of X, X \in R^(n*m)
% s^2 : sparsity level of X
% r : rank of X
% para.alpha: sparsity-rank trade-off parameter
% para.iters : maximum number of iterations
% para.delta : convergence criteria
% para.lambda : controls variance of Gaussian errors
%
% NOTE(review): awgn below requires the Communications Toolbox — for a
% toolbox-free run, add scaled randn noise to y instead.
% ***********************************************************************


n = 50;
m = n;
s = 10;
r = 4;
p = 200;
snr = 1000;   % SNR in dB; 1000 dB is effectively noiseless

% generate matrix X: s-by-s support chosen at random, rank-r values on it
X = zeros(n,m);
q1 = randperm(n);
q2 = randperm(m);
Ml = randn(s,r);
Mr = randn(r,s);
X(q1(1:s),q2(1:s)) = Ml*Mr;

% generate sensing matrix A
A = randn(p,n*m);
A = A/norm(A);

% generate y
y = A*X(:);
y = awgn(y,snr,'measured');

% normalization
ny = norm(y);
y = y/ny;
X = X/ny;

% empirical per-sample noise power, used as the lambda parameter below
noise = (norm(y-A*X(:),2)^2)/p;

%============================================begin tests
% set parameters for converge
para.iters = 500;    % max iter
para.delta = 1e-6;   % convergence control
para.lambda = noise; % noise parameter
para.alpha = 0.5;

% call algorithm
[hat_X] = SparseLowRankRec(y,A,n,m,para,X);
recovery_error = norm(hat_X-X,'fro')/norm(X,'fro')