├── BSBL_BO.m ├── BSBL_FM.m ├── Phi.mat ├── README.md ├── demo.mat ├── demo_fecg.m ├── demo_mmv.m ├── demo_real.m ├── demo_smv_complex.m ├── demo_smv_real.m └── signal_01.mat /BSBL_BO.m: -------------------------------------------------------------------------------- 1 | function Result = BSBL_BO(Phi, y, blkStartLoc, LearnLambda, varargin) 2 | 3 | % BSBL-BO: Recover block sparse signal (1D) exploiting intra-block correlation, given the block partition. 4 | % 5 | % The algorithm solves the inverse problem for the block sparse 6 | % model with known block partition: 7 | % y = Phi * x + v 8 | % 9 | % 10 | % ============================== INPUTS ============================== 11 | % Phi : N X M known matrix 12 | % 13 | % y : N X 1 measurement vector 14 | % 15 | % blkStartLoc : Start location of each block 16 | % 17 | % LearnLambda : (1) If LearnLambda = 1, use the lambda learning rule for very LOW SNR cases (SNR<10dB) 18 | % (using lambda=std(y)*1e-2 or user-input value as initial value) 19 | % (2) If LearnLambda = 2, use the lambda learning rule for medium noisy cases (SNR>10dB) 20 | % (using lambda=std(y)*1e-2 or user-input value as initial value) 21 | % (3) If LearnLambda = 0, do not use the lambda learning rule 22 | % ((using lambda=1e-14 or user-input value as initial value) 23 | % 24 | % 25 | % [varargin values -- in most cases you can use the default values] 26 | % 27 | % 'LEARNTYPE' : LEARNTYPE = 0: Ignore intra-block correlation 28 | % LEARNTYPE = 1: Exploit intra-block correlation 29 | % [ Default: LEARNTYPE = 1 ] 30 | % 31 | % 'PRUNE_GAMMA' : threshold to prune out small gamma_i 32 | % (generally, 10^{-3} or 10^{-2}) 33 | % 34 | % 'LAMBDA' : user-input value for lambda 35 | % [ Default: LAMBDA=1e-14 when LearnLambda=0; LAMBDA=std(y)*1e-2 in noisy cases] 36 | % 37 | % 'MAX_ITERS' : Maximum number of iterations. 
38 | % [ Default value: MAX_ITERS = 600 ] 39 | % 40 | % 'EPSILON' : Solution accurancy tolerance parameter 41 | % [ Default value: EPSILON = 1e-8 ] 42 | % 43 | % 'PRINT' : Display flag. If = 1: show output; If = 0: supress output 44 | % [ Default value: PRINT = 0 ] 45 | % 46 | % ============================== OUTPUTS ============================== 47 | % Result : 48 | % Result.x : the estimated block sparse signal 49 | % Result.gamma_used : indexes of nonzero groups in the sparse signal 50 | % Result.gamma_est : the gamma values of all the groups of the signal 51 | % Result.B : the final value of the B 52 | % Result.count : iteration times 53 | % Result.lambda : the final value of lambda 54 | % 55 | % 56 | % ========================= Command examples ============================= 57 | % < Often-used command > 58 | % For most noisy environment (SNR > 10dB): 59 | % 60 | % Result = BSBL_BO(Phi, y, blkStartLoc, 2); 61 | % 62 | % For very low SNR cases (SNR < 10 dB): 63 | % 64 | % Result = BSBL_BO(Phi, y, blkStartLoc, 1); 65 | % 66 | % For noiseless cases: 67 | % 68 | % Result = BSBL_BO(Phi, y, blkStartLoc, 0); 69 | % 70 | % To recover non-Sparse structured signals (noiseless): 71 | % Result = BSBL_BO(Phi,y,groupStartLoc,0,'prune_gamma',-1); 72 | % ('prune_gamma' can be set any non positive constant) 73 | % 74 | % < Full-Command Example > 75 | % Result = BSBL_BO(Phi, y, blkStartLoc, learnlambda, ... 76 | % 'LEARNTYPE', 1,... 77 | % 'PRUNE_GAMMA',1e-2,... 78 | % 'LAMBDA',1e-3,... 79 | % 'MAX_ITERS', 800,... 80 | % 'EPSILON', 1e-8,... 81 | % 'PRINT',0); 82 | % 83 | % ================================= See Also ============================= 84 | % EBSBL_BO, BSBL_EM, BSBL_L1, EBSBL_L1, TMSBL, TSBL 85 | % 86 | % ================================ Reference ============================= 87 | % [1] Zhilin Zhang, Bhaskar D. 
Rao, Extension of SBL Algorithms for the 88 | % Recovery of Block Sparse Signals with Intra-Block Correlation, 89 | % available at: http://arxiv.org/abs/1201.0862 90 | % 91 | % [2] Zhilin Zhang, Tzyy-Ping Jung, Scott Makeig, Bhaskar D. Rao, 92 | % Low Energy Wireless Body-Area Networks for Fetal ECG Telemonitoring 93 | % via the Framework of Block Sparse Bayesian Learning, 94 | % available at: http://arxiv.org/pdf/1205.1287v1.pdf 95 | % 96 | % [3] webpage: http://dsp.ucsd.edu/~zhilin/BSBL.html, or 97 | % https://sites.google.com/site/researchbyzhang/bsbl 98 | % 99 | % ============= Author ============= 100 | % Zhilin Zhang (z4zhang@ucsd.edu, zhangzlacademy@gmail.com) 101 | % 102 | % ============= Version ============= 103 | % 1.4 (07/23/2012) debug 104 | % 1.3 (05/30/2012) make faster 105 | % 1.2 (05/28/2012) 106 | % 1.1 (01/22/2012) 107 | % 1.0 (08/27/2011) 108 | % 109 | 110 | 111 | % scaling... 112 | scl = std(y); 113 | if (scl < 0.4) | (scl > 1) 114 | y = y/scl*0.4; 115 | end 116 | 117 | % Default Parameter Values for Any Cases 118 | EPSILON = 1e-8; % solution accurancy tolerance 119 | MAX_ITERS = 600; % maximum iterations 120 | PRINT = 0; % don't show progress information 121 | LEARNTYPE = 1; % adaptively estimate the covariance matrix B 122 | 123 | if LearnLambda == 0 124 | lambda = 1e-12; 125 | PRUNE_GAMMA = 1e-3; 126 | elseif LearnLambda == 2 127 | lambda = scl * 1e-2; 128 | PRUNE_GAMMA = 1e-2; 129 | elseif LearnLambda == 1 130 | lambda = scl * 1e-2; 131 | PRUNE_GAMMA = 1e-2; 132 | else 133 | error(['Unrecognized Value for Input Argument ''LearnLambda''']); 134 | end 135 | 136 | 137 | if(mod(length(varargin),2)==1) 138 | error('Optional parameters should always go by pairs\n'); 139 | else 140 | for i=1:2:(length(varargin)-1) 141 | switch lower(varargin{i}) 142 | case 'learntype' 143 | LEARNTYPE = varargin{i+1}; 144 | if LEARNTYPE ~= 1 & LEARNTYPE ~= 0 145 | error(['Unrecognized Value for Input Argument ''LEARNTYPE''']); 146 | end 147 | case 'prune_gamma' 148 
| PRUNE_GAMMA = varargin{i+1}; 149 | case 'lambda' 150 | lambda = varargin{i+1}; 151 | case 'epsilon' 152 | EPSILON = varargin{i+1}; 153 | case 'print' 154 | PRINT = varargin{i+1}; 155 | case 'max_iters' 156 | MAX_ITERS = varargin{i+1}; 157 | otherwise 158 | error(['Unrecognized parameter: ''' varargin{i} '''']); 159 | end 160 | end 161 | end 162 | 163 | 164 | if PRINT 165 | fprintf('\n====================================================\n'); 166 | fprintf(' Running BSBL-BO ....... \n'); 167 | fprintf(' Information about parameters...\n'); 168 | fprintf('====================================================\n'); 169 | fprintf('PRUNE_GAMMA : %e\n',PRUNE_GAMMA); 170 | fprintf('lambda : %e\n',lambda); 171 | fprintf('LearnLambda : %d\n',LearnLambda); 172 | fprintf('LearnType : %d\n',LEARNTYPE); 173 | fprintf('EPSILON : %e\n',EPSILON); 174 | fprintf('MAX_ITERS : %d\n\n',MAX_ITERS); 175 | end 176 | 177 | 178 | %% Initialization 179 | [N,M] = size(Phi); 180 | Phi0 = Phi; 181 | blkStartLoc0 = blkStartLoc; 182 | p = length(blkStartLoc); % block number 183 | for k = 1 : p-1 184 | blkLenList(k) = blkStartLoc(k+1)-blkStartLoc(k); 185 | end 186 | blkLenList(p) = M - blkStartLoc(end)+1; 187 | maxLen = max(blkLenList); 188 | if sum(blkLenList == maxLen) == p, 189 | equalSize = 1; 190 | else 191 | equalSize = 0; 192 | end 193 | 194 | for k = 1 : p 195 | Sigma0{k} = eye(blkLenList(k)); 196 | end 197 | 198 | gamma = ones(p,1); 199 | keep_list = [1:p]'; 200 | usedNum = length(keep_list); 201 | mu_x = zeros(M,1); 202 | count = 0; 203 | 204 | 205 | %% Iteration 206 | while (1) 207 | count = count + 1; 208 | 209 | %=========== Prune weighys as yheir hyperparameyers go yo zero ============== 210 | if (min(gamma) < PRUNE_GAMMA) 211 | index = find(gamma > PRUNE_GAMMA); 212 | usedNum = length(index); 213 | keep_list = keep_list(index); 214 | if isempty(keep_list), 215 | fprintf('\n====================================================================================\n'); 216 | fprintf('x 
becomes zero vector. The solution may be incorrect. \n'); 217 | fprintf('Current ''prune_gamma'' = %g, and Current ''EPSILON'' = %g.\n',PRUNE_GAMMA,EPSILON); 218 | fprintf('Try smaller values of ''prune_gamma'' and ''EPSILON'' or normalize ''y'' to unit norm.\n'); 219 | fprintf('====================================================================================\n\n'); 220 | break; 221 | end; 222 | blkStartLoc = blkStartLoc(index); 223 | blkLenList = blkLenList(index); 224 | 225 | % prune gamma and associated components in Sigma0 226 | gamma = gamma(index); 227 | temp = Sigma0; 228 | Sigma0 = []; 229 | for k = 1 : usedNum 230 | Sigma0{k} = temp{index(k)}; 231 | end 232 | 233 | % construct new Phi 234 | temp = []; 235 | for k = 1 : usedNum 236 | temp = [temp, Phi0(:,blkStartLoc(k):blkStartLoc(k)+blkLenList(k)-1)]; 237 | end 238 | Phi = temp; 239 | %clear temp; 240 | end 241 | 242 | %=================== Compute new weights ================= 243 | mu_old = mu_x; 244 | 245 | PhiBPhi = zeros(N); 246 | currentLoc = 0; 247 | for i = 1 : usedNum 248 | 249 | currentLen = size(Sigma0{i},1); 250 | currentLoc = currentLoc + 1; 251 | currentSeg = currentLoc : 1 : currentLoc + currentLen - 1; 252 | 253 | PhiBPhi = PhiBPhi + Phi(:, currentSeg)*Sigma0{i}*Phi(:, currentSeg)'; 254 | currentLoc = currentSeg(end); 255 | end 256 | 257 | H = Phi' /(PhiBPhi + lambda * eye(N)); 258 | Hy = H * y; 259 | HPhi = H * Phi; 260 | 261 | mu_x = zeros(size(Phi,2),1); 262 | Sigma_x = []; 263 | Cov_x = []; 264 | 265 | B = []; invB = []; B0 = zeros(maxLen); r0 = zeros(1); r1 = zeros(1); 266 | currentLoc = 0; 267 | for i = 1 : usedNum 268 | 269 | currentLen = size(Sigma0{i},1); 270 | currentLoc = currentLoc + 1; 271 | seg = currentLoc : 1 : currentLoc + currentLen - 1; 272 | 273 | mu_x(seg) = Sigma0{i} * Hy(seg); % solution 274 | Sigma_x{i} = Sigma0{i} - Sigma0{i} * HPhi(seg,seg) * Sigma0{i}; 275 | Cov_x{i} = Sigma_x{i} + mu_x(seg) * mu_x(seg)'; 276 | currentLoc = seg(end); 277 | 278 | %=========== 
Learn correlation structure in blocks =========== 279 | % do not consider correlation structure in each block 280 | if LEARNTYPE == 0 281 | B{i} = eye(currentLen); 282 | invB{i} = eye(currentLen); 283 | 284 | % constrain all the blocks have the same correlation structure 285 | elseif LEARNTYPE == 1 286 | if equalSize == 0 287 | if currentLen > 1 288 | temp = Cov_x{i}/gamma(i); 289 | r0 = r0 + mean(diag(temp)); 290 | r1 = r1 + mean(diag(temp,1)); 291 | end 292 | elseif equalSize == 1 293 | B0 = B0 + Cov_x{i}/gamma(i); 294 | end 295 | 296 | end % end of learnType 297 | 298 | end 299 | 300 | %=========== Learn correlation structure in blocks with Constraint 1 =========== 301 | % If blocks have the same size 302 | if (equalSize == 1) & (LEARNTYPE == 1) 303 | 304 | % Constrain all the blocks have the same correlation structure 305 | % (an effective strategy to avoid overfitting) 306 | b = (mean(diag(B0,1))/mean(diag(B0))); 307 | if abs(b) >= 0.99, b = 0.99*sign(b); end; 308 | bs = []; 309 | for j = 1 : maxLen, bs(j) = (b)^(j-1); end; 310 | B0 = toeplitz(bs); 311 | 312 | for i = 1 : usedNum 313 | 314 | B{i} = B0; 315 | invB{i} = inv(B{i}); 316 | end 317 | 318 | % if blocks have different sizes 319 | elseif (equalSize == 0) & (LEARNTYPE == 1) 320 | r = r1/r0; if abs(r) >= 0.99, r = 0.99*sign(r); end; 321 | 322 | for i = 1 : usedNum 323 | currentLen = size(Cov_x{i},1); 324 | 325 | bs = []; 326 | for j = 1 : currentLen, bs(j) = r^(j-1); end; 327 | B{i} = toeplitz(bs); 328 | invB{i} = inv(B{i}); 329 | 330 | end 331 | 332 | end 333 | 334 | 335 | % estimate gamma(i) and lambda 336 | if LearnLambda == 1 337 | gamma_old = gamma; 338 | lambdaComp = 0; currentLoc = 0; 339 | for i = 1 : usedNum 340 | 341 | currentLen = size(Sigma_x{i},1); 342 | currentLoc = currentLoc + 1; 343 | currentSeg = currentLoc : 1 : currentLoc + currentLen - 1; 344 | 345 | gamma(i) = gamma_old(i)*norm( sqrtm(B{i})*Hy(currentSeg) )/sqrt(trace(HPhi(currentSeg,currentSeg)*B{i})); 346 | 347 | lambdaComp = 
lambdaComp + trace(Phi(:,currentSeg)*Sigma_x{i}*Phi(:,currentSeg)'); 348 | 349 | Sigma0{i} = B{i} * gamma(i); 350 | 351 | currentLoc = currentSeg(end); 352 | end 353 | lambda = norm(y - Phi * mu_x,2)^2/N + lambdaComp/N; 354 | 355 | 356 | elseif LearnLambda == 2 357 | gamma_old = gamma; 358 | lambdaComp = 0; currentLoc = 0; 359 | for i = 1 : usedNum 360 | 361 | currentLen = size(Sigma_x{i},1); 362 | currentLoc = currentLoc + 1; 363 | currentSeg = currentLoc : 1 : currentLoc + currentLen - 1; 364 | 365 | gamma(i) = gamma_old(i)*norm( sqrtm(B{i})*Hy(currentSeg) )/sqrt(trace(HPhi(currentSeg,currentSeg)*B{i})); 366 | 367 | lambdaComp = lambdaComp + trace(Sigma_x{i}*invB{i})/gamma_old(i); 368 | 369 | Sigma0{i} = B{i} * gamma(i); 370 | 371 | currentLoc = currentSeg(end); 372 | end 373 | lambda = norm(y - Phi * mu_x,2)^2/N + lambda * (length(mu_x) - lambdaComp)/N; 374 | 375 | else % only estimate gamma(i) 376 | gamma_old = gamma; 377 | currentLoc = 0; 378 | for i = 1 : usedNum 379 | % gamma(i) = trace(invB{i} * Cov_x{i})/size(Cov_x{i},1); 380 | currentLen = size(Sigma0{i},1); 381 | currentLoc = currentLoc + 1; 382 | seg = currentLoc : 1 : currentLoc + currentLen - 1; 383 | 384 | gamma(i) = gamma_old(i)*norm( sqrtm(B{i})*Hy(seg) )/sqrt(trace(HPhi(seg,seg)*B{i})); 385 | 386 | Sigma0{i} = B{i} * gamma(i); 387 | 388 | currentLoc = seg(end); 389 | end 390 | end 391 | 392 | 393 | % ================= Check stopping conditions, eyc. ============== 394 | if (size(mu_x) == size(mu_old)) 395 | dmu = max(max(abs(mu_old - mu_x))); 396 | if (dmu < EPSILON) break; end; 397 | end; 398 | if (PRINT) 399 | disp([' iters: ',num2str(count),... 400 | ' num coeffs: ',num2str(usedNum), ... 401 | ' min gamma: ', num2str(min(gamma)),... 402 | ' gamma change: ',num2str(max(abs(gamma - gamma_old))),... 403 | ' mu change: ', num2str(dmu)]); 404 | end; 405 | if (count >= MAX_ITERS), if PRINT, fprintf('Reach max iterations. 
Stop\n\n'); end; break; end; 406 | 407 | end; 408 | 409 | 410 | 411 | if isempty(keep_list) 412 | Result.x = zeros(M,1); 413 | Result.gamma_used = []; 414 | Result.gamma_est = zeros(p,1); 415 | Result.B = B; 416 | Result.count = count; 417 | Result.lambdatrace = lambda; 418 | 419 | else 420 | %% Expand hyperparameyers 421 | gamma_used = sort(keep_list); 422 | gamma_est = zeros(p,1); 423 | gamma_est(keep_list,1) = gamma; 424 | 425 | 426 | %% reconstruct the original signal 427 | x = zeros(M,1); 428 | currentLoc = 0; 429 | for i = 1 : usedNum 430 | 431 | currentLen = size(Sigma0{i},1); 432 | currentLoc = currentLoc + 1; 433 | seg = currentLoc : 1 : currentLoc + currentLen - 1; 434 | 435 | realLocs = blkStartLoc0(keep_list(i)) : blkStartLoc0(keep_list(i))+currentLen-1; 436 | 437 | x( realLocs ) = mu_x( seg ); 438 | currentLoc = seg(end); 439 | end 440 | 441 | if (scl < 0.4) | (scl > 1) 442 | Result.x = x * scl/0.4; 443 | else 444 | Result.x = x; 445 | end 446 | Result.gamma_used = gamma_used; 447 | Result.gamma_est = gamma_est; 448 | Result.B = B; 449 | Result.count = count; 450 | Result.lambda = lambda; 451 | end 452 | 453 | return; 454 | 455 | 456 | 457 | -------------------------------------------------------------------------------- /BSBL_FM.m: -------------------------------------------------------------------------------- 1 | function Result = BSBL_FM(PHI,y,blkStartLoc,LearnLambda,varargin) 2 | %------------------------------------------------------------------ 3 | % The block BCS algorithm for our following paper: 4 | % "Fast Marginalized Block SBL Algorithm" (Preprint, 2012) 5 | % 6 | % for Zhang Zhilin's 7 | % "Extension of SBL Algorithms for the Recovery of Block 8 | % Sparse Signals with Intra-Block Correlation" (Preprint, Zhang2012) 9 | % 10 | % Coded by: Liu Benyuan 11 | % Change Log: 12 | % v1.5[20121122]: optimized for speed 13 | % v1.6[20121122]: add complex support, only works for learnType=0; 14 | % v1.7[20121126]: add comments 15 | % 16 | 
%------------------------------------------------------------------ 17 | % Input for BSBL-FM: 18 | % PHI: projection matrix 19 | % y: CS measurements 20 | % blkStartLoc : Start location of each block 21 | % LearnLambda : (1) If LearnLambda = 1, 22 | % use the lambda learning rule for MEDIUM SNR cases (SNR<=30dB) 23 | % (using lambda=std(y)*1e-2 or user-input value as initial value) 24 | % (2) If LearnLambda = 2, 25 | % use the lambda learning rule for HIGH SNR cases (SNR>30dB) 26 | % (using lambda=std(y)*1e-3 or user-input value as initial value) 27 | % (3) If LearnLambda = 0, do not use the lambda learning rule 28 | % ((using lambda=1e-7 or user-input value as initial value) 29 | % 30 | % [varargin values -- in most cases you can use the default values] 31 | % 'LEARNTYPE' : LEARNTYPE = 0: Ignore intra-block correlation 32 | % LEARNTYPE = 1: Exploit intra-block correlation 33 | % [ Default: LEARNTYPE = 1 ] 34 | % 'VERBOSE' : debuging information. 35 | % 'EPSILON' : convergence criterion 36 | % 37 | % ============================== OUTPUTS ============================== 38 | % Result : 39 | % Result.x : the estimated block sparse signal 40 | % Result.gamma_used : indexes of nonzero groups in the sparse signal 41 | % Result.gamma_est : the gamma values of all the groups of the signal 42 | % Result.B : the final mean value of each correlation block 43 | % Result.count : iteration times 44 | % Result.lambda : the final value of lambda 45 | 46 | % default values for BSBL-FM 47 | eta = 1e-4; % default convergence test 48 | verbose = 0; % print some debug information 49 | learnType = 0; % default not to exploit intra block correlation 50 | max_it = 1000; % maximum iterations 51 | 52 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 53 | % 0. 
intialize, scale 54 | scl = max(std(y)); % max scale 55 | if (scl < 0.4) || (scl > 1) 56 | y = y/scl*0.4; 57 | end 58 | [~,M] = size(PHI); 59 | [~,T] = size(y); 60 | 61 | % select sigma2 62 | stdy2 = mean(std(y))^2; 63 | sigma2 = 1e-3*stdy2; % default value if otherwise specified [99] 64 | if LearnLambda == 0 65 | sigma2 = 1e-6; % noiseless [0 ] 66 | elseif LearnLambda == 2 67 | sigma2 = 1e-2*stdy2; % high SNR (SNR>=20) [2 ] 68 | elseif LearnLambda == 1 69 | sigma2 = 1e-1*stdy2; % medium SNR (SNR<20) [1 ] 70 | end 71 | 72 | if(mod(length(varargin),2)==1) 73 | error('Optional parameters should always go by pairs\n'); 74 | else 75 | for i=1:2:(length(varargin)-1) 76 | switch lower(varargin{i}) 77 | case 'learntype' 78 | learnType = varargin{i+1}; 79 | case 'epsilon' 80 | eta = varargin{i+1}; 81 | case 'sigma2_scale' 82 | sigma2 = varargin{i+1}*stdy2; 83 | case 'max_iters' 84 | max_it = varargin{i+1}; 85 | case 'verbose' 86 | verbose = varargin{i+1}; 87 | otherwise 88 | error(['Unrecognized parameter: ''' varargin{i} '''']); 89 | end 90 | end 91 | end 92 | 93 | beta = 1/sigma2; 94 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 95 | % 1. formalize the blocks and quantities used in the code 96 | % p : the number of blocks 97 | % blkStartLoc : the start index of blk 98 | % blkLenList : the length of each block 99 | p = length(blkStartLoc); 100 | blkLenList = ones(p,1); 101 | for k = 1 : p-1 102 | blkLenList(k) = blkStartLoc(k+1)-blkStartLoc(k); 103 | end 104 | blkLenList(p) = M - blkStartLoc(end)+1; 105 | maxLen = max(blkLenList); 106 | if sum(blkLenList == maxLen) == p, 107 | equalSize = 1; 108 | else 109 | equalSize = 0; 110 | end 111 | % when the blkLen=1 we avoid the exploiting feature. 112 | if maxLen == 1, 113 | learnType = 0; 114 | end 115 | 116 | % pre-allocating space 117 | S = cell(p,1); s = cell(p,1); 118 | Q = cell(p,1); q = cell(p,1); 119 | currentSeg = cell(p,1); 120 | localSeg = cell(p,1); 121 | Phi = cell(p,1); 122 | % 2. 
prepare the quantities used in the code. 123 | for k = 1 : p 124 | currentLoc = blkStartLoc(k); 125 | currentLen = blkLenList(k); 126 | currentSeg{k} = currentLoc:1:currentLoc + currentLen - 1; 127 | 128 | Phi{k} = PHI(:,currentSeg{k}); 129 | S{k} = beta.*Phi{k}'*Phi{k}; 130 | Q{k} = beta.*Phi{k}'*y; 131 | end 132 | 133 | % 3. start from *NULL*, decide which one to add -> 134 | A = cell(p,1); 135 | Am = cell(p,1); % old A 136 | theta = zeros(p,1); 137 | for k = 1 : p 138 | A{k} = (S{k})\(Q{k}*Q{k}' - S{k})/(S{k}); 139 | theta(k) = 1/blkLenList(k) * real(trace(A{k})); 140 | A{k} = eye(blkLenList(k)).*theta(k); 141 | end 142 | % select the basis that minimize the change of *likelihood* 143 | ml = inf*ones(1,p); 144 | ig0 = find(theta>0); 145 | len = length(ig0); 146 | for kk = 1:len 147 | k = ig0(kk); 148 | ml(k) = log(abs(det(eye(blkLenList(k)) + A{k}*S{k}))) ... 149 | - trace(real(Q{k}'/(eye(blkLenList(k)) + A{k}*S{k})*A{k}*Q{k})); 150 | end 151 | [~,index] = min(ml); 152 | gamma = theta(index); 153 | Am{index} = A{index}; % Am -> record the past value of A 154 | if verbose, fprintf(1,'ADD,\t idx=%3d, GAMMA_OP=%f\n',index,gamma); end 155 | 156 | % 3. 
update quantities (Sig,Mu,S,Q,Phiu) 157 | Sigma_ii = (eye(blkLenList(index))/Am{index} + S{index})\eye(blkLenList(index)); 158 | Sig = Sigma_ii; 159 | Mu = Sigma_ii*Q{index}; 160 | % The relevent block basis 161 | Phiu = Phi{index}; 162 | for k = 1 : p 163 | Phi_k = Phi{k}; 164 | S{k} = S{k} - beta^2.*Phi_k'*(Phiu*Sigma_ii*Phiu')*Phi_k; 165 | Q{k} = Q{k} - beta .*Phi_k'*Phiu*Mu; 166 | end 167 | 168 | % system parameter 169 | ML=zeros(max_it,1); 170 | 171 | for count = 1:max_it 172 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 173 | localLoc = 1; 174 | for i = 1 : length(index); 175 | k = index(i); 176 | localLen = blkLenList(k); 177 | localSeg{i} = localLoc:1:localLoc + localLen - 1; 178 | localLoc = localLoc + localLen; 179 | end 180 | 181 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 182 | % pre-process steps if we want to learn the intra-block-correlation 183 | % learnType == 2 : calculate the mean of r_i 184 | if learnType == 2 185 | len = length(index); r = zeros(len,1); 186 | for i = 1 : len 187 | seg = localSeg{i}; 188 | Sigma_ii = Sig(seg,seg); 189 | Mu_i = Mu(seg); 190 | [~,r(i)] = learnB(Sigma_ii,Mu_i,gamma(i)); 191 | end 192 | r_hat = mean(r); % mean or max 193 | BT = genB(r_hat,maxLen); 194 | end 195 | 196 | % calculate s,q 197 | for k = 1 : p 198 | which = find(index==k,1); 199 | if isempty(which) % the k-th basis is not included 200 | s{k} = S{k}; 201 | q{k} = Q{k}; 202 | else % the k-th basis is calculated 203 | invDenom = (eye(blkLenList(k)) - S{k}*Am{k})\eye(blkLenList(k)); 204 | s{k} = invDenom*S{k}; 205 | q{k} = invDenom*Q{k}; 206 | end 207 | % learnType ==>> [0,1,2] 208 | A{k} = (s{k})\(q{k}*q{k}' - s{k})/(s{k}); 209 | theta(k) = 1/blkLenList(k) * real(trace(A{k})); 210 | if learnType == 0 % [0] without intra-correlation 211 | A{k} = eye(blkLenList(k))*theta(k); 212 | elseif learnType == 1 % [1] with individual intra corr 213 | rr = mean(diag(A{k},1))/mean(diag(A{k})); 214 | if abs(rr)>0.95, rr = 
0.95*sign(rr); end 215 | Bc = genB(rr,blkLenList(k)); 216 | A{k} = Bc*theta(k); 217 | elseif learnType == 2 % [2] with unified intra corr 218 | if equalSize 219 | Bc = BT; 220 | else 221 | Bc = genB(r_hat,blkLenList(k)); 222 | end 223 | A{k} = Bc.*theta(k); 224 | end 225 | end 226 | 227 | % choice the next basis that [minimizes] the cost function 228 | ml = inf*ones(1,p); 229 | ig0 = find(theta>0); 230 | % index for re-estimate 231 | [ire,~,which] = intersect(ig0,index); 232 | if ~isempty(ire) 233 | len = length(which); 234 | for kk = 1:len 235 | k = ire(kk); 236 | ml(k) = log(abs(det(eye(blkLenList(k)) + A{k}*s{k}))) ... 237 | -trace(real(q{k}'/(eye(blkLenList(k)) + A{k}*s{k})*A{k}*q{k})) ... 238 | -(log(abs(det(eye(blkLenList(k))+ Am{k}*s{k}))) ... 239 | -trace(real(q{k}'/(eye(blkLenList(k)) + Am{k}*s{k})*Am{k}*q{k}))); 240 | end 241 | end 242 | % index for adding 243 | iad = setdiff(ig0,ire); 244 | if ~isempty(iad) 245 | len = length(iad); 246 | for kk = 1:len 247 | k = iad(kk); 248 | ml(k) = log(abs(det(eye(blkLenList(k)) + A{k}*s{k}))) ... 249 | -trace(real(q{k}'/(eye(blkLenList(k)) + A{k}*s{k})*A{k}*q{k})); 250 | end 251 | end 252 | % index for deleting 253 | is0 = setdiff((1:p),ig0); 254 | [ide,~,which] = intersect(is0,index); 255 | if ~isempty(ide) 256 | len = length(which); 257 | for kk = 1:len 258 | k = ide(kk); 259 | ml(k) = -(log(abs(det(eye(blkLenList(k)) + Am{k}*s{k}))) ... 260 | -trace(real(q{k}'/(eye(blkLenList(k)) + Am{k}*s{k})*Am{k}*q{k}))); 261 | end 262 | end 263 | 264 | % as we are minimizing the cost function : 265 | [ML(count),idx] = min(ml); 266 | 267 | % check if terminates? 
268 | if ML(count)>=0, break; end 269 | if count > 2 && abs(ML(count)-ML(count-1)) < abs(ML(count)-ML(1))*eta, break; end 270 | 271 | % update block gammas 272 | which = find(index==idx); 273 | % processing the quantities update 274 | if ~isempty(which) % the select basis is already in the *LIST* 275 | seg = localSeg{which}; 276 | Sig_j = Sig(:,seg); 277 | Sig_jj = Sig(seg,seg); 278 | if theta(idx)>0 279 | %%%% re-estimate %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 280 | if verbose,fprintf(1,'REE,\t idx=%3d, GAMMA_OP=%f\n',idx,theta(idx));end 281 | gamma_new = theta(idx); 282 | ki = Sig_j/(Sig_jj + Am{idx}/(Am{idx} - A{idx})*A{idx})*Sig_j'; 283 | Sig = Sig - ki; 284 | Mu = Mu - beta.*ki*Phiu'*y; 285 | PKP = Phiu*ki*Phiu'; 286 | for k = 1 : p 287 | Phi_m = Phi{k}; 288 | PPKP = Phi_m'*PKP; 289 | S{k} = S{k} + beta^2.*PPKP*Phi_m; 290 | Q{k} = Q{k} + beta^2.*PPKP*y; 291 | end 292 | % 293 | gamma(which) = gamma_new; % 1 294 | Am{idx} = A{idx}; % 2 295 | else 296 | %%%% delete %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 297 | if verbose,fprintf(1,'DEL,\t idx=%3d, GAMMA_OP=%f\n',idx,gamma(which));end 298 | if length(index)==1, break; end % we are deleting the only one 299 | ki = Sig_j/Sig_jj*Sig_j'; 300 | Sig = Sig - ki; 301 | Mu = Mu - beta.*ki*Phiu'*y; 302 | PKP = Phiu*ki*Phiu'; 303 | for k = 1 : p 304 | Phi_m = Phi{k}; 305 | PPKP = Phi_m'*PKP; 306 | S{k} = S{k} + beta^2.*PPKP*Phi_m; 307 | Q{k} = Q{k} + beta^2.*PPKP*y; 308 | end 309 | % delete relevant basis and block 310 | index(which) = []; 311 | Mu(seg,:) = []; 312 | Sig(:,seg) = []; 313 | Sig(seg,:) = []; 314 | Phiu(:,seg) = []; 315 | % 316 | gamma(which) = []; % 1 317 | Am{idx} = []; % 2 318 | end 319 | else 320 | if theta(idx)>0 321 | %%%% add %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 322 | if verbose,fprintf(1,'ADD,\t idx=%3d, GAMMA_OP=%f\n',idx,theta(idx));end 323 | gamma_new = theta(idx); 324 | Phi_j = Phi{idx}; 325 | % 326 | Sigma_ii = 
(eye(blkLenList(idx))+A{idx}*S{idx})\A{idx}; 327 | mu_i = Sigma_ii*Q{idx}; 328 | SPP = Sig*Phiu'*Phi_j; % common 329 | Sigma_11 = Sig + beta^2.*SPP*Sigma_ii*SPP'; 330 | Sigma_12 = -beta.*SPP*Sigma_ii; 331 | Sigma_21 = Sigma_12'; 332 | mu_1 = Mu - beta.*SPP*mu_i; 333 | e_i = Phi_j - beta.*Phiu*SPP; 334 | ESE = e_i*Sigma_ii*e_i'; 335 | for k = 1 : p 336 | Phi_m = Phi{k}; 337 | S{k} = S{k} - beta^2.*Phi_m'*ESE*Phi_m; 338 | Q{k} = Q{k} - beta.*Phi_m'*e_i*mu_i; 339 | end 340 | % adding relevant basis 341 | Sig = [Sigma_11 Sigma_12; ... 342 | Sigma_21 Sigma_ii]; 343 | Mu = [mu_1; ... 344 | mu_i]; 345 | Phiu = [Phiu Phi_j]; 346 | index = [index;idx]; 347 | gamma = [gamma;gamma_new]; % 1 348 | Am{idx} = A{idx}; % 2 349 | else 350 | break; % null operation 351 | end 352 | end 353 | 354 | end 355 | % format the output ===> X the signal 356 | weights = zeros(M,T); 357 | formatSeg = [currentSeg{index}]; 358 | weights(formatSeg,:) = Mu; 359 | if (scl < 0.4) || (scl > 1) 360 | Result.x = weights * scl/0.4; 361 | else 362 | Result.x = weights; 363 | end 364 | Result.r = 1.0; % lazy ... 
365 | Result.gamma_used = index; 366 | Result.gamma_est = gamma; 367 | Result.count = count; 368 | Result.lambda = sigma2; 369 | % END % 370 | 371 | %% sub-functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 372 | % subfunctions of estimating the AR(1) coefficient r and 373 | % reconstruction the covariance matrix with B^{-1} valid 374 | function [B,r] = learnB(Sig,Mu,gamma) 375 | len = length(Mu); 376 | B = (Sig + Mu*Mu')./gamma; 377 | r = (mean(diag(B,1))/mean(diag(B))); 378 | if abs(r) >= 0.95, r = 0.95*sign(r); end; 379 | B = genB(r,len); 380 | % generate B according to r,len 381 | % NOTE: abs(r) should be less than 1.0 382 | function B = genB(r,len) 383 | jup = 0:len-1; 384 | bs = r.^jup; 385 | B = toeplitz(bs); 386 | 387 | % generate temporal Smooth matrix 388 | % NOTE: current does not handle L 389 | function B = temporalSmooth(a,b,~,len) 390 | A1 = b.*eye(len); 391 | A2 = (a*b).*[zeros(1,len-1) 0; eye(len-1), zeros(len-1,1)]; 392 | Bc = A1 + A2; 393 | B = Bc*Bc'; 394 | 395 | -------------------------------------------------------------------------------- /Phi.mat: -------------------------------------------------------------------------------- 1 | MATLAB 5.0 MAT-file, Platform: PCWIN, Created on: Sun Jan 13 22:50:43 2013 IM0Phi -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BSBL-FM # 2 | 3 | This is a fast implementation of the Block Sparse Bayesian Learning [BSBL](https://sites.google.com/site/researchbyzhang/) algorithm. The developed algorith is based on the Fast Marginalized **(FM)** likelihood maximization algorithm, which yields ~8 times speedup while also pertains nearly the same recovery performances. 
4 | 5 | A Python version of the BSBL algorithms is also available: 6 | [pyBSBL](https://github.com/liubenyuan/pyBSBL) 7 | 8 | # A Short Introduction # 9 | 10 | A CS algorithm aims to solve **Y** = **Phi** **X** + **N**, where **Y** is the measurement matrix of size M times T, **Phi** is the under-determined sensing matrix of size M times N, and **X** is the signal. 11 | 12 | Compressed sensing can recover **X** given **Y** and the under-determined matrix **Phi**. When **T=1**, we call it the Single Measurement Vector (**SMV**) model; with **T>1**, it is the Multiple Measurement Vector (**MMV**) model. 13 | 14 | The block-sparse model assumes that **x** can be partitioned into blocks **x** = {**x_1**, ... , **x_g**}. The non-zero entries cluster within some blocks, and the rest are *zeros*. If *d* out of *g* blocks are non-zero, then the block sparsity is defined as *d/g*. Exploiting both the block sparsity and the intra-block correlation is the source of the magic of all BSBL algorithms. 15 | 16 | Our **BSBL-FM** algorithm is an ultra-fast implementation of the original BSBL framework, which brings about a **~8x** speedup. What's more, it works in all of the following scenarios: 17 | 18 | > - SMV sparse 19 | > - MMV sparse 20 | > - SMV block sparse 21 | > - MMV block sparse 22 | > - Real-valued 23 | > - Complex-valued 24 | 25 | See the demos and implementations below for more details. 26 | 27 | # Codes and Data # 28 | 29 | The `.m` codes are: 30 | 31 | > **CODE:** 32 | > 33 | > - **BSBL_FM.m**: the main algorithm, also called **MBSBL-FM** in the MMV model 34 | > - **BSBL_BO.m**: Zhilin's BSBL-BO algorithm. 
35 | > - **demo_smv_real.m**: the real valued SMV block sparse demo 36 | > - **demo_smv_complex.m**: the complexed valued SMV block sparse demo 37 | > - **demo_mmv.m**: the real valued MMV block sparse demo 38 | > - **demo_fecg.m**: the demo code for FECG dataset recovery 39 | 40 | The `.mat` data files are: 41 | 42 | > **DATA:** 43 | > 44 | > - **demo.mat**: the data for SMV case, contains *re*, *im* vectors 45 | > - **signal_01.mat**: FECG datasets used in BSBL-BO by Zhilin 46 | > - **Phi.mat**: the sensing matrix for CS FECG data 47 | 48 | # Citations # 49 | 50 | If you find the **BSBL-FM** algorithm useful, please cite: 51 | 52 | ```bibtex 53 | @Article{liu2013energy, 54 | Title = {Energy Efficient Telemonitoring of Physiological Signals via Compressed Sensing: A Fast Algorithm and Power 55 | Consumption Evaluation}, 56 | Author = {Liu, Benyuan and Zhang, Zhilin and Xu, Gary and Fan, Hongqi and Fu, Qiang}, 57 | Journal = {Biomedical Signal Processing and Control}, 58 | Year = {2014}, 59 | Pages = {80--88}, 60 | Volume = {11C} 61 | } 62 | ``` 63 | 64 | ```bibtex 65 | @InProceedings{liu2013compression, 66 | Title = {Compression via Compressive Sensing: A Low-Power Framework for the Telemonitoring of Multi-Channel Physiological 67 | Signals}, 68 | Author = {Benyuan Liu and Zhilin Zhang and Hongqi Fan and Qiang Fu}, 69 | Booktitle = {2013 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)}, 70 | Year = {2013}, 71 | Organization = {IEEE}, 72 | Pages = {9--12} 73 | } 74 | ``` 75 | 76 | More powerful **STSBL** algorithm developed by Zhilin Zhang is available on-line: 77 | ```bibtex 78 | @InProceedings{ZhangAsilomar2013, 79 | Title = {Compressed Sensing for Energy-Efficient Wireless Telemonitoring: Challenges and Opportunities}, 80 | Author = {Zhilin Zhang and Bhaskar D. 
Rao and Tzyy-Ping Jung}, 81 | Booktitle = {Asilomar Conference on Signals, Systems, and Computers (Asilomar 2013)}, 82 | Year = {2013} 83 | } 84 | ``` 85 | 86 | ```bibtex 87 | @Article{zhang2014spatiotemporal, 88 | Title = {Spatiotemporal Sparse Bayesian Learning with Applications to Compressed Sensing of Multichannel EEG for Wireless 89 | Telemonitoring and Brain-Computer Interfaces}, 90 | Author = {Zhilin Zhang and Tzyy-Ping Jung and Scott Makeig and Bhaskar D. Rao and Zhouyue Pi}, 91 | Journal = {(Accepted) IEEE Trans. on Neural Systems and Rehabilitation Engineering}, 92 | Year = {2014} 93 | } 94 | ``` 95 | 96 | -------------------------------------------------------------------------------- /demo.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liubenyuan/BSBL-FM/798c8cc576715b3653c394f1332e6ece0ea828bb/demo.mat -------------------------------------------------------------------------------- /demo_fecg.m: -------------------------------------------------------------------------------- 1 | % MMV for BSBL-FM 2 | clear; close all; 3 | 4 | % get the raw dataset (which includes strong noise) 5 | load signal_01.mat; 6 | 7 | % downsampling to 250 Hz 8 | s = s(:,1:4:2048); 9 | C = 8; 10 | 11 | % the size of the sensing matrix Phi 12 | load Phi; 13 | [M,N] = size(Phi); 14 | W = dctmtx(N); W = W'; 15 | A = Phi*W; 16 | % A=zeros(M,N); 17 | % for k=1:M 18 | % A(k,:)=dct(Phi(k,:)); 19 | % end 20 | 21 | % block size of the user-defined block partition of BSBL-BO 22 | blkLen = 32; 23 | 24 | % the user-defined block partition 25 | groupStartLoc = 1:blkLen:N; 26 | 27 | % variable for recovered dataset 28 | sig = s'; 29 | 30 | %==================================================== 31 | % Compress all the ECG recordings, and recover them 32 | %==================================================== 33 | Y = Phi*sig; 34 | 35 | % method 1. 
recover using MBSBL-FM 36 | tic; 37 | Result = BSBL_FM(A,Y,groupStartLoc,99,'epsilon',1e-4,'learnType',0,'verbose',0); 38 | runtime = toc; 39 | 40 | % method 2. recover using SMV each channel 41 | runtime1 = zeros(C,1); mse1 = zeros(C,1); 42 | for ii = 1 : C 43 | y = Phi*sig(:,ii); 44 | tic; 45 | Result1 = BSBL_FM(A,y,groupStartLoc,99,'epsilon',1e-4,'learnType',0,'verbose',0); 46 | runtime1(ii) = toc; 47 | 48 | mse1(ii) = (norm(sig(:,ii) - W*Result1.x,'fro')/norm(sig(:,ii),'fro'))^2; 49 | end 50 | 51 | fprintf('Tol Runtime %8.8f <----> Single Tol Runtime %8.8f \n',runtime,sum(runtime1)); 52 | for jj = 1 : 8 % 8 channels 53 | mse = (norm(sig(:,jj) - W*Result.x(:,jj),'fro')/norm(sig(:,jj),'fro'))^2; 54 | fprintf('MSE(C=%3d): %8.8f <----> Single MSE: %8.8f\n',jj,mse,mse1(jj)); 55 | end 56 | -------------------------------------------------------------------------------- /demo_mmv.m: -------------------------------------------------------------------------------- 1 | % This demo shows the capability of recoverying REAL valued signals 2 | % using the proposed algorithm : BSBL-FM 3 | % 4 | % author: liubenyuan@gmail.com 5 | % date: 2013-03-04 6 | % 7 | clear all; close all; 8 | %========================================================================== 9 | rng(1985,'v4'); 10 | 11 | % problem dimension 12 | M = 128; % row number of the dictionary matrix 13 | N = 256; % column number 14 | blkNum = 5; % nonzero block number 15 | blkLen = 16; % block length 16 | iterNum= 1; % number of experiments (100) 17 | 18 | % Generate the known matrix with columns draw uniformly from the surface of a unit hypersphere 19 | Phi = randn(M,N); 20 | for i=1:N 21 | Phi(:,i) = Phi(:,i) / norm(Phi(:,i)); 22 | end 23 | 24 | % load data 25 | load demo.mat; 26 | 27 | % prepare 28 | Wgen = [re im]; % Nx2, you may extend to NxT 29 | % compressed the signal 30 | signal = Phi * Wgen; 31 | % Observation noise 32 | SNR = 15; 33 | stdnoise = std(signal)*10^(-SNR/20); 34 | noise = randn(M,1) * stdnoise; 
35 | % Noisy observation 36 | Y = signal + noise; 37 | 38 | %=== BSBL-FM ============================================================== 39 | blkStartLoc = 1:blkLen:N; 40 | learnLambda = 1; 41 | 42 | tic; 43 | Result2 = BSBL_FM(Phi,Y,blkStartLoc,learnLambda,'learnType',0,'verbose',0); 44 | t_fm2 = toc; 45 | mse_fm2 = (norm(Wgen - Result2.x,'fro')/norm(Wgen,'fro'))^2; 46 | %=== BSBL-FM ============================================================== 47 | 48 | fprintf('BSBL-FM(learn correlation) : time: %4.3f, MSE: %g, Iter=%d\n',mean(t_fm2),mean(mse_fm2),Result2.count); 49 | 50 | %=== draw(1) 51 | figure(1) 52 | clf; 53 | subplot(121) 54 | plot(Wgen(:,1),'b-','linewidth',2); hold on 55 | plot(Wgen(:,2),'r--','linewidth',2); grid on; axis tight 56 | hx1 = xlabel('(a) Original 2-channel Signal'); hy1 = ylabel('Amplitude'); 57 | ax1 = gca; 58 | hl = legend('Channel -- 1','Channel -- 2'); 59 | subplot(122) 60 | plot(Result2.x(:,1),'b-','linewidth',2); hold on 61 | plot(Result2.x(:,2),'r--','linewidth',2); grid on; axis tight 62 | hx2 = xlabel('(b) Recover by BSBL-FM'); 63 | ax2 = gca; 64 | 65 | %--- config --- 66 | set(ax1, 'LooseInset', get(ax1, 'TightInset')); 67 | set(ax2, 'LooseInset', get(ax2, 'TightInset')); 68 | set([ax1 ax2 hl],'FontName','Times','FontSize',13); 69 | set([hx1 hy1 hx2],'FontName','Times','FontSize',15,'FontWeight','bold'); 70 | 71 | -------------------------------------------------------------------------------- /demo_real.m: -------------------------------------------------------------------------------- 1 | % This demo shows the capability of recoverying REAL valued signals 2 | % using the proposed algorithm : BSBL-FM 3 | % 4 | % author: liubenyuan@gmail.com 5 | % date: 2013-03-04 6 | % 7 | clear all; close all; 8 | %========================================================================== 9 | rng(1985,'v4'); 10 | 11 | % problem dimension 12 | M = 128; % row number of the dictionary matrix 13 | N = 256; % column number 14 | blkNum = 5; % nonzero 
block number 15 | blkLen = 16; % block length 16 | iterNum= 1; % number of experiments (100) 17 | 18 | % Generate the known matrix with columns draw uniformly from the surface of a unit hypersphere 19 | Phi = randn(M,N); 20 | for i=1:N 21 | Phi(:,i) = Phi(:,i) / norm(Phi(:,i)); 22 | end 23 | 24 | % load data 25 | load demo.mat; 26 | 27 | % prepare 28 | Wgen = re; 29 | % compressed the signal 30 | signal = Phi * Wgen; 31 | % Observation noise 32 | SNR = 15; 33 | stdnoise = std(signal)*10^(-SNR/20); 34 | noise = randn(M,1) * stdnoise; 35 | % Noisy observation 36 | Y = signal + noise; 37 | 38 | %=== BSBL-FM ============================================================== 39 | blkStartLoc = 1:blkLen:N; 40 | learnLambda = 1; 41 | 42 | tic; 43 | Result2 = BSBL_FM(Phi,Y,blkStartLoc,learnLambda,'learnType',2,'verbose',0); 44 | t_fm2 = toc; 45 | mse_fm2 = (norm(Wgen - Result2.x,'fro')/norm(Wgen,'fro'))^2; 46 | %=== BSBL-FM ============================================================== 47 | 48 | fprintf('BSBL-FM(learn correlation) : time: %4.3f, MSE: %g, Iter=%d\n',mean(t_fm2),mean(mse_fm2),Result2.count); 49 | 50 | %=== draw(1) 51 | figure(1) 52 | clf; 53 | subplot(121) 54 | plot(Wgen,'b-','linewidth',2); hold on; grid on; axis tight 55 | hx1 = xlabel('(a) Original Signal'); hy1 = ylabel('Amplitude'); 56 | ax1 = gca; 57 | subplot(122) 58 | plot(Result2.x,'b-','linewidth',2); hold on; grid on; axis tight 59 | hx2 = xlabel('(b) Recover by BSBL-FM'); 60 | ax2 = gca; 61 | 62 | %--- config --- 63 | set(ax1, 'LooseInset', get(ax1, 'TightInset')); 64 | set(ax2, 'LooseInset', get(ax2, 'TightInset')); 65 | set([hx1 hy1 hx2],'FontName','Times','FontSize',15,'FontWeight','bold'); 66 | 67 | -------------------------------------------------------------------------------- /demo_smv_complex.m: -------------------------------------------------------------------------------- 1 | % This demo shows the capability of recoverying COMPLEX valued signals 2 | % using the proposed algorithm : BSBL-FM 
3 | % 4 | % author: liubenyuan@gmail.com 5 | % date: 2013-03-04 6 | % 7 | clear all; close all; 8 | %========================================================================== 9 | rng(1985,'v4'); 10 | 11 | % problem dimension 12 | M = 128; % row number of the dictionary matrix 13 | N = 256; % column number 14 | blkNum = 5; % nonzero block number 15 | blkLen = 16; % block length 16 | iterNum= 1; % number of experiments (100) 17 | 18 | % Generate the known matrix with columns draw uniformly from the surface of a unit hypersphere 19 | Phi = randn(M,N) + sqrt(-1)*randn(M,N); 20 | for i=1:N 21 | Phi(:,i) = Phi(:,i) / norm(Phi(:,i)); 22 | end 23 | 24 | % load data 25 | load demo.mat; 26 | 27 | % prepare 28 | Wgen = re + im*1i; 29 | % compressed the signal 30 | signal = Phi * Wgen; 31 | % Observation noise 32 | SNR = 15; 33 | stdnoise = std(signal)*10^(-SNR/20); 34 | noise = randn(M,1) * stdnoise; 35 | % Noisy observation 36 | Y = signal + noise; 37 | 38 | %=== BSBL-FM ============================================================== 39 | blkStartLoc = 1:blkLen:N; 40 | learnLambda = 1; 41 | 42 | tic; 43 | Result2 = BSBL_FM(Phi,Y,blkStartLoc,learnLambda,'learnType',2,'verbose',0); 44 | t_fm2 = toc; 45 | mse_fm2 = (norm(Wgen - Result2.x,'fro')/norm(Wgen,'fro'))^2; 46 | %=== BSBL-FM ============================================================== 47 | 48 | fprintf('BSBL-FM(learn correlation) : time: %4.3f, MSE: %g, Iter=%d\n',mean(t_fm2),mean(mse_fm2),Result2.count); 49 | 50 | %=== draw(1) 51 | figure(1) 52 | clf; 53 | subplot(121) 54 | plot(real(Wgen),'b-','linewidth',2); hold on 55 | plot(imag(Wgen),'r--','linewidth',2); grid on; axis tight 56 | hx1 = xlabel('(a) Original Complex Signal'); hy1 = ylabel('Amplitude'); 57 | ax1 = gca; 58 | subplot(122) 59 | plot(real(Result2.x),'b-','linewidth',2); hold on 60 | plot(imag(Result2.x),'r--','linewidth',2); grid on; axis tight 61 | hx2 = xlabel('(b) Recover by BSBL-FM'); 62 | ax2 = gca; 63 | 64 | %--- config --- 65 | set(ax1, 
'LooseInset', get(ax1, 'TightInset')); 66 | set(ax2, 'LooseInset', get(ax2, 'TightInset')); 67 | set([ax1 ax2],'FontName','Times','FontSize',13); 68 | set([hx1 hy1 hx2],'FontName','Times','FontSize',15,'FontWeight','bold'); 69 | 70 | -------------------------------------------------------------------------------- /demo_smv_real.m: -------------------------------------------------------------------------------- 1 | % This demo shows the capability of recovering REAL valued signals 2 | % using the proposed algorithm : BSBL-FM 3 | % 4 | % author: liubenyuan@gmail.com 5 | % date: 2013-03-04 6 | % 7 | clear all; close all; 8 | %========================================================================== 9 | rng(1985,'v4'); 10 | 11 | % problem dimension 12 | M = 128; % row number of the dictionary matrix 13 | N = 256; % column number 14 | blkNum = 5; % nonzero block number 15 | blkLen = 16; % block length 16 | iterNum= 1; % number of experiments (100) 17 | 18 | % Generate the known matrix with columns draw uniformly from the surface of a unit hypersphere 19 | Phi = randn(M,N); 20 | for i=1:N 21 | Phi(:,i) = Phi(:,i) / norm(Phi(:,i)); 22 | end 23 | 24 | % load data 25 | load demo.mat; 26 | 27 | % prepare 28 | Wgen = re; 29 | % compressed the signal 30 | signal = Phi * Wgen; 31 | % Observation noise 32 | SNR = 15; 33 | stdnoise = std(signal)*10^(-SNR/20); 34 | noise = randn(M,1) * stdnoise; 35 | % Noisy observation 36 | Y = signal + noise; 37 | 38 | %=== BSBL-FM ============================================================== 39 | blkStartLoc = 1:blkLen:N; 40 | learnLambda = 1; 41 | 42 | tic; 43 | Result2 = BSBL_FM(Phi,Y,blkStartLoc,learnLambda,'learnType',2,'verbose',0); 44 | t_fm2 = toc; 45 | mse_fm2 = (norm(Wgen - Result2.x,'fro')/norm(Wgen,'fro'))^2; 46 | %=== BSBL-FM ============================================================== 47 | 48 | fprintf('BSBL-FM(learn correlation) : time: %4.3f, MSE: %g, Iter=%d\n',mean(t_fm2),mean(mse_fm2),Result2.count); 49 | 50 | %=== 
draw(1) 51 | figure(1) 52 | clf; 53 | subplot(121) 54 | plot(Wgen,'b-','linewidth',2); hold on; grid on; axis tight 55 | hx1 = xlabel('(a) Original Signal'); hy1 = ylabel('Amplitude'); 56 | ax1 = gca; 57 | subplot(122) 58 | plot(Result2.x,'b-','linewidth',2); hold on; grid on; axis tight 59 | hx2 = xlabel('(b) Recover by BSBL-FM'); 60 | ax2 = gca; 61 | 62 | %--- config --- 63 | set(ax1, 'LooseInset', get(ax1, 'TightInset')); 64 | set(ax2, 'LooseInset', get(ax2, 'TightInset')); 65 | set([ax1 ax2],'FontName','Times','FontSize',13); % fixed: 'hl' removed -- it was undefined (this demo creates no legend, unlike demo_mmv.m) 66 | set([hx1 hy1 hx2],'FontName','Times','FontSize',15,'FontWeight','bold'); 67 | 68 | -------------------------------------------------------------------------------- /signal_01.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/liubenyuan/BSBL-FM/798c8cc576715b3653c394f1332e6ece0ea828bb/signal_01.mat --------------------------------------------------------------------------------