├── .gitignore ├── .gitmodules ├── README.md ├── completion2MTL.m ├── diag3.m ├── errorxy.m ├── errorxym.m ├── exp_completion.m ├── exp_completion_4d.m ├── exp_completion_l1.m ├── exp_denoising.m ├── exp_postproc.m ├── exp_tensor_tradeoff.m ├── eye3.m ├── flatten.m ├── flatten_adj.m ├── gitlog.m ├── ispropertystruct.m ├── kolda3.m ├── matrix_adm.m ├── matrixl1_adm.m ├── method_names_string.m ├── numcomp.m ├── pca.m ├── pcaspec.m ├── plot_compare_denoising.m ├── plot_completion_l1.m ├── plot_denoising.m ├── plot_nips2011.m ├── plot_nips2011_final.m ├── plot_nips2013.m ├── plot_overlap_vs_latent_2013.m ├── plot_tensorworkshop10.m ├── plot_threshold_vs_normalized_rank.m ├── printvec.m ├── propertylist2struct.m ├── randsplit.m ├── randtensor.m ├── randtensor3.m ├── set_defaults.m ├── softth.m ├── softth_overlap.m ├── split.m ├── tensor_as_matrix.m ├── tensorconst_adm.m ├── tensorconst_subset_adm.m ├── tensorl1_adm.m └── tensormix_adm.m /.gitignore: -------------------------------------------------------------------------------- 1 | *.*~ 2 | *.mat 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ConvexTensor"] 2 | path = ConvexTensor 3 | url = https://github.com/bernard24/ConvexTensor.git 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | tensor 2 | ====== 3 | 4 | Matlab code for convex optimization based tensor decomposition (completion/denoising). 5 | 6 | This is a newer, cleaner, but less documented repository. Please also have a look at my old [demo](https://github.com/ryotat/TKML10). 
7 | -------------------------------------------------------------------------------- /completion2MTL.m: -------------------------------------------------------------------------------- 1 | % completion2MTL - this is a simplified version of completion2MTL 2 | % written by Bernardino Romera-Paredes 3 | % 4 | % Example 5 | % addpath tensor_toolbox % path to SANDIA tensor toolbox 6 | % sz=[50 50 20]; rr=[7 8 9]; 7 | % X0=randtensor3(sz, rr); 8 | % [ind_tr, ind_te]=randsplit(prod(sz), 0.1); 9 | % Knowns=zeros(sz); 10 | % Knowns(ind_tr)=1; 11 | % Xobs=zeros(sz); 12 | % Xobs(ind_tr)=X0(ind_tr); 13 | % data=completion2MTL(struct('Tensor', tensor(Xobs), 'KnownInputs', tensor(Knowns)),[]); 14 | % 15 | % See also 16 | % tensor, randsplit 17 | % 18 | % Copyright(c) 2010-2014 Ryota Tomioka 19 | % This software is distributed under the MIT license. See license.txt 20 | 21 | function [dataConf DataParameters] = completion2MTL(dataConf, DataParameters) 22 | %COMPLETION2MTL Summary of this function goes here 23 | % Detailed explanation goes here 24 | 25 | noisyWTensor=dataConf.Tensor; 26 | KnownInputs=dataConf.KnownInputs; 27 | 28 | dimensions=size(noisyWTensor); 29 | nAttrs=dimensions(1); 30 | nTasks=prod(dimensions(2:end)); 31 | noisyW=double(reshape(noisyWTensor, [nAttrs, nTasks])); 32 | knowns=double(reshape(KnownInputs, [nAttrs, nTasks])); 33 | 34 | trainYCell=cell(1,nTasks); 35 | trainXCell=cell(1,nTasks); 36 | testYCell=cell(1,nTasks); 37 | testXCell=cell(1,nTasks); 38 | 39 | for i=1:nTasks 40 | knownT=knowns(:,i); 41 | present=find(knownT); 42 | nInstances=length(present); 43 | X=zeros(nAttrs, nInstances); 44 | Y=zeros(nInstances, 1); 45 | for j=1:length(present) 46 | X(present(j), j)=1; 47 | Y(j)=noisyW(present(j),i); 48 | end 49 | 50 | present=find(knownT==0); 51 | nInstances=length(present); 52 | XTest=zeros(nAttrs, nInstances); 53 | YTest=zeros(nInstances, 1); 54 | % for j=1:length(present) 55 | % XTest(present(j), j)=1; 56 | % YTest(j)=W(present(j),i); 57 | % end 58 | 
59 | % XTest=eye(nAttrs); 60 | % YTest=W(:,i); 61 | 62 | trainYCell{i}=Y; 63 | trainXCell{i}=X; 64 | testYCell{i}=YTest; 65 | testXCell{i}=XTest; 66 | end 67 | 68 | dataConf.trainXCell=trainXCell; 69 | dataConf.trainYCell=trainYCell; 70 | dataConf.testXCell=testXCell; 71 | dataConf.testYCell=testYCell; 72 | % dataConf.validation_testXCell=validationXCell; 73 | % dataConf.validation_testYCell=validationYCell; 74 | %dataConf.W=W; 75 | %dataConf.WTensor=WTensor; 76 | dataConf.indicators=dimensions; 77 | 78 | end 79 | -------------------------------------------------------------------------------- /diag3.m: -------------------------------------------------------------------------------- 1 | % diag3 - 3rd order super diagonal tensor 2 | % 3 | % Syntax 4 | % D=diag3(dd) 5 | % 6 | % See also 7 | % eye3 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 24 | % 25 | % Copyright(c) 2010-2014 Ryota Tomioka 26 | % This software is distributed under the MIT license. 
See license.txt 27 | 28 | 29 | function D=diag3(dd) 30 | n=length(dd); 31 | D=zeros(n,n,n); 32 | 33 | for ii=1:n 34 | D(ii,ii,ii)=dd(ii); 35 | end 36 | -------------------------------------------------------------------------------- /errorxy.m: -------------------------------------------------------------------------------- 1 | function errorxy(Data,varargin); 2 | %---------------------------------------------------------------------------- 3 | % errorxy function Plot graphs with error bars on both axes. 4 | % (This is an improved version of errorxy_o.m). 5 | % Input : - Data matrix. 6 | % * an arbitrary pairs of keyword and value parameters, where 7 | % keyword is one of the followings: 8 | % (In case that the second parameter is a number then call the 9 | % old version errorxy_o). 10 | % 'ColX' - Column number containing X values, default is 1. 11 | % 'ColY' - Column number containing Y values, default is 2. 12 | % 'ColXe' - Column number containing error X values. 13 | % If one parameter is given then assume upper 14 | % and lower errors are the same, if two values 15 | % are given then the first is the left and the 16 | % second is the right errors, 17 | % default is NaN (no X errors). 18 | % 'ColYe' - Column number containing error Y values. 19 | % If one parameter is given then assume right 20 | % and left errors are the same, if two values 21 | % are given then the first is the lower and the 22 | % second is the upper errors, 23 | % default is 3. 24 | % 'Marker' - Marker type, see plot for options, default is 'o'. 25 | % 'MarkSize' - Marker size, default is 6. 26 | % 'EdgeColor'- Marker edge color, default is 'b'. 27 | % 'FaceColor'- Marker face color, default is NaN. 28 | % 'ColorEB' - Error bars color, default is the same as 'EdgeColor'. 29 | % 'LineEB' - Error bars line type, default is '-'. 30 | % 'WidthEB' - Error bars line width, default is 0.5. 31 | % 'EdgeEB' - Length [ticks] of error bar edge, default is 0. 
32 | % 'Con' - Connect data points {'y' | 'n'}, default is 'n'. 33 | % 'ConColor' - Color of Connecting line, default is the 34 | % same as 'EdgeColor' 35 | % 'ConLine' - Type of connecting line, default is '-'. 36 | % 'ConWidth' - Width of connecting line, default is 0.5. 37 | % 'Hist' - Plot histogram {'y' | 'n'}, default is 'n'. 38 | % 'HistFaceColor'- Histogram face color, default is the same as 'EdgeColor'. 39 | % 'HistEdgeColor'- Histogram edge color, default is the same as 'EdgeColor'. 40 | % 'ScaleX' - X scale {'linear' | 'log'}, default is 'linear'. 41 | % 'ScaleY' - Y scale {'linear' | 'log'}, default is 'linear'. 42 | % 'DirX' - X direction {'normal' | 'reverse'}, default is 'normal'. 43 | % 'DirY' - Y direction {'normal' | 'reverse'}, default is 'normal'. 44 | % Plot : errorxy plot 45 | % Example: errorxy(Data,'ColYe',[5 6],'Marker','^'); 46 | % Tested : Matlab 5.3 47 | % By : Eran O. Ofek March 2004 48 | % URL : http://wise-obs.tau.ac.il/~eran/matlab.html 49 | %---------------------------------------------------------------------------- 50 | Ndots = 450; % approximate number of dots in paper 51 | 52 | if (nargin>1 & ischar(varargin{1})==0), 53 | %--- call old erroxy_o --- 54 | errorxy_o(Data,varargin{:}); 55 | else 56 | %--- new version of errorxy --- 57 | 58 | Nvarg = length(varargin); 59 | if (0.5.*Nvarg~=floor(0.5.*Nvarg)), 60 | error('Illegal number of input arguments'); 61 | end 62 | 63 | %--- set default values --- 64 | ColX = 1; 65 | ColY = 2; 66 | ColXe = [NaN, NaN]; 67 | ColYe = [3, 3]; 68 | Marker = 'o'; 69 | MarkSize = 6; 70 | EdgeColor = 'b'; 71 | FaceColor = 'n'; 72 | ColorEB = NaN; 73 | LineEB = '-'; 74 | WidthEB = 0.5; 75 | EdgeEB = 0; 76 | Con = 'n'; 77 | ConColor = NaN; 78 | ConLine = '-'; 79 | ConWidth = 0.5; 80 | Hist = 'n'; 81 | HistFaceColor= NaN; 82 | HistEdgeColor= NaN; 83 | ScaleX = 'linear'; 84 | ScaleY = 'linear'; 85 | DirX = 'normal'; 86 | DirY = 'normal'; 87 | 88 | for I=1:2:Nvarg-1, 89 | switch varargin{I} 90 | case 'ColX' 
91 | ColX = varargin{I+1}; 92 | case 'ColY' 93 | ColY = varargin{I+1}; 94 | case 'ColXe' 95 | ColXe = varargin{I+1}; 96 | case 'ColYe' 97 | ColYe = varargin{I+1}; 98 | case 'Marker' 99 | Marker = varargin{I+1}; 100 | case 'MarkSize' 101 | MarkSize = varargin{I+1}; 102 | case 'EdgeColor' 103 | EdgeColor = varargin{I+1}; 104 | case 'FaceColor' 105 | FaceColor = varargin{I+1}; 106 | case 'ColorEB' 107 | ColorEB = varargin{I+1}; 108 | case 'LineEB' 109 | LineEB = varargin{I+1}; 110 | case 'WidthEB' 111 | WidthEB = varargin{I+1}; 112 | case 'EdgeEB' 113 | EdgeEB = varargin{I+1}; 114 | case 'Con' 115 | Con = varargin{I+1}; 116 | case 'ConColor' 117 | ConColor = varargin{I+1}; 118 | case 'ConLine' 119 | ConLine = varargin{I+1}; 120 | case 'ConWidth' 121 | ConWidth = varargin{I+1}; 122 | case 'Hist' 123 | Hist = varargin{I+1}; 124 | case 'HistFaceColor' 125 | HistFaceColor = varargin{I+1}; 126 | case 'HistEdgeColor' 127 | HistEdgeColor = varargin{I+1}; 128 | case 'ScaleX' 129 | ScaleX = varargin{I+1}; 130 | case 'ScaleY' 131 | ScaleY = varargin{I+1}; 132 | case 'DirX' 133 | DirX = varargin{I+1}; 134 | case 'DirY' 135 | DirY = varargin{I+1}; 136 | case 'LineWidth', 137 | LineWidth = varargin{I+1}; 138 | otherwise 139 | error('Unknown keyword option'); 140 | end 141 | end 142 | 143 | %--- set NaN colors --- 144 | if (isnan(ColorEB)==1), 145 | ColorEB = EdgeColor; 146 | end 147 | if (isnan(ConColor)==1), 148 | ConColor = EdgeColor; 149 | end 150 | if (isnan(HistFaceColor)==1), 151 | HistFaceColor = EdgeColor; 152 | end 153 | if (isnan(HistEdgeColor)==1), 154 | HistEdgeColor = EdgeColor; 155 | end 156 | 157 | if (length(ColXe)==1), 158 | ColXe = [ColXe, ColXe]; 159 | end 160 | if (length(ColYe)==1), 161 | ColYe = [ColYe, ColYe]; 162 | end 163 | 164 | %--------------------- 165 | %--- Hold handling --- 166 | %--------------------- 167 | NextPlot = get(gcf,'NextPlot'); 168 | hold on; 169 | box on; 170 | 171 | %----------------------------------- 172 | %--- Data span for edge 
plotting --- 173 | %----------------------------------- 174 | SpanX = max(Data(:,ColX)) - min(Data(:,ColX)); 175 | SpanY = max(Data(:,ColY)) - min(Data(:,ColY)); 176 | EdgeX = SpanX.*EdgeEB./Ndots; 177 | EdgeY = SpanY.*EdgeEB./Ndots; 178 | 179 | 180 | %---------------------- 181 | %--- plot histogram --- 182 | %---------------------- 183 | switch Hist 184 | case 'n' 185 | % do nothing 186 | case 'y' 187 | Hhist = bar(Data(:,ColX),Data(:,ColY)); 188 | set(Hhist,'FaceColor',HistFaceColor,... 189 | 'EdgeColor',HistEdgeColor); 190 | otherwise 191 | error('Unknown Hist option'); 192 | end 193 | 194 | %------------------------ 195 | %--- plot data points --- 196 | %------------------------ 197 | N = size(Data,1); 198 | 199 | for I=1:1:N, 200 | H = plot(Data(I,ColX),Data(I,ColY),'o'); 201 | Hpoint(I) = H; 202 | set(Hpoint,'Marker',Marker,... 203 | 'MarkerSize',MarkSize,... 204 | 'MarkerEdgeColor',EdgeColor,... 205 | 'MarkerFaceColor',FaceColor,... 206 | 'LineWidth', LineWidth); 207 | %--- plot Y error bars --- 208 | if (isnan(ColYe(1))==0), 209 | H = plot([Data(I,ColX); Data(I,ColX)],... 210 | [Data(I,ColY)-Data(I,ColYe(1)); Data(I,ColY)+Data(I,ColYe(2))],... 211 | '-'); 212 | Hyerr(I) = H; 213 | set(Hyerr,'Color',ColorEB,... 214 | 'LineStyle',LineEB,... 215 | 'LineWidth',WidthEB); 216 | 217 | %--- Error bar edge --- 218 | if (EdgeEB>0), 219 | %--- Y error bar lower edge --- 220 | H = plot([Data(I,ColX)-EdgeX; Data(I,ColX)+EdgeX],... 221 | [Data(I,ColY)-Data(I,ColYe(1)); Data(I,ColY)-Data(I,ColYe(1))],... 222 | '-'); 223 | Heyel(I) = H; 224 | set(Heyel,'Color',ColorEB,... 225 | 'LineStyle',LineEB,... 226 | 'LineWidth',WidthEB); 227 | %--- Y error bar upper edge --- 228 | H = plot([Data(I,ColX)-EdgeX; Data(I,ColX)+EdgeX],... 229 | [Data(I,ColY)+Data(I,ColYe(2)); Data(I,ColY)+Data(I,ColYe(2))],... 230 | '-'); 231 | Heyeu(I) = H; 232 | set(Heyeu,'Color',ColorEB,... 233 | 'LineStyle',LineEB,... 
234 | 'LineWidth',WidthEB); 235 | end 236 | end 237 | %--- plot X error bars --- 238 | if (isnan(ColXe(1))==0), 239 | H = plot([Data(I,ColX)-Data(I,ColXe(1)); Data(I,ColX)+Data(I,ColXe(2))],... 240 | [Data(I,ColY); Data(I,ColY)],... 241 | '-'); 242 | Hxerr(I) = H; 243 | set(Hxerr,'Color',ColorEB,... 244 | 'LineStyle',LineEB,... 245 | 'LineWidth',WidthEB); 246 | 247 | %--- Error bar edge --- 248 | if (EdgeEB>0), 249 | %--- X error bar lower edge --- 250 | H = plot([Data(I,ColX)-Data(I,ColXe(1)); Data(I,ColX)-Data(I,ColXe(1))],... 251 | [Data(I,ColY)-EdgeY; Data(I,ColY)+EdgeY],... 252 | '-'); 253 | Hexel(I) = H; 254 | set(Hexel,'Color',ColorEB,... 255 | 'LineStyle',LineEB,... 256 | 'LineWidth',WidthEB); 257 | %--- X error bar upper edge --- 258 | H = plot([Data(I,ColX)+Data(I,ColXe(2)); Data(I,ColX)+Data(I,ColXe(2))],... 259 | [Data(I,ColY)-EdgeY; Data(I,ColY)+EdgeY],... 260 | '-'); 261 | Hexer(I) = H; 262 | set(Hexer,'Color',ColorEB,... 263 | 'LineStyle',LineEB,... 264 | 'LineWidth',WidthEB); 265 | end 266 | end 267 | 268 | 269 | end 270 | 271 | %--------------------------- 272 | %--- connect data points --- 273 | %--------------------------- 274 | switch Con 275 | case 'n' 276 | % do nothing 277 | case 'y' 278 | Hcon = plot(Data(:,ColX),Data(:,ColY),'-'); 279 | set(Hcon,'Color',ConColor,... 280 | 'LineStyle',ConLine,... 281 | 'LineWidth',ConWidth); 282 | otherwise 283 | error('Unknown Con option'); 284 | end 285 | 286 | %--------------------------- 287 | %--- set scale/direction --- 288 | %--------------------------- 289 | set(gca,'XScale',ScaleX,... 290 | 'YScale',ScaleY,... 291 | 'XDir',DirX,... 
292 | 'YDir',DirY); 293 | 294 | %--- return hold to riginal state --- 295 | set(gcf,'NextPlot',NextPlot); 296 | end 297 | -------------------------------------------------------------------------------- /errorxym.m: -------------------------------------------------------------------------------- 1 | % errorxym - plots multiple sequences of points with different 2 | % markers (without connecting lines) 3 | % 4 | % Syntax 5 | % errorxym(X, Ym, Ys, varargin) 6 | % 7 | % See also 8 | % errorxy 9 | % 10 | % Reference 11 | % "Estimation of low-rank tensors via convex optimization" 12 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 13 | % arXiv:1010.0789 14 | % http://arxiv.org/abs/1010.0789 15 | % 16 | % "Statistical Performance of Convex Tensor Decomposition" 17 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 18 | % NIPS 2011 19 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 20 | % 21 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 22 | % Ryota Tomioka, Taiji Suzuki 23 | % NIPS 2013 24 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 25 | % 26 | % Copyright(c) 2010-2014 Ryota Tomioka 27 | % This software is distributed under the MIT license. 
See license.txt 28 | 29 | function errorxym(X, Ym, Ys, varargin) 30 | 31 | if size(X,1)0)/length(ind2); 136 | fp(ii)=sum(abs(err(ix(1:ii)))==0)/(ntr-length(ind2)); 137 | end 138 | 139 | figure, plot(fp, tp, '-x', 'linewidth',2); 140 | grid on; 141 | xlabel('FP rate'); 142 | ylabel('TP rate'); 143 | 144 | end -------------------------------------------------------------------------------- /exp_denoising.m: -------------------------------------------------------------------------------- 1 | % exp_denoising - performs denoising experiments 2 | % 3 | % See also 4 | % exp_completion, plot_denoising, plot_compare_denoising 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. See license.txt 24 | 25 | 26 | RandStream.setDefaultStream ... 
27 | (RandStream('mt19937ar','seed',sum(100*clock))); 28 | 29 | 30 | gitstring = gitlog; 31 | 32 | 33 | nsample=20; 34 | nrep=10; 35 | 36 | % sz=[50 50 20]; 37 | sz=input('size='); 38 | sigma=input('sigma='); 39 | 40 | tol=1e-4; 41 | 42 | methods = {'constraint','mixture'}; % ,'tuckertrue'}; 43 | 44 | lambda=exp(linspace(log(0.1),log(100),20)); 45 | 46 | time=nan*ones(nrep, length(lambda), length(methods)); 47 | res=nan*ones(nrep, length(lambda), length(methods)); 48 | err=nan*ones(nrep, length(lambda), length(methods)); 49 | 50 | 51 | for ll=1:nsample 52 | % dtrue=[rr(ll),rr(ll),3]; 53 | dtrue=max(1,round(rand(1,3).*sz)); 54 | 55 | fprintf('====================== sz=%s dtrue=%s =============================\n', printvec(sz), printvec(dtrue)); 56 | 57 | for kk=1:nrep 58 | X0=randtensor3(sz,dtrue); 59 | nn=prod(sz); 60 | ind=(1:nn)'; 61 | [I,J,K]=ind2sub(sz, ind); 62 | 63 | [X0out,Z0,A0,fval0,res0]=tensormix_adm(zeros(sz), {I,J,K}, X0(:), ... 64 | 0, 'tol', 1e-4); 65 | 66 | rank_mix(kk,:)=[sum(svd(Z0{1})>1e-9), sum(svd(Z0{2})>1e-9), sum(svd(Z0{3})>1e-9)]; 67 | fprintf('Latent rank=%s\n', printvec(rank_mix(kk,:))); 68 | 69 | Y=X0+sigma*randn(sz); 70 | 71 | for mm=1:length(methods) 72 | switch(methods{mm}) 73 | case 'constraint' 74 | %% Constrained 75 | for jj=1:length(lambda) 76 | t0=cputime; 77 | [X,Z,A,fval,ress]=tensorconst_adm(zeros(sz),{I,J,K},Y(:),lambda(jj),'tol',tol); 78 | time(kk,jj,mm)=cputime-t0; 79 | res(kk,jj,mm)=ress(end); 80 | err(kk,jj,mm)=norm(X(:)-X0(:)); 81 | rank_obtained{kk,jj,mm}=[rank(flatten(X,1),1e-9),... 82 | rank(flatten(X,2),1e-9),... 83 | rank(flatten(X,3),1e-9)]; 84 | end 85 | case 'mixture' 86 | %% Mixture 87 | for jj=1:length(lambda) 88 | t0=cputime; 89 | [X2,Z2,fval2,res2]=tensormix_adm(zeros(sz), {I,J,K}, Y(:), ... 90 | lambda(jj), 'tol', tol); 91 | time(kk,jj,mm)=cputime-t0; 92 | res(kk,jj,mm)=res2(end); 93 | err(kk,jj,mm)=norm(X2(:)-X0(:)); 94 | rank_obtained{kk,jj,mm}=[rank(Z2{1},1e-9),... 95 | rank(Z2{2},1e-9),... 
96 | rank(Z2{3},1e-9)]; 97 | end 98 | 99 | case {'tucker','tuckertrue'} 100 | %% Tucker 101 | yfact=std(Y(:))*2; 102 | Xobs=Y/yfact; 103 | Options(5)=100; 104 | if strcmp(methods{mm},'tuckertrue') 105 | dd = dtrue; 106 | else 107 | dd = min(round(dtrue*1.5), sz); 108 | end 109 | t0=cputime; 110 | [Factors,G,ExplX,Xm]=tucker(Xobs, dd, Options); 111 | Xm=Xm*yfact; 112 | time(kk,1,mm)=cputime-t0; 113 | res(kk,1,mm)=nan; 114 | err(kk,1,mm)=norm(Xm(:)-X0(:)); 115 | otherwise 116 | error('Method [%s] unknown!', methods{mm}); 117 | end 118 | fprintf('kk=%d: [%s] err=%s\n', kk, methods{mm}, printvec(err(kk,:,mm))); 119 | 120 | end 121 | 122 | end 123 | file_save=sprintf('result_compare_full_%d_%d_%d_%d_%d_%d_nrep=%d_sigma=%g.mat',sz(1),sz(2),sz(3),dtrue(1),dtrue(2),dtrue(3),nrep,sigma); 124 | 125 | 126 | save(file_save,'nrep', 'sz', 'dtrue', 'methods','lambda','err','res','time','sigma','tol','rank_mix','gitstring','rank_obtained'); 127 | 128 | 129 | end 130 | 131 | -------------------------------------------------------------------------------- /exp_postproc.m: -------------------------------------------------------------------------------- 1 | % exp_postproc - performs CP decomposition after overlapped approach 2 | % 3 | % See also 4 | % tensorconst_adm 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | 
% This software is distributed under the MIT license. See license.txt 24 | 25 | 26 | addpath ver3.1/ 27 | 28 | load amino.mat 29 | 30 | X0=permute(reshape(X,DimX), [2,3,1]); 31 | 32 | 33 | sz=size(X0); 34 | nn=prod(sz); 35 | 36 | ntr=round(.5*nn); 37 | 38 | tol=1e-3; 39 | dparafac=4; 40 | 41 | % Options for tucker/parafac 42 | Options(4)=2; % no scaling 43 | Options(5)=100; % display every 100 iterations 44 | 45 | 46 | % 47 | % Method 1: first train "const" and apply parafac 48 | % 49 | ind=randperm(nn); ind=ind(1:ntr)'; 50 | ind_test=setdiff(1:prod(sz), ind); 51 | [I,J,K]=ind2sub(sz,ind); 52 | yy=X0(ind); 53 | t0=cputime; 54 | [X,Z,Y,fval,gvals]=tensorconst_adm(zeros(sz),{I,J,K},yy,0,'tol', tol); 55 | 56 | % Estimate the rank 57 | r1 = numcomp(svd(Z{1})); 58 | r2 = numcomp(svd(Z{2})); 59 | r3 = numcomp(svd(Z{3})); 60 | 61 | % Extract factors 62 | [U1,S,V]=pca(Z{1},r1,20); ss=diag(S); 63 | [U2,S,V]=pca(Z{2},r2,20); 64 | [U3,S,V]=pca(Z{3},r3,20); 65 | C=kolda3(X,U1',U2',U3'); 66 | F=parafac(C,dparafac, Options); 67 | time(3)=cputime-t0; 68 | err(3)=norm(X(ind_test)-X0(ind_test))/norm(X0(:)); 69 | U1=flipsign(U1*F{1}); 70 | U2=flipsign(U2*F{2}); 71 | U3=flipsign(U3*F{3}); 72 | figure 73 | subplot(3,3,3); 74 | plot(EmAx, U1, 'linewidth',2); grid on; 75 | title('Proposed(4)','fontsize',16); 76 | subplot(3,3,6); 77 | plot(ExAx, U2, 'linewidth',2); grid on; 78 | subplot(3,3,9); 79 | plot(1:5, U3, 'linewidth',2); grid on; 80 | 81 | % 82 | % Method 2: directly apply parafac 83 | % 84 | yfact=std(yy)*10; 85 | Xobs=zeros(sz); 86 | Xobs(ind)=X0(ind)/yfact; 87 | Xobs(ind_test)=nan; 88 | t0=cputime; 89 | Factors=parafac(Xobs,dparafac,Options); 90 | time(2)=cputime-t0; 91 | Xp=nmodel(Factors)*yfact; 92 | err(2)=norm(Xp(ind_test)-X0(ind_test))/norm(X0(:)); 93 | subplot(3,3,2); 94 | plot(EmAx, Factors{1}*yfact, 'linewidth',2); 95 | title('PARAFAC(4)','fontsize',16); 96 | grid on; 97 | subplot(3,3,5); 98 | plot(ExAx, Factors{2}, 'linewidth',2); 99 | grid on; 100 | subplot(3,3,8); 
101 | plot(1:5, Factors{3}, 'linewidth',2); 102 | grid on; 103 | 104 | 105 | % 106 | % Method 0: True parafac 107 | % 108 | t0=cputime; 109 | F0=parafac(Xobs, 3, Options); 110 | time(1)=cputime-t0; 111 | Xp=nmodel(F0)*yfact; % bugfix: reconstruct from Method 0's own factors F0 (was nmodel(Factors), i.e. Method 2's model, so err(1) silently duplicated err(2)) 112 | err(1)=norm(Xp(ind_test)-X0(ind_test))/norm(X0(:)); 113 | subplot(3,3,1); 114 | plot(EmAx, F0{1}*yfact, 'linewidth',2); 115 | grid on; 116 | title('PARAFAC(3)','fontsize',16); 117 | ylabel('Emission loadings','fontsize',16); 118 | subplot(3,3,4); 119 | plot(ExAx, F0{2}, 'linewidth',2); 120 | grid on; 121 | ylabel('Excitation loadings','fontsize',16); 122 | subplot(3,3,7); 123 | plot(1:5, F0{3}, 'linewidth',2); 124 | grid on; 125 | ylabel('Sample loadings','fontsize',16); 126 | 127 | 128 | set(gcf,'papersize',[20 20]); 129 | 130 | -------------------------------------------------------------------------------- /exp_tensor_tradeoff.m: -------------------------------------------------------------------------------- 1 | % exp_tensor_tradeoff - runs an experiment for CP tensor 2 | % 3 | % Reference 4 | % "Estimation of low-rank tensors via convex optimization" 5 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 6 | % arXiv:1010.0789 7 | % http://arxiv.org/abs/1010.0789 8 | % 9 | % "Statistical Performance of Convex Tensor Decomposition" 10 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 11 | % NIPS 2011 12 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 13 | % 14 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 15 | % Ryota Tomioka, Taiji Suzuki 16 | % NIPS 2013 17 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 18 | % 19 | % Copyright(c) 2010-2014 Ryota Tomioka 20 | % This software is distributed under the MIT license. See license.txt 21 | 22 | 23 | addpath ConvexTensor/TotanaImpl 24 | addpath tensor_toolbox 25 | 26 | RandStream.setGlobalStream ...
27 | (RandStream('mt19937ar','seed',sum(100*clock))); 28 | 29 | gitstring = gitlog; 30 | 31 | nrep=10; 32 | nsample = 1; 33 | sz=[10 10 5]; 34 | trfrac=0.5:0.05:0.95; 35 | sigma=0.001; 36 | tol=1e-3; 37 | 38 | % methods = {'matrix','constraint','nn','parafac','parafactrue'}; % , 'l2ball'}; 39 | methods = {'constraint','l2ball'}; 40 | 41 | 42 | methodParameters.gamma=1e-4; % Parameter that ponders the importance of the regularizer. It can take any positive real value. 43 | methodParameters.beta=0.1; % Parameter of ADMM (see eq. 9 in the paper). It can take any positive real value. 44 | % methodParameters.radius=1; % radius of the \ell_2 ball (see Sec. 3 of the paper). In principle it can take any positive real value. 45 | % If it is not specified, it is estimated using last formula in pag. 6 in the paper. 46 | methodParameters.nIt=200; 47 | methodParameters.threshold=10^-20; 48 | 49 | mstr=method_names_string(methods); 50 | 51 | for ll=1:nsample 52 | % dtrue=round(rand(1,3)*40); 53 | dtrue=5; 54 | % dtrue=[50 50 5]; 55 | for kk=1:nrep 56 | X0=randtensor3(sz,dtrue); 57 | nn=prod(sz); 58 | 59 | file_save=sprintf('result_tensor_tradeoff_%d_%d_%d_%d_%s_nrep=%d_sigma=%g_tol=%g.mat',sz(1),sz(2),sz(3),dtrue,mstr,nrep,sigma,tol); 60 | 61 | for ii=1:length(trfrac) 62 | [ind, ind_test]=randsplit(nn, trfrac(ii)); 63 | [I,J,K]=ind2sub(sz,ind); 64 | yy=X0(ind)+sigma*randn(length(ind),1); 65 | 66 | for mm=1:length(methods) 67 | clear X res 68 | switch(methods{mm}) 69 | case 'matrix' 70 | %% Tensor as a matrix 71 | J1=sub2ind(sz(2:end), J, K); 72 | t0=cputime; 73 | [X,Z1,Y1,fval1,res]=matrix_adm(zeros(sz(1), prod(sz(2:end))),{I,J1}, yy, 0, 'tol', tol); 74 | time(kk,ii,mm)=cputime-t0; 75 | case 'constraint' 76 | %% Constrained 77 | t0=cputime; 78 | [X,Z,Y,fval,res]=tensorconst_adm(zeros(sz),{I,J,K},yy,0,'tol',tol); 79 | time(kk,ii,mm)=cputime-t0; 80 | case 'mixture' 81 | %% Mixture 82 | t0=cputime; 83 | [X,Z2,fval2,res]=tensormix_adm(zeros(sz), {I,J,K}, yy, ... 
84 | 0, 'tol', tol); 85 | time(kk,ii,mm)=cputime-t0; 86 | case 'nn' 87 | t0=cputime; 88 | [X,Z,Y,fval,res]=tensorcomplnn_adm(X0, {I,J,K}, yy, ... 89 | 0, 0, 'tol', tol, ... 90 | 'display', 1,... 91 | 'maxiter', 30); 92 | time(kk,ii,mm)=cputime-t0; 93 | case {'parafac', 'parafactrue'} 94 | %% PARAFAC 95 | yfact=std(yy)*2; 96 | Xobs=zeros(sz); 97 | Xobs(ind)=yy/yfact; 98 | Xobs(ind_test)=nan; 99 | Options(5)=100; 100 | if strcmp(methods{mm},'parafactrue') 101 | dd = dtrue; 102 | else 103 | dd = min(round(dtrue*1.2), max(sz)); 104 | end 105 | t0=cputime; 106 | Factors=parafac(Xobs, dd, Options); 107 | X=nmodel(Factors)*yfact; 108 | time(kk,ii,mm)=cputime-t0; 109 | res=nan; 110 | case 'l2ball' 111 | Knowns=zeros(sz); 112 | Knowns(ind)=1; 113 | Xobs=zeros(sz); 114 | Xobs(ind)=(yy-mean(yy))/std(yy); 115 | t0=cputime; 116 | data=completion2MTL(struct('Tensor', tensor(Xobs), 'KnownInputs', tensor(Knowns)),[]); 117 | l2Ball=MLMTL_ConvexL2BallRadiusMTL(methodParameters, 'l_2 Ball'); 118 | l2Ball=train(l2Ball, data); 119 | time(kk,ii,mm)=cputime-t0; 120 | X=l2Ball.model.allW*std(yy)+mean(yy); 121 | res=nan; 122 | otherwise 123 | error('Method [%s] unknown!', methods{mm}); 124 | end 125 | res(kk,ii,mm)=res(end); 126 | err(kk,ii,mm)=norm(X(ind_test)-X0(ind_test))/norm(X0(ind_test)); 127 | end 128 | fprintf('frac=%g\nerr=%s\n',trfrac(ii), printvec(err(kk,ii,:))); 129 | fprintf('time=%s\n', printvec(time(kk,ii,:))); 130 | % fprintf('frac=%g\nerr1=%s err2=%g err3=%g err4=%g err5=%g\n',... 131 | % trfrac(ii), printvec(err(kk,ii,1:3)),... 
132 | % err(kk,ii,4), err(kk,ii,5), err(kk,ii,6), err(kk,ii,7)); 133 | % fprintf('time1=%g time2=%g time3=%g time4=%g time5=%g\n', time(kk,ii,1),time(kk,ii,2),time(kk,ii,3),time(kk,ii,4),time(kk,ii,5)); 134 | end 135 | end 136 | 137 | 138 | 139 | save(file_save,'nrep', 'sz', 'dtrue', 'sigma', 'methods','err', 'trfrac','res','time','gitstring','methodParameters'); 140 | 141 | 142 | end 143 | 144 | nm=length(methods); 145 | figure 146 | subplot(1,2,1); 147 | h=errorbar(trfrac'*ones(1,nm), shiftdim(mean(err)), ... 148 | shiftdim(std(err))); 149 | set(h, 'linewidth', 2); 150 | set(gca,'fontsize', 14); 151 | xlabel('Fraction of observed elements'); 152 | ylabel('Generalization error'); 153 | legend(methods); 154 | grid on; 155 | 156 | subplot(1,2,2); 157 | h=errorbar(trfrac'*ones(1,nm), shiftdim(mean(time)), ... 158 | shiftdim(std(time))); 159 | set(h, 'linewidth', 2); 160 | set(gca,'fontsize', 14); 161 | xlabel('Fraction of observed elements'); 162 | ylabel('CPU time'); 163 | legend(methods); 164 | grid on; 165 | -------------------------------------------------------------------------------- /eye3.m: -------------------------------------------------------------------------------- 1 | % eye3 - 3rd order super diagonal identity tensor 2 | % 3 | % Syntax 4 | % E=eye3(n) 5 | % 6 | % See also 7 | % diag3 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 24 
| % 25 | % Copyright(c) 2010-2014 Ryota Tomioka 26 | % This software is distributed under the MIT license. See license.txt 27 | 28 | 29 | function E=eye3(n) 30 | 31 | E=zeros(n,n,n); 32 | 33 | for ii=1:n 34 | E(ii,ii,ii)=1; 35 | end 36 | -------------------------------------------------------------------------------- /flatten.m: -------------------------------------------------------------------------------- 1 | % flatten - mode-k unfolding of a tensor 2 | % 3 | % Syntax 4 | % Z=flatten(X,ind) 5 | % 6 | % See also 7 | % flatten_adj 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 24 | % 25 | % Copyright(c) 2010-2014 Ryota Tomioka 26 | % This software is distributed under the MIT license. 
See license.txt

function Z=flatten(X,ind)
% Unfolds tensor X into a matrix. ind is either a numeric mode index k
% (rows = mode k, columns = remaining modes in cyclic order) or a 2-cell
% {rowModes, colModes} giving an explicit partition of the modes.

dims=size(X);

if isnumeric(ind)
  % single mode given: build the {k, [k+1..nd, 1..k-1]} partition
  nmodes=max(ndims(X),ind);
  ind={ind, [ind+1:nmodes, 1:ind-1]};
else
  nmodes=max(cellfun(@max,ind));
end

rowModes=ind{1};

% permuting is a no-op exactly when the row part is the single mode 1
if ~(isscalar(rowModes) && rowModes==1)
  X=permute(X,cell2mat(ind));
end

if isscalar(rowModes)
  Z=X(:,:);
else
  Z=reshape(X,[prod(dims(rowModes)),prod(dims(ind{2}))]);
end
--------------------------------------------------------------------------------
/flatten_adj.m:
--------------------------------------------------------------------------------
% flatten_adj - mode-k folding of a matrix X into a tensor
%
% Syntax
%  X=flatten_adj(X,sz,ind)
%
% See also
%  flatten
%
% Reference
% "Estimation of low-rank tensors via convex optimization"
% Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
% arXiv:1010.0789
% http://arxiv.org/abs/1010.0789
%
% "Statistical Performance of Convex Tensor Decomposition"
% Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima
% NIPS 2011
% http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf
%
% Convex Tensor Decomposition via Structured Schatten Norm Regularization
% Ryota Tomioka, Taiji Suzuki
% NIPS 2013
% http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license.
See license.txt

function X=flatten_adj(X,sz,ind)

nd=length(sz);

if isnumeric(ind)
  ind = {ind, [ind+1:nd,1:ind-1]};
end

sz=sz(cell2mat(ind));
X=reshape(X,sz);
if length(ind{1})~=1 || ind{1}~=1
  [ss,indInv]=sort(cell2mat(ind));
  X=permute(X,indInv);
  % X=permute(X,[nd-jj+2:nd 1:nd-jj+1]);
end
--------------------------------------------------------------------------------
/gitlog.m:
--------------------------------------------------------------------------------
% gitlog - returns recent git log entries of the working copy as a string
%
% Syntax
%  str=gitlog(number,hashonly)
%
% Inputs
%  number   - number of commits to include (default 1)
%  hashonly - if nonzero, return only abbreviated commit hashes (default 0)
%
% Returns '' on Windows, where shelling out to git is not supported here.
function str=gitlog(number,hashonly)

if ~exist('number','var')
  number=1;
end

if ~exist('hashonly','var')
  hashonly=0;
end


os=computer;

if isequal(os(1:4),'PCWIN')
  str='';
  return;
end

if isequal(os(1:3),'MAC')
  gitcmd = '/usr/local/git/bin/git';
else
  gitcmd = 'git';
end

if hashonly
  command = sprintf('%s --no-pager log -%d --format="%%h"',...
                    gitcmd, number);
else
  command = sprintf('%s --no-pager log -%d --format="%%ci %%h %%s"',...
                    gitcmd, number);
end

[res,str]=system(command);

% Strip the trailing newline emitted by git. The original unconditional
% str(end)=[] raised an error whenever the command produced no output
% (git not installed, or the cwd is not a repository); guard against that.
if ~isempty(str) && str(end)==char(10)
  str(end)=[];
end
--------------------------------------------------------------------------------
/ispropertystruct.m:
--------------------------------------------------------------------------------
function t = ispropertystruct(opts)
% ISPROPERTYSTRUCT - Check whether a structure contains optional parameters
%
% T = ISPROPERTYSTRUCT(OPTS)
% returns 1 if OPTS is a structure generated by PROPERTYLIST2STRUCT.
%
%
% See also PROPERTYLIST2STRUCT
%

% Copyright Fraunhofer FIRST.IDA (2004)
% $Id: ispropertystruct.m,v 1.1 2004/08/16 11:52:17 neuro_toolbox Exp $

error(nargchk(1, 1, nargin));
% Currently, we do not check the version number.
Existence of the field 16 | % is enough to identify the opts structure as a property list 17 | t = isfield(opts, 'isPropertyStruct'); 18 | -------------------------------------------------------------------------------- /kolda3.m: -------------------------------------------------------------------------------- 1 | % kolda3 - Comptues [[C; U1, U2, U3]] 2 | % 3 | % Syntax 4 | % X = kolda3(C, U1, U2, U3) 5 | % 6 | % See also 7 | % 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 24 | % 25 | % Copyright(c) 2010-2014 Ryota Tomioka 26 | % This software is distributed under the MIT license. 
See license.txt

function X = kolda3(C, U1, U2, U3)
% Computes the Tucker product [[C; U1, U2, U3]]: the 3rd-order tensor
% obtained by multiplying core C by Uk along mode k. Output is
% size(U1,1) x size(U2,1) x size(U3,1); the core may have fewer than
% 3 dimensions (trailing singleton assumed) and each Uk is truncated
% to the first size(C,k) columns.

sz=[size(U1,1), size(U2,1), size(U3,1)];

dims=size(C);
if ndims(C)<3
  dims(3)=1;
end

U1=U1(:,1:dims(1));
U2=U2(:,1:dims(2));
U3=U3(:,1:dims(3));

% Preallocate the result; the original grew X one frontal slice at a
% time inside the loop, reallocating on every iteration.
X=zeros(sz);

for ii=1:sz(3)
  % mode-3 contraction for slice ii, then modes 1 and 2 via U1 * Cii * U2'
  Cii=reshape(reshape(C,[dims(1)*dims(2), dims(3)])*U3(ii,:)',dims(1:2));
  X(:,:,ii)=U1*Cii*U2';
end
--------------------------------------------------------------------------------
/matrix_adm.m:
--------------------------------------------------------------------------------
% matrix_adm - Computes reconstruction of a partly observed matrix
%
% Syntax
%  [X,Z,A,fval,res]=matrix_adm(X, I, yy, lambda, varargin)
%
% See also
%  tensor_as_matrix
%
% Reference
% "Estimation of low-rank tensors via convex optimization"
% Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
% arXiv:1010.0789
% http://arxiv.org/abs/1010.0789
%
% "Statistical Performance of Convex Tensor Decomposition"
% Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima
% NIPS 2011
% http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf
%
% Convex Tensor Decomposition via Structured Schatten Norm Regularization
% Ryota Tomioka, Taiji Suzuki
% NIPS 2013
% http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,A,fval,res]=matrix_adm(X, I, yy, lambda, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta',[], 'yfact', 10, 'gamma', [], 'tol', ...
32 | 1e-3, 'verbose', 0, 'relative',1,'maxiter',2000); 33 | 34 | if ~isempty(opt.gamma) 35 | gamma=opt.gamma; 36 | else 37 | gamma=1; 38 | end 39 | 40 | if ~isempty(opt.eta) 41 | eta=opt.eta; 42 | else 43 | eta=1/(opt.yfact*std(yy)); 44 | end 45 | 46 | 47 | sz=size(X); 48 | 49 | m=length(yy); 50 | 51 | 52 | Z=zeros(size(X)); 53 | A=zeros(size(X)); 54 | 55 | B=zeros(sz); 56 | ind=sub2ind(sz,I{:}); 57 | B(ind)=yy; 58 | 59 | kk=1; 60 | nsv=10; 61 | while 1 62 | if lambda>0 63 | X = (B/lambda+eta*Z-A)./((B~=0)/lambda+eta); 64 | else 65 | X=Z-A/eta; 66 | X(ind)=yy; 67 | end 68 | 69 | Z0=Z; 70 | [Z,ss,nsv]=softth(X+A/eta,gamma/eta,nsv); 71 | 72 | A=A+eta*(X-Z); 73 | 74 | viol = norm(X(:)-Z(:)); 75 | 76 | 77 | fval(kk)=gamma*sum(svd(X)); 78 | if lambda>0 79 | fval(kk)=fval(kk)+0.5*sum((X(ind)-yy).^2)/lambda; 80 | end 81 | 82 | 83 | % gval(kk)=eta*norm(Z(:)-Z0(:)); %norm(G(:)); 84 | gap=fval(kk)+evalDual(A, yy, lambda, gamma, ind); 85 | if opt.relative 86 | gap=gap/fval(kk); 87 | end 88 | res(kk)=gap; 89 | 90 | if opt.verbose 91 | fprintf('[%d] fval=%g res=%g viol=%g\n', kk, fval(kk), res(kk), ... 92 | viol); 93 | end 94 | 95 | 96 | if res(kk)opt.maxiter 101 | break; 102 | end 103 | 104 | kk=kk+1; 105 | end 106 | 107 | fprintf('[%d] fval=%g res=%g viol=%g eta=%g\n', kk, fval(kk), res(kk), ... 
108 | viol,eta); 109 | 110 | 111 | function dval=evalDual(A, yy, lambda, gamma, ind) 112 | 113 | sz=size(A); 114 | ind_te=setdiff(1:prod(sz),ind); 115 | A(ind_te)=0; 116 | ss=pcaspec(A,1,10); 117 | A=A/max(1,ss/gamma); 118 | 119 | dval = 0.5*lambda*norm(A(ind))^2 - yy'*A(ind); 120 | 121 | -------------------------------------------------------------------------------- /matrixl1_adm.m: -------------------------------------------------------------------------------- 1 | % matrixl1_adm - computes sparse+low-rank decomposition of 2 | % partially observed matrix 3 | % 4 | % Syntax 5 | % [X,Z,A,fval,res]=matrixl1_adm(X, I, yy, lambda, varargin) 6 | % 7 | % See also 8 | % tensor_as_matrix 9 | % 10 | % Reference 11 | % "Estimation of low-rank tensors via convex optimization" 12 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 13 | % arXiv:1010.0789 14 | % http://arxiv.org/abs/1010.0789 15 | % 16 | % "Statistical Performance of Convex Tensor Decomposition" 17 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 18 | % NIPS 2011 19 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 20 | % 21 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 22 | % Ryota Tomioka, Taiji Suzuki 23 | % NIPS 2013 24 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 25 | % 26 | % Copyright(c) 2010-2014 Ryota Tomioka 27 | % This software is distributed under the MIT license. 
See license.txt 28 | 29 | function [X,Z,A,fval,res]=matrixl1_adm(X, I, yy, lambda, varargin) 30 | 31 | opt=propertylist2struct(varargin{:}); 32 | opt=set_defaults(opt, 'eta',[], 'eta1', [], 'yfact', 10, 'gamma', [], 'tol', 1e-3, 'verbose', 0,'maxiter',2000); 33 | 34 | if ~isempty(opt.gamma) 35 | gamma=opt.gamma; 36 | else 37 | gamma=1; 38 | end 39 | 40 | if ~isempty(opt.eta) 41 | eta=opt.eta; 42 | else 43 | eta=1/(opt.yfact*std(yy)); 44 | end 45 | 46 | if ~isempty(opt.eta1) 47 | eta1=opt.eta1; 48 | else 49 | eta1=1/(opt.yfact*std(yy)); 50 | end 51 | 52 | 53 | sz=size(X); 54 | 55 | m=length(yy); 56 | 57 | 58 | Z=X; 59 | A=zeros(size(X)); 60 | 61 | Y=zeros(sz); 62 | ind=sub2ind(sz,I{:}); 63 | Y(ind)=yy; 64 | 65 | delta = zeros(m,1); 66 | beta = zeros(m,1); 67 | 68 | kk=1; 69 | nsv=10; 70 | dval=-inf; 71 | while 1 72 | % X update 73 | X1=eta*Z-A; 74 | X1(ind)=X1(ind) + eta1*(yy-delta-beta/eta1); 75 | X = X1./(eta1*(Y~=0)+eta); 76 | 77 | % delta update 78 | [delta,ss] = l1_softth(yy-X(ind)-beta/eta1, 1/lambda/eta1); 79 | 80 | % Z update 81 | [Z,ss,nsv]=softth(X+A/eta,gamma/eta,nsv); 82 | 83 | % A update 84 | A=A+eta*(X-Z); 85 | 86 | % beta update 87 | beta = beta + eta1*(X(ind)+delta-yy); 88 | 89 | viol = [norm(X(:)-Z(:)), norm(X(ind)+delta-yy)]; 90 | 91 | 92 | fval(kk)=gamma*sum(svd(X)); 93 | if lambda>0 94 | fval(kk)=fval(kk)+sum(abs(X(ind)-yy))/lambda; 95 | end 96 | 97 | 98 | % gval(kk)=eta*norm(Z(:)-Z0(:)); %norm(G(:)); 99 | dval = max(dval, -evalDual(A, beta, yy, lambda, gamma, ind)); 100 | res(kk)=1-dval/fval(kk); 101 | % res(kk) = max(viol); 102 | 103 | if opt.verbose 104 | fprintf('[%d] fval=%g res=%g viol=%s\n', kk, fval(kk), res(kk), ... 105 | printvec(viol)); 106 | end 107 | 108 | 109 | if res(kk)opt.maxiter 114 | break; 115 | end 116 | 117 | kk=kk+1; 118 | end 119 | 120 | fprintf('[%d] fval=%g res=%g viol=%s eta=%g\n', kk, fval(kk), res(kk), ... 
121 | printvec(viol),eta); 122 | 123 | 124 | function dval=evalDual(A, beta, yy, lambda, gamma, ind) 125 | 126 | sz=size(A); 127 | ind_te=setdiff(1:prod(sz),ind); 128 | A(ind)=-beta; 129 | A(ind_te)=0; 130 | ss=pcaspec(A,1,10); 131 | 132 | fact=min([1,gamma/ss,1/lambda/max(abs(beta))]); 133 | A=A*fact; 134 | beta=beta*fact; 135 | 136 | % fprintf('fact=%g\n',fact); 137 | 138 | dval = yy'*beta; 139 | 140 | 141 | 142 | -------------------------------------------------------------------------------- /method_names_string.m: -------------------------------------------------------------------------------- 1 | function str=method_names_string(methods, len) 2 | 3 | if ~exist('len','var') 4 | len=3; 5 | end 6 | 7 | 8 | str=''; 9 | for mm=1:length(methods) 10 | method=methods{mm}; 11 | str = [str, method(1:min(len,length(method)))]; 12 | if mmmm*0.01); 33 | 34 | -------------------------------------------------------------------------------- /pca.m: -------------------------------------------------------------------------------- 1 | function [U,S,V] = pca(A,k,its,l) 2 | %PCA Low-rank approximation in SVD form. 3 | % 4 | % 5 | % [U,S,V] = PCA(A) constructs a nearly optimal rank-6 approximation 6 | % USV' to A, using 2 full iterations of a block Lanczos method 7 | % of block size 6+2=8, started with an n x 8 random matrix, 8 | % when A is m x n; the ref. below explains "nearly optimal." 9 | % The smallest dimension of A must be >= 6 when A is 10 | % the only input to PCA. 11 | % 12 | % [U,S,V] = PCA(A,k) constructs a nearly optimal rank-k approximation 13 | % USV' to A, using 2 full iterations of a block Lanczos method 14 | % of block size k+2, started with an n x (k+2) random matrix, 15 | % when A is m x n; the ref. below explains "nearly optimal." 16 | % k must be a positive integer <= the smallest dimension of A. 17 | % 18 | % [U,S,V] = PCA(A,k,its) constructs a nearly optimal rank-k approx. 
USV' 19 | % to A, using its full iterations of a block Lanczos method 20 | % of block size k+2, started with an n x (k+2) random matrix, 21 | % when A is m x n; the ref. below explains "nearly optimal." 22 | % k must be a positive integer <= the smallest dimension of A, 23 | % and its must be a nonnegative integer. 24 | % 25 | % [U,S,V] = PCA(A,k,its,l) constructs a nearly optimal rank-k approx. 26 | % USV' to A, using its full iterates of a block Lanczos method 27 | % of block size l, started with an n x l random matrix, 28 | % when A is m x n; the ref. below explains "nearly optimal." 29 | % k must be a positive integer <= the smallest dimension of A, 30 | % its must be a nonnegative integer, 31 | % and l must be a positive integer >= k. 32 | % 33 | % 34 | % The low-rank approximation USV' is in the form of an SVD in the sense 35 | % that the columns of U are orthonormal, as are the columns of V, 36 | % the entries of S are all nonnegative, and the only nonzero entries 37 | % of S appear in non-increasing order on its diagonal. 38 | % U is m x k, V is n x k, and S is k x k, when A is m x n. 39 | % 40 | % Increasing its or l improves the accuracy of the approximation USV' 41 | % to A; the ref. below describes how the accuracy depends on its and l. 42 | % 43 | % 44 | % Note: PCA invokes RAND. To obtain repeatable results, 45 | % invoke RAND('seed',j) with a fixed integer j before invoking PCA. 46 | % 47 | % Note: PCA currently requires the user to center and normalize the rows 48 | % or columns of the input matrix A before invoking PCA (if such 49 | % is desired). 50 | % 51 | % Note: The user may ascertain the accuracy of the approximation USV' 52 | % to A by invoking DIFFSNORM(A,U,S,V). 
53 | % 54 | % 55 | % inputs (the first is required): 56 | % A -- matrix being approximated 57 | % k -- rank of the approximation being constructed; 58 | % k must be a positive integer <= the smallest dimension of A, 59 | % and defaults to 6 60 | % its -- number of full iterations of a block Lanczos method to conduct; 61 | % its must be a nonnegative integer, and defaults to 2 62 | % l -- block size of the block Lanczos iterations; 63 | % l must be a positive integer >= k, and defaults to k+2 64 | % 65 | % outputs (all three are required): 66 | % U -- m x k matrix in the rank-k approximation USV' to A, 67 | % where A is m x n; the columns of U are orthonormal 68 | % S -- k x k matrix in the rank-k approximation USV' to A, 69 | % where A is m x n; the entries of S are all nonnegative, 70 | % and its only nonzero entries appear in nonincreasing order 71 | % on the diagonal 72 | % V -- n x k matrix in the rank-k approximation USV' to A, 73 | % where A is m x n; the columns of V are orthonormal 74 | % 75 | % 76 | % Example: 77 | % A = rand(1000,2)*rand(2,1000); 78 | % A = A/normest(A); 79 | % [U,S,V] = pca(A,2,0); 80 | % diffsnorm(A,U,S,V) 81 | % 82 | % This code snippet produces a rank-2 approximation USV' to A such that 83 | % the columns of U are orthonormal, as are the columns of V, and 84 | % the entries of S are all nonnegative and are zero off the diagonal. 85 | % diffsnorm(A,U,S,V) outputs an estimate of the spectral norm 86 | % of A-USV', which should be close to the machine precision. 87 | % 88 | % 89 | % Reference: 90 | % Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, 91 | % Finding structure with randomness: Stochastic algorithms 92 | % for constructing approximate matrix decompositions, 93 | % arXiv:0909.4061 [math.NA; math.PR], 2009 94 | % (available at http://arxiv.org). 95 | % 96 | % 97 | % See also PCACOV, PRINCOMP, SVDS. 98 | % 99 | 100 | % Copyright 2009 Mark Tygert. 101 | 102 | % 103 | % Check the number of inputs. 
104 | % 105 | if(nargin < 1) 106 | error('MATLAB:pca:TooFewIn',... 107 | 'There must be at least 1 input.') 108 | end 109 | 110 | if(nargin > 4) 111 | error('MATLAB:pca:TooManyIn',... 112 | 'There must be at most 4 inputs.') 113 | end 114 | 115 | % 116 | % Check the number of outputs. 117 | % 118 | if(nargout ~= 3) 119 | error('MATLAB:pca:WrongNumOut',... 120 | 'There must be exactly 3 outputs.') 121 | end 122 | 123 | % 124 | % Set the inputs k, its, and l to default values, if necessary. 125 | % 126 | if(nargin == 1) 127 | k = 6; 128 | its = 2; 129 | l = k+2; 130 | end 131 | 132 | if(nargin == 2) 133 | its = 2; 134 | l = k+2; 135 | end 136 | 137 | if(nargin == 3) 138 | l = k+2; 139 | end 140 | 141 | % 142 | % Check the first input argument. 143 | % 144 | if(~isfloat(A)) 145 | error('MATLAB:pca:In1NotFloat',... 146 | 'Input 1 must be a floating-point matrix.') 147 | end 148 | 149 | if(isempty(A)) 150 | error('MATLAB:pca:In1Empty',... 151 | 'Input 1 must not be empty.') 152 | end 153 | 154 | % 155 | % Retrieve the dimensions of A. 156 | % 157 | [m n] = size(A); 158 | 159 | % 160 | % Check the remaining input arguments. 161 | % 162 | if(size(k,1) ~= 1 || size(k,2) ~= 1) 163 | error('MATLAB:pca:In2Not1x1',... 164 | 'Input 2 must be a scalar.') 165 | end 166 | 167 | if(size(its,1) ~= 1 || size(its,2) ~= 1) 168 | error('MATLAB:pca:In3Not1x1',... 169 | 'Input 3 must be a scalar.') 170 | end 171 | 172 | if(size(l,1) ~= 1 || size(l,2) ~= 1) 173 | error('MATLAB:pca:In4Not1x1',... 174 | 'Input 4 must be a scalar.') 175 | end 176 | 177 | if(k <= 0) 178 | error('MATLAB:pca:In2NonPos',... 179 | 'Input 2 must be > 0.') 180 | end 181 | 182 | if((k > m) || (k > n)) 183 | error('MATLAB:pca:In2TooBig',... 184 | 'Input 2 must be <= the smallest dimension of Input 1.') 185 | end 186 | 187 | if(its < 0) 188 | error('MATLAB:pca:In3Neg',... 189 | 'Input 3 must be >= 0.') 190 | end 191 | 192 | if(l < k) 193 | error('MATLAB:pca:In4ltIn2',... 
194 | 'Input 4 must be >= Input 2.') 195 | end 196 | 197 | % 198 | % SVD A directly if (its+1)*l >= m/1.25 or (its+1)*l >= n/1.25. 199 | % 200 | if(((its+1)*l >= m/1.25) || ((its+1)*l >= n/1.25)) 201 | 202 | if(~issparse(A)) 203 | try 204 | [U,S,V] = svd(A,'econ'); 205 | error(lastwarn); 206 | catch 207 | [Q,R]=qr(A,0); 208 | [V,S,U]=svd(R','econ'); 209 | U=Q*U; 210 | end 211 | end 212 | 213 | if(issparse(A)) 214 | [U,S,V] = svd(full(A),'econ'); 215 | end 216 | % 217 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 218 | % and the uppermost leftmost k x k block of S. 219 | % 220 | U = U(:,1:k); 221 | V = V(:,1:k); 222 | S = S(1:k,1:k); 223 | 224 | return 225 | 226 | end 227 | 228 | 229 | if(m >= n) 230 | 231 | % 232 | % Apply A to a random matrix, obtaining H. 233 | % 234 | rand('seed',rand('seed')); 235 | 236 | if(isreal(A)) 237 | H = A*(2*rand(n,l)-ones(n,l)); 238 | end 239 | 240 | if(~isreal(A)) 241 | H = A*( (2*rand(n,l)-ones(n,l)) + i*(2*rand(n,l)-ones(n,l)) ); 242 | end 243 | 244 | rand('twister',rand('twister')); 245 | 246 | % 247 | % Initialize F to its final size and fill its leftmost block with H. 248 | % 249 | F = zeros(m,(its+1)*l); 250 | F(1:m, 1:l) = H; 251 | 252 | % 253 | % Apply A*A' to H a total of its times, 254 | % augmenting F with the new H each time. 255 | % 256 | for it = 1:its 257 | H = (H'*A)'; 258 | H = A*H; 259 | F(1:m, (1+it*l):((it+1)*l)) = H; 260 | end 261 | 262 | clear H; 263 | 264 | % 265 | % Form a matrix Q whose columns constitute an orthonormal basis 266 | % for the columns of F. 267 | % 268 | [Q,R,E] = qr(F,0); 269 | 270 | clear F R E; 271 | 272 | % 273 | % SVD Q'*A to obtain approximations to the singular values 274 | % and right singular vectors of A; adjust the left singular vectors 275 | % of Q'*A to approximate the left singular vectors of A. 
276 | % 277 | [U2,S,V] = svd(Q'*A,'econ'); 278 | U = Q*U2; 279 | 280 | clear Q U2; 281 | 282 | % 283 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 284 | % and the uppermost leftmost k x k block of S. 285 | % 286 | U = U(:,1:k); 287 | V = V(:,1:k); 288 | S = S(1:k,1:k); 289 | 290 | end 291 | 292 | 293 | if(m < n) 294 | 295 | % 296 | % Apply A' to a random matrix, obtaining H. 297 | % 298 | rand('seed',rand('seed')); 299 | 300 | if(isreal(A)) 301 | H = ((2*rand(l,m)-ones(l,m))*A)'; 302 | end 303 | 304 | if(~isreal(A)) 305 | H = (( (2*rand(l,m)-ones(l,m)) + i*(2*rand(l,m)-ones(l,m)) )*A)'; 306 | end 307 | 308 | rand('twister',rand('twister')); 309 | 310 | % 311 | % Initialize F to its final size and fill its leftmost block with H. 312 | % 313 | F = zeros(n,(its+1)*l); 314 | F(1:n, 1:l) = H; 315 | 316 | % 317 | % Apply A'*A to H a total of its times, 318 | % augmenting F with the new H each time. 319 | % 320 | for it = 1:its 321 | H = A*H; 322 | H = (H'*A)'; 323 | F(1:n, (1+it*l):((it+1)*l)) = H; 324 | end 325 | 326 | clear H; 327 | 328 | % 329 | % Form a matrix Q whose columns constitute an orthonormal basis 330 | % for the columns of F. 331 | % 332 | [Q,R,E] = qr(F,0); 333 | 334 | clear F R E; 335 | 336 | % 337 | % SVD A*Q to obtain approximations to the singular values 338 | % and left singular vectors of A; adjust the right singular vectors 339 | % of A*Q to approximate the right singular vectors of A. 340 | % 341 | [U,S,V2] = svd(A*Q,'econ'); 342 | V = Q*V2; 343 | 344 | clear Q V2; 345 | 346 | % 347 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 348 | % and the uppermost leftmost k x k block of S. 
349 | % 350 | U = U(:,1:k); 351 | V = V(:,1:k); 352 | S = S(1:k,1:k); 353 | 354 | end 355 | -------------------------------------------------------------------------------- /pcaspec.m: -------------------------------------------------------------------------------- 1 | % pcaspec - computes k largest singular values 2 | % 3 | % Syntax 4 | % S = pcaspec(A,k,its,l) 5 | % 6 | % See also 7 | % pca 8 | 9 | function S = pcaspec(A,k,its,l) 10 | %PCA Low-rank approximation in SVD form. 11 | % 12 | % 13 | % [U,S,V] = PCA(A) constructs a nearly optimal rank-6 approximation 14 | % USV' to A, using 2 full iterations of a block Lanczos method 15 | % of block size 6+2=8, started with an n x 8 random matrix, 16 | % when A is m x n; the ref. below explains "nearly optimal." 17 | % The smallest dimension of A must be >= 6 when A is 18 | % the only input to PCA. 19 | % 20 | % [U,S,V] = PCA(A,k) constructs a nearly optimal rank-k approximation 21 | % USV' to A, using 2 full iterations of a block Lanczos method 22 | % of block size k+2, started with an n x (k+2) random matrix, 23 | % when A is m x n; the ref. below explains "nearly optimal." 24 | % k must be a positive integer <= the smallest dimension of A. 25 | % 26 | % [U,S,V] = PCA(A,k,its) constructs a nearly optimal rank-k approx. USV' 27 | % to A, using its full iterations of a block Lanczos method 28 | % of block size k+2, started with an n x (k+2) random matrix, 29 | % when A is m x n; the ref. below explains "nearly optimal." 30 | % k must be a positive integer <= the smallest dimension of A, 31 | % and its must be a nonnegative integer. 32 | % 33 | % [U,S,V] = PCA(A,k,its,l) constructs a nearly optimal rank-k approx. 34 | % USV' to A, using its full iterates of a block Lanczos method 35 | % of block size l, started with an n x l random matrix, 36 | % when A is m x n; the ref. below explains "nearly optimal." 
37 | % k must be a positive integer <= the smallest dimension of A, 38 | % its must be a nonnegative integer, 39 | % and l must be a positive integer >= k. 40 | % 41 | % 42 | % The low-rank approximation USV' is in the form of an SVD in the sense 43 | % that the columns of U are orthonormal, as are the columns of V, 44 | % the entries of S are all nonnegative, and the only nonzero entries 45 | % of S appear in non-increasing order on its diagonal. 46 | % U is m x k, V is n x k, and S is k x k, when A is m x n. 47 | % 48 | % Increasing its or l improves the accuracy of the approximation USV' 49 | % to A; the ref. below describes how the accuracy depends on its and l. 50 | % 51 | % 52 | % Note: PCA invokes RAND. To obtain repeatable results, 53 | % invoke RAND('seed',j) with a fixed integer j before invoking PCA. 54 | % 55 | % Note: PCA currently requires the user to center and normalize the rows 56 | % or columns of the input matrix A before invoking PCA (if such 57 | % is desired). 58 | % 59 | % Note: The user may ascertain the accuracy of the approximation USV' 60 | % to A by invoking DIFFSNORM(A,U,S,V). 
61 | % 62 | % 63 | % inputs (the first is required): 64 | % A -- matrix being approximated 65 | % k -- rank of the approximation being constructed; 66 | % k must be a positive integer <= the smallest dimension of A, 67 | % and defaults to 6 68 | % its -- number of full iterations of a block Lanczos method to conduct; 69 | % its must be a nonnegative integer, and defaults to 2 70 | % l -- block size of the block Lanczos iterations; 71 | % l must be a positive integer >= k, and defaults to k+2 72 | % 73 | % outputs (all three are required): 74 | % U -- m x k matrix in the rank-k approximation USV' to A, 75 | % where A is m x n; the columns of U are orthonormal 76 | % S -- k x k matrix in the rank-k approximation USV' to A, 77 | % where A is m x n; the entries of S are all nonnegative, 78 | % and its only nonzero entries appear in nonincreasing order 79 | % on the diagonal 80 | % V -- n x k matrix in the rank-k approximation USV' to A, 81 | % where A is m x n; the columns of V are orthonormal 82 | % 83 | % 84 | % Example: 85 | % A = rand(1000,2)*rand(2,1000); 86 | % A = A/normest(A); 87 | % [U,S,V] = pca(A,2,0); 88 | % diffsnorm(A,U,S,V) 89 | % 90 | % This code snippet produces a rank-2 approximation USV' to A such that 91 | % the columns of U are orthonormal, as are the columns of V, and 92 | % the entries of S are all nonnegative and are zero off the diagonal. 93 | % diffsnorm(A,U,S,V) outputs an estimate of the spectral norm 94 | % of A-USV', which should be close to the machine precision. 95 | % 96 | % 97 | % Reference: 98 | % Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, 99 | % Finding structure with randomness: Stochastic algorithms 100 | % for constructing approximate matrix decompositions, 101 | % arXiv:0909.4061 [math.NA; math.PR], 2009 102 | % (available at http://arxiv.org). 103 | % 104 | % 105 | % See also PCACOV, PRINCOMP, SVDS. 106 | % 107 | 108 | % Copyright 2009 Mark Tygert. 109 | 110 | % 111 | % Check the number of inputs. 
112 | % 113 | if(nargin < 1) 114 | error('MATLAB:pca:TooFewIn',... 115 | 'There must be at least 1 input.') 116 | end 117 | 118 | if(nargin > 4) 119 | error('MATLAB:pca:TooManyIn',... 120 | 'There must be at most 4 inputs.') 121 | end 122 | 123 | % 124 | % Check the number of outputs. 125 | % 126 | if(nargout ~= 1) 127 | error('MATLAB:pca:WrongNumOut',... 128 | 'There must be exactly one output.') 129 | end 130 | 131 | % 132 | % Set the inputs k, its, and l to default values, if necessary. 133 | % 134 | if(nargin == 1) 135 | k = 6; 136 | its = 2; 137 | l = k+2; 138 | end 139 | 140 | if(nargin == 2) 141 | its = 2; 142 | l = k+2; 143 | end 144 | 145 | if(nargin == 3) 146 | l = k+2; 147 | end 148 | 149 | % 150 | % Check the first input argument. 151 | % 152 | if(~isfloat(A)) 153 | error('MATLAB:pca:In1NotFloat',... 154 | 'Input 1 must be a floating-point matrix.') 155 | end 156 | 157 | if(isempty(A)) 158 | error('MATLAB:pca:In1Empty',... 159 | 'Input 1 must not be empty.') 160 | end 161 | 162 | % 163 | % Retrieve the dimensions of A. 164 | % 165 | [m n] = size(A); 166 | 167 | % 168 | % Check the remaining input arguments. 169 | % 170 | if(size(k,1) ~= 1 || size(k,2) ~= 1) 171 | error('MATLAB:pca:In2Not1x1',... 172 | 'Input 2 must be a scalar.') 173 | end 174 | 175 | if(size(its,1) ~= 1 || size(its,2) ~= 1) 176 | error('MATLAB:pca:In3Not1x1',... 177 | 'Input 3 must be a scalar.') 178 | end 179 | 180 | if(size(l,1) ~= 1 || size(l,2) ~= 1) 181 | error('MATLAB:pca:In4Not1x1',... 182 | 'Input 4 must be a scalar.') 183 | end 184 | 185 | if(k <= 0) 186 | error('MATLAB:pca:In2NonPos',... 187 | 'Input 2 must be > 0.') 188 | end 189 | 190 | if((k > m) || (k > n)) 191 | error('MATLAB:pca:In2TooBig',... 192 | 'Input 2 must be <= the smallest dimension of Input 1.') 193 | end 194 | 195 | if(its < 0) 196 | error('MATLAB:pca:In3Neg',... 197 | 'Input 3 must be >= 0.') 198 | end 199 | 200 | if(l < k) 201 | error('MATLAB:pca:In4ltIn2',... 
202 | 'Input 4 must be >= Input 2.') 203 | end 204 | 205 | % 206 | % SVD A directly if (its+1)*l >= m/1.25 or (its+1)*l >= n/1.25. 207 | % 208 | if(((its+1)*l >= m/1.25) || ((its+1)*l >= n/1.25)) 209 | 210 | if(~issparse(A)) 211 | S = svd(A); 212 | end 213 | 214 | if(issparse(A)) 215 | S = svd(full(A)); 216 | end 217 | % 218 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 219 | % and the uppermost leftmost k x k block of S. 220 | % 221 | S = S(1:k); 222 | 223 | return 224 | 225 | end 226 | 227 | 228 | if(m >= n) 229 | 230 | % 231 | % Apply A to a random matrix, obtaining H. 232 | % 233 | rand('seed',rand('seed')); 234 | 235 | if(isreal(A)) 236 | H = A*(2*rand(n,l)-ones(n,l)); 237 | end 238 | 239 | if(~isreal(A)) 240 | H = A*( (2*rand(n,l)-ones(n,l)) + i*(2*rand(n,l)-ones(n,l)) ); 241 | end 242 | 243 | rand('twister',rand('twister')); 244 | 245 | % 246 | % Initialize F to its final size and fill its leftmost block with H. 247 | % 248 | F = zeros(m,(its+1)*l); 249 | F(1:m, 1:l) = H; 250 | 251 | % 252 | % Apply A*A' to H a total of its times, 253 | % augmenting F with the new H each time. 254 | % 255 | for it = 1:its 256 | H = (H'*A)'; 257 | H = A*H; 258 | F(1:m, (1+it*l):((it+1)*l)) = H; 259 | end 260 | 261 | clear H; 262 | 263 | % 264 | % Form a matrix Q whose columns constitute an orthonormal basis 265 | % for the columns of F. 266 | % 267 | [Q,R,E] = qr(F,0); 268 | 269 | clear F R E; 270 | 271 | % 272 | % SVD Q'*A to obtain approximations to the singular values 273 | % and right singular vectors of A; adjust the left singular vectors 274 | % of Q'*A to approximate the left singular vectors of A. 275 | % 276 | S = svd(Q'*A); 277 | 278 | clear Q U2; 279 | 280 | % 281 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 282 | % and the uppermost leftmost k x k block of S. 283 | % 284 | S = S(1:k); 285 | 286 | end 287 | 288 | 289 | if(m < n) 290 | 291 | % 292 | % Apply A' to a random matrix, obtaining H. 
293 | % 294 | rand('seed',rand('seed')); 295 | 296 | if(isreal(A)) 297 | H = ((2*rand(l,m)-ones(l,m))*A)'; 298 | end 299 | 300 | if(~isreal(A)) 301 | H = (( (2*rand(l,m)-ones(l,m)) + i*(2*rand(l,m)-ones(l,m)) )*A)'; 302 | end 303 | 304 | rand('twister',rand('twister')); 305 | 306 | % 307 | % Initialize F to its final size and fill its leftmost block with H. 308 | % 309 | F = zeros(n,(its+1)*l); 310 | F(1:n, 1:l) = H; 311 | 312 | % 313 | % Apply A'*A to H a total of its times, 314 | % augmenting F with the new H each time. 315 | % 316 | for it = 1:its 317 | H = A*H; 318 | H = (H'*A)'; 319 | F(1:n, (1+it*l):((it+1)*l)) = H; 320 | end 321 | 322 | clear H; 323 | 324 | % 325 | % Form a matrix Q whose columns constitute an orthonormal basis 326 | % for the columns of F. 327 | % 328 | [Q,R,E] = qr(F,0); 329 | 330 | clear F R E; 331 | 332 | % 333 | % SVD A*Q to obtain approximations to the singular values 334 | % and left singular vectors of A; adjust the right singular vectors 335 | % of A*Q to approximate the right singular vectors of A. 336 | % 337 | S = svd(A*Q); 338 | 339 | clear Q V2; 340 | 341 | % 342 | % Retain only the leftmost k columns of U, the leftmost k columns of V, 343 | % and the uppermost leftmost k x k block of S. 
344 | % 345 | S = S(1:k); 346 | 347 | end 348 | -------------------------------------------------------------------------------- /plot_compare_denoising.m: -------------------------------------------------------------------------------- 1 | % plot_compare_denoising - plots the result of exp_denoising 2 | % 3 | % See also 4 | % exp_denoising, plot_nips2013 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. 
See license.txt 24 | 25 | 26 | %sz=input('size='); 27 | %sz=[60 60 30]; 28 | %sigmas=input('sigma='); % [0.01, 0.1]; 29 | %lambda=input('lambda='); 30 | %bHold = input('bHold='); 31 | 32 | nn=prod(sz); 33 | 34 | K=length(sz); 35 | 36 | for sigma=sigmas 37 | S=ls(sprintf('result_compare_full_%d_%d_%d_*sigma=%g.mat',sz(1),sz(2),sz(3),sigma)); 38 | 39 | files=split(S,char(10)); 40 | 41 | % Find best lambda 42 | % $$$ ix=zeros(length(files),2); 43 | % $$$ for ii=1:length(files) 44 | % $$$ S=load(files{ii}); 45 | % $$$ [mm,ix(ii,:)]=min(shiftdim(mean(S.err))); 46 | % $$$ end 47 | % $$$ ix=floor(median(ix)); 48 | S=load(files{1}); 49 | ix=zeros(size(lambda)); 50 | leg=cell(size(lambda)); 51 | for ii=1:prod(size(lambda)) 52 | [mm,ix(ii)]=min(abs(S.lambda-lambda(ii))); 53 | leg{ii}=sprintf('size=%s \\lambda=%.2f', printvec(sz), S.lambda(ix(ii))); 54 | end 55 | if ~bHold 56 | leg1=leg; 57 | end 58 | 59 | fprintf('sigma=%g lambda=%s (%s)\n', sigma, printvec(S.lambda(ix)), printvec(ix)); 60 | 61 | clear X1 X2 Y1 Y2 62 | Y1=zeros(S.nrep, length(lambda), length(files)); 63 | Y2=zeros(S.nrep, length(lambda), length(files)); 64 | for ii=1:length(files) 65 | S=load(files{ii}); 66 | X1(ii)=mean(sqrt(1./sz))^2*mean(sqrt(S.dtrue))^2; 67 | X2(:,ii)=min(sum(S.rank_mix,2),min(S.dtrue))/min(sz); 68 | Y1(:,:,ii)=S.err(:,ix(1,:),1).^2/nn; 69 | Y2(:,:,ii)=S.err(:,ix(2,:),2).^2/nn; 70 | end 71 | 72 | F=(1+sqrt(min(sz)*max(sz)/nn)+2/sigma*sqrt(min(sz)/nn))^2; 73 | 74 | 75 | if ~bHold 76 | figure; 77 | end 78 | 79 | % subplot(1,2,find(sigmas==sigma)); 80 | % errorxy([mean(X)',mean(Y)',std(X)',std(Y)'],'ColXe',3,'ColYe',4,'WidthEB',2,'Marker','x','MarkSize',10) 81 | 82 | mY1=shiftdim(mean(Y1))'; 83 | p=polyfit(X1', mY1(:,2), 1); 84 | if bHold 85 | axes(ax(1)); 86 | hold on; 87 | errorxym(X1', mY1,[],'Color','r','MarkerSize',10,'LineWidth',2); 88 | plot([0 1], polyval(p,[0 1]), '--', 'color', [.5 .5 .5], 'linewidth',2); 89 | h=get(gca,'children'); 90 | 
h=h(strcmp(get(get(gca,'children'),'linestyle'),'none')); 91 | legend(flipud(h),[leg1(1,:),leg(1,:)]); 92 | else 93 | subplot(1,3,1); 94 | errorxym(X1', mY1,[],'MarkerSize',10, 'LineWidth',2); 95 | hold on; 96 | plot([0 1], polyval(p,[0 1]), '--', 'color', [.5 .5 .5], 'linewidth',2); 97 | ylim([0 0.015]); 98 | grid on; 99 | set(gca,'fontsize',12); 100 | xlabel('Tucker rank complexity'); 101 | ylabel('Mean squared error (overlap)') 102 | title('Overlapped approach'); 103 | legend(leg{1,:}); 104 | pos=get(gca,'position'); 105 | set(gca,'position',[pos(1) 0.15, pos(3), 0.7]) 106 | ax(1)=gca; 107 | end 108 | 109 | mY2=shiftdim(mean(Y2))'; 110 | p=polyfit(mean(X2)', mY2(:,2), 1); 111 | if bHold 112 | axes(ax(2)); 113 | hold on; 114 | errorxym(mean(X2)',mY2,[], 'Color','r','MarkerSize',10,'LineWidth',2); 115 | plot([0 1], polyval(p,[0 1]), '--', 'color', [.5 .5 .5], 'linewidth',2); 116 | h=get(gca,'children'); 117 | h=h(strcmp(get(get(gca,'children'),'linestyle'),'none')); 118 | legend(flipud(h), [leg1(2,:),leg(2,:)]); 119 | else 120 | subplot(1,3,2); 121 | errorxym(mean(X2)', mY2,[],'MarkerSize', 10,'LineWidth',2); 122 | hold on; 123 | plot([0 1], polyval(p,[0 1]), '--', 'color', [.5 .5 .5], 'linewidth',2); 124 | ylim([0 0.015]); 125 | grid on; 126 | set(gca,'fontsize',12); 127 | xlabel('Latent rank complexity'); 128 | ylabel('Mean squared error (latent)') 129 | title('Latent approach'); 130 | legend(leg{2,:}); 131 | pos=get(gca,'position'); 132 | set(gca,'position',[pos(1) 0.15, pos(3), 0.7]) 133 | ax(2)=gca; 134 | end 135 | 136 | if bHold 137 | axes(ax(3)); 138 | hold on; 139 | errorxym(X1./mean(X2), shiftdim(mean(Y1)./mean(Y2))', [], 'Color','r' ,'linewidth',2, 'MarkerSize',10); 140 | else 141 | subplot(1,3,3); 142 | errorxym(X1./mean(X2), shiftdim(mean(Y1)./mean(Y2))',[] ,'linewidth',2, 'MarkerSize',10); 143 | xlim([0 max(xlim)]); 144 | ylim([0 max(ylim)]); 145 | set(gca,'fontsize',12); 146 | grid on; 147 | xlabel('TR complexity/LR complexity'); 148 | ylabel('MSE 
(overlap) / MSE (latent)'); 149 | title('Comparison'); 150 | pos=get(gca,'position'); 151 | set(gca,'position',[pos(1) 0.15, pos(3), 0.7]) 152 | ax(3)=gca; 153 | end 154 | % $$$ figure; 155 | % $$$ hold on; 156 | % $$$ for ii=1:length(files) 157 | % $$$ S=load(files{ii}); 158 | % $$$ plot(mean(S.err(:,:,1)), mean(S.err(:,:,2)), '-x', 'linewidth',2); 159 | % $$$ end 160 | end 161 | 162 | set(gcf,'position',[-368, 31, 1640, 628]); 163 | -------------------------------------------------------------------------------- /plot_completion_l1.m: -------------------------------------------------------------------------------- 1 | % plot_completion_l1 - undocumented 2 | % 3 | % See also 4 | % exp_completion_l1 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. 
See license.txt 24 | 25 | 26 | 27 | 28 | f = {[], @(x)23*x.^(1/4), @(x)10*x.^(1/2)}; 29 | 30 | for ii=1:3 31 | figure, imagesc(trfrac,log10(lambda(:,ii)), squeeze(mean(gen(:,:,ii,:)))); 32 | set(gca,'fontsize',12,'clim',[0 1]); 33 | xlabel('Fraction of observed elements'); 34 | ylabel('log(Regularization constant)'); 35 | colorbar; 36 | hold on; 37 | if ~isempty(f{ii}) 38 | plot(trfrac, log10(f{ii}(trfrac)), 'm--', 'linewidth', 2); 39 | end 40 | set(gcf,'paperpositionmode','auto','papersize',[20 20]); 41 | end 42 | 43 | 44 | 45 | 46 | % Compute ROC curve 47 | ixm = 10; % 50% observed 48 | ntr = round(prod(sz)*trfrac(ixm)); 49 | ncor = round(ntr*noisefr); 50 | figure 51 | for ii=1:3 52 | [mm,ilmd]=min(gen(1,:,ii,ixm)) 53 | err0 = memo(1,ilmd,ii,ixm).err0; 54 | err = memo(1,ilmd,ii,ixm).err; 55 | [ss,ix]=sort(-abs(err)); 56 | 57 | tp=cumsum((abs(err0(ix))>0)/ncor); 58 | fp=cumsum((abs(err0(ix))==0)/(ntr-ncor)); 59 | 60 | auc(ii)=diff(fp)'*tp(2:end); 61 | 62 | subplot(1,3,ii); 63 | plot(fp, tp, '-x', 'linewidth',2); 64 | set(gca,'fontsize',16); 65 | grid on; 66 | xlabel('False positive rate'); 67 | ylabel('True positive rate'); 68 | ylim([0.9 1]); 69 | end 70 | 71 | -------------------------------------------------------------------------------- /plot_denoising.m: -------------------------------------------------------------------------------- 1 | % plot_denoising - plot MSE against normalized rank 2 | % 3 | % Syntax 4 | % [dim1, dim2, dim3,err, lambda]=plot_denoising(files, lmd, style) 5 | % 6 | % See also 7 | % exp_denoising, plot_nips2011, plot_nips2011_final 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % 
http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 24 | % 25 | % Copyright(c) 2010-2014 Ryota Tomioka 26 | % This software is distributed under the MIT license. See license.txt 27 | 28 | 29 | % 30 | % 31 | function [dim1, dim2, dim3,err, lambda]=plot_denoising(files, lmd, style) 32 | 33 | if ~exist('style','var') 34 | style='x'; 35 | end 36 | 37 | 38 | ns=length(files); 39 | 40 | frac=zeros(ns,1); 41 | 42 | err=zeros(ns,1); 43 | nn =zeros(ns,1); 44 | for ii=1:ns 45 | S=load(files{ii}); 46 | sz(ii,:)=S.sz; 47 | rs(ii,:)=S.dtrue; 48 | 49 | if exist('lmd','var') 50 | [mm,ix]=min((log(S.lambda)-log(lmd)).^2); 51 | err(ii)=mean(S.err(:,ix).^2)/prod(S.sz); 52 | lambda(ii)=S.lambda(ix); 53 | else 54 | [err(ii), ix]=min(mean(S.err.^2)/prod(S.sz)); 55 | lambda(ii)=S.lambda(ix); 56 | end 57 | end 58 | 59 | fprintf('rangeof(lambda)=%s\n',printvec(rangeof(lambda))); 60 | 61 | nn=prod(sz,2); 62 | 63 | if size(rs,2)>1 64 | rst = [min(rs(:,1), rs(:,2).*rs(:,3)),... 65 | min(rs(:,2), rs(:,1).*rs(:,3)),... 66 | min(rs(:,3), rs(:,1).*rs(:,2))]; 67 | else 68 | rst=[rs,rs]; 69 | end 70 | 71 | 72 | % dim=rs*[50 50 20]'+rs(:,1).*rs(:,2).*rs(:,3) -sum(rs.^2,2); 73 | %dim=max(rst,[],2);%sum(rst,2); 74 | % dim=sum(rst,2); 75 | % dim=sum(sqrt(rst./(ones(ns,1)*sz)),2).^2/(size(sz,2)^2*size(rst,2)^2); 76 | dim1=(mean(sqrt(1./sz),2).*mean(sqrt(rst),2)).^2; 77 | dim2=sum(sqrt(sz)+sqrt((nn*ones(1,3))./sz),2); 78 | dim3=sum(sqrt(rst)/size(rst,2),2).^2; 79 | %dim=nm'; 80 | % figure; 81 | plot(dim1,err,style,'linewidth',2,'markersize',10); 82 | 83 | if 0 % size(rs,2)>1 84 | for ii=1:ns 85 | text(dim(ii),frac(ii),... 
86 | sprintf('[%d %d %d]',rs(ii,1),rs(ii,2),rs(ii,3))); 87 | end 88 | end 89 | 90 | ix=find(~isnan(err)); 91 | p=polyfit(dim1(ix),err(ix),1) 92 | hold on; 93 | plot(xlim,polyval(p,xlim),'--','color',[.5 .5 .5],'linewidth', 2) 94 | h=get(gca,'children'); 95 | set(gca,'children',h([2:end,1])); 96 | 97 | set(gca,'fontsize',16) 98 | xlabel('Normalized rank') 99 | ylabel('Mean squared error') 100 | 101 | grid on; 102 | 103 | 104 | 105 | -------------------------------------------------------------------------------- /plot_nips2011.m: -------------------------------------------------------------------------------- 1 | % plot_nips2011 - plots figures for nips 2011 paper 2 | % 3 | % See also 4 | % plot_denoising, plot_threshold_vs_normalized_rank 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. See license.txt 24 | 25 | 26 | if input('Plot noisy tensor decomposition?')==1 27 | tmps = {'result_full_sigma=0.01_50_50_20_*.mat',... 28 | 'result_full_sigma=0.1_50_50_20_*.mat',... 29 | 'result_full_sigma=0.01_100_100_50_*.mat',... 
30 | 'result_full_sigma=0.1_100_100_50_*.mat'}; 31 | 32 | 33 | for ii=1:length(tmps) 34 | S=ls(tmps{ii},'-1'); 35 | files=split(S,char(10))' 36 | 37 | lmd=input('lambda='); 38 | figure; 39 | [dim1,dim2,dim3,err,lambda]=plot_denoising(files,lmd(1),'bx'); 40 | [dim1,dim2,dim3,err,lambda]=plot_denoising(files,lmd(2),'^g'); 41 | [dim1,dim2,dim3,err,lambda]=plot_denoising(files,lmd(3),'ro'); 42 | set(gca,'position',[0.13 0.15 0.775 0.8]); 43 | h=get(gca,'children'); 44 | set(h(2),'color',[0 .5 0]); 45 | legend(h([3,2,1]),sprintf('\\lambda_M=%g/N',3*lmd(1)),... 46 | sprintf('\\lambda_M=%g/N',3*lmd(2)),... 47 | sprintf('\\lambda_M=%g/N',3*lmd(3)),... 48 | 'Location','NorthWest'); 49 | 50 | 51 | clear files, lmd; 52 | end 53 | keyboard; 54 | 55 | end 56 | 57 | 58 | if input('Plot tensor completion?')==1 59 | figure 60 | S=ls('result_50_50_20_*.mat','-1'); 61 | files=split(S,char(10)); 62 | [rs,frac,dim]=plot_threshold_vs_normalized_rank(files); 63 | h=get(gca,'children') 64 | h1=h(1); 65 | set(h1, 'color','r','marker','o','markersize',10); 66 | 67 | S=ls('result_100_100_50_*.mat','-1'); 68 | files=split(S,char(10)); 69 | [rs2,frac2,dim2]=plot_threshold_vs_normalized_rank(files); 70 | 71 | 72 | set(gca,'fontsize',16); 73 | grid on; 74 | 75 | 76 | h=get(gca,'children'); 77 | % ix=find(cell2mat(foreach(@(x)isequal(x,'line'),get(h,'type')))); 78 | % h=h([1:16,18:28,30,31,17,29]); 79 | % set(gca,'children',h); 80 | % set(h(1),'marker','x','markersize',10); 81 | % set(h(2),'marker','o','color','r','markersize',10); 82 | set(h(1),'markersize',10); 83 | 84 | legend(h([2,1]),'size=[50 50 20]', 'size=[100 100 50]'); 85 | set(gca,'position',[0.13 0.15 0.775 0.78]); 86 | 87 | xlabel('Normalized rank ||n^{-1}||_{1/2}||r||_{1/2}'); 88 | ylabel('Fraction at err<=0.01') 89 | 90 | end 91 | 92 | %%%% matrix 93 | figure 94 | 95 | markers={'o','x','^','v'}; 96 | col=get(gca,'colororder'); 97 | 98 | 99 | pats = {'result_matrix_50_20_*.mat',... 100 | 'result_matrix_100_40_*.mat',... 
101 | 'result_matrix_250_200*.mat'}; 102 | 103 | leg=repmat({[]},1,length(pats)); 104 | hh=zeros(1,length(pats)); 105 | 106 | for kk=1:length(pats) 107 | S=ls(pats{kk},'-1'); 108 | files=split(S,char(10)); 109 | [rs,frac,dim]=plot_threshold_vs_normalized_rank(files); 110 | h=get(gca,'children') 111 | hh(kk)=h(1); 112 | 113 | set(hh(kk), 'color',col(kk,:),'marker',markers{kk},'markersize',10); 114 | 115 | load(files{1},'sz'); 116 | leg{kk}=sprintf('size=[%d %d]',sz(1),sz(2)); 117 | 118 | end 119 | 120 | legend(hh, leg); 121 | set(gca,'fontsize',16,... 122 | 'position',[0.13 0.15 0.775 0.78]); 123 | grid on; 124 | ylim([0 1]); 125 | xlabel('Normalized rank ||n^{-1}||_{1/2}||r||_{1/2}'); 126 | ylabel('Fraction at err<=0.01') 127 | -------------------------------------------------------------------------------- /plot_nips2011_final.m: -------------------------------------------------------------------------------- 1 | % plot_nips2011_final - undocumented 2 | % 3 | % See also 4 | % plot_nips2011, plot_denoising 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. See license.txt 24 | 25 | 26 | S1=load('result_compare5_50_50_20_7_8_9.mat'); 27 | figure, h=errorbar(S1.trfrac'*[1 1], shiftdim(mean(S1.err(:,:,[4,7]))), ... 
28 | shiftdim(std(S1.err(:,:,[4,7])))); 29 | set(h,'linewidth',2); 30 | set(h(2), 'linestyle','-.'); 31 | set(gca,'yscale','log', 'fontsize', 16, 'ytick', [1e-3 1]); 32 | 33 | hold on; 34 | plot(xlim, 1e-3*[1 1], '--', 'color', [.5 .5 .5],'linewidth',2); 35 | 36 | ylim([1e-4 100]); 37 | grid on; 38 | 39 | legend('Convex', 'Tucker (exact)','Optimization tolerance','location','NorthEastOutside'); 40 | 41 | xlabel('Fraction of observed elements') 42 | ylabel('Estimation error') 43 | 44 | set(gcf,'PaperSize',[20 20]); 45 | 46 | keyboard; 47 | 48 | % $$$ patterns = {'result_full_sigma=0.01_50_50_20_*.mat',... 49 | % $$$ 'result_full_sigma=0.1_50_50_20_*.mat';... 50 | % $$$ 'result_full_sigma=0.01_100_100_50_*.mat',... 51 | % $$$ 'result_full_sigma=0.1_100_100_50_*.mat'}; 52 | % $$$ 53 | 54 | 55 | % Low noise 56 | patterns{1} = {'result_full_sigma=0.01_50_50_20_*.mat',... 57 | 'result_full_sigma=0.01_50_50_20_*.mat',... 58 | 'result_full_sigma=0.01_50_50_20_*.mat';... 59 | 'result_full_lmd=0.02_0.367_sigma=0.01_100_100_50_*.mat',... 60 | 'result_full_lmd=0.02_0.367_sigma=0.01_100_100_50_*.mat',... 61 | 'result_full_lmd=0.02_0.367_sigma=0.01_100_100_50_*.mat'}; 62 | 63 | 64 | 65 | 66 | lambda{1} = [0.01, 0.11 0.18; 0.02, 0.23 0.37] 67 | 68 | 69 | % High noise 70 | patterns{2} = {'result_full_sigma=0.1_50_50_20_*.mat',... 71 | 'result_full_sigma=0.1_50_50_20_*.mat',... 72 | 'result_full_sigma=0.1_50_50_20_*.mat';... 73 | 'result_full_lmd=0.22_4_sigma=0.1_100_100_50_*.mat',... 74 | 'result_full_lmd=0.22_4_sigma=0.1_100_100_50_*.mat',... 
75 | 'result_full_lmd=0.22_4_sigma=0.1_100_100_50_*.mat'} 76 | 77 | 78 | 79 | lambda{2} = [0.11 0.78 2; 0.22 1.5 4] 80 | 81 | 82 | for kk=1:length(lambda) 83 | lmd = lambda{kk}; 84 | pats = patterns{kk}; 85 | figure; 86 | color = {'b' 'r'}; 87 | marker = {'x', '^', 'o','+', 'v', '*'}; 88 | 89 | for ii=1:size(lmd,1) 90 | for jj=1:size(lmd,2) 91 | [ret,S]=system(sprintf('ls -1 %s', pats{ii,jj})); 92 | files=split(S,char(10))'; 93 | 94 | style = [color{ii}, marker{jj+(ii-1)*3}]; 95 | [dim1,dim2,dim3,err,lambda_out(:,ii,jj)]=plot_denoising(files,lmd(ii,jj),style); 96 | end 97 | set(gca,'position',[0.13 0.15 0.775 0.8]); 98 | 99 | 100 | end 101 | 102 | leg=cell(3,2); 103 | for jj=1:3 104 | leg{jj,1}=sprintf('size=[50 50 20] \\lambda_M=%g/N',3*lmd(1,jj)); 105 | end 106 | for jj=1:3 107 | leg{jj,2}=sprintf('size=[100 100 50] \\lambda_M=%g/N',3*lmd(2,jj)); 108 | end 109 | 110 | h=get(gca,'children'); 111 | legend(h(6:-1:1),leg{:}); 112 | set(gca,'position',[0.1300 0.1000 0.7750 0.8000]); 113 | set(gcf,'position',[156 55 694 610]); 114 | shiftdim(median(lambda_out)) 115 | end 116 | 117 | 118 | -------------------------------------------------------------------------------- /plot_nips2013.m: -------------------------------------------------------------------------------- 1 | % plot_nips2013 - plots figures for nips 2013 paper 2 | % 3 | % See also 4 | % plot_overlap_vs_latent_2013, plot_compare_denoising 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % 
http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. See license.txt 24 | 25 | 26 | plot_overlap_vs_latent_2013 27 | ylim([0 70]) 28 | % printFigure(1,'overlap_vs_latent_2013.eps') 29 | 30 | sz=[50 50 20]; 31 | sigmas=0.1; 32 | lambda=[0.43 0.89 3.8; 0.89 3.8 11.3]; 33 | bHold=0; 34 | plot_compare_denoising; 35 | 36 | sz=[80 80 40]; 37 | sigmas=0.1; 38 | lambda=1.6*[0.43 0.89 3.8; 0.89 3.8 11.3]; 39 | bHold=1; 40 | plot_compare_denoising; 41 | -------------------------------------------------------------------------------- /plot_overlap_vs_latent_2013.m: -------------------------------------------------------------------------------- 1 | % plot_overlap_vs_latent_2013 - plots figure 1 2 | % 3 | % See also 4 | % exp_compare_full 5 | % 6 | % Reference 7 | % "Estimation of low-rank tensors via convex optimization" 8 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 9 | % arXiv:1010.0789 10 | % http://arxiv.org/abs/1010.0789 11 | % 12 | % "Statistical Performance of Convex Tensor Decomposition" 13 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 14 | % NIPS 2011 15 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 16 | % 17 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 18 | % Ryota Tomioka, Taiji Suzuki 19 | % NIPS 2013 20 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 21 | % 22 | % Copyright(c) 2010-2014 Ryota Tomioka 23 | % This software is distributed under the MIT license. 
See license.txt 24 | 25 | 26 | rr=5:5:50; 27 | err_best=zeros(length(rr), 3); 28 | errbar_best=zeros(length(rr), 3); 29 | lmd_best=zeros(length(rr), 3); 30 | err_fix=zeros(length(rr),3); 31 | ix_lmd_fix=[7, 11,1]; 32 | 33 | for jj=1:length(rr) 34 | r=rr(jj); 35 | S=load(sprintf('result_compare_full_50_50_20_%d_%d_3_nrep=10_sigma=0.1_demo.mat',r,r)); 36 | 37 | errm=shiftdim(mean(S.err)); 38 | errs=shiftdim(std(S.err)); 39 | 40 | [mm,ix]=min(errm); 41 | 42 | err_best(jj,:)=mm; 43 | lmd_best(jj,:)=S.lambda(ix); 44 | 45 | errbar_best(jj,:)=diag(errs(ix,1:3))'; 46 | 47 | err_fix(jj,:)=diag(errm(ix_lmd_fix,1:3))'; 48 | 49 | errbar_fix(jj,:)=diag(errs(ix_lmd_fix,1:3))'; 50 | end 51 | 52 | S=load('result_compare_full_50_50_20_40_40_3_nrep=10_sigma=0.1_demo.mat') 53 | 54 | figure, 55 | h=errorbar(rr'*ones(1,2), err_fix(:,1:2), errbar_fix(:,1:2), 'linewidth', 2); 56 | hold on; plot(rr, err_best(:,1:2), '--','linewidth',2); 57 | grid on; 58 | set(gca,'fontsize',16); 59 | xlabel('Rank of the first two modes'); 60 | ylabel('Estimation error ||W-W*||_F'); 61 | title(sprintf('size=%s',printvec(S.sz))); 62 | legend('Overlapped Schatten 1-norm','Latent Schatten 1-norm'); 63 | ylim([6 30]); 64 | plot([40 40], ylim, '--', 'color', [.5 .5 .5], 'linewidth', 2); 65 | 66 | ax=axes('position',[0.2 0.6 0.3 0.25]); 67 | errm=shiftdim(mean(S.err(:,:,1:2))); 68 | h=errorbar(S.lambda'*[1 1], errm, ... 69 | shiftdim(std(S.err(:,:,1:2)))); 70 | set(h,'linewidth',2); 71 | set(gca,'xscale','log','fontsize',12); 72 | grid on; 73 | 74 | hold on; 75 | plot([1;1]*S.lambda(ix_lmd_fix(1:2)), [0, 0; ... 76 | diag(errm(ix_lmd_fix(1:2),1:2))'], 'm--',... 77 | 'linewidth',2); 78 | 79 | xlabel('Regularization constant \lambda'); 80 | ylabel('||W-W*||_F'); 81 | xlim(rangeof(S.lambda)); 82 | title(sprintf('rank=%s',printvec(S.dtrue))); 83 | set(gcf,'papersize',[20 20],... 
84 | 'position', [32, 421, 832 470]); -------------------------------------------------------------------------------- /plot_tensorworkshop10.m: -------------------------------------------------------------------------------- 1 | % PLOT_TENSORWORKSHOP10 - Plots figure for TKML workshop 2010 2 | % 3 | % Example 4 | % load('result_compare5_new_50_50_20_7_8_9.mat') 5 | % plot_tensorworkshop10 6 | % 7 | % See also 8 | % exp_completion, tensorconst_adm, tensormix_adm 9 | % 10 | % Reference 11 | % "Estimation of low-rank tensors via convex optimization" 12 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 13 | % arXiv:1010.0789 14 | % http://arxiv.org/abs/1010.0789 15 | % 16 | % "Statistical Performance of Convex Tensor Decomposition" 17 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 18 | % NIPS 2011 19 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 20 | % 21 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 22 | % Ryota Tomioka, Taiji Suzuki 23 | % NIPS 2013 24 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 25 | % 26 | % Copyright(c) 2010-2014 Ryota Tomioka 27 | % This software is distributed under the MIT license. 
See license.txt 28 | 29 | % load('result_compare5_50_50_20_7_8_9.mat') 30 | nm = size(err,3); 31 | 32 | if ~exist('tol','var') 33 | tol=1e-3; 34 | end 35 | 36 | figure, h=errorbar_logsafe(trfrac'*ones(1,nm), shiftdim(mean(err)), shiftdim(std(err))); 37 | 38 | set(gca,'fontsize',14,'yscale','log'); 39 | ylim([1e-5 1e+2]); 40 | 41 | set(h,'linewidth',2); 42 | set(h(1:3),'color',[0 0 1]); 43 | set(h(1),'linestyle','--'); 44 | set(h(2),'linestyle','-.'); 45 | 46 | col=get(gca,'colororder'); 47 | for ii=4:length(h), set(h(ii),'color', col(ii-2,:)); end 48 | 49 | hold on; 50 | plot(xlim, tol*[1 1], '--', 'color', [.5 .5 .5], 'linewidth',2); 51 | 52 | 53 | grid on; 54 | xlabel('Fraction of observed elements'); 55 | ylabel('Generalization error'); 56 | legend('As a Matrix (mode 1)',... 57 | 'As a Matrix (mode 2)', ... 58 | 'As a Matrix (mode 3)',... 59 | 'Constraint',... 60 | 'Mixture',... 61 | 'Tucker (large)',... 62 | 'Tucker (exact)',... 63 | 'Optimization tolerance',... 64 | 'Location','NorthEastOutside'); 65 | 66 | h=get(gca,'children'); 67 | set(gca,'children',h([2:end,1])); 68 | 69 | set(gcf,'PaperSize',[20 20]); 70 | -------------------------------------------------------------------------------- /plot_threshold_vs_normalized_rank.m: -------------------------------------------------------------------------------- 1 | % plot_threshold_vs_normalized_rank - plots phase transition threshold 2 | % for tensor completion against normalized rank 3 | % 4 | % Syntax 5 | % [rst, frac, dim]=plot_threshold_vs_normalized_rank(files,tol) 6 | % 7 | % See also 8 | % exp_completion, plot_nips2011 9 | % 10 | % Reference 11 | % "Estimation of low-rank tensors via convex optimization" 12 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 13 | % arXiv:1010.0789 14 | % http://arxiv.org/abs/1010.0789 15 | % 16 | % "Statistical Performance of Convex Tensor Decomposition" 17 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 18 | % NIPS 2011 19 | % 
http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 20 | % 21 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 22 | % Ryota Tomioka, Taiji Suzuki 23 | % NIPS 2013 24 | % http://papers.nips.cc/paper/4985-convex-tensor-decomposition-via-structured-schatten-norm-regularization.pdf 25 | % 26 | % Copyright(c) 2010-2014 Ryota Tomioka 27 | % This software is distributed under the MIT license. See license.txt 28 | 29 | function [rst, frac, dim]=plot_threshold_vs_normalized_rank(files,tol) 30 | 31 | if ~exist('tol','var') 32 | tol=0.01; 33 | end 34 | 35 | 36 | ns=length(files); 37 | 38 | frac=zeros(ns,1); 39 | 40 | for ii=1:ns 41 | S=load(files{ii}); 42 | sz=S.sz; 43 | rs(ii,:)=S.dtrue; 44 | ix=min(find(mean(S.err)1 68 | rst=zeros(size(rs)); 69 | for kk=1:size(rs,2) 70 | rst(:,kk)=min(rs(:,kk), prod(rs,2)./rs(:,kk)); 71 | end 72 | if size(rs,2)==3 73 | rst_check=[min(rs(:,1), rs(:,2).*rs(:,3)),... 74 | min(rs(:,2), rs(:,1).*rs(:,3)),... 75 | min(rs(:,3), rs(:,1).*rs(:,2))]; 76 | if ~isequal(rst,rst_check) 77 | error('rst check for 3rd order tensor failed'); 78 | end 79 | end 80 | else 81 | rst=[rs,rs]; 82 | end 83 | 84 | 85 | % dim=rs*[50 50 20]'+rs(:,1).*rs(:,2).*rs(:,3) -sum(rs.^2,2); 86 | %dim=max(rst,[],2);%sum(rst,2); 87 | % dim=sum(rst,2); 88 | % dim=sum(sqrt(rst./(ones(ns,1)*sz)),2).^2/(size(sz,2)^2*size(rst,2)^2); 89 | dim=(sum(sqrt(1./sz))*sum(sqrt(rst),2)).^2/(size(sz,2)^2*size(rst,2)^2); 90 | 91 | %dim=nm'; 92 | % figure; 93 | plot(dim,frac,'x','linewidth',2); 94 | 95 | if 0 % size(rs,2)>1 96 | for ii=1:ns 97 | text(dim(ii),frac(ii),... 
98 | sprintf('[%d %d %d]',rs(ii,1),rs(ii,2),rs(ii,3))); 99 | end 100 | end 101 | ix=find(~isnan(frac)); 102 | p=polyfit(dim(ix),frac(ix),1) 103 | hold on; 104 | plot(xlim,polyval(p,xlim),'--','color',[.5 .5 .5],'linewidth', 2) 105 | h=get(gca,'children'); 106 | set(gca,'children',h([2:end,1])); 107 | 108 | 109 | 110 | 111 | %dim = prod(rs,2)+sum(rs.*(ones(ns,1)*sz),2)-sum(rs.^2,2); 112 | %x = dim.^(1/3); 113 | % $$$ dim(:,1) = max([rst(:,1)*(sz(1)+prod(sz(2:3))) - rst(:,1).^2,... 114 | % $$$ rst(:,2)*(sz(2)+prod(sz([1,3]))) - rst(:,2).^2,... 115 | % $$$ rst(:,3)*(sz(3)+prod(sz(1:2))) - rst(:,3).^2],[],2); 116 | % $$$ 117 | % $$$ dim(:,2) = rst(:,1)*sz(1)+rst(:,2)*sz(2)+rst(:,3)*sz(3); +rst(:,1).*rst(:,2).*rst(:,3)-rst(:,1).^2-rst(:,2).^2-rst(:,3).^2; 118 | % $$$ 119 | % $$$ 120 | % $$$ dim=min(dim,[],2); 121 | % $$$ 122 | % $$$ figure, plot(dim, frac,'x', 'linewidth',2) 123 | % $$$ for ii=1:ns 124 | % $$$ text(dim(ii),frac(ii),... 125 | % $$$ sprintf('[%d %d %d]',rs(ii,1),rs(ii,2),rs(ii,3))); 126 | % $$$ end 127 | -------------------------------------------------------------------------------- /printvec.m: -------------------------------------------------------------------------------- 1 | % printvec - prints a vector into a string 2 | % 3 | % Syntax 4 | % str=printvec(vv,ml) 5 | % 6 | % Copyright(c) 2010-2014 Ryota Tomioka 7 | % This software is distributed under the MIT license. 
See license.txt 8 | 9 | function str=printvec(vv,ml) 10 | 11 | if ~exist('ml','var') 12 | ml=10; 13 | end 14 | 15 | 16 | vv=vv(:); 17 | 18 | str = '['; 19 | 20 | for ii=1:min(length(vv),ml)-1 21 | str = [str, sprintf('%g ', vv(ii))]; 22 | end 23 | 24 | if length(vv)>ml 25 | str = [str, '...']; 26 | end 27 | 28 | str = [str, sprintf('%g]',vv(end))]; 29 | -------------------------------------------------------------------------------- /propertylist2struct.m: -------------------------------------------------------------------------------- 1 | function opt = propertylist2struct(varargin) 2 | % PROPERTYLIST2STRUCT - Make options structure from parameter/value list 3 | % 4 | % OPT = PROPERTYLIST2STRUCT('param1', VALUE1, 'param2', VALUE2, ...) 5 | % Generate a structure OPT with fields 'param1' set to value VALUE1, field 6 | % 'param2' set to value VALUE2, and so forth. 7 | % 8 | % OPT has an additional field 'isPropertyStruct' that is meant to identify 9 | % OPT is a structure containing options data. Only in the case of missing 10 | % input arguments, no such identification field is written, that is, 11 | % PROPERTYLIST2STRUCT() returns []. 12 | % 13 | % OPT2 = PROPERTYLIST2STRUCT(OPT1, 'param', VALUE, ...) takes the options 14 | % structure OPT1 and adds new fields 'param' with according VALUE. 15 | % 16 | % See also SET_DEFAULTS 17 | % 18 | 19 | % Copyright Fraunhofer FIRST.IDA (2004) 20 | 21 | if nargin==0, 22 | % Return an empty struct without identification tag 23 | opt= []; 24 | return; 25 | end 26 | 27 | if isstruct(varargin{1}) | isempty(varargin{1}), 28 | % First input argument is already a structure: Start with that, write 29 | % the additional fields 30 | opt= varargin{1}; 31 | iListOffset= 1; 32 | else 33 | % First argument is not a structure: Assume this is the start of the 34 | % parameter/value list 35 | opt = []; 36 | iListOffset = 0; 37 | end 38 | % Write the identification field. ID field contains a 'version number' of 39 | % how parameters are passed. 
40 | opt.isPropertyStruct = 1; 41 | 42 | nFields= (nargin-iListOffset)/2; 43 | if nFields~=round(nFields), 44 | error('Invalid parameter/value list'); 45 | end 46 | 47 | for ff= 1:nFields, 48 | fld = varargin{iListOffset+2*ff-1}; 49 | if ~ischar(fld), 50 | error(sprintf('String required on position %i of the parameter/value list', ... 51 | iListOffset+2*ff-1)); 52 | end 53 | prp= varargin{iListOffset+2*ff}; 54 | opt= setfield(opt, fld, prp); 55 | end 56 | -------------------------------------------------------------------------------- /randsplit.m: -------------------------------------------------------------------------------- 1 | % randsplit - randomly split indices into training and testing 2 | % 3 | % Syntax 4 | % [ind_tr, ind_te]=randsplit(n, trfrac) 5 | % 6 | % Copyright(c) 2010-2014 Ryota Tomioka 7 | % This software is distributed under the MIT license. See license.txt 8 | 9 | function [ind_tr, ind_te]=randsplit(n, trfrac) 10 | 11 | ind=randperm(n)'; 12 | 13 | ntr=round(n*trfrac); 14 | 15 | ind_tr=ind(1:ntr); 16 | ind_te=ind(ntr+1:end); -------------------------------------------------------------------------------- /randtensor.m: -------------------------------------------------------------------------------- 1 | % randtensor - randomly generates a low-rank tensor 2 | % 3 | % Syntax 4 | % X=randtensor(sz, dims) 5 | % 6 | % See also 7 | % randtensor3 8 | % 9 | % Reference 10 | % "Estimation of low-rank tensors via convex optimization" 11 | % Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima 12 | % arXiv:1010.0789 13 | % http://arxiv.org/abs/1010.0789 14 | % 15 | % "Statistical Performance of Convex Tensor Decomposition" 16 | % Ryota Tomioka, Taiji Suzuki, Kohei Hayashi, Hisashi Kashima 17 | % NIPS 2011 18 | % http://books.nips.cc/papers/files/nips24/NIPS2011_0596.pdf 19 | % 20 | % Convex Tensor Decomposition via Structured Schatten Norm Regularization 21 | % Ryota Tomioka, Taiji Suzuki 22 | % NIPS 2013 23 | % 
% randtensor - randomly generates a low-rank tensor (Tucker form)
%
% Syntax
%  X=randtensor(sz, dims)
%
% Inputs
%  sz   : output tensor size (1 x nd)
%  dims : Tucker rank per mode (1 x nd)
%
% See also
%  randtensor3
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function X=randtensor(sz, dims)

nd=length(sz);

% Start from a random core tensor of size dims
X=randn(dims);

% Expand each mode by a random orthonormal factor
for mode=1:nd
  [Q,R]=qr(randn(sz(mode),dims(mode)),0);

  newsz=size(X);
  newsz(mode)=sz(mode);
  X = flatten_adj(Q*flatten(X,mode),newsz,mode);
end
% randtensor3 - randomly generates a 3-way low-rank tensor
%
% Syntax
%  X=randtensor3(sz, dims, const)
%
% Examples
%  X=randtensor3([50 50 20], [7 8 9]); % tucker
%  X=randtensor3([50 50 20], 3);       % parafac
%
% Inputs
%  sz    : tensor size (1 x 3)
%  dims  : vector -> Tucker ranks; scalar -> PARAFAC rank
%  const : if true, mode-3 factors are nonnegative unit-norm columns
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function X=randtensor3(sz, dims, const)

nd=length(sz);
U=cell(1,nd);
if exist('const','var') && const
  C=randn(dims);
  % BUGFIX: qr with a single output returns the triangular factor R,
  % not the orthonormal Q. Capture Q explicitly (as in the else
  % branch below) so U{1}, U{2} are orthonormal bases.
  [U{1},R]=qr(randn(sz(1),dims),0);
  [U{2},R]=qr(randn(sz(2),dims),0);
  % Mode-3 factor: nonnegative entries, columns normalized to unit norm
  U{3}=rand(sz(3),dims);
  U{3}=bsxfun(@rdivide, U{3}, sqrt(sum(U{3}.^2)));
else
  if length(dims)>1
    % Tucker: random core, orthonormal factors
    C=randn(dims);
    for jj=1:nd
      [U{jj},R]=qr(randn(sz(jj),dims(jj)),0);
    end
  else
    % PARAFAC: superdiagonal core, Gaussian factors
    C=diag3(randn(dims));
    for jj=1:nd
      U{jj}=randn(sz(jj),dims);
    end
  end
end
X=kolda3(C,U{:});
function [opt, isdefault]= set_defaults(opt, varargin)
%[opt, isdefault]= set_defaults(opt, defopt)
%[opt, isdefault]= set_defaults(opt, field/value list)
%
% Fills in the struct opt with default values for fields that DO NOT
% already exist in opt; existing fields keep their original values.
% Defaults can be given either as a struct,
%   opt= set_defaults(opt, struct('color','g', 'linewidth',3));
% or as a property/value list,
%   opt= set_defaults(opt, 'color','g', 'linewidth',3);
%
% The second output isdefault has the same fields as opt; each field is
% 1 if the default was inserted, 0 if the caller-supplied value was kept.
%
% Copyright (C) Fraunhofer FIRST
% Authors: Frank Meinecke, Benjamin Blankertz, Pavel Laskov

if length(opt)>1,
  error('first argument must be a 1x1 struct');
end

% Mark every field already present in 'opt' as non-default
isdefault= [];
if ~isempty(opt),
  for Fld=fieldnames(opt)',
    isdefault= setfield(isdefault, Fld{1}, 0);
  end
end

% Check if we have a field/value list
if length(varargin) > 1

  % If the target is a propertylist structure use propertylist2struct to
  % convert the property list to a defopt structure.
  if (ispropertystruct(opt))
    defopt = propertylist2struct(varargin{:});

  else % otherwise construct defopt from scratch

    % Create a dummy defopt structure: a terrible Matlab hack to overcome
    % impossibility of incremental update of an empty structure.
    defopt = struct('matlabsucks','foo');

    % Consistency of the field/value list: must have an even number of
    % entries. BUGFIX: the original read
    %   if nArgs~=round(nArgs) & length(varargin~=1),
    % where the misplaced parenthesis made the second operand
    % length(varargin), which is always nonzero in this branch, so the
    % effective test was nArgs~=round(nArgs) -- kept here explicitly.
    nArgs= length(varargin)/2;
    if nArgs~=round(nArgs),
      error('inconsistent field/value list');
    end

    % Write a temporary defopt structure
    for ii= 1:nArgs,
      defopt= setfield(defopt, varargin{ii*2-1}, varargin{ii*2});
    end

    % Remove the dummy field from defopt
    defopt = rmfield(defopt,'matlabsucks');
  end

else

  % If varargin has only one element, it must be a defopt structure.
  defopt = varargin{1};

end

% Replace the missing fields in 'opt' from their 'defopt' counterparts.
for Fld=fieldnames(defopt)',
  fld= Fld{1};
  if ~isfield(opt, fld),
    opt= setfield(opt, fld, getfield(defopt, fld));
    isdefault= setfield(isdefault, fld, 1);
  end
end

% Check if some fields in 'opt' are missing in 'defopt': possibly wrong
% options. (Warning deliberately disabled upstream.)
for Fld=fieldnames(opt)',
  fld= Fld{1};
  if ~isfield(defopt,fld)
    % warning('set_defaults:DEFAULT_FLD',['field ''' fld ''' does not have a valid default option']);
  end
end
% softth - computes the proximity operator corresponding to the
% trace norm (singular value soft-thresholding)
%
% Syntax
%  [vv,ss,nsv]=softth(vv, lambda, nsv, verbose);
%
% Inputs
%  vv      : matrix to be thresholded
%  lambda  : threshold
%  nsv     : guess for the number of singular values above lambda
%  verbose : print progress (default 0)
%
% Outputs
%  vv  : thresholded matrix U*diag(max(s-lambda,0))*V'
%  ss  : surviving (shrunken) singular values
%  nsv : number of surviving singular values (for warm-starting)
%
% See also
%  pca, softth_overlap
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [vv,ss,nsv]=softth(vv, lambda, nsv, verbose);

if ~exist('verbose','var')
  verbose=0;
end

sz=size(vv);
nsv=min(min(sz),nsv+1);

if verbose
  t0=cputime;
  fprintf('sz=[%d %d]\n',sz(1), sz(2));
  fprintf('nsv=');
end

% Compute progressively more leading singular values until every value
% above the threshold lambda has been captured (or full rank is reached).
% NOTE(review): the interior of this loop was corrupted in this copy of
% the file (text between '<' and '>' was stripped); the termination test
% and the doubling of nsv below are a reconstruction of the standard
% rank-incremental scheme -- verify against the upstream repository.
while 1
  if verbose
    fprintf('%d/',nsv);
  end
  [U,S,V]=pca(vv,min(min(sz),nsv),10);
  ss=diag(S);
  if min(ss)<lambda || nsv==min(sz)
    % smallest computed value already below threshold, or full rank
    break;
  end
  nsv=min(2*nsv, min(sz));
end

if verbose
  fprintf('\ntime=%g\n', cputime-t0);
end

% Keep the singular values above the threshold and shrink them by lambda.
ix=find(ss>=lambda);
ss=ss(ix)-lambda;
vv = U(:,ix)*diag(ss)*V(:,ix)';

nsv=length(ix);
% softth_overlap - computes the soft-threshold operation (proximity
% operator) corresponding to the overlapped trace norm, via ADMM
%
% Syntax
%  X = softth_overlap(Y, lambda, varargin)
%
% Inputs
%  Y      : input tensor
%  lambda : regularization constant
%  options: 'eta' (ADMM step size), 'tol', 'verbose', 'yfact', 'maxiter'
%
% See also
%  pca, pcaspec, softth
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function X = softth_overlap(Y, lambda, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [], 'tol', 1e-3, 'verbose', 0,'yfact',10,'maxiter',2000);

sz=size(Y);
nd=ndims(Y);

% Uniform weights across the nd unfoldings
gamma=ones(1,nd)/nd;

% ADMM step-size: user supplied, or scaled by the input's std
if ~isempty(opt.eta)
  eta=opt.eta;
else
  eta=1/(opt.yfact*std(Y(:)));
end

Z=cell(1,nd); % auxiliary variable per unfolding
A=cell(1,nd); % Lagrange multiplier per unfolding
S=cell(1,nd); % singular values per unfolding

for jj=1:nd
  szj = [sz(jj), prod(sz)/sz(jj)];
  A{jj} = zeros(szj);
  Z{jj} = zeros(szj);
end

X=Y;

kk=1;
nsv=10*ones(1,nd);
viol=inf*ones(1,nd);
while 1
  % Update X: quadratic minimization given Z and A
  X1 = Y/lambda;
  for jj=1:nd
    X1 = X1 + flatten_adj(eta*Z{jj}-A{jj},sz,jj);
  end

  X=X1/(1/lambda + nd*eta);

  % Update Z: singular-value soft-thresholding per unfolding
  for jj=1:nd
    [Z{jj},S{jj},nsv(jj)] = softth(A{jj}/eta+flatten(X,jj),gamma(jj)/eta,nsv(jj));
  end

  % Update A (dual ascent); viol tracks the constraint violation
  for jj=1:nd
    V=flatten(X,jj)-Z{jj};
    A{jj}=A{jj}+eta*V;
    viol(jj)=norm(V(:));
  end

  % Compute the objective
  fval(kk)=0.5*norm(X(:)-Y(:))^2/lambda;
  G=(X-Y)/lambda;
  for jj=1:nd
    fval(kk)=fval(kk)+gamma(jj)*sum(svd(flatten(X,jj)));
    G=G+flatten_adj(A{jj},sz,jj);
  end

  res(kk)=max([norm(G(:))/eta,viol]);

  if opt.verbose
    fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
            kk, fval(kk), res(kk), printvec(viol),eta);
  end

  % Stopping tests (the '<' comparisons were stripped in this copy of
  % the file; restored here)
  if kk>=3 && res(kk)<opt.tol
    break;
  end

  if kk>opt.maxiter
    break;
  end

  kk=kk+1;
end

fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
        kk, fval(kk), res(kk), printvec(viol),eta);


% evalDual - dual objective value of the multipliers A.
% NOTE(review): not called from the main loop above; kept as a
% diagnostic helper.
function dval=evalDual(A, Y, lambda, gamma, sz)

nd=length(A);

% Scale the multipliers into the dual-feasible region
fact=1;
for jj=1:nd
  ss=pcaspec(A{jj},1,10);
  fact=min(fact,gamma(jj)/ss);
end

As=zeros(sz);
for jj=1:nd
  A{jj}=A{jj}*fact;
  As=As+flatten_adj(A{jj},sz,jj);
end

dval = 0.5*lambda*norm(As(:))^2 - Y(:)'*As(:);
% split - splits a string (or a cell array of strings) into a cell array
%
% Syntax
%  B = split(A, sep)
%
% Inputs
%  A   : char array, or cell array of char arrays
%  sep : separator string (may be multi-character)
%
% Output
%  B : cell array of pieces; if A contained a single string, B is that
%      string's cell array of pieces (unnested)
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function B = split(A, sep)

if ischar(A)
  A = {A};
end

B = cell(size(A));

for i=1:numel(A)
  % strfind replaces the deprecated findstr (findstr searched the
  % shorter string inside the longer one, which is wrong when the
  % separator is longer than the input).
  s = strfind(A{i},sep);
  c = 1;
  B{i} = [];
  for j=1:length(s)
    % Skip matches that overlap an already-consumed separator
    if s(j) < c
      continue;
    end
    B{i} = [B{i}, {A{i}(c:s(j)-1)}];
    % BUGFIX: advance past the whole separator, not just one character
    % (the original used c = s(j)+1, leaving residue for multi-char sep)
    c = s(j)+length(sep);
  end
  if (c<=length(A{i}))
    B{i} = [B{i}, {A{i}(c:end)}];
  end
end

if numel(A)==1
  B = B{1};
end
% tensor_as_matrix - Computes the reconstruction of a partly observed
% tensor via the "As A Matrix" approach: each mode-ii unfolding is
% completed independently with a matrix solver.
%
% Syntax
%  [X,Z,fval,gval]=tensor_as_matrix(X, I, yy, lambda, varargin)
%
% Inputs
%  X      : tensor giving the size of the problem (contents unused)
%  I      : cell array of subscripts of the observed entries
%  yy     : observed values
%  lambda : regularization constant passed to the solver
%  options: 'eta', 'tol', 'solver' (default @matrix_adm)
%
% Outputs
%  X    : empty (each unfolding is returned separately in Z)
%  Z    : cell array; Z{ii} is the completed mode-ii unfolding
%  fval : final primal objective per mode
%  gval : final dual/gap value per mode
%
% See also
%  matrix_adm, matrixl1_adm
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,fval,gval]=tensor_as_matrix(X, I, yy, lambda, varargin)
opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [], 'tol', 1e-3, 'solver', @matrix_adm);

% (removed dead code: a local 'tol' default that was never read --
% the solver receives opt.tol instead)

sz=size(X);
nd=ndims(X);

Z=cell(1,nd);
for ii=1:nd
  % Rotate the modes so that mode ii comes first, then solve the
  % resulting matrix completion problem.
  szp=[sz(ii:end) sz(1:ii-1)];
  Ip=[I(ii:end) I(1:ii-1)];
  J =sub2ind(szp(2:end), Ip{2:end});
  [Z1,Z{ii},Y,fval1,gval1]=opt.solver(zeros(szp(1),prod(szp(2:end))),{Ip{1}, J}, yy, lambda, opt);
  fval(ii)=fval1(end);
  gval(ii)=gval1(end);
end

% No single consensus tensor is produced by this approach
X=[];
% tensorconst_adm - computes the reconstruction of a partially
% observed tensor using the overlapped trace norm approach (ADMM)
%
% Syntax
%  [X,Z,A,fval,res] = tensorconst_adm(X, I, yy, lambda, varargin)
%
% Inputs
%  X      : tensor giving the size of the problem (contents unused)
%  I      : cell array of subscripts of observed entries (one per mode)
%  yy     : observed values
%  lambda : loss weight (0 = exact interpolation of the observations)
%  options: 'eta', 'gamma', 'tol', 'verbose', 'yfact', 'maxiter'
%
% Outputs
%  X    : reconstructed tensor
%  Z    : auxiliary low-rank unfoldings
%  A    : Lagrange multipliers
%  fval : objective values per iteration
%  res  : relative duality gap per iteration
%
% See also
%  tensormix_adm, exp_completion
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,A,fval,res] = tensorconst_adm(X, I, yy, lambda, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [], 'gamma',[],'tol', 1e-3, 'verbose', 0,'yfact',10,'maxiter',2000);

sz=size(X);
nd=ndims(X);
m =length(I{1});

if ~isempty(opt.gamma)
  gamma=opt.gamma;
else
  gamma=ones(1,nd);
end

if ~isempty(opt.eta)
  eta=opt.eta;
else
  eta=1/(opt.yfact*std(yy));
end

if nd~=length(I)
  error('Number of dimensions mismatch.');
end

if m~=length(yy)
  error('Number of samples mismatch.');
end

Z=cell(1,nd); % low-rank auxiliary unfoldings
A=cell(1,nd); % Lagrange multipliers
S=cell(1,nd); % singular values

for jj=1:nd
  szj = [sz(jj), prod(sz)/sz(jj)];
  A{jj} = zeros(szj);
  Z{jj} = zeros(szj);
end

% B holds the observations at their positions (mask: B~=0)
B=zeros(sz);
ind=sub2ind(sz, I{:});
B(ind)=yy;

kk=1;
nsv=10*ones(1,nd);
while 1
  % X update: quadratic minimization given Z and A
  X1 = zeros(size(X));
  for jj=1:nd
    X1 = X1 - flatten_adj(A{jj}-eta*Z{jj},sz,jj);
  end

  if lambda>0
    X1(ind) = X1(ind) + yy/lambda;
    X=X1./((B~=0)/lambda + nd*eta);
  else
    % lambda==0: interpolate the observations exactly
    X=X1/(eta*nd);
    X(ind)=yy;
  end

  % Z update: singular-value soft-thresholding per unfolding
  for jj=1:nd
    [Z{jj},S{jj},nsv(jj)] = softth(A{jj}/eta+flatten(X,jj),gamma(jj)/eta,nsv(jj));
  end

  % A update (dual ascent); viol tracks constraint violations
  for jj=1:nd
    V=flatten(X,jj)-Z{jj};
    A{jj}=A{jj}+eta*V;
    viol(jj)=norm(V(:));
  end

  % Compute the objective
  G=zeros(size(X));
  fval(kk)=0;
  for jj=1:nd
    fval(kk)=fval(kk)+gamma(jj)*sum(svd(flatten(X,jj)));
    G = G + flatten_adj(A{jj},sz,jj);
  end
  if lambda>0
    fval(kk)=fval(kk)+0.5*sum((X(ind)-yy).^2)/lambda;
    G(ind)=G(ind)+(X(ind)-yy)/lambda;
  else
    G(ind)=0;
  end

  % Relative duality gap
  res(kk)=1+evalDual(A, yy, lambda, gamma, sz, ind)/fval(kk);

  if opt.verbose
    fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
            kk, fval(kk), res(kk), printvec(viol),eta);
  end

  % Stopping tests (the '<' comparisons were stripped in this copy of
  % the file; restored here)
  if kk>1 && res(kk)<opt.tol
    break;
  end

  if kk>opt.maxiter
    break;
  end

  kk=kk+1;
end

fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
        kk, fval(kk), res(kk), printvec(viol),eta);


% evalDual - dual objective value after projecting A into the
% dual-feasible region.
function dval=evalDual(A, yy, lambda, gamma, sz, ind)

nd=length(A);

% Average of the multipliers over unobserved entries
Am=zeros(sz);
for jj=1:nd
  Am=Am+flatten_adj(A{jj},sz,jj);
end

Am(ind)=0;
Am=Am/nd;

% Center the multipliers and compute the common feasibility scaling
fact=1;
for jj=1:nd
  A{jj}=A{jj}-flatten(Am,jj);
  ss=pcaspec(A{jj},1,10);
  fact=min(fact,gamma(jj)/ss);
end

As=zeros(sz);
for jj=1:nd
  % BUGFIX: the original recomputed fact=min(1,gamma(jj)/ss) here using
  % a stale ss from the previous loop, clobbering the global scaling
  % computed above; use the single feasibility factor (as in
  % tensorconst_subset_adm's evalDual).
  A{jj}=A{jj}*fact;
  As=As+flatten_adj(A{jj},sz,jj);
end

dval = 0.5*lambda*norm(As(ind))^2 - yy'*As(ind);
% tensorconst_subset_adm - overlapped approach with more general unfoldings
%
% Syntax
%  [X,Z,A,fval,res] = tensorconst_subset_adm(X, I, yy, lambda, indUnfold, varargin)
%
% Inputs
%  X         : tensor giving the size of the problem (contents unused)
%  I         : cell array of subscripts of observed entries
%  yy        : observed values
%  lambda    : loss weight (0 = exact interpolation)
%  indUnfold : cell array, one row per unfolding:
%              {row-mode indices, column-mode indices}, e.g.
%              {[1 2],[3 4]; [1 3],[2 4]}
%  options   : 'eta', 'gamma', 'tol', 'verbose', 'yfact', 'maxiter'
%
% See also
%  tensorconst_adm, exp_completion_4d
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,A,fval,res] = tensorconst_subset_adm(X, I, yy, lambda, indUnfold, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [], 'gamma',[],'tol', 1e-3, 'verbose', 0,'yfact',10,'maxiter',2000);

sz=size(X);
nd=ndims(X);
m =length(I{1});

% Number of (generalized) unfoldings used in the regularizer
nUnfold = size(indUnfold,1);

if ~isempty(opt.gamma)
  gamma=opt.gamma;
else
  gamma=ones(1,nUnfold);
end

if ~isempty(opt.eta)
  eta=opt.eta;
else
  eta=1/(opt.yfact*std(yy));
end

if nd~=length(I)
  error('Number of dimensions mismatch.');
end

if m~=length(yy)
  error('Number of samples mismatch.');
end

Z=cell(1,nUnfold); % low-rank auxiliary unfoldings
A=cell(1,nUnfold); % Lagrange multipliers
S=cell(1,nUnfold); % singular values

for jj=1:nUnfold
  szj=[prod(sz(indUnfold{jj,1})), prod(sz(indUnfold{jj,2}))];
  A{jj} = zeros(szj);
  Z{jj} = zeros(szj);
end

% B holds the observations at their positions (mask: B~=0)
B=zeros(sz);
ind=sub2ind(sz, I{:});
B(ind)=yy;

kk=1;
nsv=10*ones(1,nUnfold);
while 1
  % X update: quadratic minimization given Z and A
  X1 = zeros(size(X));
  for jj=1:nUnfold
    X1 = X1 - flatten_adj(A{jj}-eta*Z{jj},sz,indUnfold(jj,:));
  end

  if lambda>0
    X1(ind) = X1(ind) + yy/lambda;
    X=X1./((B~=0)/lambda + nUnfold*eta);
  else
    % lambda==0: interpolate the observations exactly
    X=X1/(eta*nUnfold);
    X(ind)=yy;
  end

  % Z update: singular-value soft-thresholding per unfolding
  for jj=1:nUnfold
    [Z{jj},S{jj},nsv(jj)] = softth(A{jj}/eta+flatten(X,indUnfold(jj,:)),gamma(jj)/eta,nsv(jj));
  end

  % A update (dual ascent); viol tracks constraint violations
  for jj=1:nUnfold
    V=flatten(X,indUnfold(jj,:))-Z{jj};
    A{jj}=A{jj}+eta*V;
    viol(jj)=norm(V(:));
  end

  % Compute the objective
  G=zeros(size(X));
  fval(kk)=0;
  for jj=1:nUnfold
    fval(kk)=fval(kk)+gamma(jj)*sum(svd(flatten(X,indUnfold(jj,:))));
    G = G + flatten_adj(A{jj},sz,indUnfold(jj,:));
  end
  if lambda>0
    fval(kk)=fval(kk)+0.5*sum((X(ind)-yy).^2)/lambda;
    G(ind)=G(ind)+(X(ind)-yy)/lambda;
  else
    G(ind)=0;
  end

  % Relative duality gap
  res(kk)=1+evalDual(A, yy, lambda, gamma, sz, ind, indUnfold)/fval(kk);

  if opt.verbose
    fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
            kk, fval(kk), res(kk), printvec(viol),eta);
  end

  % Stopping tests (the '<' comparisons were stripped in this copy of
  % the file; restored here)
  if kk>1 && res(kk)<opt.tol
    break;
  end

  if kk>opt.maxiter
    break;
  end

  kk=kk+1;
end

fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
        kk, fval(kk), res(kk), printvec(viol),eta);


% evalDual - dual objective value after projecting A into the
% dual-feasible region.
function dval=evalDual(A, yy, lambda, gamma, sz, ind, indUnfold)

nUnfold=size(indUnfold,1);

% Average of the multipliers over unobserved entries
Am=zeros(sz);
for jj=1:nUnfold
  Am=Am+flatten_adj(A{jj},sz,indUnfold(jj,:));
end

Am(ind)=0;
Am=Am/nUnfold;

% Center the multipliers and compute the common feasibility scaling
fact=1;
for jj=1:nUnfold
  A{jj}=A{jj}-flatten(Am,indUnfold(jj,:));
  ss=pcaspec(A{jj},1,10);
  fact=min(fact,gamma(jj)/ss);
end

As=zeros(sz);
for jj=1:nUnfold
  A{jj}=A{jj}*fact;
  As=As+flatten_adj(A{jj},sz,indUnfold(jj,:));
end

dval = 0.5*lambda*norm(As(ind))^2 - yy'*As(ind);
% tensorl1_adm - sparse + low-rank decomposition via overlapped approach
% (overlapped trace norm on the tensor, L1 loss on the observations)
%
% Syntax
%  [X,Z,A,beta,fval,res] = tensorl1_adm(X, I, yy, lambda, varargin)
%
% Inputs
%  X      : tensor giving the size of the problem (contents unused)
%  I      : cell array of subscripts of observed entries
%  yy     : observed values
%  lambda : weight of the L1 loss term (sum|yy - X(ind)|/lambda)
%  options: 'eta', 'eta1', 'gamma', 'tol', 'verbose', 'yfact', 'maxiter'
%
% Outputs
%  X    : reconstructed tensor
%  Z    : auxiliary low-rank unfoldings
%  A    : Lagrange multipliers (unfoldings)
%  beta : Lagrange multiplier for the observation constraint
%  fval : objective values per iteration
%  res  : relative duality gap per iteration
%
% See also
%  tensorconst_adm
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,A,beta,fval,res] = tensorl1_adm(X, I, yy, lambda, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [], 'eta1', [], 'gamma',[],'tol', 1e-3, 'verbose', 0,'yfact',10,'maxiter',2000);

sz=size(X);
nd=ndims(X);
m =length(I{1});

if ~isempty(opt.gamma)
  gamma=opt.gamma;
else
  gamma=ones(1,nd);
end

% eta: ADMM step for the unfolding constraints
if ~isempty(opt.eta)
  eta=opt.eta;
else
  eta=1/(opt.yfact*std(yy));
end

% eta1: ADMM step for the observation constraint
if ~isempty(opt.eta1)
  eta1=opt.eta1;
else
  eta1=1/(opt.yfact*std(yy));
end

if nd~=length(I)
  error('Number of dimensions mismatch.');
end

if m~=length(yy)
  error('Number of samples mismatch.');
end

Z=cell(1,nd); % low-rank auxiliary unfoldings
A=cell(1,nd); % Lagrange multipliers
S=cell(1,nd); % singular values

for jj=1:nd
  szj = [sz(jj), prod(sz)/sz(jj)];
  A{jj} = zeros(szj);
  Z{jj} = zeros(szj);
end

% Y holds the observations at their positions (mask: Y~=0)
Y=zeros(sz);
ind=sub2ind(sz, I{:});
Y(ind)=yy;

delta = zeros(m,1); % sparse residual yy - X(ind)
beta = zeros(m,1);  % multiplier for X(ind)+delta=yy

kk=1;
nsv=10*ones(1,nd);
dval=-inf;
while 1
  % (removed dead store: Xorig = X; was never read)

  % X update: quadratic minimization given Z, A, delta, beta
  X1 = zeros(size(X));
  for jj=1:nd
    X1 = X1 + flatten_adj(eta*Z{jj}-A{jj},sz,jj);
  end

  X1(ind) = X1(ind) + eta1*(yy-delta-beta/eta1);
  X=X1./(eta1*(Y~=0)+nd*eta);

  % delta update: elementwise soft-threshold
  % (l1_softth is a helper defined elsewhere; presumably the
  % elementwise L1 proximity operator -- confirm against upstream)
  [delta,ss] = l1_softth(yy-X(ind)-beta/eta1, 1/lambda/eta1);

  % Z update: singular-value soft-thresholding per unfolding
  for jj=1:nd
    [Z{jj},S{jj},nsv(jj)] = softth(flatten(X,jj)+A{jj}/eta,gamma(jj)/eta,nsv(jj));
  end

  % Update A (dual ascent on the unfolding constraints)
  for jj=1:nd
    V=flatten(X,jj)-Z{jj};
    A{jj}=A{jj}+eta*V;
    viol(jj)=norm(V(:));
  end

  % Update beta (dual ascent on the observation constraint)
  beta = beta + eta1*(X(ind)+delta-yy);

  % Compute the objective
  G=zeros(size(X));
  fval(kk)=0;
  for jj=1:nd
    fval(kk)=fval(kk)+gamma(jj)*sum(svd(flatten(X,jj)));
    G = G + flatten_adj(A{jj},sz,jj);
  end
  if lambda>0
    fval(kk)=fval(kk)+sum(abs(yy-X(ind)))/lambda;
    G(ind)=G(ind)+(X(ind)-yy)/lambda;
  else
    G(ind)=0;
  end

  viol(nd+1) =norm(X(ind)+delta-yy);

  % Best dual value seen so far -> relative duality gap
  dval = max(dval, -evalDual(A, beta, yy, lambda, gamma, sz, ind));
  res(kk)=1-dval/fval(kk);

  if opt.verbose
    fprintf('k=%d fval=%g res=%g viol=%s eta=%s\n',...
            kk, fval(kk), res(kk), printvec(viol),printvec([eta eta1]));
  end

  % Stopping tests (the '<' comparisons were stripped in this copy of
  % the file; restored here)
  if kk>1 && res(kk)<opt.tol
    break;
  end

  if kk>opt.maxiter
    break;
  end

  kk=kk+1;
end

fprintf('k=%d fval=%g res=%g viol=%s eta=%g\n',...
        kk, fval(kk), res(kk), printvec(viol),eta);


% evalDual - dual objective value after projecting (A,beta) into the
% dual-feasible region.
function dval=evalDual(A, beta, yy, lambda, gamma, sz, ind)

nd=length(A);

% Average of the multipliers (including beta on observed entries)
Am=zeros(sz);
for jj=1:nd
  Am=Am+flatten_adj(A{jj},sz,jj);
end

Am(ind)=Am(ind)+beta;
Am=Am/nd;

% Center the multipliers and compute the common feasibility scaling
fact=1;
for jj=1:nd
  A{jj}=A{jj}-flatten(Am,jj);
  ss=pcaspec(A{jj},1,10);
  fact=min(fact,gamma(jj)/ss);
end

% beta must also satisfy the L1-dual box constraint |beta|<=1/lambda
fact = min(fact, 1/lambda/max(abs(beta)));

As=zeros(sz);
for jj=1:nd
  A{jj}=A{jj}*fact;
  As=As+flatten_adj(A{jj},sz,jj);
end
beta=beta*fact;
As(ind)=As(ind)+beta;

dval = yy'*beta;
% tensormix_adm - computes the reconstruction of a partially
% observed tensor via the latent (mixture) approach
%
% Syntax
%  [X,Z,A,fval,res]=tensormix_adm(X, I, yy, lambda, varargin)
%
% Inputs
%  X      : tensor giving the size of the problem (contents unused)
%  I      : cell array of subscripts of observed entries
%  yy     : observed values (column vector)
%  lambda : loss weight
%  options: 'eta', 'gamma', 'tol', 'verbose', 'yfact', 'maxiter'
%
% Outputs
%  X    : reconstructed tensor (sum of the latent components)
%  Z    : latent components (Z{jj} scaled by nd on output)
%  A    : dual variable (tensor supported on the observed entries)
%  fval : objective values per iteration
%  res  : relative duality gap per iteration
%
% Reference
%  "Estimation of low-rank tensors via convex optimization"
%  Ryota Tomioka, Kohei Hayashi, and Hisashi Kashima
%  http://arxiv.org/abs/1010.0789
%
% Copyright(c) 2010-2014 Ryota Tomioka
% This software is distributed under the MIT license. See license.txt

function [X,Z,A,fval,res]=tensormix_adm(X, I, yy, lambda, varargin)

opt=propertylist2struct(varargin{:});
opt=set_defaults(opt, 'eta', [],'gamma',[], 'tol', 1e-3, 'verbose', 0,'yfact',10,'maxiter',2000);

sz=size(X);
nd=ndims(X);

if ~isempty(opt.gamma)
  gamma=opt.gamma;
else
  gamma=ones(1,nd);
end

% Note: here eta scales WITH std(yy) (dual step), unlike the
% overlapped solvers where it scales with 1/std.
if ~isempty(opt.eta)
  eta=opt.eta;
else
  eta=opt.yfact*std(yy);
end

ind=sub2ind(sz, I{:});
% (removed unused locals: ind0, m, gval)

Z=cell(1,nd); % latent components (as unfoldings)
V=cell(1,nd); % auxiliary variables
S=cell(1,nd); % singular values
for jj=1:nd
  szj = [sz(jj), prod(sz)/sz(jj)];
  Z{jj} = zeros(szj);
  V{jj} = zeros(szj);
end

kk=1;
nsv=10*ones(1,nd);
alpha=yy;
A=zeros(sz); A(ind)=alpha;
while 1
  % Z update: soft-threshold each latent component
  for jj=1:nd
    Ztmp = Z{jj}+eta*flatten(A,jj);
    [Z{jj},S{jj},nsv(jj)]=softth(Ztmp,gamma(jj)*eta,nsv(jj));
    V{jj}=(Ztmp-Z{jj})/eta;

    viol(jj)=norm(flatten(A,jj)-V{jj},'fro');
  end

  Zsum = zeros(sz);
  Vsum = zeros(sz);
  for jj=1:nd
    Zsum=Zsum+flatten_adj(Z{jj},sz,jj);
    Vsum=Vsum+flatten_adj(V{jj},sz,jj);
  end

  % alpha (dual) update on the observed entries
  alpha = (yy-Zsum(ind)+eta*Vsum(ind))/(lambda+eta*nd);
  A(ind)=alpha;

  % Compute the objective
  if lambda>0
    fval(kk)=0.5*sum((Zsum(ind)-yy).^2)/lambda;
  else
    Zm=(Zsum(ind)-yy)/nd;
    fval(kk)=0;
  end

  for jj=1:nd
    if lambda>0
      fval(kk)=fval(kk)+gamma(jj)*sum(S{jj});
    else
      % lambda==0: project out the interpolation error before
      % evaluating the trace norms
      Ztmp=flatten_adj(Z{jj},sz,jj);
      Ztmp(ind)=Ztmp(ind)-Zm;
      fval(kk)=fval(kk)+gamma(jj)*sum(svd(flatten(Ztmp,jj)));
    end
  end

  % Compute the dual objective (scale alpha into the feasible region)
  fact=1;
  for jj=1:nd
    fact=min(fact, gamma(jj)/norm(flatten(A,jj)));
  end

  aa = alpha*fact;

  if kk>1
    dval=max(dval,-0.5*lambda*sum(aa.^2)+aa'*yy);
  else
    dval=-inf;
  end
  res(kk)=1-dval/fval(kk);

  if opt.verbose
    fprintf('[%d] fval=%g res=%g viol=%s\n', kk, fval(kk), ...
            res(kk), printvec(viol));
  end

  % Stopping tests (the '<' comparisons were stripped in this copy of
  % the file; restored here)
  if kk>1 && res(kk)<opt.tol
    break;
  end

  if kk>opt.maxiter
    break;
  end

  kk=kk+1;
end

fprintf('[%d] fval=%g res=%g viol=%s eta=%g\n', kk, fval(kk), ...
        res(kk), printvec(viol),eta);

% Assemble the reconstruction as the sum of the latent components;
% Z{jj} is rescaled by nd for the caller (as in the original code)
X=zeros(sz);
for jj=1:nd
  X = X + flatten_adj(Z{jj},sz,jj);
  Z{jj}=Z{jj}*nd;
end