├── ConvNet.m
├── LICENSE
├── README.md
├── data
│   ├── test.images.bin
│   ├── test.labels.bin
│   ├── train.images.bin
│   └── train.labels.bin
├── dataClass.m
├── demoMNIST.m
├── html
│   ├── demoMNIST.html
│   ├── demoMNIST.png
│   ├── demoMNIST_01.png
│   └── demoMNIST_02.png
├── lossClass.m
└── visualizeNetwork.m
/ConvNet.m: -------------------------------------------------------------------------------- 1 | classdef ConvNet < handle 2 | 3 | properties (SetAccess = private) 4 | net; 5 | theta; 6 | dtheta; 7 | nLayers; 8 | O; 9 | delta; 10 | lossInd; 11 | Loss; 12 | AllLoss; 13 | AvgTheta; 14 | end 15 | properties (SetAccess = private, GetAccess = private) 16 | mygpuArray; 17 | mygather; 18 | printDecay; 19 | printIter; 20 | snapshotFile; 21 | end 22 | 23 | 24 | methods 25 | 26 | function this = ConvNet(netD,atGPU,weightsInitialization) 27 | % Constructor. See demoMNIST for example 28 | if nargin<3 29 | weightsInitialization = 'Orthogonal'; 30 | end 31 | if atGPU 32 | this.mygpuArray = @(x) gpuArray(x); 33 | this.mygather = @(x) gather(x); 34 | else 35 | this.mygpuArray = @(x) x; 36 | this.mygather = @(x) x; 37 | end 38 | netD=this.add_reduced_conv_layers(netD); 39 | this.initializeNet(netD,atGPU); 40 | this.initializeWeights(weightsInitialization); 41 | this.nLayers = length(this.net); 42 | this.delta = cell(size(this.O)); 43 | % make everything single and atGPU 44 | this.theta = this.mygpuArray(single(this.theta)); 45 | for i=1:length(this.O) 46 | if ~isempty(this.O{i}) 47 | this.O{i} = this.mygpuArray(this.O{i}); 48 | end 49 | end 50 | 51 | end 52 | 53 | % forward function 54 | function forward(this,I) 55 | 56 | for i=1:this.nLayers 57 | outInd = this.net{i}.outInd; 58 | inInd = this.net{i}.inInd; 59 | switch this.net{i}.type 60 | case 'input' 61 | this.O{outInd} = this.net{i}.data.get(I); 62 | case 'duplicate' 63 | for j=outInd 64 | this.O{j} = this.O{inInd}; 65 | end 66 | case 'im2col' 67 | this.O{outInd} = this.O{inInd}(this.net{i}.im2colInd); % fast im2col 68 | case 'max' 69 | this.O{outInd} = max(this.O{inInd}); 70 | case 'mean' 71 | this.O{outInd} = mean(this.O{inInd}); 72 | case 'reshape' 73 | this.O{outInd} = reshape(this.O{inInd},this.net{i}.newshape); 74 | case 'permute' 75 | this.O{outInd} = permute(this.O{inInd},this.net{i}.newshape); 76 | case 'affine' 77 | this.O{outInd} = reshape(this.theta(this.net{i}.Ws:this.net{i}.We),this.net{i}.Wshape)*this.O{inInd} + this.theta(this.net{i}.bs:this.net{i}.be)*this.net{i}.ones; 78 | case 'relu' 79 | this.O{outInd} = max(0,this.O{inInd}); 80 | case 'clamp' 81 | this.O{outInd} = min(1,max(0,this.O{inInd})); 82 | case 'loss' 83 | this.O{outInd} = this.net{i}.loss.LossAndErr(this.O(inInd)); 84 | case 'concat' 85 | this.O{outInd} = cat(this.net{i}.dim,this.O{inInd}); 86 | case 'split' 87 | tmp = cell(length(size(this.O{inInd})),1); 88 | for j=1:length(tmp), tmp{j} = ':'; end; 89 | pos=0; nl = size(this.O{outInd(1)},this.net{i}.dim); 90 | for j=1:length(outInd) 91 | tmp{this.net{i}.dim} = pos + (1:nl); 92 | pos = pos+nl; 93 | this.O{outInd(j)} = this.O{inInd}(tmp{:}); 94 | end 95 | case 'pad' 96 | blobSize = size(this.O{inInd}); 97 | this.O{outInd}(1:blobSize(1),1:blobSize(2),:,:) = this.O{inInd}; 98 | case 'elementwiseProd' 99 | this.O{outInd} = this.O{inInd(1)} .* this.O{inInd(2)}; 100 | case 'add' 101 | this.O{outInd} = this.net{i}.alpha * this.O{inInd(1)} + this.net{i}.beta *this.O{inInd(2)}; 102 | otherwise 103 | assert(false,'Unknown Layer type') 104 | end 105 | end 106 | end 107 | 108 | % backward function 109 | function backward(this,lam) 110 | 111 |
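% backward runs back-propagation over the layers in reverse order: it fills
% this.delta{j} with the gradient of the loss with respect to blob O{j}
% wherever it is needed, and accumulates the parameter gradient into
% this.dtheta (the lam*theta ridge term is added at the end). Note that the
% first statement below, this.dtheta = this.theta - this.theta, is simply a
% way to create an all-zeros vector with the same size, class and (CPU/GPU)
% location as theta.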
this.dtheta = this.theta-this.theta; 112 | 113 | for i=this.nLayers:-1:1 114 | outInd = this.net{i}.outInd; 115 | inInd = this.net{i}.inInd; 116 | 117 | if isequal(this.net{i}.type,'affine') 118 | this.dtheta(this.net{i}.Ws:this.net{i}.We) = reshape( this.delta{outInd} * this.O{inInd}' , this.net{i}.We-this.net{i}.Ws+1 , 1); % Nabla_W = delta*O{i-1}'; 119 | this.dtheta(this.net{i}.bs:this.net{i}.be) = reshape( sum(this.delta{outInd},2)' , this.net{i}.be-this.net{i}.bs+1 , 1); % Nabla_b = sum(delta'); 120 | end 121 | 122 | if ~(this.net{i}.needBackward) 123 | continue; 124 | end 125 | 126 | switch this.net{i}.type 127 | case 'loss' 128 | this.delta{inInd(1)} = this.net{i}.loss.Grad(this.O(inInd)); 129 | case 'duplicate' 130 | this.delta{inInd} = this.delta{outInd(1)}; 131 | for j=2:length(outInd) 132 | this.delta{inInd} = this.delta{inInd} + this.delta{outInd(j)}; 133 | end 134 | case 'im2col' 135 | % method I 136 | tmp = cumsum(this.delta{outInd}(this.net{i}.sortedInd)); 137 | this.delta{inInd} = reshape([tmp(1) ; diff(tmp(this.net{i}.I))] , size(this.O{inInd})); 138 | case 'max' 139 | s = [size(this.O{inInd},1) 1]; 140 | tmp = (repmat(this.O{outInd},s) == this.O{inInd}); 141 | this.delta{inInd} = repmat(this.delta{outInd} ./ sum(tmp),s) .* tmp; 142 | case 'mean' 143 | s = [size(this.O{inInd},1) 1]; 144 | this.delta{inInd} = repmat(this.delta{outInd} / s(1),s); 145 | case 'reshape' 146 | this.delta{inInd} = reshape(this.delta{outInd}, this.net{i}.oldshape); 147 | this.O{inInd} = reshape(this.O{outInd}, this.net{i}.oldshape); 148 | case 'permute' 149 | this.delta{inInd} = permute(this.delta{outInd}, this.net{i}.oldshape); 150 | case 'affine' 151 | this.delta{inInd} = reshape(this.theta(this.net{i}.Ws:this.net{i}.We),this.net{i}.Wshape)' * this.delta{outInd}; 152 | case 'relu' 153 | this.delta{inInd} = this.delta{outInd} .* (this.O{outInd} > 0); 154 | case 'clamp' 155 | this.delta{inInd} = this.delta{outInd} .* ((this.O{outInd} > 0) & (this.O{outInd} < 1)); % unit slope only strictly inside the [0,1] clamp 156 | case 'concat' 157 | tmp = cell(length(size(this.O{outInd})),1); 158 | for j=1:length(tmp), tmp{j} = ':'; end; 159 | pos=0; nl = size(this.O{inInd(1)},this.net{i}.dim); 160 | for j=1:length(inInd) 161 | tmp{this.net{i}.dim} = pos + (1:nl); 162 | pos = pos+nl; 163 | this.delta{inInd(j)} = this.delta{outInd}(tmp{:}); 164 | end 165 | case 'split' 166 | this.delta{inInd} = cat(this.net{i}.dim,this.delta{outInd}); 167 | case 'pad' 168 | blobSize = size(this.O{inInd}); 169 | this.delta{inInd} = this.delta{outInd}(1:blobSize(1),1:blobSize(2),:,:); 170 | case 'elementwiseProd' 171 | this.delta{inInd(1)} = this.delta{outInd} .* this.O{inInd(2)}; 172 | this.delta{inInd(2)} = this.delta{outInd} .* this.O{inInd(1)}; 173 | case 'add' 174 | this.delta{inInd(1)} = this.net{i}.alpha * this.delta{outInd}; 175 | this.delta{inInd(2)} = this.net{i}.beta * this.delta{outInd}; 176 | otherwise 177 | assert(false,'Unknown Layer type') 178 | end 179 | 180 | end 181 | 182 | % and add the regularization gradient 183 | this.dtheta = this.dtheta + lam*this.theta; 184 | 185 | end 186 | 187 | 188 | function Nesterov(this,T,learningRate,mu,lam,param) 189 | % SGD with Nesterov's momentum 190 | % See demoMNIST.m for usage 191 | 192 | this.prepareForStatAndSnapshot(T,param); 193 | m = this.net{1}.data.m; 194 | 195 | history = this.theta-this.theta; 196 | 197 | for t=1:T 198 | % momentum 199 | history = mu*history; 200 | this.theta = this.theta + history; 201 | 202 | % choose mini batch 203 | i = this.net{1}.data.nextRand(); 204 | % forward backward 205 |
this.forward(i); 206 | this.backward(lam); 207 | 208 | % update 209 | eta = learningRate(t); 210 | this.theta = this.theta - eta*this.dtheta; 211 | history = history - eta*this.dtheta; 212 | 213 | % statistics for printing and snapshot 214 | this.statAndSnapshot(t,lam); 215 | end 216 | end 217 | 218 | function SGD(this,T,learningRate,lam,param) 219 | % vanilla SGD 220 | if ~isfield(param,'normalizedGradient') 221 | param.normalizedGradient = false; 222 | end 223 | this.prepareForStatAndSnapshot(T,param); 224 | m = this.net{1}.data.m; 225 | 226 | for t=1:T 227 | % choose mini batch 228 | i = this.net{1}.data.nextRand(); 229 | % forward backward 230 | this.forward(i); 231 | this.backward(lam); 232 | 233 | % learning rate 234 | eta = learningRate(t); 235 | if param.normalizedGradient 236 | eta = eta / norm(this.dtheta); 237 | end 238 | 239 | % update 240 | this.theta = this.theta - eta*this.dtheta; 241 | 242 | % statistics for printing and snapshot 243 | this.statAndSnapshot(t,lam); 244 | 245 | end 246 | end 247 | 248 | 249 | function alpha=SDCA(this,alpha,T,eta,lam,param) 250 | % SDCA solver 251 | 252 | this.prepareForStatAndSnapshot(T,param); 253 | m = this.net{1}.data.m; 254 | if ~isempty(alpha) 255 | % initialize primal from dual 256 | [d,n] = size(alpha); 257 | assert(n == m); 258 | assert(d == length(this.theta)); 259 | else 260 | % initialize by random dual variables 261 | d = length(this.theta); 262 | n = m; 263 | alpha = randn(d,n,'single')*lam; 264 | end 265 | beta = eta*lam*n; 266 | this.theta = this.mygpuArray(single(mean(alpha,2)/lam)); 267 | 268 | for t=1:T 269 | % choose mini batch 270 | i = this.net{1}.data.nextRand(); 271 | galpha = this.mygpuArray(alpha(:,i)); 272 | % forward backward 273 | this.forward(i); 274 | this.backward(0); 275 | 276 | % update 277 | v = this.dtheta+galpha; 278 | galpha = galpha - beta*v; 279 | this.theta = this.theta - eta*v; 280 | alpha(:,i) = this.mygather(galpha); 281 | 282 | % statistics for printing and snapshot 283 | this.statAndSnapshot(t,lam); 284 | end 285 | end 286 | 287 | function calcLossAndErr(this) 288 | m = this.net{1}.data.m; 289 | this.Loss = this.O{this.lossInd}-this.O{this.lossInd}; 290 | for i=1:m 291 | this.forward(i); 292 | this.Loss = this.Loss + this.O{this.lossInd}; 293 | end 294 | this.Loss = this.Loss/m; 295 | end 296 | 297 | function setTheta(this,newtheta) 298 | this.theta = this.mygpuArray(newtheta); 299 | end 300 | 301 | 302 | function initializeWeights(this,initMethod) 303 | for i=1:length(this.net) 304 | if strcmp(this.net{i}.type,'affine') 305 | switch initMethod 306 | case 'Orthogonal' 307 | [Q,~] = qr(randn(this.net{i}.Wshape)'); 308 | W = Q(1:this.net{i}.Wshape(1),:); 309 | case 'Xavier' 310 | W = (rand(this.net{i}.Wshape)-0.5)/ sqrt(this.net{i}.Wshape(2)) * sqrt(3); 311 | otherwise 312 | fprintf(1,'Unknown initialization method %s\n',initMethod); 313 | end 314 | this.theta(this.net{i}.Ws:this.net{i}.We) = this.mygpuArray(W(:)); 315 | end 316 | end 317 | end 318 | 319 | 320 | end 321 | 322 | 323 | % Private methods for initializing the network 324 | methods (Access = private) 325 | function initializeNet(this,netD,atGPU) 326 | % construct a network (net,theta) and initialize the network based on a 327 | % description given in netD 328 | 329 | 330 | % find maximal value of Oind and required number of layers 331 | maxOind = 0; 332 | for i=1:length(netD) 333 | maxOind = max(maxOind,max(netD{i}.outInd)); 334 | end 335 | lenO = maxOind; 336 | this.nLayers = length(netD); 337 | for i=1:length(netD) 338 | if
strcmp(netD{i}.type,'conv') 339 | this.nLayers = this.nLayers + 3; 340 | lenO = lenO + 2; 341 | elseif strcmp(netD{i}.type,'maxpool') 342 | this.nLayers = this.nLayers + 2; 343 | lenO = lenO + 1; 344 | elseif strcmp(netD{i}.type,'avgpool') 345 | this.nLayers = this.nLayers + 2; 346 | lenO = lenO + 1; 347 | end 348 | end 349 | 350 | 351 | % initialize 352 | this.net = cell(this.nLayers,1); 353 | this.O = cell(lenO,1); 354 | layerInd = 0; 355 | this.theta = []; 356 | needBack = false(lenO,1); 357 | 358 | for i=1:length(netD) 359 | 360 | Oind = netD{i}.outInd; 361 | 362 | % determine the needBackward flag 363 | if ~strcmp(netD{i}.type,'input') 364 | inInd = netD{i}.inInd; 365 | needBackward = sum(needBack(inInd))>0; 366 | end 367 | 368 | switch netD{i}.type 369 | case 'input' 370 | this.O{Oind} = zeros(netD{i}.blobSize,'single'); 371 | layerInd = layerInd+1; 372 | this.net{layerInd} = struct('type','input','outInd',Oind,'inInd',0,'needBackward',false,'data',... 373 | dataClass(netD{i}.fName,netD{i}.dataType,netD{i}.blobSize,netD{i}.scale,inf,atGPU)); 374 | 375 | case 'duplicate' 376 | layerInd = layerInd+1; 377 | this.net{layerInd} = struct('type','duplicate','outInd',Oind,'inInd',inInd,'needBackward',needBackward); 378 | for j=Oind 379 | this.O{j} = this.O{inInd}; 380 | end 381 | needBack(Oind) = needBack(inInd); 382 | 383 | case 'split' 384 | layerInd = layerInd+1; 385 | this.net{layerInd} = struct('type','split','outInd',Oind,'inInd',inInd,'dim',netD{i}.dim,'needBackward',needBackward); 386 | blobSize = size(this.O{inInd}); 387 | for j=1:(length(blobSize)) 388 | cellInd{j} = ':'; 389 | end 390 | curDim=0; jump=blobSize(netD{i}.dim)/length(Oind); 391 | for j=Oind 392 | cellInd{netD{i}.dim} = curDim+(1:jump); 393 | curDim=curDim+jump; 394 | this.O{j} = this.O{inInd}(cellInd{:}); 395 | end 396 | needBack(Oind) = needBack(inInd); 397 | 398 | case 'concat' 399 | layerInd = layerInd+1; 400 | this.net{layerInd} = netD{i}; this.net{layerInd}.needBackward = needBackward; 401 | this.O{Oind} = cat(netD{i}.dim,this.O{inInd}); 402 | needBack(Oind) = sum(needBack(inInd))>0; 403 | 404 | 405 | case 'conv' 406 | 407 | originalBlobDimSize = size(this.O{inInd}); 408 | if length(originalBlobDimSize)<4 409 | originalBlobDimSize = [originalBlobDimSize ones(1,4-length(originalBlobDimSize))]; 410 | end 411 | 412 | % construct im2col layer 413 | maxOind = maxOind + 1; 414 | [layer,blobDim,height,width] = this.constructIm2ColLayer(netD{i}.kernelsize,netD{i}.stride,originalBlobDimSize,needBackward,maxOind,inInd,true); 415 | layerInd = layerInd+1; 416 | this.net{layerInd} = layer; 417 | this.O{maxOind} = zeros(blobDim,'single'); 418 | needBack(maxOind) = needBack(inInd); 419 | 420 | % then affine layer 421 | nOut = netD{i}.nOutChannels; 422 | W = zeros(netD{i}.nOutChannels,blobDim(1)); 423 | Wind = length(this.theta)+(1:length(W(:))); 424 | this.theta = [this.theta ; W(:)]; 425 | b = zeros(nOut,1) + netD{i}.bias_filler; 426 | bind = length(this.theta)+(1:length(b)); 427 | this.theta = [this.theta ; b]; 428 | layerInd = layerInd+1; 429 | maxOind = maxOind + 1; 430 | this.net{layerInd} = struct('type','affine','outInd',maxOind,'inInd',maxOind-1,'ones',this.mygpuArray(ones(1,blobDim(2),'single')),'Ws',min(Wind),'We',max(Wind),'Wshape',size(W),'bs',min(bind),'be',max(bind),'needBackward',needBackward); 431 | this.O{maxOind} = zeros(size(W,1),blobDim(2),'single'); 432 | blobDim = [size(W,1) blobDim(2)]; 433 | needBack(maxOind) = true; 434 | 435 | 436 | % and then reshape and permute layers 437 | % currently, the order in 
memory is 438 | % (channels,height,width,items) 439 | % we want it to be 440 | % (height,width,channels,items) 441 | channels = netD{i}.nOutChannels; 442 | items = originalBlobDimSize(4); 443 | layerInd = layerInd+1; 444 | this.net{layerInd} = struct('type','reshape','outInd',maxOind,'inInd',maxOind,'newshape',[channels height width items],'oldshape',[channels height*width*items],'needBackward',true); 445 | this.O{maxOind} = reshape(this.O{maxOind},[channels height width items]); 446 | needBack(maxOind) = true; 447 | layerInd = layerInd+1; 448 | this.net{layerInd} = struct('type','permute','outInd',Oind,'inInd',maxOind,'newshape',[2 3 1 4],'oldshape',[3 1 2 4],'needBackward',true); 449 | this.O{Oind} = permute(this.O{maxOind},[2 3 1 4]); 450 | needBack(Oind) = true; 451 | 452 | 453 | case 'flatten' 454 | 455 | blobDim = size(this.O{inInd}); 456 | if length(blobDim)<4 457 | blobDim = [blobDim ones(1,4-length(blobDim))]; 458 | end 459 | newshape = [prod(blobDim(1:3)) blobDim(4)]; 460 | layerInd = layerInd+1; 461 | this.O{Oind} = zeros(newshape,'single'); 462 | this.net{layerInd} = struct('type','reshape','outInd',Oind,'inInd',inInd,'newshape',newshape,'oldshape',blobDim,'needBackward',needBackward); 463 | needBack(Oind) = needBack(inInd); 464 | 465 | case 'affine' 466 | 467 | blobDim = size(this.O{inInd}); 468 | ncol = blobDim(2); 469 | nrows = blobDim(1); 470 | W = zeros(netD{i}.nOutChannels,nrows); 471 | Wind = length(this.theta)+(1:length(W(:))); 472 | this.theta = [this.theta ; W(:)]; 473 | b = zeros(netD{i}.nOutChannels,1) + netD{i}.bias_filler; 474 | bind = length(this.theta)+(1:length(b)); 475 | this.theta = [this.theta ; b]; 476 | this.O{Oind} = zeros(size(W,1),ncol,'single'); 477 | layerInd = layerInd+1; 478 | this.net{layerInd} = struct('type','affine','outInd',Oind,'inInd',inInd,'ones',this.mygpuArray(ones(1,ncol,'single')),'Ws',min(Wind),'We',max(Wind),'Wshape',size(W),'bs',min(bind),'be',max(bind),'needBackward',needBackward); 479 | 480 | needBack(Oind) = true; 481 | 482 | case {'maxpool','avgpool'} 483 | 484 | originalBlobDimSize = size(this.O{inInd}); 485 | if length(originalBlobDimSize)<4 486 | originalBlobDimSize = [originalBlobDimSize ones(1,4-length(originalBlobDimSize))]; 487 | end 488 | 489 | 490 | % construct im2col layer 491 | maxOind = maxOind + 1; 492 | [layer,blobDim,height,width] = this.constructIm2ColLayer(netD{i}.kernelsize,netD{i}.stride,originalBlobDimSize,needBackward,maxOind,inInd,false); 493 | this.O{maxOind} = zeros(blobDim,'single'); 494 | layerInd = layerInd+1; 495 | this.net{layerInd} = layer; 496 | needBack(maxOind) = needBack(inInd); 497 | 498 | 499 | % then max layer 500 | blobDim = [1 blobDim(2)]; 501 | layerInd = layerInd+1; 502 | if strcmp(netD{i}.type,'maxpool') 503 | this.net{layerInd} = struct('type','max','outInd',Oind,'inInd',maxOind,'needBackward',needBackward); 504 | else 505 | this.net{layerInd} = struct('type','mean','outInd',Oind,'inInd',maxOind,'needBackward',needBackward); 506 | end 507 | 508 | % and then reshape 509 | channels = originalBlobDimSize(3); 510 | items = originalBlobDimSize(4); 511 | layerInd = layerInd+1; 512 | this.net{layerInd} = struct('type','reshape','outInd',Oind,'inInd',Oind,'newshape',[height width channels items],'oldshape',blobDim,'needBackward',needBackward); 513 | this.O{Oind} = zeros([height width channels items],'single'); 514 | needBack(Oind) = needBack(inInd); 515 | 516 | 517 | case 'relu' 518 | layerInd = layerInd+1; 519 | this.O{Oind} = this.O{inInd}; 520 | this.net{layerInd} = 
struct('type','relu','outInd',Oind,'inInd',inInd,'needBackward',needBackward); 521 | needBack(Oind) = needBack(inInd); 522 | 523 | case 'clamp' 524 | layerInd = layerInd+1; 525 | this.O{Oind} = this.O{inInd}; 526 | this.net{layerInd} = struct('type','clamp','outInd',Oind,'inInd',inInd,'needBackward',needBackward); 527 | needBack(Oind) = needBack(inInd); 528 | 529 | case 'reshape' 530 | layerInd = layerInd+1; 531 | this.O{Oind} = reshape(this.O{inInd},netD{i}.newshape); 532 | this.net{layerInd} = struct('type','reshape','outInd',Oind,'inInd',inInd,'newshape',netD{i}.newshape,'oldshape',size(this.O{inInd}),'needBackward',needBackward); 533 | needBack(Oind) = needBack(inInd); 534 | 535 | case 'permute' 536 | [~,ind] = sort(netD{i}.newshape); 537 | layerInd = layerInd+1; 538 | this.O{Oind} = permute(this.O{inInd},netD{i}.newshape); 539 | this.net{layerInd} = struct('type','permute','outInd',Oind,'inInd',inInd,'newshape',netD{i}.newshape,'oldshape',ind,'needBackward',needBackward); 540 | needBack(Oind) = needBack(inInd); 541 | 542 | case 'pad' 543 | layerInd = layerInd+1; 544 | blobSize = size(this.O{inInd}); 545 | this.O{Oind} = zeros(blobSize(1)+netD{i}.amount,blobSize(2)+netD{i}.amount,blobSize(3),blobSize(4),'single'); 546 | this.net{layerInd} = struct('type','pad','outInd',Oind,'inInd',inInd,'amount',netD{i}.amount,'needBackward',needBackward); 547 | needBack(Oind) = needBack(inInd); 548 | 549 | case 'elementwiseProd' 550 | layerInd = layerInd+1; 551 | this.O{Oind} = this.O{inInd(1)}; 552 | this.net{layerInd} = struct('type','elementwiseProd','outInd',Oind,'inInd',inInd,'needBackward',needBackward); 553 | needBack(Oind) = sum(needBack(inInd))>0; 554 | 555 | case 'add' 556 | layerInd = layerInd+1; 557 | this.O{Oind} = this.O{inInd(1)}; 558 | this.net{layerInd} = struct('type','add','outInd',Oind,'inInd',inInd,'alpha',netD{i}.alpha,'beta',netD{i}.beta,'needBackward',needBackward); 559 | needBack(Oind) = sum(needBack(inInd))>0; 560 | 561 | case 'loss' 562 | layerInd = layerInd+1; 563 | this.net{layerInd} = struct('type','loss','outInd',Oind,'inInd',inInd,'loss',lossClass(netD{i}.lossType),'needBackward',needBackward); 564 | this.O{Oind} = this.net{layerInd}.loss.LossAndErr(this.O(inInd)); 565 | needBack(Oind) = needBack(inInd(1)); 566 | this.lossInd = Oind; 567 | 568 | otherwise 569 | assert(false,'Unknown Layer type') 570 | end 571 | end 572 | 573 | end 574 | 575 | 576 | function [layer,blobDim,outHeight,outWidth] = constructIm2ColLayer(this,ksize,kstride,blobDim,needBackward,Oind,inInd,isConv) 577 | 578 | B = reshape((1:(prod(blobDim))),blobDim); 579 | C = []; 580 | 581 | if isConv 582 | for t=1:blobDim(4) 583 | w=0; 584 | while (w+ksize <= blobDim(2)) 585 | h=0; 586 | while (h+ksize <= blobDim(1)) 587 | C = [C reshape(B(h+(1:ksize),w+(1:ksize),:,t),ksize*ksize*blobDim(3),1)]; 588 | h = h+kstride; 589 | end 590 | w=w+kstride; 591 | end 592 | end 593 | outHeight = h/kstride; 594 | outWidth = w/kstride; 595 | else % for pooling layer 596 | outHeight = ceil((blobDim(1) - ksize)/kstride) + 1; 597 | outWidth = ceil((blobDim(2) - ksize)/kstride) + 1; 598 | for t=1:blobDim(4) 599 | for c=1:blobDim(3) 600 | for ww=1:outWidth 601 | ws = (ww-1)*kstride + 1; 602 | we = min(size(B,2),ws-1+ksize); 603 | Iw = zeros(ksize,1) + we; 604 | Iw((ws:we)-ws+1) = (ws:we); 605 | for hh=1:outHeight 606 | hs = (hh-1)*kstride + 1; 607 | he = min(size(B,1),hs-1+ksize); 608 | Ih = zeros(ksize,1) + he; 609 | Ih((hs:he)-hs+1) = (hs:he); 610 | C = [C reshape(B(Ih,Iw,c,t),ksize*ksize,1)]; 611 | end 612 | end 613 | end 614 
| end 615 | end 616 | [val,ind] = sort(C(:)); 617 | I = [find(val(1:end-1) ~= val(2:end)) ; length(val)]; 618 | 619 | % method II -- not implemented properly 620 | % backwardMat = zeros(ksize*ksize,prod(blobDim)); 621 | % backwardMat(1,1) = 1; 622 | % for j=2:length(I) 623 | % backwardMat(1:length(ind((I(j-1)+1):I(j))),j) = ind((I(j-1)+1):I(j)); 624 | % end 625 | % J = find(backwardMat>0); bI = backwardMat(J); 626 | % 627 | % For the above method we will need: 628 | %this.net{i}.backwardMat(this.net{i}.J) = delta(this.net{i}.bI); 629 | %delta = reshape( sum(this.net{i}.backwardMat) , size(this.O{this.net{i}.inInd})); 630 | 631 | blobDim = size(C); 632 | layer = struct('type','im2col','outInd',Oind,'inInd',inInd,'im2colInd',this.mygpuArray(uint32(C)),'sortedInd',this.mygpuArray(uint32(ind)),'I',this.mygpuArray(uint32(I)),'needBackward',needBackward); 633 | %layer = struct('type','im2col','im2colInd',mygpuArray(uint32(C)),'backwardMat',mygpuArray(zeros(size(backwardMat),'single')),'J',mygpuArray(uint32(J)),'bI',mygpuArray(uint32(bI)),'sortedInd',mygpuArray(uint32(ind)),'I',mygpuArray(uint32(I)),'inInd',int32(inInd),'outInd',int32(outInd),'needBackward',needBackward); 634 | 635 | end 636 | 637 | function newNetD=add_reduced_conv_layers(this,netD) 638 | 639 | layerInd=0; maxOind = netD{end}.outInd; 640 | for i=1:length(netD) 641 | layerInd = layerInd+1; 642 | if strcmp(netD{i}.type,'reducedConv') 643 | inInd = netD{i}.inInd; outInd = netD{i}.outInd; nC = netD{i}.nOutChannels; 644 | % construct conv 1x1 645 | maxOind = maxOind + 1; 646 | newNetD{layerInd} = struct('type','conv','inInd',inInd,'outInd',maxOind,'kernelsize',1,'stride',1,'nOutChannels',netD{i}.rank*nC,'bias_filler',netD{i}.bias_filler); 647 | 648 | % construct the split layer 649 | layerInd = layerInd+1; 650 | tmpInChannels = maxOind+(1:nC); 651 | newNetD{layerInd} = struct('type','split','inInd',maxOind,'outInd',tmpInChannels,'dim',3); 652 | maxOind = maxOind + nC; 653 | 654 | % construct the group conv layers 655 | tmpOutChannels = tmpInChannels(end) + (1:nC); 656 | for j=1:nC 657 | layerInd = layerInd+1; 658 | newNetD{layerInd} = struct('type','conv','inInd',tmpInChannels(j),'outInd',tmpOutChannels(j),'kernelsize',netD{i}.kernelsize,'stride',netD{i}.stride,'nOutChannels',1,'bias_filler',netD{i}.bias_filler); 659 | end 660 | 661 | % concat layer 662 | layerInd = layerInd+1; 663 | newNetD{layerInd} = struct('type','concat','inInd',tmpOutChannels,'outInd',outInd,'dim',3); 664 | 665 | else 666 | newNetD{layerInd} = netD{i}; 667 | end 668 | end 669 | 670 | end 671 | 672 | function prepareForStatAndSnapshot(this,T,param) 673 | if isfield(param,'printDecay') 674 | this.printDecay=param.printDecay; 675 | else 676 | this.printDecay=0.9; 677 | end 678 | if isfield(param,'printIter') 679 | this.printIter=param.printIter; 680 | else 681 | this.printIter=T; this.printDecay=0; 682 | end 683 | if isfield(param,'snapshotFile') 684 | this.snapshotFile = param.snapshotFile; 685 | else 686 | this.snapshotFile=[]; 687 | end 688 | this.Loss=this.O{this.lossInd}-this.O{this.lossInd}; 689 | this.AllLoss=repmat(this.Loss,1,floor(T/this.printIter)); 690 | this.AvgTheta = this.theta; 691 | end 692 | 693 | function statAndSnapshot(this,t,lam) 694 | this.Loss = this.printDecay * this.Loss + (1-this.printDecay)*this.O{this.lossInd}; 695 | this.AvgTheta = this.printDecay * this.AvgTheta + (1-this.printDecay)*this.theta; 696 | if (rem(t,this.printIter)==0) 697 | %this.AllLoss(:,t/this.printIter) = this.Loss + 0.5*lam*norm(this.theta)^2; 698 |
this.AllLoss(:,t/this.printIter) = this.Loss; 699 | fprintf(1,'Iter: %d: ',t); for i=1:length(this.Loss), fprintf(1,'%f ',this.Loss(i)); end; fprintf(1,'\n'); 700 | if ~isempty(this.snapshotFile) 701 | fid = fopen(sprintf('%s.%.7d.bin',this.snapshotFile,t),'wb'); 702 | fwrite(fid,this.mygather(this.AvgTheta),'single'); 703 | fclose(fid); 704 | end 705 | end 706 | end 707 | 708 | end 709 | 710 | end 711 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 
64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 
129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | 167 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | EasyConvNet 2 | =========== 3 | 4 | A simple implementation of convolutional networks in Matlab. 5 | 6 | Written by Shai Shalev-Shwartz 7 | http://www.cs.huji.ac.il/~shais/ 8 | 9 | Main features: 10 | - Everything is implemented in Matlab. No mex files, no compilation 11 | is needed. 12 | - If you have the Matlab parallel toolbox, you can use the GPU implementation 13 | by just defining one flag: atGPU = true; 14 | - The implementation was written with simplicity in mind, but it is still 15 | reasonably fast. 16 | - See demoMNIST for a simple usage example. 17 | 18 | 19 | This software is distributed under the GNU LESSER GENERAL PUBLIC 20 | LICENSE. 21 | 22 | THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 23 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY 26 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE 28 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 30 | IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 | OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | 34 | -------------------------------------------------------------------------------- /data/test.images.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shaisha/EasyConvNet/ec15fa5c0d5cf99d3c9441585435d977f06cb174/data/test.images.bin -------------------------------------------------------------------------------- /data/test.labels.bin: -------------------------------------------------------------------------------- 1 |  -------------------------------------------------------------------------------- /data/train.images.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shaisha/EasyConvNet/ec15fa5c0d5cf99d3c9441585435d977f06cb174/data/train.images.bin -------------------------------------------------------------------------------- /dataClass.m: -------------------------------------------------------------------------------- 1 | classdef dataClass < handle 2 | 3 | properties (SetAccess = private) 4 | X; 5 | scale; 6 | atGPU; 7 | type; 8 | m; 9 | blobSize; 10 | blobSizeBytes; 11 | cellInd; 12 | fid; 13 | curInd; 14 | nExamplesInBlock; 15 | curBlock; 16 | blockInds; 17 | nBlocks; 18 | blockStart; 19 | blockEnd; 20 | end 21 | 22 | 23 | methods 24 | function this = dataClass(fName,type,blobSize,scale,maxExamples,atGPU) 25 | 26 | this.atGPU = atGPU; 27 | this.scale = scale; 28 | this.blobSize = blobSize; 29 | this.type = type; 30 | 31 | this.fid = fopen(fName,'rb'); 32 | fread(this.fid,prod(this.blobSize),type); 33 | this.blobSizeBytes = ftell(this.fid); 34 | fseek(this.fid,0,'eof'); 35 | lenInBytes = ftell(this.fid); 36 | this.m = min(floor(lenInBytes/this.blobSizeBytes),maxExamples); 37 | 38 | this.nExamplesInBlock = min(this.m, floor(2^30 / this.blobSizeBytes)); 39 | this.nBlocks = ceil(this.m / this.nExamplesInBlock); 40 | this.curBlock = 0; 41 | this.fetchBlock(1); 42 | 43 | this.cellInd = cell(length(size(this.X)),1); 44 | for i=1:(length(this.cellInd)-1) 45 | this.cellInd{i} = ':'; 46 | end 47 | end 48 | 49 | function delete(this) 50 | fclose(this.fid); 51 | end 52 | 53 | function x = get(this,i) 54 | if i<this.blockStart || i>this.blockEnd 55 | this.fetchBlock(ceil(i/this.nExamplesInBlock)); 56 | end 57 | i = mod(i,this.nExamplesInBlock); 58 | if i==0 59 | i=length(this.blockInds); 60 | end 61 | this.cellInd{end} = i; 62 | if this.atGPU 63 | x = gpuArray(single(this.X(this.cellInd{:})*this.scale)); 64 | else 65 | x = single(this.X(this.cellInd{:})*this.scale); 66 | end 67 | end 68 | 69 | function ind=nextRand(this) 70 | if this.curInd == length(this.blockInds) 71 | this.fetchBlock(randi(this.nBlocks,1,1)); 72 | end 73 | this.curInd = this.curInd+1; 74 | ind = this.blockInds(this.curInd); 75 | end 76 | 77 | function fetchBlock(this,ind) 78 | if ind ~= this.curBlock 79 | this.curBlock = ind; 80 | fseek(this.fid,(this.curBlock-1)*this.nExamplesInBlock*this.blobSizeBytes,'bof'); 81 | this.blockStart = (this.curBlock-1)*this.nExamplesInBlock + 1; 82
| this.blockEnd = min(this.m,this.blockStart - 1 + this.nExamplesInBlock); 83 | n = this.blockEnd - this.blockStart + 1; 84 | this.X = reshape(fread(this.fid,n*prod(this.blobSize),this.type),[this.blobSize n]); 85 | end 86 | this.blockInds = this.blockStart-1 + randperm(this.blockEnd - this.blockStart + 1); 87 | this.curInd = 0; 88 | end 89 | 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /demoMNIST.m: -------------------------------------------------------------------------------- 1 | %% a simple demonstration of the EasyConvNet code 2 | % This code demonstrates the EasyConvNet package on the MNIST data set 3 | % 4 | 5 | %% data preparation 6 | % we need 4 files 7 | trainImages = 'data/train.images.bin'; 8 | trainLabels = 'data/train.labels.bin'; 9 | testImages = 'data/test.images.bin'; 10 | testLabels = 'data/test.labels.bin'; 11 | % The format is as follows 12 | % Suppose X is a 4 dim matlab array with dimensions 28x28x1x60000, 13 | % where X(:,:,1,j) is the j'th image 14 | % Suppose that X are numbers between 0 and 255. 15 | % Then, create trainImages by 16 | % fid = fopen(trainImages,'wb'); fwrite(fid,X(:),'uint8'); fclose(fid); 17 | % To create the label file, suppose we have k classes and let Y be a matrix 18 | % with Y(:,i) being all zeros vector except 1 in the position j, where j is 19 | % the correct label of example i. Then, create trainLabels by: 20 | % fid = fopen(trainLabels,'wb'); fwrite(fid,Y(:),'uint8'); fclose(fid); 21 | 22 | 23 | %% Decide if you want GPU or CPU implementation 24 | atGPU = false; % change this to true if you have a GPU and Matlab parallel toolbox 25 | 26 | %% Define the architecture of a network (the LeNet architecture) 27 | % the architecture is a cell array of structs. 28 | % Each layer is one struct. 29 | % The first layer must be of 'type' input and the last layer must be of 30 | % type 'loss' 31 | % Each layer has fields 'inInd' and 'outInd'. These are indices to where 32 | % the input of the layer comes from and where the output of the layer goes 33 | % to. 34 | lenet = { ... 35 | struct('type','input','inInd',0,'outInd',1,'blobSize',[28 28 1 100],'fName',trainImages,'scale',1/256,'dataType','uint8'), ... 36 | struct('type','input','inInd',0,'outInd',2,'blobSize',[10 100],'fName',trainLabels,'scale',1,'dataType','uint8'), ... 37 | struct('type','conv','inInd',1,'outInd',3,'kernelsize',5,'stride',1,'nOutChannels',20,'bias_filler',0),... 38 | struct('type','maxpool','inInd',3,'outInd',4,'kernelsize',2,'stride',2), ... 39 | struct('type','conv','inInd',4,'outInd',5,'kernelsize',5,'stride',1,'nOutChannels',50,'bias_filler',0),... 40 | struct('type','maxpool','inInd',5,'outInd',6,'kernelsize',2,'stride',2), ... 41 | struct('type','flatten','inInd',6,'outInd',6), ... 42 | struct('type','affine','inInd',6,'outInd',7,'nOutChannels',500,'bias_filler',0), ... 43 | struct('type','relu','inInd',7,'outInd',7), ... 44 | struct('type','affine','inInd',7,'outInd',8,'nOutChannels',10,'bias_filler',0), ... 
45 |     struct('type','loss','inInd',[8 2],'outInd',10,'lossType','MCLogLoss') }; 46 | 47 | %% initialize a network class 48 | 49 | cnn = ConvNet(lenet,atGPU,'Orthogonal'); 50 | 51 | %% For debugging purposes, show some images 52 | x = cnn.net{1}.data.get(1); y = cnn.net{2}.data.get(1); [~,bla] = max(y); 53 | figure; for i=1:25, subplot(5,5,i); imagesc(squeeze(x(:,:,:,i))'); colormap gray; axis off; title(sprintf('%d',bla(i)-1)); end 54 | 55 | 56 | %% Train using SGD with Nesterov's momentum 57 | % mandatory fields 58 | T = 1000; mu = single(0.9); lam = single(0.0005); 59 | learning_rate = @(t)(0.01*(1+0.0001*t)^(-0.75)); % learning rate rule 60 | % optional fields 61 | param.snapshotFile = '/tmp/snapshot'; param.printIter = 100; param.printDecay = 0.9; 62 | % call Nesterov 63 | cnn.Nesterov(T,learning_rate,mu,lam,param); 64 | % plot convergence 65 | figure; plot(cnn.AllLoss'); title('Nesterov'); legend('Train loss','Train 0-1 error'); 66 | 67 | 68 | %% calculate test error 69 | 70 | testlenet = lenet; 71 | testlenet{1}.fName = testImages; 72 | testlenet{2}.fName = testLabels; 73 | testNet = ConvNet(testlenet,atGPU); 74 | testNet.setTheta(cnn.AvgTheta); 75 | testNet.calcLossAndErr(); 76 | fprintf(1,'Test loss= %f, Test accuracy = %f\n',testNet.Loss(1),1-testNet.Loss(2)); 77 | 78 | -------------------------------------------------------------------------------- /html/demoMNIST.html: --------------------------------------------------------------------------------

a simple demonstration of the EasyConvNet code

This code demonstrates the EasyConvNet package on the MNIST data set


data preparation

we need 4 files

trainImages = 'data/train.images.bin';
trainLabels = 'data/train.labels.bin';
testImages = 'data/test.images.bin';
testLabels = 'data/test.labels.bin';
% The format is as follows
% Suppose X is a 4 dim matlab array with dimensions 28x28x1x60000,
% where X(:,:,1,j) is the j'th image
% Suppose that X are numbers between 0 and 255.
% Then, create trainImages by
%   fid = fopen(trainImages,'wb'); fwrite(fid,X(:),'uint8'); fclose(fid);
% To create the label file, suppose we have k classes and let Y be a matrix
% with Y(:,i) being all zeros vector except 1 in the position j, where j is
% the correct label of example i. Then, create trainLabels by:
%   fid = fopen(trainLabels,'wb'); fwrite(fid,Y(:),'ubit1'); fclose(fid);
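For concreteness, here is a minimal sketch of producing such a label file (not part of the original demo; the random labels are placeholders for the real MNIST labels, and the write format must match the 'dataType' declared for the label input layer):

labels = randi([0 9],1,60000);              % placeholder labels in 0..9
Y = zeros(10,60000,'uint8');
Y(sub2ind(size(Y),labels+1,1:60000)) = 1;   % one one-hot column of Y per example
fid = fopen('data/train.labels.bin','wb'); fwrite(fid,Y(:),'uint8'); fclose(fid);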

Decide if you want GPU or CPU implementation

atGPU = false; % change this to true if you have a GPU and Matlab parallel toolbox
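A small convenience check (not in the original demo, and only a sketch) that switches to GPU mode automatically when the parallel toolbox and a CUDA device are present:

if exist('gpuDeviceCount','file') && gpuDeviceCount > 0
    atGPU = true;   % gpuDeviceCount comes with the Matlab parallel toolbox
end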

Define the architecture of a network (the LeNet architecture)

the architecture is a cell array of structs. Each layer is one struct. The first layer must be of type 'input' and the last layer must be of type 'loss'. Each layer has fields 'inInd' and 'outInd'. These are indices to where the input of the layer comes from and where the output of the layer goes to.

lenet = { ...
    struct('type','input','inInd',0,'outInd',1,'blobSize',[28 28 1 100],'fName',trainImages,'scale',1/256,'dataType','uint8'), ...
    struct('type','input','inInd',0,'outInd',2,'blobSize',[10 100],'fName',trainLabels,'scale',1,'dataType','ubit1'), ...
    struct('type','conv','inInd',1,'outInd',3,'kernelsize',5,'stride',1,'nOutChannels',20,'bias_filler',0),...
    struct('type','maxpool','inInd',3,'outInd',4,'kernelsize',2,'stride',2), ...
    struct('type','conv','inInd',4,'outInd',5,'kernelsize',5,'stride',1,'nOutChannels',50,'bias_filler',0),...
    struct('type','maxpool','inInd',5,'outInd',6,'kernelsize',2,'stride',2), ...
    struct('type','flatten','inInd',6,'outInd',6), ...
    struct('type','affine','inInd',6,'outInd',7,'nOutChannels',500,'bias_filler',0), ...
    struct('type','relu','inInd',7,'outInd',7),  ...
    struct('type','affine','inInd',7,'outInd',8,'nOutChannels',10,'bias_filler',0), ...
    struct('type','loss','inInd',[8 2],'outInd',10,'lossType','MCLogLoss') };
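As a smaller illustration of the inInd/outInd wiring (a sketch, not part of the original demo; it reuses the file names above and the same label format), a plain softmax-regression network could be described as:

smallnet = { ...
    struct('type','input','inInd',0,'outInd',1,'blobSize',[28 28 1 100],'fName',trainImages,'scale',1/256,'dataType','uint8'), ...
    struct('type','input','inInd',0,'outInd',2,'blobSize',[10 100],'fName',trainLabels,'scale',1,'dataType','uint8'), ...
    struct('type','flatten','inInd',1,'outInd',3), ...                       % 28x28x1x100 -> 784x100
    struct('type','affine','inInd',3,'outInd',4,'nOutChannels',10,'bias_filler',0), ...
    struct('type','loss','inInd',[4 2],'outInd',5,'lossType','MCLogLoss') };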

initialize a network class

cnn = ConvNet(lenet,atGPU,'Orthogonal');
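The third argument selects the scheme used by initializeWeights for every affine block; 'Orthogonal' (rows of a random orthogonal matrix, the default) and 'Xavier' (scaled uniform) are the two supported choices, e.g.

cnn = ConvNet(lenet,atGPU,'Xavier');   % uniform (Xavier-style) initialization instead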

For debugging purposes, show some images

x = cnn.net{1}.data.get(1); y = cnn.net{2}.data.get(1); [~,bla] = max(y);
figure; for i=1:25, subplot(5,5,i); imagesc(squeeze(x(:,:,:,i))'); colormap gray; axis off; title(sprintf('%d',bla(i)-1));  end
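Another handy debugging step (not part of the original demo) is a finite-difference check of the backward pass on one mini-batch; theta, dtheta, O and lossInd are all readable properties, so on the CPU something like the following sketch works (single precision makes the match approximate):

i = 1; cnn.forward(i); cnn.backward(0);          % analytic gradient, no regularization
g = cnn.dtheta; th = cnn.theta;
k = randi(numel(th));                            % probe one random coordinate
e = zeros(size(th),'single'); e(k) = 1e-3;
cnn.setTheta(th+e); cnn.forward(i); up = cnn.O{cnn.lossInd}(1);   % row 1 is the loss, row 2 the 0-1 error
cnn.setTheta(th-e); cnn.forward(i); dn = cnn.O{cnn.lossInd}(1);
fprintf(1,'analytic %g vs numeric %g\n',g(k),(up-dn)/2e-3);
cnn.setTheta(th);                                % restore the original weights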

Train using SGD with Nesterov's momentum

mandatory fields

T = 1000; mu = single(0.9); printIter = 100; lam = single(0.0005);
learning_rate = @(t)(0.01*(1+0.0001*t)^(-0.75)); % learning rate rule
% optional fields
param.snapshotFile = '/tmp/snapshot'; param.printIter = 100; param.printDecay = 0.9;
% call Nesterov
cnn.Nesterov(T,learning_rate,mu,lam,param);
% plot convergence
figure; plot(cnn.AllLoss'); title('Nesterov'); legend('Train loss','Train 0-1 error');
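For orientation (worked values, not program output), the step size decays smoothly under this rule:

learning_rate(1)      % ans is approximately 0.0100
learning_rate(1000)   % ans = 0.01*(1.1)^(-0.75), approximately 0.0093

Each printed line below gives the iteration number followed by the exponentially smoothed training loss and training 0-1 error (smoothing factor param.printDecay).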
Iter: 100: 0.265366 0.075039 
Iter: 200: 0.171004 0.047117 
Iter: 300: 0.119551 0.037094 
Iter: 400: 0.091825 0.024946 
Iter: 500: 0.080097 0.024165 
Iter: 600: 0.067927 0.019668 
Iter: 700: 0.077423 0.025204 
Iter: 800: 0.063537 0.019559 
Iter: 900: 0.065999 0.020782 
Iter: 1000: 0.045626 0.012953 
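The same problem can be optimized with the other built-in solvers; for instance, plain SGD uses the signature SGD(T,learningRate,lam,param), and an SDCA solver with signature SDCA(alpha,T,eta,lam,param) is also provided. A sketch using the variables defined above:

cnn.SGD(T,learning_rate,lam,param);   % vanilla SGD, same statistics/snapshot handling
% optionally set param.normalizedGradient = true to divide each step by norm(dtheta)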

calculate test error

testlenet = lenet;
testlenet{1}.fName = testImages;
testlenet{2}.fName = testLabels;
testNet = ConvNet(testlenet,atGPU);
testNet.setTheta(cnn.theta);
testNet.calcLossAndErr();
fprintf(1,'Test loss= %f, Test accuracy = %f\n',testNet.Loss(1),1-testNet.Loss(2));

Test loss= 0.054117, Test accuracy = 0.982300
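Because param.snapshotFile was set above, the run also wrote the exponentially averaged weights every printIter iterations as raw single-precision files named like /tmp/snapshot.0000100.bin (the name pattern used by statAndSnapshot). A sketch for loading one back:

fid = fopen('/tmp/snapshot.0001000.bin','rb');   % snapshot written at iteration 1000
w = single(fread(fid,inf,'single'));
fclose(fid);
cnn.setTheta(w);                                 % works for testNet as well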
-------------------------------------------------------------------------------- /html/demoMNIST.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shaisha/EasyConvNet/ec15fa5c0d5cf99d3c9441585435d977f06cb174/html/demoMNIST.png -------------------------------------------------------------------------------- /html/demoMNIST_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shaisha/EasyConvNet/ec15fa5c0d5cf99d3c9441585435d977f06cb174/html/demoMNIST_01.png -------------------------------------------------------------------------------- /html/demoMNIST_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shaisha/EasyConvNet/ec15fa5c0d5cf99d3c9441585435d977f06cb174/html/demoMNIST_02.png -------------------------------------------------------------------------------- /lossClass.m: -------------------------------------------------------------------------------- 1 | classdef lossClass < handle 2 | 3 | properties (SetAccess = private) 4 | type; 5 | end 6 | 7 | 8 | methods 9 | function this = lossClass(lossType) 10 | 11 | switch lossType 12 | case 'MCLogLoss' 13 | this.type = 1; 14 | case 'SquaredLoss' 15 | this.type = 2; 16 | case 'BinLogLoss' 17 | this.type = 3; 18 | otherwise 19 | assert(false,'Unknown loss type') 20 | end 21 | 22 | end 23 | 24 | function loss=LossAndErr(this,input) 25 | switch this.type 26 | case 1 % multiclass logistic loss 27 | [k,m] = size(input{1}); 28 | [loss,ind]=max(input{1}); 29 | pred = input{1}-repmat(loss,k,1); 30 | [~,y]=max(input{2}); 31 | err = sum(ind~=y)/m; 32 | 33 | valY = sum(pred.*input{2}); 34 | loss = pred - repmat(valY,k,1); 35 | loss = sum(log(sum(exp(loss))))/m; 36 | loss = [loss;err]; 37 | case 2 % Squared Loss 38 | loss = 0.5*mean(sum((input{1}-input{2}).^2)); 39 | 40 | case 3 % binary log loss 41 | loss = -input{1}.*input{2}; 42 | err = mean(mean(loss>=0)); 43 | loss(loss>0) = loss(loss>0) + log(1+exp(-loss(loss>0))); 44 | loss(loss<=0) = log(1+exp(loss(loss<=0))); 45 | loss = [mean(mean(loss)) ; err]; 46 | end 47 | end 48 | 49 | function delta=Grad(this,input) 50 | switch this.type 51 | 52 | case 1 % multiclass logistic loss 53 | bla = input{1}-repmat(max(input{1}),size(input{1},1),1); 54 | bla = exp(bla); 55 | bla=bla./repmat(sum(bla),size(bla,1),1); 56 | delta = (bla - input{2})/size(bla,2); 57 | 58 | case 2 % SquaredLoss 59 | delta = (input{1}-input{2})/size(input{2},2); 60 | 61 | case 3 % binary log loss 62 | delta = -input{2}./(1+exp(input{1}.*input{2}))/prod(size(input{2})); 63 | 64 | end 65 | 66 | end 67 | 68 | 69 | end 70 | end 71 | -------------------------------------------------------------------------------- /visualizeNetwork.m: -------------------------------------------------------------------------------- 1 | function visualizeNetwork(net,fName) 2 | % visualize network using latex tikz 3 | % Input: 4 | %   net   - network definition 5 | %   fName - name of a latex output file 6 | % The function creates the file fName, in which there is LaTeX code that 7 | % generates a visualization of the network net 8 | 9 | lenO = 0; 10 | for i=1:length(net) 11 | lenO = max(lenO,max(net{i}.outInd)); 12 | end 13 | 14 | paperheight = lenO*3; 15 | 16 | fid = fopen(fName,'wt'); 17 | 18 | prelatex = {... 19 | '\documentclass[8pt]{article}' , ... 20 | sprintf('\\usepackage[paperwidth=6in, paperheight=%dcm]{geometry}',paperheight) , ...
21 | '\usepackage{tikz}' , ... 22 | '\usetikzlibrary{positioning}' , ... 23 | '\begin{document}' ,... 24 | ' ' , ... 25 | '\begin{tikzpicture}' , ... 26 | ' [nodestyle/.style={rectangle,draw=blue!50,fill=blue!20,thick,' ,... 27 | ' inner sep=2pt,minimum width=1cm},' , ... 28 | ' ostyle/.style={rectangle,draw=black!50,fill=black!20,thick,' ,... 29 | ' inner sep=2pt,minimum width=1cm}]' }; 30 | 31 | fprintf(fid,'%s\n',prelatex{:}); 32 | 33 | 34 | fprintf(fid,'\\node[ostyle] (O1) at (0,0) {1};\n'); 35 | for i=2:lenO, 36 | fprintf(fid,'\\node[ostyle] (O%d) [above=of O%d] {%d};\n',i,i-1,i); 37 | end 38 | fprintf(fid,'\\node[nodestyle] (L1) at (10,0) {%s}',net{1}.type); 39 | for j=1:length(net{1}.outInd) 40 | fprintf(fid,'\n edge[->,very thick,blue] (O%d)',net{1}.outInd(j)); 41 | end 42 | fprintf(fid,';\n'); 43 | for i=2:length(net), 44 | fprintf(fid,'\\node[nodestyle] (L%d) [above=of L%d] {%s}',i,i-1,net{i}.type); 45 | if ~strcmp(net{i}.type,'input') 46 | for j=1:length(net{i}.inInd) 47 | fprintf(fid,'\n edge[<-,very thick,red] (O%d)',net{i}.inInd(j)); 48 | end 49 | end 50 | for j=1:length(net{i}.outInd) 51 | fprintf(fid,'\n edge[->,very thick,blue] (O%d)',net{i}.outInd(j)); 52 | end 53 | fprintf(fid,';\n'); 54 | end 55 | 56 | 57 | % closure 58 | fprintf(fid,'\\end{tikzpicture}\n\\end{document}\n'); 59 | 60 | 61 | fclose(fid); 62 | 63 | I = find(fName == '/',1,'last'); coreName = fName(1:end-4); if ~isempty(I), coreName=coreName((I+1):end); end 64 | fprintf(1,'Done. Now run:\n\t unix(''/usr/texbin/pdflatex -output-directory=/tmp %s''); unix(''open /tmp/%s.pdf'');\n',fName,coreName); 65 | 66 | end --------------------------------------------------------------------------------
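A usage sketch for visualizeNetwork (it can be applied directly to a network description such as the lenet cell array from demoMNIST; compiling the output assumes a working LaTeX installation):

visualizeNetwork(lenet,'/tmp/lenet.tex');
% then compile, e.g. unix('pdflatex -output-directory=/tmp /tmp/lenet.tex');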