├── PSO-CNN ├── cnn │ ├── Copy_of_cnnexamples.m │ ├── Copy_of_cnnexamplesL2.asv │ ├── Copy_of_cnnexamplesL2.m │ ├── Copy_of_cnnexamples_tri.asv │ ├── Copy_of_cnnexamples_tri.m │ ├── ReadMe │ ├── cnnapplygrads.m │ ├── cnnapplygrads_original.m │ ├── cnnassign.asv │ ├── cnnassign.m │ ├── cnnbp.asv │ ├── cnnbp.m │ ├── cnnbpsvm.asv │ ├── cnnbpsvm.m │ ├── cnnexamples.m │ ├── cnnff.m │ ├── cnnsetup.asv │ ├── cnnsetup.m │ ├── cnnsetup_original.m │ ├── cnnsvm.m │ ├── cnnsvm_New.m │ ├── cnntest.asv │ ├── cnntest.m │ ├── cnntest_doublefeature.m │ ├── cnntest_original.m │ ├── cnntrain.m │ ├── cnntrain_clpso.m │ ├── cnntrain_seq.asv │ ├── cnntrain_seq.m │ ├── cnntrain_v_sgd1.m │ ├── cnntrain_v_sgd2.m │ ├── cnnupdate.m │ ├── cnnupdate_clpso.m │ ├── compare_cnn.m │ ├── result.txt │ ├── totalrun.m │ └── 实验记录.txt ├── data │ ├── data512newb48.mat │ └── mnist_uint8.mat ├── tests │ ├── runalltests.m │ ├── test_cnn_gradients_are_numerically_correct.m │ ├── test_example_CNN.m │ ├── test_example_DBN.m │ ├── test_example_NN.m │ ├── test_example_SAE.m │ └── test_nn_gradients_are_numerically_correct.m ├── util │ ├── allcomb.m │ ├── expand.m │ ├── flicker.m │ ├── flipall.m │ ├── fliplrf.m │ ├── flipudf.m │ ├── im2patches.m │ ├── makeLMfilters.m │ ├── normalize.m │ ├── patches2im.m │ ├── randcorr.m │ ├── randp.m │ ├── rnd.m │ ├── sigm.m │ ├── sigmrnd.m │ ├── softmax.m │ ├── tanh_opt.m │ ├── visualize.m │ ├── whiten.m │ └── zscore.m └── 卷积神经网络CNN代码解析-matlab.doc └── cnn-matlab ├── block_sum_2.m ├── cnn-matlab.pdf ├── cnn_cm.m ├── cnn_compute_gradient.m ├── cnn_devectorize_wb.m ├── cnn_example.m ├── cnn_example_face.m ├── cnn_example_nets.m ├── cnn_get_init_tr.m ├── cnn_getw.m ├── cnn_init.m ├── cnn_new.m ├── cnn_setw.m ├── cnn_sim.m ├── cnn_sim_verbose.m ├── cnn_train.m ├── cnn_train_gd.m ├── cnn_train_rprop.m ├── cnn_vectorize_wb.m ├── data ├── get_face_data.m ├── test_data.mat ├── train_data.mat └── trained_net.mat ├── dltanh.m ├── face.fig ├── face_result.txt ├── internal_testing ├── cnn_compute_gradient_check_script.m ├── cnn_compute_gradient_script.m ├── cnn_vectorize_check_script.m └── reshape_script.m ├── ltanh.m └── readme.txt /PSO-CNN/cnn/Copy_of_cnnexamples.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/Copy_of_cnnexamples.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/Copy_of_cnnexamplesL2.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/Copy_of_cnnexamplesL2.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/Copy_of_cnnexamplesL2.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/Copy_of_cnnexamplesL2.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/Copy_of_cnnexamples_tri.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/Copy_of_cnnexamples_tri.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/Copy_of_cnnexamples_tri.m: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/Copy_of_cnnexamples_tri.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/ReadMe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/ReadMe -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnapplygrads.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnapplygrads.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnapplygrads_original.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnapplygrads_original.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnassign.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnassign.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnassign.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnassign.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnbp.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnbp.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnbp.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnbp.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnbpsvm.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnbpsvm.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnbpsvm.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnbpsvm.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnexamples.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnexamples.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnff.m: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnff.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnsetup.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnsetup.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnsetup.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnsetup.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnsetup_original.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnsetup_original.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnsvm.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnsvm.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnsvm_New.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnsvm_New.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntest.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntest.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntest.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntest.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntest_doublefeature.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntest_doublefeature.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntest_original.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntest_original.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain_clpso.m: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain_clpso.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain_seq.asv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain_seq.asv -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain_seq.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain_seq.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain_v_sgd1.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain_v_sgd1.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnntrain_v_sgd2.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnntrain_v_sgd2.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnupdate.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnupdate.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/cnnupdate_clpso.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/cnnupdate_clpso.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/compare_cnn.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/compare_cnn.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/result.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/result.txt -------------------------------------------------------------------------------- /PSO-CNN/cnn/totalrun.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/totalrun.m -------------------------------------------------------------------------------- /PSO-CNN/cnn/实验记录.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/cnn/实验记录.txt -------------------------------------------------------------------------------- /PSO-CNN/data/data512newb48.mat: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/data/data512newb48.mat -------------------------------------------------------------------------------- /PSO-CNN/data/mnist_uint8.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/data/mnist_uint8.mat -------------------------------------------------------------------------------- /PSO-CNN/tests/runalltests.m: -------------------------------------------------------------------------------- 1 | clear all; close all; clc; 2 | 3 | addpath(genpath('..')); 4 | dirlist = dir('./test_*'); 5 | for i = 1:length(dirlist) 6 | name = dirlist(i).name(1:end-2); 7 | feval(name) 8 | end 9 | -------------------------------------------------------------------------------- /PSO-CNN/tests/test_cnn_gradients_are_numerically_correct.m: -------------------------------------------------------------------------------- 1 | function test_cnn_gradients_are_numerically_correct 2 | batch_x = rand(28,28,5); 3 | batch_y = rand(10,5); 4 | cnn.layers = { 5 | struct('type', 'i') %input layer 6 | struct('type', 'c', 'outputmaps', 2, 'kernelsize', 5) %convolution layer 7 | struct('type', 's', 'scale', 2) %subsampling layer 8 | struct('type', 'c', 'outputmaps', 2, 'kernelsize', 5) %convolution layer 9 | struct('type', 's', 'scale', 2) %subsampling layer 10 | }; 11 | cnn = cnnsetup(cnn, batch_x, batch_y); 12 | 13 | cnn = cnnff(cnn, batch_x); 14 | cnn = cnnbp(cnn, batch_y); 15 | cnnnumgradcheck(cnn, batch_x, batch_y); -------------------------------------------------------------------------------- /PSO-CNN/tests/test_example_CNN.m: -------------------------------------------------------------------------------- 1 | function test_example_CNN 2 | load mnist_uint8; 3 | 4 | train_x = double(reshape(train_x',28,28,60000))/255; 5 | test_x = double(reshape(test_x',28,28,10000))/255; 6 | train_y = double(train_y'); 7 | test_y = double(test_y'); 8 | 9 | %% ex1 Train a 6c-2s-12c-2s Convolutional neural network 10 | %will run 1 epoch in about 200 seconds and get around 11% error.
11 | %With 100 epochs you'll get around 1.2% error 12 | rand('state',0) 13 | cnn.layers = { 14 | struct('type', 'i') %input layer 15 | struct('type', 'c', 'outputmaps', 6, 'kernelsize', 5) %convolution layer 16 | struct('type', 's', 'scale', 2) %sub sampling layer 17 | struct('type', 'c', 'outputmaps', 12, 'kernelsize', 5) %convolution layer 18 | struct('type', 's', 'scale', 2) %subsampling layer 19 | }; 20 | cnn = cnnsetup(cnn, train_x, train_y); 21 | 22 | opts.alpha = 1; 23 | opts.batchsize = 50; 24 | opts.numepochs = 1; 25 | 26 | cnn = cnntrain(cnn, train_x, train_y, opts); 27 | 28 | [er, bad] = cnntest(cnn, test_x, test_y); 29 | 30 | %plot mean squared error 31 | figure; plot(cnn.rL); 32 | 33 | assert(er<0.12, 'Too big error'); 34 | -------------------------------------------------------------------------------- /PSO-CNN/tests/test_example_DBN.m: -------------------------------------------------------------------------------- 1 | function test_example_DBN 2 | load mnist_uint8; 3 | 4 | train_x = double(train_x) / 255; 5 | test_x = double(test_x) / 255; 6 | train_y = double(train_y); 7 | test_y = double(test_y); 8 | 9 | %% ex1 train a 100 hidden unit RBM and visualize its weights 10 | rand('state',0) 11 | dbn.sizes = [100]; 12 | opts.numepochs = 1; 13 | opts.batchsize = 100; 14 | opts.momentum = 0; 15 | opts.alpha = 1; 16 | dbn = dbnsetup(dbn, train_x, opts); 17 | dbn = dbntrain(dbn, train_x, opts); 18 | figure; visualize(dbn.rbm{1}.W'); % Visualize the RBM weights 19 | 20 | %% ex2 train a 100-100 hidden unit DBN and use its weights to initialize a NN 21 | rand('state',0) 22 | %train dbn 23 | dbn.sizes = [100 100]; 24 | opts.numepochs = 1; 25 | opts.batchsize = 100; 26 | opts.momentum = 0; 27 | opts.alpha = 1; 28 | dbn = dbnsetup(dbn, train_x, opts); 29 | dbn = dbntrain(dbn, train_x, opts); 30 | 31 | %unfold dbn to nn 32 | nn = dbnunfoldtonn(dbn, 10); 33 | nn.activation_function = 'sigm'; 34 | 35 | %train nn 36 | opts.numepochs = 1; 37 | opts.batchsize = 100; 38 | nn = nntrain(nn, train_x, train_y, opts); 39 | [er, bad] = nntest(nn, test_x, test_y); 40 | 41 | assert(er < 0.10, 'Too big error'); 42 | -------------------------------------------------------------------------------- /PSO-CNN/tests/test_example_NN.m: -------------------------------------------------------------------------------- 1 | function test_example_NN 2 | load mnist_uint8; 3 | 4 | train_x = double(train_x) / 255; 5 | test_x = double(test_x) / 255; 6 | train_y = double(train_y); 7 | test_y = double(test_y); 8 | 9 | % normalize 10 | [train_x, mu, sigma] = zscore(train_x); 11 | test_x = normalize(test_x, mu, sigma); 12 | 13 | %% ex1 vanilla neural net 14 | rand('state',0) 15 | nn = nnsetup([784 100 10]); 16 | opts.numepochs = 1; % Number of full sweeps through data 17 | opts.batchsize = 100; % Take a mean gradient step over this many samples 18 | [nn, L] = nntrain(nn, train_x, train_y, opts); 19 | 20 | [er, bad] = nntest(nn, test_x, test_y); 21 | 22 | assert(er < 0.08, 'Too big error'); 23 | 24 | %% ex2 neural net with L2 weight decay 25 | rand('state',0) 26 | nn = nnsetup([784 100 10]); 27 | 28 | nn.weightPenaltyL2 = 1e-4; % L2 weight decay 29 | opts.numepochs = 1; % Number of full sweeps through data 30 | opts.batchsize = 100; % Take a mean gradient step over this many samples 31 | 32 | nn = nntrain(nn, train_x, train_y, opts); 33 | 34 | [er, bad] = nntest(nn, test_x, test_y); 35 | assert(er < 0.1, 'Too big error'); 36 | 37 | 38 | %% ex3 neural net with dropout 39 | rand('state',0) 40 | nn = nnsetup([784 100 10]); 41 | 
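% Added note: with dropoutFraction = 0.5, half of the hidden activations
% are randomly zeroed on each training pass, which discourages units from
% co-adapting. In the DeepLearnToolbox code this repository builds on,
% nnff draws a fresh dropout mask per batch during training and scales
% activations by (1 - dropoutFraction) at test time.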
42 | nn.dropoutFraction = 0.5; % Dropout fraction 43 | opts.numepochs = 1; % Number of full sweeps through data 44 | opts.batchsize = 100; % Take a mean gradient step over this many samples 45 | 46 | nn = nntrain(nn, train_x, train_y, opts); 47 | 48 | [er, bad] = nntest(nn, test_x, test_y); 49 | assert(er < 0.1, 'Too big error'); 50 | 51 | %% ex4 neural net with sigmoid activation function 52 | rand('state',0) 53 | nn = nnsetup([784 100 10]); 54 | 55 | nn.activation_function = 'sigm'; % Sigmoid activation function 56 | nn.learningRate = 1; % Sigm require a lower learning rate 57 | opts.numepochs = 1; % Number of full sweeps through data 58 | opts.batchsize = 100; % Take a mean gradient step over this many samples 59 | 60 | nn = nntrain(nn, train_x, train_y, opts); 61 | 62 | [er, bad] = nntest(nn, test_x, test_y); 63 | assert(er < 0.1, 'Too big error'); 64 | 65 | %% ex5 plotting functionality 66 | rand('state',0) 67 | nn = nnsetup([784 20 10]); 68 | opts.numepochs = 5; % Number of full sweeps through data 69 | nn.output = 'softmax'; % use softmax output 70 | opts.batchsize = 1000; % Take a mean gradient step over this many samples 71 | opts.plot = 1; % enable plotting 72 | 73 | nn = nntrain(nn, train_x, train_y, opts); 74 | 75 | [er, bad] = nntest(nn, test_x, test_y); 76 | assert(er < 0.1, 'Too big error'); 77 | 78 | %% ex6 neural net with sigmoid activation and plotting of validation and training error 79 | % split training data into training and validation data 80 | vx = train_x(1:10000,:); 81 | tx = train_x(10001:end,:); 82 | vy = train_y(1:10000,:); 83 | ty = train_y(10001:end,:); 84 | 85 | rand('state',0) 86 | nn = nnsetup([784 20 10]); 87 | nn.output = 'softmax'; % use softmax output 88 | opts.numepochs = 5; % Number of full sweeps through data 89 | opts.batchsize = 1000; % Take a mean gradient step over this many samples 90 | opts.plot = 1; % enable plotting 91 | nn = nntrain(nn, tx, ty, opts, vx, vy); % nntrain takes validation set as last two arguments (optionally) 92 | 93 | [er, bad] = nntest(nn, test_x, test_y); 94 | assert(er < 0.1, 'Too big error'); 95 | -------------------------------------------------------------------------------- /PSO-CNN/tests/test_example_SAE.m: -------------------------------------------------------------------------------- 1 | function test_example_SAE 2 | load mnist_uint8; 3 | 4 | train_x = double(train_x)/255; 5 | test_x = double(test_x)/255; 6 | train_y = double(train_y); 7 | test_y = double(test_y); 8 | 9 | %% ex1 train a 100 hidden unit SDAE and use it to initialize a FFNN 10 | % Setup and train a stacked denoising autoencoder (SDAE) 11 | rand('state',0) 12 | sae = saesetup([784 100]); 13 | sae.ae{1}.activation_function = 'sigm'; 14 | sae.ae{1}.learningRate = 1; 15 | sae.ae{1}.inputZeroMaskedFraction = 0.5; 16 | opts.numepochs = 1; 17 | opts.batchsize = 100; 18 | sae = saetrain(sae, train_x, opts); 19 | visualize(sae.ae{1}.W{1}(:,2:end)') 20 | 21 | % Use the SDAE to initialize a FFNN 22 | nn = nnsetup([784 100 10]); 23 | nn.activation_function = 'sigm'; 24 | nn.learningRate = 1; 25 | nn.W{1} = sae.ae{1}.W{1}; 26 | 27 | % Train the FFNN 28 | opts.numepochs = 1; 29 | opts.batchsize = 100; 30 | nn = nntrain(nn, train_x, train_y, opts); 31 | [er, bad] = nntest(nn, test_x, test_y); 32 | assert(er < 0.16, 'Too big error'); 33 | 34 | %% ex2 train a 100-100 hidden unit SDAE and use it to initialize a FFNN 35 | % Setup and train a stacked denoising autoencoder (SDAE) 36 | rand('state',0) 37 | sae = saesetup([784 100 100]); 38 | sae.ae{1}.activation_function 
= 'sigm'; 39 | sae.ae{1}.learningRate = 1; 40 | sae.ae{1}.inputZeroMaskedFraction = 0.5; 41 | 42 | sae.ae{2}.activation_function = 'sigm'; 43 | sae.ae{2}.learningRate = 1; 44 | sae.ae{2}.inputZeroMaskedFraction = 0.5; 45 | 46 | opts.numepochs = 1; 47 | opts.batchsize = 100; 48 | sae = saetrain(sae, train_x, opts); 49 | visualize(sae.ae{1}.W{1}(:,2:end)') 50 | 51 | % Use the SDAE to initialize a FFNN 52 | nn = nnsetup([784 100 100 10]); 53 | nn.activation_function = 'sigm'; 54 | nn.learningRate = 1; 55 | 56 | %add pretrained weights 57 | nn.W{1} = sae.ae{1}.W{1}; 58 | nn.W{2} = sae.ae{2}.W{1}; 59 | 60 | % Train the FFNN 61 | opts.numepochs = 1; 62 | opts.batchsize = 100; 63 | nn = nntrain(nn, train_x, train_y, opts); 64 | [er, bad] = nntest(nn, test_x, test_y); 65 | assert(er < 0.1, 'Too big error'); 66 | -------------------------------------------------------------------------------- /PSO-CNN/tests/test_nn_gradients_are_numerically_correct.m: -------------------------------------------------------------------------------- 1 | function test_nn_gradients_are_numerically_correct 2 | batch_x = rand(20, 5); 3 | batch_y = rand(20, 2); 4 | 5 | for output = {'sigm', 'linear', 'softmax'} 6 | y=batch_y; 7 | if(strcmp(output,'softmax')) 8 | % softmax output requires a binary output vector 9 | y=(y==repmat(max(y,[],2),1,size(y,2))); 10 | end 11 | 12 | for activation_function = {'sigm', 'tanh_opt'} 13 | for dropoutFraction = {0 rand()} 14 | nn = nnsetup([5 3 4 2]); 15 | 16 | nn.activation_function = activation_function{1}; 17 | nn.output = output{1}; 18 | nn.dropoutFraction = dropoutFraction{1}; 19 | 20 | rand('state',0) 21 | nn = nnff(nn, batch_x, y); 22 | nn = nnbp(nn); 23 | nnchecknumgrad(nn, batch_x, y); 24 | end 25 | end 26 | end 27 | -------------------------------------------------------------------------------- /PSO-CNN/util/allcomb.m: -------------------------------------------------------------------------------- 1 | function A = allcomb(varargin) 2 | % ALLCOMB - All combinations 3 | % B = ALLCOMB(A1,A2,A3,...,AN) returns all combinations of the elements 4 | % in A1, A2, ..., and AN. B is a P-by-N matrix in which P is the product 5 | % of the number of elements of the N inputs. 6 | % Empty inputs yield an empty matrix B of size 0-by-N. Note that 7 | % previous versions (1.x) simply ignored empty inputs. 8 | % 9 | % Example: 10 | % allcomb([1 3 5],[-3 8],[0 1]) ; 11 | % 1 -3 0 12 | % 1 -3 1 13 | % 1 8 0 14 | % ... 15 | % 5 -3 1 16 | % 5 8 0 17 | % 5 8 1 18 | % 19 | % ALLCOMB(A1,..AN,'matlab') causes the first column to change fastest. 20 | % This is more consistent with matlab indexing. Example: 21 | % allcomb(1:2,3:4,5:6,'matlab') %-> 22 | % 1 3 5 23 | % 2 3 5 24 | % 1 4 5 25 | % ... 26 | % 2 4 6 27 | % 28 | % This functionality is also known as the cartesian product. 29 | % 30 | % See also NCHOOSEK, PERMS, 31 | % and COMBN (Matlab Central FEX) 32 | 33 | % for Matlab R13+ 34 | % version 2.1 (feb 2011) 35 | % (c) Jos van der Geest 36 | % email: jos@jasen.nl 37 | 38 | % History 39 | % 1.1 (feb 2006), removed minor bug when entering empty cell arrays; 40 | % added option to let the first input run fastest (suggestion by JD) 41 | % 1.2 (jan 2010), using ii as an index on the left-hand for the multiple 42 | % output by NDGRID. Thanks to Jan Simon, for showing this little trick 43 | % 2.0 (dec 2010). Bruno Luong convinced me that an empty input should 44 | % return an empty output. 45 | % 2.1 (feb 2011).
A cell as input argument caused the check on the last 46 | % argument (specifying the order) to crash. 47 | 48 | error(nargchk(1,Inf,nargin)) ; 49 | 50 | % check for empty inputs 51 | q = ~cellfun('isempty',varargin) ; 52 | if any(~q), 53 | warning('ALLCOMB:EmptyInput','Empty inputs result in an empty output.') ; 54 | A = zeros(0,nargin) ; 55 | else 56 | 57 | ni = sum(q) ; 58 | 59 | argn = varargin{end} ; 60 | 61 | if ischar(argn) && (strcmpi(argn,'matlab') || strcmpi(argn,'john')), 62 | % based on a suggestion by JD on the FEX 63 | ni = ni-1 ; 64 | ii = 1:ni ; 65 | q(end) = 0 ; 66 | else 67 | % enter arguments backwards, so last one (AN) is changing fastest 68 | ii = ni:-1:1 ; 69 | end 70 | 71 | if ni==0, 72 | A = [] ; 73 | else 74 | args = varargin(q) ; 75 | if ~all(cellfun('isclass',args,'double')), 76 | error('All arguments should be arrays of doubles') ; 77 | end 78 | if ni==1, 79 | A = args{1}(:) ; 80 | else 81 | % flip using ii if last column is changing fastest 82 | [A{ii}] = ndgrid(args{ii}) ; 83 | % concatenate 84 | A = reshape(cat(ni+1,A{:}),[],ni) ; 85 | end 86 | end 87 | end 88 | -------------------------------------------------------------------------------- /PSO-CNN/util/expand.m: -------------------------------------------------------------------------------- 1 | function B = expand(A, S) 2 | %EXPAND Replicate and tile each element of an array, similar to repmat. 3 | % EXPAND(A,SZ), for array A and vector SZ replicates each element of A by 4 | % SZ. The results are tiled into an array in the same order as the 5 | % elements of A, so that the result is size: size(A).*SZ. Therefore the 6 | % number of elements of SZ must equal the number of dimensions of A, or in 7 | % MATLAB syntax: length(size(A))==length(SZ) must be true. 8 | % The result will have the same number of dimensions as does A. 9 | % There is no restriction on the number of dimensions for input A. 10 | % 11 | % Examples: 12 | % 13 | % A = [1 2; 3 4]; % 2x2 14 | % SZ = [6 5]; 15 | % B = expand(A,[6 5]) % Creates a 12x10 array. 16 | % 17 | % The following demonstrates equivalence of EXPAND and expansion achieved 18 | % through indexing the individual elements of the array: 19 | % 20 | % A = 1; B = 2; C = 3; D = 4; % Elements of the array to be expanded. 21 | % Mat = [A B;C D]; % The array to expand. 22 | % SZ = [2 3]; % The expansion vector. 23 | % ONES = ones(SZ); % The index array. 24 | % ExpMat1 = [A(ONES),B(ONES);C(ONES),D(ONES)]; % Element expansion. 25 | % ExpMat2 = expand(Mat,SZ); % Calling EXPAND. 26 | % isequal(ExpMat1,ExpMat2) % Yes 27 | % 28 | % 29 | % See also, repmat, meshgrid, ones, zeros, kron 30 | % 31 | % Author: Matt Fig 32 | % Date: 6/20/2009 33 | % Contact: popkenai@yahoo.com 34 | 35 | if nargin < 2 36 | error('Size vector must be provided. See help.'); 37 | end 38 | 39 | SA = size(A); % Get the size (and number of dimensions) of input. 40 | 41 | if length(SA) ~= length(S) 42 | error('Length of size vector must equal ndims(A). See help.') 43 | elseif any(S ~= floor(S)) 44 | error('The size vector must contain integers only. See help.') 45 | end 46 | 47 | T = cell(length(SA), 1); 48 | for ii = length(SA) : -1 : 1 49 | H = zeros(SA(ii) * S(ii), 1); % One index vector into A for each dim. 50 | H(1 : S(ii) : SA(ii) * S(ii)) = 1; % Put ones in correct places. 51 | T{ii} = cumsum(H); % Cumsumming creates the correct order. 52 | end 53 | 54 | B = A(T{:}); % Feed the indices into A.
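% Added check (a sketch, not part of the original file): for a 2-D array,
% EXPAND agrees with KRON against a block of ones, since both replicate
% each element into an S(1)-by-S(2) tile:
%
%   A = magic(3);
%   isequal(expand(A, [2 3]), kron(A, ones(2, 3)))   % returns true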
-------------------------------------------------------------------------------- /PSO-CNN/util/flicker.m: -------------------------------------------------------------------------------- 1 | function flicker(X,fps) 2 | figure; 3 | colormap gray; 4 | axis image; 5 | while 1 6 | for i=1:size(X,1); 7 | imagesc(squeeze(X(i,:,:))); drawnow; 8 | pause(1/fps); 9 | end 10 | end 11 | end -------------------------------------------------------------------------------- /PSO-CNN/util/flipall.m: -------------------------------------------------------------------------------- 1 | function X=flipall(X) 2 | for i=1:ndims(X) 3 | X = flipdim(X,i); 4 | end 5 | end -------------------------------------------------------------------------------- /PSO-CNN/util/fliplrf.m: -------------------------------------------------------------------------------- 1 | function y = fliplrf(x) 2 | %FLIPLR Flip matrix in left/right direction. 3 | % FLIPLR(X) returns X with row preserved and columns flipped 4 | % in the left/right direction. 5 | % 6 | % X = 1 2 3 becomes 3 2 1 7 | % 4 5 6 6 5 4 8 | % 9 | % Class support for input X: 10 | % float: double, single 11 | % 12 | % See also FLIPUD, ROT90, FLIPDIM. 13 | 14 | % Copyright 1984-2010 The MathWorks, Inc. 15 | % $Revision: 5.9.4.4 $ $Date: 2010/02/25 08:08:47 $ 16 | 17 | % if ~ismatrix(x) 18 | % error('MATLAB:fliplr:SizeX', 'X must be a 2-D matrix.'); 19 | % end 20 | y = x(:,end:-1:1); 21 | -------------------------------------------------------------------------------- /PSO-CNN/util/flipudf.m: -------------------------------------------------------------------------------- 1 | function y = flipudf(x) 2 | %FLIPUD Flip matrix in up/down direction. 3 | % FLIPUD(X) returns X with columns preserved and rows flipped 4 | % in the up/down direction. For example, 5 | % 6 | % X = 1 4 becomes 3 6 7 | % 2 5 2 5 8 | % 3 6 1 4 9 | % 10 | % Class support for input X: 11 | % float: double, single 12 | % 13 | % See also FLIPLR, ROT90, FLIPDIM. 14 | 15 | % Copyright 1984-2010 The MathWorks, Inc. 16 | % $Revision: 5.9.4.4 $ $Date: 2010/02/25 08:08:49 $ 17 | 18 | % if ~ismatrix(x) 19 | % error('MATLAB:flipud:SizeX', 'X must be a 2-D matrix.'); 20 | % end 21 | y = x(end:-1:1,:); 22 | -------------------------------------------------------------------------------- /PSO-CNN/util/im2patches.m: -------------------------------------------------------------------------------- 1 | function patches = im2patches(im,m,n) 2 | assert(rem(size(im,1),m)==0) 3 | assert(rem(size(im,2),n)==0) 4 | 5 | patches = []; 6 | for i=1:m:size(im,1) 7 | for u=1:n:size(im,2) 8 | patch = im(i:i+n-1,u:u+m-1); 9 | patches = [patches patch(:)]; 10 | end 11 | end 12 | patches = patches'; 13 | end -------------------------------------------------------------------------------- /PSO-CNN/util/makeLMfilters.m: -------------------------------------------------------------------------------- 1 | function F=makeLMfilters 2 | % Returns the LML filter bank of size 49x49x48 in F. To convolve an 3 | % image I with the filter bank you can either use the matlab function 4 | % conv2, i.e. responses(:,:,i)=conv2(I,F(:,:,i),'valid'), or use the 5 | % Fourier transform. 
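% Added usage sketch ('cameraman.tif' from the Image Processing Toolbox
% is assumed purely as an example input):
%   F = makeLMfilters;
%   I = double(imread('cameraman.tif'));
%   responses = zeros(size(I,1)-48, size(I,2)-48, size(F,3));
%   for k = 1:size(F,3)
%       responses(:,:,k) = conv2(I, F(:,:,k), 'valid');
%   end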
6 | 7 | SUP=49; % Support of the largest filter (must be odd) 8 | SCALEX=sqrt(2).^[1:3]; % Sigma_{x} for the oriented filters 9 | NORIENT=6; % Number of orientations 10 | 11 | NROTINV=12; 12 | NBAR=length(SCALEX)*NORIENT; 13 | NEDGE=length(SCALEX)*NORIENT; 14 | NF=NBAR+NEDGE+NROTINV; 15 | F=zeros(SUP,SUP,NF); 16 | hsup=(SUP-1)/2; 17 | [x,y]=meshgrid([-hsup:hsup],[hsup:-1:-hsup]); 18 | orgpts=[x(:) y(:)]'; 19 | 20 | count=1; 21 | for scale=1:length(SCALEX), 22 | for orient=0:NORIENT-1, 23 | angle=pi*orient/NORIENT; % Not 2pi as filters have symmetry 24 | c=cos(angle);s=sin(angle); 25 | rotpts=[c -s;s c]*orgpts; 26 | F(:,:,count)=makefilter(SCALEX(scale),0,1,rotpts,SUP); 27 | F(:,:,count+NEDGE)=makefilter(SCALEX(scale),0,2,rotpts,SUP); 28 | count=count+1; 29 | end; 30 | end; 31 | 32 | count=NBAR+NEDGE+1; 33 | SCALES=sqrt(2).^[1:4]; 34 | for i=1:length(SCALES), 35 | F(:,:,count)=normalise(fspecial('gaussian',SUP,SCALES(i))); 36 | F(:,:,count+1)=normalise(fspecial('log',SUP,SCALES(i))); 37 | F(:,:,count+2)=normalise(fspecial('log',SUP,3*SCALES(i))); 38 | count=count+3; 39 | end; 40 | return 41 | 42 | function f=makefilter(scale,phasex,phasey,pts,sup) 43 | gx=gauss1d(3*scale,0,pts(1,:),phasex); 44 | gy=gauss1d(scale,0,pts(2,:),phasey); 45 | f=normalise(reshape(gx.*gy,sup,sup)); 46 | return 47 | 48 | function g=gauss1d(sigma,mean,x,ord) 49 | % Function to compute gaussian derivatives of order 0 <= ord < 3 50 | % evaluated at x. 51 | 52 | x=x-mean;num=x.*x; 53 | variance=sigma^2; 54 | denom=2*variance; 55 | g=exp(-num/denom)/(pi*denom)^0.5; 56 | switch ord, 57 | case 1, g=-g.*(x/variance); 58 | case 2, g=g.*((num-variance)/(variance^2)); 59 | end; 60 | return 61 | 62 | function f=normalise(f), f=f-mean(f(:)); f=f/sum(abs(f(:))); return -------------------------------------------------------------------------------- /PSO-CNN/util/normalize.m: -------------------------------------------------------------------------------- 1 | function x = normalize(x, mu, sigma) 2 | x=bsxfun(@minus,x,mu); 3 | x=bsxfun(@rdivide,x,sigma); 4 | end 5 | -------------------------------------------------------------------------------- /PSO-CNN/util/patches2im.m: -------------------------------------------------------------------------------- 1 | function im = patches2im(patches,n,m) 2 | k=1; 3 | im = zeros(n,m); 4 | for i=1:10:800 5 | for u=1:10:1140 6 | patch = patches(:,k); 7 | im(u:u+9,i:i+9) = reshape(patch,10,10); 8 | k = k+1; 9 | end 10 | end 11 | end -------------------------------------------------------------------------------- /PSO-CNN/util/randcorr.m: -------------------------------------------------------------------------------- 1 | function x=randcorr(n,R) 2 | % RANDCORR Generates corremlated random variables 3 | % Generates n vector valued variates with uniform marginals and correlation 4 | % matrix R. 5 | % Returns an nxk matrix, where k is the order of R. 6 | k=size(R,1); 7 | R=2*sin((pi/6)*R); 8 | x=normcdf(randn(n,k)*chol(R)); -------------------------------------------------------------------------------- /PSO-CNN/util/randp.m: -------------------------------------------------------------------------------- 1 | function X = randp(P,varargin) 2 | % RANDP - pick random values with relative probability 3 | % 4 | % R = RANDP(PROB,..) returns integers in the range from 1 to 5 | % NUMEL(PROB) with a relative probability, so that the value X is 6 | % present approximately (PROB(X)./sum(PROB)) times in the matrix R. 7 | % 8 | % All values of PROB should be equal to or larger than 0. 
9 | % 10 | % RANDP(PROB,N) is an N-by-N matrix, RANDP(PROB,M,N) and 11 | % RANDP(PROB,[M,N]) are M-by-N matrices. RANDP(PROB, M1,M2,M3,...) or 12 | % RANDP(PROB,[M1,M2,M3,...]) generate random arrays. 13 | % RANDP(PROB,SIZE(A)) is the same size as A. 14 | % 15 | % Example: 16 | % R = randp([1 3 2],1,10000) 17 | % % returns a row vector with 10000 values; for P = [1 3 2], about 1/6 are 1, 1/2 are 2, and 1/3 are 3 18 | % histc(R,1:3) ./ numel(R) 19 | % 20 | % R = randp([1 1 0 0 1],10,1) 21 | % % 10 samples evenly drawn from [1 2 5] 22 | % 23 | % 24 | % Also see RAND, RANDPERM 25 | % RANDPERMBREAK, RANDINTERVAL, RANDSWAP (MatLab File Exchange) 26 | 27 | % Created for Matlab R13+ 28 | % version 2.0 (feb 2009) 29 | % (c) Jos van der Geest 30 | % email: jos@jasen.nl 31 | % 32 | % File history: 33 | % 1.0 (nov 2005) - created 34 | % 1.1 (nov 2005) - modified slightly to check input arguments to RAND first 35 | % 1.2 (aug 2006) - fixed bug when called with scalar argument P 36 | % 2.0 (feb 2009) - use HISTC for creating the integers (faster and simpler than 37 | % previous algorithm) 38 | 39 | error(nargchk(2,Inf,nargin)) ; 40 | 41 | try 42 | X = rand(varargin{:}) ; 43 | catch 44 | E = lasterror ; 45 | E.message = strrep(E.message,'rand','randp') ; 46 | rethrow(E) ; 47 | end 48 | 49 | P = P(:) ; 50 | 51 | if any(P<0), 52 | error('All probabilities should be 0 or larger.') ; 53 | end 54 | 55 | if isempty(P) || sum(P)==0 56 | warning([mfilename ':ZeroProbabilities'],'All zero probabilities') ; 57 | X(:) = 0 ; 58 | else 59 | [junk,X] = histc(X,[0 ; cumsum(P(:))] ./ sum(P)) ; 60 | end 61 | 62 | % Method used before version 2 63 | % X = rand(varargin{:}) ; 64 | % sz = size(X) ; 65 | % P = reshape(P,1,[]) ; % row vector 66 | % P = cumsum(P) ./ sum(P) ; 67 | % X = repmat(X(:),1,numel(P)) < repmat(P,numel(X),1) ; 68 | % X = numel(P) - sum(X,2) + 1 ; 69 | % X = reshape(X,sz) ; 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /PSO-CNN/util/rnd.m: -------------------------------------------------------------------------------- 1 | function y = rnd(x) 2 | y = double(x>rand()); 3 | end -------------------------------------------------------------------------------- /PSO-CNN/util/sigm.m: -------------------------------------------------------------------------------- 1 | function X = sigm(P) 2 | X = 1./(1+exp(-P)); 3 | end -------------------------------------------------------------------------------- /PSO-CNN/util/sigmrnd.m: -------------------------------------------------------------------------------- 1 | function X = sigmrnd(P) 2 | % X = double(1./(1+exp(-P)))+1*randn(size(P)); 3 | X = double(1./(1+exp(-P)) > rand(size(P))); 4 | end -------------------------------------------------------------------------------- /PSO-CNN/util/softmax.m: -------------------------------------------------------------------------------- 1 | function mu = softmax(eta) 2 | % Softmax function 3 | % mu(i,c) = exp(eta(i,c))/sum_c' exp(eta(i,c')) 4 | 5 | % This file is from matlabtools.googlecode.com 6 | c = 3; 7 | 8 | tmp = exp(c*eta); 9 | denom = sum(tmp, 2); 10 | mu = bsxfun(@rdivide, tmp, denom); 11 | 12 | end -------------------------------------------------------------------------------- /PSO-CNN/util/tanh_opt.m: -------------------------------------------------------------------------------- 1 | function f=tanh_opt(A) 2 | f=1.7159*tanh(2/3.*A); 3 | end -------------------------------------------------------------------------------- /PSO-CNN/util/visualize.m: 
-------------------------------------------------------------------------------- 1 | function r=visualize(X, mm, s1, s2) 2 | %FROM RBMLIB http://code.google.com/p/matrbm/ 3 | %Visualize weights X. If the function is called as a void method, 4 | %it does the plotting. But if the function is assigned to a variable 5 | %outside of this code, the formed image is returned instead. 6 | if ~exist('mm','var') 7 | mm = [min(X(:)) max(X(:))]; 8 | end 9 | if ~exist('s1','var') 10 | s1 = 0; 11 | end 12 | if ~exist('s2','var') 13 | s2 = 0; 14 | end 15 | 16 | [D,N]= size(X); 17 | s=sqrt(D); 18 | if s==floor(s) || (s1 ~=0 && s2 ~=0) 19 | if (s1 ==0 || s2 ==0) 20 | s1 = s; s2 = s; 21 | end 22 | %its a square, so data is probably an image 23 | num=ceil(sqrt(N)); 24 | a=mm(2)*ones(num*s2+num-1,num*s1+num-1); 25 | x=0; 26 | y=0; 27 | for i=1:N 28 | im = reshape(X(:,i),s1,s2)'; 29 | a(x*s2+1+x : x*s2+s2+x, y*s1+1+y : y*s1+s1+y)=im; 30 | x=x+1; 31 | if(x>=num) 32 | x=0; 33 | y=y+1; 34 | end 35 | end 36 | d=true; 37 | else 38 | %there is not much we can do 39 | a=X; 40 | end 41 | 42 | %return the image, or plot the image 43 | if nargout==1 44 | r=a; 45 | else 46 | 47 | imagesc(a, [mm(1) mm(2)]); 48 | axis equal 49 | colormap gray 50 | 51 | end 52 | -------------------------------------------------------------------------------- /PSO-CNN/util/whiten.m: -------------------------------------------------------------------------------- 1 | function X = whiten(X, fudgefactor) 2 | C = cov(X); 3 | M = mean(X); 4 | [V,D] = eig(C); 5 | P = V * diag(sqrt(1./(diag(D) + fudgefactor))) * V'; 6 | X = bsxfun(@minus, X, M) * P; 7 | end -------------------------------------------------------------------------------- /PSO-CNN/util/zscore.m: -------------------------------------------------------------------------------- 1 | function [x, mu, sigma] = zscore(x) 2 | mu=mean(x); 3 | sigma=max(std(x),eps); 4 | x=bsxfun(@minus,x,mu); 5 | x=bsxfun(@rdivide,x,sigma); 6 | end 7 | -------------------------------------------------------------------------------- /PSO-CNN/卷积神经网络CNN代码解析-matlab.doc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/PSO-CNN/卷积神经网络CNN代码解析-matlab.doc -------------------------------------------------------------------------------- /cnn-matlab/block_sum_2.m: -------------------------------------------------------------------------------- 1 | function y = block_sum_2(x) 2 | % BLOCK_SUM_2 Compute 2x2 block sum of 3-D matrix 3 | % 4 | % SYNTAX 5 | % y = block_sum_2(x) 6 | % 7 | % PARAMETERS 8 | % x: input array, D1xD2x* 9 | % y: output array, (D1/2)x(D2/2)x* 10 | % 11 | % EXAMPLE 12 | % x = rand(4, 4, 3); 13 | % y = block_sum_2(x); 14 | % 15 | % NOTES 16 | % Son Lam Phung, started 12-Jan-2006, revised 04-Nov-2006. 17 | 18 | if (ndims(x) == 3) 19 | y = x(1:2:end, 1:2:end, :) + x(1:2:end, 2:2:end, :) + ... 20 | x(2:2:end, 1:2:end, :) + x(2:2:end, 2:2:end, :); 21 | elseif (ndims(x) == 4) 22 | y = x(1:2:end, 1:2:end, :, :) + x(1:2:end, 2:2:end, :, :) + ... 
23 | x(2:2:end, 1:2:end, :, :) + x(2:2:end, 2:2:end, :, :); 24 | end -------------------------------------------------------------------------------- /cnn-matlab/cnn-matlab.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/cnn-matlab/cnn-matlab.pdf -------------------------------------------------------------------------------- /cnn-matlab/cnn_cm.m: -------------------------------------------------------------------------------- 1 | function c = cnn_cm(connect_type, src_fms, dst_fms, group_fms) 2 | % CNN_CM: Create a connection matrix from one layer to the next layer 3 | % in a CNN 4 | % SYNTAX 5 | % c = cnn_cm(connect_type, src_fms, dst_fms, group_fms); 6 | % 7 | % PARAMETERS 8 | % connect_type: a string 9 | % 'full' full connection 10 | % '1-to-1' 1 feature map to exactly 1 feature map 11 | % '1-to-2' 1 feature map branches to exactly 2 feature maps 12 | % '2-to-1' 2 feature maps merge to 1 feature map 13 | % '1-to-2 2-to-1' combination of '1-to-2' and '2-to-1' 14 | % 'toeplitz' toeplitz-like connection 15 | % 'custom' custom connection 16 | % 17 | % src_fms : number of source feature maps 18 | % dst_fms : number of destination feature maps 19 | % group_fms : number of consecutive source feature maps connected to each destination map (used by 'toeplitz') 20 | % 21 | % EXAMPLES 22 | % c = cnn_cm('full', 3, 4) 23 | % c = cnn_cm('1-to-1', 3) 24 | % c = cnn_cm('1-to-2', 2) 25 | % c = cnn_cm('2-to-1', 4) 26 | % c = cnn_cm('1-to-2 2-to-1', 4); 27 | % c = cnn_cm('toeplitz', 3, 4, 2) 28 | % c = cnn_cm('custom', 3, 4) 29 | % 30 | % NOTES 31 | % c(i,j) = true means there is a connection 32 | % from feature map i in source layer 33 | % to feature map j in the destination layer 34 | % 35 | % Son Lam Phung, started 13-Jan-2006, revised 1-Nov-2006. 36 | 37 | %% Default parameters...................................................... 38 | if nargin < 3 39 | dst_fms = 0; 40 | end 41 | 42 | if nargin < 4 43 | group_fms = 0; 44 | end 45 | 46 | if group_fms > src_fms 47 | group_fms = mod(group_fms, src_fms) + 1; 48 | end 49 | 50 | %% Create connection matrix according to connect_type......................
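% Worked example (added note): cnn_cm('toeplitz', 3, 4, 2) connects each
% destination map q to group_fms = 2 consecutive source maps starting at
% map q, wrapping past src_fms, which yields
%         q=1  q=2  q=3  q=4
%   p=1    1    0    1    1
%   p=2    1    1    0    1
%   p=3    0    1    1    0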
51 | switch connect_type 52 | case {'full'} 53 | % full connection 54 | c = true(src_fms, dst_fms); 55 | 56 | case {'1-to-1'} 57 | % 1 feature map connects to exactly 1 feature map 58 | c = (eye(src_fms) ~=0); 59 | 60 | case {'1-to-2'} 61 | % 1 feature map branchs to exactly 2 feature maps 62 | c = false(src_fms, 2*src_fms); 63 | for p=1:src_fms 64 | c(p, 2*p -1:2*p)= true; 65 | end 66 | 67 | case {'2-to-1'} 68 | % 2 feature maps merge to 1 feature map 69 | dst_fms = floor(src_fms/2); 70 | c = false(2*dst_fms, dst_fms); 71 | for p=1:dst_fms 72 | c(2*p-1:2*p,p)= true; 73 | end 74 | 75 | case {'1-to-2 2-to-1'} 76 | dst_fms = 2*src_fms + src_fms*(src_fms-1)/2; 77 | c = false(src_fms, dst_fms); 78 | for p=1:src_fms 79 | c(p, 2*p-1:2*p)= true; 80 | end 81 | 82 | i = 2*src_fms; 83 | for p1 = 1:src_fms-1 84 | for p2 = p1+1:src_fms 85 | i = i + 1; 86 | c(p1, i) = true; 87 | c(p2, i) = true; 88 | end 89 | end 90 | 91 | case {'toeplitz'} 92 | % toeplitz-like connection 93 | % group_frms determines how many consecutive feature maps are used 94 | c = false(src_fms, dst_fms); 95 | 96 | for q = 1:dst_fms 97 | for s = q:q+group_fms-1 98 | if s <= src_fms 99 | p = s; 100 | else 101 | p = mod(s, src_fms); 102 | if (p == 0) 103 | p = src_fms; 104 | end 105 | end 106 | c(p, q) = true; 107 | end 108 | end 109 | 110 | otherwise 111 | % custom connection 112 | % user can modify individual entries in c 113 | c = true(src_fms, dst_fms); 114 | end -------------------------------------------------------------------------------- /cnn-matlab/cnn_compute_gradient.m: -------------------------------------------------------------------------------- 1 | function [dw, db] = cnn_compute_gradient(net, x, e, y, s) 2 | % CNN_COMPUTE_GRADIENT Compute gradient of MSE function for CNN 3 | % 4 | % SYNTAX 5 | % [dw, db] = cnn_compute_gradient(net, x, e, y, s); 6 | % 7 | % PARAMETERS 8 | % net: CNN structure 9 | % x: network input, 3-D array HxWxK 10 | % e: network error, 2-D array N{L}xK 11 | % y: outputs of network layers 12 | % s: weight sums of network layers 13 | % 14 | % dw: partial derivatives w.r.t. weights 15 | % db: partial derivatives w.r.t. biases 16 | % 17 | % EXAMPLE 18 | % 19 | % 20 | % NOTES 21 | % Son Lam Phung, started 12-Jan-2006, last revised 04-Nov-2006 22 | % Version 1, 12-Jan-2006 23 | % fixed structure C1->S2->C3->S4->C5->F6 24 | % Version 2, 04-Nov-2006 25 | % generalized structure: any number of layers 26 | 27 | %% ................ Stage 1: Compute Error Sensitivity ................. %% 28 | K = size(x, 3); % Number of input samples 29 | es = cell(1, net.L); % Allocate storage for error sensitivity 30 | 31 | %% Layer L: output perceptron layer........................................ 32 | layer = net.L; 33 | N = net.no_fms(layer); 34 | es{layer} = 2/(K*N) * e .* feval(['d' net.f{layer}], s{layer}, y{layer}); 35 | 36 | %% Layer L-1: last convolutional layer..................................... 37 | layer = net.L - 1; 38 | 39 | dy = feval(['d' net.f{layer}], s{layer}, y{layer}); % f'(s) 40 | 41 | es{layer} = reshape((net.w{layer+1} * es{layer+1})', ... 42 | [1 1 K net.no_fms(layer)]); % Back-propagate 43 | 44 | es{layer} = es{layer} .* dy; % Multiply f'(s) 45 | 46 | %% Layer L - 2: last subsampling layer..................................... 
47 | % This layer is special because convolution mask of layer L-1 48 | % has the same size as feature map of layer L-2 49 | layer = net.L - 2; 50 | S1 = size(s{layer},1); 51 | S2 = size(s{layer},2); 52 | es{layer} = repmat(0, size(s{layer})); 53 | dy = feval(['d' net.f{layer}], s{layer}, y{layer}); % f'(s) 54 | 55 | % Replicate matrix es{layer+1}(n,n) into a S1 x S2 x * x * 56 | es_rep = repmat(es{layer+1}, [S1 S2 1 1]); 57 | 58 | % Compute for each feature map 59 | for n = 1:net.no_fms(layer) 60 | % Back-propagate 61 | es{layer}(:,:,:,n) = es_rep(:,:,:,n) .* ... 62 | repmat(net.w{layer+1}(:,:,n,n), [1 1 K 1]); 63 | end 64 | 65 | es{layer} = es{layer} .* dy; % Multiply f'(s) 66 | 67 | %% Layers L-3 to 1: pairs of {convolution layer -> subsampling layer}...... 68 | for layer = (net.L-3):-1:1 69 | dy = feval(['d' net.f{layer}], s{layer}, y{layer}); 70 | if (mod(layer, 2) == 1) 71 | %-- Convolution layer 72 | size_s = size(s{layer}); 73 | es{layer} = repmat(0, size_s); 74 | 75 | % Enlarge es{layer+1} by a factor of 2 76 | % in first and second dimension 77 | size_es = size(es{layer+1}); 78 | es_rep = zeros([size_s(1:2) size_es(3:4)]); 79 | for i = 1:2 80 | for j = 1:2 81 | es_rep(i:2:end,j:2:end,:,:) = es{layer+1}; 82 | end 83 | end 84 | 85 | % Back-propagate 86 | for n = 1:net.no_fms(layer) 87 | es{layer}(:,:,:,n) = es_rep(:,:,:,n) * net.w{layer+1}(n); 88 | end 89 | else 90 | %-- Subsampling layer 91 | size_s = size(s{layer}); 92 | es{layer} = repmat(0, size_s); 93 | 94 | for n = 1:net.no_fms(layer) 95 | % Find all feature maps in {layer+1} 96 | % that go from feature map n 97 | fm_idxs = find(net.c{layer+1}(n, :)); 98 | 99 | % Adding up contribution from feature maps in {layer+1} 100 | for m = fm_idxs 101 | % Back-propagate 102 | es{layer}(:,:,:,n) = ... 103 | es{layer}(:,:,:,n) + ... 104 | imfilter(es{layer+1}(:,:,:,m), ... 105 | rot90(net.w{layer+1}(:,:,n,m),2), ... 106 | 'full', 'corr'); 107 | end 108 | end 109 | end 110 | es{layer} = es{layer} .* dy; % Multiply f'(s) 111 | end 112 | 113 | %% ..................... Stage 2: Compute Gradient ..................... %% 114 | % Allocate memory 115 | dw = cell(1, net.L); % Weights 116 | db = cell(1, net.L); % Biases 117 | 118 | %% Layer L: output perceptron layer........................................ 119 | layer = net.L; 120 | dw{layer} = (es{layer} * squeeze(y{layer-1}))'; 121 | db{layer} = sum(es{layer},2); 122 | 123 | %% Layer L-1: last convolutional layer..................................... 124 | layer = net.L - 1; 125 | size_y = size(y{layer-1}); 126 | 127 | % Replicate es{layer} to size_y(1) x size_y(2) x * x * 128 | es_rep = repmat(es{layer}, [size_y(1) size_y(2) 1 1]); 129 | tmp = sum(es_rep .* y{layer-1},3); 130 | 131 | % Weights 132 | for n = net.no_fms(layer-1):-1:1 133 | dw{layer}(:,:,n,n) = tmp(:,:,:,n); 134 | end 135 | 136 | % Biases 137 | db{layer} = sum(squeeze(es{layer}), 1)'; 138 | 139 | %% Layer L - 2: last subsampling layer..................................... 140 | layer = net.L - 2; 141 | es_y = block_sum_2(y{layer-1}) .* es{layer}; 142 | dw{layer} = squeeze(sum(sum(sum(es_y,3),2),1)); 143 | db{layer} = squeeze(sum(sum(sum(es{layer},3),2),1)); 144 | 145 | %% Layers L-3 to 2: pairs of {convolution layer -> subsampling layer}...... 
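% Added note: in the subsampling branch below, the only trainable weight
% is a single scalar gain per feature map, so its gradient reduces to
% summing, over all positions and samples, the 2x2 block sum of the
% previous layer's output times this layer's error sensitivity (via
% block_sum_2); the convolution branch instead accumulates dw over every
% valid mask position (m, n).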
146 | for layer = (net.L-3):-1:2 147 | if (mod(layer, 2) == 1) 148 | %-- Convolution layer 149 | size_es = size(es{layer}); 150 | size_w = size(net.w{layer}); 151 | hrec_size = net.hrec_size(layer); 152 | dw{layer} = repmat(0,size_w); 153 | 154 | for p = 1:net.no_fms(layer-1) 155 | % Find all feature maps in {layer} 156 | % that go from feature map p {layer-1} 157 | fm_idxs = find(net.c{layer}(p,:)); 158 | 159 | for m = 1:size_es(1) 160 | for n = 1:size_es(2) 161 | % Repeat es{layer}(m,n,:,fm_idxs) 162 | % into size_w(1) x size_w(2) x * x * 163 | es_rep = zeros([size_w(1) size_w(2) ... 164 | K length(fm_idxs)]); 165 | for i = 1:size_w(1) 166 | for j = 1:size_w(2) 167 | es_rep(i,j,:,:) = es{layer}(m,n,:,fm_idxs); 168 | end 169 | end 170 | 171 | %Repeat y{layer-1}(m:m+2*hrec_size,n:n+2*hrec_size,:,p) 172 | %into * x * x * x length(fm_idxs) 173 | y_rep = repmat(... 174 | y{layer-1}(m:m+2*hrec_size,n:n+2*hrec_size,:,p),... 175 | [1 1 1 length(fm_idxs)]); 176 | 177 | dw{layer}(:,:,p,fm_idxs)=dw{layer}(:,:,p,fm_idxs)+ ... 178 | sum(es_rep .* y_rep,3); 179 | end 180 | end 181 | end 182 | db{layer} = squeeze(sum(sum(sum(es{layer},3),2),1)); 183 | else 184 | % -- Subsampling layer 185 | es_y = block_sum_2(y{layer-1}) .* es{layer}; 186 | dw{layer} = squeeze(sum(sum(sum(es_y,3),2),1)); 187 | db{layer} = squeeze(sum(sum(sum(es{layer},3),2),1)); 188 | end 189 | end 190 | %% Layer 1: convolution layer.............................................. 191 | layer = 1; 192 | size_es = size(es{layer}); 193 | size_w = size(net.w{layer}); 194 | 195 | dw{layer} = zeros(size_w); 196 | hrec_size = net.hrec_size(layer); 197 | 198 | for q = 1:net.no_fms(layer) 199 | for m = 1:size_es(1) 200 | for n = 1:size_es(2) 201 | % Repeat part of es{layer} into size_w(1) x size_w(2) x * x * 202 | es_rep = repmat(es{layer}(m,n,:,q), ... 203 | [size_w(1) size_w(2) 1 1]); 204 | 205 | dw{layer}(:,:,q) = dw{layer}(:,:,q) + ... 206 | sum(es_rep .* ... 207 | x(m:m+2*hrec_size,n:n+2*hrec_size,:),3); 208 | end 209 | end 210 | end 211 | db{layer} = squeeze(sum(sum(sum(es{layer},3),2),1)); -------------------------------------------------------------------------------- /cnn-matlab/cnn_devectorize_wb.m: -------------------------------------------------------------------------------- 1 | function [net_w, net_b] = cnn_devectorize_wb(net, w) 2 | % CNN_DEVECTORIZE_WB Extract w and b of CNN from a vector of parameters 3 | % 4 | % SYNTAX 5 | % [net_w, net_b] = cnn_devectorize_wb(net, w) 6 | % 7 | % PARAMETERS 8 | % net: CNN structure 9 | % w: vector containing all trainable parameters 10 | % 11 | % EXAMPLE 12 | % 13 | % 14 | % NOTES 15 | % Son Lam Phung, started 12-Jan-2006. 16 | 17 | net_w = net.w; 18 | net_b = net.b; 19 | 20 | idx_end = 0; 21 | 22 | %% Layer 1: convolution layer C1........................................... 23 | layer = 1; 24 | idx_start = idx_end + 1; 25 | idx_end = idx_start + numel(net_w{layer}) - 1; 26 | net_w{layer} = reshape(w(idx_start:idx_end), size(net_w{layer})); 27 | 28 | idx_start = idx_end + 1; 29 | idx_end = idx_start + numel(net_b{layer}) - 1; 30 | net_b{layer} = reshape(w(idx_start:idx_end), size(net_b{layer})); 31 | 32 | %% Layer 2 to L - 1: pairs of {subsampling layer -> convolution layer}..... 
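% Added note: convolution-layer weights are unpacked only where the
% connection matrix net.c{layer}(p,q) is true, so w must have been packed
% in the same order. Assuming cnn_vectorize_wb takes (net, w, b) and uses
% that order, the round trip
%   [net_w, net_b] = cnn_devectorize_wb(net, cnn_vectorize_wb(net, net.w, net.b));
% recovers net.w and net.b unchanged.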
33 | for layer = 2:net.L-1 34 | if (mod(layer,2) == 0) 35 | % -- Subsampling layer 36 | idx_start = idx_end + 1; 37 | idx_end = idx_start + numel(net_w{layer}) - 1; 38 | net_w{layer} = reshape(w(idx_start:idx_end), size(net_w{layer})); 39 | 40 | idx_start = idx_end + 1; 41 | idx_end = idx_start + numel(net_b{layer}) - 1; 42 | net_b{layer} = reshape(w(idx_start:idx_end), size(net_b{layer})); 43 | else 44 | % -- Convolution layer 45 | for p = 1:net.no_fms(layer-1) 46 | for q = 1:net.no_fms(layer) 47 | if (net.c{layer}(p,q) == true) 48 | idx_start = idx_end + 1; 49 | idx_end = idx_start + numel(net_w{layer}(:,:,p,q)) - 1; 50 | net_w{layer}(:,:,p,q) = ... 51 | reshape(w(idx_start:idx_end), ... 52 | size(net_w{layer}(:,:,p,q))); 53 | end 54 | end 55 | end 56 | 57 | idx_start = idx_end + 1; 58 | idx_end = idx_start + numel(net_b{layer}) - 1; 59 | net_b{layer} = reshape(w(idx_start:idx_end), size(net_b{layer})); 60 | end 61 | end 62 | 63 | %% Layer L: output perceptron layer........................................ 64 | layer = net.L; 65 | idx_start = idx_end + 1; 66 | idx_end = idx_start + numel(net_w{layer}) - 1; 67 | net_w{layer} = reshape(w(idx_start:idx_end), size(net_w{layer})); 68 | 69 | idx_start = idx_end + 1; 70 | idx_end = idx_start + numel(net_b{layer}) - 1; 71 | net_b{layer} = reshape(w(idx_start:idx_end), size(net_b{layer})); -------------------------------------------------------------------------------- /cnn-matlab/cnn_example.m: -------------------------------------------------------------------------------- 1 | % DESCRIPTION Testing cnn_new.m, cnn_sim.m, and cnn_sim_verbose.m 2 | % cnn_train 3 | % 4 | % NOTES 5 | % Son Lam Phung, started 04-Nov-2006. 6 | 7 | %% Network creation 8 | H = 32; % height of 2-D input 9 | W = 32; % width of 2-D input 10 | % Create connection matrix 11 | % This matrix specifies how feature maps from one layer 12 | % are connected to feature maps in the next layer 13 | c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)} 14 | % Create CNN 15 | net = cnn_new([H W], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop'); 16 | 17 | %% Network simulation 18 | K = 2000; % Number of samples 19 | x = randn(H, W, K); % Network input 20 | [y, s] = cnn_sim_verbose(net,x); y{end} % Method 1: Network output & layer output 21 | y = cnn_sim(net,x); % Method 2: Network output 22 | 23 | %% Network training 24 | t = rand(2, K); % Network target output 25 | [new_net, tr] = cnn_train(net, x, t); % new_net is trained network 26 | % tr is training record 27 | 28 | %% Plot training performance 29 | plot(tr.epoch, tr.mse); 30 | xlabel('Epoch') 31 | ylabel('MSE') -------------------------------------------------------------------------------- /cnn-matlab/cnn_example_face.m: -------------------------------------------------------------------------------- 1 | % DESCRIPTION 2 | % Example of using the CNN MATLAB library 3 | % for face versus non-face classification 4 | % NOTES 5 | % This example is quite memory intensive. 6 | % It has been tested on a PC with 4GB RAM 7 | % Son Lam Phung, started 15-Apr-2009.
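% Added note: the statements below assume train_x and test_x are already
% in the workspace with one flattened 48x48 image per row (596 training
% and 501 test samples, matching data\train_data.mat and
% data\test_data.mat), and rescale pixel values to [0, 1].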
8 | 9 | train_x = double(reshape(train_x',48,48,596))/255; 10 | test_x = double(reshape(test_x',48,48,501))/255; 11 | train_y = double(train_y'); 12 | test_y = double(test_y'); 13 | x=train_x; 14 | d=train_y; 15 | x_test=test_x; 16 | d_test=test_y; 17 | 18 | 19 | %% Load training data 20 | % load('data\train_data.mat') 21 | whos 22 | 23 | %% Create a CNN 24 | H = 48; % height of 2-D input 25 | W = 48; % width of 2-D input 26 | 27 | % Create connection matrices 28 | cm1 = cnn_cm('full', 1, 2); % input to layer C1 29 | % C1 has 2 planes 30 | cm2 = cnn_cm('1-to-1', 2); % C1 to S2 31 | cm3 = [1 1 0 0 1; 0 0 1 1 1];% S2 to layer C3 32 | cm4 = cnn_cm('1-to-1', 5); % C3 to S4 33 | cm5 = cnn_cm('1-to-1', 5); % S4 to C5 34 | cm6 = cnn_cm('full',5,1); % C5 to F6 35 | c = {cm1, cm2, cm3, cm4, cm5, cm6}; 36 | 37 | % Receptive sizes for each layer 38 | rec_size = [5 5; % C1 39 | 2 2; % S2 40 | 3 3; % C3 41 | 2 2; % S4 42 | 0 0; % C5 auto calculated 43 | 0 0]; % F6 auto calculated 44 | 45 | % Transfer function 46 | tf_fcn = {'tansig', % layer C1 47 | 'purelin', % layer S2 48 | 'tansig', % layer C3 49 | 'purelin', % layer S4 50 | 'tansig', % layer C5 51 | 'tansig'} % layer F6 output 52 | 53 | % Training method 54 | train_method = 'rprop'; % 'gd' 55 | 56 | % Create CNN 57 | net = cnn_new([H W], c, rec_size, tf_fcn, train_method); 58 | 59 | %% Network training 60 | net.train.epochs = 1100; 61 | [new_net, tr] = cnn_train(net, x, d); 62 | % new_net is trained network, tr is training record 63 | save('data\trained_net.mat', 'new_net', 'net', 'tr'); 64 | 65 | %% Plotting training performance 66 | plot(tr.epoch, tr.mse, 'b-', 'LineWidth', 2); grid 67 | h = xlabel('Epochs'), set(h, 'FontSize', 14); 68 | h = ylabel('Training MSE'), set(h, 'FontSize', 14); 69 | set(gca, 'FontSize', 14); 70 | 71 | y = cnn_sim(new_net, x); % network output 72 | cr = sum((y >0) == (d >=0))/length(d)*100; 73 | fprintf('Classification rate (train): cr = %2.2f%%\n',cr); 74 | 75 | %% Network testing 76 | % load('data\test_data.mat') 77 | whos 78 | y_test = cnn_sim(new_net,x_test); % network output 79 | cr_test = sum((y_test >0)==(d_test>=0))/length(d_test)*100; 80 | fprintf('Classification rate (test): cr = %2.2f%%\n',cr_test); -------------------------------------------------------------------------------- /cnn-matlab/cnn_example_nets.m: -------------------------------------------------------------------------------- 1 | %% CNN used by Garcia and Delakis for face detection 2 | c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ... 3 | cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ... 4 | cnn_cm('1-to-1', 14), cnn_cm('full', 14, 1)}; 5 | net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ... 
6 |     repmat({'tansig'}, 1, length(c)), 'rprop');
7 | 
8 | %%
9 | c = {cnn_cm('full', 1, 6), cnn_cm('one', 6), cnn_cm('toeplitz', 6, 16, 3), cnn_cm('one', 16), cnn_cm('full', 16, 120), cnn_cm('full', 120, 1)}
10 | net = cnn_new([32 32], c, [5 5; 2 2; 5 5; 2 2; 5 5; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop')
11 | 
12 | %%
13 | c = {cnn_cm('full', 1, 6), cnn_cm('one', 6), cnn_cm('toeplitz', 6, 16, 3), cnn_cm('one', 16), cnn_cm('full', 16, 10), cnn_cm('full', 10, 1)}
14 | net = cnn_new([32 32], c, [5 5; 2 2; 5 5; 2 2; 5 5; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop')
15 | 
16 | %%
17 | c = {cnn_cm('full', 1, 4), cnn_cm('one', 4), cnn_cm('toeplitz', 4, 6, 3), cnn_cm('one', 6), cnn_cm('full', 6, 4), cnn_cm('full', 4, 1)}
18 | net = cnn_new([32 32], c, [5 5; 2 2; 5 5; 2 2; 5 5; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop')
19 | 
20 | %%
21 | c = {cnn_cm('full', 1, 3), cnn_cm('one', 3), cnn_cm('toeplitz', 3, 6, 2), cnn_cm('one', 6), cnn_cm('full', 6, 6), cnn_cm('full', 6, 1)}
22 | net = cnn_new([32 32], c, [5 5; 2 2; 5 5; 2 2; 5 5; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop')
23 | 
24 | %%
25 | c = {cnn_cm('full', 1, 4), cnn_cm('one', 4), cnn_cm('binary', 4, 8), cnn_cm('one', 8), cnn_cm('full', 8, 5), cnn_cm('full', 5, 1)}
26 | net = cnn_new([32 32], c, [5 5; 2 2; 5 5; 2 2; 5 5; 0 0], repmat({'tansig'}, 1, length(c)), 'rprop')
27 | 
--------------------------------------------------------------------------------
/cnn-matlab/cnn_get_init_tr.m:
--------------------------------------------------------------------------------
1 | function tr = cnn_get_init_tr()
2 | % CNN_GET_INIT_TR Get initial training record for CNN
3 | %
4 | % SYNTAX
5 | % tr = cnn_get_init_tr;
6 | %
7 | % PARAMETERS
8 | % tr: initial training record
9 | %     tr.mse = [];           % Mean square error
10 | %     tr.time = [];          % Training time in seconds
11 | %     tr.epoch = [];         % Number of training epochs
12 | %     tr.output_eval = [];   % Total network output evaluations
13 | %     tr.gradient_eval = []; % Total gradient evaluations
14 | %     tr.hessian_eval = [];  % Total Hessian matrix evaluations
15 | %     tr.jacobian_eval = []; % Total Jacobian matrix evaluations
16 | % EXAMPLE
17 | % tr = cnn_get_init_tr;
18 | %
19 | % NOTES
20 | % Son Lam Phung, started 12-Jan-2006.
21 | 
22 | tr.mse = [];           % Mean square error
23 | tr.time = [];          % Training time in seconds
24 | tr.epoch = [];         % Number of training epochs
25 | tr.output_eval = [];   % Total network output evaluations
26 | tr.gradient_eval = []; % Total gradient evaluations
27 | tr.hessian_eval = [];  % Total Hessian matrix evaluations
28 | tr.jacobian_eval = []; % Total Jacobian matrix evaluations
--------------------------------------------------------------------------------
/cnn-matlab/cnn_getw.m:
--------------------------------------------------------------------------------
1 | function w = cnn_getw(net)
2 | % CNN_GETW: Get all trainable parameters of CNN as a column vector
3 | %
4 | % SYNTAX
5 | % w = cnn_getw(net);
6 | %
7 | % PARAMETERS
8 | % net: CNN structure
9 | % w: column vector containing all trainable parameters
10 | %
11 | % EXAMPLE
12 | %
13 | %
14 | % NOTES
15 | % Son Lam Phung, started 12-Jan-2006.
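% [annotation] Minimal usage sketch, assuming a network built with cnn_new:
%   net = cnn_new;             % default LeNet-style network
%   w = cnn_getw(net);         % column vector of length net.P
%   net2 = cnn_setw(net, w);   % inverse operation: cnn_getw(net2) == w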
16 | 
17 | w = cnn_vectorize_wb(net, net.w, net.b);
--------------------------------------------------------------------------------
/cnn-matlab/cnn_init.m:
--------------------------------------------------------------------------------
1 | function new_net = cnn_init(net)
2 | % CNN_INIT Initialise trainable parameters in CNN
3 | %
4 | % SYNTAX
5 | % new_net = cnn_init(net);
6 | %
7 | % PARAMETERS
8 | % net: CNN structure
9 | % new_net: new CNN structure
10 | %
11 | % EXAMPLE
12 | % new_net = cnn_init(net);
13 | %
14 | % NOTES
15 | % Son Lam Phung, started 22-Jan-2006.
16 | 
17 | new_net = net;
18 | 
19 | %% Layer 1: convolution layer..............................................
20 | layer = 1;
21 | new_net.b{layer} = randn(size(new_net.b{layer}));
22 | new_net.w{layer} = randn(size(new_net.w{layer}));
23 | 
24 | %% Layer 2 to L - 1: pairs of {subsampling layer -> convolution layer}.....
25 | for layer = 2:net.L-1
26 |     new_net.b{layer} = randn(size(new_net.b{layer}));
27 | 
28 |     if (mod(layer,2) == 0)
29 |         % -- Subsampling layer
30 |         new_net.w{layer} = randn(size(new_net.w{layer}));
31 | 
32 |     else
33 |         % -- Convolution layer
34 |         for p = 1:new_net.no_fms(layer-1)
35 |             for q = 1:new_net.no_fms(layer)
36 |                 if (new_net.c{layer}(p, q) == true)
37 |                     new_net.w{layer}(:,:, p, q) = ...
38 |                         randn(new_net.rec_size(layer,:));
39 |                 end
40 |             end
41 |         end
42 |     end
43 | end
44 | 
45 | %% Layer L: output perceptron layer........................................
46 | layer = net.L;
47 | new_net.b{layer} = randn(size(new_net.b{layer}));
48 | new_net.w{layer} = randn(size(new_net.w{layer}));
--------------------------------------------------------------------------------
/cnn-matlab/cnn_new.m:
--------------------------------------------------------------------------------
1 | function net = cnn_new(input_size, c, rec_size, tf_fcn, train_method)
2 | % cnn_new: Create a new convolutional neural network
3 | %   C1 -> S2 -> C3 -> S4 -> C5 -> F
4 | %   C1 -> S2 -> C3 -> S4 -> ... -> C2a+1 -> F2a+2
5 | %
6 | % SYNTAX
7 | % net = cnn_new(input_size, c, rec_size, tf_fcn, train_method)
8 | %
9 | % PARAMETERS
10 | % input_size: size of input image, e.g. [32 32]
11 | %
12 | % c: list of connection matrices, cell array
13 | %    c{1} is connection matrix from input to C1
14 | %    c{2} is connection matrix from C1 to S2, and so on
15 | %    c{l}(i,j) = true if feature map i of layer l-1
16 | %    is connected to feature map j of layer l
17 | %
18 | % rec_size: size of receptive fields, 2D array
19 | %    rec_size(l,:) is the size of receptive field for layer l
20 | %
21 | % tf_fcn: transfer function of network layers, cell array of strings
22 | %    tf_fcn{i} is the transfer function of layer Li
23 | %
24 | % train_method: training method for network, string
25 | %    'gd', 'rprop'
26 | %
27 | % EXAMPLES
28 | % c = {cnn_cm('full', 1, 2), cnn_cm('1-to-1', 2), ...
29 | %      cnn_cm('1-to-2', 2), cnn_cm('1-to-1', 4), ...
30 | %      cnn_cm('1-to-2', 4), cnn_cm('full', 8, 2)}
31 | % net = cnn_new([32 32], c, [5 2 5 2 0 0; 5 2 5 2 0 0]', ...
32 | %               repmat({'tansig'}, 1, length(c)), 'rprop');
33 | %
34 | % c = {cnn_cm('full', 1, 6), cnn_cm('1-to-1', 6), ...
35 | %      cnn_cm('toeplitz', 6, 16, 3), cnn_cm('1-to-1', 16), ...
36 | %      cnn_cm('full', 16, 120), cnn_cm('full', 120, 2)}
37 | % net = cnn_new([32 32], c, [5 2 5 2 0 0; 5 2 5 2 0 0]', ...
38 | %               repmat({'tansig'}, 1, length(c)), 'rprop');
39 | %
40 | % c = {cnn_cm('full', 1, 6), cnn_cm('1-to-1', 6), ...
41 | %      cnn_cm('toeplitz', 6, 16, 3), cnn_cm('1-to-1', 16), ...
42 | %      cnn_cm('full', 16, 10), cnn_cm('full', 10, 1)}
43 | % net = cnn_new([32 32], c, [5 2 5 2 0 0; 5 2 5 2 0 0]', ...
44 | %               repmat({'tansig'}, 1, length(c)), 'rprop');
45 | %
46 | % NOTES
47 | % See also cnn_cm.m
48 | %
49 | % Son Lam Phung, started 11-Jan-2006, revised 01-Nov-2006.
50 | 
51 | %% Default parameters......................................................
52 | if nargin < 1
53 |     input_size = [32 32];
54 | end
55 | 
56 | if nargin < 2
57 |     c = {cnn_cm('full', 1, 6), cnn_cm('1-to-1', 6), ...
58 |          cnn_cm('toeplitz', 6, 16, 3), cnn_cm('1-to-1', 16), ...
59 |          cnn_cm('full', 16, 120), cnn_cm('full', 120, 1)};
60 | end
61 | 
62 | if nargin < 3
63 |     rec_size = [5 2 5 2 0 0; 5 2 5 2 0 0]';
64 | end
65 | 
66 | if nargin < 4
67 |     tf_fcn = repmat({'ltanh'}, 1, length(c));
68 | end
69 | 
70 | if nargin < 5
71 |     train_method = 'rprop';
72 | end
73 | 
74 | %% ........................... Create a CNN ............................ %%
75 | net.L = length(c);                     % number of layers
76 | net.w = cell(1,net.L);                 % weights
77 | net.b = cell(1,net.L);                 % biases
78 | net.c = c;                             % connection matrices
79 | net.rec_size = rec_size;               % size of receptive fields
80 | net.hrec_size = floor(net.rec_size/2); % half the size of receptive fields
81 | net.f = tf_fcn;                        % transfer function
82 | net.input_size = input_size;           % size of input images
83 | net.no_fms = zeros(1,net.L);           % number of feature maps in each layer
84 | net.fm_size = zeros(net.L, 2);         % size of feature map
85 | net.layers = cell(1, net.L);           % network layers
86 | 
87 | %% Layer 1: convolution layer..............................................
88 | no_params = 0; % total number of free parameters in networks
89 | layer = 1;
90 | net.layers{layer}.type = 'C';
91 | net.layers{layer}.name = [net.layers{layer}.type int2str(layer)];
92 | net.layers{layer}.connection = 'full';
93 | net.no_fms(layer) = size(c{layer},2);
94 | net.fm_size(layer, :) = input_size - 2 * net.hrec_size(layer, :);
95 | 
96 | net.b{layer} = randn(net.no_fms(layer),1);
97 | net.w{layer} = randn([net.rec_size(layer,:) net.no_fms(layer)]);
98 | no_params = no_params + numel(net.w{layer}) + numel(net.b{layer});
99 | 
100 | %% Layer 2 to L - 1: pairs of {subsampling layer -> convolution layer}.....
101 | for layer = 2:net.L-1
102 |     net.no_fms(layer) = size(c{layer},2);
103 |     if (mod(layer,2) == 0)
104 |         % -- subsampling layer S
105 |         net.layers{layer}.type = 'S';
106 |         net.layers{layer}.name = [net.layers{layer}.type int2str(layer)];
107 |         net.layers{layer}.connection = '1-to-1';
108 |         net.fm_size(layer, :) = floor(net.fm_size(layer-1,:)/2);
109 | 
110 |         net.w{layer} = randn(net.no_fms(layer),1); % weights
111 |         net.b{layer} = randn(net.no_fms(layer),1); % biases
112 | 
113 |         no_params = no_params + numel(net.w{layer}) + numel(net.b{layer});
114 |     else
115 |         % -- convolution layer C
116 |         net.layers{layer}.type = 'C';
117 |         net.layers{layer}.name = [net.layers{layer}.type int2str(layer)];
118 |         net.layers{layer}.connection = 'custom';
119 | 
120 |         if (layer == net.L - 1)
121 |             % receptive field of last convolution layer =
122 |             % feature map of previous layer
123 |             net.rec_size(layer, :) = net.fm_size(layer-1,:);
124 |         end
125 | 
126 |         net.fm_size(layer, :) = net.fm_size(layer-1,:) ...
127 |                                 - net.rec_size(layer, :) + 1;
128 | 
129 |         net.b{layer} = randn(net.no_fms(layer),1); % biases
130 |         % update number of parameters
131 |         no_params = no_params + numel(net.b{layer});
132 | 
133 |         net.w{layer} = zeros(net.rec_size(layer,1), ... % weights
134 |                              net.rec_size(layer,2), ...
135 |                              net.no_fms(layer-1), ...
136 |                              net.no_fms(layer));
137 |         % initialize weights
138 |         for p = 1:net.no_fms(layer-1)
139 |             for q = 1:net.no_fms(layer)
140 |                 if (net.c{layer}(p, q) == true)
141 |                     net.w{layer}(:,:, p, q) = randn(net.rec_size(layer,:));
142 |                     % update number of parameters
143 |                     no_params = no_params + prod(net.rec_size(layer,:));
144 |                 end
145 |             end
146 |         end
147 |     end
148 | end
149 | 
150 | %% Layer L: output perceptron layer........................................
151 | layer = net.L;
152 | net.layers{layer}.type = 'F';
153 | net.layers{layer}.name = [net.layers{layer}.type int2str(layer)];
154 | net.layers{layer}.connection = 'full';
155 | net.no_fms(layer) = size(c{layer},2);
156 | net.w{layer} = randn(net.no_fms(layer-1), net.no_fms(layer));
157 | net.b{layer} = randn(net.no_fms(layer),1);
158 | no_params = no_params + numel(net.w{layer}) + numel(net.b{layer});
159 | net.P = no_params;
160 | 
161 | %% .................... Network Training Parameters .................... %%
162 | net.train.epochs = 20;           % epoch limit
163 | net.train.goal = 0.0;            % target mse
164 | net.train.show = 1;              % epochs before next show of progress
165 | net.train.method = train_method; % 'gd', 'rprop'
166 | 
167 | % gradient descent
168 | net.train.gd.lr = 0.15; % learning rate
169 | 
170 | % rprop
171 | net.train.rprop.etap = 1.01;       % increasing multiplier
172 | net.train.rprop.etam = 0.99;       % decreasing multiplier
173 | net.train.rprop.delta_init = 0.01; % initial learning rate
174 | net.train.rprop.delta_max = 10.0;  % maximum learning rate
--------------------------------------------------------------------------------
/cnn-matlab/cnn_setw.m:
--------------------------------------------------------------------------------
1 | function new_net = cnn_setw(net, w)
2 | % CNN_SETW: Set trainable parameters of CNN
3 | %
4 | % SYNTAX
5 | % new_net = cnn_setw(net, w);
6 | %
7 | % PARAMETERS
8 | % net: CNN structure
9 | % w: column vector containing new values for all trainable parameters
10 | %
11 | % EXAMPLE
12 | %
13 | %
14 | % NOTES
15 | % Son Lam Phung, started 12-Jan-2006.
16 | 
17 | new_net = net;
18 | [w1, b1] = cnn_devectorize_wb(net, w);
19 | new_net.w = w1;
20 | new_net.b = b1;
--------------------------------------------------------------------------------
/cnn-matlab/cnn_sim.m:
--------------------------------------------------------------------------------
1 | function y = cnn_sim(net, x)
2 | % CNN_SIM: Compute CNN output
3 | %
4 | % SYNTAX
5 | % y = cnn_sim(net, x)
6 | %
7 | % PARAMETERS
8 | % net: a CNN network structure
9 | % x: input, 3-D array HxWxK
10 | %
11 | % EXAMPLE
12 | % c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ...
13 | %      cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ...
14 | %      cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)};
15 | % net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ...
16 | %               repmat({'tansig'}, 1, length(c)), 'rprop');
17 | % K = 3; x = randn(36, 32, K);
18 | % y = cnn_sim(net,x);
19 | %
20 | % NOTES
21 | % Son Lam Phung, started 11-Jan-2006, revised 4 November 2006
22 | 
23 | %% Initialization..........................................................
24 | y = cell(1, net.L); % output
25 | s = cell(1, net.L); % weighted sum
26 | K = size(x, 3);     % number of samples
27 | 
28 | %% Layer 1: Convolution C1.................................................
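% [annotation] imfilter(..., 'same', 'corr') (Image Processing Toolbox)
% correlates one 2-D kernel with all K input samples at once; the border
% rows and columns (hrec_size on each side) are then cropped so that only
% the "valid" part of the correlation, of size net.fm_size(layer,:), is kept.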
29 | layer = 1; 30 | s{layer} = repmat(0, [net.fm_size(layer, :), K, net.no_fms(layer)]); 31 | hrec_size = net.hrec_size(layer,:); % half receptive size 32 | 33 | % Compute each feature map 34 | for i = 1:net.no_fms(layer) 35 | % Convolution 36 | t = imfilter(x, net.w{layer}(:,:,i), 'same', 'corr'); 37 | 38 | % Extract only the meaningful part of matrix 39 | s{layer}(:,:,:,i) = t(hrec_size(1)+1:end-hrec_size(1), ... 40 | hrec_size(2)+1:end-hrec_size(2), :) + ... 41 | net.b{layer}(i); 42 | end 43 | 44 | % Apply activation function 45 | y{layer} = feval(net.f{layer}, s{layer}); % Apply activation function 46 | 47 | %% Layer 2 to L - 2: Pairs of {Subsampling Layer -> Convolution Layer}..... 48 | for layer = 2:net.L-2 49 | if (mod(layer,2) == 0) 50 | % -- Subsampling layer 51 | s{layer} = repmat(0, [net.fm_size(layer,:), K, net.no_fms(layer)]); 52 | 53 | % Compute each feature map 54 | for i = 1:net.no_fms(layer) 55 | % 2x2 block sum -> multiply weight and add bias 56 | s{layer}(:,:,:,i) = block_sum_2(y{layer-1}(:,:,:,i)) * ... 57 | net.w{layer}(i) + net.b{layer}(i); 58 | end 59 | else 60 | % -- Convolution layer 61 | s{layer} = repmat(0, [net.fm_size(layer, :),K,net.no_fms(layer)]); 62 | hrec_size = net.hrec_size(layer,:); % half receptive size 63 | 64 | % Compute each feature map 65 | for q = 1:net.no_fms(layer) 66 | % Find all feature maps in {layer-1} that 67 | % go to this feature map in {layer} 68 | fm_idxs = find(net.c{layer}(:, q))'; 69 | 70 | % Compute contribution from each feature map in {layer-1} 71 | for p = fm_idxs 72 | % Convolution 73 | t = imfilter(y{layer-1}(:,:,:,p), ... 74 | net.w{layer}(:,:,p,q), 'same', 'corr'); 75 | 76 | % Extract only the meaningful part of matrix 77 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + ... 78 | t(hrec_size(1)+1:end-hrec_size(1), ... 79 | hrec_size(2)+1:end-hrec_size(2),:); 80 | end 81 | 82 | % Add with bias term 83 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + net.b{layer}(q); 84 | end 85 | end 86 | 87 | % Apply activation function 88 | y{layer} = feval(net.f{layer}, s{layer}); 89 | end 90 | 91 | %% Layer L-1: Last convolution layer....................................... 92 | layer = net.L-1; 93 | s{layer} = repmat(0, [net.fm_size(layer, :), K, net.no_fms(layer)]); 94 | 95 | % Compute each feature map 96 | for q = 1:net.no_fms(layer) 97 | % Find all feature maps in {layer-1} that 98 | % go to this feature map in {layer} 99 | fm_idxs = find(net.c{layer}(:, q))'; 100 | 101 | % Compute contribution from each feature map in {layer-1} 102 | for p = fm_idxs 103 | % Replicate weight matrix 104 | w_rep = repmat(net.w{layer}(:,:,p,q),[1 1 K]); 105 | 106 | % Compute weighted sum 107 | t = sum(sum(y{layer-1}(:,:,:,p) .* w_rep,1),2); 108 | 109 | % Add to s 110 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + t; 111 | end 112 | 113 | % Add with bias term 114 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + net.b{layer}(q); 115 | end 116 | 117 | % Apply activation function 118 | y{layer} = feval(net.f{layer}, s{layer}); 119 | 120 | %% Layer L: output perceptron layer........................................ 121 | layer = net.L; 122 | 123 | % Re-arrange previous output 124 | yt = squeeze(y{layer-1}); % yt has size K x net.no_fms{layer-1} 125 | 126 | % Compute weighted sum and bias 127 | s{layer} = (yt * net.w{layer})' + repmat(net.b{layer}, [1 K]); 128 | 129 | % Apply activation function 130 | y{layer} = feval(net.f{layer}, s{layer}); 131 | 132 | %% Final network output.................................................... 
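% [annotation] y{end} holds the output-layer activations, an array of size
% net.no_fms(net.L) x K (one column per input sample).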
133 | y = y{end};
--------------------------------------------------------------------------------
/cnn-matlab/cnn_sim_verbose.m:
--------------------------------------------------------------------------------
1 | function [y, s] = cnn_sim_verbose(net, x)
2 | % CNN_SIM_VERBOSE: Compute CNN output
3 | %
4 | % SYNTAX
5 | % [y, s] = cnn_sim_verbose(net, x)
6 | %
7 | % PARAMETERS
8 | % net: a CNN network structure
9 | % x: input, 3-D array HxWxK
10 | %
11 | % y: outputs for network layers
12 | %    y{l} is the output of layer l
13 | %    y{end} is the final network outputs
14 | % s: weighted sum (plus bias) for network layers
15 | %    s{l} is the weighted sum of layer l
16 | %    y{l} = f(s{l}) where f is activation function
17 | %
18 | % EXAMPLE
19 | % c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ...
20 | %      cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ...
21 | %      cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)};
22 | % net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ...
23 | %               repmat({'tansig'}, 1, length(c)), 'rprop');
24 | % K = 3; x = randn(36, 32, K);
25 | % [y, s] = cnn_sim_verbose(net,x);
26 | %
27 | % NOTES
28 | % Son Lam Phung, started 11-Jan-2006, revised 4 November 2006
29 | 
30 | %% Initialization..........................................................
31 | y = cell(1, net.L); % output
32 | s = cell(1, net.L); % weighted sum
33 | K = size(x, 3);     % number of samples
34 | 
35 | %% Layer 1: Convolution C1.................................................
36 | layer = 1;
37 | s{layer} = repmat(0, [net.fm_size(layer, :), K, net.no_fms(layer)]);
38 | hrec_size = net.hrec_size(layer,:); % half receptive size
39 | 
40 | % Compute each feature map
41 | for i = 1:net.no_fms(layer)
42 |     % Convolution
43 |     t = imfilter(x, net.w{layer}(:,:,i), 'same', 'corr');
44 | 
45 |     % Extract only the meaningful part of matrix
46 |     s{layer}(:,:,:,i) = t(hrec_size(1)+1:end-hrec_size(1), ...
47 |                           hrec_size(2)+1:end-hrec_size(2), :) + ...
48 |                         net.b{layer}(i);
49 | end
50 | 
51 | % Apply activation function
52 | y{layer} = feval(net.f{layer}, s{layer}); % Apply activation function
53 | 
54 | %% Layer 2 to L - 2: Pairs of {Subsampling Layer -> Convolution Layer}.....
55 | for layer = 2:net.L-2
56 |     if (mod(layer,2) == 0)
57 |         % -- Subsampling layer
58 |         s{layer} = repmat(0, [net.fm_size(layer,:), K, net.no_fms(layer)]);
59 | 
60 |         % Compute each feature map
61 |         for i = 1:net.no_fms(layer)
62 |             % 2x2 block sum -> multiply weight and add bias
63 |             s{layer}(:,:,:,i) = block_sum_2(y{layer-1}(:,:,:,i)) * ...
64 |                                 net.w{layer}(i) + net.b{layer}(i);
65 |         end
66 |     else
67 |         % -- Convolution layer
68 |         s{layer} = repmat(0, [net.fm_size(layer, :), K, net.no_fms(layer)]);
69 |         hrec_size = net.hrec_size(layer,:); % half receptive size
70 | 
71 |         % Compute each feature map
72 |         for q = 1:net.no_fms(layer)
73 |             % Find all feature maps in {layer-1} that
74 |             % go to this feature map in {layer}
75 |             fm_idxs = find(net.c{layer}(:, q))';
76 | 
77 |             % Compute contribution from each feature map in {layer-1}
78 |             for p = fm_idxs
79 |                 % Convolution
80 |                 t = imfilter(y{layer-1}(:,:,:,p), ...
81 |                              net.w{layer}(:,:,p,q), 'same', 'corr');
82 | 
83 |                 % Extract only the meaningful part of matrix
84 |                 s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + ...
85 |                     t(hrec_size(1)+1:end-hrec_size(1), ...
86 | hrec_size(2)+1:end-hrec_size(2),:); 87 | end 88 | 89 | % Add with bias term 90 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + net.b{layer}(q); 91 | end 92 | end 93 | 94 | % Apply activation function 95 | y{layer} = feval(net.f{layer}, s{layer}); 96 | end 97 | 98 | %% Layer L-1: Last convolution layer....................................... 99 | layer = net.L-1; 100 | s{layer} = repmat(0, [net.fm_size(layer, :), K, net.no_fms(layer)]); 101 | 102 | % Compute each feature map 103 | for q = 1:net.no_fms(layer) 104 | % Find all feature maps in {layer-1} that 105 | % go to this feature map in {layer} 106 | fm_idxs = find(net.c{layer}(:, q))'; 107 | 108 | % Compute contribution from each feature map in {layer-1} 109 | for p = fm_idxs 110 | % Replicate weight matrix 111 | w_rep = repmat(net.w{layer}(:,:,p,q),[1 1 K]); 112 | 113 | % Compute weighted sum 114 | t = sum(sum(y{layer-1}(:,:,:,p) .* w_rep,1),2); 115 | 116 | % Add to s 117 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + t; 118 | end 119 | 120 | % Add with bias term 121 | s{layer}(:,:,:,q) = s{layer}(:,:,:,q) + net.b{layer}(q); 122 | end 123 | 124 | % Apply activation function 125 | y{layer} = feval(net.f{layer}, s{layer}); 126 | 127 | %% Layer L: output perceptron layer........................................ 128 | layer = net.L; 129 | 130 | % Re-arrange previous output 131 | yt = squeeze(y{layer-1}); % yt has size K x net.no_fms{layer-1} 132 | 133 | % Compute weighted sum and bias 134 | s{layer} = (yt * net.w{layer})' + repmat(net.b{layer}, [1 K]); 135 | 136 | % Apply activation function 137 | y{layer} = feval(net.f{layer}, s{layer}); -------------------------------------------------------------------------------- /cnn-matlab/cnn_train.m: -------------------------------------------------------------------------------- 1 | function [new_net, new_tr] = cnn_train(net, x, d, tr) 2 | % CNN_TRAIN Train a CNN 3 | % 4 | % SYNTAX 5 | % [new_net, new_tr] = cnn_train(net, x, d, tr); 6 | % 7 | % PARAMETERS 8 | % net: CNN structure 9 | % x: inputs (3-D array H x W x K) 10 | % d: desired output (2-D array NL x K) 11 | % tr: existing training records 12 | % 13 | % new_tr: updated training records 14 | % new_net: trained net 15 | % 16 | % EXAMPLE 17 | % c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ... 18 | % cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ... 19 | % cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)}; 20 | % net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ... 21 | % repmat({'tansig'}, 1, length(c)), 'rprop'); 22 | % K = 5; x = randn(36, 32, K); d = randn(2,K); 23 | % [new_net, new_tr] = cnn_train(net, x, d); 24 | % 25 | % NOTES 26 | % Son Lam Phung, started 12-Jan-2006. 27 | 28 | % Call the appropriate cnn_train_... function according to 29 | % the specified training method 30 | if nargin < 4 31 | [new_net, new_tr] = ... 32 | feval(['cnn_train_' net.train.method], net, x, d); 33 | else 34 | [new_net, new_tr] = ... 
35 | feval(['cnn_train_' net.train.method], net, x, d, tr); 36 | end -------------------------------------------------------------------------------- /cnn-matlab/cnn_train_gd.m: -------------------------------------------------------------------------------- 1 | function [new_net, new_tr] = cnn_train_gd(net, x, d, tr) 2 | % CNN_TRAIN_GD Train a CNN using Gradient Descent method 3 | % 4 | % SYNTAX 5 | % [new_net, new_tr] = cnn_train_gd(net, x, d, tr); 6 | % 7 | % PARAMETERS 8 | % net: CNN structure 9 | % x: inputs (3-D array H x W x K) 10 | % d: desired output (2-D array NL x K) 11 | % tr: existing training records 12 | % 13 | % new_tr: updated training records 14 | % new_net: trained net 15 | % 16 | % EXAMPLE 17 | % c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ... 18 | % cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ... 19 | % cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)}; 20 | % net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ... 21 | % repmat({'tansig'}, 1, length(c)), 'rprop'); 22 | % K = 5; x = randn(36, 32, K); d = randn(2,K); 23 | % [new_net, new_tr] = cnn_train_gd(net, x, d); 24 | % 25 | % NOTES 26 | % Son Lam Phung, started 12-Jan-2006. 27 | 28 | %% Process input parameters................................................ 29 | if nargin < 4 30 | tr = cnn_get_init_tr; 31 | end 32 | 33 | if (ischar(tr) || isempty(tr)) 34 | tr = cnn_get_init_tr; 35 | end 36 | 37 | % Starting time 38 | start_time = clock; 39 | new_net = net; % New net 40 | new_tr = tr; % New training record 41 | 42 | %% Store record fields for speed........................................... 43 | tr_mse = []; 44 | tr_time = []; 45 | tr_epoch = []; 46 | tr_output_eval = []; 47 | tr_gradient_eval = []; 48 | 49 | train_method = upper(new_net.train.method); 50 | lr = net.train.gd.lr; 51 | 52 | %% ............................. Training .............................. %% 53 | output_eval_count = 0; 54 | gradient_eval_count = 0; 55 | 56 | for epoch_count = 1:new_net.train.epochs 57 | % Compute network output 58 | output_eval_count = output_eval_count + 1; 59 | [y, s] = cnn_sim_verbose(new_net, x); 60 | 61 | % Compute mean square error 62 | e = y{end} - d; % error 63 | E = mse(e); % MSE 64 | 65 | % Record progress 66 | if ((rem(epoch_count, new_net.train.show) == 0) || ... 67 | (epoch_count == 1)) 68 | tr_epoch = [tr_epoch epoch_count]; 69 | tr_output_eval = [tr_output_eval output_eval_count]; 70 | tr_gradient_eval = [tr_gradient_eval gradient_eval_count]; 71 | tr_mse = [tr_mse E]; 72 | tr_time = [tr_time etime(clock, start_time)]; 73 | fprintf('\n%s: epoch %g, mse = %3.8g ...', ... 74 | train_method, epoch_count, E); 75 | end 76 | 77 | % Exit training if goal is achieved 78 | if (E <= new_net.train.goal) 79 | fprintf('\nTraining goal is achieved.\n'); 80 | break; 81 | end 82 | 83 | % Compute gradient 84 | gradient_eval_count = gradient_eval_count + 1; 85 | [dw, db] = cnn_compute_gradient(new_net, x, e, y, s); 86 | 87 | % Compute new weights 88 | dw = cnn_vectorize_wb(new_net, dw, db); 89 | w = cnn_vectorize_wb(new_net, new_net.w, new_net.b); 90 | new_w = w - lr * dw; 91 | 92 | [new_w, new_b] = cnn_devectorize_wb(new_net, new_w); 93 | new_net.w = new_w; 94 | new_net.b = new_b; 95 | end 96 | 97 | %% Store progress of last epoch............................................ 
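% [annotation] The guard below records the final epoch only when it was not
% already logged inside the loop, i.e. when epoch_count is not a multiple
% of net.train.show.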
98 | if (rem(epoch_count, new_net.train.show) ~= 0) 99 | tr_epoch = [tr_epoch epoch_count]; 100 | tr_output_eval = [tr_output_eval output_eval_count]; 101 | tr_gradient_eval = [tr_gradient_eval gradient_eval_count]; 102 | tr_mse = [tr_mse E]; 103 | tr_time = [tr_time etime(clock, start_time)]; 104 | fprintf('\n%s: epoch %g, mse = %3.8g ...', ... 105 | train_method, epoch_count, E); 106 | end 107 | 108 | %% Add to existing training record......................................... 109 | if (~isempty(tr.time)) 110 | tr_time = tr_time + tr.time(end); 111 | end 112 | 113 | if (~isempty(tr.epoch)) 114 | tr_epoch = tr_epoch + tr.epoch(end); 115 | end 116 | 117 | if (~isempty(tr.output_eval)) 118 | tr_output_eval = tr_output_eval + tr.output_eval(end); 119 | end 120 | 121 | if (~isempty(tr.gradient_eval)) 122 | tr_gradient_eval = tr_gradient_eval + tr.gradient_eval(end); 123 | end 124 | 125 | %% Update training record.................................................. 126 | new_tr.time = [tr.time tr_time]; 127 | new_tr.mse = [tr.mse tr_mse]; 128 | new_tr.epoch = [tr.epoch tr_epoch]; 129 | new_tr.output_eval = [tr.output_eval tr_output_eval]; 130 | new_tr.gradient_eval = [tr.gradient_eval tr_gradient_eval]; 131 | fprintf('\n'); -------------------------------------------------------------------------------- /cnn-matlab/cnn_train_rprop.m: -------------------------------------------------------------------------------- 1 | function [new_net, new_tr] = cnn_train_rprop(net, x, d, tr) 2 | % CNN_TRAIN_RPROP Train a CNN using Resilient Backpropagation method 3 | % 4 | % SYNTAX 5 | % [new_net, new_tr] = cnn_train_rprop(net, x, d, tr) 6 | % 7 | % PARAMETERS 8 | % net: CNN structure 9 | % x: inputs (3-D array H x W x K) 10 | % d: desired output (2-D array NL x K) 11 | % tr: existing training records 12 | % 13 | % new_tr: updated training records 14 | % new_net: trained net 15 | % 16 | % EXAMPLE 17 | % c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ... 18 | % cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ... 19 | % cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)}; 20 | % net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ... 21 | % repmat({'tansig'}, 1, length(c)), 'rprop'); 22 | % K = 5; x = randn(36, 32, K); d = randn(2,K); 23 | % [new_net, new_tr] = cnn_train_rprop(net, x, d); 24 | % 25 | % NOTES 26 | % Son Lam Phung, started 12-Jan-2006. 27 | 28 | %% Process input parameters................................................ 29 | if nargin < 4 30 | tr = cnn_get_init_tr; 31 | end 32 | 33 | if (ischar(tr) || isempty(tr)) 34 | tr = cnn_get_init_tr; 35 | end 36 | 37 | start_time = clock; 38 | new_net = net; % New net 39 | new_tr = tr; % New training record 40 | 41 | %% Store record fields for speed........................................... 42 | tr_mse = []; 43 | tr_time = []; 44 | tr_epoch = []; 45 | tr_output_eval = []; 46 | tr_gradient_eval = []; 47 | 48 | train_method = upper(new_net.train.method); 49 | etap = new_net.train.rprop.etap; 50 | etam = new_net.train.rprop.etam; 51 | delta_init = new_net.train.rprop.delta_init; 52 | 53 | %% Retrieve previous training trends....................................... 54 | if isfield(new_net.train.rprop, 'dEdw_prev') 55 | dEdw_prev= new_net.train.rprop.dEdw_prev; 56 | else 57 | dEdw_prev = zeros(new_net.P, 1); 58 | end 59 | 60 | dEdw_curr = zeros(new_net.P, 1); 61 | 62 | if isfield(new_net.train.rprop, 'delta_w') 63 | delta_w = new_net.train.rprop.delta_w; 64 | else 65 | delta_w = delta_init * ones(new_net.P,1); 66 | end 67 | 68 | %% ............................. 
Training .............................. %% 69 | output_eval_count = 0; 70 | gradient_eval_count = 0; 71 | 72 | for epoch_count = 1:new_net.train.epochs 73 | % Compute network output 74 | output_eval_count = output_eval_count + 1; 75 | [y, s] = cnn_sim_verbose(new_net, x); 76 | 77 | % Compute mean square error 78 | e = y{end} - d; % error 79 | E = mse(e); % MSE 80 | 81 | if ((rem(epoch_count, new_net.train.show) == 0) || ... 82 | (epoch_count == 1)) 83 | tr_epoch = [tr_epoch epoch_count]; 84 | tr_output_eval = [tr_output_eval output_eval_count]; 85 | tr_gradient_eval = [tr_gradient_eval gradient_eval_count]; 86 | tr_mse = [tr_mse E]; 87 | tr_time = [tr_time etime(clock, start_time)]; 88 | fprintf('\n%s: epoch %g, mse = %3.8g', ... 89 | train_method, epoch_count, E); 90 | end 91 | 92 | % Exit training if goal is achieved 93 | if (E <= new_net.train.goal) 94 | fprintf('\nTraining goal is achieved.\n'); 95 | break; 96 | end 97 | 98 | % Compute gradient 99 | gradient_eval_count = gradient_eval_count + 1; 100 | [dw, db] = cnn_compute_gradient(new_net, x, e, y, s); 101 | dEdw_curr = cnn_vectorize_wb(new_net, dw, db); 102 | 103 | % Update weight steps 104 | sign_w = sign(dEdw_prev) .* sign(dEdw_curr); 105 | delta_w = delta_w .* ((sign_w == 1) * etap + ... 106 | (sign_w == -1) * etam + ... 107 | (sign_w == 0)); 108 | 109 | % Compute new weights 110 | w = cnn_vectorize_wb(new_net, new_net.w, new_net.b); 111 | w = w - sign(dEdw_curr) .* delta_w; 112 | [new_w, new_b] = cnn_devectorize_wb(new_net, w); 113 | new_net.w = new_w; new_net.b = new_b; 114 | 115 | dEdw_prev = dEdw_curr; % Store gradient 116 | end 117 | new_net.train.rprop.delta_w = delta_w; 118 | new_net.train.rprop.dEdw_prev = dEdw_curr; 119 | 120 | %% Store progress of last epoch............................................ 121 | if (rem(epoch_count, new_net.train.show) ~= 0) 122 | tr_epoch = [tr_epoch epoch_count]; 123 | tr_output_eval = [tr_output_eval output_eval_count]; 124 | tr_gradient_eval = [tr_gradient_eval gradient_eval_count]; 125 | tr_mse = [tr_mse E]; 126 | tr_time = [tr_time etime(clock, start_time)]; 127 | fprintf('\n%s: epoch %g, mse = %3.8g ...', ... 128 | train_method, epoch_count, E); 129 | end 130 | 131 | %% Add to existing training record......................................... 132 | if (~isempty(tr.time)) 133 | tr_time = tr_time + tr.time(end); 134 | end 135 | 136 | if (~isempty(tr.epoch)) 137 | tr_epoch = tr_epoch + tr.epoch(end); 138 | end 139 | 140 | if (~isempty(tr.output_eval)) 141 | tr_output_eval = tr_output_eval + tr.output_eval(end); 142 | end 143 | 144 | if (~isempty(tr.gradient_eval)) 145 | tr_gradient_eval = tr_gradient_eval + tr.gradient_eval(end); 146 | end 147 | 148 | %% Update training record.................................................. 
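% [annotation] The new measurements are appended to the existing record
% (using the offsets added above), so calling cnn_train_rprop again with
% the returned tr resumes one cumulative history of epochs, time and
% evaluation counts.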
149 | new_tr.time = [tr.time tr_time];
150 | new_tr.mse = [tr.mse tr_mse];
151 | new_tr.epoch = [tr.epoch tr_epoch];
152 | new_tr.output_eval = [tr.output_eval tr_output_eval];
153 | new_tr.gradient_eval = [tr.gradient_eval tr_gradient_eval];
154 | fprintf('\n');
--------------------------------------------------------------------------------
/cnn-matlab/cnn_vectorize_wb.m:
--------------------------------------------------------------------------------
1 | function w = cnn_vectorize_wb(net, net_w, net_b)
2 | % CNN_VECTORIZE_WB: Vectorize w and b of CNN
3 | %
4 | % SYNTAX
5 | % w = cnn_vectorize_wb(net, net_w, net_b);
6 | %
7 | % PARAMETERS
8 | % net: CNN structure
9 | % net_w: cell array of CNN weights
10 | % net_b: cell array of CNN biases
11 | %
12 | % EXAMPLE
13 | %
14 | %
15 | % NOTES
16 | % Son Lam Phung, started 12-Jan-2006.
17 | 
18 | w = zeros(net.P, 1);
19 | idx_end = 0;
20 | 
21 | %% Layer 1: convolution layer C1...........................................
22 | layer = 1;
23 | idx_start = idx_end + 1;
24 | idx_end = idx_start + numel(net_w{layer}) - 1;
25 | w(idx_start:idx_end) = net_w{layer}(:);
26 | 
27 | idx_start = idx_end + 1;
28 | idx_end = idx_start + numel(net_b{layer}) - 1;
29 | w(idx_start:idx_end) = net_b{layer}(:);
30 | 
31 | %% Layer 2 to L - 1: pairs of {subsampling layer -> convolution layer}.....
32 | for layer = 2:net.L-1
33 |     if (mod(layer,2) == 0)
34 |         % -- Subsampling layer
35 |         idx_start = idx_end + 1;
36 |         idx_end = idx_start + numel(net_w{layer}) - 1;
37 |         w(idx_start:idx_end) = net_w{layer}(:);
38 | 
39 |         idx_start = idx_end + 1;
40 |         idx_end = idx_start + numel(net_b{layer}) - 1;
41 |         w(idx_start:idx_end) = net_b{layer}(:);
42 |     else
43 |         % -- Convolution layer
44 |         for p = 1:net.no_fms(layer-1)
45 |             for q = 1:net.no_fms(layer)
46 |                 if (net.c{layer}(p,q) == true)
47 |                     w_tmp = squeeze(net_w{layer}(:,:,p,q));
48 |                     idx_start = idx_end + 1;
49 |                     idx_end = idx_start + numel(w_tmp) - 1;
50 |                     w(idx_start:idx_end) = w_tmp(:);
51 |                 end
52 |             end
53 |         end
54 |         idx_start = idx_end + 1;
55 |         idx_end = idx_start + numel(net_b{layer}) - 1;
56 |         w(idx_start:idx_end) = net_b{layer}(:);
57 |     end
58 | end
59 | 
60 | %% Layer L: output perceptron layer........................................
61 | layer = net.L;
62 | idx_start = idx_end + 1;
63 | idx_end = idx_start + numel(net_w{layer}) - 1;
64 | w(idx_start:idx_end) = net_w{layer}(:);
65 | 
66 | idx_start = idx_end + 1;
67 | idx_end = idx_start + numel(net_b{layer}) - 1;
68 | w(idx_start:idx_end) = net_b{layer}(:);
--------------------------------------------------------------------------------
/cnn-matlab/data/get_face_data.m:
--------------------------------------------------------------------------------
1 | % DESCRIPTION
2 | %
3 | %
4 | % NOTES
5 | % Son Lam Phung, started 14-Apr-2009.
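% [annotation] This script prepares the data used by cnn_example_face.m:
% K face and K non-face windows (32x32, per the source file names) are
% drawn at a uniform stride from x_face / x_nonface and labelled +1 / -1;
% all remaining samples are saved as the test set.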
6 | 7 | load('pyranet_train_data_face_w32'); 8 | load('pyranet_train_data_nonface_w32'); 9 | 10 | %% 11 | K = 1000; 12 | N = size(x_face,3); 13 | skip = floor(N/K); 14 | train_idxs = 1:skip:(K-1)*skip + 1; 15 | test_idxs = setdiff(1:N, train_idxs); 16 | 17 | x1 = x_face(:,:,train_idxs); 18 | x2 = x_nonface(:,:, train_idxs); 19 | d1 = ones(1,K); 20 | d2 = -ones(1,K); 21 | x = cat(3, x1, x2); 22 | d = [d1 d2]; 23 | save('train_data.mat', 'x', 'd'); 24 | 25 | %% 26 | x1 = x_face(:,:,test_idxs); 27 | x2 = x_nonface(:,:, test_idxs); 28 | K_test = length(test_idxs); 29 | d1 = ones(1,K_test); 30 | d2 = -ones(1,K_test); 31 | x_test = cat(3, x1, x2); 32 | d_test = [d1 d2]; 33 | save('test_data.mat', 'x_test', 'd_test'); -------------------------------------------------------------------------------- /cnn-matlab/data/test_data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/cnn-matlab/data/test_data.mat -------------------------------------------------------------------------------- /cnn-matlab/data/train_data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/cnn-matlab/data/train_data.mat -------------------------------------------------------------------------------- /cnn-matlab/data/trained_net.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/cnn-matlab/data/trained_net.mat -------------------------------------------------------------------------------- /cnn-matlab/dltanh.m: -------------------------------------------------------------------------------- 1 | function d = dltanh(x, y) 2 | % DLTANH Derivative of ltanh 3 | % 4 | % SYNTAX 5 | % d = dltanh(x, y); 6 | % 7 | % PARAMETERS 8 | % x: input 9 | % y: value of function at x 10 | % 11 | % EXAMPLE 12 | % x = 0.2; y = ltanh(x); 13 | % d = dltanh(x,y) 14 | % 15 | % NOTES 16 | % d is more efficiently computed if y is known. 17 | % 18 | % Son Lam Phung, started 11-Jan-2006. 
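% [annotation] Derivation: with y = ltanh(x) = A*tanh(S*x),
%   dy/dx = A*S*(1 - tanh(S*x).^2) = A*S*(1 - (y/A).^2)
%         = A*S*(1 - y/A).*(1 + y/A),
% which is the expression evaluated below.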
19 | 20 | A = 1.7159; 21 | S = 2/3; 22 | d = A*S*(1-y/A) .* (1+y/A); -------------------------------------------------------------------------------- /cnn-matlab/face.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hurricanedjp/paper_code/54b3238f5645e7f4fdcce98887e7196726ad68a7/cnn-matlab/face.fig -------------------------------------------------------------------------------- /cnn-matlab/face_result.txt: -------------------------------------------------------------------------------- 1 | >> cnn_example_face 2 | Name Size Bytes Class Attributes 3 | 4 | d 1x2000 16000 double 5 | x 32x32x2000 16384000 double 6 | 7 | 8 | tf_fcn = 9 | 10 | 'tansig' 11 | 'purelin' 12 | 'tansig' 13 | 'purelin' 14 | 'tansig' 15 | 'tansig' 16 | 17 | 18 | RPROP: epoch 1, mse = 1.9986999 19 | RPROP: epoch 2, mse = 1.9978071 20 | RPROP: epoch 3, mse = 1.9972581 21 | RPROP: epoch 4, mse = 1.9968155 22 | RPROP: epoch 5, mse = 1.9963148 23 | RPROP: epoch 6, mse = 1.9959811 24 | RPROP: epoch 7, mse = 1.9953612 25 | RPROP: epoch 8, mse = 1.9948021 26 | RPROP: epoch 9, mse = 1.9941262 27 | RPROP: epoch 10, mse = 1.9932788 28 | RPROP: epoch 11, mse = 1.9921735 29 | RPROP: epoch 12, mse = 1.9913 30 | RPROP: epoch 13, mse = 1.9897832 31 | RPROP: epoch 14, mse = 1.9880601 32 | RPROP: epoch 15, mse = 1.9852042 33 | RPROP: epoch 16, mse = 1.9783972 34 | RPROP: epoch 17, mse = 1.9502257 35 | RPROP: epoch 18, mse = 1.825254 36 | RPROP: epoch 19, mse = 1.5343532 37 | RPROP: epoch 20, mse = 1.1761875 38 | RPROP: epoch 21, mse = 1.0471668 39 | RPROP: epoch 22, mse = 1.1274486 40 | RPROP: epoch 23, mse = 1.0257674 41 | RPROP: epoch 24, mse = 1.0808831 42 | RPROP: epoch 25, mse = 1.0158258 43 | RPROP: epoch 26, mse = 1.0629301 44 | RPROP: epoch 27, mse = 1.0512266 45 | RPROP: epoch 28, mse = 1.052178 46 | RPROP: epoch 29, mse = 1.0409621 47 | RPROP: epoch 30, mse = 1.0394706 48 | RPROP: epoch 31, mse = 1.0297559 49 | RPROP: epoch 32, mse = 1.0226531 50 | RPROP: epoch 33, mse = 1.0207938 51 | RPROP: epoch 34, mse = 1.0217481 52 | RPROP: epoch 35, mse = 1.0149132 53 | RPROP: epoch 36, mse = 1.0178027 54 | RPROP: epoch 37, mse = 1.0111357 55 | RPROP: epoch 38, mse = 1.0152673 56 | RPROP: epoch 39, mse = 1.0076471 57 | RPROP: epoch 40, mse = 1.0098889 58 | RPROP: epoch 41, mse = 1.0048212 59 | RPROP: epoch 42, mse = 1.0077142 60 | RPROP: epoch 43, mse = 0.99847023 61 | RPROP: epoch 44, mse = 1.0041759 62 | RPROP: epoch 45, mse = 0.98951051 63 | RPROP: epoch 46, mse = 0.99581416 64 | RPROP: epoch 47, mse = 0.96648136 65 | RPROP: epoch 48, mse = 0.98034903 66 | RPROP: epoch 49, mse = 0.92583841 67 | RPROP: epoch 50, mse = 0.95856201 68 | RPROP: epoch 51, mse = 0.89397401 69 | RPROP: epoch 52, mse = 0.94207041 70 | RPROP: epoch 53, mse = 0.8652979 71 | RPROP: epoch 54, mse = 0.92701228 72 | RPROP: epoch 55, mse = 0.84022664 73 | RPROP: epoch 56, mse = 0.89299054 74 | RPROP: epoch 57, mse = 0.82845243 75 | RPROP: epoch 58, mse = 0.85732381 76 | RPROP: epoch 59, mse = 0.81254233 77 | RPROP: epoch 60, mse = 0.82123756 78 | RPROP: epoch 61, mse = 0.78479623 79 | RPROP: epoch 62, mse = 0.78623083 80 | RPROP: epoch 63, mse = 0.76048272 81 | RPROP: epoch 64, mse = 0.76113077 82 | RPROP: epoch 65, mse = 0.71833571 83 | RPROP: epoch 66, mse = 0.72261444 84 | RPROP: epoch 67, mse = 0.67166894 85 | RPROP: epoch 68, mse = 0.66943425 86 | RPROP: epoch 69, mse = 0.62118644 87 | RPROP: epoch 70, mse = 0.62069863 88 | RPROP: epoch 71, mse = 0.58223842 89 | RPROP: epoch 72, mse = 
0.57885503 90 | RPROP: epoch 73, mse = 0.5623756 91 | RPROP: epoch 74, mse = 0.55266876 92 | RPROP: epoch 75, mse = 0.54311415 93 | RPROP: epoch 76, mse = 0.53206462 94 | RPROP: epoch 77, mse = 0.52401365 95 | RPROP: epoch 78, mse = 0.51162754 96 | RPROP: epoch 79, mse = 0.50358248 97 | RPROP: epoch 80, mse = 0.48971386 98 | RPROP: epoch 81, mse = 0.48341079 99 | RPROP: epoch 82, mse = 0.46924531 100 | RPROP: epoch 83, mse = 0.46523781 101 | RPROP: epoch 84, mse = 0.45104489 102 | RPROP: epoch 85, mse = 0.44808022 103 | RPROP: epoch 86, mse = 0.43331632 104 | RPROP: epoch 87, mse = 0.43007096 105 | RPROP: epoch 88, mse = 0.4153945 106 | RPROP: epoch 89, mse = 0.41593075 107 | RPROP: epoch 90, mse = 0.41086799 108 | RPROP: epoch 91, mse = 0.40986501 109 | RPROP: epoch 92, mse = 0.40159112 110 | RPROP: epoch 93, mse = 0.4047201 111 | RPROP: epoch 94, mse = 0.3958298 112 | RPROP: epoch 95, mse = 0.3995682 113 | RPROP: epoch 96, mse = 0.39005796 114 | RPROP: epoch 97, mse = 0.39451343 115 | RPROP: epoch 98, mse = 0.38428098 116 | RPROP: epoch 99, mse = 0.38956989 117 | RPROP: epoch 100, mse = 0.38110518 118 | RPROP: epoch 101, mse = 0.38468668 119 | RPROP: epoch 102, mse = 0.37664739 120 | RPROP: epoch 103, mse = 0.37987805 121 | RPROP: epoch 104, mse = 0.37347809 122 | RPROP: epoch 105, mse = 0.37514486 123 | RPROP: epoch 106, mse = 0.36781704 124 | RPROP: epoch 107, mse = 0.37051379 125 | RPROP: epoch 108, mse = 0.36466379 126 | RPROP: epoch 109, mse = 0.36596828 127 | RPROP: epoch 110, mse = 0.36154232 128 | RPROP: epoch 111, mse = 0.36088641 129 | RPROP: epoch 112, mse = 0.35605636 130 | RPROP: epoch 113, mse = 0.35655346 131 | RPROP: epoch 114, mse = 0.35299259 132 | RPROP: epoch 115, mse = 0.35232596 133 | RPROP: epoch 116, mse = 0.34880284 134 | RPROP: epoch 117, mse = 0.34761561 135 | RPROP: epoch 118, mse = 0.34584615 136 | RPROP: epoch 119, mse = 0.3467926 137 | RPROP: epoch 120, mse = 0.34293172 138 | RPROP: epoch 121, mse = 0.34283279 139 | RPROP: epoch 122, mse = 0.34004054 140 | RPROP: epoch 123, mse = 0.34101087 141 | RPROP: epoch 124, mse = 0.33721649 142 | RPROP: epoch 125, mse = 0.33739357 143 | RPROP: epoch 126, mse = 0.33443323 144 | RPROP: epoch 127, mse = 0.3323337 145 | RPROP: epoch 128, mse = 0.32940721 146 | RPROP: epoch 129, mse = 0.33017444 147 | RPROP: epoch 130, mse = 0.32639129 148 | RPROP: epoch 131, mse = 0.32539644 149 | RPROP: epoch 132, mse = 0.32200667 150 | RPROP: epoch 133, mse = 0.32078424 151 | RPROP: epoch 134, mse = 0.31649976 152 | RPROP: epoch 135, mse = 0.31542347 153 | RPROP: epoch 136, mse = 0.31209161 154 | RPROP: epoch 137, mse = 0.31236582 155 | RPROP: epoch 138, mse = 0.3097892 156 | RPROP: epoch 139, mse = 0.30776862 157 | RPROP: epoch 140, mse = 0.30546392 158 | RPROP: epoch 141, mse = 0.30547686 159 | RPROP: epoch 142, mse = 0.30417056 160 | RPROP: epoch 143, mse = 0.30316546 161 | RPROP: epoch 144, mse = 0.2999319 162 | RPROP: epoch 145, mse = 0.29778043 163 | RPROP: epoch 146, mse = 0.29644278 164 | RPROP: epoch 147, mse = 0.29563901 165 | RPROP: epoch 148, mse = 0.29267387 166 | RPROP: epoch 149, mse = 0.29023358 167 | RPROP: epoch 150, mse = 0.28932071 168 | RPROP: epoch 151, mse = 0.28725396 169 | RPROP: epoch 152, mse = 0.2868335 170 | RPROP: epoch 153, mse = 0.28462431 171 | RPROP: epoch 154, mse = 0.28442643 172 | RPROP: epoch 155, mse = 0.28173172 173 | RPROP: epoch 156, mse = 0.28092473 174 | RPROP: epoch 157, mse = 0.27759907 175 | RPROP: epoch 158, mse = 0.27730615 176 | RPROP: epoch 159, mse = 0.27479951 177 | RPROP: epoch 160, 
mse = 0.27550245 178 | RPROP: epoch 161, mse = 0.27315543 179 | RPROP: epoch 162, mse = 0.27372563 180 | RPROP: epoch 163, mse = 0.2706698 181 | RPROP: epoch 164, mse = 0.26961717 182 | RPROP: epoch 165, mse = 0.26650386 183 | RPROP: epoch 166, mse = 0.26624205 184 | RPROP: epoch 167, mse = 0.26332927 185 | RPROP: epoch 168, mse = 0.2645858 186 | RPROP: epoch 169, mse = 0.26167188 187 | RPROP: epoch 170, mse = 0.26289984 188 | RPROP: epoch 171, mse = 0.26003344 189 | RPROP: epoch 172, mse = 0.26123419 190 | RPROP: epoch 173, mse = 0.25840055 191 | RPROP: epoch 174, mse = 0.25933533 192 | RPROP: epoch 175, mse = 0.25659462 193 | RPROP: epoch 176, mse = 0.25749897 194 | RPROP: epoch 177, mse = 0.25468747 195 | RPROP: epoch 178, mse = 0.25557653 196 | RPROP: epoch 179, mse = 0.25254748 197 | RPROP: epoch 180, mse = 0.25243287 198 | RPROP: epoch 181, mse = 0.24926645 199 | RPROP: epoch 182, mse = 0.25025779 200 | RPROP: epoch 183, mse = 0.24743845 201 | RPROP: epoch 184, mse = 0.24976123 202 | RPROP: epoch 185, mse = 0.24676113 203 | RPROP: epoch 186, mse = 0.24730907 204 | RPROP: epoch 187, mse = 0.24462322 205 | RPROP: epoch 188, mse = 0.24666336 206 | RPROP: epoch 189, mse = 0.24391339 207 | RPROP: epoch 190, mse = 0.24470059 208 | RPROP: epoch 191, mse = 0.24195465 209 | RPROP: epoch 192, mse = 0.24226422 210 | RPROP: epoch 193, mse = 0.23992283 211 | RPROP: epoch 194, mse = 0.24167999 212 | RPROP: epoch 195, mse = 0.23926097 213 | RPROP: epoch 196, mse = 0.24101825 214 | RPROP: epoch 197, mse = 0.23859544 215 | RPROP: epoch 198, mse = 0.24007392 216 | RPROP: epoch 199, mse = 0.23803064 217 | RPROP: epoch 200, mse = 0.23946788 218 | RPROP: epoch 201, mse = 0.23729229 219 | RPROP: epoch 202, mse = 0.23720722 220 | RPROP: epoch 203, mse = 0.23556117 221 | RPROP: epoch 204, mse = 0.23696091 222 | RPROP: epoch 205, mse = 0.23500094 223 | RPROP: epoch 206, mse = 0.23538051 224 | RPROP: epoch 207, mse = 0.23320014 225 | RPROP: epoch 208, mse = 0.23381452 226 | RPROP: epoch 209, mse = 0.23225868 227 | RPROP: epoch 210, mse = 0.23355746 228 | RPROP: epoch 211, mse = 0.23167532 229 | RPROP: epoch 212, mse = 0.23212507 230 | RPROP: epoch 213, mse = 0.23061333 231 | RPROP: epoch 214, mse = 0.23177338 232 | RPROP: epoch 215, mse = 0.23001451 233 | RPROP: epoch 216, mse = 0.23034995 234 | RPROP: epoch 217, mse = 0.22830034 235 | RPROP: epoch 218, mse = 0.22722738 236 | RPROP: epoch 219, mse = 0.22490116 237 | RPROP: epoch 220, mse = 0.22470364 238 | RPROP: epoch 221, mse = 0.22333982 239 | RPROP: epoch 222, mse = 0.22438617 240 | RPROP: epoch 223, mse = 0.22253844 241 | RPROP: epoch 224, mse = 0.22382102 242 | RPROP: epoch 225, mse = 0.2219229 243 | RPROP: epoch 226, mse = 0.22213519 244 | RPROP: epoch 227, mse = 0.22093517 245 | RPROP: epoch 228, mse = 0.22184469 246 | RPROP: epoch 229, mse = 0.22020718 247 | RPROP: epoch 230, mse = 0.22080475 248 | RPROP: epoch 231, mse = 0.21930864 249 | RPROP: epoch 232, mse = 0.2203541 250 | RPROP: epoch 233, mse = 0.21864407 251 | RPROP: epoch 234, mse = 0.2175434 252 | RPROP: epoch 235, mse = 0.21595339 253 | RPROP: epoch 236, mse = 0.21635354 254 | RPROP: epoch 237, mse = 0.21499829 255 | RPROP: epoch 238, mse = 0.21580966 256 | RPROP: epoch 239, mse = 0.21412192 257 | RPROP: epoch 240, mse = 0.21344811 258 | RPROP: epoch 241, mse = 0.21172834 259 | RPROP: epoch 242, mse = 0.21110572 260 | RPROP: epoch 243, mse = 0.20934332 261 | RPROP: epoch 244, mse = 0.20873445 262 | RPROP: epoch 245, mse = 0.20709756 263 | RPROP: epoch 246, mse = 0.20695827 264 | RPROP: 
epoch 247, mse = 0.20612529 265 | RPROP: epoch 248, mse = 0.20634516 266 | RPROP: epoch 249, mse = 0.20517849 267 | RPROP: epoch 250, mse = 0.20519029 268 | RPROP: epoch 251, mse = 0.20402027 269 | RPROP: epoch 252, mse = 0.20358832 270 | RPROP: epoch 253, mse = 0.20222463 271 | RPROP: epoch 254, mse = 0.20125665 272 | RPROP: epoch 255, mse = 0.1997671 273 | RPROP: epoch 256, mse = 0.19866041 274 | RPROP: epoch 257, mse = 0.19744047 275 | RPROP: epoch 258, mse = 0.19665223 276 | RPROP: epoch 259, mse = 0.19617383 277 | RPROP: epoch 260, mse = 0.19577488 278 | RPROP: epoch 261, mse = 0.19547485 279 | RPROP: epoch 262, mse = 0.19509647 280 | RPROP: epoch 263, mse = 0.19421257 281 | RPROP: epoch 264, mse = 0.19408486 282 | RPROP: epoch 265, mse = 0.19356795 283 | RPROP: epoch 266, mse = 0.19326036 284 | RPROP: epoch 267, mse = 0.19295723 285 | RPROP: epoch 268, mse = 0.19299814 286 | RPROP: epoch 269, mse = 0.19257082 287 | RPROP: epoch 270, mse = 0.1924378 288 | RPROP: epoch 271, mse = 0.19197578 289 | RPROP: epoch 272, mse = 0.19218366 290 | RPROP: epoch 273, mse = 0.19159904 291 | RPROP: epoch 274, mse = 0.19095254 292 | RPROP: epoch 275, mse = 0.1900775 293 | RPROP: epoch 276, mse = 0.18957081 294 | RPROP: epoch 277, mse = 0.18948746 295 | RPROP: epoch 278, mse = 0.18941428 296 | RPROP: epoch 279, mse = 0.18903109 297 | RPROP: epoch 280, mse = 0.1893359 298 | RPROP: epoch 281, mse = 0.1886465 299 | RPROP: epoch 282, mse = 0.18857604 300 | RPROP: epoch 283, mse = 0.18812772 301 | RPROP: epoch 284, mse = 0.18840503 302 | RPROP: epoch 285, mse = 0.18766516 303 | RPROP: epoch 286, mse = 0.18706345 304 | RPROP: epoch 287, mse = 0.18668115 305 | RPROP: epoch 288, mse = 0.18685167 306 | RPROP: epoch 289, mse = 0.18621798 307 | RPROP: epoch 290, mse = 0.18605724 308 | RPROP: epoch 291, mse = 0.18565437 309 | RPROP: epoch 292, mse = 0.1858209 310 | RPROP: epoch 293, mse = 0.18519943 311 | RPROP: epoch 294, mse = 0.18458761 312 | RPROP: epoch 295, mse = 0.18422764 313 | RPROP: epoch 296, mse = 0.1844181 314 | RPROP: epoch 297, mse = 0.18375946 315 | RPROP: epoch 298, mse = 0.18314198 316 | RPROP: epoch 299, mse = 0.18304741 317 | RPROP: epoch 300, mse = 0.18303235 318 | RPROP: epoch 301, mse = 0.18254356 319 | RPROP: epoch 302, mse = 0.18259148 320 | RPROP: epoch 303, mse = 0.18204225 321 | RPROP: epoch 304, mse = 0.18200305 322 | RPROP: epoch 305, mse = 0.18153455 323 | RPROP: epoch 306, mse = 0.18171866 324 | RPROP: epoch 307, mse = 0.18110091 325 | RPROP: epoch 308, mse = 0.18048408 326 | RPROP: epoch 309, mse = 0.17980273 327 | RPROP: epoch 310, mse = 0.17926329 328 | RPROP: epoch 311, mse = 0.17905523 329 | RPROP: epoch 312, mse = 0.17912052 330 | RPROP: epoch 313, mse = 0.17856293 331 | RPROP: epoch 314, mse = 0.17878823 332 | RPROP: epoch 315, mse = 0.1784381 333 | RPROP: epoch 316, mse = 0.17856622 334 | RPROP: epoch 317, mse = 0.17803017 335 | RPROP: epoch 318, mse = 0.17806289 336 | RPROP: epoch 319, mse = 0.17754189 337 | RPROP: epoch 320, mse = 0.17723368 338 | RPROP: epoch 321, mse = 0.17680959 339 | RPROP: epoch 322, mse = 0.17690047 340 | RPROP: epoch 323, mse = 0.17668439 341 | RPROP: epoch 324, mse = 0.17684813 342 | RPROP: epoch 325, mse = 0.17655725 343 | RPROP: epoch 326, mse = 0.17673694 344 | RPROP: epoch 327, mse = 0.17641578 345 | RPROP: epoch 328, mse = 0.17641571 346 | RPROP: epoch 329, mse = 0.1758561 347 | RPROP: epoch 330, mse = 0.17530636 348 | RPROP: epoch 331, mse = 0.1746344 349 | RPROP: epoch 332, mse = 0.17418876 350 | RPROP: epoch 333, mse = 0.17374474 351 | 
RPROP: epoch 334, mse = 0.17370866
RPROP: epoch 400, mse = 0.1543044
RPROP: epoch 500, mse = 0.1285357
RPROP: epoch 600, mse = 0.10736161
RPROP: epoch 700, mse = 0.098719943
RPROP: epoch 800, mse = 0.091184675
RPROP: epoch 900, mse = 0.084923635
RPROP: epoch 1000, mse = 0.080860143
RPROP: epoch 1100, mse = 0.076639806
[per-epoch RPROP log decimated to every 100th epoch; the full trace decreases near-monotonically, with occasional small upticks and plateaus, from 0.17370866 at epoch 334 to 0.076639806 at epoch 1100]
[figure text-object dumps condensed: the training curve is labelled 'Epochs' (x-axis) and 'Training MSE' (y-axis)]

Classification rate (train): cr = 98.05%

  Name            Size          Bytes      Class                            Attributes
  H               1x1                  8   double
  W               1x1                  8   double
  c               1x6                813   cell
  cm1             1x2                  2   logical
  cm2             2x2                  4   logical
  cm3             2x5                 80   double
  cm4             5x5                 25   logical
  cm5             5x5                 25   logical
  cm6             5x1                  5   logical
  cr              1x1                  8   double
  d               1x2000           16000   double
  d_test          1x10000          80000   double
  h               1x1                112   matlab.graphics.primitive.Text
  net             1x1              20087   struct
  new_net         1x1              25495   struct
  rec_size        6x2                 96   double
  tf_fcn          6x1                748   cell
  tr              1x1              45232   struct
  train_method    1x5                 10   char
  x               32x32x2000    16384000   double
  x_test          32x32x10000   81920000   double
  y               1x2000           16000   double

Classification rate (test): cr = 95.32%
--------------------------------------------------------------------------------
/cnn-matlab/internal_testing/cnn_compute_gradient_check_script.m:
--------------------------------------------------------------------------------
% DESCRIPTION Script to check function cnn_compute_gradient.m
%
% NOTES
% Son Lam Phung, started 05-Nov-2006.

%% Create network
clear; clc
H = 36;
W = 32;
c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ...
     cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ...
     cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)};
net = cnn_new([H W], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ...
    repmat({'tansig'}, 1, length(c)), 'rprop');
fprintf('Number of network parameters %g.\n', net.P);

%% Compute gradient
K = 5; x = randn(36, 32, K); d = randn(2, K);
w = cnn_getw(net);
[y, s] = cnn_sim_verbose(net, x);
e = y{end} - d;
E = mse(e);
[dw, db] = cnn_compute_gradient(net, x, e, y, s);   % Compute gradient
dE = cnn_vectorize_wb(net, dw, db);                 % Vectorize gradient

%% Check computed gradient
ratio = zeros(size(dE));
clc
for i = 1:length(w)
    st = 0.000001;
    % Modify only one weight by a small amount
    w_new = w; w_new(i) = w_new(i) + st;

    % Compute output of the perturbed net
    net_new = cnn_setw(net, w_new);
    y_new = cnn_sim(net_new, x);
    e_new = y_new - d;
    E_new = mse(e_new);          % MSE of perturbed net
    ratio(i) = (E_new - E)/st;   % Finite-difference estimate of dE/dw(i)
    fprintf(['Weight %g: theoretical gradient = %2.6f, ' ...
             'actual gradient = %2.6f.\n'], i, dE(i), ratio(i));
end

%% Display weights that have error in gradient computation
dev = abs(dE - ratio);   % Difference between analytic and finite-difference gradient
idxs = find(dev > 0.00001)'
length(idxs)
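%% Central-difference check (editor's sketch, not part of the original file)
% The one-sided difference above has truncation error O(st); a central
% difference is accurate to O(st^2) and separates genuine gradient bugs
% from finite-difference noise more cleanly. Minimal sketch, reusing the
% variables (net, x, d, w, dE) and package calls from the script above:
cratio = zeros(size(dE));
h = 1e-6;
for i = 1:length(w)
    wp = w; wp(i) = wp(i) + h;       % perturb weight i upward
    wm = w; wm(i) = wm(i) - h;       % perturb weight i downward
    Ep = mse(cnn_sim(cnn_setw(net, wp), x) - d);
    Em = mse(cnn_sim(cnn_setw(net, wm), x) - d);
    cratio(i) = (Ep - Em) / (2*h);   % O(h^2) estimate of dE/dw(i)
end
max(abs(dE - cratio))                % largest deviation from analytic gradient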
--------------------------------------------------------------------------------
/cnn-matlab/internal_testing/cnn_compute_gradient_script.m:
--------------------------------------------------------------------------------
%% Network creation
% clear; clc
H = 36;
W = 32;
c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ...
     cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ...
     cnn_cm('1-to-1', 14), cnn_cm('full', 14, 2)};
net = cnn_new([H W], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ...
    repmat({'tansig'}, 1, length(c)), 'rprop');
net.P   % display the number of network parameters

%% Network training
K = 5; x = randn(36, 32, K); d = randn(2, K);
[y, s] = cnn_sim_verbose(net, x);
e = y{end} - d   % display the output error

%% ................ Stage 1: Compute Error Sensitivity ................. %%
K = size(x, 3);        % Number of input samples
es = cell(1, net.L);   % Allocate storage for error sensitivities

%% Layer L: output perceptron layer........................................
layer = net.L;
N = net.no_fms(layer);
% dE/ds for E = mse(e) = sum(e.^2)/(K*N), hence the 2/(K*N) factor
es{layer} = 2/(K*N) * e .* feval(['d' net.f{layer}], s{layer}, y{layer});

%% Layer L-1: last convolutional layer.....................................
layer = net.L - 1;

dy = feval(['d' net.f{layer}], s{layer}, y{layer});   % f'(s)

es{layer} = reshape((net.w{layer+1} * es{layer+1})', ...
    [1 1 K net.no_fms(layer)]);                       % Back-propagate

es{layer} = es{layer} .* dy;                          % Multiply by f'(s)

%% Layer L-2: last subsampling layer.......................................
% This layer is special because the convolution mask of layer L-1
% has the same size as a feature map of layer L-2.
layer = net.L - 2;
S1 = size(s{layer}, 1);
S2 = size(s{layer}, 2);
es{layer} = zeros(size(s{layer}));
dy = feval(['d' net.f{layer}], s{layer}, y{layer});   % f'(s)

% Replicate matrix es{layer+1}(n,n) into S1 x S2 x * x *
es_rep = repmat(es{layer+1}, [S1 S2 1 1]);

% Compute for each feature map
for n = 1:net.no_fms(layer)
    % Back-propagate
    es{layer}(:,:,:,n) = es_rep(:,:,:,n) .* ...
        repmat(net.w{layer+1}(:,:,n,n), [1 1 K 1]);
end

es{layer} = es{layer} .* dy;                          % Multiply by f'(s)

%% Layers L-3 to 1: pairs of {convolution layer -> subsampling layer}......
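% How the loop below back-propagates through the two layer types:
% - If 'layer' is a convolution layer, layer+1 is a 2x2 subsampling
%   layer, so each sensitivity in es{layer+1} is spread back over the
%   2x2 block of positions it pooled (the strided es_rep assignment)
%   and scaled by that feature map's single subsampling weight.
% - If 'layer' is a subsampling layer, layer+1 is a convolution layer;
%   each feature map n collects a contribution from every map m it
%   feeds (per the connection matrix c{layer+1}) as a 'full' correlation
%   with the 180-degree-rotated kernel, i.e. a full convolution.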
for layer = (net.L-3):-1:1
    dy = feval(['d' net.f{layer}], s{layer}, y{layer});
    if (mod(layer, 2) == 1)
        %-- Convolution layer
        size_s = size(s{layer});
        es{layer} = zeros(size_s);

        % Enlarge es{layer+1} by a factor of 2
        % in the first and second dimensions
        size_es = size(es{layer+1});
        es_rep = zeros([size_s(1:2) size_es(3:4)]);
        for i = 1:2
            for j = 1:2
                es_rep(i:2:end, j:2:end, :, :) = es{layer+1};
            end
        end

        % Back-propagate
        for n = 1:net.no_fms(layer)
            es{layer}(:,:,:,n) = es_rep(:,:,:,n) * net.w{layer+1}(n);
        end
    else
        %-- Subsampling layer
        size_s = size(s{layer});
        es{layer} = zeros(size_s);

        for n = 1:net.no_fms(layer)
            % Find all feature maps in {layer+1}
            % that go from feature map n
            fm_idxs = find(net.c{layer+1}(n, :));

            % Add up contributions from feature maps in {layer+1}
            for m = fm_idxs
                % Back-propagate
                es{layer}(:,:,:,n) = ...
                    es{layer}(:,:,:,n) + ...
                    imfilter(es{layer+1}(:,:,:,m), ...
                             rot90(net.w{layer+1}(:,:,n,m), 2), ...
                             'full', 'corr');
            end
        end
    end
    es{layer} = es{layer} .* dy;   % Multiply by f'(s)
end

%% ..................... Stage 2: Compute Gradient ..................... %%
% Allocate memory
dw = cell(1, net.L);   % Weight gradients
db = cell(1, net.L);   % Bias gradients

%% Layer L: output perceptron layer........................................
layer = net.L;
dw{layer} = (es{layer} * squeeze(y{layer-1}))';
db{layer} = sum(es{layer}, 2);

%% Layer L-1: last convolutional layer.....................................
layer = net.L - 1;
size_y = size(y{layer-1});

% Replicate es{layer} to size_y(1) x size_y(2) x * x *
es_rep = repmat(es{layer}, [size_y(1) size_y(2) 1 1]);
tmp = sum(es_rep .* y{layer-1}, 3);

% Weights
for n = net.no_fms(layer-1):-1:1
    dw{layer}(:,:,n,n) = tmp(:,:,:,n);
end

% Biases
db{layer} = sum(squeeze(es{layer}), 1)';

%% Layer L-2: last subsampling layer.......................................
layer = net.L - 2;
es_y = block_sum_2(y{layer-1}) .* es{layer};
dw{layer} = squeeze(sum(sum(sum(es_y, 3), 2), 1));
db{layer} = squeeze(sum(sum(sum(es{layer}, 3), 2), 1));

%% Layers L-3 to 2: pairs of {convolution layer -> subsampling layer}......
for layer = (net.L-3):-1:2
    if (mod(layer, 2) == 1)
        %-- Convolution layer
        size_es = size(es{layer});
        size_w = size(net.w{layer});
        hrec_size = net.hrec_size(layer);
        dw{layer} = zeros(size_w);

        for p = 1:net.no_fms(layer-1)
            % Find all feature maps in {layer}
            % that go from feature map p in {layer-1}
            fm_idxs = find(net.c{layer}(p, :));

            for m = 1:size_es(1)
                for n = 1:size_es(2)
                    % Repeat es{layer}(m,n,:,fm_idxs)
                    % into size_w(1) x size_w(2) x * x *
                    es_rep = zeros([size_w(1) size_w(2) ...
                                    K length(fm_idxs)]);
                    for i = 1:size_w(1)
                        for j = 1:size_w(2)
                            es_rep(i,j,:,:) = es{layer}(m,n,:,fm_idxs);
                        end
                    end

                    % Repeat y{layer-1}(m:m+2*hrec_size, n:n+2*hrec_size, :, p)
                    % into * x * x * x length(fm_idxs)
                    y_rep = repmat( ...
                        y{layer-1}(m:m+2*hrec_size, n:n+2*hrec_size, :, p), ...
                        [1 1 1 length(fm_idxs)]);

                    dw{layer}(:,:,p,fm_idxs) = dw{layer}(:,:,p,fm_idxs) + ...
                        sum(es_rep .* y_rep, 3);
                end
            end
        end
        db{layer} = squeeze(sum(sum(sum(es{layer}, 3), 2), 1));
    else
        %-- Subsampling layer
        es_y = block_sum_2(y{layer-1}) .* es{layer};
        dw{layer} = squeeze(sum(sum(sum(es_y, 3), 2), 1));
        db{layer} = squeeze(sum(sum(sum(es{layer}, 3), 2), 1));
    end
end
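% Layer 1 is handled separately below only because its inputs are the
% raw images x rather than a previous layer's feature maps; otherwise
% the update is the same receptive-field correlation as above: each
% output position (m,n) weights the (2*hrec_size+1)-square window of x
% that produced it by the corresponding sensitivity and accumulates the
% product into dw.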
%% Layer 1: convolution layer..............................................
layer = 1;
size_es = size(es{layer});
size_w = size(net.w{layer});

dw{layer} = zeros(size_w);
hrec_size = net.hrec_size(layer);

for q = 1:net.no_fms(layer)
    for m = 1:size_es(1)
        for n = 1:size_es(2)
            % Repeat part of es{layer} into size_w(1) x size_w(2) x * x *
            es_rep = repmat(es{layer}(m,n,:,q), ...
                            [size_w(1) size_w(2) 1 1]);

            dw{layer}(:,:,q) = dw{layer}(:,:,q) + ...
                sum(es_rep .* ...
                    x(m:m+2*hrec_size, n:n+2*hrec_size, :), 3);
        end
    end
end
db{layer} = squeeze(sum(sum(sum(es{layer}, 3), 2), 1));
--------------------------------------------------------------------------------
/cnn-matlab/internal_testing/cnn_vectorize_check_script.m:
--------------------------------------------------------------------------------
%%
clear all; clc
c = {cnn_cm('full', 1, 4), cnn_cm('1-to-1', 4), ...
     cnn_cm('1-to-2 2-to-1', 4), cnn_cm('1-to-1', 14), ...
     cnn_cm('1-to-1', 14), cnn_cm('full', 14, 1)};
net = cnn_new([36 32], c, [5 5; 2 2; 3 3; 2 2; 0 0; 0 0], ...
    repmat({'tansig'}, 1, length(c)), 'rprop');
K = 300; x = randn(36, 32, K); d = randn(1, K);
%%
% Round-trip the weights and biases through the vector form
w = cnn_vectorize_wb(net, net.w, net.b);
[w1, b1] = cnn_devectorize_wb(net, w);
net1 = net; net1.w = w1; net1.b = b1;
%%
% The round-tripped network must produce identical outputs
y = cnn_sim(net, x);
y1 = cnn_sim(net1, x);
sum(y(:) ~= y1(:))   % count of mismatches ('sumn' in the original is not defined in this package)
--------------------------------------------------------------------------------
/cnn-matlab/internal_testing/reshape_script.m:
--------------------------------------------------------------------------------
% DESCRIPTION
% Checks that reshape(t', [1 1 K N]) maps t(n,k) to t1(1,1,k,n).
%
% NOTES
% Son Lam Phung, started 05-Nov-2006.

clc
N = 4;
K = 3;
t = rand(N, K)
%%
t1 = reshape(t', [1 1 K N]);   % this is the way to reshape
%%
k = 2; n = 3;
[t(n,k) t1(1,1,k,n)]
%%
k = 1; n = 2;
[t(n,k) t1(1,1,k,n)]
%%
k = 3; n = 2;
[t(n,k) t1(1,1,k,n)]
--------------------------------------------------------------------------------
/cnn-matlab/ltanh.m:
--------------------------------------------------------------------------------
function y = ltanh(x)
% LTANH Scaled hyperbolic tangent function
%   Proposed by LeCun.
%
% SYNTAX
%   y = ltanh(x)
%
% PARAMETERS
%   x: input
%   y: output
%
% EXAMPLE
%   y = ltanh(0.2);
%
% NOTES
%   y = 1.7159 * tanh(2/3 * x);
%
% Son Lam Phung, started 11-Jan-2006.

A = 1.7159;
S = 2/3;
y = A*tanh(x*S);
--------------------------------------------------------------------------------
/cnn-matlab/readme.txt:
--------------------------------------------------------------------------------
This is a MATLAB implementation of LeCun's convolutional neural network.

See the script cnn_example_face.m for how to use it.

Lam Phung, 2009.
--------------------------------------------------------------------------------
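The readme points to cnn_example_face.m for end-to-end usage. As a compact illustration of how the pieces above compose, here is a hand-rolled single gradient-descent step that uses only calls whose signatures appear in the internal_testing scripts; the learning rate is an arbitrary illustrative value, and in practice training would go through cnn_train (whose exact signature is not shown in this dump).

% One manual gradient-descent step (sketch; assumes net, x, d from the
% scripts above, with x of size 36x32xK and d of size 2xK).
[y, s] = cnn_sim_verbose(net, x);            % forward pass, keeping activations
e = y{end} - d;                              % output error
[dw, db] = cnn_compute_gradient(net, x, e, y, s);
g = cnn_vectorize_wb(net, dw, db);           % gradient as one parameter vector
w = cnn_getw(net);                           % current parameters, same indexing as g
net = cnn_setw(net, w - 0.1 * g);            % take a small step (lr = 0.1, illustrative)
mse(cnn_sim(net, x) - d)                     % MSE after the step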