├── DeepAEwithELM_MNIST.m
├── NN
│   ├── nnapplygrads.m
│   ├── nnbp.m
│   ├── nnchecknumgrad.m
│   ├── nneval.m
│   ├── nnff.m
│   ├── nnpredict.m
│   ├── nnsetup.m
│   ├── nntest.m
│   ├── nntrain.m
│   └── nnupdatefigures.m
├── README.md
├── SAE
│   ├── saesetup.m
│   └── saetrain.m
└── util
    ├── ELM.m
    ├── allcomb.m
    ├── expand.m
    ├── flicker.m
    ├── flipall.m
    ├── fliplrf.m
    ├── flipudf.m
    ├── im2patches.m
    ├── isOctave.m
    ├── makeLMfilters.m
    ├── mnist_uint8.mat
    ├── myOctaveVersion.m
    ├── normalize.m
    ├── patches2im.m
    ├── randcorr.m
    ├── randp.m
    ├── rnd.m
    ├── sigm.m
    ├── sigmrnd.m
    ├── softmax.m
    ├── tanh_opt.m
    ├── visualize.m
    ├── whiten.m
    └── zscore.m
/DeepAEwithELM_MNIST.m:
--------------------------------------------------------------------------------
1 | function DeepAEwithELM_MNIST
2 | %% The following example shows how to train your own data with Deep Autoencoders + ELM kernels
3 | %The example training is demonstrated on the MNIST database
4 | load mnist_uint8;
5 | %Preprocessing for normalization of image pixels
6 | train_x = double(train_x)/255;
7 | %train_x: [sample_size x feature_size]
8 | test_x = double(test_x)/255;
9 | %test_x: [sample_size x feature_size]
10 | train_y = double(train_y);
11 | %train_y: [sample_size x classes ]
12 | test_y = double(test_y);
13 | %test_y: [sample_size x classes ]
14 |
15 | % Setup and train a Deep AE
16 | rand('state',0)
17 | DeepAE_ELM = saesetup([784 100 40 30 40 100]);
18 | %Create your DeepAE model [feature_size AE(1) AE(2) CompressedAE AE(2) AE(1)]
19 | DeepAE_ELM.name='SAE';
20 | DeepAE_ELM.ae{1}.learningRate = 0.01;
21 | %Set your learning rate for DeepAE
22 | DeepAE_ELM.ae{1}.inputZeroMaskedFraction = 0.5;
23 |
24 | opts.numepochs = 10;
25 | %Set your epoch number for DeepAE
26 | opts.batchsize = 100;
27 | %Set your batch size for DeepAE (the number of samples must be divisible by the batch size)
28 |
29 |
30 | %The script enables training DeepAE_ELM with both the sigmoid and the hyperbolic tangent function at the AE stage
31 |
32 | DeepAE_ELM.ae{1}.activation_function = 'sigm'; % 'sigm', 'tanh_opt'
33 | %Set activation function for DeepAE_ELM
34 | DeepAE_ELM = saetrain(DeepAE_ELM, train_x, opts);
35 | %Train DeepAE_ELM
36 | visualize(DeepAE_ELM.ae{1}.W{1}(:,2:end)')
37 | %Visualize the learned weights of the first autoencoder layer
38 |
39 | %TRAIN model using Neural Networks
40 |
41 | %The script enables testing the model with both the sigmoid and the hyperbolic tangent function at the supervised stage
42 |
43 | nn = nnsetup([784 100 10]);
44 | %Create your neural network model [feature_size HiddenLayer1 HiddenLayer2 .... classes ]
45 | % Use the DeepAE to initialize a Feedforward neural networks model
46 | nn.name= strcat('SAE-' ,'sigm'); % 'sigm', 'tanh_opt'
47 | nn.activation_function = 'sigm';
48 | %Set activation function for Neural networks
49 | nn.learningRate = 1;
50 | %Set your learning rate for neural networks
51 | nn.W{1} = DeepAE_ELM.ae{1}.W{1};
52 | %Set Neural network weights using DeepAE pre-trained weights
53 |
54 | % Train stage of the Feedforward Neural Networks
55 | opts.numepochs = 100;
56 | %Set your epoch number for neural network
57 | opts.batchsize = 100;
58 | %Set your batch size for Neural network (the number of samples must be divisible by the batch size)
59 | nn = nntrain(nn, train_x, train_y, opts);
60 | %Train Neural Networks model
61 | [error_NN, bad] = nntest(nn, test_x, test_y);
62 | %Test Neural Networks model with your separate testing data
63 |
64 |
65 |
66 | %TRAIN the compressed layer of DeepAE using Extreme Learning Machines with the Moore-Penrose pseudoinverse
67 | %ELM has a single hidden layer
68 | ELM_neuronsize_min=10;
69 | ELM_neuronsize_max=200;
70 | ELM_neuronsize_increment=10;
71 | %Set neuron number of hidden layer in ELM
72 |
73 |
74 | %Get the compression layer of DeepAE_ELM as input to the ELM
75 | train_x_ELM=[];
76 | for i=1:(size(DeepAE_ELM.ae,2)+1)/2
77 | if i==1
78 | train_x_ELM=train_x*DeepAE_ELM.ae{i}.W{1}(:,1:end-1)';
79 | else
80 | train_x_ELM=train_x_ELM*DeepAE_ELM.ae{i}.W{1}(:,1:end-1)';
81 | end
82 | end
83 |
84 | %TRAIN Extreme Learning Machine for various neuron numbers in the hidden layer
85 | for ELM_neuronsize=ELM_neuronsize_min:ELM_neuronsize_increment:ELM_neuronsize_max
86 | [error_ELM(ELM_neuronsize/ELM_neuronsize_increment)]=ELM(train_x_ELM, train_y,ELM_neuronsize);
87 | %Start training of Extreme Learning Machines with the Moore-Penrose pseudoinverse
88 | end
89 |
90 |
91 |
92 |
--------------------------------------------------------------------------------
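The script stores one training RMSE per tested hidden-layer size in error_ELM but does not plot it; a minimal follow-up sketch (assuming the loop above has run and its variables are still in the workspace) to inspect the ELM error curve:

```matlab
% Plot ELM training RMSE against the hidden-layer size (illustrative only).
neuron_sizes = ELM_neuronsize_min:ELM_neuronsize_increment:ELM_neuronsize_max;
figure;
plot(neuron_sizes, error_ELM, '-o');
xlabel('ELM hidden neurons'); ylabel('Training RMSE');
title('ELM error vs. hidden-layer size');
```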
/NN/nnapplygrads.m:
--------------------------------------------------------------------------------
1 | function nn = nnapplygrads(nn)
2 | %NNAPPLYGRADS updates weights and biases with calculated gradients
3 | % nn = nnapplygrads(nn) returns a neural network structure with updated
4 | % weights and biases
5 |
6 | for i = 1 : (nn.n - 1)
7 | if(nn.weightPenaltyL2>0)
8 | dW = nn.dW{i} + nn.weightPenaltyL2 * [zeros(size(nn.W{i},1),1) nn.W{i}(:,2:end)];
9 | else
10 | dW = nn.dW{i};
11 | end
12 |
13 | dW = nn.learningRate * dW;
14 |
15 | if(nn.momentum>0)
16 | nn.vW{i} = nn.momentum*nn.vW{i} + dW;
17 | dW = nn.vW{i};
18 | end
19 |
20 | nn.W{i} = nn.W{i} - dW;
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
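For reference, nnapplygrads implements plain gradient descent with optional L2 weight decay (the bias column is excluded from the penalty) and classical momentum. A standalone sketch of the same rule on a single weight matrix (hypothetical variables, shown only to make the update explicit):

```matlab
% One update step on a single layer, mirroring nnapplygrads.
lr = 0.1; momentum = 0.5; l2 = 1e-4;            % hypothetical hyperparameters
W  = randn(10, 5); dW = randn(10, 5);           % weights and their gradient
vW = zeros(size(W));                            % momentum buffer
dW = dW + l2 * [zeros(size(W,1),1) W(:,2:end)]; % L2 decay, bias column untouched
vW = momentum * vW + lr * dW;                   % accumulate velocity
W  = W - vW;                                    % descend
```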
/NN/nnbp.m:
--------------------------------------------------------------------------------
1 | function nn = nnbp(nn)
2 | %NNBP performs backpropagation
3 | % nn = nnbp(nn) returns a neural network structure with the weight gradients in nn.dW
4 |
5 | n = nn.n;
6 | sparsityError = 0;
7 | switch nn.output
8 | case 'sigm'
9 | d{n} = - nn.e .* (nn.a{n} .* (1 - nn.a{n}));
10 | case {'softmax','linear'}
11 | d{n} = - nn.e;
12 | end
13 | for i = (n - 1) : -1 : 2
14 | % Derivative of the activation function
15 | switch nn.activation_function
16 | case 'sigm'
17 | d_act = nn.a{i} .* (1 - nn.a{i});
18 | case 'tanh_opt'
19 | d_act = 1.7159 * 2/3 * (1 - 1/(1.7159)^2 * nn.a{i}.^2);
20 | end
21 |
22 | if(nn.nonSparsityPenalty>0)
23 | pi = repmat(nn.p{i}, size(nn.a{i}, 1), 1);
24 | sparsityError = [zeros(size(nn.a{i},1),1) nn.nonSparsityPenalty * (-nn.sparsityTarget ./ pi + (1 - nn.sparsityTarget) ./ (1 - pi))];
25 | end
26 |
27 | % Backpropagate first derivatives
28 | if i+1==n % in this case in d{n} there is not the bias term to be removed
29 | d{i} = (d{i + 1} * nn.W{i} + sparsityError) .* d_act; % Bishop (5.56)
30 | else % in this case in d{i} the bias term has to be removed
31 | d{i} = (d{i + 1}(:,2:end) * nn.W{i} + sparsityError) .* d_act;
32 | end
33 |
34 | if(nn.dropoutFraction>0)
35 | d{i} = d{i} .* [ones(size(d{i},1),1) nn.dropOutMask{i}];
36 | end
37 |
38 | end
39 |
40 | for i = 1 : (n - 1)
41 | if i+1==n
42 | nn.dW{i} = (d{i + 1}' * nn.a{i}) / size(d{i + 1}, 1);
43 | else
44 | nn.dW{i} = (d{i + 1}(:,2:end)' * nn.a{i}) / size(d{i + 1}, 1);
45 | end
46 | end
47 | end
48 |
--------------------------------------------------------------------------------
/NN/nnchecknumgrad.m:
--------------------------------------------------------------------------------
1 | function nnchecknumgrad(nn, x, y)
2 | epsilon = 1e-6;
3 | er = 1e-7;
4 | n = nn.n;
5 | for l = 1 : (n - 1)
6 | for i = 1 : size(nn.W{l}, 1)
7 | for j = 1 : size(nn.W{l}, 2)
8 | nn_m = nn; nn_p = nn;
9 | nn_m.W{l}(i, j) = nn.W{l}(i, j) - epsilon;
10 | nn_p.W{l}(i, j) = nn.W{l}(i, j) + epsilon;
11 | rand('state',0)
12 | nn_m = nnff(nn_m, x, y);
13 | rand('state',0)
14 | nn_p = nnff(nn_p, x, y);
15 | dW = (nn_p.L - nn_m.L) / (2 * epsilon);
16 | e = abs(dW - nn.dW{l}(i, j));
17 |
18 | assert(e < er, 'numerical gradient checking failed');
19 | end
20 | end
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
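nnchecknumgrad compares the analytical gradients in nn.dW against central finite differences, so nnff and nnbp must be run first to populate them; a minimal usage sketch on a tiny network (small sizes keep the per-weight finite-difference loop fast):

```matlab
rand('state', 0);
x  = rand(5, 4);                       % 5 samples, 4 features
y  = [1 0; 0 1; 1 0; 0 1; 1 0];        % one-hot targets, 2 classes
nn = nnsetup([4 3 2]);
nn = nnff(nn, x, y);                   % forward pass fills nn.a and nn.L
nn = nnbp(nn);                         % backward pass fills nn.dW
nnchecknumgrad(nn, x, y);              % asserts if any gradient disagrees
```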
/NN/nneval.m:
--------------------------------------------------------------------------------
1 | function [loss] = nneval(nn, loss, train_x, train_y, val_x, val_y)
2 | %NNEVAL evaluates performance of neural network
3 | % Returns an updated loss struct
4 | assert(nargin == 4 || nargin == 6, 'Wrong number of arguments');
5 |
6 | nn.testing = 1;
7 | % training performance
8 | nn = nnff(nn, train_x, train_y);
9 | loss.train.e(end + 1) = nn.L;
10 |
11 | % validation performance
12 | if nargin == 6
13 | nn = nnff(nn, val_x, val_y);
14 | loss.val.e(end + 1) = nn.L;
15 | end
16 | nn.testing = 0;
17 | %calc misclassification rate if softmax
18 | if strcmp(nn.output,'softmax')
19 | [er_train, dummy] = nntest(nn, train_x, train_y);
20 | loss.train.e_frac(end+1) = er_train;
21 |
22 | if nargin == 6
23 | [er_val, dummy] = nntest(nn, val_x, val_y);
24 | loss.val.e_frac(end+1) = er_val;
25 | end
26 | end
27 |
28 | end
29 |
--------------------------------------------------------------------------------
/NN/nnff.m:
--------------------------------------------------------------------------------
1 | function nn = nnff(nn, x, y)
2 | %NNFF performs a feedforward pass
3 | % nn = nnff(nn, x, y) returns a neural network structure with updated
4 | % layer activations, error and loss (nn.a, nn.e and nn.L)
5 |
6 | n = nn.n;
7 | m = size(x, 1);
8 |
9 | x = [ones(m,1) x];
10 | nn.a{1} = x;
11 |
12 | %feedforward pass
13 | for i = 2 : n-1
14 | switch nn.activation_function
15 | case 'sigm'
16 | % Calculate the unit's outputs (including the bias term)
17 | nn.a{i} = sigm(nn.a{i - 1} * nn.W{i - 1}');
18 | case 'tanh_opt'
19 | nn.a{i} = tanh_opt(nn.a{i - 1} * nn.W{i - 1}');
20 | end
21 |
22 | %dropout
23 | if(nn.dropoutFraction > 0)
24 | if(nn.testing)
25 | nn.a{i} = nn.a{i}.*(1 - nn.dropoutFraction);
26 | else
27 | nn.dropOutMask{i} = (rand(size(nn.a{i}))>nn.dropoutFraction);
28 | nn.a{i} = nn.a{i}.*nn.dropOutMask{i};
29 | end
30 | end
31 |
32 | %calculate running exponential activations for use with sparsity
33 | if(nn.nonSparsityPenalty>0)
34 | nn.p{i} = 0.99 * nn.p{i} + 0.01 * mean(nn.a{i}, 1);
35 | end
36 |
37 | %Add the bias term
38 | nn.a{i} = [ones(m,1) nn.a{i}];
39 | end
40 | switch nn.output
41 | case 'sigm'
42 | nn.a{n} = sigm(nn.a{n - 1} * nn.W{n - 1}');
43 | case 'linear'
44 | nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
45 | case 'softmax'
46 | nn.a{n} = nn.a{n - 1} * nn.W{n - 1}';
47 | nn.a{n} = exp(bsxfun(@minus, nn.a{n}, max(nn.a{n},[],2)));
48 | nn.a{n} = bsxfun(@rdivide, nn.a{n}, sum(nn.a{n}, 2));
49 | end
50 |
51 | %error and loss
52 | nn.e = y - nn.a{n};
53 |
54 | switch nn.output
55 | case {'sigm', 'linear'}
56 | nn.L = 1/2 * sum(sum(nn.e .^ 2)) / m;
57 | case 'softmax'
58 | nn.L = -sum(sum(y .* log(nn.a{n}))) / m;
59 | end
60 | end
61 |
--------------------------------------------------------------------------------
/NN/nnpredict.m:
--------------------------------------------------------------------------------
1 | function labels = nnpredict(nn, x)
2 | nn.testing = 1;
3 | nn = nnff(nn, x, zeros(size(x,1), nn.size(end)));
4 | nn.testing = 0;
5 |
6 | [dummy, i] = max(nn.a{end},[],2);
7 | labels = i;
8 | end
9 |
--------------------------------------------------------------------------------
/NN/nnsetup.m:
--------------------------------------------------------------------------------
1 | function nn = nnsetup(architecture)
2 | %NNSETUP creates a Feedforward Backpropagate Neural Network
3 | % nn = nnsetup(architecture) returns a neural network structure with n = numel(architecture)
4 | % layers, architecture being an n x 1 vector of layer sizes, e.g. [784 100 10]
5 |
6 | nn.size = architecture;
7 | nn.n = numel(nn.size);
8 |
9 | nn.activation_function = 'tanh_opt'; % Activation functions of hidden layers: 'sigm' (sigmoid) or 'tanh_opt' (optimal tanh).
10 | nn.learningRate = 2; % learning rate Note: typically needs to be lower when using 'sigm' activation function and non-normalized inputs.
11 | nn.momentum = 0.5; % Momentum
12 | nn.scaling_learningRate = 1; % Scaling factor for the learning rate (each epoch)
13 | nn.weightPenaltyL2 = 0; % L2 regularization
14 | nn.nonSparsityPenalty = 0; % Non sparsity penalty
15 | nn.sparsityTarget = 0.05; % Sparsity target
16 | nn.inputZeroMaskedFraction = 0; % Used for Denoising AutoEncoders
17 | nn.dropoutFraction = 0; % Dropout level (http://www.cs.toronto.edu/~hinton/absps/dropout.pdf)
18 | nn.testing = 0; % Internal variable. nntest sets this to one.
19 | nn.output = 'sigm'; % output unit 'sigm' (=logistic), 'softmax' and 'linear'
20 |
21 | for i = 2 : nn.n
22 | % weights and weight momentum
23 | nn.W{i - 1} = (rand(nn.size(i), nn.size(i - 1)+1) - 0.5) * 2 * 4 * sqrt(6 / (nn.size(i) + nn.size(i - 1)));
24 | nn.vW{i - 1} = zeros(size(nn.W{i - 1}));
25 |
26 | % average activations (for use with sparsity)
27 | nn.p{i} = zeros(1, nn.size(i));
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
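Note that each weight matrix created above has an extra first column for the bias, so its size is nn.size(i) x (nn.size(i-1) + 1); a quick illustrative check:

```matlab
nn = nnsetup([784 100 10]);
size(nn.W{1})   % 100 x 785  (hidden units x inputs + bias)
size(nn.W{2})   % 10  x 101  (outputs x hidden units + bias)
```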
/NN/nntest.m:
--------------------------------------------------------------------------------
1 | function [er, bad] = nntest(nn, x, y)
2 | labels = nnpredict(nn, x);
3 | [dummy, expected] = max(y,[],2);
4 | bad = find(labels ~= expected);
5 | er = numel(bad) / size(x, 1);
6 | end
7 |
--------------------------------------------------------------------------------
/NN/nntrain.m:
--------------------------------------------------------------------------------
1 | function [nn, L] = nntrain(nn, train_x, train_y, opts, val_x, val_y)
2 | %NNTRAIN trains a neural net
3 | % [nn, L] = nntrain(nn, train_x, train_y, opts) trains the neural network nn with input x and
4 | % output y for opts.numepochs epochs, with minibatches of size
5 | % opts.batchsize. Returns a neural network nn with updated activations,
6 | % errors and weights (nn.a, nn.e, nn.W) and L, the sum
7 | % squared error for each training minibatch.
8 |
9 | assert(isfloat(train_x), 'train_x must be a float');
10 | assert(nargin == 4 || nargin == 6,'number of input arguments must be 4 or 6')
11 |
12 | loss.train.e = [];
13 | loss.train.e_frac = [];
14 | loss.val.e = [];
15 | loss.val.e_frac = [];
16 | opts.validation = 0;
17 | if nargin == 6
18 | opts.validation = 1;
19 | end
20 |
21 | fhandle = [];
22 | if isfield(opts,'plot') && opts.plot == 1
23 | fhandle = figure();
24 | end
25 |
26 | m = size(train_x, 1);
27 |
28 | batchsize = opts.batchsize;
29 | numepochs = opts.numepochs;
30 |
31 | numbatches = m / batchsize;
32 |
33 | assert(rem(numbatches, 1) == 0, 'numbatches must be an integer');
34 |
35 | L = zeros(numepochs*numbatches,1);
36 | n = 1;
37 | for i = 1 : numepochs
38 | tic;
39 |
40 | kk = randperm(m);
41 | for l = 1 : numbatches
42 | batch_x = train_x(kk((l - 1) * batchsize + 1 : l * batchsize), :);
43 |
44 | %Add noise to input (for use in denoising autoencoder)
45 | if(nn.inputZeroMaskedFraction ~= 0)
46 | batch_x = batch_x.*(rand(size(batch_x))>nn.inputZeroMaskedFraction);
47 | end
48 |
49 | batch_y = train_y(kk((l - 1) * batchsize + 1 : l * batchsize), :);
50 |
51 | nn = nnff(nn, batch_x, batch_y);
52 | nn = nnbp(nn);
53 | nn = nnapplygrads(nn);
54 |
55 | L(n) = nn.L;
56 |
57 | n = n + 1;
58 | end
59 |
60 | t = toc;
61 |
62 | if opts.validation == 1
63 | loss = nneval(nn, loss, train_x, train_y, val_x, val_y);
64 | str_perf = sprintf('; Full-batch train mse = %f, val mse = %f', loss.train.e(end), loss.val.e(end));
65 | else
66 | loss = nneval(nn, loss, train_x, train_y);
67 | str_perf = sprintf('; Full-batch train err = %f', loss.train.e(end));
68 | end
69 | if ishandle(fhandle)
70 | nnupdatefigures(nn, fhandle, loss, opts, i);
71 | end
72 |
73 | disp(['epoch ' num2str(i) '/' num2str(opts.numepochs) '. Took ' num2str(t) ' seconds' '. Mini-batch mean squared error on training set is ' num2str(mean(L((n-numbatches):(n-1)))) str_perf]);
74 | nn.learningRate = nn.learningRate * nn.scaling_learningRate;
75 | end
76 | end
77 |
78 |
--------------------------------------------------------------------------------
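nntrain optionally takes a validation set and an opts.plot flag that drives nnupdatefigures; a hedged usage sketch, assuming the normalized MNIST variables from DeepAEwithELM_MNIST.m are in the workspace and using an illustrative 50,000/10,000 split:

```matlab
% Hold out the last 10,000 training samples for validation (illustrative split).
val_x = train_x(50001:end, :);  val_y = train_y(50001:end, :);
tr_x  = train_x(1:50000, :);    tr_y  = train_y(1:50000, :);

nn = nnsetup([784 100 10]);
opts.numepochs = 10;
opts.batchsize = 100;     % the number of training samples must be divisible by this
opts.plot      = 1;       % update the error/misclassification figures each epoch
nn = nntrain(nn, tr_x, tr_y, opts, val_x, val_y);
```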
/NN/nnupdatefigures.m:
--------------------------------------------------------------------------------
1 | function nnupdatefigures(nn,fhandle,L,opts,i)
2 | %NNUPDATEFIGURES updates figures during training
3 | if i > 1 %don't plot the first point, it is only a single point
4 | x_ax = 1:i;
5 | % create legend
6 | if opts.validation == 1
7 | M = {'Training','Validation'};
8 | else
9 | M = {'Training'};
10 | end
11 |
12 | %create data for plots
13 | if strcmp(nn.output,'softmax')
14 | plot_x = x_ax';
15 | plot_ye = L.train.e';
16 | plot_yfrac = L.train.e_frac';
17 |
18 | else
19 | plot_x = x_ax';
20 | plot_ye = L.train.e';
21 | end
22 |
23 | %add error on validation data if present
24 | if opts.validation == 1
25 | plot_x = [plot_x, x_ax'];
26 | plot_ye = [plot_ye,L.val.e'];
27 | end
28 |
29 |
30 | %add classification error on validation data if present
31 | if opts.validation == 1 && strcmp(nn.output,'softmax')
32 | plot_yfrac = [plot_yfrac, L.val.e_frac'];
33 | end
34 |
35 | % plotting
36 | figure(fhandle);
37 | if strcmp(nn.output,'softmax') %also plot classification error
38 |
39 | p1 = subplot(1,2,1);
40 | plot(plot_x,plot_ye);
41 | xlabel('Number of epochs'); ylabel('Error');
42 | title('Error')
43 | legend(p1, M,'Location','NorthEast');
44 | set(p1, 'Xlim',[0,opts.numepochs + 1])
45 |
46 | p2 = subplot(1,2,2);
47 | plot(plot_x,plot_yfrac);
48 | xlabel('Number of epochs'); ylabel('Misclassification rate');
49 | title('Misclassification rate')
50 | legend(p2, M,'Location','NorthEast');
51 | set(p2, 'Xlim',[0,opts.numepochs + 1])
52 |
53 | else
54 |
55 | p = plot(plot_x,plot_ye);
56 | xlabel('Number of epochs'); ylabel('Error');title('Error');
57 | legend(p, M,'Location','NorthEast');
58 | set(gca, 'Xlim',[0,opts.numepochs + 1])
59 |
60 | end
61 | drawnow;
62 | end
63 | end
64 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Deep Autoencoder with ELM
2 | ## Deep Autoencoder with Extreme Learning Machines
3 |
4 |
5 | The script helps you train your own Deep Autoencoder with Extreme Learning Machines.
6 | It builds a Deep Autoencoder with a user-specified architecture.
7 |
8 | After that, it trains both a feedforward Neural Network and an Extreme Learning Machine to compare the efficiency of the two machine learning algorithms.
9 |
10 |
11 |
12 | Whereas the first encoder weight matrix of the Deep Autoencoder is unfolded as the first weight matrix of the neural network model,
13 | the ELM is fed with the middle autoencoder layer, which carries the compressed, most significant representation learned by the Deep Autoencoder.
14 |
15 | An example training procedure for the Deep Autoencoder with Extreme Learning Machines is in DeepAEwithELM_MNIST.m, with instructions on how to easily run a training on the MNIST dataset.
16 |
17 | It can be easily modified for your own data. The script is adapted from [1].
18 |
19 | ## Example of Deep Autoencoder with Extreme Learning Machines on MNIST
20 |
21 | The example training is demonstrated on the MNIST database:
22 | ```
23 | load mnist_uint8;
24 | %Preprocessing for normalization of image pixels
25 | train_x = double(train_x)/255;
26 | %train_x: [sample_size x feature_size]
27 | test_x = double(test_x)/255;
28 | %test_x: [sample_size x feature_size]
29 | train_y = double(train_y);
30 | %train_y: [sample_size x classes ]
31 | test_y = double(test_y);
32 | %test_y: [sample_size x classes ]
33 |
34 | % Setup and train a Deep AE
35 | rand('state',0)
36 | DeepAE_ELM = saesetup([784 100 40 30 40 100]);
37 | %Create your DeepAE model [feature_size AE(1) AE(2) CompressedAE AE(2) AE(1)]
38 | DeepAE_ELM.name='SAE';
39 | DeepAE_ELM.ae{1}.learningRate = 0.01;
40 | %Set your learning rate for DeepAE
41 | DeepAE_ELM.ae{1}.inputZeroMaskedFraction = 0.5;
42 |
43 | opts.numepochs = 10;
44 | %Set your epoch number for DeepAE
45 | opts.batchsize = 100;
46 | %Set your batch size for DeepAE (the number of samples must be divisible by the batch size)
47 |
48 |
49 | %The script enables training DeepAE_ELM with both the sigmoid and the hyperbolic tangent function at the AE stage
50 |
51 | DeepAE_ELM.ae{1}.activation_function = 'sigm'; % 'sigm', 'tanh_opt'
52 | %Set activation function for DeepAE_ELM
53 | DeepAE_ELM = saetrain(DeepAE_ELM, train_x, opts);
54 | %Train DeepAE_ELM
55 | visualize(DeepAE_ELM.ae{1}.W{1}(:,2:end)')
56 | %Visualize the learned weights of the first autoencoder layer
57 |
58 | %TRAIN model using Neural Networks
59 |
60 | %The script enables testing the model with both the sigmoid and the hyperbolic tangent function at the supervised stage
61 |
62 | nn = nnsetup([784 100 10]);
63 | %Create your neural network model [feature_size HiddenLayer1 HiddenLayer2 .... classes ]
64 | % Use the DeepAE to initialize a Feedforward neural networks model
65 | nn.name= strcat('SAE-' ,'sigm'); % 'sigm', 'tanh_opt'
66 | nn.activation_function = 'sigm';
67 | %Set activation function for Neural networks
68 | nn.learningRate = 1;
69 | %Set your learning rate for neural networks
70 | nn.W{1} = DeepAE_ELM.ae{1}.W{1};
71 | %Set Neural network weights using DeepAE pre-trained weights
72 |
73 | % Train stage of the Feedforward Neural Networks
74 | opts.numepochs = 100;
75 | %Set your epoch number for neural network
76 | opts.batchsize = 100;
77 | %Set your batch size for Neural network (the number of samples must be divisible by the batch size)
78 | nn = nntrain(nn, train_x, train_y, opts);
79 | %Train Neural Networks model
80 | [error_NN, bad] = nntest(nn, test_x, test_y);
81 | %Test Neural Networks model with your separate testing data
82 |
83 |
84 |
85 | %TRAIN the compressed layer of DeepAE using Extreme Learning Machines with the Moore-Penrose pseudoinverse
86 | %ELM has a single hidden layer
87 | ELM_neuronsize_min=10;
88 | ELM_neuronsize_max=200;
89 | ELM_neuronsize_increment=10;
90 | %Set neuron number of hidden layer in ELM
91 |
92 |
93 | %Get the compression layer of DeepAE_ELM as input to the ELM
94 | train_x_ELM=[];
95 | for i=1:(size(DeepAE_ELM.ae,2)+1)/2
96 | if i==1
97 | train_x_ELM=train_x*DeepAE_ELM.ae{i}.W{1}(:,1:end-1)';
98 | else
99 | train_x_ELM=train_x_ELM*DeepAE_ELM.ae{i}.W{1}(:,1:end-1)';
100 | end
101 | end
102 |
103 | %TRAIN Extreme Learning Machine for various neuron numbers in the hidden layer
104 | for ELM_neuronsize=ELM_neuronsize_min:ELM_neuronsize_increment:ELM_neuronsize_max
105 | [error_ELM(ELM_neuronsize/ELM_neuronsize_increment)]=ELM(train_x_ELM, train_y,ELM_neuronsize);
106 | %Start training of Extreme Learning Machines with the Moore-Penrose pseudoinverse
107 | end
108 |
109 |
110 | ```
111 |
112 |
113 | Please cite this article: Gokhan ALTAN, SecureDeepNet-IoT: A Deep Learning application for Invasion Detection in IIoT sensing systems, Transactions on Emerging Telecommunications Technologies, Wiley, Vol.32, Issue.4, e4228, 2021 (SCI). DOI: https://doi.org/10.1002/ett.4228
114 |
115 |
116 | [1] R. B. Palm, Prediction as a candidate for learning deep hierarchical models of data, 2012, Master Thesis, https://github.com/rasmusbergpalm/DeepLearnToolbox
117 |
118 |
119 |
120 |
--------------------------------------------------------------------------------
/SAE/saesetup.m:
--------------------------------------------------------------------------------
1 | function sae = saesetup(size)
2 | for u = 2 : numel(size)
3 | sae.ae{u-1} = nnsetup([size(u-1) size(u) size(u-1)]);
4 | end
5 | end
6 |
--------------------------------------------------------------------------------
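For the architecture used in DeepAEwithELM_MNIST.m, this creates one single-hidden-layer autoencoder per consecutive pair of layer sizes:

```matlab
sae = saesetup([784 100 40 30 40 100]);
% sae.ae{1}: nnsetup([784 100 784])   sae.ae{2}: nnsetup([100 40 100])
% sae.ae{3}: nnsetup([40 30 40])      sae.ae{4}: nnsetup([30 40 30])
% sae.ae{5}: nnsetup([40 100 40])
```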
/SAE/saetrain.m:
--------------------------------------------------------------------------------
1 | function sae = saetrain(sae, x, opts)
2 | for i = 1 : numel(sae.ae);
3 | disp(['Training AE ' num2str(i) '/' num2str(numel(sae.ae))]);
4 | sae.ae{i} = nntrain(sae.ae{i}, x, x, opts);
5 | t = nnff(sae.ae{i}, x, x);
6 | x = t.a{2};
7 | %remove bias term
8 | x = x(:,2:end);
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/util/ELM.m:
--------------------------------------------------------------------------------
1 | function [error]=ELM(train_x,train_y,number_neurons)
2 | % ELM: this function trains a single-hidden-layer feedforward neural network
3 | % ELM uses the Moore-Penrose pseudoinverse of a matrix.
4 |
5 |
6 | input_weights=rand(number_neurons,size(train_x,2))*2-1;
7 | % Generate random input weights
8 | H=radbas(input_weights*train_x');
9 | %Calculate the value of Hidden layer
10 | %%%% 3rd step: calculate the output weights beta
11 | B=pinv(H') * train_y ;
12 | %Calculate the output weights using Moore-Penrose pseudoinverse
13 |
14 | output=(H' * B)' ;
15 | % Calculate the actual output
16 | error=sqrt(mse(train_y'-output));
17 | %Root mean squared error
18 | disp(sprintf('-Training error for %d neurons in ELM: %f ', number_neurons, error));
19 | end
--------------------------------------------------------------------------------
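A minimal usage sketch on random data (note that radbas and mse come from MATLAB's Neural Network / Deep Learning Toolbox; in the main script the inputs are the compressed DeepAE features and the one-hot MNIST labels):

```matlab
X = rand(1000, 30);                          % 1000 samples, 30 features
labels = randi(10, 1000, 1);
Y = zeros(1000, 10);
Y(sub2ind(size(Y), (1:1000)', labels)) = 1;  % one-hot targets
rmse = ELM(X, Y, 50);                        % ELM with 50 hidden neurons
```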
/util/allcomb.m:
--------------------------------------------------------------------------------
1 | function A = allcomb(varargin)
2 | % ALLCOMB - All combinations
3 | % B = ALLCOMB(A1,A2,A3,...,AN) returns all combinations of the elements
4 | % in A1, A2, ..., and AN. B is a P-by-N matrix in which P is the product
5 | % of the number of elements of the N inputs.
6 | % Empty inputs yields an empty matrix B of size 0-by-N. Note that
7 | % previous versions (1.x) simply ignored empty inputs.
8 | %
9 | % Example:
10 | % allcomb([1 3 5],[-3 8],[0 1]) ;
11 | % 1 -3 0
12 | % 1 -3 1
13 | % 1 8 0
14 | % ...
15 | % 5 -3 1
16 | % 5 8 0
17 | % 5 8 1
18 | %
19 | % ALLCOMB(A1,..AN,'matlab') causes the first column to change fastest.
20 | % This is more consistent with matlab indexing. Example:
21 | % allcomb(1:2,3:4,5:6,'matlab') %->
22 | % 1 3 5
23 | % 2 3 5
24 | % 1 4 5
25 | % ...
26 | % 2 4 6
27 | %
28 | % This functionality is also known as the cartesian product.
29 | %
30 | % See also NCHOOSEK, PERMS,
31 | % and COMBN (Matlab Central FEX)
32 |
33 | % for Matlab R13+
34 | % version 2.1 (feb 2011)
35 | % (c) Jos van der Geest
36 | % email: jos@jasen.nl
37 |
38 | % History
39 | % 1.1 (feb 2006), removed minor bug when entering empty cell arrays;
40 | % added option to let the first input run fastest (suggestion by JD)
41 | % 1.2 (jan 2010), using ii as an index on the left-hand for the multiple
42 | % output by NDGRID. Thanks to Jan Simon, for showing this little trick
43 | % 2.0 (dec 2010). Bruno Luong convinced me that an empty input should
44 | % return an empty output.
45 | % 2.1 (feb 2011). A cell as input argument caused the check on the last
46 | % argument (specifying the order) to crash.
47 |
48 | error(nargchk(1,Inf,nargin)) ;
49 |
50 | % check for empty inputs
51 | q = ~cellfun('isempty',varargin) ;
52 | if any(~q),
53 | warning('ALLCOMB:EmptyInput','Empty inputs result in an empty output.') ;
54 | A = zeros(0,nargin) ;
55 | else
56 |
57 | ni = sum(q) ;
58 |
59 | argn = varargin{end} ;
60 | ischar(argn)
61 | if ischar(argn) && (strcmpi(argn,'matlab') || strcmpi(argn,'john')),
62 | % based on a suggestion by JD on the FEX
63 | ni = ni-1 ;
64 | ii = 1:ni ;
65 | q(end) = 0 ;
66 | else
67 | % enter arguments backwards, so last one (AN) is changing fastest
68 | ii = ni:-1:1 ;
69 | end
70 |
71 | if ni==0,
72 | A = [] ;
73 | else
74 | args = varargin(q) ;
75 | if ~all(cellfun('isclass',args,'double')),
76 | error('All arguments should be arrays of doubles') ;
77 | end
78 | if ni==1,
79 | A = args{1}(:) ;
80 | else
81 | % flip using ii if last column is changing fastest
82 | [A{ii}] = ndgrid(args{ii}) ;
83 | % concatenate
84 | A = reshape(cat(ni+1,A{:}),[],ni) ;
85 | end
86 | end
87 | end
88 |
--------------------------------------------------------------------------------
/util/expand.m:
--------------------------------------------------------------------------------
1 | function B = expand(A, S)
2 | %EXPAND Replicate and tile each element of an array, similar to repmat.
3 | % EXPAND(A,SZ), for array A and vector SZ replicates each element of A by
4 | % SZ. The results are tiled into an array in the same order as the
5 | % elements of A, so that the result is size: size(A).*SZ. Therefore the
6 | % number of elements of SZ must equal the number of dimensions of A, or in
7 | % MATLAB syntax: length(size(A))==length(SZ) must be true.
8 | % The result will have the same number of dimensions as does A.
9 | % There is no restriction on the number of dimensions for input A.
10 | %
11 | % Examples:
12 | %
13 | % A = [1 2; 3 4]; % 2x2
14 | % SZ = [6 5];
15 | % B = expand(A,[6 5]) % Creates a 12x10 array.
16 | %
17 | % The following demonstrates equivalence of EXPAND and expansion achieved
18 | % through indexing the individual elements of the array:
19 | %
20 | % A = 1; B = 2; C = 3; D = 4; % Elements of the array to be expanded.
21 | % Mat = [A B;C D]; % The array to expand.
22 | % SZ = [2 3]; % The expansion vector.
23 | % ONES = ones(SZ); % The index array.
24 | % ExpMat1 = [A(ONES),B(ONES);C(ONES),D(ONES)]; % Element expansion.
25 | % ExpMat2 = expand(Mat,SZ); % Calling EXPAND.
26 | % isequal(ExpMat1,ExpMat2) % Yes
27 | %
28 | %
29 | % See also, repmat, meshgrid, ones, zeros, kron
30 | %
31 | % Author: Matt Fig
32 | % Date: 6/20/2009
33 | % Contact: popkenai@yahoo.com
34 |
35 | if nargin < 2
36 | error('Size vector must be provided. See help.');
37 | end
38 |
39 | SA = size(A); % Get the size (and number of dimensions) of input.
40 |
41 | if length(SA) ~= length(S)
42 | error('Length of size vector must equal ndims(A). See help.')
43 | elseif any(S ~= floor(S))
44 | error('The size vector must contain integers only. See help.')
45 | end
46 |
47 | T = cell(length(SA), 1);
48 | for ii = length(SA) : -1 : 1
49 | H = zeros(SA(ii) * S(ii), 1); % One index vector into A for each dim.
50 | H(1 : S(ii) : SA(ii) * S(ii)) = 1; % Put ones in correct places.
51 | T{ii} = cumsum(H); % Cumsumming creates the correct order.
52 | end
53 |
54 | B = A(T{:}); % Feed the indices into A.
--------------------------------------------------------------------------------
/util/flicker.m:
--------------------------------------------------------------------------------
1 | function flicker(X,fps)
2 | figure;
3 | colormap gray;
4 | axis image;
5 | while 1
6 | for i=1:size(X,1);
7 | imagesc(squeeze(X(i,:,:))); drawnow;
8 | pause(1/fps);
9 | end
10 | end
11 | end
--------------------------------------------------------------------------------
/util/flipall.m:
--------------------------------------------------------------------------------
1 | function X=flipall(X)
2 | for i=1:ndims(X)
3 | X = flipdim(X,i);
4 | end
5 | end
--------------------------------------------------------------------------------
/util/fliplrf.m:
--------------------------------------------------------------------------------
1 | function y = fliplrf(x)
2 | %FLIPLR Flip matrix in left/right direction.
3 | % FLIPLR(X) returns X with row preserved and columns flipped
4 | % in the left/right direction.
5 | %
6 | % X = 1 2 3 becomes 3 2 1
7 | % 4 5 6 6 5 4
8 | %
9 | % Class support for input X:
10 | % float: double, single
11 | %
12 | % See also FLIPUD, ROT90, FLIPDIM.
13 |
14 | % Copyright 1984-2010 The MathWorks, Inc.
15 | % $Revision: 5.9.4.4 $ $Date: 2010/02/25 08:08:47 $
16 |
17 | % if ~ismatrix(x)
18 | % error('MATLAB:fliplr:SizeX', 'X must be a 2-D matrix.');
19 | % end
20 | y = x(:,end:-1:1);
21 |
--------------------------------------------------------------------------------
/util/flipudf.m:
--------------------------------------------------------------------------------
1 | function y = flipudf(x)
2 | %FLIPUD Flip matrix in up/down direction.
3 | % FLIPUD(X) returns X with columns preserved and rows flipped
4 | % in the up/down direction. For example,
5 | %
6 | % X = 1 4 becomes 3 6
7 | % 2 5 2 5
8 | % 3 6 1 4
9 | %
10 | % Class support for input X:
11 | % float: double, single
12 | %
13 | % See also FLIPLR, ROT90, FLIPDIM.
14 |
15 | % Copyright 1984-2010 The MathWorks, Inc.
16 | % $Revision: 5.9.4.4 $ $Date: 2010/02/25 08:08:49 $
17 |
18 | % if ~ismatrix(x)
19 | % error('MATLAB:flipud:SizeX', 'X must be a 2-D matrix.');
20 | % end
21 | y = x(end:-1:1,:);
22 |
--------------------------------------------------------------------------------
/util/im2patches.m:
--------------------------------------------------------------------------------
1 | function patches = im2patches(im,m,n)
2 | assert(rem(size(im,1),m)==0)
3 | assert(rem(size(im,2),n)==0)
4 |
5 | patches = [];
6 | for i=1:m:size(im,1)
7 | for u=1:n:size(im,2)
8 | patch = im(i:i+n-1,u:u+m-1);
9 | patches = [patches patch(:)];
10 | end
11 | end
12 | patches = patches';
13 | end
--------------------------------------------------------------------------------
/util/isOctave.m:
--------------------------------------------------------------------------------
1 | %detects if we're running Octave
2 | function result = isOctave()
3 | result = exist('OCTAVE_VERSION') ~= 0;
4 | end
--------------------------------------------------------------------------------
/util/makeLMfilters.m:
--------------------------------------------------------------------------------
1 | function F=makeLMfilters
2 | % Returns the LML filter bank of size 49x49x48 in F. To convolve an
3 | % image I with the filter bank you can either use the matlab function
4 | % conv2, i.e. responses(:,:,i)=conv2(I,F(:,:,i),'valid'), or use the
5 | % Fourier transform.
6 |
7 | SUP=49; % Support of the largest filter (must be odd)
8 | SCALEX=sqrt(2).^[1:3]; % Sigma_{x} for the oriented filters
9 | NORIENT=6; % Number of orientations
10 |
11 | NROTINV=12;
12 | NBAR=length(SCALEX)*NORIENT;
13 | NEDGE=length(SCALEX)*NORIENT;
14 | NF=NBAR+NEDGE+NROTINV;
15 | F=zeros(SUP,SUP,NF);
16 | hsup=(SUP-1)/2;
17 | [x,y]=meshgrid([-hsup:hsup],[hsup:-1:-hsup]);
18 | orgpts=[x(:) y(:)]';
19 |
20 | count=1;
21 | for scale=1:length(SCALEX),
22 | for orient=0:NORIENT-1,
23 | angle=pi*orient/NORIENT; % Not 2pi as filters have symmetry
24 | c=cos(angle);s=sin(angle);
25 | rotpts=[c -s;s c]*orgpts;
26 | F(:,:,count)=makefilter(SCALEX(scale),0,1,rotpts,SUP);
27 | F(:,:,count+NEDGE)=makefilter(SCALEX(scale),0,2,rotpts,SUP);
28 | count=count+1;
29 | end;
30 | end;
31 |
32 | count=NBAR+NEDGE+1;
33 | SCALES=sqrt(2).^[1:4];
34 | for i=1:length(SCALES),
35 | F(:,:,count)=normalise(fspecial('gaussian',SUP,SCALES(i)));
36 | F(:,:,count+1)=normalise(fspecial('log',SUP,SCALES(i)));
37 | F(:,:,count+2)=normalise(fspecial('log',SUP,3*SCALES(i)));
38 | count=count+3;
39 | end;
40 | return
41 |
42 | function f=makefilter(scale,phasex,phasey,pts,sup)
43 | gx=gauss1d(3*scale,0,pts(1,:),phasex);
44 | gy=gauss1d(scale,0,pts(2,:),phasey);
45 | f=normalise(reshape(gx.*gy,sup,sup));
46 | return
47 |
48 | function g=gauss1d(sigma,mean,x,ord)
49 | % Function to compute gaussian derivatives of order 0 <= ord < 3
50 | % evaluated at x.
51 |
52 | x=x-mean;num=x.*x;
53 | variance=sigma^2;
54 | denom=2*variance;
55 | g=exp(-num/denom)/(pi*denom)^0.5;
56 | switch ord,
57 | case 1, g=-g.*(x/variance);
58 | case 2, g=g.*((num-variance)/(variance^2));
59 | end;
60 | return
61 |
62 | function f=normalise(f), f=f-mean(f(:)); f=f/sum(abs(f(:))); return
--------------------------------------------------------------------------------
/util/mnist_uint8.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/galtan-PhD/Deep_Autoencoder_with_ELM/b824e3793c2757be7b747e96acdabe501f39e577/util/mnist_uint8.mat
--------------------------------------------------------------------------------
/util/myOctaveVersion.m:
--------------------------------------------------------------------------------
1 | % return OCTAVE_VERSION or 'undefined' as a string
2 | function result = myOctaveVersion()
3 | if isOctave()
4 | result = OCTAVE_VERSION;
5 | else
6 | result = 'undefined';
7 | end
8 |
--------------------------------------------------------------------------------
/util/normalize.m:
--------------------------------------------------------------------------------
1 | function x = normalize(x, mu, sigma)
2 | x=bsxfun(@minus,x,mu);
3 | x=bsxfun(@rdivide,x,sigma);
4 | end
5 |
--------------------------------------------------------------------------------
/util/patches2im.m:
--------------------------------------------------------------------------------
1 | function im = patches2im(patches,n,m)
2 | k=1;
3 | im = zeros(n,m);
4 | for i=1:10:800
5 | for u=1:10:1140
6 | patch = patches(:,k);
7 | im(u:u+9,i:i+9) = reshape(patch,10,10);
8 | k = k+1;
9 | end
10 | end
11 | end
--------------------------------------------------------------------------------
/util/randcorr.m:
--------------------------------------------------------------------------------
1 | function x=randcorr(n,R)
2 | % RANDCORR Generates correlated random variables
3 | % Generates n vector valued variates with uniform marginals and correlation
4 | % matrix R.
5 | % Returns an nxk matrix, where k is the order of R.
6 | k=size(R,1);
7 | R=2*sin((pi/6)*R);
8 | x=normcdf(randn(n,k)*chol(R));
--------------------------------------------------------------------------------
/util/randp.m:
--------------------------------------------------------------------------------
1 | function X = randp(P,varargin)
2 | % RANDP - pick random values with relative probability
3 | %
4 | % R = RANDP(PROB,..) returns integers in the range from 1 to
5 | % NUMEL(PROB) with a relative probability, so that the value X is
6 | % present approximately (PROB(X)./sum(PROB)) times in the matrix R.
7 | %
8 | % All values of PROB should be equal to or larger than 0.
9 | %
10 | % RANDP(PROB,N) is an N-by-N matrix, RANDP(PROB,M,N) and
11 | % RANDP(PROB,[M,N]) are M-by-N matrices. RANDP(PROB, M1,M2,M3,...) or
12 | % RANDP(PROB,[M1,M2,M3,...]) generate random arrays.
13 | % RANDP(PROB,SIZE(A)) is the same size as A.
14 | %
15 | % Example:
16 | % R = randp([1 3 2],1,10000)
17 | % % return a row vector with 10000 values, about 50% of which equal 2
18 | % histc(R,1:3) ./ numel(R)
19 | %
20 | % R = randp([1 1 0 0 1],10,1)
21 | % % 10 samples evenly drawn from [1 2 5]
22 | %
23 | %
24 | % Also see RAND, RANDPERM
25 | % RANDPERMBREAK, RANDINTERVAL, RANDSWAP (MatLab File Exchange)
26 |
27 | % Created for Matlab R13+
28 | % version 2.0 (feb 2009)
29 | % (c) Jos van der Geest
30 | % email: jos@jasen.nl
31 | %
32 | % File history:
33 | % 1.0 (nov 2005) - created
34 | % 1.1 (nov 2005) - modified slightly to check input arguments to RAND first
35 | % 1.2 (aug 2006) - fixed bug when called with scalar argument P
36 | % 2.0 (feb 2009) - use HISTC for creating the integers (faster and simplier than
37 | % previous algorithm)
38 |
39 | error(nargchk(2,Inf,nargin)) ;
40 |
41 | try
42 | X = rand(varargin{:}) ;
43 | catch
44 | E = lasterror ;
45 | E.message = strrep(E.message,'rand','randp') ;
46 | rethrow(E) ;
47 | end
48 |
49 | P = P(:) ;
50 |
51 | if any(P<0),
52 | error('All probabilities should be 0 or larger.') ;
53 | end
54 |
55 | if isempty(P) || sum(P)==0
56 | warning([mfilename ':ZeroProbabilities'],'All zero probabilities') ;
57 | X(:) = 0 ;
58 | else
59 | [junk,X] = histc(X,[0 ; cumsum(P(:))] ./ sum(P)) ;
60 | end
61 |
62 | % Method used before version 2
63 | % X = rand(varargin{:}) ;
64 | % sz = size(X) ;
65 | % P = reshape(P,1,[]) ; % row vector
66 | % P = cumsum(P) ./ sum(P) ;
67 | % X = repmat(X(:),1,numel(P)) < repmat(P,numel(X),1) ;
68 | % X = numel(P) - sum(X,2) + 1 ;
69 | % X = reshape(X,sz) ;
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
--------------------------------------------------------------------------------
/util/rnd.m:
--------------------------------------------------------------------------------
1 | function y = rnd(x)
2 | y = double(x>rand());
3 | end
--------------------------------------------------------------------------------
/util/sigm.m:
--------------------------------------------------------------------------------
1 | function X = sigm(P)
2 | X = 1./(1+exp(-P));
3 | end
--------------------------------------------------------------------------------
/util/sigmrnd.m:
--------------------------------------------------------------------------------
1 | function X = sigmrnd(P)
2 | % X = double(1./(1+exp(-P)))+1*randn(size(P));
3 | X = double(1./(1+exp(-P)) > rand(size(P)));
4 | end
--------------------------------------------------------------------------------
/util/softmax.m:
--------------------------------------------------------------------------------
1 | function mu = softmax(eta)
2 | % Softmax function
3 | % mu(i,c) = exp(eta(i,c))/sum_c' exp(eta(i,c'))
4 |
5 | % This file is from matlabtools.googlecode.com
6 | c = 3;
7 |
8 | tmp = exp(c*eta);
9 | denom = sum(tmp, 2);
10 | mu = bsxfun(@rdivide, tmp, denom);
11 |
12 | end
--------------------------------------------------------------------------------
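Note that this utility applies a fixed sharpening factor c = 3 before normalizing, so it is not the plain softmax used inside nnff (which instead subtracts the row maximum for numerical stability); a quick comparison sketch:

```matlab
eta = [1 2 3];
mu_util  = softmax(eta)                                       % uses exp(3*eta)
mu_plain = exp(eta - max(eta)) ./ sum(exp(eta - max(eta)))    % nnff-style softmax
```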
/util/tanh_opt.m:
--------------------------------------------------------------------------------
1 | function f=tanh_opt(A)
2 | f=1.7159*tanh(2/3.*A);
3 | end
--------------------------------------------------------------------------------
/util/visualize.m:
--------------------------------------------------------------------------------
1 | function r=visualize(X, mm, s1, s2)
2 | %FROM RBMLIB http://code.google.com/p/matrbm/
3 | %Visualize weights X. If the function is called as a void method,
4 | %it does the plotting. But if the function is assigned to a variable
5 | %outside of this code, the formed image is returned instead.
6 | if ~exist('mm','var')
7 | mm = [min(X(:)) max(X(:))];
8 | end
9 | if ~exist('s1','var')
10 | s1 = 0;
11 | end
12 | if ~exist('s2','var')
13 | s2 = 0;
14 | end
15 |
16 | [D,N]= size(X);
17 | s=sqrt(D);
18 | if s==floor(s) || (s1 ~=0 && s2 ~=0)
19 | if (s1 ==0 || s2 ==0)
20 | s1 = s; s2 = s;
21 | end
22 | %its a square, so data is probably an image
23 | num=ceil(sqrt(N));
24 | a=mm(2)*ones(num*s2+num-1,num*s1+num-1);
25 | x=0;
26 | y=0;
27 | for i=1:N
28 | im = reshape(X(:,i),s1,s2)';
29 | a(x*s2+1+x : x*s2+s2+x, y*s1+1+y : y*s1+s1+y)=im;
30 | x=x+1;
31 | if(x>=num)
32 | x=0;
33 | y=y+1;
34 | end
35 | end
36 | d=true;
37 | else
38 | %there is not much we can do
39 | a=X;
40 | end
41 |
42 | %return the image, or plot the image
43 | if nargout==1
44 | r=a;
45 | else
46 |
47 | imagesc(a, [mm(1) mm(2)]);
48 | axis equal
49 | colormap gray
50 |
51 | end
52 |
--------------------------------------------------------------------------------
/util/whiten.m:
--------------------------------------------------------------------------------
1 | function X = whiten(X, fudgefactor)
2 | C = cov(X);
3 | M = mean(X);
4 | [V,D] = eig(C);
5 | P = V * diag(sqrt(1./(diag(D) + fudgefactor))) * V';
6 | X = bsxfun(@minus, X, M) * P;
7 | end
--------------------------------------------------------------------------------
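A small usage sketch: whiten centers the data and applies a ZCA-style transform built from the regularized eigendecomposition of the covariance, so the whitened covariance is approximately the identity:

```matlab
X     = randn(500, 20) * randn(20, 20);  % correlated toy data
Xw    = whiten(X, 1e-5);                 % small fudge factor regularizes eig
cov_w = cov(Xw);                         % close to the 20x20 identity matrix
```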
/util/zscore.m:
--------------------------------------------------------------------------------
1 | function [x, mu, sigma] = zscore(x)
2 | mu=mean(x);
3 | sigma=max(std(x),eps);
4 | x=bsxfun(@minus,x,mu);
5 | x=bsxfun(@rdivide,x,sigma);
6 | end
7 |
--------------------------------------------------------------------------------