├── deepGP
│   ├── html
│   │   ├── M_h.png
│   │   ├── X_0.png
│   │   ├── X_H.png
│   │   ├── Xs.png
│   │   ├── toyAll.png
│   │   ├── gpSample.png
│   │   ├── digitsAll.png
│   │   ├── hierarchy2.png
│   │   ├── example1Bold.png
│   │   ├── hierarchy_tr.png
│   │   ├── toyRegResults.png
│   │   ├── digitsExperiment.png
│   │   ├── highFiveExperiment.png
│   │   ├── H5_latentSpaceDigits.png
│   │   └── hierarchyFull_compact2.png
│   ├── tex
│   │   └── diagrams
│   │       ├── toyAll.pdf
│   │       ├── digitsAll.pdf
│   │       ├── gpSample1.png
│   │       ├── gpSample2.png
│   │       ├── gpSample3.png
│   │       ├── hierarchy2.pdf
│   │       ├── sampleL1
│   │       │   ├── 6.pdf
│   │       │   ├── 102.pdf
│   │       │   ├── 147.pdf
│   │       │   ├── 219.pdf
│   │       │   ├── 381.pdf
│   │       │   ├── 426.pdf
│   │       │   └── 73.pdf
│   │       ├── sampleL2
│   │       │   ├── 1.pdf
│   │       │   ├── 130.pdf
│   │       │   ├── 190.pdf
│   │       │   ├── 22.pdf
│   │       │   ├── 274.pdf
│   │       │   ├── 370.pdf
│   │       │   └── 448.pdf
│   │       ├── example1Bold.pdf
│   │       ├── hierarchy_tr.pdf
│   │       ├── toyRegResults.pdf
│   │       ├── sampleL5Dim5
│   │       │   ├── 1.pdf
│   │       │   ├── 157.pdf
│   │       │   ├── 175.pdf
│   │       │   ├── 190.pdf
│   │       │   ├── 226.pdf
│   │       │   ├── 262.pdf
│   │       │   └── 415.pdf
│   │       ├── sampleL5Dim6
│   │       │   ├── 1.pdf
│   │       │   ├── 46.pdf
│   │       │   ├── 199.pdf
│   │       │   ├── 253.pdf
│   │       │   ├── 289.pdf
│   │       │   ├── 307.pdf
│   │       │   ├── 340.pdf
│   │       │   └── 344.png
│   │       ├── digitsExperiment.pdf
│   │       ├── highFiveExperiment.pdf
│   │       ├── H5_latentSpaceDigits.png
│   │       └── hierarchyFull_compact.pdf
│   ├── matlab
│   │   ├── demHighFiveHgplvm1.mat
│   │   ├── demToy_vargplvm270.mat
│   │   ├── hgplvmSampleDataTr1.mat
│   │   ├── demHighFiveHsvargplvm9.mat
│   │   ├── demUsps3ClassHsvargplvm19.mat
│   │   ├── demToy_hgplvmSampleTr1Hsvargplvm1.mat
│   │   ├── hsvargplvmShowScalesSorted.m
│   │   ├── myPlot.m
│   │   ├── hsvargplvmShowSNR.m
│   │   ├── hsvargplvmObjective.m
│   │   ├── hsvargplvmPruneModel.m
│   │   ├── hsvargplvmReconstructInputs.m
│   │   ├── Ytochannels.m
│   │   ├── hsvargplvmControlSNR.m
│   │   ├── hsvargplvmAddParentPrior.m
│   │   ├── hsvargplvmGradient.m
│   │   ├── hsvargplvmPropagateField.m
│   │   ├── demToyDynamicsSplitDataset.m
│   │   ├── hsvargplvmSampleLayer.m
│   │   ├── skelGetChannels.m
│   │   ├── hsvargplvmRetainedScales.m
│   │   ├── hsvargplvmObjectiveGradient.m
│   │   ├── hsvargplvmShowSkel2.m
│   │   ├── hsvargplvmLogLikeGradients2.m
│   │   ├── hsvargplvmShowSkel.m
│   │   ├── hsvargplvmEmbedScript.m
│   │   ├── hsvargplvmAnalyseResults.m
│   │   ├── hsvargplvmDisplay.m
│   │   ├── hsvargplvmInitXOptions.m
│   │   ├── vargplvmExtractParamNoVardist.m
│   │   ├── hsvargplvmAddParamPrior.m
│   │   ├── vargpCovGrads.m
│   │   ├── hsvargplvmCheckSNR.m
│   │   ├── demDigitsDemonstrationInit.m
│   │   ├── vargplvmPosteriorMeanVarHier.m
│   │   ├── hsvargplvmOptions.m
│   │   ├── hsvargplvmStaticImageVisualise.m
│   │   ├── hsvargplvmCreateOptions.m
│   │   ├── demToyDynamicsPredictions.m
│   │   ├── hsvargplvmLogLikelihood.m
│   │   ├── hsvargplvmShowScales.m
│   │   ├── hsvargplvmRegressionInitX.m
│   │   ├── demHighFiveDemonstration.m
│   │   ├── hsvargplvmLoadSkelData.m
│   │   ├── lvmScatterPlotNoVar2.m
│   │   ├── hsvargplvmOptimise.m
│   │   ├── demToyUnsupervised.m
│   │   ├── hsvargplvmRestorePrunedModel.m
│   │   ├── hsvargplvmPlotX.m
│   │   ├── hsvargplvmPosteriorMeanVarSimple.m
│   │   ├── hsvargplvmCreateToyData.m
│   │   ├── hsvargplvmClusterScales.m
│   │   ├── demHsvargplvmClassification.m
│   │   ├── hsvargplvmPosteriorMeanVar.m
│   │   ├── hsvargplvmExtractParam.m
│   │   ├── hsvargplvmExpandParam.m
│   │   ├── demToyHsvargplvm1.m
│   │   ├── demStepFunction.m
│   │   ├── demHighFive1.m
│   │   ├── demHsvargplvmHighFive1.m
│   │   ├── demToyRegressionSimple.m
│   │   ├── demHsvargplvmRegression.m
│   │   ├── hsvargplvmFeatureClassification.m
│   │   ├── loadMocapData.m
│   │   ├── hsvargplvmOptimiseModel.m
│   │   ├── lvmVisualiseHierarchical.m
│   │   ├── lvmRealtimeAudio.m
│   │   ├── hsvargplvmUpdateStats.m
│   │   ├── deepGPRegression.m
│   │   └── hierSetPlotNoVar.m
│   ├── README.md
│   └── LICENSE.txt
├── README.md
└── LICENSE.txt
/deepGP/html/M_h.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/M_h.png
--------------------------------------------------------------------------------
/deepGP/html/X_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/X_0.png
--------------------------------------------------------------------------------
/deepGP/html/X_H.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/X_H.png
--------------------------------------------------------------------------------
/deepGP/html/Xs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/Xs.png
--------------------------------------------------------------------------------
/deepGP/html/toyAll.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/toyAll.png
--------------------------------------------------------------------------------
/deepGP/html/gpSample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/gpSample.png
--------------------------------------------------------------------------------
/deepGP/html/digitsAll.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/digitsAll.png
--------------------------------------------------------------------------------
/deepGP/html/hierarchy2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/hierarchy2.png
--------------------------------------------------------------------------------
/deepGP/html/example1Bold.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/example1Bold.png
--------------------------------------------------------------------------------
/deepGP/html/hierarchy_tr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/hierarchy_tr.png
--------------------------------------------------------------------------------
/deepGP/html/toyRegResults.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/toyRegResults.png
--------------------------------------------------------------------------------
/deepGP/html/digitsExperiment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/digitsExperiment.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/toyAll.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/toyAll.pdf
--------------------------------------------------------------------------------
/deepGP/html/highFiveExperiment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/highFiveExperiment.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/digitsAll.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/digitsAll.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/gpSample1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/gpSample1.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/gpSample2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/gpSample2.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/gpSample3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/gpSample3.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/hierarchy2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/hierarchy2.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/6.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/6.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/1.pdf
--------------------------------------------------------------------------------
/deepGP/html/H5_latentSpaceDigits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/H5_latentSpaceDigits.png
--------------------------------------------------------------------------------
/deepGP/matlab/demHighFiveHgplvm1.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/demHighFiveHgplvm1.mat
--------------------------------------------------------------------------------
/deepGP/matlab/demToy_vargplvm270.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/demToy_vargplvm270.mat
--------------------------------------------------------------------------------
/deepGP/matlab/hgplvmSampleDataTr1.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/hgplvmSampleDataTr1.mat
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/example1Bold.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/example1Bold.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/hierarchy_tr.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/hierarchy_tr.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/102.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/102.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/147.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/147.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/219.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/219.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/381.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/381.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/426.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/426.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL1/73.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL1/73.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/130.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/130.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/190.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/190.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/22.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/22.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/274.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/274.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/370.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/370.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL2/448.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL2/448.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/toyRegResults.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/toyRegResults.pdf
--------------------------------------------------------------------------------
/deepGP/html/hierarchyFull_compact2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/html/hierarchyFull_compact2.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/1.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/1.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/46.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/46.pdf
--------------------------------------------------------------------------------
/deepGP/matlab/demHighFiveHsvargplvm9.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/demHighFiveHsvargplvm9.mat
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/digitsExperiment.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/digitsExperiment.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/highFiveExperiment.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/highFiveExperiment.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/157.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/157.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/175.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/175.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/190.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/190.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/226.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/226.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/262.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/262.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim5/415.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim5/415.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/199.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/199.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/253.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/253.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/289.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/289.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/307.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/307.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/340.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/340.pdf
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/sampleL5Dim6/344.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/sampleL5Dim6/344.png
--------------------------------------------------------------------------------
/deepGP/matlab/demUsps3ClassHsvargplvm19.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/demUsps3ClassHsvargplvm19.mat
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/H5_latentSpaceDigits.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/H5_latentSpaceDigits.png
--------------------------------------------------------------------------------
/deepGP/tex/diagrams/hierarchyFull_compact.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/tex/diagrams/hierarchyFull_compact.pdf
--------------------------------------------------------------------------------
/deepGP/matlab/demToy_hgplvmSampleTr1Hsvargplvm1.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/deepGP/HEAD/deepGP/matlab/demToy_hgplvmSampleTr1Hsvargplvm1.mat
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmShowScalesSorted.m:
--------------------------------------------------------------------------------
1 | function scales = hsvargplvmShowScalesSorted(model)
2 | for h=1:model.H
3 | scalesAll{h} = zeros(1, model.layer{h}.q);
4 | subplot(model.H,1,h)
5 | scales{h} = vargplvmShowScales(model.layer{h}.comp{1},0);
6 | %scales{h} = sort(scales{h},'descend');
7 | scales{h} = scales{h} ./ max(scales{h});
8 | bar([scales{h} NaN*ones(1,15-length(scales{h}))])
9 | set(gca, 'XTickLabelMode', 'manual', 'XTickLabel', []);
10 | set(gca, 'YTickLabelMode', 'manual', 'YTickLabel', []);
11 |
12 | %title(['Layer ' num2str(h)]);
13 | end
14 |
--------------------------------------------------------------------------------
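A minimal usage sketch for hsvargplvmShowScalesSorted, assuming a trained deep GP is available in the variable model (e.g. from demToyHsvargplvm1.m):

    % One subplot per layer, each showing that layer's ARD scales,
    % normalised so that the largest scale equals 1.
    figure;
    scales = hsvargplvmShowScalesSorted(model);
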
/deepGP/matlab/myPlot.m:
--------------------------------------------------------------------------------
1 | function myPlot(X, t, fileName, root, lSizes, newFig)
2 | if nargin < 2, t = []; end
3 | if nargin < 3, fileName = []; end
4 | if nargin < 4, root = []; end
5 | if nargin < 5 || isempty(lSizes), lSizes{1}=4; lSizes{2}=14; end
6 | if nargin < 6, newFig = true; end
7 |
8 | if newFig, figure; end
9 | plot(X(:,1), X(:,2),'--x','LineWidth',lSizes{1},...
10 | 'MarkerEdgeColor','r',...
11 | 'MarkerFaceColor','g',...
12 | 'MarkerSize',lSizes{2}); title(t); axis off
13 |
14 | %pause
15 | if ~isempty(fileName)
16 | if ~isempty(root)
17 | fileName = [root filesep fileName];
18 | end
19 | print('-depsc', [fileName '.eps']);
20 | print('-dpdf', [fileName '.pdf']);
21 | print('-dpng', [fileName '.png']);
22 | end
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmShowSNR.m:
--------------------------------------------------------------------------------
1 | function SNR = hsvargplvmShowSNR(model, layers, displ)
2 |
3 | if nargin < 3
4 | displ = true;
5 | end
6 |
7 | if nargin < 2 || isempty(layers)
8 | layers = 1:model.H;
9 | end
10 |
11 | for h=layers
12 | if displ
13 | fprintf('# SNR Layer %d\n',h)
14 | end
15 | for m=1:model.layer{h}.M
16 | if isfield(model.layer{h}.comp{m}, 'mOrig')
17 | varY = var(model.layer{h}.comp{m}.mOrig(:));
18 | else
19 | varY = var(model.layer{h}.comp{m}.m(:));
20 | end
21 | beta = model.layer{h}.comp{m}.beta;
22 | SNR{h}(m) = varY * beta;
23 | if displ
24 | fprintf(' Model %d: %f (varY=%f, 1/beta=%f)\n', m, SNR{h}(m), varY, 1/beta)
25 | end
26 | end
27 | end
--------------------------------------------------------------------------------
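A usage sketch for hsvargplvmShowSNR, under the same assumption of a trained model:

    % Print the SNR of every layer/modality; an SNR close to 1 means that
    % layer explains its outputs as pure noise (see hsvargplvmCheckSNR.m).
    SNR = hsvargplvmShowSNR(model);
    % Query only the top layer, silently; snrTop{model.H} holds the values.
    snrTop = hsvargplvmShowSNR(model, model.H, false);
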
/deepGP/matlab/hsvargplvmObjective.m:
--------------------------------------------------------------------------------
1 | function f = hsvargplvmObjective(params, model)
2 |
3 | % HSVARGPLVMOBJECTIVE Wrapper function for hierarchical var-GP-LVM objective.
4 | % FORMAT
5 | % DESC provides a wrapper function for the hierarchical var-GP-LVM; it
6 | % takes the negative of the log likelihood, feeding the parameters
7 | % correctly to the model.
8 | % ARG params : the parameters of the variational GP-LVM model.
9 | % ARG model : the model structure in which the parameters are to be
10 | % placed.
11 | % RETURN f : the negative of the log likelihood of the model.
12 | %
13 | % SEEALSO : hsvargplvmCreate, hsvargplvmLogLikelihood, hsvargplvmExpandParam
14 | %
15 | % COPYRIGHT : Andreas C. Damianou 2012
16 |
17 | % HSVARGPLVM
18 |
19 |
20 | model = modelExpandParam(model, params);
21 | f = - modelLogLikelihood(model);
--------------------------------------------------------------------------------
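A sketch of how the objective wrapper is exercised, assuming a model created by hsvargplvmModelCreate: parameters are flattened with hsvargplvmExtractParam, and the objective is the negative variational bound at that parameter vector:

    params = hsvargplvmExtractParam(model);  % flatten all model parameters
    f = hsvargplvmObjective(params, model);  % negative log likelihood (bound)
    % Round trip: expanding the same vector leaves the model unchanged.
    model = hsvargplvmExpandParam(model, params);
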
/deepGP/matlab/hsvargplvmPruneModel.m:
--------------------------------------------------------------------------------
1 | % HSVARGPLVMPRUNEMODEL Prune a hierarchical shared var-GPLVM (deep GP) model.
2 | % FORMAT
3 | % DESC prunes a hierarchical shared var-GPLVM model by removing some fields which can later be
4 | % reconstructed based on what is being kept. Used when storing a model.
5 | % ARG model: the model to be pruned
6 | % ARG onlyData: only prune the data parts. Useful when saving a model which
7 | % is updated after predictions.
8 | % RETURN model : the shared variational GP-LVM model after being pruned
9 | %
10 | % COPYRIGHT: Andreas Damianou, 2011
11 | %
12 | % SEEALSO : vargplvmPruneModel, hsvargplvmRestorePrunedModel
13 |
14 | % SVARGPLVM
15 |
16 | function model = hsvargplvmPruneModel(model, onlyData)
17 |
18 | if nargin == 1
19 | onlyData = 0;
20 | end
21 |
22 | for h=1:model.H
23 | for i=1:model.layer{h}.M
24 | model.layer{h}.comp{i} = vargplvmPruneModel(model.layer{h}.comp{i}, onlyData);
25 | end
26 | end
27 |
28 | model.isPruned = true;
--------------------------------------------------------------------------------
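A save/restore sketch, assuming Ytr is the cell array of training outputs the model was created with, and that hsvargplvmRestorePrunedModel (included in this package) has a signature analogous to vargplvmRestorePrunedModel:

    modelPruned = hsvargplvmPruneModel(model);   % drop reconstructible fields
    save('demMyExperiment.mat', 'modelPruned');  % much smaller .mat file
    % ... later ...
    load('demMyExperiment.mat');
    model = hsvargplvmRestorePrunedModel(modelPruned, Ytr);
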
/deepGP/matlab/hsvargplvmReconstructInputs.m:
--------------------------------------------------------------------------------
1 | % Reconstruct the training data by mapping the inputs of layer lInp to the outputs of
2 | % layer lOut. A well-trained model should return outputs very close to the
3 | % real outputs Y.
4 |
5 | function mu = hsvargplvmReconstructInputs(model, Y, lInp, lOut, ind)
6 |
7 | if nargin <2 || isempty(Y)
8 | Y = multvargplvmJoinY(model.layer{lOut});
9 | end
10 |
11 | if nargin < 4 || isempty(lOut)
12 | lOut = 1;
13 | end
14 |
15 | if nargin < 3 || isempty(lInp)
16 | lInp = model.H;
17 | end
18 |
19 | % -1 means all
20 | if nargin > 4 && ~isempty(ind) && ind == -1
21 | ind = 1:model.layer{lOut}.M;
22 | elseif nargin < 5 || isempty(ind)
23 | ind = model.layer{lOut}.M;
24 | end
25 |
26 |
27 | X = model.layer{lInp}.vardist.means;
28 |
29 |
30 | [X,mu]=hsvargplvmSampleLayer(model,lInp,lOut,ind,X);
31 |
32 |
33 | imagesc(Y); title('original data')
34 | figure
35 | imagesc(mu); title('reconstruction');
36 |
37 |
--------------------------------------------------------------------------------
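A usage sketch, assuming a trained model: propagate the top layer's variational means down to the observed layer and compare against the training data (the function itself produces the two imagesc plots):

    mu = hsvargplvmReconstructInputs(model, [], model.H, 1, -1);  % -1: all modalities
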
/deepGP/README.md:
--------------------------------------------------------------------------------
1 | deepGP v.1.0
2 | ========
3 |
4 | Matlab code for deep Gaussian processes (Damianou and Lawrence, AISTATS 2013)
5 |
6 | Dependency graph:
7 | - (1) vargplvm - Bayesian GPLVM/VGPDS/MRD toolbox: https://github.com/SheffieldML/vargplvm
8 | - (2) GPmat - Neil Lawrence's GP matlab toolbox: https://github.com/SheffieldML/GPmat
9 | - (3) Netlab v.3.3: http://www1.aston.ac.uk/ncrg/
10 | - (4) Isomap.m: http://web.mit.edu/cocosci/isomap/code/Isomap.m
11 | - (5) L2_distance.m: http://web.mit.edu/cocosci/isomap/code/L2_distance.m
12 | - (6) keep.m: http://www.mathworks.com/matlabcentral/fileexchange/181-keep/content/keep.m
13 |
14 | L2_distance ---- Isomap ---- GPmat ---- vargplvm ---- deepGP
15 |                               /          /
16 |            Netlab ------------------------
17 |
18 | Getting started:
19 | - Please check deepGP/html/index.html for a short overview of this package (TODO!).
20 | - Check deepGP/matlab/README.txt for a quick manual.
21 | - Check deepGP/matlab/tutorial.m for introductory demonstrations.
--------------------------------------------------------------------------------
/deepGP/matlab/Ytochannels.m:
--------------------------------------------------------------------------------
1 | function channels = Ytochannels(Y)
2 |
3 | % YTOCHANNELS Convert Y to channel values.
4 |
5 | xyzInd = [2];
6 | xyzDiffInd = [1 3];
7 | rotInd = [4 6];
8 | rotDiffInd = [5];
9 | generalInd = [7:38 41:47 49:50 53:59 61:62];
10 | startInd = 1;
11 | endInd = length(generalInd);
12 | channels(:, generalInd) = 180*Y(:, startInd:endInd)/pi;
13 | startInd = endInd + 1;
14 | endInd = endInd + length(xyzDiffInd);
15 | channels(:, xyzDiffInd) = cumsum(Y(:, startInd:endInd), 1);
16 | startInd = endInd + 1;
17 | endInd = endInd + length(xyzInd);
18 | channels(:, xyzInd) = Y(:, startInd:endInd);
19 | startInd = endInd + 1;
20 | endInd = endInd + length(xyzDiffInd);
21 | channels(:, xyzDiffInd) = cumsum(Y(:, startInd:endInd), 1);
22 | startInd = endInd + 1;
23 | endInd = endInd + length(rotInd);
24 | channels(:, rotInd) = asin(Y(:, startInd:endInd))*180/pi;
25 | channels(:, rotInd(end)) = channels(:, rotInd(end))+270;
26 | startInd = endInd + 1;
27 | endInd = endInd + length(rotDiffInd);
28 | channels(:, rotDiffInd) = 0;%cumsum(asin(Y(:, startInd:endInd)), 1))*180/pi;
29 |
30 | end
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | deepGP v.1.0
2 | ========
3 |
4 | Matlab code for deep Gaussian processes (Damianou and Lawrence, AISTATS 2013)
5 |
6 | Dependency graph:
7 | - (1) vargplvm - Bayesian GPLVM/VGPDS/MRD toolbox: https://github.com/SheffieldML/vargplvm
8 | - (2) GPmat - Neil Lawrence's GP matlab toolbox: https://github.com/SheffieldML/GPmat
9 | - (3) Netlab v.3.3: http://www1.aston.ac.uk/ncrg/ Mirror (untested): https://uk.mathworks.com/matlabcentral/fileexchange/2654-netlab
10 | - (4) Isomap.m: https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/62449/versions/1/previews/IsoMap.m/index.html
11 | - (5) L2_distance.m: https://adamian.github.io/var/L2_distance.m
12 | - (6) keep.m: http://www.mathworks.com/matlabcentral/fileexchange/181-keep/content/keep.m
13 |
14 | L2_distance ---- Isomap ---- GPmat ---- vargplvm ---- deepGP
15 |                               /          /
16 |            Netlab ------------------------
17 |
18 | Getting started:
19 | - Please check deepGP/html/index.html for a short overview of this package.
20 | - Check deepGP/matlab/README.txt for a quick manual.
21 | - Check deepGP/matlab/tutorial.m for introductory demonstrations.
22 |
--------------------------------------------------------------------------------
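A path-setup sketch for the dependencies above; the checkout locations are assumptions, so adjust them to wherever you placed each package:

    % MATLAB startup snippet for deepGP and its dependencies.
    addpath(genpath('~/mlprojects/netlab'));                    % (3)
    addpath(genpath('~/mlprojects/GPmat/matlab'));              % (2)
    addpath(genpath('~/mlprojects/vargplvm/vargplvm/matlab'));  % (1)
    addpath('~/mlprojects/isomap');      % (4), (5): Isomap.m, L2_distance.m
    addpath('~/mlprojects/keep');        % (6): keep.m
    addpath(genpath('~/mlprojects/deepGP/deepGP/matlab'));
    tutorial                             % run the introductory demonstrations
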
/deepGP/matlab/hsvargplvmControlSNR.m:
--------------------------------------------------------------------------------
1 | % Add a prior on the beta parameter to avoid low Signal to Noise Ratio
2 | % problems.
3 | function model = hsvargplvmControlSNR(model, meanSNR, layer, view, priorInfo, priorScale)
4 |
5 | if nargin < 1, error('At least one argument needed!'); end
6 | if nargin < 6, priorScale = 25; end
7 | if nargin < 5, priorInfo = []; end
8 | if nargin < 4 || isempty(view), view = 1; end
9 | if nargin < 3 || isempty(layer), layer = model.H; end
10 | % Where we want the expected value of the inverse gamma, if it were placed on the SNR
11 | if nargin < 2 || isempty(meanSNR), meanSNR = 150; end
12 |
13 | if isempty(priorInfo)
14 | priorInfo.name = 'invgamma'; % What type of prior
15 | varData = var(model.layer{layer}.comp{view}.mOrig(:));
16 | meanB = meanSNR./varData;
17 | a=0.08;%1.0001; % Relatively large right-tail
18 | b=meanB*(a+1); % Because mode = b/(a-1)
19 | priorInfo.params = [a b];
20 | end
21 |
22 | model = hsvargplvmAddParamPrior(model, layer, view, 'beta', priorInfo.name, priorInfo.params);
23 | if ~isempty(priorScale)
24 | model.layer{layer}.comp{view}.paramPriors{1}.prior.scale = priorScale;
25 | end
26 |
--------------------------------------------------------------------------------
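A usage sketch, assuming a trained (or freshly created) model:

    % Inverse-gamma prior on beta of the top layer, targeting the default
    % mean SNR of 150:
    model = hsvargplvmControlSNR(model);
    % The same, but targeting a mean SNR of 200:
    model = hsvargplvmControlSNR(model, 200);
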
/deepGP/matlab/hsvargplvmAddParentPrior.m:
--------------------------------------------------------------------------------
1 | % Takes a hsvargplvm model and adds a non-(standard normal) prior on the
2 | % parent (see: addDynamics functions in the other vargplvm-related
3 | % packages).
4 |
5 | % See also: svargplvmAddDynamics.m
6 |
7 | function model = hsvargplvmAddParentPrior(model, globalOpt, optionsDyn)
8 |
9 | modelParent = model.layer{model.H};
10 | modelParent.comp{1}.vardist = modelParent.vardist;
11 | modelParent.comp{1}.X = modelParent.vardist.means;
12 | % TODO: if center means....
13 | if model.H > 1
14 | modelParent.comp{1}.y = model.layer{end-1}.vardist.means;
15 | elseif length(model.layer{1}.comp) == 1
16 | modelParent.comp{1}.y = model.layer{1}.comp{1}.y;
17 | else
18 | error('Not implemented!')
19 | end
20 |
21 | modelParent.comp{1} = svargplvmAddDynamics(modelParent.comp{1}, globalOpt, optionsDyn);
22 |
23 |
24 | modelParent.vardist = modelParent.comp{1}.vardist;
25 | modelParent.dynamics = modelParent.comp{1}.dynamics;
26 | modelParent.comp{1} = rmfield(modelParent.comp{1}, 'vardist');
27 | modelParent.comp{1} = rmfield(modelParent.comp{1}, 'dynamics');
28 | modelParent.comp{1} = rmfield(modelParent.comp{1}, 'X');
29 | modelParent.comp{1} = rmfield(modelParent.comp{1}, 'y');
30 |
31 |
32 | model.layer{model.H} = modelParent;
33 |
34 | %params = hsvargplvmExtractParam(model);
35 | %model = hsvargplvmExpandParam(model, params);
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmGradient.m:
--------------------------------------------------------------------------------
1 | function g = hsvargplvmGradient(params, model)
2 |
3 | % HSVARGPLVMGRADIENT Hierarchical variational GP-LVM gradient wrapper.
4 | % FORMAT
5 | % DESC is a wrapper function for the gradient of the negative log
6 | % likelihood of a hierarchical variational GP-LVM model with respect to the latent positions
7 | % and parameters.
8 | % ARG params : vector of parameters and latent positions where the
9 | % gradient is to be evaluated.
10 | % ARG model : the model structure into which the latent positions
11 | % and the parameters will be placed.
12 | % RETURN g : the gradient of the negative log likelihood with
13 | % respect to the latent positions and the parameters at the given
14 | % point.
15 | %
16 | % SEEALSO : vargplvmLogLikeGradients, vargplvmExpandParam
17 | %
18 | % COPYRIGHT : Michalis K. Titsias, 2009 - 2011
19 | %
20 | % COPYRIGHT : Neil D. Lawrence, 2006, 2005, 2010-2011
21 |
22 | % VARGPLVM
23 |
24 | model = modelExpandParam(model, params);
25 |
26 | g = - modelLogLikeGradients(model);
27 |
28 | % sum gradients of tied parameters, then assign corresponding summed gradients to each
29 | % group of tied parameters
30 | % if isfield( model, 'ties' )
31 | % g = g * model.T; % model.T == model.ties' * model.ties;
32 | % end
33 | % fprintf(1,'# G: %.13f\n',sum(abs(g))); %%% DEBUG
34 | %fprintf(1,'# G: %.13f\n', norm(g)); %%% DEBUG (close to convergence this should go -> 0)
--------------------------------------------------------------------------------
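A finite-difference sanity check of hsvargplvmGradient against hsvargplvmObjective; the probed indices and the step size are arbitrary choices for this sketch:

    params = hsvargplvmExtractParam(model);
    g = hsvargplvmGradient(params, model);
    epsStep = 1e-6;
    idx = randperm(length(params)); idx = idx(1:5);  % probe 5 random parameters
    for i = idx
        pp = params; pm = params;
        pp(i) = pp(i) + epsStep; pm(i) = pm(i) - epsStep;
        gNum = (hsvargplvmObjective(pp, model) ...
              - hsvargplvmObjective(pm, model)) / (2*epsStep);
        fprintf('param %d: analytic %g vs numeric %g\n', i, g(i), gNum);
    end
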
/deepGP/matlab/hsvargplvmPropagateField.m:
--------------------------------------------------------------------------------
1 | % HSVARGPLVMPROPAGATEFIELD Set the value of a (potentially not yet existing) field for every sub-model of the main hsvargplvm.
2 | % DESC Set the value of a (potentially not yet existing) field for every sub-model of the main hsvargplvm.
3 | % ARG model: the hsvargplvm model (containing the submodels); every
4 | % submodel is visited and the given value is set to the specified
5 | % field.
6 | % ARG fieldName: the name of the field to set (with fieldValue) in every
7 | % submodel.
8 | % ARG fieldValue: the value for the fieldName
9 | % ARG dynamics: if set to 1, then IN ADDITION to setting
10 | % model.layer{h}.comp{i}.fieldName = fieldValue
11 | % the algorithm will ALSO set:
12 | % model.layer{h}.comp{i}.dynamics.fieldName = fieldValue.
13 | % If dynamics is omitted, or if there is no dynamics field already,
14 | % the argument dynamics takes the default value FALSE.
15 | % COPYRIGHT: Andreas C. Damianou, 2011
16 | % SEEALSO : svargplvmModelCreate
17 | %
18 | % SVARGPLVM
19 |
20 | function model = hsvargplvmPropagateField(model, fieldName, fieldValue, layers, dynamics)
21 |
22 | if nargin < 5 || ~isfield(model, 'dynamics') || isempty(model.dynamics)
23 | dynamics = 0;
24 | end
25 |
26 | if nargin < 4 || isempty(layers)
27 | layers = 1:model.H;
28 | end
29 |
30 | for h=layers
31 | for i=1:model.layer{h}.M
32 | model.layer{h}.comp{i}.(fieldName) = fieldValue;
33 | if dynamics
34 | model.layer{h}.comp{i}.dynamics.(fieldName) = fieldValue;
35 | end
36 | end
37 | end
--------------------------------------------------------------------------------
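A usage sketch; 'learnBeta' is used here purely as an example field name:

    % Set learnBeta = true on every sub-model of the two lowest layers.
    model = hsvargplvmPropagateField(model, 'learnBeta', true, 1:2);
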
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, Andreas Damianou, Neil Lawrence
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | * Redistributions of source code must retain the above copyright
7 | notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright
9 | notice, this list of conditions and the following disclaimer in the
10 | documentation and/or other materials provided with the distribution.
11 | * Neither the name of the nor the
12 | names of its contributors may be used to endorse or promote products
13 | derived from this software without specific prior written permission.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
19 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/deepGP/matlab/demToyDynamicsSplitDataset.m:
--------------------------------------------------------------------------------
1 | % If true, then the training/test set are created by biasing towards creating blocks
2 | % (of size biased towards a predefined number)
3 | if ~exist('splBlocks'), splBlocks = false; end
4 |
5 | % Create a test set
6 | if ~splBlocks
7 | indTr = randperm(Ntoy);
8 | indTr = sort(indTr(1:Ntr));
9 | indTs = setdiff(1:Ntoy, indTr);
10 | else
11 | %--- Split training and test sets
12 | mask = [];
13 | lastTrPts = 0; %The last lastTrPts will be from YTr necessarily if > 0
14 | r=1; % start with tr. set
15 | while length(mask) < ...
--------------------------------------------------------------------------------
/deepGP/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, Andreas Damianou, Neil Lawrence
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | * Redistributions of source code must retain the above copyright
7 | notice, this list of conditions and the following disclaimer.
8 | * Redistributions in binary form must reproduce the above copyright
9 | notice, this list of conditions and the following disclaimer in the
10 | documentation and/or other materials provided with the distribution.
11 | * Neither the name of the nor the
12 | names of its contributors may be used to endorse or promote products
13 | derived from this software without specific prior written permission.
14 |
15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY
19 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmSampleLayer.m:
--------------------------------------------------------------------------------
1 | % Sample points from the vardistr. of the layer "lInp" and find outputs in
2 | % layer "lOut" for the outputs "ind".
3 | function [X, mu, sigma] = hsvargplvmSampleLayer(model, lInp, lOut, ind, dim,X, startingPoint)
4 |
5 | if nargin <7 || isempty(startingPoint)
6 | % This point will be initially drawn. Then, we will sample and alter
7 | % only one of its dimensions.
8 | startingPoint = 1;
9 | end
10 |
11 | if nargin < 3 || isempty(lOut)
12 | lOut = 1;
13 | end
14 |
15 | if nargin < 2 || isempty(lInp)
16 | lInp = model.H;
17 | end
18 |
19 | % -1 means all
20 | if nargin > 3 && ~isempty(ind) && ind == -1
21 | ind = 1:model.layer{lOut}.M;
22 | elseif nargin < 4 || isempty(ind)
23 | ind = 1:model.layer{lOut}.M;
24 | end
25 |
26 | if nargin < 6 || isempty(X)
27 | Xorig = model.layer{lInp}.vardist.means;
28 | N = size(Xorig,1);
29 | xmin = min(Xorig(:,dim));
30 | xmax = max(Xorig(:,dim));
31 | df = xmax - xmin;
32 | xmin = xmin - 4*df/N; % also catch some points before xmin
33 | xmax = xmax + 4*df/N;
34 | x = linspace(xmin,xmax, 3*N); % this is the series of changes made in a specific dimension
35 | X = repmat(Xorig(startingPoint,:), length(x),1); % Just select some initial point
36 | X(:,dim) = x';
37 | end
38 |
39 | %fprintf('# Sampling from layer %d to layer %d for dimension %d...\n', lInp, lOut, dim)
40 |
41 |
42 | if nargout > 2
43 | [mu sigma] = hsvargplvmPosteriorMeanVar(model, X, [], lInp, lOut, ind);
44 | else
45 | mu = hsvargplvmPosteriorMeanVar(model, X, [], lInp, lOut, ind);
46 | end
47 |
48 |
--------------------------------------------------------------------------------
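A usage sketch: sweep dimension 2 of the top layer's latent space, keeping the remaining dimensions fixed at a training point, and inspect the induced outputs of the first modality of the observed layer:

    [X, mu] = hsvargplvmSampleLayer(model, model.H, 1, 1, 2);
    figure; plot(X(:,2), mu(:,1), 'x-');
    xlabel('latent dimension 2 (top layer)'); ylabel('output dimension 1');
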
/deepGP/matlab/skelGetChannels.m:
--------------------------------------------------------------------------------
1 | function [channels, xyzDiffIndices, rotIndices] = skelGetChannels(Ytest)
2 |
3 | xyzDiffIndices = [];
4 | rotIndices = [];
5 |
6 | %left indices
7 | xyzInd = [2];
8 | xyzDiffInd = [1 3];
9 | rotInd = [4 6];
10 | rotDiffInd = [5];
11 | generalInd = [7:38 41:47 49:50 53:59 61:62];
12 | startInd = 1;
13 | endInd = length(generalInd);
14 | channels(:, generalInd) = 180*Ytest(:, startInd:endInd)/pi;
15 | startInd = endInd + 1;
16 | endInd = endInd + length(xyzDiffInd);
17 | channels(:, xyzDiffInd) = cumsum(Ytest(:, startInd:endInd), 1);
18 | startInd = endInd + 1;
19 | endInd = endInd + length(xyzInd);
20 | channels(:, xyzInd) = Ytest(:, startInd:endInd);
21 | %xyzDiffIndices = [xyzDiffIndices startInd:endInd]; %%%%%%%%%%%
22 | startInd = endInd + 1;
23 | endInd = endInd + length(xyzDiffInd);
24 | channels(:, xyzDiffInd) = cumsum(Ytest(:, startInd:endInd), 1);
25 | xyzDiffIndices = [xyzDiffIndices startInd:endInd];%%%%%%%%%
26 | startInd = endInd + 1;
27 | endInd = endInd + length(rotInd);
28 | channels(:, rotInd) = asin(Ytest(:, startInd:endInd))*180/pi;
29 | channels(:, rotInd(end)) = channels(:, rotInd(end))+270;
30 | rotIndices = [rotIndices startInd:endInd];
31 | startInd = endInd + 1;
32 | endInd = endInd + length(rotDiffInd);
33 | channels(:, rotDiffInd) = 0;%cumsum(asin(Ytest(:, startInd:endInd)), 1))*180/pi;
34 | xyzDiffIndices = [xyzDiffIndices startInd:endInd];%%%%%%%%%
35 |
36 | % skelPlayData(skel, channels, 1/25);
37 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmRetainedScales.m:
--------------------------------------------------------------------------------
1 | function [scales, scalesK] = hsvargplvmRetainedScales(model, thresh, displPlots, displ)
2 |
3 | if nargin < 4 || isempty(displ)
4 | displ = false;
5 | end
6 |
7 | if nargin < 3 || isempty(displPlots)
8 | displPlots = true;
9 | end
10 |
11 | if nargin < 2 || isempty(thresh)
12 | thresh = 0.01;
13 | end
14 |
15 |
16 | for h=1:model.H
17 | scalesK{h} = zeros(model.layer{h}.M, model.layer{h}.q);
18 | scalesAll{h} = zeros(model.layer{h}.M, model.layer{h}.q);
19 | scalesAllTmp{h} = svargplvmScales('get', model.layer{h});
20 |
21 | for m=1:model.layer{h}.M
22 | scales{h}{m} = vargplvmRetainedScales(model.layer{h}.comp{m},thresh);
23 | scalesK{h}(m,scales{h}{m}) = 1;
24 | scalesAll{h}(m,:) = scalesAllTmp{h}{m};
25 | scalesAll{h}(m,:) = scalesAll{h}(m,:) / max(scalesAll{h}(m,:)); %% Scale so that 1 is max
26 | end
27 | end
28 |
29 | if displPlots
30 | for h=1:model.H
31 | if model.layer{h}.M > 1
32 | figure
33 | imagesc(scalesAll{h}); title(['All scales for layer ' num2str(h)]); set(gca,'XGrid','on')
34 | figure
35 | imagesc(scalesK{h}); title(['Binarized scales for layer ' num2str(h)]); set(gca,'XGrid','on')
36 | end
37 | end
38 | end
39 |
40 | if displ
41 | for h=1:model.H
42 | fprintf('# Layer %d\n', h)
43 | fprintf(' q | Scales\n')
44 | fprintf('------------------\n')
45 | for q=1:model.layer{h}.M
46 | fprintf(' %d | %s \n',q,num2str(scales{h}{q}));
47 | end
48 | fprintf('\n');
49 | end
50 | end
--------------------------------------------------------------------------------
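A usage sketch: list, per layer, which latent dimensions survive ARD with a 1% relative threshold, suppressing the plots:

    [scales, scalesK] = hsvargplvmRetainedScales(model, 0.01, false, true);
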
/deepGP/matlab/hsvargplvmObjectiveGradient.m:
--------------------------------------------------------------------------------
1 | function [f, g] = hsvargplvmObjectiveGradient(params, model)
2 |
3 | % HSVARGPLVMOBJECTIVEGRADIENT Wrapper function for the hierarchical shared VARGPLVM objective and gradient.
4 | % FORMAT
5 | % DESC returns the negative log likelihood of a Gaussian process
6 | % model given the model structure and a vector of parameters. This
7 | % allows the use of NETLAB minimisation functions to find the model
8 | % parameters.
9 | % ARG params : the parameters of the model for which the objective
10 | % will be evaluated.
11 | % ARG model : the model structure for which the objective will be
12 | % evaluated.
13 | % RETURN f : the negative log likelihood of the HSVARGPLVM model.
14 | % RETURN g : the gradient of the negative log likelihood of the HSVARGPLVM
15 | % model with respect to the parameters.
16 | %
17 | % SEEALSO : minimize, svargplvmModelCreate, svargplvmGradient, svargplvmLogLikelihood, svargplvmOptimise
18 | %
19 | % COPYRIGHT : Andreas C. Damianou, 2011
20 |
21 | % SVARGPLVM
22 |
23 | % Check how the optimiser has given the parameters
24 | if size(params, 1) > size(params, 2)
25 | % As a column vector ... transpose everything.
26 | transpose = true;
27 | model = hsvargplvmExpandParam(model, params');
28 | else
29 | transpose = false;
30 | model = hsvargplvmExpandParam(model, params);
31 | end
32 |
33 | f = - hsvargplvmLogLikelihood(model);
34 | % fprintf(1,'# F: %.13f\n',f); %%% DEBUG
35 | if nargout > 1
36 | g = - hsvargplvmLogLikeGradients(model);
37 | % fprintf(1,'# G: %.13f .\n',sum(abs(g))); %%% DEBUG
38 | if transpose
39 | g = g';
40 | end
41 | end
42 |
43 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmShowSkel2.m:
--------------------------------------------------------------------------------
1 | function hsvargplvmShowSkel2(model, visModality, visLayer)
2 |
3 | if nargin < 2
4 | visModality = 1;
5 | end
6 |
7 | if nargin < 3
8 | visLayer = 1;
9 | end
10 |
11 | baseDir = datasetsDirectory;
12 | dirSep = filesep;
13 |
14 | skel{1} = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20.asf']);
15 | [YA, skel{1}] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20_11.amc'], skel{1});
16 | seqInd = [50:4:113 114:155 156:4:size(YA, 1)];
17 | YA = YA(seqInd, :);
18 | % YA(:, [4:end]) = asind(sind(YA(:, [4:end])));
19 | skel{2} = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21.asf']);
20 | [YB, skel{2}] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21_11.amc'], skel{2});
21 | YB = YB(seqInd, :);
22 |
23 | Yall{1} = YA;
24 | Yall{2} = YB;
25 |
26 | %%
27 |
28 |
29 | figure
30 | gca;
31 | dataType = 'skel';
32 | % Now from the parent
33 | modelP = model;
34 | modelP.type = 'hsvargplvm';
35 | modelP.vis.index=visModality;
36 | modelP.vis.layer = visLayer; % The layer we are visualising FROM
37 |
38 | modelP.vardist = model.layer{modelP.vis.layer}.vardist;
39 | modelP.X = modelP.vardist.means;
40 | modelP.q = size(modelP.X,2);
41 |
42 | modelP.d = model.layer{model.H}.M;
43 | %YY = multvargplvmJoinY(model.layer{1});
44 | %Ynew = zeros(size(YY,1), size(YY,2)+length(vA));
45 | %Ynew(:, dms) = YY;
46 | modelP.y = Yall{visModality}; % Ytochannels(Ynew);
47 | %modelP.Ytochannels = true;
48 |
49 |
50 | lvmVisualiseGeneral(modelP, [], [dataType 'Visualise'], [dataType 'Modify'],false, skel{visModality});
51 | ylim([-20, 30]);
52 | zlim([0, 40]);
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmLogLikeGradients2.m:
--------------------------------------------------------------------------------
1 | for h=1:model.H
2 | gTheta = [];
3 | gInd = [];
4 | gIndThetaBeta = [];
5 | gVarmeans = [];
6 | gVarcovs = [];
7 | gDyn = [];
8 |
9 | gTheta_i = [];
10 | gInd_i = [];
11 | gVarmeans_i = 0;
12 | gVarcovs_i = 0;
13 | gDyn_i = [];
14 |
15 | for i=1:model.layer{h}.M
16 | % Derivatives of L_{hm} term for non-vardistr. params
17 | model.layer{h}.comp{i}.vardist = model.layer{h}.vardist;
18 |
19 | if h ~= model.H % Not parent
20 | model.comp{i}.onlyLikelihood = true;
21 | end
22 | % For not parent, gVarmeansAll_i and covs are []
23 | [gVarmeansLik_i gVarcovsLik_i gVarmeansAll_i gVarcovsAll_i gInd_i gTheta_i gBeta_i] = ...
24 | vargplvmLogLikelihoodParts(model.layer{h}.comp{i});
25 |
26 | gIndThetaBeta = [gIndThetaBeta gInd_i gTheta_i gBeta_i];
27 |
28 |
29 |
30 | if h == model.H
31 | % Parent: we have KL terms that need to be added
32 | % gVarmeans_
33 | %...
34 | else
35 | % Derivatives of L_{hm} term for vardistr. params (need to be added
36 | % since we only have one vardistr. which is common).
37 | gVarmeans_i = gVarmeans_i + gVarmeansLik_i;
38 | gVarcovs_i = gVarcovs_i + gVarcovsLik_i;
39 | end
40 | end
41 |
42 | if h ~= model.H
43 | % Derivatives of H_h term for vardistr. params (also need to be
44 | % added) - that's just the covars.
45 | %gVarcovsEntr_i = ... ;
46 | end
47 |
48 | % Now amend the derivs. of the vardistr. of the previous layer
49 | if h ~= 1
50 | % ...
51 | end
52 | end
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmShowSkel.m:
--------------------------------------------------------------------------------
1 | function hsvargplvmShowSkel(model)
2 |
3 |
4 | baseDir = datasetsDirectory;
5 | dirSep = filesep;
6 |
7 | skel{1} = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20.asf']);
8 | [YA, skel{1}] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20_11.amc'], skel{1});
9 | seqInd = [50:4:113 114:155 156:4:size(YA, 1)];
10 | YA = YA(seqInd, :);
11 | % YA(:, [4:end]) = asind(sind(YA(:, [4:end])));
12 | skel{2} = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21.asf']);
13 | [YB, skel{2}] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21_11.amc'], skel{2});
14 | YB = YB(seqInd, :);
15 |
16 | Yall{1} = YA;
17 | Yall{2} = YB;
18 |
19 | %%
20 | m = 2;
21 | figure
22 | % First, sample from the intermediate layer
23 | model2 = model.layer{1}.comp{m};
24 | model2.vardist = model.layer{1}.vardist;
25 | model2.X = model2.vardist.means;
26 | %channelsA = skelGetChannels(Yall{m});
27 | dataType = 'skel';
28 | lvmVisualiseGeneral(model2, [], [dataType 'Visualise'], [dataType 'Modify'],false, skel{m});
29 | ylim([-18 18])
30 | %%
31 | figure
32 | m = 2;
33 | dataType = 'skel';
34 | % Now from the parent
35 | modelP = model;
36 | modelP.type = 'hsvargplvm';
37 | modelP.vardist = model.layer{2}.vardist;
38 | modelP.X = modelP.vardist.means;
39 | modelP.q = size(modelP.X,2);
40 |
41 | modelP.d = size(Yall{m},2);
42 | modelP.y = Yall{m};
43 | modelP.vis.index=m;
44 | modelP.vis.layer = 1;
45 | lvmVisualiseGeneral(modelP, [], [dataType 'Visualise'], [dataType 'Modify'],false, skel{m});
46 | ylim([-20, 30]);
47 | % USE e.g. ylim([-18 8]) to set the axis right if needed
48 | end
49 |
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmEmbedScript.m:
--------------------------------------------------------------------------------
1 | % HSVARGPLVMEMBEDSCRIPT Embed a dataset given in Ytr{1} with a deep GP.
2 | % DESC Any configuration variable not already set in the workspace takes
3 | % a default value (see the "optional configurations" block below).
4 | % COPYRIGHT: Andreas C. Damianou, 2015
5 | %
6 | % SEE ALSO: hsvargplvmModelCreate, hsvargplvmOptimiseModel
7 | %
8 | % DEEPGP
9 |
10 | %% ------ CONFIGURING THE DEEP GP
11 | %--- Mandatory configurations
12 | if ~exist('Ytr', 'var'), error('You need to specify your outputs in Ytr{1}=...'); end
13 |
14 | %--- Optional configurations: whatever configuration variable is not already set (i.e. does not exist
15 | % as a variable in the workspace) is set to a default value.
16 | if ~exist('experimentNo','var'), experimentNo = 404; end
17 | if ~exist('K','var'), K = 30; end
18 | if ~exist('Q','var'), Q = 6; end
19 | if ~exist('baseKern','var'), baseKern = 'rbfardjit'; end % {'rbfard2','white','bias'}; end
20 |
21 |
22 | hsvargplvm_init;
23 |
24 |
25 | %%
26 | options = hsvargplvmOptions(globalOpt);
27 | options.optimiser = 'scg2';
28 | initXOptions = hsvargplvmInitXOptions(Ytr, options, globalOpt);
29 |
30 |
31 | % Create a deepGP model, parametrized by its local options, global options
32 | % and options that say how to initialise the latent spaces X
33 | model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);
34 | %!!!!!!!!!!!!!!!!!!!!!!!!-----------------------
35 | if exist('DEBUG_entropy','var') && DEBUG_entropy
36 | model.DEBUG_entropy = true;for itmp=1:model.H, model.layer{itmp}.DEBUG_entropy = true; end
37 | end
38 | params = hsvargplvmExtractParam(model); model = hsvargplvmExpandParam(model, params);
39 |
40 | %% Optimise deep GP model
41 | model.globalOpt = globalOpt;
42 | [model,modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
43 |
44 | % Uncomment if you decide to train for more iterations later...
45 | %modelOld = model;
46 | %model = hsvargplvmOptimiseModel(model, true, true, [], {0, [1000 1000 1000]});
47 |
--------------------------------------------------------------------------------
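A sketch of driving the script, assuming Y is an N x D data matrix; any of the optional configuration variables above can be preset before the call (hsvargplvmShowScales is assumed to accept the model as its single required argument):

    Ytr{1} = Y;                    % mandatory: outputs go in Ytr{1}
    Q = 4; K = 25; experimentNo = 1;
    hsvargplvmEmbedScript;         % creates and optimises the deep GP
    hsvargplvmShowScales(model);   % inspect the learned ARD scales per layer
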
/deepGP/matlab/hsvargplvmAnalyseResults.m:
--------------------------------------------------------------------------------
1 | switch globalOpt.demoType
2 | case 'skelDecompose'
3 |
4 | % load connectivity matrix
5 | % Load the results and display dynamically.
6 | %lvmResultsDynamic(model.type, dataSetName, experimentNo, 'skel', connect)
7 | dataType = 'skel';
8 | skel = acclaimReadSkel('35.asf');
9 | [tmpchan, skel] = acclaimLoadChannels('35_01.amc', skel);
10 | %channels = demCmu35VargplvmLoadChannels(Y1,skel);
11 | %channels = skelGetChannels(Y1, skel);
12 | %skelPlayData(skel, channels, 1/30);
13 |
14 | %%%% TODO: Change model so that model.y contains all submodel's y's and
15 | %%%% model.d = model.numModels. Then call lvmVisualise with model and
16 | %%%% create (for lvmClassVisualise) a function hsvargplvmPosteriorMeanVar to compute like posteriorMeanVar for every sampled
17 | %%%% X for all submodels and join them together immediately before the call
18 | %%%% to skelGetChannels.
19 | model2 = model;
20 | %model2.y = multvargplvmJoinY(model);
21 | model2.y = skelGetChannels(multvargplvmJoinY(model));%, skel);
22 | model2.d = size(model2.y,2);
23 | model2.type = 'multvargplvm';
24 | lvmVisualiseGeneral(model2, lbls, [dataType 'Visualise'], [dataType 'Modify'],false, skel);
25 |
26 | %% Understand what each dimension is doing
27 | %{
28 | YY = multvargplvmJoinY(model);
29 | for i=1:size(YY,2)
30 | Ytemp = YY(1,:); Ytemp(i) = -10; close all;subplot(1,2,1), skelVisualise(skelGetChannels(Ytemp),skel); subplot(1,2,2); skelVisualise(skelGetChannels(YY(1,:)),skel);
31 | title(num2str(i))
32 | pause
33 | end
34 | %}
35 | end
36 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmDisplay.m:
--------------------------------------------------------------------------------
1 | % HSVARGPLVMDISPLAY Display a textual summary of a deep GP model
2 | % COPYRIGHT: Andreas C. Damianou, 2014
3 | % SEEALSO: modelDisplay.m
4 |
5 | function hsvargplvmDisplay(model)
6 |
7 | fprintf('# Model type: %s\n', model.type)
8 | fprintf('# Mapping kernels:\n')
9 | for i = 1:length(model.layer)
10 | for j = 1:length(model.layer{i}.comp)
11 | fprintf(' Layer %d modality %d:\n', i,j);
12 | kernDisplay(model.layer{i}.comp{j}.kern,6);
13 | end
14 | end
15 | fprintf('# Number of inducing points:\n');
16 | for i = 1:length(model.layer)
17 | for j = 1:length(model.layer{i}.comp)
18 | if model.layer{i}.comp{j}.fixInducing
19 | tmp = ' (Tied to X)';
20 | else
21 | tmp = '';
22 | end
23 | fprintf(' Layer %d modality %d: %d%s\n', i,j, model.layer{i}.comp{j}.k,tmp);
24 | end
25 | end
26 | fprintf('# SNR:\n');
27 | SNRs=hsvargplvmShowSNR(model,1:model.H,0);
28 | for i = 1:length(model.layer)
29 | for j = 1:length(model.layer{i}.comp)
30 | if model.layer{i}.comp{j}.initVardist
31 | tmp = ' (Fixed)';
32 | else
33 | tmp = '';
34 | end
35 | fprintf(' Layer %d modality %d: %.5f%s\n', i,j, SNRs{i}(j), tmp);
36 | end
37 | end
38 |
39 | fprintf('# D >> N mode activated:\n');
40 | for i = 1:length(model.layer)
41 | for j = 1:length(model.layer{i}.comp)
42 | if model.layer{i}.comp{j}.DgtN
43 | tmp = 'Yes';
44 | else
45 | tmp = 'No';
46 | end
47 | fprintf(' Layer %d modality %d: %s\n', i,j, tmp);
48 | end
49 | end
50 |
51 | if isfield(model.layer{end}, 'dynamics') && ~isempty(model.layer{end}.dynamics)
52 | fprintf('# -- Dynamics --\n')
53 | constr = model.layer{end}.dynamics.constrainType{1};
54 | for ii = 2:length(model.layer{end}.dynamics.constrainType)
55 | constr = [constr ' ' model.layer{end}.dynamics.constrainType{ii}];
56 | end
57 | fprintf('# constrainType: %s\n', constr);
58 | fprintf('# Top layer kernel:\n')
59 | kernDisplay(model.layer{end}.dynamics.kern, 4);
60 | end
61 | fprintf('# (Approximate) log. marginal likelihood: %.3f\n',hsvargplvmLogLikelihood(model))
62 |
63 |
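64 | % Example usage (a sketch; assumes a trained deep GP model in the workspace):
65 | %   hsvargplvmDisplay(model);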
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmInitXOptions.m:
--------------------------------------------------------------------------------
1 | function [initXOptions, optionsAll] = hsvargplvmInitXOptions(Ytr, options, globalOpt)
2 |
3 | stackedOpt = globalOpt.stackedInitOpt;
4 |
5 | %--- Here we have the option of using Bayesian GPLVM or GPLVM for
6 | % initialising the latent spaces. If this option is selected, the
7 | % corresponding models are trained first.
8 | optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
9 | initXOptions = cell(1, options.H);
10 | for h=1:options.H
11 | if strcmp(optionsAll.initX{h}, 'vargplvm') || strcmp(optionsAll.initX{h}, 'fgplvm')
12 | initXOptions{h}{1} = optionsAll;
13 | % Don't allow the D >> N trick for layers > 1
14 | if h~=1
15 | if isfield(initXOptions{h}{1}, 'enableDgtN')
16 | initXOptions{h}{1}.enableDgtN = false;
17 | end
18 | end
19 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
20 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
21 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
22 | initXOptions{h}{1}.initX = 'ppca';
23 | initXOptions{h}{1}.initSNR = 90;
24 | initXOptions{h}{1}.numActive = 50;
25 | initXOptions{h}{2} = 160; % default init. vardist iterations (may be overridden by stackedOpt below)
26 | initXOptions{h}{3} = 30; % default init. iterations (may be overridden by stackedOpt below)
27 | if ~isempty(stackedOpt)
28 | if isfield(stackedOpt, 'stackedInitVardistIters') && ~isempty(stackedOpt.stackedInitVardistIters)
29 | initXOptions{h}{2} = stackedOpt.stackedInitVardistIters;
30 | end
31 | if isfield(stackedOpt, 'stackedInitIters') && ~isempty(stackedOpt.stackedInitIters)
32 | initXOptions{h}{3} = stackedOpt.stackedInitIters;
33 | end
34 | if isfield(stackedOpt, 'stackedInitSNR') && ~isempty(stackedOpt.stackedInitSNR)
35 | initXOptions{h}{1}.initSNR = stackedOpt.stackedInitSNR;
36 | end
37 | if isfield(stackedOpt, 'stackedInitK') && ~isempty(stackedOpt.stackedInitK)
38 | initXOptions{h}{1}.numActive = stackedOpt.stackedInitK;
39 | end
40 | end
41 | elseif ~isempty(stackedOpt) && (iscell(stackedOpt) && ~isempty(stackedOpt{h}))
42 | initXOptions{h} = stackedOpt{h};
43 | else
44 | initXOptions{h} = {};
45 | end
46 | end
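47 |
48 | % Example (a sketch, following the pattern used in the demos):
49 | %   initXOptions = hsvargplvmInitXOptions(Ytr, options, globalOpt);
50 | %   model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);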
--------------------------------------------------------------------------------
/deepGP/matlab/vargplvmExtractParamNoVardist.m:
--------------------------------------------------------------------------------
1 | function [params, names] = vargplvmExtractParamNoVardist(model)
2 |
3 | % VARGPLVMEXTRACTPARAMNOVARDIST Extract a parameter vector from a variational GP-LVM model,
4 | % ignoring the variational distribution. See vargplvmExtractParam.
5 |
6 |
7 | if nargout > 1
8 | returnNames = true;
9 | else
10 | returnNames = false;
11 | end
12 | params = [];
13 | names = {};
14 |
15 |
16 |
17 | % Inducing inputs
18 | if ~model.fixInducing
19 | if ~isfield(model, 'learnInducing') || (isfield(model, 'learnInducing') && model.learnInducing)
20 | params = [params model.X_u(:)'];
21 | if returnNames
22 | for i = 1:size(model.X_u, 1)
23 | for j = 1:size(model.X_u, 2)
24 | X_uNames{i, j} = ['X_u(' num2str(i) ', ' num2str(j) ')'];
25 | end
26 | end
27 | names = {names{:}, X_uNames{:}};
28 | end
29 | end
30 | end
31 |
32 |
33 | % Kernel parameters
34 | if returnNames
35 | [kernParams, kernParamNames] = kernExtractParam(model.kern);
36 | for i = 1:length(kernParamNames)
37 | kernParamNames{i} = ['Kernel, ' kernParamNames{i}];
38 | end
39 | names = {names{:}, kernParamNames{:}};
40 | else
41 | kernParams = kernExtractParam(model.kern);
42 | end
43 | params = [params kernParams];
44 |
45 |
46 | % beta in the likelihood
47 | if model.optimiseBeta
48 |
49 | if ~isstruct(model.betaTransform)
50 | fhandle = str2func([model.betaTransform 'Transform']);
51 | betaParam = fhandle(model.beta, 'xtoa');
52 | else
53 | if isfield(model.betaTransform,'transformsettings') && ~isempty(model.betaTransform.transformsettings)
54 | fhandle = str2func([model.betaTransform.type 'Transform']);
55 | betaParam = fhandle(model.beta, 'xtoa', model.betaTransform.transformsettings);
56 | else
57 | error('vargplvmExtractParam: Invalid transform specified for beta.');
58 | end
59 | end
60 |
61 | params = [params betaParam(:)'];
62 |
63 | if returnNames
64 | for i = 1:length(betaParam)
65 | betaParamNames{i} = ['Beta ' num2str(i)];
66 | end
67 | names = {names{:}, betaParamNames{:}};
68 | end
69 | end
70 |
71 |
72 |
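73 | % Example (a sketch; any vargplvm-style submodel can be passed in):
74 | %   [params, names] = vargplvmExtractParamNoVardist(model.layer{1}.comp{1});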
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmAddParamPrior.m:
--------------------------------------------------------------------------------
1 | function model = hsvargplvmAddParamPrior(model, h, m, paramName, priorName, varargin)
2 | % HSVARGPLVMADDPARAMPRIOR Add a prior on some of the parameters of the
3 | % model
4 | %
5 | % DESC The objective function of the model can be written (in the log space) as
6 | % F = F_likelihood + F_prior, where the F_likelihood term tries to fit the
7 | % data and the F_prior term includes our prior assumptions/biases. The
8 | % current function allows incorporating prior distributions on some of the
9 | % parameters of the model. Check hsvargplvmTestPrior.m for a tutorial on
10 | % how to do that. The current function is a wrapper for vargplvmAddParamPrior.m
11 | %
12 | % ARG model: The deep GP (hsvargplvm) model for which to add the prior
13 | % ARG h: Which layer of the model is receiving the prior
14 | % ARG m: Which modality of the h-th layer to receive the prior (currently
15 | % only m=1)
16 | % ARG paramName: The name of the parameter to receive the prior. Check
17 | % vargplvmAddParamPrior to see the names; they are the same as those returned
18 | % from [~,names]=hsvargplvmExtractParam(model), and a regexp is used to
19 | % match the given name.
20 | % ARG priorName: The name of the prior to use. Check the availability of
21 | % PriorLogProb.m functions to see which priors are implemented.
22 | % ARG varargin: Any arguments needed for the specific constructor of the
23 | % prior are going to be passed.
24 | %
25 | % SEEALSO: vargplvmAddParamPrior.m, hsvargplvmTestPrior.m
26 | %
27 | % COPYRIGHT: Andreas Damianou, 2015
28 | % DEEPGP
29 |
30 | if m > 1
31 | error('Not tested for m > 1 yet')
32 | end
33 |
34 |
35 | model.layer{h}.comp{m}.vardist = model.layer{h}.vardist;
36 | model.layer{h}.comp{m} = vargplvmAddParamPrior(model.layer{h}.comp{m}, paramName,priorName, varargin{:});
37 | model.layer{h}.comp{m} = rmfield(model.layer{h}.comp{m}, 'vardist');
38 |
39 | % If there are dynamics, the prior index needs fixing
40 | if isfield(model.layer{end}, 'dynamics')
41 | model.layer{h}.comp{m}.paramPriors{end}.index = model.layer{h}.comp{m}.paramPriors{end}.index + model.layer{end}.dynamics.kern.nParams;
42 | end
43 |
44 |
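45 | % Example (a sketch only; 'Beta' and 'gaussian' are illustrative names here,
46 | % see hsvargplvmTestPrior.m for real usage):
47 | %   model = hsvargplvmAddParamPrior(model, model.H, 1, 'Beta', 'gaussian');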
--------------------------------------------------------------------------------
/deepGP/matlab/vargpCovGrads.m:
--------------------------------------------------------------------------------
1 |
2 | function [gK_uu, gPsi0, gPsi1, gPsi2, g_Lambda, gBeta, tmpV] = vargpCovGrads(model)
3 |
4 | gPsi1 = model.beta * model.m * model.B';
5 | gPsi1 = gPsi1'; % because it is passed to "kernVardistPsi1Gradient" as gPsi1'...
6 |
7 | gPsi2 = (model.beta/2) * model.T1;
8 |
9 | gPsi0 = -0.5 * model.beta * model.d;
10 |
11 | gK_uu = 0.5 * (model.T1 - (model.beta * model.d) * model.invLmT * model.C * model.invLm);
12 |
13 | sigm = 1/model.beta; % beta^-1
14 |
15 | PLm = model.invLatT*model.P;
16 | tmpV = sum(sum(PLm.*PLm));
17 | gBeta = 0.5*(model.d*(model.TrC + (model.N-model.k)*sigm -model.Psi0) ...
18 | - model.TrYY + model.TrPP ...
19 | + (1/(model.beta^2)) * model.d * sum(sum(model.invLat.*model.invLat)) + sigm*tmpV);
20 |
21 | %%%% TEMP: disabled debugging code (logs individual gBeta terms to .mat files)
22 | %{
23 | load TEMPbetaGradTrC;
24 | TEMPbetaGradTrC = [TEMPbetaGradTrC model.d*0.5*model.TrC];
25 | save 'TEMPbetaGradTrC.mat' 'TEMPbetaGradTrC';
26 |
27 | load TEMPbetaGradNksigm;
28 | TEMPbetaGradNksigm=[TEMPbetaGradNksigm model.d*0.5*(model.N-model.k)*sigm];
29 | save 'TEMPbetaGradNksigm.mat' 'TEMPbetaGradNksigm';
30 |
31 | load TEMPbetaGradPsi0;
32 | TEMPbetaGradPsi0=[TEMPbetaGradPsi0 (-0.5*model.d*model.Psi0)];
33 | save 'TEMPbetaGradPsi0.mat' 'TEMPbetaGradPsi0';
34 |
35 | load TEMPbetaGradTrPP;
36 | TEMPbetaGradTrPP=[TEMPbetaGradTrPP 0.5*model.TrPP];
37 | save 'TEMPbetaGradTrPP.mat' 'TEMPbetaGradTrPP';
38 |
39 | load TEMPbetaGradLat;
40 | TEMPbetaGradLat=[TEMPbetaGradLat (1/(model.beta^2)) * model.d * sum(sum(model.invLat.*model.invLat))*0.5];
41 | save 'TEMPbetaGradLat.mat' 'TEMPbetaGradLat';
42 |
43 | load TEMPbetaGradPlm;
44 | TEMPbetaGradPlm=[TEMPbetaGradPlm sigm*sum(sum(PLm.*PLm))*0.5];
45 | save 'TEMPbetaGradPlm.mat' 'TEMPbetaGradPlm';
46 | %}
47 | %%%%%
48 |
49 |
50 | %gBeta = 0.5*(model.d*(model.TrC + (model.N-model.k)*sigm -model.Psi0) ...
51 | % - model.TrYY + model.TrPP ...
52 | % + sigm * sum(sum(model.K_uu .* model.Tb)));
53 |
54 | if ~isstruct(model.betaTransform)
55 | fhandle = str2func([model.betaTransform 'Transform']);
56 | gBeta = gBeta*fhandle(model.beta, 'gradfact');
57 | else
58 | fhandle = str2func([model.betaTransform.type 'Transform']);
59 | gBeta = gBeta*fhandle(model.beta, 'gradfact', model.betaTransform.transformsettings);
60 | end
61 |
62 |
63 | g_Lambda = repmat(-0.5*model.beta*model.d, 1, model.N);
64 | end
65 |
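66 | % Note: gBeta above is the derivative of the bound w.r.t. beta in its raw
67 | % parameterisation; the final multiplication by the transform's 'gradfact'
68 | % converts it to the gradient w.r.t. the optimised (transformed) parameter.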
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmCheckSNR.m:
--------------------------------------------------------------------------------
1 | function warnings = hsvargplvmCheckSNR(SNR, errLimit, warLimit, throwError)
2 | % HSVARGPLVMCHECKSNR Check Signal to Noise Ratio after
3 | % optimisation, to ensure that the trivial local minimum
4 | % of learning only noise is avoided.
5 | % DESC Check SNR of optimised model
6 | % FORMAT
7 | % ARG SNR: the SNR of the optimised model
8 | % ARG errLimit: Optional, the limit below which an error
9 | % is raised
10 | % ARG warLimit: Optional, the limit below which a warning message
11 | % is printed
12 | % ARG throwError: Optional; if true (default), an error is thrown when SNR <= errLimit
13 | % RETURN warnings: a string of (layer, modality) pairs whose SNR is low enough to be considered a warning.
14 | %
15 | % COPYRIGHT: Andreas C. Damianou, 2013
16 | %
17 | % DEEPGP
18 |
19 | if nargin < 4 || isempty(throwError), throwError = true; end
20 | if nargin < 3 || isempty(warLimit), warLimit = 10; end
21 | if nargin < 2 || isempty(errLimit), errLimit = 2; end
22 | if nargin < 1, error('Not enough arguments given'); end
23 |
24 | errStr = sprintf(['\nThis means that a bad local minimum has been reached\n', ...
25 | 'where everything is explained by noise. Please try a different\n', ...
26 | 'initialisation and/or consult the manual.\n']);
27 | warStr = sprintf(['\nThis means that a bad local minimum has been reached\n', ...
28 | 'where everything is explained by noise. Consider trying a different\n', ...
29 | 'initialisation and/or consult the manual.\n']);
30 |
31 | errors = [];
32 | warnings = [];
33 | for i = 1:length(SNR)
34 | for j = 1:length(SNR{i})
35 | if ~isempty(SNR{i}(j)) && SNR{i}(j) <= errLimit
36 | errors = [errors '(' num2str(i) ',' num2str(j) ') '];
37 | fprintf(1, 'SNR: Layer %d, modality %d: %f\n', i, j, SNR{i}(j))
38 | end
39 | end
40 | end
41 |
42 | if ~isempty(errors)
43 | errMsg = 'Error! Low SNR in (layer/modality) pairs: ';
44 | if throwError
45 | errMsg = [errMsg errors];
46 | errMsg = [errMsg errStr];
47 | error('hsvargplvm:checkSNR:lowSNR',errMsg);
48 | end
49 | else
50 | for i = 1:length(SNR)
51 | for j = 1:length(SNR{i})
52 | if ~isempty(SNR{i}(j)) && SNR{i}(j) <= warLimit
53 | warnings = [warnings '(' num2str(i) ',' num2str(j) ') '];
54 | end
55 | end
56 | end
57 | end
58 |
59 | if ~isempty(warnings)
60 | warMsg = 'WARNING! Low SNR in (layer/modality) pairs: ';
61 | warMsg = [warMsg warnings];
62 | warMsg = [warMsg warStr];
63 | warning(warMsg);
64 | end
--------------------------------------------------------------------------------
/deepGP/matlab/demDigitsDemonstrationInit.m:
--------------------------------------------------------------------------------
1 | if ~exist('experimentNo','var'), experimentNo = 404; end
2 | if ~exist('initial_X','var'), initial_X = 'separately'; end
3 | if ~exist('baseKern','var'), baseKern = {'linard2','white','bias'}; end
4 | if ~exist('itNo','var'), itNo = 500; end
5 | if ~exist('initVardistIters','var'), initVardistIters = []; end
6 | if ~exist('multVargplvm','var'), multVargplvm = false; end
7 |
8 | % That's for the ToyData2 function:
9 | if ~exist('toyType','var'), toyType = 'fols'; end % Other options: 'gps'
10 | if ~exist('hierSignalStrength','var'), hierSignalStrength = 1; end
11 | if ~exist('noiseLevel','var'), noiseLevel = 0.05; end
12 | if ~exist('numHierDims','var'), numHierDims = 1; end
13 | if ~exist('numSharedDims','var'), numSharedDims = 5; end
14 | if ~exist('Dtoy','var'), Dtoy = 10; end
15 | if ~exist('Ntoy','var'), Ntoy = 100; end
16 |
17 | hsvargplvm_init;
18 |
19 | Y=lvmLoadData('usps');
20 |
21 | globalOpt.dataSetName = 'usps';
22 |
23 | switch dataMerge % NOTE: dataMerge must be set in the workspace beforehand (e.g. 'vercat2' for the 3-class USPS demo)
24 | case 'modalities'
25 | YA = Y(1:100,:); % 0
26 | YB = Y(5001:5100,:); % 6
27 | Ytr{1} = YA;
28 | Ytr{2} = YB;
29 | case 'vercat'
30 | YA = Y(100:150,:); % 0
31 | YB = Y(5000:5050,:); % 6
32 | Ytr{1} = [YA; YB];
33 | case 'vercatBig'
34 | YA = Y(1:70,:); NA = size(YA,1);% 0
35 | YB = Y(5001:5070,:); NB = size(YB,1); % 6
36 | Ytr{1} = [YA; YB];
37 | lbls = zeros(size(Ytr{1},1),2);
38 | lbls(1:NA,1)=1;
39 | lbls(NA+1:end,2)=1;
40 | case 'vercat2'
41 | YA = Y(1:50,:); NA = size(YA,1);% 0
42 | YB = Y(5001:5050,:); NB = size(YB,1); % 6
43 | YC = Y(1601:1650,:); NC = size(YC,1); % ones
44 | Ytr{1} = [YA ; YB ; YC];
45 | lbls = zeros(size(Ytr{1},1),3);
46 | lbls(1:NA,1)=1;
47 | lbls(NA+1:NA+NB,2)=1;
48 | lbls(NA+NB+1:end,3)=1;
49 | globalOpt.dataSetName = 'usps3Class';
50 | case 'vercat3'
51 | YA = Y(1:40,:); NA = size(YA,1);% 0
52 | YB = Y(5001:5040,:); NB = size(YB,1); % 6
53 | YC = Y(1601:1640,:); NC = size(YC,1); % 1's
54 | YD = Y(3041:3080,:); ND = size(YD,1); % 3's
55 | Ytr{1} = [YA ; YB ; YC; YD];
56 | lbls = zeros(size(Ytr{1},1),4);
57 | lbls(1:NA,1)=1;
58 | lbls(NA+1:NA+NB,2)=1;
59 | lbls(NA+NB+1:NA+NB+NC,3)=1;
60 | lbls(NA+NB+NC+1:end,4)=1;
61 | globalOpt.dataSetName = 'usps4Class';
62 | end
63 |
64 | model = hsvargplvmRestorePrunedModel(model, Ytr); % 'model' is assumed to have been loaded (pruned) by the calling demonstration script
--------------------------------------------------------------------------------
/deepGP/matlab/vargplvmPosteriorMeanVarHier.m:
--------------------------------------------------------------------------------
1 | % The same as vargplvmPosteriorMeanVar, but with a small change in the
2 | % calculations involving model.m, because in the intermediate layers
3 | % model.m is replaced by the expectation varmu*varmu'+Sn w.r.t. the X of the
4 | % bottom layer.
5 | % SEEALSO: hsvargplvmPosteriorMeanVar
6 | function [mu, varsigma] = vargplvmPosteriorMeanVarHier(model, X, varX)
7 |
8 |
9 |
10 | if nargin < 3
11 | vardistX.covars = zeros(size(X, 1), size(X, 2)); % no test-input uncertainty given
12 | else
13 | vardistX.covars = varX;
14 | end
15 | vardistX.latentDimension = size(X, 2);
16 | vardistX.numData = size(X, 1);
17 | %model.vardist.covars = 0*model.vardist.covars;
18 | vardistX.means = X;
19 | %model = vargplvmUpdateStats(model, model.X_u);
20 |
21 |
22 | Ainv = model.P1' * model.P1; % size: k x k (k = number of inducing points)
23 |
24 | if ~isfield(model,'alpha')
25 | if isfield(model, 'mOrig')
26 | model.alpha = Ainv*model.Psi1'*model.mOrig; % size: k x D
27 | else
28 | model.alpha = Ainv*model.Psi1'*model.m; % size: k x D
29 | end
30 | end
31 | Psi1_star = kernVardistPsi1Compute(model.kern, vardistX, model.X_u);
32 |
33 | % mean prediction
34 | mu = Psi1_star*model.alpha; % size: (num. test points) x D
35 |
36 | if nargout > 1
37 | %
38 | % precomputations
39 | vard = vardistCreate(zeros(1,model.q), model.q, 'gaussian');
40 | Kinvk = (model.invK_uu - (1/model.beta)*Ainv);
41 | %
42 | for i=1:size(vardistX.means,1)
43 | %
44 | vard.means = vardistX.means(i,:);
45 | vard.covars = vardistX.covars(i,:);
46 | % compute psi0 term
47 | Psi0_star = kernVardistPsi0Compute(model.kern, vard);
48 | % compute psi2 term
49 | Psi2_star = kernVardistPsi2Compute(model.kern, vard, model.X_u);
50 |
51 | vars = Psi0_star - sum(sum(Kinvk.*Psi2_star));
52 |
53 | for j=1:model.d
54 | %[model.alpha(:,j)'*(Psi2_star*model.alpha(:,j)), mu(i,j)^2]
55 | varsigma(i,j) = model.alpha(:,j)'*(Psi2_star*model.alpha(:,j)) - mu(i,j)^2;
56 | end
57 | varsigma(i,:) = varsigma(i,:) + vars;
58 | %
59 | end
60 | %
61 | if isfield(model, 'beta')
62 | varsigma = varsigma + (1/model.beta);
63 | end
64 | %
65 | end
66 |
67 | % Rescale the mean
68 | mu = mu.*repmat(model.scale, size(vardistX.means,1), 1);
69 |
70 | % Add the bias back in
71 | mu = mu + repmat(model.bias, size(vardistX.means,1), 1);
72 |
73 | % rescale the variances
74 | if nargout > 1
75 | varsigma = varsigma.*repmat(model.scale.*model.scale, size(vardistX.means,1), 1);
76 | end
77 |
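78 | % Example (a sketch; Xs / varXs stand for latent test means and variances):
79 | %   [mu, varsigma] = vargplvmPosteriorMeanVarHier(model.layer{2}.comp{1}, Xs, varXs);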
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmOptions.m:
--------------------------------------------------------------------------------
1 |
2 | % See also: hsvargplvm_init
3 |
4 | function [options, optionsDyn] = hsvargplvmOptions(globalOpt, timeStampsTraining, labelsTrain)
5 |
6 | if nargin < 2
7 | timeStampsTraining = [];
8 | end
9 |
10 | if nargin < 3
11 | labelsTrain = [];
12 | end
13 |
14 | %-- One options structure where there are some parts shared for all
15 | % models/layers and some parts specific for a few layers / submodels.
16 |
17 |
18 | options = vargplvmOptions('dtcvar');
19 |
20 | % Taken from globalOpt
21 | options.H = globalOpt.H;
22 | options.baseKern = globalOpt.baseKern;
23 | options.Q = globalOpt.Q;
24 | options.K = globalOpt.K;
25 | options.enableDgtN = globalOpt.DgtN;
26 | options.initial_X = globalOpt.initial_X;
27 | options.initX = globalOpt.initX;
28 | options.multOutput = globalOpt.multOutput;
29 | if options.multOutput > 2
30 | warning('Multoutput > 2 has wrong derivatives!!')
31 | end
32 | %
33 | options.optimiser = 'scg2';
34 |
35 |
36 |
37 | % !!!!! Be careful to use the same type of scaling and bias for all models!!!
38 | % scale = std(Ytr);
39 | % scale(find(scale==0)) = 1;
40 | %options.scaleVal = mean(std(Ytr));
41 | % options.scaleVal = sqrt(var(Ytr{i}(:))); %%% ??
42 | options.scale2var1 = globalOpt.scale2var1;
43 |
44 | options.fixInducing = globalOpt.fixInducing;
45 |
46 | %----- Parent prior (we call priors "dynamics", but it can actually be some
47 | % other type of prior, e.g. labels etc.).
48 | % The relevant fields of globalOpt are coming from the svargplvm_init
49 | % (called within hsvargplvm_init).
50 | if isempty(globalOpt.dynamicsConstrainType) || nargout < 2
51 | optionsDyn = [];
52 | else
53 | if ~isempty(labelsTrain)
54 | optionsDyn.labelsTrain = labelsTrain;
55 | end
56 | % This does not mean it needs time inputs, it's just saying that it'll
57 | % use the kernel types and reparametrization used in VGPDS (regressive
58 | % "dynamics" etc).
59 | optionsDyn.type = 'vargpTime';
60 | optionsDyn.inverseWidth=30;
61 | optionsDyn.vardistCovars = globalOpt.vardistCovarsMult;
62 | if iscell(globalOpt.initX)
63 | optionsDyn.initX = globalOpt.initX{end};
64 | else
65 | optionsDyn.initX = globalOpt.initX;
66 | end
67 | optionsDyn.constrainType = globalOpt.dynamicsConstrainType;
68 | if ~isempty(timeStampsTraining)
69 | optionsDyn.t = timeStampsTraining;
70 | end
71 | optionsDyn.kern = globalOpt.dynamicKern;
72 | optionsDyn.initCovarMedian = globalOpt.dynamicsInitCovarMedian;
73 | optionsDyn.initCovarMedianLowest = globalOpt.initCovarMedianLowest;
74 | optionsDyn.initCovarMedianHighest = globalOpt.initCovarMedianHighest;
75 | end
76 |
77 |
78 |
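79 | % Example (a sketch): build the options structs from a globalOpt created by
80 | % hsvargplvm_init; time stamps / labels are only needed for dynamics priors:
81 | %   [options, optionsDyn] = hsvargplvmOptions(globalOpt);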
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmStaticImageVisualise.m:
--------------------------------------------------------------------------------
1 | %{
2 | dataType = 'image';
3 | varargs{1} = [16 16];
4 | varargs{2} = 1;
5 | varargs{3} = 1;
6 | varargs{4} = 1;
7 | hsvargplvmStaticImageVisualise(mm2, Y, [], [dataType 'Visualise'], 0.03, varargs{:});
8 |
9 | varargin = varargs;
10 | visualiseFunction = 'imageVisualise';
11 | axesWidth = 0.03;
12 | Y = Ytr{1};
13 | %}
14 |
15 |
16 |
17 | function hsvargplvmStaticImageVisualise(mm2, Y, remPoints, visualiseFunction, axesWidth, varargin)
18 |
19 | % HSVARGPLVMSTATICIMAGEVISUALISE Generate a scatter plot of the images without overlap.
20 |
21 | % GPLVM
22 |
23 | % set random seeds
24 | randn('seed', 1e5)
25 | rand('seed', 1e5)
26 |
27 | colordef white
28 |
29 |
30 | if isempty(remPoints)
31 | remPoints = true;
32 | end
33 |
34 | % Produce the underlying latent-space scatter plot
35 | try
36 | [plotAxes, data] = gplvmScatterPlot(mm2, []);
37 | catch e
38 | [plotAxes, data] = lvmScatterPlotNoVar2(mm2, []);
39 | end
40 |
41 | xLim = get(plotAxes, 'xLim');
42 | yLim = get(plotAxes, 'yLim');
43 | posit = get(plotAxes, 'position');
44 |
45 | widthVal = axesWidth*(xLim(2) - xLim(1))/posit(3);
46 | heightVal = axesWidth*(yLim(2) - yLim(1))/posit(4);
47 | numData = size(mm2.X, 1);
48 |
49 | visitOrder = randperm(numData);
50 | initVisitOrder = visitOrder;
51 |
52 | % Plot the images
53 | while ~isempty(visitOrder)
54 | i = visitOrder(1);
55 | if mm2.X(i, 1) > xLim(1) && mm2.X(i, 1) < xLim(2) ...
56 | && mm2.X(i, 2) > yLim(1) && mm2.X(i, 2) < yLim(2)
57 | point = invGetNormAxesPoint(mm2.X(i, :), plotAxes);
58 | x = point(1);
59 | y = point(2);
60 |
61 | digitAxes(i) = axes('position', ...
62 | [x - axesWidth/2 ...
63 | y - axesWidth/2 ...
64 | axesWidth ...
65 | axesWidth]);
66 | handle = feval(visualiseFunction, Y(i, :), varargin{:});
67 | colormap gray
68 | axis image
69 | axis off
70 |
71 | widthLimScale = 2.6; % ORIG: 2
72 | heightLimScale = 1.5; % Orig: 1
73 | if remPoints
74 | removePoints = find(abs(mm2.X(visitOrder, 1) - mm2.X(i, 1)) < widthVal/widthLimScale ...
75 | & abs(mm2.X(visitOrder, 2) - mm2.X(i, 2)) < heightVal/heightLimScale);
76 | visitOrder(removePoints) = [];
77 | else
78 | removePoints = find(abs(mm2.X(visitOrder, 1) - mm2.X(i, 1)) < widthVal/50 ...
79 | & abs(mm2.X(visitOrder, 2) - mm2.X(i, 2)) < heightVal/50);
80 | visitOrder(removePoints) = [];
81 | end
82 | else
83 | visitOrder(1) = [];
84 | end
85 | end
86 | set(plotAxes, 'xlim', xLim);
87 | set(plotAxes, 'ylim', yLim);
88 | set(data, 'visible', 'off');
89 | %ticks = [-4 -2 0 2 4];
90 | %set(plotAxes, 'xtick', ticks)
91 | %set(plotAxes, 'ytick', ticks)
92 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmCreateOptions.m:
--------------------------------------------------------------------------------
1 |
2 | % Some options are given in globalOpt as cell arrays, some are given as
3 | % single values, meaning that they are to be propagated to each layer. This
4 | % function builds a complete struct with options. After this function is
5 | % run, every field F that varies per layer becomes a 2-D cell array:
6 | % options.F{h}{m}
7 | % where h indexes layers and m indexes subsets of latent spaces (the m
8 | % index is always present: even if at layer h there is only a single
9 | % space, we still have a single cell, i.e.
10 | % F{h}{1}
11 | function options = hsvargplvmCreateOptions(Ytr, options, globalOpt)
12 |
13 | % Q
14 | if ~iscell(options.Q)
15 | Q = options.Q;
16 | options = rmfield(options, 'Q');
17 |
18 | for h=1:options.H
19 | options.Q{h} = Q;
20 | end
21 | end
22 |
23 | % initX
24 | if ~iscell(options.initX)
25 | initX = options.initX;
26 |
27 | options = rmfield(options, 'initX');
28 | for h=1:options.H
29 | options.initX{h} = initX;
30 | end
31 | end
32 |
33 |
34 | % K and M
35 | if ~iscell(Ytr)
36 | Ytr = {Ytr};
37 | end
38 |
39 | options.Kold = options.K;
40 | options = rmfield(options, 'K');
41 |
42 | M{1} = length(Ytr);
43 | N = size(Ytr{1},1);
44 | for h=1:options.H
45 | for m = 1:M{h}
46 | if ~iscell(options.Kold)
47 | options.K{h}{m} = options.Kold;
48 | else
49 | options.K{h}{m} = options.Kold{h}{m}; %%% new
50 | end
51 | if options.K{h}{m} == -1
52 | options.K{h}{m} = N;
53 | end
54 | end
55 | if h ~= options.H
56 | if options.multOutput > h + 1
57 | M{h+1} = options.Q{h}; % (was options.Q{H}: capital H is undefined here)
58 | else
59 | M{h+1} = 1;
60 | end
61 | end
62 | end
63 | options.M = M;
64 | options = rmfield(options, 'Kold');
65 |
66 |
67 |
68 | % kern, SNR
69 | %if isfield(options, 'kern')
70 | options = rmfield(options, 'kern');
71 | %end
72 | for h=1:options.H
73 | for m=1:options.M{h}
74 | % Kern
75 | if iscell(options.baseKern) && iscell(options.baseKern{1})
76 | options.kern{h}{m} = globalOpt.baseKern{h}{m}; %{'rbfard2', 'bias', 'white'};
77 | else
78 | options.kern{h}{m} = globalOpt.baseKern;
79 | end
80 |
81 | if isfield(globalOpt, 'inputScales') && ~isempty(globalOpt.inputScales)
82 | options.inpScales{h} = globalOpt.inputScales;
83 | end
84 |
85 |
86 |
87 | %SNR
88 | if iscell(globalOpt.initSNR)
89 | options.initSNR{h} = globalOpt.initSNR{h};
90 | else
91 | options.initSNR{h} = globalOpt.initSNR;
92 | end
93 | end
94 | end
95 | %options.baseKern = options.kern; % Unnecessary...
96 |
97 |
98 |
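99 | % Example (a sketch): expand the shared options into per-layer/per-modality cells:
100 | %   optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
101 | %   optionsAll.K{h}{m} % number of inducing points for layer h, modality m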
--------------------------------------------------------------------------------
/deepGP/matlab/demToyDynamicsPredictions.m:
--------------------------------------------------------------------------------
1 | if ~(exist('runDeepGP','var') && ~runDeepGP) % run unless runDeepGP was explicitly set to false
2 | [Testmeans Testcovars] = vargplvmPredictPoint(model.layer{end}.dynamics, Xstar);
3 | %[mu, varsigma]
4 | %mu = hsvargplvmPosteriorMeanVar(model, Testmeans2, Testcovars2, model.H, 1, -1);
5 | [mu, varsigma] = hsvargplvmPosteriorMeanVarSimple(model, Testmeans, Testcovars);
6 |
7 | [TestmeansIn TestcovarsIn] = vargplvmPredictPoint(modelInitVardist.layer{end}.dynamics, Xstar);
8 | [muIn, varsigmaIn] = hsvargplvmPosteriorMeanVarSimple(model, TestmeansIn, TestcovarsIn);
9 |
10 | errorDeepGP = sum(mean(abs(mu-Yts{1}),1));
11 | errorDeepGPNoCovars = sum(mean(abs(hsvargplvmPosteriorMeanVarSimple(model, Testmeans)-Yts{1}),1));
12 | errorDeepGPIn = sum(mean(abs(muIn-Yts{1}), 1));
13 |
14 | %{
15 | muAll = 0;
16 | for i=1:140
17 | [muCur varsigmaCur] = hsvargplvmPosteriorMeanVarSimple(model, Testmeans);%, Testcovars);
18 | muAll = muAll + gaussianSample(muCur, varsigmaCur) ;
19 | end
20 | muAll = muAll ./ 140;
21 | sum(mean(abs(muAll-Yts{1}),1))
22 | %}
23 | [TestmeansTr TestcovarsTr] = vargplvmPredictPoint(model.layer{end}.dynamics, inpX);
24 | errorRecDeepGP = sum(mean(abs(hsvargplvmPosteriorMeanVarSimple(model, TestmeansTr, TestcovarsTr)-Ytr{1}),1));
25 | errorRecDeepGPNoCovars = sum(mean(abs(hsvargplvmPosteriorMeanVarSimple(model, TestmeansTr)-Ytr{1}),1));
26 | end
27 |
28 | errorMean = sum(mean(abs(repmat(mean(Ytr{1}),size(Yts{1},1),1) - Yts{1}),1));
29 |
30 | for dd=1:size(Ytr{1},2)
31 | [p, ErrorEst] = polyfit(inpX,Ytr{1}(:,dd),2); % degree-2 polynomial fit (despite the 'LinReg' name)
32 | yLinReg(:,dd)=polyval(p,Xstar);
33 | end
34 | errorLinReg = sum(mean(abs(yLinReg - Yts{1}),1));
35 |
36 |
37 |
38 | fprintf('\n\n#### ERRORS:\n')
39 | try
40 | fprintf('# Error GP pred : %.4f\n', errorGP);
41 | fprintf('# Error GPfitc pred : %.4f\n', errorGPfitc);
42 | end
43 | try
44 | fprintf('# Error DeepGP pred : %.4f / %.4f (with/without covars)\n', errorDeepGP, errorDeepGPNoCovars);
45 | fprintf('# Error DeepGPInitPred : %.4f\n',errorDeepGPIn);
46 | end
47 | if runVGPDS
48 | fprintf('# Error VGPDS pred : %.4f\n', errorVGPDS);
49 | fprintf('# Error VGPDSInVpred : %.4f\n', errorVGPDSIn);
50 | end
51 | fprintf('\n')
52 | fprintf('# Error Mean : %.4f\n', errorMean);
53 | fprintf('# Error LinReg : %.4f\n', errorLinReg);
54 | try, fprintf('# Error GP rec : %.4f\n', errorRecGP);end
55 | try, fprintf('# Error DeepGP rec : %.4f / %.4f (with/without covars)\n', errorRecDeepGP, errorRecDeepGPNoCovars); end
56 | if runVGPDS
57 | fprintf('# Error VGPDS rec : %.4f\n', errorRecVGPDS);
58 | end
59 | %%
60 | %{
61 |
62 | close
63 | for d=1:size(Yts{1},2)
64 | plot(Yts{1}(:,d), '.-');
65 | hold on
66 | plot(muGP(:,d), 'x:g');
67 | plot(mu(:,d), 'o--r');
68 | legend('orig', 'gp','gplvm');
69 | pause
70 | hold off
71 | end
72 |
73 | %}
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmLogLikelihood.m:
--------------------------------------------------------------------------------
1 | function ll = hsvargplvmLogLikelihood(model)
2 |
3 | F_leaves = hsvargplvmLogLikelihoodLeaves(model.layer{1});
4 | F_nodes = hsvargplvmLogLikelihoodNode(model);
5 | F_entropies = hsvargplvmLogLikelihoodEntropies(model);
6 | % This refers to the KL quantity of the top node. The likelihood part is
7 | % computed in hsvargplvmLogLikelihoodNode.
8 | F_parent = hsvargplvmLogLikelihoodParent(model.layer{model.H});
9 |
10 | ll = F_leaves + F_nodes + F_entropies + F_parent;
11 |
12 | %--- Contribution of any priors placed on model parameters
13 | % %{
14 | for h=1:model.H
15 | % If there's a prior on some parameter, add the (minus) corresponding
16 | % likelihood.
17 | for m=1:model.layer{h}.M
18 | ll = ll + vargplvmParamPriorLogProb(model.layer{h}.comp{m});
19 | end
20 | end
21 | % %}
22 | %---
23 |
24 | end
25 |
26 | % The ln p(Y|X) terms
27 | function F_leaves = hsvargplvmLogLikelihoodLeaves(modelLeaves)
28 |
29 | F_leaves = 0;
30 | for m=1:modelLeaves.M
31 | modelLeaves.comp{m}.onlyLikelihood = true;
32 | F_leaves = F_leaves + vargplvmLogLikelihood(modelLeaves.comp{m});
33 | end
34 | end
35 |
36 | % The <ln p(X_{h-1}|X_h)>_{q(X_{h-1})} nodes
37 | function F_nodes = hsvargplvmLogLikelihoodNode(model)
38 | F_nodes = 0;
39 | for h=2:model.H
40 | % It's just like the leaves computation; the only difference is the
41 | % trace(Y*Y') term, which is now replaced by an expectation w.r.t. the
42 | % latent space of the previous layer. However, this replacement is done
43 | % in hsvargplvmUpdateStats, so we don't have to worry about it here.
44 | F_nodes = F_nodes + hsvargplvmLogLikelihoodLeaves(model.layer{h});
45 | end
46 |
47 | end
48 |
49 | % The H_{q(X_h)} nodes, h ~= H
50 | function F_entropies = hsvargplvmLogLikelihoodEntropies(model)
51 | F_entropies = 0;
52 | for h=1:model.H-1
53 | vardist = model.layer{h}.vardist;
54 | F_entropies = F_entropies + 0.5*(vardist.numData*vardist.latentDimension* ...
55 | (log(2*pi) + 1) + sum(sum(log(vardist.covars))));
56 | end
57 | if isfield(model, 'DEBUG_entropy') && model.DEBUG_entropy
58 | F_entropies = - F_entropies;
59 | end
60 |
61 | end
62 |
63 | % The -KL[q(X_H) || p(X_H)]
64 | function F_parent = hsvargplvmLogLikelihoodParent(modelParent)
65 | %if modelParent.M > 1
66 | % warning('Not implemented multiple models in parent node yet')
67 | %end
68 | % Copied from vargplvmLogLikelihood:
69 | if isfield(modelParent, 'dynamics') && ~isempty(modelParent.dynamics)
70 | % A dynamics model is being used.
71 | F_parent = modelVarPriorBound(modelParent);
72 | F_parent = F_parent + 0.5*modelParent.q*modelParent.N; %%% The constant term!!
73 | else
74 | varmeans = sum(sum(modelParent.vardist.means.*modelParent.vardist.means));
75 | varcovs = sum(sum(modelParent.vardist.covars - log(modelParent.vardist.covars)));
76 | F_parent = -0.5*(varmeans + varcovs) + 0.5*modelParent.q*modelParent.N;
77 | end
78 | end
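79 | % For reference, the bound assembled above has the form (a sketch, matching
80 | % the four terms computed in this file, plus any parameter-prior terms):
81 | %   F = sum_m <ln p(Y_m | X_1)>              (leaves)
82 | %     + sum_{h=2}^{H} <ln p(X_{h-1} | X_h)>  (intermediate nodes)
83 | %     + sum_{h=1}^{H-1} H[q(X_h)]            (entropies)
84 | %     - KL[ q(X_H) || p(X_H) ]               (parent)
85 | % where the expectations <.> are w.r.t. the variational distributions q.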
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmShowScales.m:
--------------------------------------------------------------------------------
1 | function scales = hsvargplvmShowScales(model, displ, varargin)
2 | if nargin < 2 || isempty(displ)
3 | displ = true;
4 | end
5 |
6 | % Layers to visualise
7 | layers = model.H:-1:1;
8 |
9 | % Do not show individual scales in multi-output mode
10 | skipComp = false;
11 |
12 | displayAsMatrix = false;
13 |
14 | if nargin > 2
15 | if ~isempty(varargin{1}), layers = varargin{1}; end
16 | if length(varargin) > 1 && ~isempty(varargin{2})
17 | skipComp = varargin{2};
18 | end
19 | if length(varargin) > 2 && ~isempty(varargin{3})
20 | displayAsMatrix = varargin{3};
21 | end
22 | end
23 |
24 | %{
25 | if ~model.multOutput
26 | for h=1:model.H
27 | if displ
28 | subplot(model.H,1,h)
29 | end
30 | vargplvmShowScales(model.layer{h}.comp{1},displ); title(num2str(h));
31 | end
32 | return
33 | end
34 | %}
35 |
36 | for hCount=1:length(layers)
37 | h = layers(hCount);
38 | scalesAll{h} = zeros(1, model.layer{h}.q);
39 | if model.layer{h}.M > 10 && displ && ~displayAsMatrix
40 | for i=1:model.layer{h}.M
41 | sc = vargplvmShowScales(model.layer{h}.comp{i}, ~skipComp);
42 | scalesAll{h} = scalesAll{h} + sc;
43 | if ~skipComp
44 | title(['Scales for layer ' num2str(h) ', model ' num2str(i)])
45 | pause
46 | end
47 | end
48 | scalesAll{h} = scalesAll{h} ./ max(scalesAll{h});
49 | else
50 | if displ && ~displayAsMatrix
51 | if ~model.multOutput
52 | subplot(model.H,1,hCount)
53 | else
54 | figure
55 | end
56 | end
57 | scales{h} = svargplvmShowScales(model.layer{h}, (displ && ~displayAsMatrix));
58 | if displ && ~displayAsMatrix
59 | title(['Layer ' num2str(h)]);
60 | end
61 | if model.layer{h}.M < 2
62 | scalesAll{h} = scales{h}{1};
63 | scalesAll{h} = scalesAll{h} ./ max(scalesAll{h});
64 | end
65 | end
66 |
67 | if model.layer{h}.M > 10 && displ && ~displayAsMatrix
68 | bar(scalesAll{h}); title(['Normalised sum of scales for layer ' num2str(h)])
69 | end
70 | end
71 |
72 | if displayAsMatrix
73 | maxQ = length(scalesAll{1});
74 | for h = 2:model.H
75 | if length(scalesAll{h}) > maxQ
76 | maxQ = length(scalesAll{h});
77 | end
78 | end
79 |
80 | scalesAllMat = zeros(model.H, maxQ);
81 | for hh = 1:model.H
82 | %h = hh; % This will put layer 1 on top
83 | h = model.H - hh +1; % This will put layer H on top
84 | for q = 1:maxQ
85 | if q <= length(scalesAll{hh})
86 | scalesAllMat(h,q) = scalesAll{hh}(q);
87 | else
88 | scalesAllMat(h,q) = NaN;
89 | end
90 | end
91 | end
92 | h=imagesc(scalesAllMat);
93 | set(h,'alphadata',~isnan(scalesAllMat))
94 | colorbar
95 | end
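96 |
97 | % Example (a sketch): bar plots of the ARD scales, one figure per layer,
98 | % or a single matrix image over all layers:
99 | %   hsvargplvmShowScales(model);                         % per-layer bar plots
100 | %   hsvargplvmShowScales(model, true, [], false, true);  % matrix view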
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmRegressionInitX.m:
--------------------------------------------------------------------------------
1 | function [globalOpt, options, optionsDyn, initXOptions] = ...
2 | hsvargplvmRegressionInitX(globalOpt, options, optionsDyn, inpX, Ytr, stackedOpt)
3 |
4 | if nargin < 6 || isempty(stackedOpt)
5 | stackedOpt = [];
6 | end
7 |
8 | % ---- Potential special initialisations for X -----
9 | if ~iscell(globalOpt.initX) && strcmp(globalOpt.initX, 'inputs')
10 | options = rmfield(options, 'initX');
11 | for i=1:options.H
12 | options.initX{i} = inpX;
13 | end
14 | optionsDyn.initX = inpX;
15 | globalOpt.initX = options.initX;
16 | end
17 |
18 | % Initialise half of the latent spaces with inputs, half with PCA on outputs
19 | if ~iscell(globalOpt.initX) && strcmp(globalOpt.initX, 'inputsOutputs')
20 | options = rmfield(options, 'initX');
21 | if iscell(globalOpt.Q)
22 | oldQ = globalOpt.Q{end};
23 | else
24 | oldQ = globalOpt.Q;
25 | end
26 | for i=options.H:-1:floor(options.H/2)+1
27 | options.initX{i} = inpX;
28 | Q{i} = size(inpX,2);
29 | end
30 | optionsDyn.initX = inpX;
31 |
32 | YtrScaled = scaleData(Ytr{1}, options.scale2var1);
33 | Xpca = ppcaEmbed(YtrScaled, oldQ);
34 | for i=1:floor(options.H/2)
35 | options.initX{i} = Xpca;
36 | Q{i} = oldQ;
37 | end
38 | options.Q = Q;
39 | globalOpt.Q = Q;
40 | globalOpt.initX = options.initX;
41 | end
42 |
43 |
44 | % Just rewrite all options into a struct of cells
45 | optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
46 | % The following for loop just adds the extra possibility
47 | % of initialising the latent space with Bayesian GPLVM or GPLVM (see
48 | % hsvargplvm_init on how to activate this).
49 | initXOptions = cell(1, options.H);
50 | for h=1:options.H
51 | if strcmp(optionsAll.initX{h}, 'vargplvm') || strcmp(optionsAll.initX{h}, 'fgplvm')
52 | initXOptions{h}{1} = optionsAll;
53 | % Don't allow the D >> N trick for layers > 1
54 | if h~=1
55 | if isfield(initXOptions{h}{1}, 'enableDgtN')
56 | initXOptions{h}{1}.enableDgtN = false;
57 | end
58 | end
59 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
60 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
61 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
62 | initXOptions{h}{1}.initX = 'ppca';
63 | initXOptions{h}{1}.initSNR = 90;
64 | initXOptions{h}{1}.numActive = 50;
65 | initXOptions{h}{2} = 160;
66 | initXOptions{h}{3} = 30;
67 | if isfield(stackedOpt, 'stackedInitVardistIters'), initXOptions{h}{2} = stackedOpt.stackedInitVardistIters; end
68 | if isfield(stackedOpt, 'stackedInitIters'), initXOptions{h}{3} = stackedOpt.stackedInitIters; end
69 | if isfield(stackedOpt, 'stackedInitSNR'), initXOptions{h}{1}.initSNR = stackedOpt.stackedInitSNR; end
70 | if isfield(stackedOpt, 'stackedInitK'), initXOptions{h}{1}.numActive = stackedOpt.stackedInitK; end
71 | else
72 | initXOptions{h} = {};
73 | end
74 | end
--------------------------------------------------------------------------------
/deepGP/matlab/demHighFiveDemonstration.m:
--------------------------------------------------------------------------------
1 | clear
2 |
3 | %--- Load data and model ------
4 | dataSetName = 'highFive';
5 | hsvargplvm_init;
6 |
7 | % Load data
8 | Y = vargplvmLoadData('demHighFiveHgplvm1');
9 | Yall{1} = Y.YA;
10 | Yall{2} = Y.YB;
11 | %Yall{1} = vargplvmLoadData('hierarchical/demHighFiveHgplvm1',[],[],'YA');
12 | %Yall{2} = vargplvmLoadData('hierarchical/demHighFiveHgplvm1',[],[],'YB');
13 |
14 | % Load pre-trained model
15 | try
16 | load demHighFiveHsvargplvm9
17 | catch e
18 | if strcmp(e.identifier, 'MATLAB:load:couldNotReadFile')
19 | error('Seems like you are missing the .mat file which contains the training model! Check the matFiles folder!')
20 | end
21 | error(e.getReport)
22 | end
23 |
24 | % Restore model
25 | for h=1:model.H
26 | model.layer{h}.comp{1}.latentIndices ={};
27 | end
28 | model = hsvargplvmRestorePrunedModel(model, Yall);
29 | model.multOutput=false;
30 |
31 | %% DEMO
32 | warning off
33 | fprintf('!! I''m turning off your MATLAB warnings...! \n\n');
34 | fprintf('#------ Description --------\n\n');
35 | fprintf('# We trained a 2-layer deep GP on two separate modalities, each representing \n')
36 | fprintf('# a person walking, approaching the other subject and doing a high-five. \n')
37 | fprintf('# For this demo we did not use any dynamics but nevertheless the latent spaces \n')
38 | fprintf('# found are quite similar to the ones for hgplvm. \n\n')
39 |
40 | close all
41 | fprintf('#------ SCALES --------\n\n');
42 | fprintf('# Here are the optimised lengthscales (upside-down; parent is at bottom...)\n')
43 | fprintf('# Press any key to continue...')
44 | hsvargplvmShowScales(model);
45 |
46 | pause
47 |
48 | fprintf('\n\n#------- SAMPLING -------\n\n');
49 | fprintf('#---- Sampling from the intermediate layer\n')
50 | fprintf('#-- 1st modality (blue scales), shared space (2 vs 7)... \n')
51 | fprintf('# Wait to load... and then press any key to continue... \n')
52 | close all
53 | model.vis.startDim = {2 7};
54 | hsvargplvmShowSkel2(model, 1, 1);
55 | figure; hsvargplvmShowScales(model);
56 |
57 | pause
58 |
59 | fprintf('#-- 2nd modality (red scales), shared space (2 vs 7)...\n')
60 | fprintf('# Wait to load... and then press any key to continue... \n')
61 | close all
62 | model.vis.startDim = {2 7};
63 | hsvargplvmShowSkel2(model, 2, 1);
64 | figure; hsvargplvmShowScales(model);
65 |
66 | pause
67 |
68 | fprintf('#-- 2nd modality (red scales), private space (8 vs 9)... \n')
69 | fprintf('# Wait to load... and then press any key to continue... \n')
70 | close all
71 | model.vis.startDim = {8 9};
72 | hsvargplvmShowSkel2(model, 2, 1);
73 | figure; hsvargplvmShowScales(model);
74 |
75 | pause
76 |
77 | fprintf('#---- Sampling from the PARENT layer\n')
78 | fprintf('#-- 2nd modality (blue scales), dominant dimensions (1 vs 3) ... \n')
79 | fprintf('# Wait to load... and then press any key to continue... \n')
80 | close all
81 | model.vis.startDim = {1 3};
82 | hsvargplvmShowSkel2(model, 2, 2);
83 | figure; hsvargplvmShowScales(model);
84 |
85 | pause
86 |
87 |
88 | fprintf('\n\n# End of demo! Feel free to experiment with different combinations of dimensions.')
89 |
90 | warning on
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmLoadSkelData.m:
--------------------------------------------------------------------------------
1 | function [Y, skel, remIndices] = hsvargplvmLoadSkelData(subject, trimDims)
2 |
3 | if nargin < 2
4 | trimDims = 1;
5 | end
6 | % ------- LOAD DATASET
7 |
8 | switch subject
9 | case '13'
10 | examples = {'14','16','17','18'}; % laugh, laugh, box, box
11 | try
12 | load 'cmu13_14_16_17_18.mat';
13 | catch
14 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '13', examples);
15 | save 'cmu13_14_16_17_18.mat' 'Y' 'lbls' 'skel'
16 | end
17 | Ynew{1}=Y(1:15:end,:);
18 | case '93'
19 | examples = {'02'};
20 | try
21 | load 'cmu93_02.mat';
22 | catch
23 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '93', examples);
24 | save 'cmu93_02.mat' 'Y' 'lbls' 'skel'
25 | end
26 | Ynew{1}=Y([1:3:100 101:2:270],:);
27 | case '02'
28 | examples= {'01','05','10'};
29 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '02', examples);
30 | seq = cumsum(sum(lbls)) - [1:31];
31 | seq = seq(1:length(examples));
32 | % Ywalk = Y(1:3:70,:); % Orig: Y(1:85,:);
33 | % Ypunch = Y(86:13:548,:); % Orig: Y(86:548,:);
34 | % Ywash = Y(549:20:1209,:); % Orig: Y(549:1209,:);
35 | Ynew{1} = Y(1:2:70,:); % Orig: Y(1:85,:); % Walk
36 | Ynew{2} = Y(86:4:250,:); % Orig: Y(86:548,:); % Punch
37 | Ynew{3} = Y(830:6:1140,:); % Orig: Y(549:1209,:); % Wash
38 | case '17'
39 | %[Y,skel, channels] = loadMocapData();
40 | % Motions: angry walk, hobble, whistle/jauntily, box
41 | examples = {'02','05','07','10'};
42 | try
43 | load cmu17_02_05_07_10.mat
44 | catch
45 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '17', examples);
46 | save 'cmu17_02_05_07_10.mat' 'Y' 'lbls' 'Ytest' 'lblstest' 'skel'
47 | end
48 | seq = cumsum(sum(lbls)) - [1:31];
49 | seq = seq(1:length(examples));
50 | Ynew{1} = Y(1:10:400,:); % Orig: Y(1:85,:);
51 | Ynew{2} = Y(seq(1)+1:40:seq(2)-280,:); % Orig: Y(86:548,:);
52 | Ynew{3} = Y(seq(2)+1:55:seq(3)-200,:); % Orig: Y(549:1209,:);
53 | Ynew{4} = Y(seq(3)+1:8:5000,:);
54 | case '20'
55 | %[Y,skel, channels] = loadMocapData();
56 | % Examples for subject 20
57 | examples = {'01','05','11'};
58 | try
59 | load cmu20_01_05_11.mat
60 | catch
61 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '20', examples);
62 | save 'cmu20_01_05_11.mat' 'Y' 'lbls' 'skel'
63 | end
64 | seq = cumsum(sum(lbls)) - [1:31];
65 | seq = seq(1:length(examples));
66 | Ynew{1} = Y;
67 | end
68 | Yorig = Y;
69 |
70 |
71 |
72 | if trimDims
73 | for i=1:length(Ynew)
74 | [channels, xyzDiffIndices{i}, rotInd{i}] = skelGetChannels(Ynew{i});
75 | %%% ATTENTION: 'remIndices' should be the same for all!! (for
76 | %%% consistency)
77 | remIndices{i} = [xyzDiffIndices{i} rotInd{i}];
78 | Ynew{i}(:, remIndices{i}) = zeros(size(Ynew{i}(:, remIndices{i}) ));
79 | end
80 | else
81 | remIndices = {};
82 | end
83 |
84 | Y = [];
85 | for i=1:length(Ynew)
86 | Y = [Y ; Ynew{i}];
87 | end
88 |
89 | %{
90 | [channels] = skelGetChannels(Ynew{1});
91 | close; skelPlayData(skel, channels, 1/5);
92 | %}
--------------------------------------------------------------------------------
/deepGP/matlab/lvmScatterPlotNoVar2.m:
--------------------------------------------------------------------------------
1 | function [ax, data] = lvmScatterPlotNoVar2(model, YLbls, ax, dims, defaultVals, figHandle)
2 |
3 | % A temporary replacement for lvmScatterPlotNoVar; here we just use
4 | % our own symbols...
5 | if nargin < 5
6 | defaultVals = zeros(1, size(model.X, 2));
7 |
8 | if nargin < 4
9 | dims = [1, 2];
10 | if nargin<3
11 | ax = [];
12 | if nargin < 2
13 | YLbls = [];
14 | end
15 | end
16 | end
17 | end
18 | if isempty(YLbls)
19 | symbol = {'w.'}; % getSymbols(1);
20 | else
21 | if iscell(YLbls)
22 | symbol = getSymbols(size(YLbls{1},2));
23 | else
24 | symbol = getSymbols(size(YLbls,2));
25 | end
26 | end
27 | x1Min = min(model.X(:, dims(1)));
28 | x1Max = max(model.X(:, dims(1)));
29 | x1Span = x1Max - x1Min;
30 | x1Min = x1Min - 0.05*x1Span;
31 | x1Max = x1Max + 0.05*x1Span;
32 | x1 = linspace(x1Min, x1Max, 150);
33 |
34 | x2Min = min(model.X(:, dims(2)));
35 | x2Max = max(model.X(:, dims(2)));
36 | x2Span = x2Max - x2Min;
37 | x2Min = x2Min - 0.05*x2Span;
38 | x2Max = x2Max + 0.05*x2Span;
39 | x2 = linspace(x2Min, x2Max, 150);
40 |
41 | %if size(model.X, 2)==2
42 |
43 | %try
44 | % [X1, X2] = meshgrid(x1, x2);
45 | % XTest = repmat(defaultVals, prod(size(X1)), 1);
46 | % XTest(:, dims(1)) = X1(:);
47 | % XTest(:, dims(2)) = X2(:);
48 | % varsigma = modelPosteriorVar(model, XTest);
49 | % posteriorVarDefined = true;
50 | %catch
51 | % [lastMsg, lastId] = lasterr;
52 | % disp(lastId)
53 | % if isoctave || strcmp(lastId, 'MATLAB:UndefinedFunction')
54 | posteriorVarDefined = false;
55 | % else
56 | % rethrow(lasterror);
57 | % end
58 | %end
59 | if posteriorVarDefined
60 | d = model.d;
61 | if size(varsigma, 2) == 1
62 | dataMaxProb = -0.5*d*log(varsigma);
63 | else
64 | dataMaxProb = -.5*sum(log(varsigma), 2);
65 | end
66 |
67 | if isempty(ax)
68 | figure(1)
69 | clf
70 | % Create the plot for the data
71 | ax = axes('position', [0.05 0.05 0.9 0.9]);
72 | else
73 | axes(ax);
74 | end
75 | hold on
76 |
77 | C = reshape(dataMaxProb, size(X1));
78 |
79 | % Rescale it
80 | C = C - min(min(C));
81 | if max(max(C))~=0
82 | C = C/max(max(C));
83 | C = round(C*63);
84 | image(x1, x2, C);
85 | end
86 |
87 | %[c, h] = contourf(X1, X2, log10(reshape(1./varsigma(:, 1), size(X1))), 128);
88 | % shading flat
89 | colormap gray;
90 | %colorbar
91 | end
92 |
93 | if nargin > 5
94 | data = lvmTwoDPlot(model.X(:, dims), YLbls, symbol, ax);
95 | else
96 | data = lvmTwoDPlot(model.X(:, dims), YLbls, symbol);
97 | end
98 | switch model.type
99 | case 'dnet'
100 | plot(model.X_u(:, dims(1)), model.X_u(:, dims(2)), 'g.')
101 | end
102 | % elseif size(model.X, 2)==3
103 | % x3Min = min(model.X(:, 3));
104 | % x3Max = max(model.X(:, 3));
105 | % x3Span = x3Max - x3Min;
106 | % x3Min = x3Min - 0.05*x3Span;
107 | % x3Max = x3Max + 0.05*x3Span;
108 | % x3 = linspace(x3Min, x3Max, 150);
109 |
110 | % data = lvmThreeDPlot(model.X, YLbls, symbol);
111 | % end
112 |
113 | xLim = [min(x1) max(x1)];
114 | yLim = [min(x2) max(x2)];
115 | set(gca, 'xlim', xLim);
116 | set(gca, 'ylim', yLim);
117 | % if size(model.X, 2) == 3
118 | % zLim = [min(x3) max(x3)];
119 | % set(ax, 'zLim', zLim);
120 | % end
121 | set(gca, 'fontname', 'arial');
122 | set(gca, 'fontsize', 20);
123 |
124 | ax = gca;
125 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmOptimise.m:
--------------------------------------------------------------------------------
1 | function [model, gradEvaluations, objEvaluations, grChek] = hsvargplvmOptimise(model, display, iters, varargin)
2 |
3 | % HSVARGPLVMOPTIMISE Optimise a deep GP.
4 | % FORMAT
5 | % DESC takes a given deepGP model structure and optimises with
6 | % respect to parameters and latent positions.
7 | % ARG model : the model to be optimised.
8 | % ARG display : flag dictating whether or not to display
9 | % optimisation progress (set to greater than zero) (default value 1).
10 | % ARG iters : number of iterations to run the optimiser
11 | % for (default value 2000).
12 | % RETURN model : the optimised model.
13 | % RETURN gradEvaluations, objEvaluations : numbers of gradient and objective evaluations.
14 | % SEEALSO : hsvargplvmOptimiseModel, hsvargplvmObjective, hsvargplvmGradient
15 | %
16 | % COPYRIGHT: Andreas C. Damianou, 2012
17 |
18 | % DEEPGP
19 |
20 |
21 | if nargin < 3
22 | iters = 2000;
23 | if nargin < 2
24 | display = 1;
25 | end
26 | end
27 |
28 | gradEvaluations = 0;
29 | objEvaluations = 0;
30 |
31 | options = optOptions;
32 | params = modelExtractParam(model);
33 |
34 | if isfield(model, 'throwSNRError') && model.throwSNRError
35 | throwSNRError = true;
36 | else
37 | throwSNRError = false;
38 | end
39 |
40 | if isfield(model, 'SNRErrorLimit')
41 | SNRErrorLimit = model.SNRErrorLimit;
42 | else
43 | SNRErrorLimit = [];
44 | end
45 |
46 | if length(varargin) == 2
47 | if strcmp(varargin{1}, 'gradcheck')
48 | assert(islogical(varargin{2}));
49 | %options(9) = varargin{2};
50 | doGradchek = varargin{2};
51 | if doGradchek
52 | [gradient, delta] = feval('gradchek', params, @modelObjective, @modelGradient, model);
53 | deltaf = gradient - delta;
54 | d=norm(deltaf - gradient)/norm(gradient + deltaf); %%
55 | d1=norm(deltaf - gradient,1)/norm(gradient + deltaf,1); %%
56 | fprintf(1,' Norm1 difference: %d\n Norm2 difference: %d\n',d1,d);
57 | grChek.delta = delta;
58 | grChek.gradient = gradient;
59 | grChek.deltaf = deltaf;
60 | grChek.normDiff0 = d;
61 | grChek.normDiff1 = d1;
62 | else
63 | grChek = [];
64 | end
65 | end
66 | end
67 |
68 | options(2) = 0.1*options(2);
69 | options(3) = 0.1*options(3);
70 |
71 | if display
72 | options(1) = 1;
73 | if length(params) <= 100
74 | options(9) = 1; % gradchek
75 | end
76 | end
77 | options(14) = iters;
78 |
79 | if iters > 0
80 | if isfield(model, 'optimiser')
81 | optim = str2func(model.optimiser);
82 | else
83 | optim = str2func('scg');
84 | end
85 |
86 | if strcmp(func2str(optim), 'optimiMinimize')
87 | % Carl Rasmussen's minimize function
88 | params = optim('hsvargplvmObjectiveGradient', params, options, model);
89 | elseif strcmp(func2str(optim), 'scg2')
90 | % NETLAB style optimization with a slight modification so that an
91 | % objectiveGradient can be used where applicable, in order to re-use
92 | % precomputed quantities.
93 | [params, opt]= optim('hsvargplvmObjectiveGradient', params, options, 'hsvargplvmGradient', model);
94 | gradEvaluations = opt(11);
95 | objEvaluations = opt(10);
96 | else
97 | % NETLAB style optimization.
98 | [params, opt] = optim('hsvargplvmObjective', params, options, 'hsvargplvmGradient', model);
99 | gradEvaluations = opt(9);
100 | objEvaluations = opt(10);
101 | end
102 |
103 | model = hsvargplvmExpandParam(model, params);
104 |
105 | % Check SNR of optimised model
106 | hsvargplvmCheckSNR(hsvargplvmShowSNR(model), SNRErrorLimit, [], throwSNRError);
107 | end
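108 |
109 | % Example (a sketch): optimise a model for 100 iterations, displaying progress:
110 | %   model = hsvargplvmOptimise(model, true, 100);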
--------------------------------------------------------------------------------
/deepGP/matlab/demToyUnsupervised.m:
--------------------------------------------------------------------------------
1 | % DEMTOYUNSUPERVISED A script to run unsupervised deep GP on toy hierarchical data.
2 | %
3 | % DESC A script to run unsupervised deep GP on toy hierarchical data. The script provides
4 | % the option of parametrising the model and initialisation in many many
5 | % different ways... but the core of the demo (define a deep GP and train
6 | % it) is actually not that big, if you decide to use the default options.
7 | %
8 | % COPYRIGHT: Andreas C. Damianou, 2013
9 | %
10 | % SEE ALSO: demToyRegression.m
11 | %
12 | % DEEPGP
13 |
14 |
15 | experimentNo = 1;
16 | toyType = 'hgplvmSampleTr1';
17 | baseKern='rbfardjit'; % The mapping kernel between the layers
18 | Q = {6,4}; % Dimensionality of the latent space in each layer
19 | initSNR = {100, 50}; % Initial Signal to Noise ratio per layer
20 | % How to initialise X when multiple output modalities are present. See
21 | % hsvargplvm_init for details
22 | initial_X = 'separately';
23 | % How to initialise X (more specifically, the means of q(X)) for each layer.
24 | % The selected method (potentially
25 | % different per layer) is applied in a sequential fashion, e.g. if we
26 | % use PCA we obtain X_1 from PCA on Y, then X_2 from PCA on X_1, etc. Deep
27 | % GPs are quite different from a stacked method, since the X's will be
28 | % integrated out (the initial X's above are actually the initial means of the var.
29 | % distribution) and everything will be optimised jointly.
30 | % Here we opt for a Bayesian GPLVM that gives the initial X.
31 | % See hsvargplvm_init for other options (eg pca).
32 | initX = 'vargplvm';
33 | %- options for the BayesianGPLVM used to initialise the variational means
34 | stackedInitIters = 200;
35 | stackedInitVardistIters = 100;
36 | stackedInitSNR = 100;
37 | initVardistIters = 100;
38 | demToyHsvargplvm1; % Run the actual demo
39 |
40 | %% --- Plot true data
41 | subplot(3,2,1)
42 | myPlot(Z{3},'X2',[],[],{3,8},0)
43 | subplot(3,2,3)
44 | myPlot(Z{1},'XA',[],[],{3,8},0)
45 | subplot(3,2,4)
46 | myPlot(Z{2},'XB',[],[],{3,8},0)
47 | subplot(3,2,5)
48 | plot(Ytr{1},'x-'); title('YA');
49 | subplot(3,2,6)
50 | plot(Ytr{2},'x-'); title('YB');
51 |
52 | %% -- Plot spaces discovered by deep GPs (two most dominant dimensions for
53 | %% top layer and similarly for each of the two modalities of layer 1)
54 | figure
55 | hsvargplvmShowScales(model);
56 |
57 | s2 = sort(vargplvmRetainedScales(model.layer{2}.comp{1}));
58 | sA = sort(vargplvmRetainedScales(model.layer{1}.comp{1}));
59 | sB = sort(vargplvmRetainedScales(model.layer{1}.comp{2}));
60 |
61 | figure
62 | subplot(2,2,1)
63 | myPlot(model.layer{2}.vardist.means(:,s2(1:2)),'deepGP_X2',[],[],{3,8},0)
64 | subplot(2,2,3)
65 | myPlot(model.layer{1}.vardist.means(:,sA(1:2)),'deepGP_XA',[],[],{3,8},0)
66 | subplot(2,2,4)
67 | myPlot(model.layer{1}.vardist.means(:,sB(1:2)),'deepGP_XB',[],[],{3,8},0)
68 |
69 |
70 | %% --- Compare with stacked Bayesian GP-LVM % TODO
71 | %[XA, s, WA, modelA] = vargplvmEmbed(Ytr{1}, 5, initXOptions{1}{:});
72 | %[XB, s, WB, modelB] = vargplvmEmbed(Ytr{2}, 5, initXOptions{1}{:});
73 | %[X2, s, W2, model2] = vargplvmEmbed([XA XB], 5, initXOptions{2}{:});
74 |
75 | %% --- Compare with stacked PCA and isomap
76 |
77 | figure
78 | pcaXA = ppcaEmbed(Ytr{1}, 2);
79 | pcaXB = ppcaEmbed(Ytr{2},2);
80 | pcaX2 = ppcaEmbed([pcaXA pcaXB],2);
81 | subplot(2,2,1)
82 | myPlot(pcaX2,'pcaX2',[],[],{3,8},0)
83 | subplot(2,2,3)
84 | myPlot(pcaXA,'pcaXA',[],[],{3,8},0)
85 | subplot(2,2,4)
86 | myPlot(pcaXB,'pcaXB',[],[],{3,8},0)
87 |
88 | figure
89 | isomapXA = isomap2Embed(Ytr{1}, 2);
90 | isomapXB = isomap2Embed(Ytr{2},2);
91 | isomapX2 = isomap2Embed([isomapXA isomapXB],2);
92 | subplot(2,2,1)
93 | myPlot(isomapX2,'isomapX2',[],[],{3,8},0)
94 | subplot(2,2,3)
95 | myPlot(isomapXA,'isomapXA',[],[],{3,8},0)
96 | subplot(2,2,4)
97 | myPlot(isomapXB,'isomapXB',[],[],{3,8},0)
98 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmRestorePrunedModel.m:
--------------------------------------------------------------------------------
1 | function model = hsvargplvmRestorePrunedModel(model, Ytr, onlyData, options)
2 | % HSVARGPLVMRESTOREPRUNEDMODEL Restore a pruned hierarchical var-GPLVM model.
3 | % FORMAT
4 | % DESC restores an hsvargplvm model which has been pruned, bringing it back
5 | % to the state it was in before pruning.
6 | % ARG model: the model to be restored
7 | % ARG Ytr: the training data (it has to be a cell array of length equal to
8 | % the number of leaf submodels)
9 | % ARG onlyData: only restore the data parts. Useful when saving a model which
10 | % is updated after predictions.
11 | % RETURN model : the hierarchical model after being restored
12 | %
13 | % COPYRIGHT: Andreas Damianou, 2011
14 | %
15 | % SEEALSO : hsvargplvmPruneModel, vargplvmRestorePrunedModel
16 |
17 | % SVARGPLVM
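%
% Usage sketch (file/variable names are illustrative): restore a model that
% was pruned with hsvargplvmPruneModel before saving, given the original
% training data cell array Ytr:
%   load('myPrunedModel.mat', 'prunedModel');
%   model = hsvargplvmRestorePrunedModel(prunedModel, Ytr);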
18 |
19 | if nargin <3
20 | onlyData = 0;
21 | end
22 |
23 | if nargin <4
24 | options = [];
25 | end
26 |
27 |
28 | % First, restore the leaves
29 | for i=1:model.layer{1}.M
30 | model.layer{1}.comp{i} = vargplvmRestorePrunedModel2(model.layer{1}.comp{i},Ytr{i}, onlyData, options);
31 | end
32 |
33 | % Now restore the rest
34 | for h=2:model.H
35 |     % The pseudo-data of layer h are the variational means of layer h-1
36 |     Ycur = model.layer{h-1}.vardist.means;
37 |     for i=1:model.layer{h}.M
38 |         model.layer{h}.comp{i} = vargplvmRestorePrunedModel2(model.layer{h}.comp{i},Ycur, onlyData, options);
39 |         % Non-leaf layers (h >= 2) do not keep the 'y' field
40 |         model.layer{h}.comp{i} = rmfield(model.layer{h}.comp{i}, 'y');
41 |     end
42 | end
43 |
44 | if isfield(model, 'isPruned')
45 | model.isPruned = false;
46 | end
47 |
48 | params = hsvargplvmExtractParam(model);
49 | model = hsvargplvmExpandParam(model, params);
50 |
51 | end
52 |
53 |
54 |
55 | % A variant of vargplvmRestorePrunedModel to fit the specific structure of
56 | % hsvargplvm's model
57 | function model = vargplvmRestorePrunedModel2(model, Ytr, onlyData, options)
58 |
59 |
60 | if exist('onlyData') && onlyData
61 | % model.mOrig = model.m;
62 | model.bias = mean(Ytr); % Default, has to be changed later if it was different
63 |
64 | if (nargin > 3) && ~isempty(options) && isfield(options,'scale2var1')
65 | if(options.scale2var1)
66 | model.scale = std(Ytr);
67 | model.scale(find(model.scale==0)) = 1;
68 | if(model.learnScales)
69 | warning('Both learn scales and scale2var1 set for GP');
70 | end
71 | if(isfield(options, 'scaleVal'))
72 | warning('Both scale2var1 and scaleVal set for GP');
73 | end
74 | end
75 | elseif (nargin > 3) && ~isempty(options) && isfield(options, 'scaleVal')
76 | model.scale = repmat(options.scaleVal, 1, size(Ytr,2));
77 | else
78 | model.scale = ones(1,size(Ytr,2));
79 | end
80 | end
81 |
82 | model.y = Ytr;
83 | model.m= gpComputeM(model);
84 |
85 | if isfield(model, 'dynamics') && ~isempty(model.dynamics)
86 | model.dynamics.X = model.X;
87 | end
88 |
89 | if model.DgtN
90 | model.mOrig = model.m;
91 | YYT = model.m * model.m'; % NxN
92 |     % Replace data with a square-root factor of Y*Y' (via SVD). Same effect, since Y only appears as Y*Y'.
93 |     %%% model.m = chol(YYT, 'lower'); %%% Alternative: Cholesky factor. Put a switch here!!!!
94 | [U S V]=svd(YYT);
95 | model.m=U*sqrt(abs(S));
96 | model.TrYY = sum(diag(YYT)); % scalar
97 | else
98 | model.TrYY = sum(sum(model.m .* model.m));
99 | end
100 |
101 | if exist('onlyData') && onlyData
102 | try
103 | model.P = model.P1 * (model.Psi1' * model.m);
104 | model.B = model.P1' * model.P;
105 | catch e
106 | warning(e.message);
107 | end
108 | model = orderfields(model);
109 | return
110 | end
111 |
112 |
113 | model = orderfields(model);
114 |
115 | end
116 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmPlotX.m:
--------------------------------------------------------------------------------
1 | function [h, hax, hplot] = hsvargplvmPlotX(model, layer, dims, symb, theta, newFig, classes, fancyPlot)
2 | % HSVARGPLVMPLOTX Plot the latent space for a selected layer and selected
3 | % pairs of dimensions.
4 | %
5 | % DESC Plot the latent space for a selected layer and selected
6 | % pairs of dimensions. If the dimensions are not given in the third
7 | % argument (dims), then the two most dominant dimensions will be used.
8 | %
9 | % FORMAT: hsvargplvmPlotX(model, layer, dims, symb, theta, newFig, classes, fancyPlot)
10 | %
11 | % COPYRIGHT: Andreas C. Damianou, 2013, 2014
12 | %
13 | % DEEPGP
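%
% Usage sketch: plot the top layer's latent space; when dims is omitted,
% the two most dominant dimensions are selected automatically:
%   hsvargplvmPlotX(model, model.H);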
14 |
15 | if nargin < 8 || isempty(fancyPlot), fancyPlot = false; end
16 | if nargin < 7, classes = []; end
17 | if nargin < 6 || isempty(newFig), newFig = true; end
18 | % This argument allows us to rotate a 2-D visualisation by an angle theta
19 | % (in radians)
20 | if nargin < 5 || isempty(theta), theta = 0; end % TODO
21 | if nargin < 4 || isempty(symb), symb = '-x'; end
22 | if nargin < 2, error(sprintf('At least two arguments required. Usage:\nhsvargplvmPlotX(model, layer, dims, symb, theta, newFig, classes, fancyPlot)')); end
23 | if newFig
24 | h = figure;
25 | hax = axes;
26 | else
27 | h=[]; hax=[];
28 | end
29 | if nargin < 3 || isempty(dims)
30 | scales = hsvargplvmShowScales(model,0);
31 | [~,ind]=sort(scales{layer}{1}, 'descend');
32 | dims = ind(1:2);
33 | end
34 | if length(dims) > 3
35 | %error('Can only plot two or three dimensions against each other')
36 | for d=1:length(dims)
37 | hplot = plot(model.layer{layer}.vardist.means(:, dims(d)), symb); title(['d=' num2str(dims(d))])
38 | pause
39 | end
40 | end
41 |
42 | if (isempty(classes) || length(dims) > 2) && ~fancyPlot
43 | if ~isempty(classes) && size(classes,2) > 1
44 | % Labels are given in 1-of-K encoding. Transform to discrete ids
45 | hplot = plot_cl(model.layer{layer}.vardist.means, dims, transformLabels(classes), symb);
46 | else
47 | hplot = plot_cl(model.layer{layer}.vardist.means, dims, classes, symb);
48 | end
49 | else
50 | if length(dims)~=2
51 | error('Dims must be 2 for this plot')
52 | end
53 | %-----
54 | if layer ~= 1
55 | model.layer{layer}.comp{1}.y = model.layer{layer-1}.vardist.means;
56 | end
57 | model.layer{layer}.comp{1}.vardist = model.layer{layer}.vardist;
58 | model.layer{layer}.comp{1}.X = model.layer{layer}.vardist.means;
59 | %figure; ax = axes;
60 | if model.layer{layer}.comp{1}.q > 2
61 | mm = vargplvmReduceModel2(model.layer{layer}.comp{1},[],dims);
62 | if ~isempty(classes)
63 | errors = fgplvmNearestNeighbour(mm, classes);
64 | end
65 | lvmScatterPlot(mm, classes, hax);
66 | else
67 | if ~isempty(classes)
68 | errors = fgplvmNearestNeighbour(model.layer{layer}.comp{1}, classes);
69 | end
70 | lvmScatterPlot(model.layer{layer}.comp{1}, classes, hax);
71 | end
72 | if ~isempty(classes)
73 | title(['Layer ' num2str(layer) ' (errors:' num2str(errors) ')'])
74 | fprintf('# Errors in the 2-D projection: %d \n', errors)
75 | else
76 | title(['Layer ' num2str(layer) ])
77 | end
78 | % plot the two largest latent dimensions
79 | %ax=subplot(model.H,1,h);
80 | %-----
81 | end
82 |
83 |
84 | % switch length(dims)
85 | % case 1
86 | % hplot = plot(model.layer{layer}.vardist.means(:, dims(1)), symb);
87 | % case 2
88 | % hplot = plot(model.layer{layer}.vardist.means(:,dims(1)), model.layer{layer}.vardist.means(:, dims(2)), symb);
89 | % case 3
90 | % hplot = plot3(model.layer{layer}.vardist.means(:,dims(1)), ...
91 | % model.layer{layer}.vardist.means(:, dims(2)), ...
92 | % model.layer{layer}.vardist.means(:, dims(3)), symb); grid on
93 | % end
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmPosteriorMeanVarSimple.m:
--------------------------------------------------------------------------------
1 | % ARG model: the hsvargplvm model
2 | % ARG X: the test latent points of the TOP layer (parent)
3 | % ARG varX: the variance associated with X. Can be left empty ([])
4 |
5 | % TODO: Replace vargplvmPosteriorMeanVarHier2 with
6 | % vargplvmPosteriorMeanVarHier which is an external function (check what
7 | % changes were made)
8 |
9 | % This function is very similar to hsvargplvmPosteriorMeanVar, but without
10 | % the extra arguments.
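%
% Usage sketch (as in the regression demos): predict outputs from test
% points Xstar at the top layer, optionally with their variances varXstar:
%   [mu, varsigma] = hsvargplvmPosteriorMeanVarSimple(model, Xstar, varXstar);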
11 |
12 | function [mu, varsigma] = hsvargplvmPosteriorMeanVarSimple(model, X, varX)
13 |
14 | if nargin < 3
15 | varX = [];
16 | end
17 |
18 | H = model.H;
19 |
20 | Xall{H} = X;
21 | varXall{H} = varX;
22 |
23 |
24 | for h=H-1:-1:1
25 | if ~isempty(varX)
26 | [Xall{h} varXall{h}] = vargplvmPosteriorMeanVarHier2(model.layer{h+1}.comp{1}, Xall{h+1}, varXall{h+1});
27 | else
28 | [Xall{h} varXall{h}] = vargplvmPosteriorMeanVarHier2(model.layer{h+1}.comp{1}, Xall{h+1});
29 | end
30 | end
31 |
32 | if ~isempty(varX)
33 | [mu, varsigma] = vargplvmPosteriorMeanVarHier2(model.layer{1}.comp{1}, Xall{1}, varXall{1});
34 | else
35 | [mu, varsigma] = vargplvmPosteriorMeanVarHier2(model.layer{1}.comp{1}, Xall{1});
36 | end
37 |
38 |
39 | % h=1;sc = vargplvmRetainedScales(model.layer{h}.comp{1}); close all; plotDistr(varXall{h}(:,sc));
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 | % The same as vargplvmPosteriorMeanVar, but with a small change in the
48 | % calculations involving model.m, because in the intermediate layers
49 | % model.m is replaced by the expectation varmu*varmu'+Sn wrt the X of the
50 | % bottom layer.
51 | function [mu, varsigma] = vargplvmPosteriorMeanVarHier2(model, X, varX)
52 |
53 |
54 |
55 | if nargin < 3
56 | vardistX.covars = repmat(0.0, size(X, 1), size(X, 2));%zeros(size(X, 1), size(X, 2));
57 | else
58 | vardistX.covars = varX;
59 | end
60 | vardistX.latentDimension = size(X, 2);
61 | vardistX.numData = size(X, 1);
62 | %model.vardist.covars = 0*model.vardist.covars;
63 | vardistX.means = X;
64 | %model = vargplvmUpdateStats(model, model.X_u);
65 |
66 |
67 | Ainv = model.P1' * model.P1; % size: NxN
68 |
69 | if ~isfield(model,'alpha')
70 | if isfield(model, 'mOrig')
71 | model.alpha = Ainv*model.Psi1'*model.mOrig; % size: 1xD
72 | else
73 | model.alpha = Ainv*model.Psi1'*model.m; % size: 1xD
74 | end
75 | end
76 | Psi1_star = kernVardistPsi1Compute(model.kern, vardistX, model.X_u);
77 |
78 | % mean prediction
79 | mu = Psi1_star*model.alpha; % size: 1xD
80 |
81 | if nargout > 1
82 | %
83 | % precomputations
84 | vard = vardistCreate(zeros(1,model.q), model.q, 'gaussian');
85 | Kinvk = (model.invK_uu - (1/model.beta)*Ainv);
86 | %
87 | for i=1:size(vardistX.means,1)
88 | %
89 | vard.means = vardistX.means(i,:);
90 | vard.covars = vardistX.covars(i,:);
91 | % compute psi0 term
92 | Psi0_star = kernVardistPsi0Compute(model.kern, vard);
93 | % compute psi2 term
94 | Psi2_star = kernVardistPsi2Compute(model.kern, vard, model.X_u);
95 |
96 | vars = Psi0_star - sum(sum(Kinvk.*Psi2_star));
97 |
98 | for j=1:model.d
99 | %[model.alpha(:,j)'*(Psi2_star*model.alpha(:,j)), mu(i,j)^2]
100 | varsigma(i,j) = model.alpha(:,j)'*(Psi2_star*model.alpha(:,j)) - mu(i,j)^2;
101 | end
102 | varsigma(i,:) = varsigma(i,:) + vars;
103 | %
104 | end
105 | %
106 | if isfield(model, 'beta')
107 | varsigma = varsigma + (1/model.beta);
108 | end
109 | %
110 | end
111 |
112 | % Rescale the mean
113 | mu = mu.*repmat(model.scale, size(vardistX.means,1), 1);
114 |
115 | % Add the bias back in
116 | mu = mu + repmat(model.bias, size(vardistX.means,1), 1);
117 |
118 | % rescale the variances
119 | if nargout > 1
120 | varsigma = varsigma.*repmat(model.scale.*model.scale, size(vardistX.means,1), 1);
121 | end
122 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmCreateToyData.m:
--------------------------------------------------------------------------------
1 | % Create toy data. Give [] as an argument if the default value is to be
2 | % used for the corresponding parameter.
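%
% Usage sketch: generate the default 'fols' toy dataset with N=100 points
% and D=10 dimensions per modality, keeping the remaining defaults:
%   [Yall, dataSetNames, Z] = hsvargplvmCreateToyData('fols', 100, 10);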
3 |
4 | function [Yall, dataSetNames, Z] = hsvargplvmCreateToyData(type, N, D, numSharedDims, numHierDims, noiseLevel, hierSignalStrength)
5 |
6 | if nargin < 7 || isempty(hierSignalStrength), hierSignalStrength = 0.6; end
7 | if nargin < 6 || isempty(noiseLevel), noiseLevel = 0.1; end
8 | if nargin < 5 || isempty(numHierDims), numHierDims = 1; end
9 | if nargin < 4 || isempty(numSharedDims), numSharedDims = 5; end
10 | if nargin < 3 || isempty(D), D = 10; end
11 | if nargin < 2 || isempty(N), N = 100; end
12 | if nargin < 1 || isempty(type), type = 'fols'; end
13 |
14 |
15 | switch type
16 | case 'fols'
17 | alpha = linspace(0,4*pi,N);
18 | privSignalInd = [1 2];
19 | sharedSignalInd = 3;
20 | hierSignalInd = 4;
21 |
22 |
23 | Z{1} = cos(alpha)';
24 | Z{2} = sin(alpha)';
25 |
26 | Z{3}= (cos(alpha)').^2;
27 | Z{4} = heaviside(linspace(-10,10,N))'; % Step function
28 | % Z{3} = heaviside(Z{3}); % This turns the signal into a step function
29 | % Z{3} = 2*cos(2*alpha)' + 2*sin(2*alpha)' ; %
30 |
31 |
32 | % Scale and center data
33 | for i=1:length(Z)
34 | bias_Z{i} = mean(Z{i});
35 | Z{i} = Z{i} - repmat(bias_Z{i},size(Z{i},1),1);
36 | scale_Z{i} = max(max(abs(Z{i})));
37 | Z{i} = Z{i} ./scale_Z{i};
38 | end
39 |
40 | % Do the following only for the private signals
41 | for i=privSignalInd
42 | % Map 1-Dim to D-Dim and add some noise
43 | Zp{i} = Z{i}*rand(1,D-numSharedDims);
44 | Zp{i} = Zp{i} + noiseLevel.*randn(size(Zp{i}));
45 | end
46 |
47 | % This is the shared signal
48 | i = sharedSignalInd;
49 | Zp{i} = Z{i}*rand(1,numSharedDims);
50 | Zp{i} = Zp{i} + noiseLevel.*randn(size(Zp{i}));
51 |
52 | % This is the high-level signal
53 | i = hierSignalInd;
54 | %Zp{i} = Z{i}*rand(1,D);
55 | Zp{i} = Z{i}*ones(1,D);
56 | Zp{i} = Zp{i} + noiseLevel.*randn(size(Zp{i}));
57 |
58 |
59 | % pca(Zp{2}) % This shows that it is actually a 1-D dataset
60 |
61 | % Y = [Zp{1} Zp{2}];
62 | % We would like the number of latent dims to be 2+numSharedDims, ideally 3. With
63 | % vargplvm we set Q=6 and expect the other 3 or 4 to be switched off.
64 | % [U,V] = pca(Y,6);
65 | % Xp = Y*V;
66 | % pca(Xp)
67 |
68 | %---
69 | allPr = []; allPr1 = [];
70 | for i=privSignalInd
71 | Zp{i} = [Zp{i} Zp{sharedSignalInd}];
72 | allPr1 = [allPr1 Zp{i}]; %%%%% DEBUG
73 | Yall1{i} = Zp{i}; %%%%% DEBUG
74 | end
75 |
76 | for i=privSignalInd
77 | % Apply the high-level signal to the private ones
78 | % Zp{i} = Zp{i} .* Zp{hierSignalInd}; % ORIGINAL
79 | Zp{i} = Zp{i} + hierSignalStrength*Zp{hierSignalInd}; %
80 | allPr = [allPr Zp{i}];
81 | Yall{i} = Zp{i};
82 | end
83 |
84 |
85 | bar(pca(allPr))
86 | % return
87 | %---
88 |
89 | dataSetNames={'fols_cos', 'fols_sin'};
90 |
91 | for i=privSignalInd
92 | figure
93 | title(['model ' num2str(i)])
94 | subplot(2,1,1)
95 | plot(Z{i}), hold on
96 | plot(Z{sharedSignalInd}, 'r')
97 | plot(pcaEmbed(Yall{i},1), 'm')
98 | legend('Orig.','Shared','Final')
99 | subplot(2,1,2)
100 | plot(Z{hierSignalInd});
101 | legend('Hier.')
102 | end
103 | end
104 |
105 | %%
106 | for i=1:length(privSignalInd)
107 | ZZ{i} = [Z{privSignalInd(i)} Z{sharedSignalInd}]+repmat(Z{hierSignalInd},1,size([Z{privSignalInd(i)} Z{sharedSignalInd}],2)).*0.6;
108 | end
109 | %%
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmClusterScales.m:
--------------------------------------------------------------------------------
1 | % Here, a single layer is passed as a model
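% Usage sketch (as in demHsvargplvmHighFive1): cluster the ARD scales of the
% leaf layer into 4 clusters, excluding dimensions learned with very low SNR:
%   [clu, clu2] = hsvargplvmClusterScales(model.layer{1}, 4, [], exclDim);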
2 | function [clu, clu2, clu3] = hsvargplvmClusterScales(model, noClusters, labelType, exclDims)
3 |
4 | if nargin < 4
5 | exclDims = [];
6 | end
7 |
8 | if nargin < 3
9 | labelType = [];
10 | end
11 |
12 | if nargin < 2 || isempty(noClusters), noClusters = 2; end
13 |
14 | % for compatibility with svargplvm
15 | if ~isfield(model, 'type') || ~strcmp(model.type, 'svargplvm')
16 | model.numModels = model.M;
17 | end
18 |
19 | allScales = svargplvmScales('get',model);
20 | % thresh = max(model.comp{obsMod}.kern.comp{1}.inputScales) * 0.001;
21 | thresh = 0.01; % 0.005
22 | binaryScales = zeros(model.numModels, model.q);
23 | allScalesMat = zeros(model.numModels, model.q);
24 | allScalesMat2 = zeros(model.numModels, model.q);
25 | for i=1:model.numModels
26 | % Normalise values between 0 and 1
27 | allScales{i} = allScales{i} / max(allScales{i});
28 | retainedScales{i} = find(allScales{i} > thresh);
29 | allScalesMat(i,:) = allScales{i};
30 | allScalesMat2(i,:) = allScalesMat(i,:);
31 |     allScalesMat2(i, setdiff(1:model.q, retainedScales{i})) = 0; %%% Scales that are too small are set to 0 for better clustering
32 | binaryScales(i,retainedScales{i}) = 1;
33 | end
34 | % sharedDims = intersect(retainedScales{obsMod}, retainedScales{infMod});
35 | %imagesc(binaryScales')
36 | %htree = linkage(allScalesMat,'single');
37 | %clu = cluster(htree, 12);
38 | clu = kmeans(allScalesMat,noClusters, 'emptyact', 'drop','distance','sqeuclidean');
39 | clu(exclDims) = 0; %%% Excluded submodels take the value 0, which therefore constitutes a new cluster marking irrelevant dims.
40 | clu3 = kmeans(allScalesMat2,noClusters, 'emptyact', 'drop','distance','sqeuclidean');
41 | clu3(exclDims) = 0; %%% As above, excluded submodels take the value 0.
42 | clu2 = kmeans(binaryScales, noClusters, 'emptyact', 'drop','distance','sqeuclidean');
43 | clu2(exclDims) = 0; %%% As above, excluded submodels take the value 0.
44 | %%
45 | scrsz = get(0,'ScreenSize');
46 | figure('Position',[scrsz(3)/4.86 scrsz(4)/1 1.2*scrsz(3)/1.6457 0.6*scrsz(4)/3.4682])
47 |
48 | imagesc(clu'); title('Clustering scales')
49 | cl = caxis;
50 | if ~isempty(labelType) && strcmp(labelType,'skel59dim')
51 | Xt = [1 8 15 24 33 42 51 53 54 55 56 57 58];
52 | Xl = [0 60]; %[1 59]; % limit
53 | myLbls = {'Lleg';'Rleg';'Torso';'Head';'Lhand';'Rhand';'Nothing?';'MoveInY';'MoveInX';'MoveInZ';'Rotation?';'UpsDown?';'Nothing?' };
54 | textAxes(Xt, Xl, myLbls);
55 | end
56 | figure('Position',[scrsz(3)/4.86 scrsz(4)/3 1.2*scrsz(3)/1.6457 0.6*scrsz(4)/3.4682])
57 | imagesc(clu3'); title('Clustering scales2'); caxis(cl);
58 | if ~isempty(labelType) && strcmp(labelType,'skel59dim')
59 | Xt = [1 8 15 24 33 42 51 53 54 55 56 57 58];
60 | Xl = [0 60]; %[1 59]; % limit
61 | myLbls = {'Lleg';'Rleg';'Torso';'Head';'Lhand';'Rhand';'Nothing?';'MoveInY';'MoveInX';'MoveInZ';'Rotation?';'UpsDown?';'Nothing?' };
62 | textAxes(Xt, Xl, myLbls);
63 | end
64 | figure('Position',[scrsz(3)/4.86 scrsz(4)/10 1.2*scrsz(3)/1.6457 0.6*scrsz(4)/3.4682])
65 | imagesc(clu2'); title('Clustering binary scales'); caxis(cl);
66 | if ~isempty(labelType) && strcmp(labelType,'skel59dim')
67 | textAxes(Xt, Xl, myLbls);
68 | end
69 |
70 | %{
71 | for i=1:model.numModels
72 | bar(allScales{i})
73 | title(num2str(i))
74 | pause
75 | end
76 | %}
77 |
78 |
79 | function textAxes(Xt, Xl, myLbls)
80 | pos = get(gca,'Position');
81 | set(gca,'Position',[pos(1), .2, pos(3) .65])
82 |
83 | set(gca,'XTick',Xt,'XLim',Xl);
84 | set(gca,'XGrid','on')
85 |
86 | ax = axis; % Current axis limits
87 | axis(axis); % Set the axis limit modes (e.g. XLimMode) to manual
88 | Yl = ax(3:4); % Y-axis limits
89 |
90 | % Place the text labels
91 | t = text(Xt,Yl(1)*ones(1,length(Xt)),myLbls);
92 | % Due to rotation, hor. alignment is actually vertical and vice versa
93 | set(t,'HorizontalAlignment','right','VerticalAlignment','top', ... % v.alignment: also: middle
94 | 'Rotation',90, 'Color', 'w','FontWeight', 'bold');
95 |
96 | %set(gca,'XTickLabel','') % Remove the default labels
97 |
98 |
--------------------------------------------------------------------------------
/deepGP/matlab/demHsvargplvmClassification.m:
--------------------------------------------------------------------------------
1 | % DEMHSVARGPLVMCLASSIFICATION Classification demo for deep GPs, where
2 | % data are used as inputs and labels correspond to the outputs.
3 | %
4 | % This is a generic demo. You can replace the data used here with your own
5 | % data and run it (i.e. inpX has to be the observed data and Ytr{1} the
6 | % corresponding labels).
7 | %
8 | % To configure the deepGP used here, do the following:
9 | % 1. If any of the fields you want to change appear in this demo in the
10 | % "configuring the deep gp" section, they change it directly there.
11 | % 2. If the field you're looking for is not there, then check the available
12 | % configuration options in hsvargplvm_init.m. The way this works, is that
13 | % you just need to overwrite the corresponding workspace variables.
14 | % e.g. if in hsvargplvm_init.m you see a field "fixInducing", you can
15 | % overwrite this field by just calling this demo as:
16 | % >> fixInducing = true; demHsvargplvmClassification
17 | % 3. If the field you're looking for is not in hsvargplvm_init.m, then
18 | % check (in this order) svargplvm_init.m and vargplvm_init.m, again
19 | % overwriting the configuration by defining the corresponding workspace
20 | % variable, as above.
21 | %
22 | % SEE ALSO: demHsvargplvmRegression.m
23 | %
24 | % COPYRIGHT: Andreas Damianou, 2014, 2015
25 |
26 | %% -- Loading the data. Replace this with your own data if you want
27 | [Y, lbls] = vargplvmLoadData('oil100');
28 | %-- Sort data according to labels (without loss of generality)
29 | labels = transformLabels(lbls);
30 | [~,ss] = sort(labels);
31 | Y = Y(ss,:);
32 | lbls = lbls(ss,:);
33 | labels = transformLabels(lbls);
34 | %-------
35 |
36 | % Get a subset of the data for training and one for testing
37 | perm = randperm(size(Y,1));
38 | indTr = perm(1:50);
39 | indTs = perm(51:100);
40 |
41 | inpX = Y(indTr,:);
42 | Ytr{1} = lbls(indTr,:);
43 |
44 | inpXtest = Y(indTs,:);
45 | labelsTest = transformLabels(lbls(indTs,:))';
46 | Ytest{1} = lbls(indTs,:);
47 |
48 | %% ---- Configuring the DEEP GP ---
49 |
50 | % For the kernel that models the input data, we are not constrained to
51 | % kernels with tractable Psi statistics... we can use any kernel!
52 | dynamicKern = {'rbf','white','bias'};
53 | % Number of iterations performed for initialising the variational
54 | % distribution, and number of training iterations
55 | initVardistIters = 100; itNo = [50 50];
56 | H=2; % Number of layers
57 | initSNR = 100; % Initial Signal To Noise ratio per layer
58 | K=25; % Number of inducing points to use
59 | Q=5; % Dimensionality of latent space (can potentially be different per layer)
60 |
61 | % Since both the outputs and the inputs (inpX) are observed, how are we
62 | % supposed to initialise the latent spaces? One option is stacked PCA; here,
63 | % since the rich information is in the inputs, we apply it to the inputs
64 | % instead of the standard initialisation from the outputs.
65 | initX={};
66 | curX = inpX;
67 | for h=H:-1:1
68 | if isscalar(Q), curQ = Q; else curQ = Q{h}; end
69 | curX = ppcaEmbed(curX, curQ);
70 | initX{h} = curX;
71 | end
72 |
73 | %% --- RUN THE ACTUAL DEMO
74 | demHsvargplvmRegression
75 |
76 | %% --- INSPECTION: Plot the latent space with colors showing the class
77 | % labels
78 | layer = 2; % Change this to plot another layer
79 | %hsvargplvmPlotX(model, layer, [],[], [], [], transformLabels(Ytr{1})');
80 | modelP = model.layer{layer}.comp{1};
81 | modelP.vardist = model.layer{layer}.vardist;
82 | modelP.X = modelP.vardist.means;
83 | if layer ~= 1
84 | modelP.y = model.layer{layer-1}.vardist.means;
85 | end
86 | modelP = vargplvmReduceModel(modelP,2);
87 | lvmScatterPlot(modelP, Ytr{1});
88 | %% --- PREDICTIONS for test data
89 | [Testmeans Testcovars] = vargplvmPredictPoint(model.layer{end}.dynamics, inpXtest);
90 | [mu, varsigma] = hsvargplvmPosteriorMeanVarSimple(model, Testmeans, Testcovars);
91 |
92 | [~,ind]=max(mu');
93 | errors=0;
94 | for i=1:size( Ytest{1},1)
95 | errors = errors + (ind(i) ~= labelsTest(i));
96 | end
97 |
98 | threshold = 0.5;
99 | mu(mu>threshold) = 1;
100 | mu(mu<=threshold) = 0;
101 | figure
102 | % Errors made
103 | imagesc(abs(mu - Ytest{1})); colorbar; title('Predictions differences');
104 | Nstar = size(Ytest{1},1);
105 |
106 | fprintf('# Misclassified: %d out of %d (%.2f%% accuracy).\n', errors, Nstar, 100-errors*100/Nstar)
107 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmPosteriorMeanVar.m:
--------------------------------------------------------------------------------
1 | % This function is used for predicting in the hierarchical model.
2 | % All intermediate mu and sigma should be returned (TODO!!!), or
3 | % the user can give extra arguments to define which outputs to get.
4 |
5 | % ARG model: the hsvargplvm model
6 | % ARG X: the test latent points of the TOP layer (parent)
7 | % ARG varX: the variance associated with X. Can be left empty ([])
8 | % ARG lInp: the layer we predict FROM, in the most general case the parent
9 | % ARG lOut: the layer we predict at, ie the prediction is propagated from
10 | % the lInp layer down to this layer.
11 | % ARG ind: If there are multiple output nodes in layer lOut, we predict
12 | % for the node(s) indexed by "ind", i.e. ind is a vector.
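%
% Usage sketch: propagate test points X of the top layer down to the outputs
% of all nodes of layer 1 (ind = -1 means all nodes):
%   [mu, varsigma] = hsvargplvmPosteriorMeanVar(model, X, [], model.H, 1, -1);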
13 |
14 | function [mu, varsigma] = hsvargplvmPosteriorMeanVar(model, X, varX, lInp, lOut, ind)
15 |
16 | if nargin < 5 || isempty(lOut)
17 | lOut = 1;
18 | end
19 |
20 | if nargin < 4 || isempty(lInp)
21 | lInp = model.H;
22 | end
23 |
24 | % -1 means all
25 | if nargin > 5 && ~isempty(ind) && ind == -1
26 | ind = 1:model.layer{lOut}.M;
27 | elseif nargin < 6 || isempty(ind)
28 | ind = model.layer{lOut}.M;
29 | end
30 |
31 | if model.layer{lInp}.q ~= size(X,2)
32 | error('Latent position given has the wrong dimensions!')
33 | end
34 | if nargin < 3
35 | varX = [];
36 | end
37 |
38 | Xcur = X;
39 | varXcur = varX;
40 | for h=lInp:-1:lOut
41 | if h == lOut % This if-else is not really needed...
42 | muPart = []; varxPart = [];
43 | for m=ind
44 | % If we reach the layer that we actually want to predict at, then
45 | % this is our final prediction.
46 | if isempty(varXcur)
47 | if nargout > 1
48 | [mu, varsigma] = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur);
49 |                     varxPart = [varxPart varsigma];
50 | else
51 | mu = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur);
52 | end
53 | muPart = [muPart mu];
54 | else
55 | if nargout > 1
56 | [mu,varsigma] = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur, varXcur);
57 |                     varxPart = [varxPart varsigma];
58 | else
59 | mu = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur, varXcur);
60 | end
61 | muPart = [muPart mu];
62 | end
63 | end
64 | mu = muPart;
65 | if nargout > 1
66 |             varsigma = varxPart; % Collect the variances of all requested nodes
67 | end
68 | else
69 | % If this is just an intermediate node until the layer we want to
70 | % reach at, then we have to go through all the submodels in the
71 | % current layer, predict in each one of them and join the
72 | % predictions into a single output which now becomes the input for
73 | % the next level.
74 | muPart = []; varxPart = [];
75 | for m=1:model.layer{h}.M
76 | if isempty(varXcur)
77 | if nargout > 1
78 | [mu, varx] = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur);
79 |                     varxPart = [varxPart varx];
80 | else
81 | mu = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur);
82 | end
83 | muPart = [muPart mu];
84 | else
85 | if nargout > 1
86 |                     [mu, varx] = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur, varXcur);
87 |                     varxPart = [varxPart varx];
88 |                 else
89 |                     mu = vargplvmPosteriorMeanVarHier(model.layer{h}.comp{m}, Xcur, varXcur);
90 | end
91 | muPart = [muPart mu];
92 | end
93 | end
94 | Xcur = muPart;
95 | varXcur = varxPart;
96 | end
97 | end
98 |
99 |
100 | % Some dimensions might be not learned but nevertheless required as an
101 | % output so that the dimensions are right (e.g. hsvargplvmClassVisualise).
102 | % In that case, just pad these dims. with zeros
103 | if isfield(model, 'zeroPadding') && ~isempty(model.zeroPadding)
104 | muNew = zeros(size(mu,1), length(model.zeroPadding.rem)+length(model.zeroPadding.keep));
105 | muNew(:, model.zeroPadding.keep) = mu;
106 | mu = muNew;
107 | end
108 |
109 |
110 |
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmExtractParam.m:
--------------------------------------------------------------------------------
1 | function [params, names] = hsvargplvmExtractParam(model)
2 |
3 | % HSVARGPLVMEXTRACTPARAM Extract a parameter vector from a hierarchical Var-GP-LVM model.
4 | % FORMAT
5 | % DESC extracts a parameter vector from a given HSVARGPLVM model.
6 | % ARG model : hsvargplvm model from which to extract parameters
7 | % RETURN params : model parameter vector
8 | %
9 | %
10 | % We assume a graphical model:
11 | %
12 | % X_H -> X_{H-1} -> ......... -> X_{1} -> {Y{1}, Y{2}, ..., Y{M} }
13 | % |_______| |_____| |____| |______________|
14 | % layer{H} layer{H-1} layer{2} layer{1}
15 | %
16 | % Parameters are extracted as a vector in the following order (left to right)
17 | % Imagine a stack where "get" extracts from the BOTTOM (not the top).
18 | % - parameter | size :
19 | %
20 | % for h=1:H % h=1 is the very lower layer in the graphical model
21 | % get: varmeans_{q(X_h)}(:)' --> N x Q_h
22 | % get: varcovs_{q(X_h}(:)' --> N x Q_h
23 | % for i=1:M (for the M of the current layer h)
24 | % get: X_u(:)'_{h,i} % inducing points --> K_{h,i} x Q_{h}
25 | % get: \theta_{h,i} % kernel hyperparameters --> ...
26 | % get: beta_{h,i} ---> 1
27 | % end
28 | % end
29 | %
30 | % If there is a non-(standard normal) prior on the parent node, then we
31 | % have "dynamics", ie reparametrized variational means/variances plus
32 | % hyperparameters of the "dynamics" kernel:
33 | %
34 | % for h=1:H-1 % h=1 is the very lower layer in the graphical model
35 | % get: varmeans_{q(X_h)}(:)' --> N x Q_h
36 | % get: varcovs_{q(X_h}(:)' --> N x Q_h
37 | % for i=1:M (for the M of the current layer h)
38 | % get: X_u(:)'_{h,i} % inducing points --> K_{h,i} x Q_{h}
39 | % get: \theta_{h,i} % kernel hyperparameters --> ...
40 | % get: beta_{h,i} ---> 1
41 | % end
42 | % end
43 | % get: mubar_{q(X_H)}(:)'
44 | % get: lambda_{q(X_H)}(:)'
45 | % get: \theta_x{H}
46 | % for i=1:M (for the M of the parent layer h)
47 | % get: X_u(:)'_{h,i} % inducing points --> K_{H,i} x Q_{H}
48 | % get: \theta_{h,i} % kernel hyperparameters --> ...
49 | % get: beta_{H,i} ---> 1
50 | % end
51 | %
52 | % SEEALSO : hsvargplvmModelCreate, hsvargplvmExpandParam, modelExtractParam
53 | %
54 | % COPYRIGHT : Andreas C. Damianou, 2011
55 |
56 | % SVARGPLVM
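%
% Quick consistency sketch: extracting and re-expanding the parameter vector
% leaves the parameters unchanged (the demos use this pair to force kernel
% computations):
%   params = hsvargplvmExtractParam(model);
%   model = hsvargplvmExpandParam(model, params);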
57 |
58 | if nargout > 1
59 | returnNames = true;
60 | else
61 | returnNames = false;
62 | end
63 |
64 | params = [];
65 | names = {};
66 |
67 | for h=1:model.H
68 | % Variational distribution
69 |     if h==model.H && isfield(model.layer{h}, 'dynamics') && ~isempty(model.layer{h}.dynamics)
70 | % [VariationalParameters(reparam) dynKernelParameters]
71 | if returnNames
72 | [dynParams, dynParamNames] = modelExtractParam(model.layer{h}.dynamics);
73 | names = {names{:} dynParamNames{:}};
74 | else
75 | dynParams = modelExtractParam(model.layer{h}.dynamics);
76 | end
77 | params = [params dynParams];
78 | else
79 | if returnNames
80 | [params_i, names_i] = modelExtractParam(model.layer{h}.vardist);
81 | params = [params params_i];
82 | names = {names{:} names_i{:}};
83 | else
84 | params = [params modelExtractParam(model.layer{h}.vardist)];
85 | end
86 | end
87 | % Now extract the "private" parameters of every sub-model. This is done by
88 | % just calling vargplvmExtractParam and then ignoring the parameter indices
89 | % that are shared for all models (because we want these parameters to be
90 | % included only once).
91 | for i = 1:model.layer{h}.M
92 | if returnNames
93 | [params_i,names_i] = vargplvmExtractParamNoVardist(model.layer{h}.comp{i});
94 | else
95 | params_i = vargplvmExtractParamNoVardist(model.layer{h}.comp{i});
96 | end
97 |
98 | params = [params params_i];
99 |
100 | if returnNames
101 | names = {names{:} names_i{:}};
102 | end
103 | end
104 |
105 | end
106 |
107 |
108 |
109 | % Make fixed indices to have an asterisk
110 | if returnNames && isfield(model, 'fixParamIndices')
111 | for i = 1:length(model.fixParamIndices)
112 | j = model.fixParamIndices(i);
113 | names{j} = [names{j} '*'];
114 | end
115 | end
116 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmExpandParam.m:
--------------------------------------------------------------------------------
1 | function model = hsvargplvmExpandParam(model, params)
2 |
3 | % HSVARGPLVMEXPANDPARAM Expand a parameter vector into a hierarchical variational GP-LVM model.
4 | % FORMAT
5 | % DESC takes a HSVARGPLVM structure and a vector of parameters, and
6 | % fills the structure with the given parameters. Also performs any
7 | % necessary precomputation for likelihood and gradient
8 | % computations, so can be computationally intensive to call.
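% ARG model : hsvargplvm model structure to fill
% ARG params : vector of parameters, ordered as described in
% hsvargplvmExtractParam
% RETURN model : the model updated with the given parameters
%
% SEEALSO : hsvargplvmExtractParam, modelExpandParam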
9 |
10 | startVal = 0;
11 | endVal = 0;
12 | for h=1:model.H %%% FOR EACH LAYER
13 | startVal = endVal + 1;
14 | dynUsed = false;
15 |     if h==model.H && isfield(model.layer{h}, 'dynamics') && ~isempty(model.layer{h}.dynamics)
16 | dynUsed = true;
17 | endVal = endVal + model.layer{h}.dynamics.nParams;
18 | model.layer{h}.dynamics = modelExpandParam(model.layer{h}.dynamics, params(startVal:endVal));
19 | else
20 | endVal = endVal + model.layer{h}.vardist.nParams;
21 | model.layer{h}.vardist = modelExpandParam(model.layer{h}.vardist, params(startVal:endVal));
22 | end
23 |
24 | totLayerParams = 0;
25 | for m = 1:model.layer{h}.M %%% FOR EACH SUB-MODEL in the same layer
26 | endValPrev = endVal;
27 | %--- inducing inputs
28 | startVal = endVal+1;
29 | % if model.layer{h}.comp{m}.fixInducing
30 | if model.layer{h}.comp{m}.fixInducing || (isfield(model.layer{h}.comp{m}, 'learnInducing') && ~model.layer{h}.comp{m}.learnInducing)
31 | if ~(isfield(model.layer{h}.comp{m}, 'learnInducing') && ~model.layer{h}.comp{m}.learnInducing) % If this is true, don't change the values at all
32 | model.layer{h}.comp{m}.X_u = model.layer{h}.vardist.means(model.layer{h}.comp{m}.inducingIndices, :); % static
33 | % X_u values are taken from X values.
34 | % model.X_u = model.X(model.inducingIndices, :);
35 | end
36 | else
37 | % Parameters include inducing variables.
38 | endVal = endVal + model.layer{h}.q*model.layer{h}.comp{m}.k;
39 | model.layer{h}.comp{m}.X_u = reshape(params(startVal:endVal),model.layer{h}.comp{m}.k,model.layer{h}.q);
40 | end
41 |
42 | %--- kernel hyperparameters
43 | startVal = endVal+1;
44 | endVal = endVal + model.layer{h}.comp{m}.kern.nParams;
45 | model.layer{h}.comp{m}.kern = kernExpandParam(model.layer{h}.comp{m}.kern, params(startVal:endVal));
46 |
47 | %--- likelihood beta parameters
48 | if model.layer{h}.comp{m}.optimiseBeta
49 | startVal = endVal + 1;
50 | endVal = endVal + prod(size(model.layer{h}.comp{m}.beta));
51 | if ~isstruct(model.layer{h}.comp{m}.betaTransform)
52 | fhandle = str2func([model.layer{h}.comp{m}.betaTransform 'Transform']);
53 | model.layer{h}.comp{m}.beta = fhandle(params(startVal:endVal), 'atox');
54 | else
55 | if isfield(model.layer{h}.comp{m}.betaTransform,'transformsettings') && ~isempty(model.layer{h}.comp{m}.betaTransform.transformsettings)
56 | fhandle = str2func([model.layer{h}.comp{m}.betaTransform.type 'Transform']);
57 | model.layer{h}.comp{m}.beta = fhandle(params(startVal:endVal), 'atox', model.layer{h}.comp{m}.betaTransform.transformsettings);
58 | else
59 |                     error('hsvargplvmExpandParam: Invalid transform specified for beta.');
60 | end
61 | end
62 | end
63 | model.layer{h}.comp{m}.nParams = endVal - endValPrev;
64 | totLayerParams = totLayerParams + model.layer{h}.comp{m}.nParams;
65 | end
66 | if dynUsed
67 | model.layer{h}.nParams = totLayerParams + model.layer{h}.dynamics.nParams;
68 | else
69 | model.layer{h}.nParams = totLayerParams + model.layer{h}.vardist.nParams;
70 | end
71 | end
72 | model.nParams = endVal;
73 |
74 | % Force kernel computations etc
75 | model = hsvargplvmUpdateStats(model);
76 |
77 | % Will check SNR in each iteration, to make sure it's OK
78 | if isfield(model, 'checkSNR') && model.checkSNR > 0
79 | snr=hsvargplvmShowSNR(model,[],0);
80 | for h=1:model.H
81 | [wr,errs] = svargplvmCheckSNR(num2cell(snr{h}),[],[],0,0);
82 | for m=1:model.layer{h}.M
83 | if ~isempty(wr) || ~isempty(errs)
84 | fprintf('# WARNING! Low SNR (%.2f) in layer/modality: %d/%d\n',snr{h}(m),h,m)
85 | end
86 | end
87 | end
88 | end
89 |
--------------------------------------------------------------------------------
/deepGP/matlab/demToyHsvargplvm1.m:
--------------------------------------------------------------------------------
1 | % DEMTOYHSVARGPLVM1 A simple script to run deep GPs on simple hierarchical toy data for
2 | % unsupervised learning.
3 | %
4 | % DESC A simple script to run deep GPs on simple hierarchical toy data for
5 | % unsupervised learning.
6 | %
7 | % COPYRIGHT: Andreas C. Damianou, 2013
8 | %
9 | % SEE ALSO: demMultvargplvmStackToy1.m, demToyRegressionSimple.m
10 | %
11 | % DEEPGP
12 |
13 | % Fix seeds
14 | randn('seed', 1e5);
15 | rand('seed', 1e5);
16 |
17 | % Any configuration variables that are not already set are given a default
18 | % value here
19 | if ~exist('experimentNo'), experimentNo = 404; end
20 | if ~exist('initial_X'), initial_X = 'separately'; end
21 | if ~exist('baseKern'), baseKern = {'linard2','white','bias'}; end
22 | if ~exist('itNo'), itNo = 500; end
23 | if ~exist('initVardistIters'), initVardistIters = []; end
24 | if ~exist('multVargplvm'), multVargplvm = false; end
25 |
26 | % That's for the ToyData2 function:
27 | if ~exist('toyType'), toyType = ''; end % Other options: 'fols','gps'
28 | if ~exist('hierSignalStrength'), hierSignalStrength = 1; end
29 | if ~exist('noiseLevel'), noiseLevel = 0.05; end
30 | if ~exist('numHierDims'), numHierDims = 1; end
31 | if ~exist('numSharedDims'), numSharedDims = 5; end
32 | if ~exist('Dtoy'), Dtoy = 10; end
33 | if ~exist('Ntoy'), Ntoy = 100; end
34 |
35 | hsvargplvm_init;
36 |
37 | if exist('Yall')
38 | Ytr = Yall;
39 | else
40 | [Ytr, dataSetNames, Z] = hsvargplvmCreateToyData2(toyType,Ntoy,Dtoy,numSharedDims,numHierDims, noiseLevel,hierSignalStrength);
41 | end
42 |
43 | globalOpt.dataSetName = ['toy_' toyType];
44 |
45 | % This code allows for having multiple modalities (conditional
46 | % independencies) in the layers.
47 | % Skip this if you want multOutput only in the 2nd layer
48 | % If this option is active, then instead of having one modality for each
49 | % signal, we'll have one modality per dimension of the concatenated signal
50 | if globalOpt.multOutput
51 | fprintf('### Mult - hsvargplvm!! \n ###')
52 | initial_X = 'concatenated';
53 | Ynew=[];
54 | for i=1:length(Ytr)
55 | Ynew = [Ynew Ytr{i}];
56 | end
57 | clear Ytr
58 | for d=1:size(Ynew,2)
59 | Ytr{d} = Ynew(:,d);
60 | end
61 | clear Ynew
62 | end
63 | %%
64 | options = hsvargplvmOptions(globalOpt);
65 | options.optimiser = 'scg2';
66 |
67 | %--- Here we have the option of using Bayesian GPLVM or GPLVM for
68 | % initialising the latent spaces. If this is the case, train the
69 | % corresponding models
70 | optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
71 | initXOptions = cell(1, options.H);
72 | for h=1:options.H
73 |     if strcmp(optionsAll.initX, 'vargplvm') || strcmp(optionsAll.initX, 'fgplvm')
74 |         initXOptions{h}{1} = optionsAll;
75 |         % Don't allow the D >> N trick for layers > 1
76 | if h~=1
77 | if isfield(initXOptions{h}{1}, 'enableDgtN')
78 | initXOptions{h}{1}.enableDgtN = false;
79 | end
80 | end
81 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
82 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
83 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
84 | initXOptions{h}{1}.initX = 'ppca';
85 | initXOptions{h}{1}.initSNR = 90;
86 | initXOptions{h}{1}.numActive = 50;
87 | initXOptions{h}{2} = 160;
88 | initXOptions{h}{3} = 30;
89 | if exist('stackedInitVardistIters'), initXOptions{h}{2} = stackedInitVardistIters; end
90 | if exist('stackedInitIters'), initXOptions{h}{3} = stackedInitIters; end
91 | if exist('stackedInitSNR'), initXOptions{h}{1}.initSNR = stackedInitSNR; end
92 | if exist('stackedInitK'), initXOptions{h}{1}.numActive = stackedInitK; end
93 | else
94 | initXOptions{h} = {};
95 | end
96 | end
97 | %---
98 |
99 | % Create a deepGP model, parametrized by its local options, global options
100 | % and options that say how to initialise the latent spaces X
101 | model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);
102 | params = hsvargplvmExtractParam(model);
103 | model = hsvargplvmExpandParam(model, params);
104 |
105 | %% Optimise deep GP model
106 | model.globalOpt = globalOpt;
107 | [model,modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
108 |
109 | % Uncomment if you decide to train for more iterations later...
110 | %modelOld = model;
111 | %model = hsvargplvmOptimiseModel(model, true, true, [], {0, [1000 1000 1000]});
112 |
--------------------------------------------------------------------------------
/deepGP/matlab/demStepFunction.m:
--------------------------------------------------------------------------------
1 | % DEMSTEPFUNCTION A simple script to run deep GP regression on toy step
2 | % function data.
3 | %
4 | % DESC A simple script to run deep GP regression on toy step function
5 | % data.
6 | %
7 | % COPYRIGHT: Andreas C. Damianou, 2013
8 | %
9 | % SEE ALSO: demToyHsvargplvm1.m, demHsvargplvmRegression.m
10 | %
11 | % DEEPGP
12 |
13 |
14 | %%
15 |
16 | % Fix seeds
17 | randn('seed', 1e5);
18 | rand('seed', 1e5);
19 |
20 |
21 | %-------------------
22 | experimentNo = 1;
23 | toyType = 'step';
24 | baseKern='rbfardjit'; % The mapping kernel between the layers
25 | Q = {1, 1, 1}; % Dimensionality of the latent space in each layer
26 | H = 3;
27 | initSNR = {100, 100, 100}; % Initial Signal to Noise ratio per layer
28 |
29 | initX = 'ppca';
30 | % initX = 'vargplvm';
31 | %initX = 'fgplvm';
32 |
33 | % %- options for the BayesianGPLVM used to initialise the variational means
34 | stackedInitIters = 150;
35 | stackedInitVardistIters = 100;
36 | stackedInitSNR = 100;
37 |
38 |
39 | initVardistIters = 200;
40 | itNo = 1000;
41 | N = 40;
42 | K = 15;
43 |
44 | %-------------------------
45 |
46 | % Any configuration variables that are not already set are given a default
47 | % value here
48 | if ~exist('experimentNo'), experimentNo = 404; end
49 | if ~exist('initial_X'), initial_X = 'concatenated'; end
50 | if ~exist('baseKern'), baseKern = {'linard2','white','bias'}; end
51 | if ~exist('itNo'), itNo = 100; end
52 | if ~exist('initVardistIters'), initVardistIters = []; end
53 | if ~exist('multVargplvm'), multVargplvm = false; end
54 | if ~exist('dynamicsConstrainType'), dynamicsConstrainType = {'time'}; end
55 |
56 | hsvargplvm_init;
57 | % Automatically calibrate initial variational covariances
58 | globalOpt.vardistCovarsMult = [];
59 |
60 | a = -1; b = 0;
61 | inpX = a + (b-a).*rand(floor(N/2),1);
62 | Ytr = zeros(floor(N/2), 1) + 0.005.*randn(floor(N/2),1);
63 | a = 0.0000001; b = 1;
64 | inpX = [inpX; a + (b-a).*rand(floor(N/2),1)];
65 | Ytr = [Ytr; ones(floor(N/2), 1) + 0.005.*randn(floor(N/2),1)];
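% The above creates a noisy step function: inputs drawn uniformly from
% [-1, 0] map to outputs around 0, and inputs drawn from (0, 1] map to
% outputs around 1.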
66 |
67 | globalOpt.dataSetName = toyType;
68 |
69 | [options, optionsDyn] = hsvargplvmOptions(globalOpt, inpX);
70 |
71 |
72 | % ---- Potential special initialisations for X -----
73 | if ~iscell(globalOpt.initX) && strcmp(globalOpt.initX, 'inputs')
74 | options = rmfield(options, 'initX');
75 | for i=1:options.H
76 | options.initX{i} = inpX;
77 | end
78 | optionsDyn.initX = inpX;
79 | globalOpt.initX = options.initX;
80 | end
81 |
82 |
83 |
84 | options.optimiser = 'scg2';
85 |
86 | % Just rewrite all options into a struct of cells
87 | optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
88 | % Don't mind the following for loop... it just gives the extra possibility
89 | % of initialising the latent space with Bayesian GPLVM or GPLVM (see
90 | % hsvargplvm_init on how to activate this).
91 | initXOptions = cell(1, options.H);
92 | for h=1:options.H
93 |     if strcmp(optionsAll.initX, 'vargplvm') || strcmp(optionsAll.initX, 'fgplvm')
94 |         initXOptions{h}{1} = optionsAll;
95 |         % Don't allow the D >> N trick for layers > 1
96 | if h~=1
97 | if isfield(initXOptions{h}{1}, 'enableDgtN')
98 | initXOptions{h}{1}.enableDgtN = false;
99 | end
100 | end
101 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
102 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
103 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
104 | initXOptions{h}{1}.initX = 'ppca';
105 | initXOptions{h}{1}.initSNR = 90;
106 | initXOptions{h}{1}.numActive = 50;
107 | initXOptions{h}{2} = 160;
108 | initXOptions{h}{3} = 30;
109 | if exist('stackedInitVardistIters'), initXOptions{h}{2} = stackedInitVardistIters; end
110 | if exist('stackedInitIters'), initXOptions{h}{3} = stackedInitIters; end
111 | if exist('stackedInitSNR'), initXOptions{h}{1}.initSNR = stackedInitSNR; end
112 | if exist('stackedInitK'), initXOptions{h}{1}.numActive = stackedInitK; end
113 | else
114 | initXOptions{h} = {};
115 | end
116 | end
117 | %---
118 |
119 | % Create the deep GP based on the model options, global options
120 | % (configuration) and options for initialising the latent spaces X
121 | model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);
122 |
123 | % Since we do regression, we need to add a GP on the parent node. This GP
124 | % couples the inputs and is parametrised by options in a struct "optionsDyn".
125 | model = hsvargplvmAddParentPrior(model, globalOpt, optionsDyn);
126 |
127 |
128 | params = hsvargplvmExtractParam(model);
129 | model = hsvargplvmExpandParam(model, params);
130 | model.globalOpt = globalOpt;
131 |
132 | fprintf('# Scales after init. latent space:\n')
133 | hsvargplvmShowScales(model,false);
134 | %%
135 | [model,modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
136 |
--------------------------------------------------------------------------------
/deepGP/matlab/demHighFive1.m:
--------------------------------------------------------------------------------
1 |
2 | % DEMHIGHFIVE1 Demonstration of hierarchical GP-LVM on 'high five' motion capture data.
3 | %
4 | % Description:
5 | %
6 |
7 | % Copyright (c) 2007 Neil D. Lawrence
8 | % demHighFive1.m version 1.1
9 |
10 | if exist('diaryFile')
11 | diary(diaryFile)
12 | end
13 |
14 | % Fix seeds
15 | randn('seed', 1e5);
16 | rand('seed', 1e5);
17 |
18 | hsvargplvm_init;
19 |
20 |
21 |
22 | dataSetName = 'highFive';
23 | capName = dataSetName;
24 | capName(1) = upper(capName(1));
25 | experimentNo = 1;
26 | dirSep = filesep;
27 | baseDir = datasetsDirectory;
28 |
29 |
30 | %--- Load data
31 | try
32 | load([baseDir 'dem' dataSetName]);
33 | catch
34 | [void, errid] = lasterr;
35 | if strcmp(errid, 'MATLAB:load:couldNotReadFile');
36 | skelA = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20.asf']);
37 | [YA, skelA] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '20' dirSep '20_11.amc'], skelA);
38 | seqInd = [50:4:113 114:155 156:4:size(YA, 1)];
39 | YA = YA(seqInd, :);
40 | % YA(:, [4:end]) = asind(sind(YA(:, [4:end])));
41 | skelB = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21.asf']);
42 | [YB, skelB] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '21' dirSep '21_11.amc'], skelB);
43 | YB = YB(seqInd, :);
44 | % YB(:, [4:end]) = asind(sind(YB(:, [4:end])));
45 | save([baseDir 'dem' dataSetName], 'YA', 'YB', 'skelA', 'skelB', ...
46 | 'seqInd');
47 | else
48 | error(lasterr);
49 | end
50 | end
51 |
52 | Yall{1} = YA;
53 | Yall{2} = YB;
54 | clear('YA','YB');
55 |
56 | %-- Set up model
57 | numberOfDatasets = length(Yall);
58 | globalOpt.indPoints = min(globalOpt.indPoints, size(Yall{1},1));
59 |
60 | %-- Load datasets
61 | for i=1:numberOfDatasets
62 | Y = Yall{i};
63 | dims{i} = size(Y,2);
64 | N{i} = size(Y,1);
65 | indTr = globalOpt.indTr;
66 | if indTr == -1
67 | indTr = 1:N{i};
68 | end
69 | if ~exist('Yts')
70 | indTs = setdiff(1:size(Y,1), indTr);
71 | Yts{i} = Y(indTs,:);
72 | end
73 | Ytr{i} = Y(indTr,:);
74 |
75 | t{i} = linspace(0, 2*pi, size(Y, 1)+1)'; t{i} = t{i}(1:end-1, 1);
76 | timeStampsTraining{i} = t{i}(indTr,1); %timeStampsTest = t(indTs,1);
77 | end
78 |
79 | for i=2:numberOfDatasets
80 | if N{i} ~= N{i-1}
81 | error('The number of observations in each dataset must be the same!');
82 | end
83 | end
84 |
85 |
86 |
87 | clear('Y')
88 |
89 |
90 | %%--- Optimise %%--------- TO FIX (from this point and below) (this is now taken from svargplvm)
91 | options = svargplvmOptions(Ytr, globalOpt, labelsTrain);
92 |
93 |
94 |
95 | if ~isempty(globalOpt.dynamicsConstrainType)
96 | for i=1:numberOfDatasets
97 | % Set up dynamcis (i.e. back-constraints) model
98 | optionsDyn{i}.type = 'vargpTime';
99 | optionsDyn{i}.inverseWidth=30;
100 | % optionsDyn.vardistCovars = vardistCovarsMult;
101 | optionsDyn{i}.initX = globalOpt.initX;
102 | optionsDyn{i}.constrainType = globalOpt.dynamicsConstrainType;
103 |
104 | if exist('timeStampsTraining')
105 | optionsDyn{i}.t = timeStampsTraining;
106 | end
107 | if exist('labelsTrain') && ~isempty(labelsTrain)
108 | optionsDyn{i}.labels = labelsTrain;
109 | end
110 | end
111 | else
112 | optionsDyn= [];
113 | end
114 |
115 |
116 |
117 |
118 | model = svargplvmModelCreate(Ytr, globalOpt, options, optionsDyn);
119 | if exist('diaryFile')
120 | model.diaryFile = diaryFile;
121 | end
122 |
123 | model.globalOpt = globalOpt;
124 | model.options = options;
125 |
126 | %%%% TEMP
127 | if exist('whiteVar')
128 | model.dynamics.kern.comp{2}.variance = whiteVar;
129 | end
130 | %%%%
131 |
132 | % Force kernel computations
133 | params = svargplvmExtractParam(model);
134 | model = svargplvmExpandParam(model, params);
135 |
136 | %%
137 | %fprintf('# Median of vardist. covars: %d \n',median(median(model.vardist.covars)));
138 | %fprintf('# Min of vardist. covars: %d \n',min(min(model.vardist.covars)));
139 | %fprintf('# Max of vardist. covars: %d \n',max(max(model.vardist.covars)));
140 |
141 | if displayIters
142 | model = svargplvmOptimiseModel(model);
143 | else
144 | model = svargplvmOptimiseModelNoDisplay(model);
145 | end
146 |
147 | %--------
148 |
149 |
150 |
151 |
152 | %%
153 | colordef white
154 | ax = hgplvmHierarchicalVisualise(model, visualiseNodes, [], [0.03 ...
155 | 0.5 0.03 0.03])
156 | tar = get(ax, 'cameratarget');
157 | pos = get(ax, 'cameraposition');
158 | newPos = tar + (rotationMatrix(0, -pi/8, 3*pi/2)*(pos - tar)')';
159 | set(ax, 'cameraposition', newPos)
160 | set(ax, 'xlim', [-20 25]);
161 | set(ax, 'ylim', [-15 8])
162 | set(ax, 'visible', 'off')
163 |
164 |
165 |
166 |
--------------------------------------------------------------------------------
/deepGP/matlab/demHsvargplvmHighFive1.m:
--------------------------------------------------------------------------------
1 | %{
2 | clear; experimentNo=1; baseKern = 'rbfardjit'; initial_X = 'separately'; tic; demHsvargplvmHighFive1; toc
3 | %}
4 |
5 | % Fix seeds
6 | randn('seed', 1e5);
7 | rand('seed', 1e5);
8 |
9 |
10 | if ~exist('experimentNo'), experimentNo = 404; end
11 | if ~exist('baseKern'), baseKern = 'rbfardjit'; end %baseKern = {'linard2','white','bias'};
12 | if ~exist('initial_X'), initial_X = 'separately'; end
13 |
14 | dataSetName = 'highFive';
15 |
16 |
17 | hsvargplvm_init;
18 |
19 | % ------- LOAD DATASET
20 | YA = vargplvmLoadData('hierarchical/demHighFiveHgplvm1',[],[],'YA');
21 | YB = vargplvmLoadData('hierarchical/demHighFiveHgplvm1',[],[],'YB');
22 |
23 |
24 |
25 | if globalOpt.multOutput
26 | mergeData = 'horConcat';
27 | %if ~exist('mergeData') || ~mergeData
28 | % warning('Mult.Output selected but operating on separate datasets..!')
29 | %end
30 | if ~globalOpt.enableParallelism
31 |         warning('Mult. Output option selected but without parallelism!')
32 | end
33 | %------- REMOVE dims with very small var (then when sampling outputs, we
34 | % can replace these dimensions with the mean)
35 | vA = find(var(YA) < 1e-7);
36 | meanRedundantDimA = mean(YA(:, vA));
37 | dmsA = setdiff(1:size(YA,2), vA);
38 | YA = YA(:,dmsA);
39 |
40 | vB = find(var(YB) < 1e-7);
41 | meanRedundantDimB = mean(YB(:, vB));
42 | dmsB = setdiff(1:size(YB,2), vB);
43 | YB = YB(:,dmsB);
44 | %---
45 | end
46 |
47 | Yall{1} = YA; Yall{2} = YB;
48 |
49 | if exist('mergeData')
50 | switch mergeData
51 | % Horizontally concatenated the two motions and present them as a
52 | % single dataset which will have multiple outputs
53 | case 'horConcat'
54 | Ynew = [Yall{1} Yall{2}];
55 | % Subsample
56 | if exist('subsample') && subsample
57 | Ynew = Ynew(1:2:end,:); %%%%%%%%%%
58 | end
59 | clear Yall;
60 |             for d=1:size(Ynew,2) % one output modality per dimension
61 | Yall{d} = Ynew(:,d);
62 | end
63 | end
64 | end
65 |
66 | %%
67 |
68 | options = hsvargplvmOptions(globalOpt);
69 | options.optimiser = 'scg2';
70 |
71 |
72 |
73 | %--- In case vargplvmEmbed is used to initialise the latent spaces...
74 | optionsAll = hsvargplvmCreateOptions(Yall, options, globalOpt);
75 | initXOptions = cell(1, options.H);
76 | for h=1:options.H
77 |     if strcmp(optionsAll.initX, 'vargplvm') || strcmp(optionsAll.initX, 'fgplvm')
78 |         initXOptions{h}{1} = optionsAll;
79 |         % Don't allow the D >> N trick for layers > 1
80 | if h~=1
81 | if isfield(initXOptions{h}{1}, 'enableDgtN')
82 | initXOptions{h}{1}.enableDgtN = false;
83 | end
84 | end
85 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
86 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
87 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
88 | initXOptions{h}{1}.initX = 'ppca';
89 | initXOptions{h}{1}.initSNR = 90;
90 | initXOptions{h}{1}.numActive = 50;
91 | initXOptions{h}{2} = 160;
92 | initXOptions{h}{3} = 30;
93 | if exist('stackedInitVardistIters'), initXOptions{h}{2} = stackedInitVardistIters; end
94 | if exist('stackedInitIters'), initXOptions{h}{3} = stackedInitIters; end
95 | if exist('stackedInitSNR'), initXOptions{h}{1}.initSNR = stackedInitSNR; end
96 | if exist('stackedInitK'), initXOptions{h}{1}.numActive = stackedInitK; end
97 | else
98 | initXOptions{h} = {};
99 | end
100 | end
101 | %---
102 |
103 |
104 | model = hsvargplvmModelCreate(Yall, options, globalOpt, initXOptions);
105 |
106 |
107 | params = hsvargplvmExtractParam(model);
108 | model = hsvargplvmExpandParam(model, params);
109 | modelInit = model;
110 |
111 | %%
112 | model.globalOpt = globalOpt;
113 |
114 | [model, prunedModel, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
115 |
116 | % For more iters...
117 | %modelOld = model;
118 | %model = hsvargplvmOptimiseModel(model, true, true, [], {0, [1000]});
119 |
120 |
121 | return
122 | %%%%%%%%%%%%%%%%%%%%%%% VISUALISATION %%%%%%%%%%%%%%%%%%%%%%%%
123 | % Now call:
124 |
125 | %% %--- Scales
126 | if globalOpt.multOutput
127 | close all
128 | SNR = hsvargplvmShowSNR(model,[],false);
129 | exclDim = find(SNR{1} < 6); % Exclude from the computations the dimensions that were learned with very low SNR
130 | [clu, clu2]= hsvargplvmClusterScales(model.layer{1}, 4,[],exclDim);
131 | scales = hsvargplvmRetainedScales(model);
132 | imagesc(clu(1:56)'); title('Scales model A')
133 | cl = caxis;
134 | figure;
135 | imagesc(clu(57:end)'); caxis(cl); title('Scales model B')
136 |
137 | figure
138 | imagesc(clu2(1:56)'); cl = caxis; title('Bin. Scales model A')
139 | figure
140 | imagesc(clu2(57:end)');cl = caxis; title('Bin. Scales model B')
141 | end
142 |
143 | %% % --- Skeleton
144 | %%
145 | % Now call:
146 | %hsvargplvmShowSkel(model);
147 |
148 | %hsvargplvmShowSkel2(model); %%% BETTER
--------------------------------------------------------------------------------
/deepGP/matlab/demToyRegressionSimple.m:
--------------------------------------------------------------------------------
1 | % Regression demo with Deep GPs on toy data.
2 | % This demo is kept minimal. See tutorial.m for a more complete demo.
3 | % Andreas Damianou, 2015
4 |
5 | clear
6 |
7 | rand('seed', 2)
8 | randn('seed', 2)
9 |
10 | % ----- Configuration
11 | Ntr=25; % Number of training data
12 | K=10; % Number of inducing points
13 | Q=6; % Dimensionality of hidden (latent) layers
14 | H=2; % Number of layers
15 | dynamicKern = {'lin','white','bias'}; % The kernel to be used in the uppermost level which "sees" the inputs
16 | baseKern = 'rbfardjit'; % The kernel to be used in the intermediate levels
17 | initX = 'inputsOutputs';
18 |
19 | % This is called "dynamics" and "time" for historical reasons. What it actually
20 | % means is that the inputs in the uppermost layer are treated as coupled.
21 | dynamicsConstrainType = {'time'};
22 |
23 | % Number of iterations to perform for initialising the variational
24 | % distribution (initVardistIters) and for normal optimisation. By passing
25 | % vectors instead of single numbers we get a sort of "annealing"
26 | % optimisation schedule, e.g. itNo = [100 100] means that after 100
27 | % optimisation steps the optimiser will restart (sometimes this helps
28 | % to avoid local optima).
29 | if ~exist('initVardistIters','var'), initVardistIters = [repmat(500,1,5)]; end
30 | if ~exist('itNo','var'), itNo = [1000 1000]; end
31 | if ~exist('initSNR','var'), initSNR = {150, 350}; end % Initial Signal To Noise ratio per layer
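% For example (illustrative values): initVardistIters = [200 200] and
% itNo = [500 500] would run two restarts of variational-distribution-only
% optimisation followed by two restarts of full optimisation.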
32 |
33 | % Initialise script based on the above variables. This returns a struct
34 | % "globalOpt" which contains all configuration options
35 | hsvargplvm_init;
36 | % Automatically calibrate initial variational covariances
37 | globalOpt.vardistCovarsMult = [];
38 | globalOpt.dataSetName = 'toyRegression';
39 |
40 | % Create toy data. After the following three lines are called, we will
41 | % have the following:
42 | % inpX / Ytr: the training inputs / outputs
43 | % Xstar / Yts: the test inputs / outputs
44 | Ntoy = 150; Dtoy=15; toyType='hierGpsNEW';
45 | demToyDynamicsCreateData
46 | demToyDynamicsSplitDataset % Split into training and test set
47 |
48 | %% ----- Run a normal GP to compare
49 |
50 | fprintf('# ----- Training a fitc GP... \n')
51 | optionsGP = gpOptions('fitc');
52 | optionsGP.numActive = globalOpt.K; %size(inpX,1);
53 |
54 | modelGPfitc = gpCreate(size(inpX,2), size(Ytr{1},2), inpX, Ytr{1}, optionsGP);
55 | modelGPfitc = gpOptimise(modelGPfitc, 1, 500);
56 | [muGPfitc, varSigmaGPfitc] = gpPosteriorMeanVar(modelGPfitc, Xstar);
57 | errorGPfitc = sum(mean(abs(muGPfitc-Yts{1}),1));
58 | errorRecGPfitc = sum(mean(abs(gpPosteriorMeanVar(modelGPfitc, inpX)-Ytr{1}),1));
59 |
60 | %% ------ Now run a deep GP
61 |
62 | [options, optionsDyn] = hsvargplvmOptions(globalOpt, inpX);
63 |
64 | % Create the deep GP based on the model options, global options
65 | % (configuration) and options for initialising the latent spaces X
66 | [model, options, globalOpt, optionsDyn] = hsvargplvmModelCreate(Ytr, options, globalOpt, [], optionsDyn);
67 |
68 | % Since we do regression, we need to add a GP on the parent node. This GP
69 | % couples the inputs and is parametrised by options in a struct "optionsDyn".
70 | model = hsvargplvmAddParentPrior(model, globalOpt, optionsDyn);
71 |
72 | % This just ensures model parameters are up-to-date
73 | params = hsvargplvmExtractParam(model); model = hsvargplvmExpandParam(model, params);
74 |
75 | % Complain if SNR is too low
76 | model.throwSNRError = true; model.SNRErrorLimit = 3;
77 |
78 | % Add a prior on the beta parameter, to avoid low SNR problems.
79 | model = hsvargplvmControlSNR(model);
80 |
81 | fprintf('# Scales after init. latent space:\n')
82 | hsvargplvmShowScales(model,false);
83 |
84 | % Optimisation
85 | modelInitVardist = hsvargplvmOptimiseModel(model, 0, 0, [], {globalOpt.initVardistIters, 0});
86 | model = hsvargplvmOptimiseModel(modelInitVardist, 1, 1, [], {0, globalOpt.itNo});
87 |
88 | %% ------------ Predictions and Errors
89 |
90 | % Prediction from the deep GP
91 | [Testmeans Testcovars] = vargplvmPredictPoint(model.layer{end}.dynamics, Xstar);
92 | [mu, varsigma] = hsvargplvmPosteriorMeanVarSimple(model, Testmeans, Testcovars);
93 | errorDeepGP = sum(mean(abs(mu-Yts{1}),1));
94 | errorDeepGPNoCovars = sum(mean(abs(hsvargplvmPosteriorMeanVarSimple(model, Testmeans)-Yts{1}),1));
95 |
96 | % Mean predictor's error
97 | errorMean = sum(mean(abs(repmat(mean(Ytr{1}),size(Yts{1},1),1) - Yts{1}),1));
98 |
99 | % Polynomial (degree-2) regression error; reported as "LinReg" below
100 | for dd=1:size(Ytr{1},2)
101 | [p, ErrorEst] = polyfit(inpX,Ytr{1}(:,dd),2);
102 | yLinReg(:,dd)=polyval(p,Xstar);
103 | end
104 | errorLinReg = sum(mean(abs(yLinReg - Yts{1}),1));
105 |
106 | % Print all
107 | fprintf('\n\n#### ERRORS:\n')
108 | fprintf('# Error GPfitc pred : %.4f\n', errorGPfitc);
109 | fprintf('# Error DeepGP pred : %.4f / %.4f (with/without covars)\n', errorDeepGP, errorDeepGPNoCovars);
110 | fprintf('# Error Mean : %.4f\n', errorMean);
111 | fprintf('# Error LinReg : %.4f\n', errorLinReg);
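% The demo reports the mean absolute error summed over output dimensions.
% If you also want an RMSE for the deep GP (a sketch using mu and Yts{1}
% computed above):
%   rmseDeepGP = sqrt(mean((mu(:) - Yts{1}(:)).^2));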
112 |
--------------------------------------------------------------------------------
/deepGP/matlab/demHsvargplvmRegression.m:
--------------------------------------------------------------------------------
1 | % DEMHSVARGPLVMREGRESSION A script to run deep GP regression.
2 | %
3 | % This is a generic demo. You can replace the data used here with your own
4 | % data and run it (ie Ytr{1} has to be the observed data and inpX has to be
5 | % your observed labels).
6 | %
7 | % To configure the deepGP used here, do the following:
8 | % 1. If any of the fields you want to change appear in this demo in the
9 | % "configuring the deep gp" section, then change them directly there.
10 | % 2. If the field you're looking for is not there, then check the available
11 | % configuration options in hsvargplvm_init.m. The way this works is that
12 | % you just need to overwrite the corresponding workspace variables,
13 | % e.g. if in hsvargplvm_init.m you see a field "fixInducing", you can
14 | % overwrite this field by just calling this demo as:
15 | % >> fixInducing = true; demHsvargplvmRegression
16 | % 3. If the field you're looking for is not in hsvargplvm_init.m, then
17 | % check (in this order) svargplvm_init.m and vargplvm_init.m, again
18 | % overwriting the configuration by defining the corresponding variable in
19 | % the workspace, as above.
20 | %
21 | % SEEALSO: demHsvargplvmClassification.m
22 | %
23 | % COPYRIGHT: Andreas Damianou, 2014
24 | % DEEPGP
25 |
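% Example usage (a sketch with hypothetical toy data; any inpX / Ytr{1}
% with matching numbers of rows will do):
%   inpX = linspace(-1, 1, 40)';
%   Ytr{1} = [sin(3*inpX) cos(3*inpX)] + 0.05*randn(40, 2);
%   Q = 3; K = 15; itNo = [500 500];
%   demHsvargplvmRegression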
26 | %% ------ CONFIGURING THE DEEP GP
27 | %--- Mandatory configurations
28 | if ~exist('Ytr', 'var'), error('You need to specify your outputs in Ytr{1}=...'); end
29 | if ~exist('inpX', 'var'), error('You need to specify your inputs in inpX=...'); end
30 |
31 | %--- Optional configurations: Whatever configuration variable is not already set (ie does not exist
32 | % as a variable in the workspace) is set to a default value.
33 | if ~exist('experimentNo','var'), experimentNo = 404; end
34 | if ~exist('K','var'), K = 30; end
35 | if ~exist('Q','var'), Q = 6; end
36 | if ~exist('baseKern','var'), baseKern = 'rbfardjit'; end % Alternative: {'rbfard2','white','bias'}
37 | % This is called "dynamics" and "time" for historical reasons. It actually refers to a coupling GP in the uppermost level.
38 | if ~exist('dynamicsConstrainType','var'), dynamicsConstrainType = {'time'}; end
39 | stackedOpt = [];
40 | if exist('stackedInitVardistIters', 'var'), stackedOpt.stackedInitVardistIters=stackedInitVardistIters; end
41 | if exist('stackedInitIters', 'var'), stackedOpt.stackedInitIters=stackedInitIters; end
42 | if exist('stackedInitSNR', 'var'), stackedOpt.stackedInitSNR=stackedInitSNR; end
43 | if exist('stackedInitK', 'var'), stackedOpt.stackedInitK=stackedInitK; end
44 | if ~exist('initXOptions', 'var'), initXOptions = []; end
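% The stacked* variables above are optional settings for pre-training each
% layer during initialisation; e.g. (hypothetical values):
%   stackedInitVardistIters = 100; stackedInitIters = 300; stackedInitSNR = 100;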
45 |
46 | % Initialise script based on the above variables. This returns a struct
47 | % "globalOpt" which contains all configuration options
48 | hsvargplvm_init;
49 |
50 | % Automatically calibrate initial variational covariances - better to not change that
51 | globalOpt.vardistCovarsMult = [];
52 |
53 | [options, optionsDyn] = hsvargplvmOptions(globalOpt, inpX);
54 |
55 | %% ------------- Initialisation and model creation
56 | % Initialise latent spaces, unless the user already did that
57 | if ~(iscell(options.initX) && prod(size(options.initX{1})) > 1)
58 | [globalOpt, options, optionsDyn, initXOptions] = hsvargplvmRegressionInitX(globalOpt, options, optionsDyn, inpX, Ytr, stackedOpt);
59 | end
60 |
61 |
62 | % Create the deep GP based on the model options, global options
63 | % (configuration) and options for initialising the latent spaces X
64 | model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);
65 |
66 | % Since we do regression, we need to add a GP on the parent node. This GP
67 | % couples the inputs and is parametrised by options in a struct "optionsDyn".
68 | model = hsvargplvmAddParentPrior(model, globalOpt, optionsDyn);
69 |
70 |
71 | %-- We have the option to not learn the inducing points and/or fix them to
72 | % the given inputs.
73 | % Learn inducing points? (This is different to fixInducing, which ties them
74 | % to the X's; if learnInducing is false they will keep their original
75 | % values, ie they won't constitute parameters of the model.)
76 | if exist('learnInducing','var') && ~learnInducing
77 | model = hsvargplvmPropagateField(model, 'learnInducing', false);
78 | end
79 | %--
80 |
81 | if globalOpt.fixInducing
82 | model = hsvargplvmPropagateField(model, 'fixInducing', true);
83 | for m=1:model.layer{end}.M % Not implemented yet for parent node
84 | model.layer{end}.comp{m}.fixInducing = false;
85 | end
86 | end
87 |
88 |
89 | %!!!!!!!!!!!!!!!!!!!!!!!!-----------------------
90 | if exist('DEBUG_entropy','var') && DEBUG_entropy
91 | model.DEBUG_entropy = true;for itmp=1:model.H, model.layer{itmp}.DEBUG_entropy = true; end
92 | end
93 |
94 | params = hsvargplvmExtractParam(model);
95 | model = hsvargplvmExpandParam(model, params);
96 | model.globalOpt = globalOpt;
97 | % Computations can be made in parallel, if option is activated
98 | model.parallel = globalOpt.enableParallelism;
99 |
100 | fprintf('# Scales after init. latent space:\n')
101 | hsvargplvmShowScales(model,false);
102 | %% OPTIMISATION
103 | [model,modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
104 |
105 | % If you decide to train for further iterations...
106 | % modelOld = model; [model,modelPruned, ~] = hsvargplvmOptimiseModel(model, true, true, [], {0, [100]});
107 |
108 |
109 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmFeatureClassification.m:
--------------------------------------------------------------------------------
1 | % HSVARGPLVMFEATURECLASSIFICATION Use deep GP features to perform
2 | % discriminative classification with logistic regression.
3 | %
4 | % Andreas C. Damianou, 2015
5 | % SEEALSO: vargplvmFeatureClassification.m
6 |
7 | function [LogRegError, LogRegErrorExt, LogRegErrorExtOut] = hsvargplvmFeatureClassification(model, data, options)
8 |
9 | % Train classifiers from X to labels
10 | labels = transformLabels(data.lbls)';
11 | labelsTs = transformLabels(data.lblsTs)';
12 |
13 | lOut = options.lOut;
14 | samplesPerObserved = options.samplesPerObserved;
15 | samplesPerOutput = options.samplesPerOutput;
16 | % Use only SOME dimensions as features (e.g. the ones with the best ARD weight)
17 | if ~isfield(options, 'dims') || isempty(options.dims)
18 | dims = 1:model.layer{lOut}.q;
19 | else
20 | dims = options.dims;
21 | end
22 |
23 | % Populate training set
24 | %--- Take variance into account by sampling new data from the distribution
25 | % Init sizes
26 | Xnew = nan(size(model.layer{lOut}.vardist.means,1)*samplesPerObserved, size(model.layer{lOut}.vardist.means,2));
27 | labelsNew = nan(size(Xnew,1), size(labels,2));
28 | k=1;
29 | % Take samples
30 | for n=1:size(model.layer{lOut}.vardist.means,1)
31 | for kk = 1:samplesPerObserved
32 | Xnew(k,:) = model.layer{lOut}.vardist.means(n,:) + randn(size(model.layer{lOut}.vardist.means(n,:))).*sqrt(model.layer{lOut}.vardist.covars(n,:));
33 | labelsNew(k,:) = labels(n,:);
34 | k = k + 1;
35 | end
36 | end
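% An equivalent vectorised sketch of the sampling loop above (the row
% ordering differs from the loop, which is immaterial since Xnew and
% labelsNew are only appended jointly below):
%   mu = repmat(model.layer{lOut}.vardist.means, samplesPerObserved, 1);
%   sd = repmat(sqrt(model.layer{lOut}.vardist.covars), samplesPerObserved, 1);
%   Xnew = mu + randn(size(mu)).*sd;
%   labelsNew = repmat(labels, samplesPerObserved, 1);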
37 | % Augment set with samples
38 | Xext = [model.layer{lOut}.vardist.means; Xnew];
39 | labelsExt = [labels; labelsNew];
40 | clear 'Xnew' 'labelsNew';
41 |
42 |
43 | % If we need to predict from the top layer, we need the GP conditional to
44 | % predict q(x*) from the coupled q(X) = \prod_q q(x_q).
45 | if ~isfield(data, 'X_pred') || isempty(data.X_pred)
46 | [X_Hpred, varX_Hpred] = vargplvmPredictPoint(model.layer{end}.dynamics, data.Yts);
47 | if lOut == model.H
48 | X_pred = X_Hpred;
49 | varX_pred = varX_Hpred;
50 | else
51 | [X_pred, varX_pred] = hsvargplvmPosteriorMeanVar(model, X_Hpred, varX_Hpred, model.H, lOut+1);
52 | end
53 | else
54 | X_pred = data.X_pred;
55 | varX_pred = data.varX_pred;
56 | end
57 |
58 |
59 | % CHECK ALSO: mnrfit and mnrval
60 |
61 | % Training of logistic regression classifier. One for each label
62 | % separately.
63 | nClasses = length(unique(labels));
64 | clear 'B' 'BExt'
65 | for i=1:nClasses
66 | fprintf('\n # LogReg training for class # %d\n', i)
67 | lb = zeros(size(model.layer{lOut}.vardist.means(:,dims),1),1);
68 | lbExt = zeros(size(Xext(:,dims),1),1);
69 | lb(labels == i) = 1;
70 | lbExt(labelsExt == i) = 1;
71 | B{i} = glmfitWrapper(model.layer{lOut}.vardist.means(:,dims), lb,'binomial','logit',[],[],[],[],1000); % Logistic regression
72 | %BExt{i} = glmfit(Xext, lbExt,'binomial','logit'); % Logistic regression
73 | BExt{i} = glmfitWrapper(Xext(:,dims), lbExt,'binomial','logit',[],[],[],[],1000); % Logistic regression
74 | end
75 |
76 | svmmodel = svmtrain(labels, model.layer{options.lOut}.vardist.means(:,dims),'-q');
77 | %svmmodelExt = svmtrain(transformLabels(lbExt), model.layer{options.lOut}.vardist.means(:,dims),'-q');
78 |
79 | [~, acc,~] = svmpredict(labelsTs, X_pred(:,dims), svmmodel,'-q');
80 | %[~, accExt] = svmpredict(labelsTs',data.X_pred, svmmodelExt);
81 |
82 | % Prediction of each binary classifier
83 | Ypred_logReg = zeros(size(data.lblsTs));
84 | Ypred_logRegExtOut = zeros(size(data.lblsTs));
85 | Ypred_logRegExt = zeros(size(data.lblsTs));
86 | for i=1:nClasses
87 | for k=1:samplesPerOutput
88 | % Sample from the OUTPUT distribution, and then average predictions
89 | Xsamp = X_pred + randn(size(X_pred)).*sqrt(varX_pred);
90 | Ypred_logRegExtOut(:,i) = Ypred_logRegExtOut(:,i)+1/samplesPerOutput*glmval(BExt{i}, Xsamp(:,dims), 'logit');
91 | end
92 | Ypred_logReg(:,i) = glmval(B{i}, X_pred(:,dims), 'logit')';
93 | Ypred_logRegExt(:,i) = glmval(BExt{i}, X_pred(:,dims), 'logit')';
94 | end
95 | % Replace predictions with maximum probability (ie, make a decision)
96 | [~,ind]=max(Ypred_logReg');
97 | [~,indExt]=max(Ypred_logRegExt');
98 | [~,indExtOut]=max(Ypred_logRegExtOut');
99 | LogRegError = 0;
100 | LogRegErrorExt = 0;
101 | LogRegErrorExtOut = 0;
102 | for i=1:size(X_pred,1)
103 | LogRegError = LogRegError + (ind(i) ~= labelsTs(i));
104 | LogRegErrorExt = LogRegErrorExt + (indExt(i) ~= labelsTs(i));
105 | LogRegErrorExtOut = LogRegErrorExtOut + (indExtOut(i) ~= labelsTs(i));
106 | end
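% The counting loop above can equivalently be written without a loop, e.g.:
%   LogRegError = sum(ind(:) ~= labelsTs(:));
% and analogously for indExt and indExtOut.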
107 |
108 | N = size(X_pred,1);
109 | fprintf('\n========================== REPORT ===========================\n')
110 | fprintf('# Used features of layer : %d\n',lOut);
111 | fprintf('# Acc Reg: : %.2f%%\n', (N-LogRegError)/N * 100);
112 | fprintf('# Acc Reg: with %d samp. PerObserved : %.2f%%\n', samplesPerObserved,(N-LogRegErrorExt)/N*100);
113 | fprintf('# Acc Reg: with %d samp. PerObserved, %d samp.PerOutput : %.2f%%\n', samplesPerObserved, samplesPerOutput,(N-LogRegErrorExtOut)/N*100);
114 | fprintf('# Acc SVM: : %.2f%%\n', acc(1));
115 | %fprintf('# Acc SVM: with %d samp. PerObserved : %.2f%%\n', samplesPerObserved,accExt(1));
116 | fprintf('----------------------------------------------------------------\n\n');
--------------------------------------------------------------------------------
/deepGP/matlab/loadMocapData.m:
--------------------------------------------------------------------------------
1 | function [Y,skel, channels] = loadMocapData()
2 |
3 | %YA = vargplvmLoadData('hierarchical/demHighFiveHgplvm1',[],[],'YA');
4 | %{
5 | curDir = pwd;
6 | cd ../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/02/
7 | fileNameAsf='02.asf';
8 | fileNameAmc='02_05.amc';
9 | skel = acclaimReadSkel(fileNameAsf);
10 | [channels, skel] = acclaimLoadChannels(fileNameAmc, skel);
11 | % Remove root node?
12 | channels(:, [1 3]) = zeros(size(channels, 1), 2);
13 | skelPlayData(skel, channels, 1/40);
14 | cd(curDir)
15 | %}
16 | %%
17 | clear
18 | close all
19 | [Y, lbls, Ytest, lblstest,skel] = lvmLoadData2('cmuXNoRoot', '02', {'01','05','10'});
20 | Yorig = Y;
21 | % Remove motion in xz?
22 |
23 | seq = cumsum(sum(lbls)) - [1:31];
24 | % Ywalk = Y(1:3:70,:); % Orig: Y(1:85,:);
25 | % Ypunch = Y(86:13:548,:); % Orig: Y(86:548,:);
26 | % Ywash = Y(549:20:1209,:); % Orig: Y(549:1209,:);
27 | Ywalk = Y(1:2:70,:); % Orig: Y(1:85,:);
28 | Ypunch = Y(86:13:548,:); % Orig: Y(86:548,:);
29 | Ywash = Y(830:6:1140,:); % Orig: Y(549:1209,:);
30 |
31 | [channels xyzDiffIndices] = skelGetChannels(Ywalk);
32 | Ywalk(:, xyzDiffIndices) = zeros(size(Ywalk(:, xyzDiffIndices) ));
33 | [channels xyzDiffIndices] = skelGetChannels(Ypunch);
34 | Ypunch(:, xyzDiffIndices) = zeros(size(Ypunch(:, xyzDiffIndices) ));
35 | [channels xyzDiffIndices] = skelGetChannels(Ywash);
36 | Ywash(:, xyzDiffIndices) = zeros(size(Ywash(:, xyzDiffIndices) ));
37 |
38 |
39 | Y = [Ywalk; Ywash];
40 | [channels] = skelGetChannels(Y);
41 | close; skelPlayData(skel, channels, 1/5);
42 |
43 |
44 | %%
45 | %{
46 | [channelsWalk] = skelGetChannels(Ywalk);
47 | [channelsPunch] = skelGetChannels(Ypunch);
48 | [channelsWash] = skelGetChannels(Ywash);
49 | close; skelPlayData(skel, channelsWalk, 1/20);
50 | close; skelPlayData(skel, channelsPunch, 1/20);
51 | close; skelPlayData(skel, channelsWash, 1/20);
52 | %}
53 |
54 |
55 |
56 | %{
57 | try
58 | load '../cmu13Data.mat'
59 | catch
60 | [Y, lbls] = lvmLoadData2('cmu13');
61 | seq = cumsum(sum(lbls)) - [1:31];
62 |
63 | % load data
64 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
65 | skel = acclaimReadSkel('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13.asf');
66 | % (I think) any motion of the specific subject would do here, just to get
67 | % the channels
68 | [tmpchan, skel] = acclaimLoadChannels('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13_13.amc', skel);
69 | end
70 | %}
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 | %{
79 | See cmu49 for more coherent motions
80 | try
81 | load '../cmu13Data.mat'
82 | catch
83 | [Y, lbls] = lvmLoadData2('cmu13');
84 | seq = cumsum(sum(lbls)) - [1:31];
85 |
86 | % load data
87 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
88 | skel = acclaimReadSkel('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13.asf');
89 | % (I think) any motion of the specific subject would do here, just to get
90 | % the channels
91 | [tmpchan, skel] = acclaimLoadChannels('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13_13.amc', skel);
92 | end
93 | Yjump = Y(35:90,:);
94 | Yjacks = Y(677:end,:);
95 | channels = skelGetChannels(Yjump);
96 | close; skelPlayData(skel, channels, 1/20);
97 | %}
98 |
99 | %%
100 | function createCmuData(subject, motions)
101 | baseDir = datasetsDirectory;
102 | dirSep = filesep;
103 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13');
104 | skel = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '13' dirSep '13.asf']);
105 | seq = cumsum(sum(lbls)) - [1:31];
106 | [tmpchan, skel] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '13' dirSep '13_16.amc'], skel);
107 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
108 |
109 |
110 | try
111 | load '../cmu13Data.mat'
112 | catch
113 | [Y, lbls] = lvmLoadData2('cmu13');
114 | seq = cumsum(sum(lbls)) - [1:31];
115 |
116 | % load data
117 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
118 | skel = acclaimReadSkel('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13.asf');
119 | % (I think) any motion of the specific subject would do here, just to get
120 | % the channels
121 | [tmpchan, skel] = acclaimLoadChannels('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13_13.amc', skel);
122 | end
123 |
124 |
125 |
126 |
127 |
128 |
129 | %%
130 | %{
131 | %%%%%%%%%%%%%%%%%
132 | %baseDir = datasetsDirectory;
133 | %dirSep = filesep;
134 | %[Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13');
135 | %skel = acclaimReadSkel([baseDir 'mocap' dirSep 'cmu' dirSep '13' dirSep '13.asf']);
136 | %seq = cumsum(sum(lbls)) - [1:31];
137 | %[tmpchan, skel] = acclaimLoadChannels([baseDir 'mocap' dirSep 'cmu' dirSep '13' dirSep '13_16.amc'], skel);
138 | %[Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
139 |
140 |
141 | try
142 | load '../cmu13Data.mat'
143 | catch
144 | [Y, lbls] = lvmLoadData2('cmu13');
145 | seq = cumsum(sum(lbls)) - [1:31];
146 |
147 | % load data
148 | [Y, lbls, Ytest, lblstest] = lvmLoadData2('cmu13NoRoot');
149 | skel = acclaimReadSkel('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13.asf');
150 | % (I think) any motion of the specific subject would do here, just to get
151 | % the channels
152 | [tmpchan, skel] = acclaimLoadChannels('../../../vargplvmDEPENDENCIES/DATASETS0p1371/mocap/cmu/13/13_13.amc', skel);
153 | end
154 |
155 | Y = Y(1:12:1430,:);
156 | Y = Y([1:45 72:end],:);
157 | Y = Y([1:50 70:end],:);
158 |
159 | [Ywalk, lbls, Ytest, lblstest] = lvmLoadData('cmu35gplvm');
160 | Y = [Y; Ywalk(1:12:100,:)];
161 |
162 |
163 | channels = skelGetChannels(Y);
164 |
165 |
166 |
167 | %skelPlayData(skel, channels, 1/20);
168 | %}
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmOptimiseModel.m:
--------------------------------------------------------------------------------
1 | function [model, modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, varargin)
2 | % HSVARGPLVMOPTIMISEMODEL High-level optimiser of a deep GP model.
3 | % FORMAT (SHORT): model, pruneModel, saveModel, globalOpt, {initVardistIters, itNo}
4 | % FORMAT:
5 | % ARG model: the initial model to be optimised
6 | % (the last two arguments override the globalOpt values)
7 | % varargin (all optional) can be:
8 | % ARG pruneModel: prune model before saving it, if set to true (saves
9 | % space), if also saveModel is true
10 | % ARG saveModel: whether to save the optimised model between runs
11 | % ARG globalOpt: The structure of global experiment options as configured
12 | % by hsvargplvm_init.
13 | % ARG {initVardistIters, itNo}: how many iterations to train the model for
14 | % initialising the variational distribution (first cell) and for normal
15 | % iterations. These values are normally present in globalOpt (if provided),
16 | % but if they are also given here as arguments, they override the
17 | % aforementioned.
18 | %
19 | % RETURN: The optimised model, the pruned model and the model obtained
20 | % after initialising the variational distribution
21 | %
22 | % See also: hsvargplvmOptimise.m, hsvargplvm_init.m, tutorial.m
23 | %
24 | % COPYRIGHT: Andreas C. Damianou, 2013
25 | %
26 | % DEEPGP
27 |
28 | modelPruned = [];
29 | modelInitVardist = [];
30 | pruneModel = true;
31 | saveModel = true;
32 |
33 | if isfield(model, 'saveName')
34 | if strcmp(model.saveName, 'noSave')
35 | saveModel = false;
36 | end
37 | end
38 |
39 | if isfield(model, 'globalOpt') && (length(varargin) < 3 || isempty(varargin{3}))
40 | globalOpt = model.globalOpt;
41 | else
42 | globalOpt = varargin{3};
43 | end
44 |
45 |
46 | if nargin > 2
47 | pruneModel = varargin{1};
48 | if length(varargin) > 1
49 | saveModel = varargin{2};
50 | end
51 |
52 | if length(varargin) > 3
53 | globalOpt.initVardistIters = varargin{4}{1};
54 | globalOpt.itNo = varargin{4}{2};
55 | end
56 | end
57 |
58 | if ~isfield(model, 'optim'), model.optim = []; end
59 | if ~isfield(model.optim, 'iters'), model.optim.iters=0; end
60 | if ~isfield(model.optim, 'initVardistIters'), model.optim.initVardistIters = 0; end
61 | % Number of evaluations of the gradient
62 | if ~isfield(model.optim, 'gradEvaluations'), model.optim.gradEvaluations = 0; end
63 | % Number of evaluations of the objective (including line searches)
64 | if ~isfield(model.optim, 'objEvaluations'), model.optim.objEvaluations = 0; end
65 |
66 |
67 | display = 1;
68 |
69 |
70 | i=1;
71 | while ~isempty(globalOpt.initVardistIters(i:end)) || ~isempty(globalOpt.itNo(i:end))
72 | model = hsvargplvmPropagateField(model,'initVardist', false, 1:model.H); % NEW
73 | model = hsvargplvmPropagateField(model,'learnSigmaf', true, 1:model.H); % NEW
74 | % Do not learn beta for a few iterations, for initialisation
75 | if ~isempty(globalOpt.initVardistIters(i:end)) && globalOpt.initVardistIters(i)
76 | %model.initVardist = 1; model.learnSigmaf = 0;
77 | model = hsvargplvmPropagateField(model,'initVardist', true, globalOpt.initVardistLayers);
78 | model = hsvargplvmPropagateField(model,'learnSigmaf', false, globalOpt.initVardistLayers);
79 | fprintf(1,'# Initialising the variational distribution for %d iterations...\n', globalOpt.initVardistIters(i));
80 | [model, gradEvaluations, objEvaluations] = hsvargplvmOptimise(model, display, globalOpt.initVardistIters(i)); % Default: 20
81 | %SNR = hsvargplvmShowSNR(model,[2:model.H]);
82 | %hsvargplvmCheckSNR(SNR);
83 | model.optim.initVardistIters = model.optim.initVardistIters + globalOpt.initVardistIters(i);
84 | model.optim.gradEvaluations = model.optim.gradEvaluations + gradEvaluations;
85 | model.optim.objEvaluations = model.optim.objEvaluations + objEvaluations;
86 | if saveModel
87 | if pruneModel
88 | modelPruned = hsvargplvmPruneModel(model);
89 | fileName=vargplvmWriteResult(modelPruned, modelPruned.type, globalOpt.dataSetName, globalOpt.experimentNo);
90 | else
91 | fileName=vargplvmWriteResult(model, model.type, globalOpt.dataSetName, globalOpt.experimentNo);
92 | end
93 | fprintf('# Saved model %s after initialising the var. distr. for %d iterations...\n\n', fileName,globalOpt.initVardistIters(i))
94 |
95 | end
96 | modelInitVardist=model;
97 | end
98 |
99 | hsvargplvmShowScales(model, false);
100 |
101 | % Optimise the model.
102 |
103 | model.date = date;
104 | if ~isempty(globalOpt.itNo(i:end)) && globalOpt.itNo(i)
105 | model = hsvargplvmPropagateField(model,'initVardist', false, 1:model.H); % NEW
106 | model = hsvargplvmPropagateField(model,'learnSigmaf', true, 1:model.H); % NEW
107 |
108 | iters = globalOpt.itNo(i); % Default: 1000
109 | fprintf(1,'# Optimising the model for %d iterations (session %d)...\n',iters,i);
110 | [model, gradEvaluations, objEvaluations] = hsvargplvmOptimise(model, display, iters);
111 | model.optim.iters = model.optim.iters + iters;
112 | model.optim.gradEvaluations = model.optim.gradEvaluations + gradEvaluations;
113 | model.optim.objEvaluations = model.optim.objEvaluations + objEvaluations;
114 | % Save the results.
115 | if saveModel
116 | if pruneModel
117 | modelPruned = hsvargplvmPruneModel(model);
118 | fileName=vargplvmWriteResult(modelPruned, modelPruned.type, globalOpt.dataSetName, globalOpt.experimentNo);
119 | else
120 | fileName=vargplvmWriteResult(model, model.type, globalOpt.dataSetName, globalOpt.experimentNo);
121 | end
122 | fprintf(1,'# Saved model %s after doing %d iterations\n\n',fileName,iters)
123 | end
124 | end
125 | i = i+1;
126 | end
127 | model.optim.lastLogLik = modelLogLikelihood(model);
128 |
129 |
--------------------------------------------------------------------------------
/deepGP/matlab/lvmVisualiseHierarchical.m:
--------------------------------------------------------------------------------
1 | function lvmVisualiseHierarchical(model, YLbls, ...
2 | visualiseFunction, visualiseModify, showVariance, varargin)
3 |
4 | % LVMVISUALISEHIERARCHICAL Visualise the manifold of a hierarchical model.
5 | % This is a copy of lvmVisualise where the classVisualise function depends on the
6 | % model type. Additionally, there is a flag showVariance which, when set to
7 | % false, does not plot the variance of the inputs in the scatter plot,
8 | % something which saves a lot of computational time for high-dimensional
9 | % data.
10 | %
11 | % SEEALSO : lvmVisualise, lvmClassVisualise, lvmScatterPlot,
12 | % lvmScatterPlotNoVar
13 | %
14 |
15 | % MLTOOLS
16 |
17 | global visualiseInfo
18 |
19 | visualiseInfo.showVariance = showVariance;
20 |
21 | visualiseInfo.activeLayer = 1;
22 | for i = 1:model.numLayers
23 | hierarchicalScatterPlot(model.layer{i}, i, YLbls, showVariance);
24 | end
25 |
26 |
27 |
28 | % Now the data figure (skeleton, image, etc)
29 |
30 | figure(model.numLayers + 1)
31 | clf
32 |
33 | if length(visualiseFunction)>4 & strcmp(visualiseFunction(1:5), 'image') & length(varargin)>0
34 | set(gcf, 'menubar', 'none')
35 | xPixels = 115;
36 | yPixels = 115;
37 | set(gcf, 'position', [232 572 xPixels yPixels/varargin{1}(1)*varargin{1}(2)])
38 | visualiseInfo.comp{1}.visualiseAxes = subplot(1, 1, 1);
39 | xWidth = varargin{1}(1)/xPixels;
40 | yHeight = varargin{1}(2)/yPixels;
41 | set(visualiseInfo.comp{1}.visualiseAxes, 'position', [0.5-xWidth/2 0.5-yHeight/2 xWidth yHeight])
42 | else
43 | visualiseInfo.comp{1}.visualiseAxes =subplot(1, 1, 1);
44 | end
45 | visData = zeros(1,model.layer{1}.numModels);
46 | if(length(visualiseFunction)>4 & strcmp(visualiseFunction(1:5), 'image'))
47 | visData(1) = min(min(model.layer{1}.y));
48 | visData(end) = max(max(model.layer{1}.y));
49 | else
50 | [void, indMax]= max(sum((model.layer{1}.y.*model.layer{1}.y), 2));
51 | visData = model.layer{1}.y(indMax, :);
52 | end
53 |
54 | set(get(visualiseInfo.comp{1}.visualiseAxes, 'title'), 'string', 'Y', 'fontsize', 30);
55 | set(visualiseInfo.comp{1}.visualiseAxes, 'position', [0.05 0.05 0.9 0.8]);
56 |
57 | visualiseInfo.comp{1}.visualiseFunction = str2func(visualiseFunction);
58 | visHandle = visualiseInfo.comp{1}.visualiseFunction(visData, varargin{:});
59 | set(visHandle, 'erasemode', 'xor')
60 |
61 | % Pass the data to visualiseInfo
62 | % visualiseInfo.model = model;
63 | visualiseInfo.comp{1}.varargin = varargin;
64 | visualiseInfo.comp{1}.visualiseModify = str2func(visualiseModify);
65 | visualiseInfo.comp{1}.visHandle = visHandle;
66 |
67 |
68 | hold off
69 |
70 |
71 | function hierarchicalScatterPlot(model, curLayer, YLbls, showVariance)
72 | global visualiseInfo
73 |
74 |
75 | %lvmClassVisualiseFunc = [model.type 'ClassVisualise'];
76 | lvmClassVisualiseFunc = ['stackedvargplvm' 'ClassVisualise'];
77 | if ~exist(lvmClassVisualiseFunc)
78 | lvmClassVisualiseFunc = 'lvmClassVisualise';
79 | end
80 |
81 |
82 | figure(curLayer)
83 | clf
84 | visualiseInfo.comp{curLayer}.dim1 = 1;
85 | visualiseInfo.comp{curLayer}.dim2 = 2;
86 | visualiseInfo.comp{curLayer}.latentPos = zeros(1, model.q);
87 | visualiseInfo.comp{curLayer}.model = model;
88 | visualiseInfo.comp{curLayer}.lbls = YLbls;
89 | if showVariance
90 | visualiseInfo.comp{curLayer}.plotAxes = lvmScatterPlot(model, YLbls);
91 | else
92 | visualiseInfo.comp{curLayer}.plotAxes = lvmScatterPlotNoVar(model, YLbls);
93 | end
94 |
95 | if showVariance
96 | lvmSetPlot;
97 | else
98 | hierSetPlotNoVar(lvmClassVisualiseFunc, curLayer);
99 | end
100 | visualiseInfo.comp{curLayer}.latentHandle = line(0, 0, 'markersize', 20, 'color', ...
101 | [0 0 0], 'marker', '.', 'visible', ...
102 | 'on', 'erasemode', 'xor');
103 |
104 | visualiseInfo.comp{curLayer}.clicked = 0;
105 | visualiseInfo.comp{curLayer}.digitAxes = [];
106 | visualiseInfo.comp{curLayer}.digitIndex = [];
107 |
108 | visualiseInfo.comp{curLayer}.dynamicsRadio = ...
109 | uicontrol('Style', 'radiobutton', ...
110 | 'String', 'Run Dynamics', ...
111 | 'units', 'normalized', ...
112 | 'position', [0 0 0.2 0.05], ...
113 | 'Callback', [lvmClassVisualiseFunc '(''toggleDynamics'')'], ...
114 | 'value', 0);
115 |
116 | visualiseInfo.comp{curLayer}.dynamicsSlider = ...
117 | uicontrol('Style', 'slider', ...
118 | 'String', 'Time', ...
119 | 'sliderStep', [0.01, 0.1], ...
120 | 'units', 'normalized', ...
121 | 'position', [0 0.95 1 0.05], ...
122 | 'callback', [lvmClassVisualiseFunc '(''dynamicsSliderChange'')']);
123 |
124 | if ~isfield(model, 'dynamics') | isempty(model.dynamics)
125 | set(visualiseInfo.comp{curLayer}.dynamicsRadio, 'visible', 'off');
126 | set(visualiseInfo.comp{curLayer}.dynamicsSlider, 'visible', 'off');
127 | else
128 | if ~isfield(model.dynamics, 'dynamicsType')
129 | set(visualiseInfo.comp{curLayer}.dynamicsRadio, 'visible', 'on');
130 | set(visualiseInfo.comp{curLayer}.dynamicsSlider, 'visible', 'off');
131 | else
132 | switch model.dynamics.dynamicsType
133 | case 'regressive'
134 | set(visualiseInfo.comp{curLayer}.dynamicsRadio, 'visible', 'off');
135 | set(visualiseInfo.comp{curLayer}.dynamicsSlider, 'visible', 'on');
136 | set(visualiseInfo.comp{curLayer}.dynamicsSlider, 'min', min(model.dynamics.X), ...
137 | 'max', max(model.dynamics.X), ...
138 | 'value', model.dynamics.X(1))
139 | case 'auto-regressive'
140 | set(visualiseInfo.comp{curLayer}.dynamicsRadio, 'visible', 'on');
141 | set(visualiseInfo.comp{curLayer}.dynamicsSlider, 'visible', 'off');
142 | end
143 | end
144 | end
145 | visualiseInfo.comp{curLayer}.runDynamics = false;
146 |
147 | % Set the callback function
148 | set(gcf, 'WindowButtonMotionFcn', [lvmClassVisualiseFunc '(' '''move''' ',' num2str(curLayer) ')'])
149 | set(gcf, 'WindowButtonDownFcn', [lvmClassVisualiseFunc '(' '''click''' ',' num2str(curLayer) ')'])
150 |
151 |
--------------------------------------------------------------------------------
/deepGP/matlab/lvmRealtimeAudio.m:
--------------------------------------------------------------------------------
1 | % Like lvmVisualiseGeneral but for audio
2 | function lvmRealtimeAudio(model, YLbls, ...
3 | visualiseFunction, visualiseModify, showVariance, varargin)
4 |
5 | % LVMREALTIMEAUDIO Visualise the manifold (real-time audio version).
6 | % This is a copy of lvmVisualise where the classVisualise function depends on the
7 | % model type. Additionally, there is a flag showVariance which, when set to
8 | % false, does not plot the variance of the inputs in the scatter plot,
9 | % something which saves a lot of computational time for high-dimensional
10 | % data.
11 | %
12 | % SEEALSO : lvmVisualise, lvmClassVisualise, lvmScatterPlot,
13 | % lvmScatterPlotNoVar
14 | %
15 |
16 | % MLTOOLS
17 |
18 | global visualiseInfo
19 |
20 | visualiseInfo.showVariance = showVariance;
21 |
22 | lvmClassVisualiseFunc = [model.type 'ClassVisualise'];
23 | if ~exist(lvmClassVisualiseFunc)
24 | lvmClassVisualiseFunc = 'lvmClassVisualise';
25 | end
26 |
27 | if isfield(model, 'vis') && isfield(model.vis, 'figHandle')
28 | figure(model.vis.figHandle{1});
29 | else
30 | figure(1)
31 | end
32 |
33 | clf
34 | if isfield(model, 'vis') && isfield(model.vis, 'startDim')
35 | visualiseInfo.dim1 = model.vis.startDim{1};
36 | visualiseInfo.dim2 = model.vis.startDim{2};
37 | else
38 | visualiseInfo.dim1 = 1;
39 | visualiseInfo.dim2 = 2;
40 | end
41 | if isfield(model, 'vis') && isfield(model.vis, 'startPos')
42 | visualiseInfo.latentPos = model.vis.startPos;
43 | else
44 | visualiseInfo.latentPos = zeros(1, model.q);
45 | end
46 | visualiseInfo.model = model;
47 | visualiseInfo.lbls = YLbls;
48 | if showVariance
49 | visualiseInfo.plotAxes = lvmScatterPlot(model, YLbls);
50 | else
51 | visualiseInfo.plotAxes = lvmScatterPlotNoVar(model, YLbls, [], [visualiseInfo.dim1 visualiseInfo.dim2]);
52 | end
53 |
54 | if showVariance
55 | lvmSetPlot;
56 | else
57 | lvmSetPlotNoVar(lvmClassVisualiseFunc);
58 | end
59 | visualiseInfo.latentHandle = line(visualiseInfo.latentPos(visualiseInfo.dim1), visualiseInfo.latentPos(visualiseInfo.dim2), 'markersize', 20, 'color', ...
60 | [0 0 0], 'marker', '.', 'visible', ...
61 | 'on', 'erasemode', 'xor');
62 |
63 | visualiseInfo.clicked = 0;
64 | visualiseInfo.digitAxes = [];
65 | visualiseInfo.digitIndex = [];
66 |
67 |
68 | visualiseInfo.dynamicsRadio = ...
69 | uicontrol('Style', 'radiobutton', ...
70 | 'String', 'Run Dynamics', ...
71 | 'units', 'normalized', ...
72 | 'position', [0 0 0.2 0.05], ...
73 | 'Callback', [lvmClassVisualiseFunc '(''toggleDynamics'')'], ...
74 | 'value', 0);
75 |
76 | visualiseInfo.dynamicsSlider = ...
77 | uicontrol('Style', 'slider', ...
78 | 'String', 'Time', ...
79 | 'sliderStep', [0.01, 0.1], ...
80 | 'units', 'normalized', ...
81 | 'position', [0 0.95 1 0.05], ...
82 | 'callback', [lvmClassVisualiseFunc '(''dynamicsSliderChange'')']);
83 |
84 | if ~isfield(model, 'dynamics') | isempty(model.dynamics)
85 | set(visualiseInfo.dynamicsRadio, 'visible', 'off');
86 | set(visualiseInfo.dynamicsSlider, 'visible', 'off');
87 | else
88 | if ~isfield(model.dynamics, 'dynamicsType')
89 | set(visualiseInfo.dynamicsRadio, 'visible', 'on');
90 | set(visualiseInfo.dynamicsSlider, 'visible', 'off');
91 | else
92 | switch model.dynamics.dynamicsType
93 | case 'regressive'
94 | set(visualiseInfo.dynamicsRadio, 'visible', 'off');
95 | set(visualiseInfo.dynamicsSlider, 'visible', 'on');
96 | set(visualiseInfo.dynamicsSlider, 'min', min(model.dynamics.X), ...
97 | 'max', max(model.dynamics.X), ...
98 | 'value', model.dynamics.X(1))
99 | case 'auto-regressive'
100 | set(visualiseInfo.dynamicsRadio, 'visible', 'on');
101 | set(visualiseInfo.dynamicsSlider, 'visible', 'off');
102 | end
103 | end
104 | end
105 |
106 | visualiseInfo.runDynamics = false;
107 |
108 | % Set the callback function
109 | set(gcf, 'WindowButtonMotionFcn', [lvmClassVisualiseFunc '(''move'')'])
110 | set(gcf, 'WindowButtonDownFcn', [lvmClassVisualiseFunc '(''click'')'])
111 |
112 |
113 |
114 | if isfield(model, 'vis') && isfield(model.vis, 'figHandle')
115 | figure(model.vis.figHandle{2});
116 | else
117 | figure(2)
118 | end
119 | clf
120 |
121 | %
122 | % if length(visualiseFunction)>4 & strcmp(visualiseFunction(1:5), 'image') & length(varargin)>0
123 | % set(gcf, 'menubar', 'none')
124 | % xPixels = 115;
125 | % yPixels = 115;
126 | % set(gcf, 'position', [232 572 xPixels yPixels/varargin{1}(1)*varargin{1}(2)])
127 | % visualiseInfo.visualiseAxes = subplot(1, 1, 1);
128 | % xWidth = varargin{1}(1)/xPixels;
129 | % yHeight = varargin{1}(2)/yPixels;
130 | % set(visualiseInfo.visualiseAxes, 'position', [0.5-xWidth/2 0.5-yHeight/2 xWidth yHeight])
131 | % else
132 | visualiseInfo.visualiseAxes =subplot(1, 1, 1);
133 | % end
134 |
135 |
136 | visData = zeros(1,model.d);
137 | if(length(visualiseFunction)>4 & strcmp(visualiseFunction(1:5), 'image'))
138 | visData(1) = min(min(model.y));
139 | visData(end) = max(max(model.y));
140 | else
141 | [void, indMax]= max(sum((model.y.*model.y), 2));
142 | visData = model.y(indMax, :);
143 | end
144 |
145 | set(get(visualiseInfo.visualiseAxes, 'title'), 'string', 'Y', 'fontsize',30);
146 | set(visualiseInfo.visualiseAxes, 'position', [0.05 0.05 0.9 0.8]);
147 |
148 | visualiseInfo.visualiseFunction = str2func(visualiseFunction);
149 | visHandle = visualiseInfo.visualiseFunction(visData, varargin{:});
150 | handleType = get(visHandle, 'type');
151 | if ~strcmp(handleType, 'figure')
152 | set(visHandle, 'erasemode', 'xor');
153 | end
154 | % Pass the data to visualiseInfo
155 | visualiseInfo.model = model;
156 | visualiseInfo.varargin = varargin;
157 | visualiseInfo.visualiseModify = str2func(visualiseModify);
158 | visualiseInfo.visHandle = visHandle;
159 |
160 |
161 | hold off
162 |
163 | %hsvargplvmClassVisualise('updateLatentRepresentation')
164 | %visualiseInfo.clicked = 1; %%%%
165 | %hsvargplvmClassVisualise('move')
166 | %hsvargplvmClassVisualise('click')
167 |
168 |
169 |
170 |
171 |
--------------------------------------------------------------------------------
/deepGP/matlab/hsvargplvmUpdateStats.m:
--------------------------------------------------------------------------------
1 | function model = hsvargplvmUpdateStats(model)
2 |
3 | jitter = 1e-6;
4 |
5 | for h=1:model.H
6 | if h==model.H & isfield(model.layer{h}, 'dynamics') & ~isempty(model.layer{h}.dynamics)
7 | model.layer{model.H} = vargplvmDynamicsUpdateStats(model.layer{model.H});
8 | end
9 |
10 | for m=1:model.layer{h}.M
11 | if h~=1 % Not leaf
12 | % This is an intermediate node. Its data change in every
13 | % iteration and we have to reset the scaled data to the means
14 | % of the vardist. of the previous node (future implementations
15 | % with more latent spaces can also have indices here!!)
16 | means = model.layer{h-1}.vardist.means;
17 | covars = model.layer{h-1}.vardist.covars;
18 | if ~isempty(model.layer{h}.comp{m}.latentIndices)
19 | % In this layer h, the "means" (ie the X of layer h-1) act
20 | % as outputs. We also have the multOutput option, i.e. the
21 | % full output space "means" will be grouped into smaller
22 | % subspaces as defined in latentIndices.
23 | means = means(:, model.layer{h}.comp{m}.latentIndices);
24 | covars = covars(:, model.layer{h}.comp{m}.latentIndices);
25 | end
26 | if model.centerMeans
27 | [Y, bias, scale] = scaleData(means); %% ??????????????????????
28 | model.layer{h}.comp{m}.scale = scale; %%% ???????
29 | model.layer{h}.comp{m}.bias = bias; %%% ???
30 | % Probably centering the means would also change the bound,
31 | % because then the expectation is not <xx'> but
32 | % <(x-bias)(x-bias)'>, so what we do here is not enough!
33 | else
34 | Y = means; %%%%%%%% ???????????
35 | end
36 | model.layer{h}.comp{m}.mOrig = Y;
37 |
38 |
39 | %!!! TODO: The following should be Y.*Y, i.e. the centered version
40 | % of the means. That would also change FURTHER the bound,
41 | % because now we will have an expectation of <(x-bias)(x-bias)'>
42 | model.layer{h}.comp{m}.TrYY = sum(sum(means.*means)) + sum(sum(covars));
43 |
44 | %%%%
45 | % This is the part that changes from the leaf nodes/vargplvm to
46 | % the intermediate nodes: Tr(YY') becomes Tr(<YY'>) under q(X) of layer h-1.
47 | sumAll = 0;
48 | for q=1:size(means,2)
49 | sumAll = sumAll + (means(:,q)*means(:,q)'+diag(covars(:,q)));
50 | end
51 | % This term substitutes Y for the intermediate nodes. Same
52 | % trick as for the high-dimensionality problems.
53 | model.layer{h}.comp{m}.m = jitChol(sumAll)';
54 | %%%
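% Why the Cholesky trick above works (a sketch): m = jitChol(sumAll)' gives
% m*m' = sumAll = <YY'> (up to jitter), so downstream terms of the form
% Tr(A*<YY'>) can be computed as Tr(m'*A*m) from m alone, without ever
% forming an explicit Y for the intermediate node.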
55 | end
56 | % The following is executed for leaf and intermediate nodes. The
57 | % only difference is in the "m" field, but this is handled above and UpdateStats2
58 | % is not aware of any differences.
59 | model.layer{h}.comp{m} = vargplvmUpdateStats2(model.layer{h}.comp{m}, model.layer{h}.vardist);
60 |
61 | if h~=1
62 | % That's for the derivative of the intermediate nodes w.r.t the
63 | % latent space of the previous layers.
64 | Z = model.layer{h}.comp{m}.P1*model.layer{h}.comp{m}.Psi1';
65 | model.layer{h}.comp{m}.Z = Z'*Z;
66 | end
67 | end
68 | end
69 | end
70 |
71 |
72 | % That's ALL copied from vargplvmUpdateStats, without the last line that
73 | % sets model.X = model.vardist.means. Also, vardist is separated from
74 | % model.vardist.
75 | function model = vargplvmUpdateStats2(model, vardist)
76 | jitter = 1e-6;
77 |
78 |
79 | X_u = model.X_u;
80 |
81 | model.K_uu = kernCompute(model.kern, X_u);
82 |
83 | % Always add jitter (so that the inducing variables are "jitter" function variables)
84 | % and the above value represents the minimum jitter value
85 | % Putting jitter always ("if" in comments) is like having a second
86 | % whiteVariance in the kernel which is constant.
87 | %if (~isfield(model.kern, 'whiteVariance')) | model.kern.whiteVariance < jitter
88 | %K_uu_jit = model.K_uu + model.jitter*eye(model.k);
89 | %model.Lm = chol(K_uu_jit, 'lower');
90 | %end
91 |
92 | % There is no white noise term so add some jitter.
93 | if ~strcmp(model.kern.type, 'rbfardjit')
94 | model.K_uu = model.K_uu ...
95 | + sparseDiag(repmat(jitter, size(model.K_uu, 1), 1));
96 | end
97 |
98 | model.Psi0 = kernVardistPsi0Compute(model.kern, vardist);
99 | model.Psi1 = kernVardistPsi1Compute(model.kern, vardist, X_u);
100 | [model.Psi2, AS] = kernVardistPsi2Compute(model.kern, vardist, X_u);
101 |
102 | % M is model.k
103 | %model.Lm = chol(model.K_uu, 'lower');
104 | model.Lm = jitChol(model.K_uu)'; % M x M: L_m (lower triangular) ---- O(m^3)
105 | model.invLm = model.Lm\eye(model.k); % M x M: L_m^{-1} ---- O(m^3)
106 | model.invLmT = model.invLm'; % L_m^{-T}
107 | model.C = model.invLm * model.Psi2 * model.invLmT;
108 | model.TrC = sum(diag(model.C)); % Tr(C)
109 | % Matrix At replaces the matrix A of the old implementation; At is more stable
110 | % since it has a much smaller condition number than A=sigma^2 K_uu + Psi2
111 | model.At = (1/model.beta) * eye(size(model.C,1)) + model.C; % At = beta^{-1} I + C
112 | model.Lat = jitChol(model.At)';
113 | model.invLat = model.Lat\eye(size(model.Lat,1));
114 | model.invLatT = model.invLat';
115 | model.logDetAt = 2*(sum(log(diag(model.Lat)))); % log |At|
116 |
117 | model.P1 = model.invLat * model.invLm; % M x M
118 |
119 | % First multiply the two last factors; so, the large N is only involved
120 | % once in the calculations (P1: MxM, Psi1':MxN, Y: NxD)
121 | model.P = model.P1 * (model.Psi1' * model.m);
122 |
123 | % Needed for both, the bound's and the derivs. calculations.
124 | model.TrPP = sum(sum(model.P .* model.P));
125 |
126 | %%% Precomputations for the derivatives (of the likelihood term) of the bound %%%
127 | %model.B = model.invLmT * model.invLatT * model.P; %next line is better
128 | model.B = model.P1' * model.P;
129 | model.invK_uu = model.invLmT * model.invLm;
130 | Tb = (1/model.beta) * model.d * (model.P1' * model.P1);
131 | Tb = Tb + (model.B * model.B');
132 | model.T1 = model.d * model.invK_uu - Tb;
133 | end
--------------------------------------------------------------------------------
/deepGP/matlab/deepGPRegression.m:
--------------------------------------------------------------------------------
1 | randn('seed', 1e4);
2 | rand('seed', 1e4);
3 |
4 | if ~exist('dynamicsConstrainType', 'var'), dynamicsConstrainType = {'time'}; end
5 | if ~exist('dynamicKern', 'var'), dynamicKern = {'rbf','white','bias'}; end
6 | if ~exist('initX', 'var'), initX = 'ppca'; end
7 | % This allows to take the inputs (or even other matrices) into account when
8 | % initialising the latent spaces. Check hsvargplvmModelCreate.m.
9 | if ~exist('doExtraInit','var'), doExtraInit = false; end
10 | vardistCovarsMult = [];
11 |
12 | hsvargplvm_init;
13 |
14 | assert(exist('inpX','var') && exist('Ytr','var'), 'Inputs inpX and outputs Ytr must be already in the workspace.');
15 |
16 | [options, optionsDyn] = hsvargplvmOptions(globalOpt, inpX);
17 |
18 | if doExtraInit
19 | options.extraInit = inpX;
20 | end
21 |
22 | % ---- Potential special initialisations for X -----
23 | if ~iscell(globalOpt.initX) && strcmp(globalOpt.initX, 'inputs')
24 | options = rmfield(options, 'initX');
25 | for i=1:options.H
26 | options.initX{i} = inpX;
27 | end
28 | optionsDyn.initX = inpX;
29 | globalOpt.initX = options.initX;
30 | end
31 |
32 | % Initialise half of the latent spaces with inputs, half with PCA on outputs
33 | if ~iscell(globalOpt.initX) && strcmp(globalOpt.initX, 'inputsOutputs')
34 | options = rmfield(options, 'initX');
35 | oldQ = Q; clear Q
36 | for i=options.H:-1:floor(options.H/2)+1
37 | options.initX{i} = inpX;
38 | Q{i} = size(inpX,2);
39 | end
40 | optionsDyn.initX = inpX;
41 |
42 | YtrScaled = scaleData(Ytr{1}, options.scale2var1);
43 | Xpca = ppcaEmbed(YtrScaled, oldQ);
44 | for i=1:floor(options.H/2)
45 | options.initX{i} = Xpca;
46 | Q{i} = oldQ;
47 | end
48 | options.Q = Q;
49 | globalOpt.Q = Q;
50 | globalOpt.initX = options.initX;
51 | end
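% A concrete illustration of the split above (hypothetical sizes): with
% H = 4 and oldQ = 5, layers 4 and 3 get options.initX{i} = inpX and
% Q{i} = size(inpX,2), while layers 2 and 1 get the 5-dimensional PCA
% embedding Xpca of the scaled outputs.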
52 |
53 | options.optimiser = 'scg2';
54 |
55 | % Just rewrite all options into a struct of cells
56 | optionsAll = hsvargplvmCreateOptions(Ytr, options, globalOpt);
57 | % Don't mind the following for loop... it just gives the extra possibility
58 | % of initialising the latent space with Bayesian GPLVM or GPLVM (see
59 | % hsvargplvm_init on how to activate this).
60 | initXOptions = cell(1, options.H);
61 | for h=1:options.H
62 | if strcmp(optionsAll.initX{h}, 'vargplvm') || strcmp(optionsAll.initX{h}, 'fgplvm')
63 | initXOptions{h}{1} = optionsAll;
64 | % Don't allow the D >> N trick for layers > 1
65 | if h~=1
66 | if isfield(initXOptions{h}{1}, 'enableDgtN')
67 | initXOptions{h}{1}.enableDgtN = false;
68 | end
69 | end
70 | initXOptions{h}{1}.latentDim = optionsAll.Q{h};
71 | initXOptions{h}{1}.numActive = optionsAll.K{h}{1};
72 | initXOptions{h}{1}.kern = optionsAll.kern{h}{1};
73 | initXOptions{h}{1}.initX = 'ppca';
74 | initXOptions{h}{1}.initSNR = 100;
75 | initXOptions{h}{1}.numActive = min(50, size(inpX,1));
76 | initXOptions{h}{2} = 100;
77 | initXOptions{h}{3} = 200;
78 | if exist('stackedInitVardistIters','var'), initXOptions{h}{2} = stackedInitVardistIters; end
79 | if exist('stackedInitIters','var'), initXOptions{h}{3} = stackedInitIters; end
80 | if exist('stackedInitSNR','var'), initXOptions{h}{1}.initSNR = stackedInitSNR; end
81 | if exist('stackedInitK','var'), initXOptions{h}{1}.numActive = stackedInitK; end
82 | else
83 | initXOptions{h} = {};
84 | end
85 | end
86 | %---
87 |
88 | % Create the deep GP based on the model options, global options
89 | % (configuration) and options for initialising the latent spaces X
90 | model = hsvargplvmModelCreate(Ytr, options, globalOpt, initXOptions);
91 | optionsDyn.initX = model.layer{end}.vardist.means;
92 | % Since we do regression, we need to add a GP on the parent node. This GP
93 | % couples the inputs and is parametrised by options in a struct "optionsDyn".
94 | model = hsvargplvmAddParentPrior(model, globalOpt, optionsDyn);
95 |
96 |
97 | %-- We have the option to not learn the inducing points and/or fix them to
98 | % the given inputs.
99 | % Learn inducing points? (that's different to fixInducing, ie tie them
100 | % to X's, if learnInducing is false they will stay in their original
101 | % values, ie they won't constitute parameters of the model).
102 | if exist('learnInducing','var') && ~learnInducing
103 | model = hsvargplvmPropagateField(model, 'learnInducing', false);
104 | % If we initialise X with the inputs (for regression) then fix the
105 | % inducing points to these inputs (that's not necessarily good, check
106 | % also without this option).
107 | % for h=1:options.H
108 | % if ~ischar(options.initX{h})
109 | % for m=1:model.layer{h}.M
110 | % model.layer{h}.comp{m}.X_u = inpX;
111 | % end
112 | % end
113 | % end
114 | end
115 | %--
116 |
117 | if globalOpt.fixInducing
118 | model = hsvargplvmPropagateField(model, 'fixInducing', true);
119 | for m=1:model.layer{end}.M % Not implemented yet for parent node
120 | model.layer{end}.comp{m}.fixInducing = false;
121 | end
122 | end
123 |
124 | params = hsvargplvmExtractParam(model);
125 | model = hsvargplvmExpandParam(model, params);
126 | model.globalOpt = globalOpt;
127 | % Computations can be made in parallel, if option is activated
128 | model.parallel = globalOpt.enableParallelism;
129 |
130 |
131 | % Add a prior on the beta parameter, to avoid low SNR problems.
132 | if exist('addBetaPrior','var') && addBetaPrior
133 | meanSNR = 150; % Where I want the expected value of my inv gamma if it was on SNR
134 | priorName = 'invgamma'; % What type of prior
135 | varData = var(model.layer{model.H}.comp{1}.mOrig(:));
136 | meanB = meanSNR./varData;
137 | a=0.08;%1.0001; % Relatively large right-tail
138 | b=meanB*(a+1); % Because the inverse-gamma mode is b/(a+1)
139 | model = hsvargplvmAddParamPrior(model, model.H, 1, 'beta', priorName, [a b]);
140 | if exist('priorScale','var')
141 | model.layer{model.H}.comp{1}.paramPriors{1}.prior.scale = priorScale;
142 | end
143 | end
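% To activate the prior above from the workspace (inpX / Ytr already set;
% the scale value is hypothetical):
%   addBetaPrior = true; priorScale = 50; deepGPRegression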
144 |
145 | fprintf('# Scales after init. latent space:\n')
146 | hsvargplvmShowScales(model,false);
147 | %%
148 | if exist('doGradchek','var') && doGradchek
149 | %model = hsvargplvmOptimise(model, true, itNo);
150 | if isfield(model.layer{end}, 'dynamics')
151 | model.layer{end}.dynamics.learnVariance = 1; % For the gradchek to pass
152 | end
153 | model = hsvargplvmOptimise(model, true, itNo, 'gradcheck', true);
154 | else
155 | [model,modelPruned, modelInitVardist] = hsvargplvmOptimiseModel(model, true, true);
156 | end
157 |
158 | % If you decide to train for further iterations...
159 | % modelOld = model; [model,modelPruned, ~] = hsvargplvmOptimiseModel(model, true, true, [], {0, [100]});
160 |
161 |
162 |
163 |
--------------------------------------------------------------------------------
/deepGP/matlab/hierSetPlotNoVar.m:
--------------------------------------------------------------------------------
1 | function hierSetPlotNoVar(lvmClassVisualiseFunc, curLayer)
2 |
3 | % HIERSETPLOTNOVAR A copy of lvmSetPlot where the variance in the input
4 | % space is not plotted (making it faster for high-dimensional data)
5 | %
6 | % SEEALSO lvmSetPlot
7 | %
8 |
9 | % MLTOOLS
10 |
11 | global visualiseInfo
12 |
13 | if nargin <2
14 | visualiseInfo = localFunc(lvmClassVisualiseFunc,visualiseInfo);
15 | else
16 | visualiseInfo.comp{curLayer} = localFunc(lvmClassVisualiseFunc,visualiseInfo.comp{curLayer}, curLayer);
17 | end
18 |
19 | function visualiseInfo = localFunc(lvmClassVisualiseFunc,v, curLayer)
20 | visualiseInfo = v; % Now visualiseInfo is in local scope
21 |
22 | model = visualiseInfo.model;
23 | YLbls = visualiseInfo.lbls;
24 | if nargin > 2
25 | fhandle = curLayer;
26 | else
27 | fhandle = gcf; %%% Default: gcf
28 | end
29 |
30 | cla(visualiseInfo.plotAxes);
31 | if nargin > 2
32 | lvmScatterPlotNoVar(model, YLbls, visualiseInfo.plotAxes, ...
33 | [visualiseInfo.dim1, visualiseInfo.dim2], ...
34 | visualiseInfo.latentPos, gca(fhandle));
35 | else
36 | lvmScatterPlotNoVar(model, YLbls, visualiseInfo.plotAxes, ...
37 | [visualiseInfo.dim1, visualiseInfo.dim2], ...
38 | visualiseInfo.latentPos);
39 | end
40 |
41 |
42 | set(get(visualiseInfo.plotAxes, 'title'), 'string', 'X', 'fontsize', 30);
43 | set(visualiseInfo.plotAxes, 'position', [0.05 0.05 0.9 0.9]);
44 |
45 | % Set up the X limits and Y limits of the main plot
46 | xLim = [min(model.X(:, visualiseInfo.dim1)) max(model.X(:, visualiseInfo.dim1))];
47 | xSpan = xLim(2) - xLim(1);
48 | xLim(1) = xLim(1) - 0.05*xSpan;
49 | xLim(2) = xLim(2) + 0.05*xSpan;
50 | xSpan = xLim(2) - xLim(1);
51 |
52 | yLim = [min(model.X(:, visualiseInfo.dim2)) max(model.X(:, visualiseInfo.dim2))];
53 | ySpan = yLim(2) - yLim(1);
54 | yLim(1) = yLim(1) - 0.05*ySpan;
55 | yLim(2) = yLim(2) + 0.05*ySpan;
56 | ySpan = yLim(2) - yLim(1);
57 |
58 | set(visualiseInfo.plotAxes, 'XLim', xLim)
59 | set(visualiseInfo.plotAxes, 'YLim', yLim)
60 |
61 | numLatentDims = model.q;
62 | numSliders = model.q - 2;
63 | pos = get(visualiseInfo.plotAxes, 'position');
64 | if numSliders > 0
65 | sliderHeight = min([0.1 0.3/(model.q-2)]);
66 | pos(4) = pos(4) - sliderHeight*numSliders;
67 | pos(2) = pos(2) + sliderHeight*numSliders;
68 | end
69 | a = ver('matlab');
70 | if strcmp(a.Version, '7.0.1')
71 | menu = 'listbox';
72 | else
73 | menu = 'popupmenu';
74 | end
75 | if numSliders > 0
76 | pos(3) = pos(3)-0.2;
77 | for i = 1:model.q
78 | string{i} = num2str(i);
79 | end
80 | uicontrol('Style', 'text', ...
81 | 'Parent', fhandle, ...
82 | 'Units', 'normalized', ...
83 | 'Position',[0.75 0.9 0.1 0.05], ...
84 | 'String', 'X');
85 | visualiseInfo.xDimension = uicontrol('Style', menu, ...
86 | 'Parent', fhandle, ...
87 | 'Units', 'normalized', ...
88 | 'Position', [0.85 0.9 0.1 0.05], ...
89 | 'String', string, ...
90 | 'Min', 1, ...
91 | 'Max', length(string), ...
92 | 'Value', visualiseInfo.dim1);
93 | h = visualiseInfo.xDimension;
94 | if(strcmp(menu, 'listbox'))
95 | set(h, 'listboxtop', get(h, 'value'));
96 | end
97 |
98 | uicontrol('Style', 'text', ...
99 | 'Parent', fhandle, ...
100 | 'Units', 'normalized', ...
101 | 'Position', [0.75 0.85 0.1 0.05], ...
102 | 'String', 'Y');
103 | visualiseInfo.yDimension = uicontrol('Style', menu, ...
104 | 'Parent', fhandle, ...
105 | 'Units','normalized', ...
106 | 'Position',[0.85 0.85 0.1 0.05], ...
107 | 'String', string, ...
108 | 'Min', 1, ...
109 | 'Max', length(string), ...
110 | 'Value', visualiseInfo.dim2);
111 |
112 | if nargin > 2
113 | newCallBackFunc = [lvmClassVisualiseFunc '(' '''updateLatentRepresentation''' ',' num2str(curLayer) ')'];
114 | else
115 | newCallBackFunc = [lvmClassVisualiseFunc '(''updateLatentRepresentation'')'];
116 | end
117 | uicontrol('Parent', fhandle, ...
118 | 'Units','normalized', ...
119 | 'Callback',newCallBackFunc, ...
120 | 'Position',[0.75 0.80 0.2 0.05], ...
121 | 'String','Update');
122 |
123 | h = visualiseInfo.yDimension;
124 | if(strcmp(menu, 'listbox'))
125 | set(h, 'listboxtop', get(h, 'value'));
126 | end
127 |
128 | sliderOffset = 0;
129 | counter = 0;
130 | for i = model.q:-1:1
131 | if i ~= visualiseInfo.dim1 && i ~= visualiseInfo.dim2
132 | counter = counter + 1;
133 | sliderOffset = sliderOffset + sliderHeight;
134 |
135 | xLim = [min(model.X(:, i)) max(model.X(:, i))];
136 | xSpan = xLim(2) - xLim(1);
137 | xLim(1) = xLim(1) - 0.05*xSpan;
138 | xLim(2) = xLim(2) + 0.05*xSpan;
139 | xSpan = xLim(2) - xLim(1);
140 |
141 | visualiseInfo.latentSlider(counter) = ...
142 | uicontrol('Style', 'slider', ...
143 | 'String', 'Time', ...
144 | 'sliderStep', [xSpan/100, xSpan/10], ...
145 | 'units', 'normalized', ...
146 | 'position', [0.1 sliderOffset 0.8 sliderHeight], ...
147 | 'value', visualiseInfo.latentPos(i), ...
148 | 'min', xLim(1), ...
149 | 'max', xLim(2), ...
150 | 'callback', [lvmClassVisualiseFunc '(''latentSliderChange'')']);
151 | visualiseInfo.sliderText(counter) = uicontrol('Style', 'text', ...
152 | 'Parent', fhandle, ...
153 | 'Units', 'normalized', ...
154 | 'Position',[0 sliderOffset 0.1 sliderHeight], ...
155 | 'String', num2str(i));
156 | visualiseInfo.sliderTextVal(counter) = uicontrol('Style', 'text', ...
157 | 'Parent', fhandle, ...
158 | 'Units', 'normalized', ...
159 | 'Position',[0.9 sliderOffset 0.1 sliderHeight], ...
160 | 'String', num2str(visualiseInfo.latentPos(i)));
161 | end
162 |
163 | end
164 | end
165 |
166 | set(visualiseInfo.plotAxes, 'position', pos);
167 |
168 |
169 |
170 |
--------------------------------------------------------------------------------