├── LICENSE ├── README.md ├── Toy_predictionFTC2.png ├── Toy_predictionFTC5.png ├── figureCd.png ├── figureCu.png ├── figureSchool.png ├── matlab ├── additionalLfmCmuFourWalks3.mat ├── additionalfiles.txt ├── balanceModify.m ├── balancePlayData.m ├── balanceVisualise.m ├── batchCompilerGgwhiteDTC.m ├── batchHighInputsGgwhite.m ├── batchHighInputsGgwhite10Outs.m ├── batchHighInputsGgwhite2Outs.m ├── batchHighInputsGgwhite5Outs.m ├── batchJura1.m ├── batchJura3.m ├── batchJura4.m ├── batch_demJura.m ├── blockChol.m ├── checkKernelSymmetry.m ├── cmu49BalanceResults.m ├── cmu49BalanceResults2.m ├── compareFxDataResultsDtc.m ├── compilerFullCore.m ├── compilerGgDTCVAR.m ├── compilerGgDTCVAR.sh ├── compilerGgFITC.m ├── compilerGgFITC.sh ├── compilerGgPITC.m ├── compilerGgPITC.sh ├── compilerGgwhiteDTCVAR.m ├── compilerGgwhiteDTCVAR.sh ├── compilerGgwhiteDTCVARSeveralVIK.m ├── compilerGgwhiteFTC.m ├── compilerResults.m ├── compilerSparseCore.m ├── compilerSparseCoreDTCVAR.m ├── convolveDiagram.m ├── data10.mat ├── data12.mat ├── data16.mat ├── data35.mat ├── demAistats.m ├── demCmu07WalkingFeetDTCVAR1.m ├── demCmu49BalanceArm1.m ├── demCmu49BalanceArm4.m ├── demCmu49BalanceArm5.m ├── demCompilerGgwhiteDTC.m ├── demCompilerGgwhiteFITC.m ├── demCompilerGgwhitePITC.m ├── demDrosMelFTCHEAT.m ├── demDrosMelICMGeneHb.m ├── demDrosMelNoTransferHEATGeneHbBatch.m ├── demFourWalksWithLfm.m ├── demGgJura.m ├── demGgNoiseToy1.m ├── demGgToy1.m ├── demGgToyInd.m ├── demGgwhiteInd.m ├── demJuraBatch.m ├── demLfmCmuFourWalks10.mat ├── demLfmCmuFourWalks12.mat ├── demLfmCmuFourWalks16.mat ├── demLfmCmuFourWalks35.mat ├── demLfmFourWalks1Forces2Approxdtcvar.mat ├── demLfmToy.m ├── demPitcCmu49Balance.m ├── demPpcaFxData.m ├── demSarcosGgDTC.m ├── demSchoolGgwhiteDTC.m ├── demSchoolGgwhiteFITC.m ├── demSchoolGgwhitePITC.m ├── demSchoolICMDTC.m ├── demSchoolICMDTCVAR.m ├── demSchoolICMFITC.m ├── demSchoolICMPITC.m ├── demSchoolNoTransfer.m ├── demSchoolSLFMDTC.m ├── demSchoolSLFMDTCVAR.m ├── demSchoolSLFMFITC.m ├── demSchoolSLFMPITC.m ├── demSensorsDtcvar1.m ├── demSensorsInd1.m ├── demSimDtcFxData.m ├── demSimFxData.m ├── demSimp53.m ├── demSimwhiteDtcFxData.m ├── demSimwhiteFxData.m ├── demSpmgpCmu49BalanceArm5.m ├── demSpmgpGgToy1.m ├── demSpmgpGgToy3.m ├── demSpmgpGgToy3KL.m ├── demSpmgpGgToy4.m ├── demSpmgpGgToy4KL.m ├── demSpmgpGgToy5.m ├── demSpmgpGgToy5KL.m ├── demSpmgpJuraBatch.m ├── demSpmgpJuraBatchKL.m ├── demSpmgpLfmToy.m ├── demSpmgpNoiseToy1.m ├── demSpmgpNoiseToy1KL.m ├── demSpmgpNoiseToy2.m ├── demSpmgpNoiseToy2KL.m ├── demSpmgpNoiseToy3.m ├── demSpmgpNoiseToy4.m ├── demSpmgpSimYeastSpellman.m ├── demSpmgpSimYeastSpellmanPitc.m ├── demSpmgpSimp53.m ├── demToy1DGgFTCConvolutionExample.m ├── demToy1DGgFTCConvolutionExample2.m ├── demToy1DICMFTCExample.m ├── demToy1DICMFTCExample2.m ├── demToy1DLMCDTC1.m ├── demToy1DLMCDTCVAR1.m ├── demToy1DLMCFITC1.m ├── demToy1DLMCFTC1.m ├── demToy1DLMCPITC1.m ├── demToy1DSLFMFTCExample.m ├── demToy1DSLFMFTCExample2.m ├── demToyBatch.m ├── demToyComparisonICMLMCConv.m ├── demToyComparisonICMRank1.m ├── demToyComparisonICMRank2.m ├── demToyComparisonLMC.m ├── demToyComparisonLMCForConv.m ├── demToyComparisonSLFM.m ├── demWalkingLFM.m ├── demYeastSpellmanSpmgpSimVar.m ├── examplesCadmiumGgError.sh ├── examplesCadmiumGgKL.sh ├── examplesCadmiumGgwhiteError.sh ├── examplesCadmiumGgwhiteKL.sh ├── examplesToy1DGgKL.sh ├── examplesToy1DGgwhiteKL.sh ├── fixParamsMultigp.m ├── fxDataCompareTestError.m ├── fxDataCompareTrainingError.m ├── fxDataCompareVariationalBound.m ├── 
fxDataHintonDiag.m ├── fxDataResults.m ├── fxDataResultsPpca.m ├── fxdata2454103-2454466-13outputs.txt ├── fxdata2454103-2454831-13outputs.txt ├── fxdata2454468-2454897-6outputs.txt ├── fxdata2454774-2454897-6outputs.txt ├── fxdata2454834-2454896-13outputs.txt ├── fxdata2454834-2454897-4outputs.txt ├── fxdata2454834-2454897-6outputs.txt ├── gaussianKernGradTransfer.m ├── gaussianKernParamTransfer.m ├── gaussianaXgaussianKernCompute.m ├── gaussianvXgaussianKernCompute.m ├── generateDataset.m ├── generateMocapDataSet.m ├── ggKernGradTransfer.m ├── ggKernParamTransfer.m ├── ggMultigpFixParam.m ├── ggMultigpKernOptions.m ├── ggMultigpTieParam.m ├── ggSpmgpToyResults.m ├── ggToyResults.m ├── ggglobalKernExpandParam.m ├── ggglobalKernExtractParam.m ├── ggglobalKernGradCat.m ├── ggglobalKernGradInit.m ├── ggglobalKernParamInit.m ├── ggwhiteMultigpFixParam.m ├── ggwhiteMultigpKernOptions.m ├── ggwhiteMultigpTieParam.m ├── globalKernCompute.m ├── globalKernDiagCompute.m ├── globalKernGradient.m ├── gplfmToy.m ├── heatMultigpTieParam.m ├── helperCreateNames.m ├── highInputsGgwhite.m ├── ignorefiles.txt ├── initDemSimDtcFxData.m ├── initInducingMultigp.m ├── lfmKernGradTransfer.m ├── lfmKernParamTransfer.m ├── lfmMeanCompute.m ├── lfmMeanCreate.m ├── lfmMeanExpandParam.m ├── lfmMeanExtractParam.m ├── lfmMeanGradient.m ├── lfmMultigpFixParam.m ├── lfmMultigpTieParam.m ├── lfmToyResults.m ├── lfmglobalKernDisplay.m ├── lfmglobalKernExpandParam.m ├── lfmglobalKernExtractParam.m ├── lfmglobalKernGradCat.m ├── lfmglobalKernGradInit.m ├── lfmglobalKernParamInit.m ├── lfmwhiteMeanCompute.m ├── lfmwhiteMeanCreate.m ├── lfmwhiteMeanExpandParam.m ├── lfmwhiteMeanExtractParam.m ├── lfmwhiteMeanGradient.m ├── lfmwhiteMultigpFixParam.m ├── lfmwhiteMultigpTieParam.m ├── lmcMultigpFixParam.m ├── lmcMultigpTieParam.m ├── loadFxData.m ├── meanCompute.m ├── meanCreate.m ├── meanExpandParam.m ├── meanExtractParam.m ├── meanFactors.m ├── meanGradient.m ├── multigpComputeAlpha.m ├── multigpComputeM.m ├── multigpCreate.m ├── multigpDisplay.m ├── multigpErrorMeasures.m ├── multigpExpandParam.m ├── multigpExtractParam.m ├── multigpGradient.m ├── multigpKernComposer.m ├── multigpLogLikeGradients.m ├── multigpLogLikelihood.m ├── multigpObjective.m ├── multigpObjectiveGradient.m ├── multigpOptimise.m ├── multigpOptions.m ├── multigpOut.m ├── multigpPosteriorMeanVar.m ├── multigpToolboxes.m ├── multigpUpdateKernels.m ├── multigpUpdateTopLevelParams.m ├── plotFxData.m ├── rbfKernGradTransfer.m ├── rbfKernParamTransfer.m ├── readme.txt ├── resultsDemMocap1.m ├── resultsDemMocap1PredictionTest.m ├── resultsDemMocap1PredictionTraining.m ├── robotGgIND.m ├── robotGgPITC.m ├── robotWiFiResults.m ├── schoolFullCore.m ├── schoolGgDTC.m ├── schoolGgDTCVAR.m ├── schoolGgFITC.m ├── schoolGgPITC.m ├── schoolGgwhiteDTCVAR.m ├── schoolGgwhiteDTCVARSeveralVIKs.m ├── schoolResults.m ├── schoolSparseCore.m ├── schoolSparseCoreDTCVAR.m ├── scriptBatchJuraGg.m ├── scriptBatchJuraGgFullCd.m ├── scriptBatchJuraGgSpmgpCd1.m ├── scriptBatchJuraGgSpmgpCd1KL.m ├── scriptBatchJuraGgSpmgpCd2.m ├── scriptBatchJuraGgSpmgpCd2KL.m ├── scriptBatchJuraGgSpmgpCd3.m ├── scriptBatchJuraGgSpmgpCd3KL.m ├── scriptBatchJuraGgSpmgpCd4.m ├── scriptBatchJuraGgSpmgpCu1.m ├── scriptBatchJuraGgSpmgpCu2.m ├── scriptBatchJuraGgSpmgpCu3.m ├── scriptBatchJuraGgSpmgpCu4.m ├── scriptBatchJuraGgwhiteFull.m ├── scriptBatchJuraGgwhiteFullCd.m ├── scriptBatchJuraGgwhiteSpmgpCd1.m ├── scriptBatchJuraGgwhiteSpmgpCd1KL.m ├── scriptBatchJuraGgwhiteSpmgpCd2.m ├── 
scriptBatchJuraGgwhiteSpmgpCd2KL.m ├── simKernGradTransfer.m ├── simKernParamTransfer.m ├── simMeanCompute.m ├── simMeanCreate.m ├── simMeanExpandParam.m ├── simMeanExtractParam.m ├── simMeanGradient.m ├── simMultigpFixParam.m ├── simMultigpKernOptions.m ├── simMultigpTieParam.m ├── simMultimodelFixParam.m ├── simMultimodelKernOptions.m ├── simMultimodelTieParam.m ├── simResults.m ├── simglobalKernCompute.m ├── simglobalKernComputeTest.m ├── simglobalKernExpandParam.m ├── simglobalKernExtractParam.m ├── simglobalKernGradCat.m ├── simglobalKernGradInit.m ├── simglobalKernGradient.m ├── simglobalKernParamInit.m ├── simglobalMultimodelTieParam.m ├── simwhiteMeanCompute.m ├── simwhiteMeanCreate.m ├── simwhiteMeanExpandParam.m ├── simwhiteMeanExtractParam.m ├── simwhiteMeanGradient.m ├── simwhiteMultigpFixParam.m ├── simwhiteMultigpKernOptions.m ├── simwhiteMultigpTieParam.m ├── skelForWalkingWithLfm.mat ├── sparseKernCompute.m ├── sparseKernGradient.m ├── spmultigpCreate.m ├── spmultigpExpandParam.m ├── spmultigpExtractParam.m ├── spmultigpLocalCovGradient.m ├── spmultigpLogLikelihood.m ├── spmultigpTiePseudoInputs.m ├── spmultigpUpdateAD.m ├── spmultigpUpdateKernels.m ├── spmultimodelComputeHessianSensitivities.m ├── spmultimodelCreate.m ├── spmultimodelExpandParam.m ├── spmultimodelExtractParam.m ├── spmultimodelGetParameters.m ├── spmultimodelGradient.m ├── spmultimodelKernCompute.m ├── spmultimodelKernGradient.m ├── spmultimodelLocalCovGradient.m ├── spmultimodelLogLikeGradients.m ├── spmultimodelLogLikelihood.m ├── spmultimodelObjective.m ├── spmultimodelOptimiseVar.m ├── spmultimodelPlotResults.m ├── spmultimodelPlotSensitivities.m ├── spmultimodelPosteriorMeanVar.m ├── spmultimodelTieParam.m ├── spmultimodelUpdateAD.m ├── spmultimodelUpdateVariational.m ├── spmultimodelVarSInit.m ├── toy1DGgDTCExample.m ├── toy1DGgDTCMissing.m ├── toy1DGgFITCExample.m ├── toy1DGgFITCMissing.m ├── toy1DGgFTCExample.m ├── toy1DGgFTCMissing.m ├── toy1DGgINDMissing.m ├── toy1DGgPITCExample.m ├── toy1DGgPITCMissing.m ├── trainModelHighInGgwhite.m ├── walkSamplePlayData.m └── walkingPlayData.m ├── toy1DDTC4TrainTest.png ├── toy1DFITC4TrainTest.png ├── toy1DFTC4TrainTest.png ├── toy1DPITC4TrainTest.png ├── toyPredictionFITC2.png ├── toyPredictionFITC5.png ├── toyPredictionPITC2.png ├── toyPredictionPITC5.png ├── yeastSpellmanPITC2.png ├── yeastSpellmanPITC2GeneExpression.png ├── yeastSpellmanPITC93.png └── yeastSpellmanPITC93GeneExpression.png /Toy_predictionFTC2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/Toy_predictionFTC2.png -------------------------------------------------------------------------------- /Toy_predictionFTC5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/Toy_predictionFTC5.png -------------------------------------------------------------------------------- /figureCd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/figureCd.png -------------------------------------------------------------------------------- /figureCu.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/figureCu.png -------------------------------------------------------------------------------- /figureSchool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/figureSchool.png -------------------------------------------------------------------------------- /matlab/additionalLfmCmuFourWalks3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/additionalLfmCmuFourWalks3.mat -------------------------------------------------------------------------------- /matlab/additionalfiles.txt: -------------------------------------------------------------------------------- 1 | # These are files that should be packaged for the multigp software 2 | dir: html 3 | ~/mlprojects/multigp/html/index.html 4 | ~/mlprojects/multigp/html/Toy_predictionFTC2.png 5 | ~/mlprojects/multigp/html/Toy_predictionFTC5.png 6 | ~/mlprojects/multigp/html/toyPredictionPITC2.png 7 | ~/mlprojects/multigp/html/toyPredictionPITC5.png 8 | ~/mlprojects/multigp/html/toyPredictionFITC2.png 9 | ~/mlprojects/multigp/html/toyPredictionFITC5.png 10 | ~/mlprojects/multigp/html/figureCd.png 11 | ~/mlprojects/multigp/html/figureCu.png 12 | -------------------------------------------------------------------------------- /matlab/balancePlayData.m: -------------------------------------------------------------------------------- 1 | function balancePlayData(skelStruct, channels, limits, motion, subject, frameLength) 2 | 3 | % BALANCEPLAYDATA Play balance motion capture data. 4 | % FORMAT 5 | % DESC plays channels from a motion capture skeleton and channels. 6 | % ARG skelStruct : the skeleton for the motion. 7 | % ARG channels : the channels for the motion. 8 | % ARG limits : limits to plot the axes 9 | % ARG motion : number ID for the motion to be displayed in the plot 10 | % ARG subject : number of the subject to be displayed in the plot 11 | % ARG frameLength : the framelength for the motion. 12 | % 13 | % COPYRIGHT : Mauricio Alvarez, Neil D. Lawrence, 2009 14 | % 15 | % SEEALSO : skelPlayData, acclaimPlayData 16 | 17 | % MULTIGP 18 | 19 | if nargin < 4 20 | frameLength = 1/120; 21 | end 22 | 23 | clf 24 | handle = skelVisualise(channels(1, :), skelStruct); 25 | 26 | xlim = [limits(1,1) limits(1,2)]; 27 | ylim = [limits(2,1) limits(2,2)]; 28 | zlim = [limits(3,1) limits(3,2)]; 29 | set(gca, 'xlim', xlim, ... 30 | 'ylim', ylim, ... 31 | 'zlim', zlim); 32 | title(['Subject ' num2str(subject) ' Motion ' num2str(motion)], 'FontSize', 15); 33 | 34 | % Play the motion 35 | for j = 1:size(channels, 1) 36 | pause(frameLength) 37 | skelModify(handle, channels(j, :), skelStruct); 38 | end 39 | -------------------------------------------------------------------------------- /matlab/balanceVisualise.m: -------------------------------------------------------------------------------- 1 | function handle = balanceVisualise(channels, skel, padding) 2 | 3 | % BALANCEVISUALISE Draws a skel representation of 3-D data balance motion 4 | % FORMAT 5 | % DESC draws a skeleton representation in a 3-D plot. 6 | % ARG channels : the channels to update the skeleton with. 7 | % ARG skel : the skeleton structure. 
8 | % ARG padding : a vector with positions to fill the channel (default zeros) 9 | % RETURN handle : a vector of handles to the plotted structure. 10 | % 11 | % SEEALSO : balanceModify 12 | % 13 | % COPYRIGHT : Mauricio Alvarez, Neil D. Lawrence, 2009 14 | 15 | % MULTIGP 16 | 17 | global ffPos 18 | global rotMat 19 | if nargin<3 20 | padding = 0; 21 | end 22 | channels = [channels zeros(1, padding)]; 23 | vals = skel2xyz(skel, channels); 24 | connect = skelConnectionMatrix(skel); 25 | 26 | indices = find(connect); 27 | [I, J] = ind2sub(size(connect), indices); 28 | 29 | 30 | vals = vals - repmat(vals(4,:), size(vals,1), 1); 31 | ffPos = vals(5, :)'; 32 | % thet = acos(ffPos(2)/sqrt(ffPos(2:3)'*ffPos(2:3))); 33 | % rotMat2 = rotationMatrix(thet, 0, 0, 'x'); 34 | % ffPos = rotMat2*ffPos; 35 | thet = asin(ffPos(1)/sqrt(ffPos(1:2)'*ffPos(1:2))); 36 | rotMat = rotationMatrix(0, thet, 0, 'y'); 37 | vals = vals*rotMat'; 38 | ffPos = rotMat*ffPos; 39 | handle(1) = plot3(vals(:, 1), vals(:, 3), vals(:, 2), '.'); 40 | axis ij % make sure the left is on the left. 41 | set(handle(1), 'markersize', 20); 42 | %/~ 43 | %set(handle(1), 'visible', 'off') 44 | %~/ 45 | hold on 46 | grid on 47 | for i = 1:length(indices) 48 | handle(i+1) = line([vals(I(i), 1) vals(J(i), 1)], ... 49 | [vals(I(i), 3) vals(J(i), 3)], ... 50 | [vals(I(i), 2) vals(J(i), 2)]); 51 | set(handle(i+1), 'linewidth', 2); 52 | end 53 | axis equal 54 | xlabel('x') 55 | ylabel('z') 56 | zlabel('y') 57 | axis on -------------------------------------------------------------------------------- /matlab/batchHighInputsGgwhite.m: -------------------------------------------------------------------------------- 1 | % BATCHHIGHINPUTSGGWHITE Computes DTC VAR bound and full gp likelihood 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | 8 | rand('twister',1e6); 9 | randn('state',1e6); 10 | 11 | indexOut = 2; 12 | 13 | inputDim = [2 5 10]; 14 | nOuts = [1 2 4 8]; 15 | folds = 1; 16 | nTrainingPoints = 100; 17 | iters = 2000; 18 | 19 | llFull = zeros(length(inputDim),1); 20 | llAprox = cell(length(inputDim),1); 21 | 22 | limitNumActive{1} = [20 50 100]; 23 | limitNumActive{2} = [80 50 100 200]; 24 | limitNumActive{3} = [20 50 100 200 400]; 25 | limitNumActive{4} = [20 50 100 200 400 800]; 26 | 27 | for i = 1:length(inputDim) 28 | [llFull(i), X, y] = generateDataset('ggwhite', inputDim(i), nOuts(indexOut), nTrainingPoints); 29 | for j = 1:length(limitNumActive{indexOut}) 30 | for k =1:folds 31 | rand('twister',10^(k)); 32 | randn('state',10^(k)); 33 | llAprox{i}(j,k) = trainModelHighInGgwhite(X, y, limitNumActive{indexOut}(j), ... 
34 | nOuts(indexOut), iters); 35 | save(['demBatchHighInputsGgwhite' num2str(nOuts(indexOut))] ,'llFull','llAprox') 36 | end 37 | end 38 | end 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /matlab/batchHighInputsGgwhite10Outs.m: -------------------------------------------------------------------------------- 1 | % BATCHHIGHINPUTSGGWHITE2OUTS Computes DTC VAR bound and full gp likelihood 2 | % for two outputs 3 | 4 | % MULTIGP 5 | 6 | 7 | clc 8 | clear 9 | rand('twister',1e3); 10 | randn('state',1e3); 11 | nOuts = 10; 12 | inverseWidth = 5 + 10*rand(1,nOuts); 13 | sensitivity = 1 + 5*rand(1,nOuts); 14 | inputDim = [2 5 10]; 15 | nInducing = [50 100 200 500 1000]; 16 | iters = 5000; 17 | paramInit = 1e1; 18 | methodInit = 'randomComplete'; 19 | methodInitMGP = false; 20 | folds = 10; 21 | nTrainingPoints = 100; 22 | llApprox = cell(length(inputDim),1); 23 | llFull = zeros(length(inputDim),1); 24 | for i = 1:length(inputDim) 25 | [llFull(i), X, y, Kout, noisePerOutput] = generateDataset('ggwhite', inputDim(i), ... 26 | nOuts, nTrainingPoints, inverseWidth, sensitivity); 27 | llApprox{i} = highInputsGgwhite(X, y, nInducing, nOuts, ... 28 | iters, noisePerOutput, inverseWidth, sensitivity,... 29 | paramInit, methodInit, methodInitMGP, folds); 30 | end 31 | -------------------------------------------------------------------------------- /matlab/batchHighInputsGgwhite2Outs.m: -------------------------------------------------------------------------------- 1 | % BATCHHIGHINPUTSGGWHITE2OUTS Computes DTC VAR bound and full gp likelihood 2 | % for two outputs 3 | 4 | % MULTIGP 5 | 6 | 7 | clc 8 | clear 9 | addToolboxes(0,1); 10 | rand('twister',1e3); 11 | randn('state',1e3); 12 | nOuts = 2; 13 | inverseWidth = 5 + 10*rand(1,nOuts); 14 | sensitivity = 1 + 5*rand(1,nOuts); 15 | inputDim = [2 5 10]; 16 | nInducing = [20 50 100 200]; 17 | iters = 10; 18 | paramInit = 1e1; 19 | methodInit = 'randomComplete'; 20 | methodInitMGP = false; 21 | folds = 10; 22 | nTrainingPoints = 100; 23 | llApprox = cell(length(inputDim),1); 24 | llFull = zeros(length(inputDim),1); 25 | for i = 1:length(inputDim) 26 | [llFull(i), X, y, Kout, noisePerOutput] = generateDataset('ggwhite', inputDim(i), ... 27 | nOuts, nTrainingPoints, inverseWidth, sensitivity); 28 | llApprox{i} = highInputsGgwhite(X, y, nInducing, nOuts, ... 29 | iters, noisePerOutput, inverseWidth, sensitivity,... 30 | paramInit, methodInit, methodInitMGP, folds); 31 | end 32 | save('resultsBatchHighInputsGgwhite2Outs'); 33 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /matlab/batchHighInputsGgwhite5Outs.m: -------------------------------------------------------------------------------- 1 | % BATCHHIGHINPUTSGGWHITE2OUTS Computes DTC VAR bound and full gp likelihood 2 | % for two outputs 3 | 4 | % MULTIGP 5 | 6 | 7 | clc 8 | clear 9 | rand('twister',1e3); 10 | randn('state',1e3); 11 | nOuts = 5; 12 | inverseWidth = 5 + 10*rand(1,nOuts); 13 | sensitivity = 1 + 5*rand(1,nOuts); 14 | inputDim = [2 5 10]; 15 | nInducing = [50 100 200 500]; 16 | iters = 5000; 17 | paramInit = 1e1; 18 | methodInit = 'randomComplete'; 19 | methodInitMGP = false; 20 | folds = 10; 21 | nTrainingPoints = 100; 22 | llApprox = cell(length(inputDim),1); 23 | llFull = zeros(length(inputDim),1); 24 | for i = 1:length(inputDim) 25 | [llFull(i), X, y, Kout, noisePerOutput] = generateDataset('ggwhite', inputDim(i), ... 
26 | nOuts, nTrainingPoints, inverseWidth, sensitivity); 27 | llApprox{i} = highInputsGgwhite(X, y, nInducing, nOuts, ... 28 | iters, noisePerOutput, inverseWidth, sensitivity,... 29 | paramInit, methodInit, methodInitMGP, folds); 30 | end 31 | -------------------------------------------------------------------------------- /matlab/batchJura1.m: -------------------------------------------------------------------------------- 1 | % BATCHJURA1 Batch of the Full Multi Output Gaussian Process using the Jura Dataset. 2 | 3 | % MULTIGP 4 | 5 | %In this demo, we use the 6 | % Gaussian Kernel for all the covariances (or Kernels) involved and only one hidden function. 7 | % When changing the kernel, the fix values in this code and the indeces in 8 | % the tieParam vector in multigpCreate must also be changed. 9 | 10 | 11 | clc 12 | clear 13 | % n1 = rand('seed'); 14 | % n2 = randn('seed'); 15 | % 16 | % Add necessary toolboxes 17 | % file = {'Cd', 'Co', 'Cu', 'Pb'}; 18 | file = {'Cu', 'Co', 'Cd', 'Pb'}; 19 | dataSetName = 'data_jura'; 20 | experimentNo = 1; 21 | ntrainX =200; 22 | ntrainX2 =50; 23 | iters = 1000; 24 | approx = 'none'; 25 | saveFigures=1; 26 | 27 | 28 | 29 | 30 | % Inclusion of the latent function 31 | options.isSparse = 0; % Indicates if the scheme is sparse (1) or if it is full (0) 32 | options.stdBiasX = 0; % Indicates if the inputs are to be standarized 33 | options.stdBiasY = 1; % Indicates if the outputs are to be standarized 34 | options.outKernelName = 'rbf'; % Indicates the name of the output kernel: lfm, sim or gg 35 | 36 | total_t = zeros(10,4); 37 | maerror = zeros(10,4); 38 | 39 | for q =1:4 40 | for k = 1:10, 41 | data = mapLoadData([dataSetName '_' file{q}]); 42 | data.nin = 1; % Number of latent functions 43 | missingData = cell(data.nout,1); 44 | [total_t(k,q), maerror(k,q)] = batch_demJura(options, data, ntrainX, ntrainX2, approx, missingData, iters); 45 | save('./results/Jura4Ind.mat','total_t','maerror'); 46 | end 47 | end 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /matlab/batchJura3.m: -------------------------------------------------------------------------------- 1 | % BATCHJURA3 Batch of the Sparse Multi Output Gaussian Process using the PITC approx over the Jura datatset. 2 | 3 | % MULTIGP 4 | 5 | %In this demo, we use the 6 | % Gaussian Kernel for all the covariances (or Kernels) involved and only one hidden function. 7 | % When changing the kernel, the fix values in this code and the indexes in 8 | % the tieParam vector in multigpCreate must also be changed. 
9 | 10 | clc 11 | clear 12 | n1 = rand('seed'); 13 | n2 = randn('seed'); 14 | % 15 | % Add necessary toolboxes 16 | file = {'Cd', 'Co', 'Cu', 'Pb'}; 17 | dataSetName = 'data_jura'; 18 | experimentNo = 1; 19 | ntrainX =200; 20 | ntrainX2 =[10 50 100 200 500]; 21 | iters = 30; 22 | approx = 'pitc'; 23 | saveFigures=1; 24 | % load data 25 | 26 | 27 | 28 | % Inclusion of the latent function 29 | options.isSparse = 1; % Indicates if the scheme is sparse (1) or if it is full (0) 30 | options.stdBiasX = 0; % Indicates if the inputs are to be standarized 31 | options.stdBiasY = 1; % Indicates if the outputs are to be standarized 32 | options.outKernelName = 'gg'; % Indicates the name of the output kernel: lfm, sim or gg 33 | total_t = zeros(10,4,length(ntrainX2)); 34 | maerror = zeros(10,4,length(ntrainX2)); 35 | 36 | for r=1:length(ntrainX2), 37 | for q =1:4, 38 | for k = 1:10, 39 | data = mapLoadData([dataSetName '_' file{q}]); 40 | data.nin = 1; % Number of latent functions 41 | missingData = cell(data.nout,1); 42 | [total_t(k,q,r), maerror(k,q,r)] = batch_demJura(options, data, ntrainX, ntrainX2(r), approx, missingData, iters); 43 | save('./results/Jura3','total_t','maerror', 'n1', 'n2'); 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /matlab/batchJura4.m: -------------------------------------------------------------------------------- 1 | % BATCHJURA4 Batch of an Independent Gaussian Process using the Jura Dataset. 2 | 3 | % MULTIGP 4 | 5 | % In this demo, we use the 6 | % Gaussian Kernel for all the covariances (or Kernels) involved and only one hidden function. 7 | % When changing the kernel, the fix values in this code and the indexes in 8 | % the tieParam vector in multigpCreate must also be changed. 
9 | 10 | 11 | clc 12 | clear 13 | % n1 = rand('seed'); 14 | % n2 = randn('seed'); 15 | % % 16 | file = {'Cd', 'Co', 'Cu', 'Pb'}; 17 | dataSetName = 'data_jura'; 18 | experimentNo = 1; 19 | ntrainX =200; 20 | ntrainX2 =50; 21 | iters = 1000; 22 | approx = 'none'; 23 | saveFigures=1; 24 | % load data 25 | 26 | 27 | 28 | % Inclusion of the latent function 29 | options.isSparse = 0; % Indicates if the scheme is sparse (1) or if it is full (0) 30 | options.stdBiasX = 0; % Indicates if the inputs are to be standarized 31 | options.stdBiasY = 1; % Indicates if the outputs are to be standarized 32 | options.outKernelName = 'gg'; % Indicates the name of the output kernel: lfm, sim or gg 33 | 34 | total_t = zeros(10,4); 35 | maerror = zeros(10,4); 36 | 37 | for q =1:4 38 | for k = 1:10, 39 | fprintf('Experiment: %s Cross-Validation: %f\n',file{q},k); 40 | rand('seed',(q+k+2)*10^6); 41 | randn('seed',(q+k+2)*10^6); 42 | data = mapLoadData([dataSetName '_' file{q}]); 43 | data.nin = 1; % Number of latent functions 44 | data.nout = 1; 45 | data.Xtrain = data.Xtrain(1); 46 | data.Ytrain = data.Ytrain(:,1); 47 | data.Xtest = data.Xtest(1); 48 | data.Ytest = data.Ytest(:,1); 49 | missingData = cell(data.nout,1); 50 | [total_t(k,q), maerror(k,q)] = batch_demJura(options, data, ntrainX, ntrainX2, approx, missingData, iters); 51 | save('./results/Jura4Ind','total_t','maerror'); 52 | end 53 | end 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /matlab/batch_demJura.m: -------------------------------------------------------------------------------- 1 | function [total_t, maerror] = batch_demJura(options, data, ntrainX, ntrainX2, approx, missingData, iters) 2 | 3 | % BATCH_DEMJURA 4 | 5 | % MULTIGP 6 | % Setup model 7 | [options, MXtrain, Xtrain, MYtrain, Ytrain, Xtest, Ytest] = multigpOptionsJura(options, data, ntrainX,missingData, ntrainX2, approx); 8 | options.optimiser = 'scg'; 9 | options.includeInd = 0; 10 | % Create the model 11 | model = multigpCreate(q, data.nout, MYtrain, MXtrain, options); 12 | % Change the initial parameters 13 | params = multigpExtractParam(model); 14 | params = params*rand + rand; 15 | model = multigpExpandParam(model, params); 16 | % Train the model 17 | ini_t = cputime; 18 | model = multigpOptimise(model,1,iters); 19 | total_t = cputime - ini_t; 20 | % Compute the error 21 | mu = multigpPredictionMeanVar(model, Xtest, options); 22 | maerror = mean(abs((Ytest{1} - mu{1}))); 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /matlab/blockChol.m: -------------------------------------------------------------------------------- 1 | function [L, jitter] = blockChol(model) 2 | 3 | % BLOCKCHOL obtains a block Cholesky Factorization of a Matriz A according 4 | % to the algorithm 5 | 6 | % MULTIGP 7 | 8 | % 9 | % http://www.netlib.org/utk/papers/factor/node9.html 10 | 11 | 12 | dim1 = zeros(1,model.kern.numBlocks); 13 | for i = 1:model.kern.numBlocks 14 | dim1(i) = size(model.X{i}, 1); 15 | end 16 | 17 | AA = model.K; 18 | L = zeros(size(AA)); 19 | jitter = zeros(model.kern.numBlocks,1); 20 | 21 | for i = 1:model.kern.numBlocks, 22 | A11 = AA(1:dim1(i),1:dim1(i)); 23 | A21 = AA(dim1(i)+1:end,1:dim1(i)); 24 | A22 = AA(dim1(i)+1:end,dim1(i)+1:end); 25 | [U11, jitter(i)] = jitChol(A11); 26 | L11 = U11'; 27 | invL11T = (L11')\eye(size(A11,1)); 28 | L21 = A21*invL11T; 29 | AA = A22 - L21*L21'; 30 | L(sum(dim1(1:i-1))+1:sum(dim1(1:i)),sum(dim1(1:i-1))+1:sum(dim1(1:i))) = L11; 31 | 
L(sum(dim1(1:i))+1:end,sum(dim1(1:i-1))+1:sum(dim1(1:i))) = L21; 32 | end -------------------------------------------------------------------------------- /matlab/checkKernelSymmetry.m: -------------------------------------------------------------------------------- 1 | function [K, asim_K] = checkKernelSymmetry( K ) 2 | 3 | % CHECKKERNELSYMMETRY Check the kernel symmetry. 4 | 5 | % MULTIGP 6 | 7 | asim_K = max(max(K-K.')); 8 | if (asim_K ~= 0) 9 | K = (K + K.')/2; 10 | end; 11 | -------------------------------------------------------------------------------- /matlab/compilerGgDTCVAR.m: -------------------------------------------------------------------------------- 1 | clc 2 | clear 3 | 4 | addToolboxes(0,1) 5 | dataSetName = 'compilerData'; 6 | 7 | options = multigpOptions('dtc'); 8 | options.kernType = 'gg'; 9 | options.optimiser = 'scg'; 10 | options.nlf = 1; 11 | options.initialInducingPositionMethod = 'randomDataIsotopic'; 12 | options.beta = 1e3; 13 | options.tieOptions.selectMethod = 'free'; 14 | options.isArd = true; 15 | options.nVIKs = 1; 16 | options.fixInducing = false; 17 | 18 | numTraining = [16 32 64 128]; 19 | 20 | numActive{1} = [ 8 16]; 21 | numActive{2} = [16 32]; 22 | numActive{3} = [16 32 64]; 23 | numActive{4} = [32 64 128]; 24 | 25 | display = 0; 26 | iters = 1000; 27 | totFolds = 10; 28 | 29 | [totalError, elapsed_time_train, elapsed_time_test] = ... 30 | compilerSparseCore(dataSetName, options, numTraining, ... 31 | numActive, display, iters, totFolds); 32 | 33 | save('compilerGgDTCVAR.mat') -------------------------------------------------------------------------------- /matlab/compilerGgDTCVAR.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < compilerGgDTCVAR.m 7 | 8 | exit 0 -------------------------------------------------------------------------------- /matlab/compilerGgFITC.m: -------------------------------------------------------------------------------- 1 | clc 2 | clear 3 | 4 | addToolboxes(0,1) 5 | dataSetName = 'compilerData'; 6 | 7 | options = multigpOptions('fitc'); 8 | options.kernType = 'gg'; 9 | options.optimiser = 'scg'; 10 | options.nlf = 1; 11 | options.initialInducingPositionMethod = 'randomDataIsotopic'; 12 | options.beta = 1e3; 13 | options.tieOptions.selectMethod = 'free'; 14 | options.isArd = true; 15 | options.nVIKs = 1; 16 | options.fixInducing = false; 17 | 18 | numTraining = [16 32 64 128]; 19 | 20 | numActive{1} = [ 8 16]; 21 | numActive{2} = [16 32]; 22 | numActive{3} = [16 32 64]; 23 | numActive{4} = [32 64 128]; 24 | 25 | display = 0; 26 | iters = 1000; 27 | totFolds = 10; 28 | 29 | [totalError, elapsed_time_train, elapsed_time_test] = ... 30 | compilerSparseCore(dataSetName, options, numTraining, ... 
31 | numActive, display, iters, totFolds); 32 | 33 | save('compilerGgFITC.mat') -------------------------------------------------------------------------------- /matlab/compilerGgFITC.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < compilerGgFITC.m 7 | 8 | exit 0 -------------------------------------------------------------------------------- /matlab/compilerGgPITC.m: -------------------------------------------------------------------------------- 1 | % COMPILERGGPITC Runs the COMPILER DATA EXPERIMENT with PITC and GG kernel 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | 10 | dataSetName = 'compilerData'; 11 | options = multigpOptions('pitc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.beta = 1e3; 17 | options.tieOptions.selectMethod = 'free'; 18 | options.isArd = true; 19 | options.nVIKs = 1; 20 | options.fixInducing = false; 21 | 22 | numTraining = [16 32 64 128]; 23 | 24 | numActive{1} = [ 8 16]; 25 | numActive{2} = [16 32]; 26 | numActive{3} = [16 32 64]; 27 | numActive{4} = [32 64 128]; 28 | 29 | display = 0; 30 | iters = 1000; 31 | totFolds = 10; 32 | 33 | [totalError, elapsed_time_train, elapsed_time_test] = ... 34 | compilerSparseCore(dataSetName, options, numTraining, ... 35 | numActive, display, iters, totFolds); 36 | 37 | save('compilerGgPITC.mat') 38 | -------------------------------------------------------------------------------- /matlab/compilerGgPITC.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < compilerGgPITC.m 7 | 8 | exit 0 -------------------------------------------------------------------------------- /matlab/compilerGgwhiteDTCVAR.m: -------------------------------------------------------------------------------- 1 | % COMPILERGGWHITEDTCVAR Runs the COMPILER DATA EXPERIMENT with DTCVAR and one inducing kernel 2 | 3 | % MULTIGP 4 | 5 | 6 | clc 7 | clear 8 | 9 | dataSetName = 'compilerData'; 10 | 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'ggwhite'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.beta = 1e-3; 17 | options.tieOptions.selectMethod = 'free'; 18 | options.isArd = true; 19 | options.nVIKs = 1; 20 | options.fixInducing = false; 21 | 22 | numTraining = [16 32 64 128]; 23 | %numTraining = [128]; 24 | %numActive{1} = [128]; 25 | 26 | numActive{1} = [ 8 16]; 27 | numActive{2} = [16 32]; 28 | numActive{3} = [16 32 64]; 29 | numActive{4} = [32 64 128]; 30 | 31 | display = 0; 32 | iters = 1000; 33 | totFolds = 10; 34 | 35 | [totalError, elapsed_time_train, elapsed_time_test] = ... 36 | compilerSparseCore(dataSetName, options, numTraining, ... 
37 | numActive, display, iters, totFolds); 38 | 39 | save('compilerGgwhiteDTCVAR.mat') 40 | -------------------------------------------------------------------------------- /matlab/compilerGgwhiteDTCVAR.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < compilerGgwhiteDTCVAR.m 7 | 8 | exit 0 -------------------------------------------------------------------------------- /matlab/compilerGgwhiteDTCVARSeveralVIK.m: -------------------------------------------------------------------------------- 1 | % COMPILERGGWHITEDTCVARSEVERALVIK Runs the COMPILER DATA EXPERIMENT with DTCVAR and several inducing kernel 2 | 3 | % MULTIGP 4 | 5 | 6 | clc 7 | clear 8 | 9 | dataSetName = 'compilerData'; 10 | 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'ggwhite'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.beta = 1e-3; 17 | options.tieOptions.selectMethod = 'free'; 18 | options.isArd = true; 19 | options.nVIKs = 1; 20 | options.fixInducing = false; 21 | 22 | numTraining = [16 32 64 128]; 23 | 24 | numActive{1} = [ 8 16]; 25 | numActive{2} = [16 32]; 26 | numActive{3} = [16 32 64]; 27 | numActive{4} = [32 64 128]; 28 | 29 | display = 0; 30 | iters = 1000; 31 | totFolds = 10; 32 | 33 | [totalError, elapsed_time_train, elapsed_time_test] = ... 34 | compilerSparseCoreDTCVAR(dataSetName, options, numTraining, ... 35 | numActive, display, iters, totFolds); 36 | 37 | -------------------------------------------------------------------------------- /matlab/compilerGgwhiteFTC.m: -------------------------------------------------------------------------------- 1 | % COMPILERGGWHITEFTC Runs the COMPILER DATA EXPERIMENT with full gp 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | 8 | dataSetName = 'compilerData'; 9 | 10 | options = multigpOptions('ftc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.tieOptions.selectMethod = 'free'; 15 | options.noise = 1e3; 16 | options.isArd = true; 17 | 18 | numTraining = [16 32 64 128]; 19 | 20 | display = 0; 21 | iters = 1000; 22 | totFolds = 10; 23 | 24 | [totalError, elapsed_time_train, elapsed_time_test] = ... 25 | compilerFullCore(dataSetName, options, numTraining, ... 26 | display, iters, totFolds); 27 | 28 | save('compilerGgwhiteFTC.mat') 29 | 30 | -------------------------------------------------------------------------------- /matlab/compilerResults.m: -------------------------------------------------------------------------------- 1 | function [totalError, elapsed_time] = compilerResults(model, XTest, yTest) 2 | 3 | % COMPILERRESULTS description. 
4 | 5 | % MULTIGP 6 | 7 | maxTest = length(yTest{1}); 8 | numPart = 1000; 9 | step = floor(maxTest/numPart); 10 | if strcmp(model.approx, 'ftc') 11 | XtestMod = cell(model.nout+ model.nlf,1); 12 | for j=1:model.nlf 13 | XtestMod{j} = ones(1,13); 14 | end 15 | else 16 | XtestMod = cell(model.nout,1); 17 | end 18 | mserror = zeros(maxTest,1); 19 | 20 | elapsed_time = 0; 21 | for j =1:(numPart+1), 22 | if (j==numPart+1) 23 | indexes = indexes(end)+1:maxTest; 24 | else 25 | indexes = (j-1)*step+1:j*step; 26 | end 27 | for k= 1:model.nout, 28 | if strcmp(model.approx, 'ftc') 29 | XtestMod{k+model.nlf} = XTest{k}(indexes,:); 30 | else 31 | XtestMod{k} = XTest{k}(indexes,:); 32 | end 33 | end 34 | tic 35 | [mu, void] = multigpPosteriorMeanVar(model, XtestMod); 36 | localTime = toc; 37 | elapsed_time = elapsed_time + localTime; 38 | for k= 1:model.nout, 39 | mserror(indexes,k) = abs((yTest{k}(indexes) - mu{k+model.nlf})); 40 | end 41 | end 42 | 43 | totalError = mean(mserror)'; -------------------------------------------------------------------------------- /matlab/convolveDiagram.m: -------------------------------------------------------------------------------- 1 | % CONVOLVEDIAGRAM Plots for a diagram of convolution. 2 | 3 | % MULTIGP 4 | 5 | randn('seed', 1e6) 6 | randn('seed', 1e6) 7 | 8 | z = linspace(-10, 10, 1000)'; 9 | t = linspace(-1, 1, 100)'; 10 | kern_u = kernCreate(t, 'rbf'); 11 | kern_u.inverseWidth = 400; 12 | Ku = kernCompute(kern_u, z); 13 | 14 | u = gsamp(zeros(size(z)), Ku, 1)'; 15 | 16 | [T, Z] = meshgrid(t, z); 17 | kz1 = 1/(sqrt(2*pi*(1/100)))*exp(-0.5*(T-Z).*(T-Z)/(1/30)); 18 | kz2 = 1/(sqrt(2*pi*(1/40)))*exp(-0.5*(T-Z).*(T-Z)/(1/5)); 19 | 20 | f1 = sum(kz1.*repmat(u, 1, length(t)), 1)'*((max(z)-min(z))/length(z)); 21 | f2 = sum(kz2.*repmat(u, 1, length(t)), 1)'*((max(z)-min(z))/length(z)); 22 | kern_w1 = kernCreate(t, 'rbf'); 23 | kern_w1.inverseWidth = 100; 24 | kern_w1.variance = 0.01; 25 | Kw1 = kernCompute(kern_w1, t); 26 | w1 = gsamp(zeros(size(t)), Kw1, 1)'; 27 | y1= w1+f1; 28 | 29 | kern_w2 = kernCreate(t, 'rbf'); 30 | kern_w2.inverseWidth = 100; 31 | kern_w2.variance = 0.01; 32 | Kw2 = kernCompute(kern_w2, t); 33 | w2 = gsamp(zeros(size(t)), Kw2, 1)'; 34 | y2= w2+f2; 35 | 36 | subplot(3, 3, 1) 37 | plot(z(451:550), u(451:550)); 38 | axis off 39 | subplot(3, 3, 2) 40 | plot(t, kz1(500, :)); 41 | axis off 42 | subplot(3, 3, 3) 43 | plot(t, kz2(500, :)); 44 | axis off 45 | 46 | subplot(3, 3, 4) 47 | plot(t, f1); 48 | axis off 49 | set(gca, 'ylim', [-2 2]) 50 | subplot(3, 3, 5) 51 | plot(t, f2); 52 | axis off 53 | set(gca, 'ylim', [-2 2]) 54 | subplot(3, 3, 6) 55 | plot(t, w1); 56 | axis off 57 | set(gca, 'ylim', [-2 2]) 58 | 59 | subplot(3, 3, 7) 60 | plot(t, w2); 61 | axis off 62 | set(gca, 'ylim', [-2 2]) 63 | subplot(3, 3, 8) 64 | plot(t, y1); 65 | axis off 66 | set(gca, 'ylim', [-2 2]) 67 | subplot(3, 3, 9) 68 | plot(t, y2); 69 | axis off 70 | set(gca, 'ylim', [-2 2]) 71 | -------------------------------------------------------------------------------- /matlab/data10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/data10.mat -------------------------------------------------------------------------------- /matlab/data12.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/data12.mat 
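[Editorial note -- not part of the repository snapshot] convolveDiagram.m above builds each output by numerically approximating the convolution f_d(t) = integral of k_d(t - z) u(z) dz with a Riemann sum over a finely sampled latent function u. The short sketch below isolates just that approximation; it assumes only base MATLAB, and both the latent sample u and the kernel width ell are placeholders (convolveDiagram.m instead draws u from a GP with an rbf kernel and uses two different widths).

% Editorial sketch: Riemann-sum approximation of f(t) = integral k(t - z) u(z) dz.
z = linspace(-10, 10, 1000)';                    % fine grid for the latent function u(z)
t = linspace(-1, 1, 100)';                       % grid where the smoothed output f(t) is evaluated
u = randn(size(z));                              % placeholder latent sample (a GP draw in convolveDiagram.m)
ell = 1/30;                                      % placeholder squared width of the Gaussian smoothing kernel
[T, Z] = meshgrid(t, z);                         % T and Z are length(z) x length(t)
kz = exp(-0.5*(T - Z).^2/ell)/sqrt(2*pi*ell);    % smoothing kernel k(t - z) evaluated on the grid
dz = (max(z) - min(z))/length(z);                % spacing of the z grid
f = sum(kz.*repmat(u, 1, length(t)), 1)'*dz;     % f(t) ~ sum over z of k(t - z) u(z) dz, length(t) x 1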
-------------------------------------------------------------------------------- /matlab/data16.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/data16.mat -------------------------------------------------------------------------------- /matlab/data35.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/data35.mat -------------------------------------------------------------------------------- /matlab/demCmu49BalanceArm1.m: -------------------------------------------------------------------------------- 1 | % DEMCMU49BALANCEARM1 Demonstrate latent force model on CMU data. 2 | 3 | % MULTIGP 4 | 5 | rand('seed', 1e6); 6 | randn('seed', 1e6); 7 | 8 | dataSetName = 'cmu49BalanceArm'; 9 | experimentNo = 1; 10 | 11 | 12 | % load data 13 | [y, void, yTest, void] = lvmLoadData(dataSetName); 14 | 15 | % Get the time index. 16 | fps = 120/32; 17 | t = 1:size(y, 1); 18 | t = t'/fps; 19 | y = y/sqrt(sum(var(y))); 20 | 21 | for i = 1:size(y, 2) 22 | Y{i} = y(:, i); 23 | X{i} = t; 24 | end 25 | 26 | 27 | % Set up model 28 | options = multigpOptions('ftc'); 29 | options.optimiser = 'conjgrad'; 30 | options.kernType = 'lfm'; 31 | 32 | options.tieOptions.selectMethod = 'free'; 33 | 34 | q = 1; 35 | d = size(y, 2); 36 | 37 | % Creates the model 38 | model = multigpCreate(q, d, X, Y, options); 39 | %model.scale = repmat(sqrt(sum(var(y))/model.d), 1, model.d); 40 | 41 | model.scale = repmat(1, 1, model.d); 42 | model.m = multigpComputeM(model); 43 | 44 | 45 | display = 2; 46 | iters = 1000; 47 | 48 | % Trains the model and counts the training time 49 | model = multigpOptimise(model, display, iters); 50 | 51 | % Save the results. 52 | capName = dataSetName; 53 | capName(1) = upper(capName(1)); 54 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 55 | 56 | 57 | -------------------------------------------------------------------------------- /matlab/demCompilerGgwhiteDTC.m: -------------------------------------------------------------------------------- 1 | % DEMCOMPILERGGWHITEDTC Runs the COMPILER DATA EXPERIMENT with DTC and one inducing kernel 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | 8 | dataSetName = 'compilerData'; 9 | 10 | options = multigpOptions('dtc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 15 | options.beta = 1e-3; 16 | options.tieOptions.selectMethod = 'free'; 17 | options.isArd = true; 18 | options.nVIKs = 1; 19 | options.fixInducing = false; 20 | 21 | numTraining = [16 32 64 128]; 22 | 23 | numActive{1} = [ 8 16]; 24 | numActive{2} = [16 32]; 25 | numActive{3} = [16 32 64]; 26 | numActive{4} = [32 64 128]; 27 | 28 | display = 0; 29 | iters = 1000; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train, elapsed_time_test] = ... 33 | compilerSparseCore(dataSetName, options, numTraining, ... 
34 | numActive, display, iters, totFolds); 35 | 36 | save('compilerGgwhiteDTC.mat') 37 | -------------------------------------------------------------------------------- /matlab/demCompilerGgwhiteFITC.m: -------------------------------------------------------------------------------- 1 | % DEMCOMPILERGGWHITEFITC Runs the COMPILER DATA EXPERIMENT with FITC and one inducing kernel 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | 8 | dataSetName = 'compilerData'; 9 | 10 | options = multigpOptions('fitc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 15 | options.beta = 1e-3; 16 | options.tieOptions.selectMethod = 'free'; 17 | options.isArd = true; 18 | options.nVIKs = 1; 19 | options.fixInducing = false; 20 | 21 | numTraining = [16 32 64 128]; 22 | 23 | numActive{1} = [ 8 16]; 24 | numActive{2} = [16 32]; 25 | numActive{3} = [16 32 64]; 26 | numActive{4} = [32 64 128]; 27 | 28 | display = 0; 29 | iters = 1000; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train, elapsed_time_test] = ... 33 | compilerSparseCore(dataSetName, options, numTraining, ... 34 | numActive, display, iters, totFolds); 35 | 36 | save('compilerGgwhiteFITC.mat') 37 | -------------------------------------------------------------------------------- /matlab/demCompilerGgwhitePITC.m: -------------------------------------------------------------------------------- 1 | % DEMCOMPILERGGWHITEPITC Runs the COMPILER DATA EXPERIMENT with PITC and one inducing kernel 2 | 3 | % MULTIGP 4 | 5 | clc 6 | clear 7 | 8 | dataSetName = 'compilerData'; 9 | 10 | options = multigpOptions('pitc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 15 | options.beta = 1e-3; 16 | options.tieOptions.selectMethod = 'free'; 17 | options.isArd = true; 18 | options.nVIKs = 1; 19 | options.fixInducing = false; 20 | 21 | numTraining = [16 32 64 128]; 22 | 23 | numActive{1} = [ 8 16]; 24 | numActive{2} = [16 32]; 25 | numActive{3} = [16 32 64]; 26 | numActive{4} = [32 64 128]; 27 | 28 | display = 0; 29 | iters = 1000; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train, elapsed_time_test] = ... 33 | compilerSparseCore(dataSetName, options, numTraining, ... 34 | numActive, display, iters, totFolds); 35 | 36 | save('compilerGgwhitePITC.mat') 37 | -------------------------------------------------------------------------------- /matlab/demGgJura.m: -------------------------------------------------------------------------------- 1 | % DEMGGJURA Demonstrate multigp convolution model on JURA data using 2 | % the FULL covariance matrix. 
3 | 4 | % MULTIGP 5 | 6 | rand('twister', 1e6); 7 | randn('state', 1e6); 8 | 9 | dataSetName = 'juraData'; 10 | experimentNo = 1; 11 | file = 'Cd'; 12 | 13 | 14 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData([dataSetName file]); 15 | 16 | scaleVal = zeros(1,size(yTemp, 2)); 17 | biasVal = zeros(1,size(yTemp, 2)); 18 | for k =1:size(yTemp, 2), 19 | biasVal(k) = mean(yTemp{k}); 20 | scaleVal(k) = sqrt(var(yTemp{k})); 21 | end 22 | 23 | options = multigpOptions('ftc'); 24 | options.kernType = 'gg'; 25 | options.optimiser = 'scg'; 26 | options.nlf = 1; 27 | options.beta = ones(1, size(yTemp, 2)); 28 | options.bias = [zeros(1, options.nlf) biasVal]; 29 | options.scale = [zeros(1, options.nlf) scaleVal]; 30 | 31 | q = 2; 32 | d = size(yTemp, 2) + options.nlf; 33 | 34 | X = cell(size(yTemp, 2)+options.nlf,1); 35 | y = cell(size(yTemp, 2)+options.nlf,1); 36 | 37 | for j=1:options.nlf 38 | y{j} = []; 39 | X{j} = zeros(1, q); 40 | end 41 | for i = 1:size(yTemp, 2) 42 | y{i+options.nlf} = yTemp{i}; 43 | X{i+options.nlf} = XTemp{i}; 44 | end 45 | 46 | XTest = cell(size(yTemp, 2)+options.nlf,1); 47 | 48 | for j=1:options.nlf 49 | XTest{j} = ones(1, q); 50 | end 51 | for i = 1:size(yTemp, 2) 52 | XTest{i+options.nlf} = XTestTemp{i}; 53 | end 54 | 55 | 56 | % Creates the model 57 | model = multigpCreate(q, d, X, y, options); 58 | 59 | display = 1; 60 | iters = 10; 61 | model = multigpOptimise(model, display, iters); 62 | 63 | % Prediction 64 | mu = multigpPosteriorMeanVar(model, XTest); 65 | maerror = mean(abs((yTestTemp{1} - mu{model.nlf + 1}))); 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /matlab/demGgNoiseToy1.m: -------------------------------------------------------------------------------- 1 | % DEMGGWHITETOY1 Demo of full multi output GP with missing data and GGWHITE 2 | % kernel 3 | % FORMAT 4 | % DESC Demo of Full Multi Output Gaussian Process with missing data and 5 | % GGWHITE kernel 6 | 7 | % MULTIGP 8 | 9 | clc 10 | clear 11 | rand('twister',1e6); 12 | randn('state',1e6); 13 | % 14 | dataSetName = 'ggwhiteToyMissing'; 15 | experimentNo = 1; 16 | 17 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 18 | 19 | options = multigpOptions('ftc'); 20 | options.kernType = 'ggwhite'; 21 | options.optimiser = 'scg'; 22 | options.nlf = 1; 23 | 24 | X = cell(size(yTemp, 2)+options.nlf,1); 25 | y = cell(size(yTemp, 2)+options.nlf,1); 26 | 27 | for j=1:options.nlf 28 | y{j} = []; 29 | X{j} = 1; 30 | end 31 | for i = 1:size(yTemp, 2) 32 | y{i+options.nlf} = yTemp{i}; 33 | X{i+options.nlf} = XTemp{i}; 34 | end 35 | 36 | q = 1; 37 | d = size(yTemp, 2) + options.nlf; 38 | 39 | % Creates the model 40 | model = multigpCreate(q, d, X, y, options); 41 | 42 | params = modelExtractParam(model); 43 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 44 | params(index) = log(100); 45 | model = modelExpandParam(model, params); 46 | 47 | display = 2; 48 | iters = 1000; 49 | 50 | % Trains the model 51 | init_time = cputime; 52 | model = multigpOptimise(model, display, iters); 53 | elapsed_time = cputime - init_time; 54 | 55 | % Save the results. 
56 | capName = dataSetName; 57 | capName(1) = upper(capName(1)); 58 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 59 | 60 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp); 61 | 62 | -------------------------------------------------------------------------------- /matlab/demGgToy1.m: -------------------------------------------------------------------------------- 1 | % DEMGGTOY1 Demo of full multi output GP with missing data. 2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with missing data. In this demo, we use the 4 | % Gaussian Kernel for all the covariances (or Kernels) involved and only one hidden function. 5 | 6 | % MULTIGP 7 | 8 | clc 9 | clear 10 | rand('twister',1e5); 11 | randn('state',1e5); 12 | % 13 | dataSetName = 'ggToyMissing'; 14 | experimentNo = 1; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'gg'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 1; 22 | 23 | q = 1; % Input dimension 24 | d = size(yTemp, 2) + options.nlf; 25 | 26 | X = cell(size(yTemp, 2)+options.nlf,1); 27 | y = cell(size(yTemp, 2)+options.nlf,1); 28 | 29 | % When we want to include the structure of the latent force kernel within 30 | % the whole kernel structure, and we don't have access to any data from the 31 | % latent force, we just put ones in the vector X and empty in the vector y. 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | model = multigpCreate(q, d, X, y, options); 44 | 45 | params = modelExtractParam(model); 46 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 47 | params(index) = log(100+10*rand(1,length(index))); 48 | model = modelExpandParam(model, params); 49 | 50 | display = 1; 51 | iters = 1000; 52 | 53 | % Trains the model 54 | init_time = cputime; 55 | model = multigpOptimise(model, display, iters); 56 | elapsed_time = cputime - init_time; 57 | 58 | % Save the results. 
59 | capName = dataSetName; 60 | capName(1) = upper(capName(1)); 61 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 62 | 63 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp); 64 | 65 | -------------------------------------------------------------------------------- /matlab/demGgToyInd.m: -------------------------------------------------------------------------------- 1 | clc 2 | clear 3 | 4 | rand('twister',1e6); 5 | randn('state',1e6); 6 | % 7 | dataSetName = 'ggToyMissing'; 8 | experimentNo = 15; 9 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 10 | % Configuration of data 11 | q = size(XTemp{1},2); 12 | d = size(yTemp, 2); 13 | nout = size(yTemp, 2); 14 | 15 | options.type = 'gp'; 16 | options.numModels = nout; 17 | options.compOptions = gpOptions('ftc'); 18 | options.compOptions.optimiser = 'scg'; 19 | options.compOptions.kern = {'gg', 'white'}; 20 | options.separate = []; 21 | options.optimiser = 'scg'; 22 | iters =100; 23 | display = 1; 24 | 25 | X = cell(1,size(yTemp, 2)); 26 | y = cell(1,size(yTemp, 2)); 27 | 28 | for i = 1:size(yTemp, 2) 29 | X{i} = XTemp{i}; 30 | y{i} = yTemp{i}; 31 | end 32 | % Configuration of parameters 33 | 34 | model = multimodelCreate(q, 1, X, y, options); 35 | params = modelExtractParam(model); 36 | options.separate = 1:length(params); 37 | model = multimodelCreate(q, 1, X, y, options); 38 | 39 | params = modelExtractParam(model); 40 | index = paramNameRegularExpressionLookup(model, 'multimodel .* inverse .*'); 41 | params(index) = log(100); 42 | 43 | model = modelExpandParam(model, params); 44 | 45 | model = modelOptimise(model, [], [], display, iters); 46 | 47 | 48 | -------------------------------------------------------------------------------- /matlab/demGgwhiteInd.m: -------------------------------------------------------------------------------- 1 | clc 2 | clear 3 | 4 | rand('twister',1e6); 5 | randn('state',1e6); 6 | % 7 | dataSetName = 'ggwhiteToyMissing'; 8 | experimentNo = 15; 9 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 10 | % Configuration of data 11 | q = size(XTemp{1},2); 12 | d = size(yTemp, 2); 13 | nout = size(yTemp, 2); 14 | 15 | options.type = 'gp'; 16 | options.numModels = nout; 17 | options.compOptions = gpOptions('ftc'); 18 | options.compOptions.optimiser = 'scg'; 19 | options.compOptions.kern = {'ggwhite', 'white'}; 20 | options.separate = []; 21 | options.optimiser = 'scg'; 22 | iters =100; 23 | display = 1; 24 | 25 | X = cell(1,size(yTemp, 2)); 26 | y = cell(1,size(yTemp, 2)); 27 | 28 | for i = 1:size(yTemp, 2) 29 | X{i} = XTemp{i}; 30 | y{i} = yTemp{i}; 31 | end 32 | % Configuration of parameters 33 | 34 | model = multimodelCreate(q, 1, X, y, options); 35 | params = modelExtractParam(model); 36 | options.separate = 1:length(params); 37 | model = multimodelCreate(q, 1, X, y, options); 38 | 39 | params = modelExtractParam(model); 40 | index = paramNameRegularExpressionLookup(model, 'multimodel .* inverse .*'); 41 | params(index) = log(100); 42 | 43 | model = modelExpandParam(model, params); 44 | 45 | model = modelOptimise(model, [], [], display, iters); 46 | 47 | 48 | -------------------------------------------------------------------------------- /matlab/demLfmCmuFourWalks10.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/demLfmCmuFourWalks10.mat -------------------------------------------------------------------------------- 
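[Editorial note -- not part of the repository snapshot] The demGg* scripts above, and the demSarcos*/demSchool* scripts that follow, share the same skeleton: load a dataset with mapLoadData, build cell arrays X and y whose first nlf entries stand for the unobserved latent forces (empty outputs and dummy inputs, as explained in demGgToy1.m), create the model with multigpCreate, train it with multigpOptimise, and predict with multigpPosteriorMeanVar. The sketch below shows only that skeleton; it assumes the MULTIGP toolbox and its dependencies are on the path and that the 'ggToyMissing' dataset used by demGgToy1.m is available through mapLoadData, and it omits the parameter re-initialisation, timing, saving, and plotting steps of the full demos.

% Editorial sketch: common workflow of the full-covariance (ftc) demos.
[XTemp, yTemp] = mapLoadData('ggToyMissing');   % dataset name taken from demGgToy1.m
options = multigpOptions('ftc');                % full covariance; 'dtc'/'fitc'/'pitc' give sparse variants
options.kernType = 'gg';                        % convolution kernel, as in demGgToy1.m
options.optimiser = 'scg';
options.nlf = 1;                                % number of latent forces
q = size(XTemp{1}, 2);                          % input dimension
d = size(yTemp, 2) + options.nlf;               % observed outputs plus latent forces
X = cell(d, 1);
y = cell(d, 1);
for j = 1:options.nlf                           % latent forces: no observations, dummy inputs
    y{j} = [];
    X{j} = zeros(1, q);
end
for i = 1:size(yTemp, 2)                        % observed outputs
    y{i + options.nlf} = yTemp{i};
    X{i + options.nlf} = XTemp{i};
end
model = multigpCreate(q, d, X, y, options);     % build the multi-output GP
model = multigpOptimise(model, 1, 100);         % display = 1, 100 optimisation iterations
mu = multigpPosteriorMeanVar(model, X);         % posterior means; entries after the first nlf are the outputs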
/matlab/demLfmCmuFourWalks12.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/demLfmCmuFourWalks12.mat -------------------------------------------------------------------------------- /matlab/demLfmCmuFourWalks16.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/demLfmCmuFourWalks16.mat -------------------------------------------------------------------------------- /matlab/demLfmCmuFourWalks35.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/demLfmCmuFourWalks35.mat -------------------------------------------------------------------------------- /matlab/demLfmFourWalks1Forces2Approxdtcvar.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/demLfmFourWalks1Forces2Approxdtcvar.mat -------------------------------------------------------------------------------- /matlab/demSarcosGgDTC.m: -------------------------------------------------------------------------------- 1 | % DEMSARCOSGGDTC Demonstrate sparse convolution models on SARCOS data. 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e6) 6 | randn('state', 1e6) 7 | 8 | dataSetName = 'sarcosMultiGP'; 9 | experimentNo = 2; 10 | 11 | 12 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 13 | 14 | 15 | scaleVal = zeros(1,size(yTemp, 2)); 16 | biasVal = zeros(1,size(yTemp, 2)); 17 | for k =1:size(yTemp, 2), 18 | biasVal(k) = mean(yTemp{k}); 19 | scaleVal(k) = sqrt(var(yTemp{k})); 20 | end 21 | 22 | options = multigpOptions('dtc'); 23 | options.kernType = 'gg'; 24 | options.optimiser = 'scg'; 25 | options.nlf = 1; 26 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 27 | options.numActive = 50; 28 | options.fixInducing = 0; 29 | options.bias = biasVal; 30 | options.scale = scaleVal; 31 | options.beta = ones(1, size(yTemp, 2)); 32 | 33 | 34 | X = cell(size(yTemp, 2),1); 35 | y = cell(size(yTemp, 2),1); 36 | 37 | for i = 1:size(yTemp, 2) 38 | y{i} = yTemp{i}; 39 | X{i} = XTemp{i}; 40 | end 41 | 42 | q = 2; 43 | d = size(yTemp, 2); 44 | 45 | % Creates the model 46 | model = multigpCreate(q, d, X', y, options); 47 | 48 | 49 | params = modelExtractParam(model); 50 | index = paramNameRegularExpressionLookup(model, '.* inverse .*'); 51 | params(index) = log(100); 52 | model = modelExpandParam(model, params); 53 | 54 | display = 1; 55 | iters = 1000; 56 | model = multigpOptimise(model, display, iters); 57 | 58 | % Prediction 59 | mu = multigpPosteriorMeanVar(model, XTestTemp); 60 | maerror = mean(abs((yTestTemp{1} - mu{model.nlf + 1}))); 61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /matlab/demSchoolGgwhiteDTC.m: -------------------------------------------------------------------------------- 1 | % DEMSCHOOLGGWHITEDTC SCHOOL DATA EXPERIMENT with DTC and Ggwhite kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e7) 8 | randn('state', 1e7) 9 | 10 | dataSetName = 'schoolData2'; 11 | % Configuration of options 12 | options = multigpOptions('dtc'); 13 | options.kernType = 'ggwhite'; 14 | options.optimiser = 
'scg'; 15 | options.nlf = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.fixInducing = false; 22 | options.nVIKs = 1; 23 | 24 | 25 | numActive = [ 5 20 50]; 26 | 27 | 28 | display = 0; 29 | iters = 500; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 33 | numActive, display, iters, totFolds); 34 | 35 | save('schoolGgwhiteDTC.mat') 36 | -------------------------------------------------------------------------------- /matlab/demSchoolGgwhiteFITC.m: -------------------------------------------------------------------------------- 1 | % DEMSCHOOLGGWHITEFITC SCHOOL DATA EXPERIMENT with FITC and Ggwhite kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e7) 8 | randn('state', 1e7) 9 | 10 | dataSetName = 'schoolData2'; 11 | % Configuration of options 12 | options = multigpOptions('fitc'); 13 | options.kernType = 'ggwhite'; 14 | options.optimiser = 'scg'; 15 | options.nlf = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.fixInducing = false; 22 | options.nVIKs = 1; 23 | 24 | 25 | numActive = [ 5 20 50]; 26 | 27 | 28 | display = 0; 29 | iters = 500; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 33 | numActive, display, iters, totFolds); 34 | 35 | save('schoolGgwhiteFITC.mat') 36 | -------------------------------------------------------------------------------- /matlab/demSchoolGgwhitePITC.m: -------------------------------------------------------------------------------- 1 | % DEMSCHOOLGGWHITEPITC SCHOOL DATA EXPERIMENT with PITC and Ggwhite kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e7) 8 | randn('state', 1e7) 9 | 10 | dataSetName = 'schoolData2'; 11 | % Configuration of options 12 | options = multigpOptions('pitc'); 13 | options.kernType = 'ggwhite'; 14 | options.optimiser = 'scg'; 15 | options.nlf = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.fixInducing = false; 22 | options.nVIKs = 1; 23 | 24 | 25 | numActive = [ 5 20 50]; 26 | 27 | 28 | display = 0; 29 | iters = 500; 30 | totFolds = 10; 31 | 32 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 
33 | numActive, display, iters, totFolds); 34 | 35 | save('schoolGgwhitePITC.mat') 36 | -------------------------------------------------------------------------------- /matlab/demSchoolICMDTC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('dtc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | 27 | numActive = [ 5 20 50]; 28 | rankOpts = [1 2 3 5 10]; 29 | 30 | display = 0; 31 | iters = 200; 32 | totFolds = 10; 33 | totalErrorNlf = cell(length(rankOpts),1); 34 | elapsedTime = cell(length(rankOpts),1); 35 | for rank=1:length(rankOpts) 36 | options.rankCorregMatrix = rankOpts(rank); 37 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{rank} = totalError; 41 | elapsedTime{rank} = elapsed_time_train; 42 | save('schoolGgDTC_ICM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolICMDTCVAR.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | 27 | numActive = [ 5 20 50]; 28 | rankOpts = [1 2 3 5 10]; 29 | 30 | display = 0; 31 | iters = 200; 32 | totFolds = 10; 33 | totalErrorNlf = cell(length(rankOpts),1); 34 | elapsedTime = cell(length(rankOpts),1); 35 | for rank=1:length(rankOpts) 36 | options.rankCorregMatrix = rankOpts(rank); 37 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 
39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{rank} = totalError; 41 | elapsedTime{rank} = elapsed_time_train; 42 | save('schoolGgDTCVAR_ICM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolICMFITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('fitc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | 27 | numActive = [ 5 20 50]; 28 | rankOpts = [1 2 3 5 10]; 29 | 30 | display = 0; 31 | iters = 200; 32 | totFolds = 10; 33 | totalErrorNlf = cell(length(rankOpts),1); 34 | elapsedTime = cell(length(rankOpts),1); 35 | for rank=1:length(rankOpts) 36 | options.rankCorregMatrix = rankOpts(rank); 37 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{rank} = totalError; 41 | elapsedTime{rank} = elapsed_time_train; 42 | save('schoolGgFITC_ICM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolICMPITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('pitc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | 27 | numActive = [ 5 20 50]; 28 | rankOpts = [1 2 3 5 10]; 29 | 30 | display = 0; 31 | iters = 200; 32 | totFolds = 10; 33 | totalErrorNlf = cell(length(rankOpts),1); 34 | elapsedTime = cell(length(rankOpts),1); 35 | for rank=1:length(rankOpts) 36 | options.rankCorregMatrix = rankOpts(rank); 37 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 
39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{rank} = totalError; 41 | elapsedTime{rank} = elapsed_time_train; 42 | save('schoolGgPITC_ICM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolSLFMDTC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('dtc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.rankCorregMatrix = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 27 | 28 | numActive = [ 5 20 50]; 29 | nlfOpts = [1 2 3 5 10]; 30 | 31 | display = 0; 32 | iters = 200; 33 | totFolds = 10; 34 | totalErrorNlf = cell(length(nlfOpts),1); 35 | elapsedTime = cell(length(nlfOpts),1); 36 | for nlf=1:length(nlfOpts) 37 | fprintf('NUMBER OF LATENT FORCES: %d \n',nlfOpts(nlf)); 38 | options.nlf = nlfOpts(nlf); 39 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 40 | numActive, display, iters, totFolds); 41 | totalErrorNlf{nlf} = totalError; 42 | elapsedTime{nlf} = elapsed_time_train; 43 | save('schoolGgDTC_SLFM.mat') 44 | end -------------------------------------------------------------------------------- /matlab/demSchoolSLFMDTCVAR.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.rankCorregMatrix = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 27 | 28 | numActive = [ 5 20 50]; 29 | nlfOpts = [1 2 3 5 10]; 30 | 31 | display = 0; 32 | iters = 200; 33 | totFolds = 10; 34 | totalErrorNlf = cell(length(nlfOpts),1); 35 | elapsedTime = cell(length(nlfOpts),1); 36 | for nlf=1:length(nlfOpts) 37 | options.nlf = nlfOpts(nlf); 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 
39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{nlf} = totalError; 41 | elapsedTime{nlf} = elapsed_time_train; 42 | save('schoolGgDTCVAR_SLFM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolSLFMFITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('fitc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.rankCorregMatrix = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 27 | 28 | numActive = [ 5 20 50]; 29 | nlfOpts = [1 2 3 5 10]; 30 | 31 | display = 1; 32 | iters = 200; 33 | totFolds = 10; 34 | totalErrorNlf = cell(length(nlfOpts),1); 35 | elapsedTime = cell(length(nlfOpts),1); 36 | for nlf=1:length(nlfOpts) 37 | options.nlf = nlfOpts(nlf); 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{nlf} = totalError; 41 | elapsedTime{nlf} = elapsed_time_train; 42 | save('schoolGgFITC_SLFM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSchoolSLFMPITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and IMC kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('pitc'); 12 | options.kernType = 'lmc'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.rankCorregMatrix = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.kern.isArd = options.isArd; 22 | options.fixInducing = false; 23 | options.includeNoise = false; 24 | options.gamma = exp(-2); 25 | options.kern.nout = 139; 26 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 27 | 28 | numActive = [ 5 20 50]; 29 | nlfOpts = [1 2 3 5 10]; 30 | 31 | display = 0; 32 | iters = 200; 33 | totFolds = 10; 34 | totalErrorNlf = cell(length(nlfOpts),1); 35 | elapsedTime = cell(length(nlfOpts),1); 36 | for nlf=1:length(nlfOpts) 37 | options.nlf = nlfOpts(nlf); 38 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 
39 | numActive, display, iters, totFolds); 40 | totalErrorNlf{nlf} = totalError; 41 | elapsedTime{nlf} = elapsed_time_train; 42 | save('schoolGgPITC_SLFM.mat') 43 | end -------------------------------------------------------------------------------- /matlab/demSensorsDtcvar1.m: -------------------------------------------------------------------------------- 1 | % DEMSENSORSDTCVAR1 Sparse VIKs multigp on temperature sensor data 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'sensorsTemperature'; 9 | experimentNo = 1; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('dtcvar'); 14 | options.kernType = 'simwhite'; 15 | options.optimiser = 'scg'; 16 | options.includeInd = true; 17 | options.nlf = 1; 18 | options.initialInducingPositionMethod = 'espaced'; 19 | options.numActive = 200; 20 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 21 | options.fixInducing = true; 22 | options.isStationary = true; 23 | 24 | X = cell(size(yTemp, 2),1); 25 | y = cell(size(yTemp, 2),1); 26 | 27 | for i = 1:size(yTemp, 2) 28 | y{i} = yTemp{i}; 29 | X{i} = XTemp{i}; 30 | options.bias(i) = mean(y{i}); 31 | options.scale(i) = std(y{i}); 32 | end 33 | 34 | q = 1; 35 | d = size(yTemp, 2); 36 | 37 | % Creates the model 38 | model = multigpCreate(q, d, X, y, options); 39 | 40 | display = 1; 41 | iters = 50; 42 | 43 | % Train the model 44 | init_time = cputime; 45 | model = multigpOptimise(model, display, iters); 46 | elapsed_time = cputime - init_time; 47 | 48 | % Save the results. 49 | capName = dataSetName; 50 | capName(1) = upper(capName(1)); 51 | save(['demSensors' capName num2str(experimentNo) '.mat'], 'model'); 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /matlab/demSensorsInd1.m: -------------------------------------------------------------------------------- 1 | % DEMSENSORSIND1 Independent GPs over each temperature sensor data 2 | 3 | % MULTIGP 4 | 5 | rand('twister',1e6); 6 | randn('state',1e6); 7 | 8 | dataSetName = 'sensorsTemperature'; 9 | experimentNo = 1; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | q = 1; 14 | d = size(yTemp, 2); 15 | nout = size(yTemp, 2); 16 | 17 | optionsG = gpOptions('ftc'); 18 | isRbf = 1; 19 | if isRbf 20 | optionsG.kern = {'rbf', 'white'}; 21 | else 22 | optionsG.kern = {'simwhite', 'white'}; 23 | end 24 | 25 | optionsG.scale2var1 = 1; 26 | itersSingleGp = 50; 27 | displaySingleGp = 1; 28 | 29 | X = cell(size(yTemp, 2),1); 30 | y = cell(size(yTemp, 2),1); 31 | 32 | for i = 1:size(yTemp, 2) 33 | y{i} = yTemp{i}; 34 | X{i} = XTemp{i}; 35 | end 36 | 37 | % Configuration of parameters 38 | gpModel = cell(nout,1); 39 | for k =1:nout, 40 | gpModel{k} = gpCreate(q, 1, X{k}, y{k}, optionsG); 41 | gpModel{k} = gpOptimise(gpModel{k}, 1, itersSingleGp); 42 | end 43 | -------------------------------------------------------------------------------- /matlab/demSpmgpGgToy1.m: -------------------------------------------------------------------------------- 1 | % DEMSPMGPGGTOY1 Demonstrate sparse multigp on TOY data using the PITC 2 | % approximation 3 | 4 | % In this demo, we use the Gaussian Kernel for all the covariances 5 | % (or Kernels) involved and only one hidden function. 
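%
% Added summary of the steps below (descriptive only):
%   1. Load the 'ggToyMissing' data set with mapLoadData.
%   2. Build a sparse multigp with the PITC approximation, a 'gg' kernel,
%      one latent force and 30 equally spaced, fixed inducing points
%      (initialInducingPositionMethod = 'espaced', fixInducing = true).
%   3. Set the inverse-width parameters to log(100) in the optimiser's
%      representation before training with scaled conjugate gradients.
%   4. Save the trained model and plot it against the full 'ggToy' ground
%      truth via ggSpmgpToyResults.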
6 | 7 | % MULTIGP 8 | 9 | rand('twister', 1e5); 10 | randn('state', 1e5); 11 | 12 | dataSetName = 'ggToyMissing'; 13 | experimentNo = 2; 14 | 15 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 16 | 17 | options = multigpOptions('pitc'); 18 | options.kernType = 'gg'; 19 | options.optimiser = 'scg'; 20 | options.nlf = 1; 21 | options.initialInducingPositionMethod = 'espaced'; 22 | options.numActive = 30; 23 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 24 | options.fixInducing = true; 25 | 26 | X = cell(size(yTemp, 2),1); 27 | y = cell(size(yTemp, 2),1); 28 | 29 | for i = 1:size(yTemp, 2) 30 | y{i} = yTemp{i}; 31 | X{i} = XTemp{i}; 32 | end 33 | 34 | q = 1; 35 | d = size(yTemp, 2); 36 | 37 | % Creates the model 38 | model = multigpCreate(q, d, X, y, options); 39 | 40 | display = 2; 41 | iters = 2000; 42 | 43 | % Change default initial parameters of length scale 44 | params = modelExtractParam(model); 45 | index = paramNameRegularExpressionLookup(model, '.* inverse .*'); 46 | params(index) = log(100); 47 | model = modelExpandParam(model, params); 48 | 49 | % Train the model 50 | init_time = cputime; 51 | model = multigpOptimise(model, display, iters); 52 | elapsed_time = cputime - init_time; 53 | 54 | % Save the results. 55 | capName = dataSetName; 56 | capName(1) = upper(capName(1)); 57 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 58 | 59 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 60 | 61 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 62 | 63 | -------------------------------------------------------------------------------- /matlab/demSpmgpGgToy3.m: -------------------------------------------------------------------------------- 1 | % DEMSPMGPGGTOY3 Sparse multigp on TOY data using PITC 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'ggToyMissing'; 9 | experimentNo = 3; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('pitc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | % Change the variance (the amplitude of the kernel) 42 | if options.fixInducing == 0 43 | params = modelExtractParam(model); 44 | %for i = 1:model.nout, 45 | paramInd = paramNameRegularExpressionLookup(model, 'X_u .*'); 46 | initialLoc = 0.05*randn(1,length(model.X_u)); 47 | params(paramInd) = initialLoc; 48 | %end 49 | model = modelExpandParam(model, params); 50 | end 51 | 52 | 53 | display = 1; 54 | iters = 2000; 55 | 56 | % Train the model 57 | init_time = cputime; 58 | model = multigpOptimise(model, display, iters); 59 | elapsed_time = cputime - init_time; 60 | 61 | % Save the results. 
62 | capName = dataSetName; 63 | capName(1) = upper(capName(1)); 64 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 65 | 66 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, initialLoc); 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /matlab/demSpmgpGgToy4.m: -------------------------------------------------------------------------------- 1 | % DEMSPMGPGGTOY4 Sparse multigp on TOY data using DTC VAR with GG kernel 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e6); 6 | randn('state', 1e6); 7 | 8 | dataSetName = 'ggToyTrainTest'; 9 | experimentNo = 4; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('dtc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = 1e-1*ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | 42 | % Change the variance (the amplitude of the kernel) 43 | % if options.fixInducing == 0 44 | % params = modelExtractParam(model); 45 | % %for i = 1:model.nout, 46 | % paramInd = paramNameRegularExpressionLookup(model, 'X_u .*'); 47 | % initialLoc = 0.05*randn(1,length(model.X_u)); 48 | % params(paramInd) = initialLoc; 49 | % %end 50 | % model = modelExpandParam(model, params); 51 | % end 52 | 53 | 54 | display = 1; 55 | iters = 2000; 56 | 57 | % Train the model 58 | init_time = cputime; 59 | model = multigpOptimise(model, display, iters); 60 | elapsed_time = cputime - init_time; 61 | 62 | % Save the results. 63 | capName = dataSetName; 64 | capName(1) = upper(capName(1)); 65 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 66 | 67 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, initialLoc); 68 | 69 | -------------------------------------------------------------------------------- /matlab/demSpmgpGgToy5.m: -------------------------------------------------------------------------------- 1 | % DEMSPMGPGGTOY5 Sparse multigp on TOY data using FITC with GG kernel and 2 | % chaning the initial positions to random. 
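%
% Added notes (descriptive only): this demo uses the FITC approximation with
% the 'gg' kernel and, since options.fixInducing is false, perturbs the
% initial inducing positions via initialLoc = 0.05*randn(1, length(model.X_u)).
% The closing call to ggSpmgpToyResults passes a variable named initLoc,
% while the script only defines initialLoc, so that last line will fail
% unless initLoc already exists in the workspace.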
3 | 4 | % MULTIGP 5 | 6 | rand('twister', 1e6); 7 | randn('state', 1e6); 8 | 9 | dataSetName = 'ggToyMissing'; 10 | experimentNo = 5; 11 | 12 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 13 | 14 | options = multigpOptions('fitc'); 15 | options.kernType = 'gg'; 16 | options.optimiser = 'scg'; 17 | options.nlf = 1; 18 | options.initialInducingPositionMethod = 'espaced'; 19 | options.numActive = 30; 20 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 21 | options.fixInducing = false; 22 | 23 | X = cell(size(yTemp, 2),1); 24 | y = cell(size(yTemp, 2),1); 25 | 26 | for i = 1:size(yTemp, 2) 27 | y{i} = yTemp{i}; 28 | X{i} = XTemp{i}; 29 | end 30 | 31 | q = 1; 32 | d = size(yTemp, 2); 33 | 34 | % Creates the model 35 | model = multigpCreate(q, d, X, y, options); 36 | 37 | params = modelExtractParam(model); 38 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 39 | params(index) = log(100); 40 | model = modelExpandParam(model, params); 41 | 42 | % Change the variance (the amplitude of the kernel) 43 | if options.fixInducing == 0 44 | params = modelExtractParam(model); 45 | %for i = 1:model.nout, 46 | paramInd = paramNameRegularExpressionLookup(model, 'X_u .*'); 47 | initialLoc = 0.05*randn(1,length(model.X_u)); 48 | params(paramInd) = initialLoc; 49 | %end 50 | model = modelExpandParam(model, params); 51 | end 52 | 53 | 54 | display = 1; 55 | iters = 2000; 56 | 57 | % Train the model 58 | init_time = cputime; 59 | model = multigpOptimise(model, display, iters); 60 | elapsed_time = cputime - init_time; 61 | 62 | % Save the results. 63 | capName = dataSetName; 64 | capName(1) = upper(capName(1)); 65 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 66 | 67 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, initLoc); 68 | 69 | -------------------------------------------------------------------------------- /matlab/demToy1DGgFTCConvolutionExample.m: -------------------------------------------------------------------------------- 1 | % TOY1DGGFTCEXAMPLE Demo of full multi output GP with Gaussian kernel. 2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process. 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | dataSetName = 'ggToyConvolution'; 13 | experimentNo = 1; 14 | 15 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 16 | 17 | options = multigpOptions('ftc'); 18 | options.kernType = 'gg'; 19 | options.optimiser = 'scg'; 20 | options.nlf = 1; 21 | 22 | q = 1; % Input dimension 23 | d = size(yTemp, 2) + options.nlf; 24 | 25 | X = cell(size(yTemp, 2)+options.nlf,1); 26 | y = cell(size(yTemp, 2)+options.nlf,1); 27 | 28 | for j=1:options.nlf 29 | y{j} = []; 30 | X{j} = zeros(1, q); 31 | end 32 | for i = 1:size(yTemp, 2) 33 | y{i+options.nlf} = yTemp{i}; 34 | X{i+options.nlf} = XTemp{i}; 35 | end 36 | 37 | % Creates the model 38 | model = multigpCreate(q, d, X, y, options); 39 | 40 | % params = modelExtractParam(model); 41 | % index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 42 | % params(index) = log(100); 43 | % model = modelExpandParam(model, params); 44 | 45 | display = 1; 46 | iters = 500; 47 | 48 | % Trains the model 49 | init_time = cputime; 50 | model = multigpOptimise(model, display, iters); 51 | elapsed_time = cputime - init_time; 52 | 53 | % Save the results. 
54 | capName = dataSetName; 55 | capName(1) = upper(capName(1)); 56 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 57 | 58 | % Load complete data to plot the ground truth 59 | 60 | [XGT, void, void, fGT] = mapLoadData('ggToyConvolution'); 61 | 62 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 63 | 64 | -------------------------------------------------------------------------------- /matlab/demToy1DICMFTCExample.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCFTC1 Demo of full multi output GP with LMC kernel. 2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | %dataSetName = 'ggToyTrainTest'; 13 | dataSetName = 'ggToyConvolution'; 14 | experimentNo = 11; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'lmc'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 1; 22 | options.rankCorregMatrix = 2; 23 | options.kern.nout = size(yTemp, 2); 24 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 25 | 26 | 27 | q = 1; % Input dimension 28 | d = size(yTemp, 2) + options.nlf; 29 | 30 | X = cell(size(yTemp, 2)+options.nlf,1); 31 | y = cell(size(yTemp, 2)+options.nlf,1); 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | warning('off','multiKernParamInit:noCrossKernel') 44 | 45 | model = multigpCreate(q, d, X, y, options); 46 | 47 | params = modelExtractParam(model); 48 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 49 | params(index) = log(100); 50 | model = modelExpandParam(model, params); 51 | 52 | display = 1; 53 | iters = 200; 54 | 55 | % Trains the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | % Load complete data to plot the ground truth 66 | 67 | [XGT, void, void, fGT] = mapLoadData(dataSetName); 68 | 69 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 70 | 71 | -------------------------------------------------------------------------------- /matlab/demToy1DICMFTCExample2.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DICMFTCExample2 Demo of full multi output GP with LMC kernel. 
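% (Added note: the tag above reads DEMTOY1DLMCFTC1, apparently copied from
% demToy1DLMCFTC1.m; this file is demToy1DICMFTCExample.m and builds an ICM
% model as an 'lmc' kernel with one latent function and a rank-2
% coregionalisation matrix on the 'ggToyConvolution' data.)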
2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | %dataSetName = 'ggToyTrainTest'; 13 | dataSetName = 'ggToyICM'; 14 | experimentNo = 11; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'lmc'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 1; 22 | options.rankCorregMatrix = 1; 23 | options.kern.nout = size(yTemp, 2); 24 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 25 | 26 | 27 | q = 1; % Input dimension 28 | d = size(yTemp, 2) + options.nlf; 29 | 30 | X = cell(size(yTemp, 2)+options.nlf,1); 31 | y = cell(size(yTemp, 2)+options.nlf,1); 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | warning('off','multiKernParamInit:noCrossKernel') 44 | 45 | model = multigpCreate(q, d, X, y, options); 46 | 47 | params = modelExtractParam(model); 48 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 49 | params(index) = log(100); 50 | model = modelExpandParam(model, params); 51 | 52 | display = 1; 53 | iters = 500; 54 | 55 | % Trains the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | % Load complete data to plot the ground truth 66 | 67 | [XGT, void, void, fGT] = mapLoadData(dataSetName); 68 | 69 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 70 | 71 | -------------------------------------------------------------------------------- /matlab/demToy1DLMCDTC1.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCPITC1 Demo of DTC multi output GP with LMC kernel. 
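% (Added note: the tag above reads DEMTOY1DLMCPITC1, but this file is
% demToy1DLMCDTC1.m and uses the DTC approximation, as set by
% multigpOptions('dtc') below.)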
2 | % FORMAT 3 | % DESC Demo of DTC Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | rand('twister', 1e5); 8 | randn('state', 1e5); 9 | 10 | dataSetName = 'ggToyMissing'; 11 | experimentNo = 35; 12 | 13 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 14 | 15 | options = multigpOptions('dtc'); 16 | options.kernType = 'lmc'; 17 | options.optimiser = 'scg'; 18 | options.nlf = 1; 19 | options.initialInducingPositionMethod = 'espaced'; 20 | options.numActive = 30; 21 | options.beta = 1e-1; 22 | options.fixInducing = false; 23 | options.rankCorregMatrix = 2; 24 | options.kern.nout = size(yTemp, 2); 25 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 26 | options.kern.isArd = 0; 27 | options.includeNoise = false; 28 | options.gamma = exp(-2); 29 | 30 | warning('off', 'multiKernParamInit:noCrossKernel') 31 | 32 | X = cell(size(yTemp, 2),1); 33 | y = cell(size(yTemp, 2),1); 34 | 35 | for i = 1:size(yTemp, 2) 36 | y{i} = yTemp{i}; 37 | X{i} = XTemp{i}; 38 | end 39 | 40 | q = 1; 41 | d = size(yTemp, 2); 42 | 43 | % Creates the model 44 | model = multigpCreate(q, d, X, y, options); 45 | 46 | params = modelExtractParam(model); 47 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 48 | params(index) = log(100); 49 | model = modelExpandParam(model, params); 50 | 51 | 52 | display = 2; 53 | iters = 500; 54 | 55 | % Train the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 66 | 67 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 68 | XGT, fGT); 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /matlab/demToy1DLMCFITC1.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCPITC1 Demo of FITC multi output GP with LMC kernel. 
2 | % FORMAT 3 | % DESC Demo of FITC Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | rand('twister', 1e5); 8 | randn('state', 1e5); 9 | 10 | dataSetName = 'ggToyMissing'; 11 | experimentNo = 34; 12 | 13 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 14 | 15 | options = multigpOptions('fitc'); 16 | options.kernType = 'lmc'; 17 | options.optimiser = 'scg'; 18 | options.nlf = 1; 19 | options.initialInducingPositionMethod = 'espaced'; 20 | options.numActive = 30; 21 | options.beta = 1e-1; 22 | options.fixInducing = false; 23 | options.rankCorregMatrix = 2; 24 | options.kern.nout = size(yTemp, 2); 25 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 26 | options.kern.isArd = 0; 27 | options.includeNoise = false; 28 | options.gamma = exp(-2); 29 | 30 | warning('off', 'multiKernParamInit:noCrossKernel') 31 | 32 | X = cell(size(yTemp, 2),1); 33 | y = cell(size(yTemp, 2),1); 34 | 35 | for i = 1:size(yTemp, 2) 36 | y{i} = yTemp{i}; 37 | X{i} = XTemp{i}; 38 | end 39 | 40 | q = 1; 41 | d = size(yTemp, 2); 42 | 43 | % Creates the model 44 | model = multigpCreate(q, d, X, y, options); 45 | 46 | params = modelExtractParam(model); 47 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 48 | params(index) = log(100); 49 | model = modelExpandParam(model, params); 50 | 51 | 52 | display = 2; 53 | iters = 500; 54 | 55 | % Train the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 66 | 67 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 68 | XGT, fGT); 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /matlab/demToy1DLMCFTC1.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCFTC1 Demo of full multi output GP with LMC kernel. 
2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | %dataSetName = 'ggToyTrainTest'; 13 | dataSetName = 'ggToyMissing'; 14 | experimentNo = 11; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'lmc'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 1; 22 | options.rankCorregMatrix = 2; 23 | options.kern.nout = size(yTemp, 2); 24 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 25 | 26 | 27 | q = 1; % Input dimension 28 | d = size(yTemp, 2) + options.nlf; 29 | 30 | X = cell(size(yTemp, 2)+options.nlf,1); 31 | y = cell(size(yTemp, 2)+options.nlf,1); 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | warning('off','multiKernParamInit:noCrossKernel') 44 | 45 | model = multigpCreate(q, d, X, y, options); 46 | 47 | params = modelExtractParam(model); 48 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 49 | params(index) = log(100); 50 | model = modelExpandParam(model, params); 51 | 52 | display = 2; 53 | iters = 200; 54 | 55 | % Trains the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | % Load complete data to plot the ground truth 66 | 67 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 68 | 69 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 70 | 71 | -------------------------------------------------------------------------------- /matlab/demToy1DLMCPITC1.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCPITC1 Demo of PITC multi output GP with LMC kernel. 
2 | % FORMAT 3 | % DESC Demo of PITC Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | rand('twister', 1e5); 8 | randn('state', 1e5); 9 | 10 | dataSetName = 'ggToyMissing'; 11 | experimentNo = 33; 12 | 13 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 14 | 15 | options = multigpOptions('pitc'); 16 | options.kernType = 'lmc'; 17 | options.optimiser = 'scg'; 18 | options.nlf = 1; 19 | options.initialInducingPositionMethod = 'espaced'; 20 | options.numActive = 30; 21 | options.beta = 1e-1; 22 | options.fixInducing = false; 23 | options.rankCorregMatrix = 2; 24 | options.kern.nout = size(yTemp, 2); 25 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 26 | options.kern.isArd = 0; 27 | options.includeNoise = false; 28 | options.gamma = exp(-2); 29 | 30 | warning('off', 'multiKernParamInit:noCrossKernel') 31 | 32 | X = cell(size(yTemp, 2),1); 33 | y = cell(size(yTemp, 2),1); 34 | 35 | for i = 1:size(yTemp, 2) 36 | y{i} = yTemp{i}; 37 | X{i} = XTemp{i}; 38 | end 39 | 40 | q = 1; 41 | d = size(yTemp, 2); 42 | 43 | % Creates the model 44 | model = multigpCreate(q, d, X, y, options); 45 | 46 | params = modelExtractParam(model); 47 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 48 | params(index) = log(100); 49 | model = modelExpandParam(model, params); 50 | 51 | 52 | display = 2; 53 | iters = 500; 54 | 55 | % Train the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 66 | 67 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 68 | XGT, fGT); 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /matlab/demToy1DSLFMFTCExample.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCFTC1 Demo of full multi output GP with LMC kernel. 
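% (Added note: this file is demToy1DSLFMFTCExample.m; the SLFM model is
% realised here as an 'lmc' kernel with a rank-1 coregionalisation matrix
% and options.nlf = 2 latent functions, fitted to the 'ggToyConvolution'
% data with the full 'ftc' covariance.)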
2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | %dataSetName = 'ggToyTrainTest'; 13 | dataSetName = 'ggToyConvolution'; 14 | experimentNo = 11; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'lmc'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 2; 22 | options.rankCorregMatrix = 1; 23 | options.kern.nout = size(yTemp, 2); 24 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 25 | 26 | 27 | q = 1; % Input dimension 28 | d = size(yTemp, 2) + options.nlf; 29 | 30 | X = cell(size(yTemp, 2)+options.nlf,1); 31 | y = cell(size(yTemp, 2)+options.nlf,1); 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | warning('off','multiKernParamInit:noCrossKernel') 44 | 45 | model = multigpCreate(q, d, X, y, options); 46 | 47 | params = modelExtractParam(model); 48 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 49 | params(index) = log(100); 50 | model = modelExpandParam(model, params); 51 | 52 | display = 1; 53 | iters = 200; 54 | 55 | % Trains the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | % Load complete data to plot the ground truth 66 | 67 | [XGT, void, void, fGT] = mapLoadData(dataSetName); 68 | 69 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 70 | 71 | -------------------------------------------------------------------------------- /matlab/demToy1DSLFMFTCExample2.m: -------------------------------------------------------------------------------- 1 | % DEMTOY1DLMCFTC1 Demo of full multi output GP with LMC kernel. 
2 | % FORMAT 3 | % DESC Demo of Full Multi Output Gaussian Process with LMC kernel 4 | 5 | % MULTIGP 6 | 7 | clc 8 | clear 9 | rand('twister',1e5); 10 | randn('state',1e5); 11 | % 12 | %dataSetName = 'ggToyTrainTest'; 13 | dataSetName = 'ggToyICM'; 14 | experimentNo = 11; 15 | 16 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 17 | 18 | options = multigpOptions('ftc'); 19 | options.kernType = 'lmc'; 20 | options.optimiser = 'scg'; 21 | options.nlf = 1; 22 | options.rankCorregMatrix = 1; 23 | options.kern.nout = size(yTemp, 2); 24 | options.kern.rankCorregMatrix = options.rankCorregMatrix; 25 | 26 | 27 | q = 1; % Input dimension 28 | d = size(yTemp, 2) + options.nlf; 29 | 30 | X = cell(size(yTemp, 2)+options.nlf,1); 31 | y = cell(size(yTemp, 2)+options.nlf,1); 32 | 33 | for j=1:options.nlf 34 | y{j} = []; 35 | X{j} = zeros(1, q); 36 | end 37 | for i = 1:size(yTemp, 2) 38 | y{i+options.nlf} = yTemp{i}; 39 | X{i+options.nlf} = XTemp{i}; 40 | end 41 | 42 | % Creates the model 43 | warning('off','multiKernParamInit:noCrossKernel') 44 | 45 | model = multigpCreate(q, d, X, y, options); 46 | 47 | params = modelExtractParam(model); 48 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 49 | params(index) = log(100); 50 | model = modelExpandParam(model, params); 51 | 52 | display = 1; 53 | iters = 200; 54 | 55 | % Trains the model 56 | init_time = cputime; 57 | model = multigpOptimise(model, display, iters); 58 | elapsed_time = cputime - init_time; 59 | 60 | % Save the results. 61 | capName = dataSetName; 62 | capName(1) = upper(capName(1)); 63 | save(['dem' capName num2str(experimentNo) '.mat'], 'model'); 64 | 65 | % Load complete data to plot the ground truth 66 | 67 | [XGT, void, void, fGT] = mapLoadData(dataSetName); 68 | 69 | ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT); 70 | 71 | -------------------------------------------------------------------------------- /matlab/examplesCadmiumGgError.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < scriptBatchJuraGgFullCd.m 7 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd1.m 8 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd2.m 9 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd3.m 10 | exit 0 11 | -------------------------------------------------------------------------------- /matlab/examplesCadmiumGgKL.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd1KL.m 7 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd2KL.m 8 | matlab -nodisplay -nojvm < scriptBatchJuraGgSpmgpCd3KL.m 9 | exit 0 10 | 11 | -------------------------------------------------------------------------------- /matlab/examplesCadmiumGgwhiteError.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < scriptBatchJuraGgwhiteFullCd.m 7 | matlab -nodisplay -nojvm < scriptBatchJuraGgwhiteSpgmpCd1.m 8 | matlab -nodisplay -nojvm < scriptBatchJuraGgwhiteSpgmpCd2.m 9 | exit 0 10 | -------------------------------------------------------------------------------- /matlab/examplesCadmiumGgwhiteKL.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 
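# (Added note: this script and examplesCadmiumGgwhiteError.sh above invoke
# scripts spelled scriptBatchJuraGgwhiteSpgmp*; elsewhere the naming is
# Spmgp (e.g. scriptBatchJuraGgSpmgpCd1KL.m in examplesCadmiumGgKL.sh), so
# 'Spgmp' is likely a typo and these matlab calls would not find the files.)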
5 | # 6 | matlab -nodisplay -nojvm < scriptBatchJuraGgwhiteSpgmpCd1KL.m 7 | matlab -nodisplay -nojvm < scriptBatchJuraGgwhiteSpgmpCd2KL.m 8 | exit 0 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /matlab/examplesToy1DGgKL.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < demSpmgpGgToy3KL.m 7 | matlab -nodisplay -nojvm < demSpmgpGgToy4KL.m 8 | matlab -nodisplay -nojvm < demSpmgpGgToy5KL.m 9 | exit 0 10 | -------------------------------------------------------------------------------- /matlab/examplesToy1DGgwhiteKL.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | #$ -cwd 4 | #$ -S /bin/bash 5 | # 6 | matlab -nodisplay -nojvm < demSpmgpNoiseToy1KL.m 7 | matlab -nodisplay -nojvm < demSpmgpNoiseToy2KL.m 8 | 9 | exit 0 10 | 11 | 12 | -------------------------------------------------------------------------------- /matlab/fxDataCompareTestError.m: -------------------------------------------------------------------------------- 1 | % FXDATACOMPARETESTERROR description 2 | 3 | % MULTIGP 4 | 5 | % Data 6 | 7 | R = 1:1:6; 8 | smse_test_best = [53.61 29.98 33.00 27.95 20.33 28.47]; 9 | smse_test_sim = [53.61 34.06 35.48 28.15 41.92 110.17]; 10 | smse_test_ou = [56.00 29.98 33.00 34.89 20.33 70.37]; 11 | smse_test_lmc = [56.41 39.27 43.17 43.20 48.19 56.41 59.75]; 12 | 13 | baseDirResults = './'; 14 | 15 | % Plotting the results 16 | 17 | figure 18 | hold on 19 | % Plotting the true data 20 | c = plot(R, smse_test_best, 'k-*'); 21 | set(c, 'markerSize', 10, 'lineWidth', 2); 22 | d = plot(R, smse_test_sim, 'r-s'); 23 | set(d, 'markerSize', 10, 'lineWidth', 2); 24 | e = plot(R, smse_test_ou, 'b-o'); 25 | set(e, 'markerSize', 10, 'lineWidth', 2); 26 | f = plot(R, smse_test_lmc(1:length(R)), 'g-v'); 27 | set(f, 'markerSize', 10, 'lineWidth', 2); 28 | set(gca, 'fontname', 'arial', 'fontsize', 15, 'xlim', xlim, 'Color', 'none') 29 | hl = legend('Best', 'Only SIM', 'Only OU', 'LMC', 'Location', 'NorthWest'); 30 | box on 31 | fileName = 'fxDataCompareTestError'; 32 | print('-depsc', [baseDirResults 'results/' fileName]); 33 | saveas(gcf,[baseDirResults 'results/' fileName],'fig'); 34 | pos = get(gcf, 'paperposition'); 35 | origpos = pos; 36 | pos(3) = pos(3)/2; 37 | pos(4) = pos(4)/2; 38 | set(gcf, 'paperposition', pos); 39 | lineWidth = get(gca, 'lineWidth'); 40 | set(gca, 'lineWidth', lineWidth); 41 | print('-dpng', [baseDirResults 'results/' fileName]) 42 | set(gca, 'lineWidth', lineWidth); 43 | set(gcf, 'paperposition', origpos); 44 | -------------------------------------------------------------------------------- /matlab/fxDataCompareTrainingError.m: -------------------------------------------------------------------------------- 1 | % FXDATACOMPARETRAININGERROR description 2 | 3 | % MULTIGP 4 | 5 | % Data 6 | 7 | R = 1:1:6; 8 | smse_train_best = [33.15 19.66 11.61 6.49 5.34 4.50]; 9 | smse_train_sim = [34.88 20.29 13.97 11.15 7.16 5.04]; 10 | smse_train_ou = [33.15 19.66 13.28 7.94 5.34 4.50]; 11 | smse_train_lmc = [32.71 19.66 13.32 7.46 5.18 3.83 2.67]; 12 | 13 | baseDirResults = './'; 14 | 15 | % Plotting the results 16 | 17 | figure 18 | hold on 19 | % Plotting the true data 20 | c = plot(R, smse_train_best, 'k-*'); 21 | set(c, 'markerSize', 10, 'lineWidth', 2); 22 | d = plot(R, smse_train_sim, 'r-s'); 23 | set(d, 'markerSize', 10, 'lineWidth', 2); 24 | e = 
plot(R, smse_train_ou, 'b-o'); 25 | set(e, 'markerSize', 10, 'lineWidth', 2); 26 | f = plot(R, smse_train_lmc(1:length(R)), 'g-v'); 27 | set(f, 'markerSize', 10, 'lineWidth', 2); 28 | set(gca, 'fontname', 'arial', 'fontsize', 15, 'xlim', xlim, 'Color', 'none') 29 | hl = legend('Best', 'Only SIM', 'Only OU', 'LMC', 'Location', 'NorthEast'); 30 | box on 31 | fileName = 'fxDataCompareTrainingError'; 32 | print('-depsc', [baseDirResults 'results/' fileName]); 33 | saveas(gcf,[baseDirResults 'results/' fileName],'fig'); 34 | pos = get(gcf, 'paperposition'); 35 | origpos = pos; 36 | pos(3) = pos(3)/2; 37 | pos(4) = pos(4)/2; 38 | set(gcf, 'paperposition', pos); 39 | lineWidth = get(gca, 'lineWidth'); 40 | set(gca, 'lineWidth', lineWidth); 41 | print('-dpng', [baseDirResults 'results/' fileName]) 42 | set(gca, 'lineWidth', lineWidth); 43 | set(gcf, 'paperposition', origpos); 44 | -------------------------------------------------------------------------------- /matlab/fxDataCompareVariationalBound.m: -------------------------------------------------------------------------------- 1 | % FXDATACOMPAREVARIATIONALBOUND description 2 | 3 | % MULTIGP 4 | 5 | % Data 6 | 7 | R = 1:1:6; 8 | Fv_best = [-2115.52 -1173.91 -692.28 -235.11 216.31 555.25]; 9 | Fv_sim = [-2115.52 -1173.91 -692.28 -257.39 216.31 555.25]; 10 | Fv_ou = [-2257.29 -1408.13 -977.31 -440.58 -123.49 95.83]; 11 | 12 | baseDirResults = './'; 13 | 14 | % Plotting the results 15 | 16 | figure 17 | hold on 18 | % Plotting the true data 19 | c = plot(R, Fv_best, 'k-*'); 20 | set(c, 'markerSize', 10, 'lineWidth', 2); 21 | d = plot(R, Fv_sim, 'r-s'); 22 | set(d, 'markerSize', 10, 'lineWidth', 2); 23 | e = plot(R, Fv_ou, 'b-o'); 24 | set(e, 'markerSize', 10, 'lineWidth', 2); 25 | set(gca, 'fontname', 'arial', 'fontsize', 15, 'xlim', xlim, 'Color', 'none') 26 | hl = legend('Best', 'Only SIM', 'Only OU', 'Location', 'SouthEast'); 27 | box on 28 | fileName = 'fxDataCompareVariationalBound'; 29 | print('-depsc', [baseDirResults 'results/' fileName]); 30 | saveas(gcf,[baseDirResults 'results/' fileName],'fig'); 31 | pos = get(gcf, 'paperposition'); 32 | origpos = pos; 33 | pos(3) = pos(3)/2; 34 | pos(4) = pos(4)/2; 35 | set(gcf, 'paperposition', pos); 36 | lineWidth = get(gca, 'lineWidth'); 37 | set(gca, 'lineWidth', lineWidth); 38 | print('-dpng', [baseDirResults 'results/' fileName]) 39 | set(gca, 'lineWidth', lineWidth); 40 | set(gcf, 'paperposition', origpos); 41 | -------------------------------------------------------------------------------- /matlab/gaussianKernGradTransfer.m: -------------------------------------------------------------------------------- 1 | function kern = gaussianKernGradTransfer(kern, kernLat, localGrad, whichLat) 2 | 3 | % GUASSIANKERNGRADTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | params = kern.funcNames.extractLat(kernLat); 10 | 11 | gradInvWithLatent = localGrad(1:kern.lfParamsTemplate).*params(1:kern.lfParamsTemplate); 12 | 13 | kern.grad.precisionU(:, whichLat) = kern.grad.precisionU(:, whichLat) + gradInvWithLatent'; 14 | -------------------------------------------------------------------------------- /matlab/gaussianKernParamTransfer.m: -------------------------------------------------------------------------------- 1 | function gaussianKern = gaussianKernParamTransfer(kern, gaussianKern, whichLatent) 2 | 3 | % GUASSIANKERNPARAMTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. 
Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | gaussianKern.precisionU = kern.precisionU(:, whichLatent); 10 | 11 | -------------------------------------------------------------------------------- /matlab/gaussianaXgaussianKernCompute.m: -------------------------------------------------------------------------------- 1 | function [K, Kbase, n2] = gaussianaXgaussianKernCompute(kern, x, x2) 2 | 3 | % GAUSSIANAXGAUSSIANKERNCOMPUTE Derivative of acceleration of the Gaussian 4 | % kernel 5 | % FORMAT 6 | % DESC computes the kernel matrix for the kernel formed when taking the 7 | % second derivative of the Gaussian kernel for the first argument. Only 8 | % works when x and x2 have dimension one. 9 | % RETURN K : the kernel matrix computed at the given points. 10 | % ARG kern : the kernel structure for which the matrix is computed. 11 | % ARG X : the input matrix associated with the rows of the kernel. 12 | % ARG X2 : the input matrix associated with the columns of the kernel. 13 | % 14 | % FORMAT 15 | % DESC computes the kernel matrix for the kernel formed when taking the 16 | % second derivative of the Gaussian kernel for the first argument. Only 17 | % works when x have dimension one. 18 | % RETURN K : the kernel matrix computed at the given points. 19 | % ARG kern : the kernel structure for which the matrix is computed. 20 | % ARG x : input data matrix in the form of a design matrix. 21 | % 22 | % SEEALSO : gaussianKernCompute, gaussianvXgaussianKernCompute 23 | % 24 | % COPYRIGHT : Mauricio A. Alvarez, 2010 25 | 26 | % KERN 27 | 28 | if size(x,2) > 1 29 | error('The current version of this kernel only works for 1 D inputs') 30 | end 31 | 32 | if nargin > 2 33 | if size(x2,2) > 1 34 | error('The current version of this kernel only works for 1 D inputs') 35 | end 36 | end 37 | 38 | if nargin < 3 39 | x2 = x; 40 | end 41 | 42 | n2 = dist2(x, x2); 43 | Kbase = exp(-0.5*kern.precisionU*n2); 44 | 45 | X = x(:, ones(1, size(x2,1))); 46 | pX2 = x2'; 47 | X2 = pX2(ones(size(x,1),1), :); 48 | X_X2 = X - X2; 49 | 50 | 51 | K = kern.sigma2Latent*kern.precisionU*(kern.precisionU*(X_X2.^2) - 1).*Kbase; 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /matlab/gaussianvXgaussianKernCompute.m: -------------------------------------------------------------------------------- 1 | function [K, Kbase, n2] = gaussianvXgaussianKernCompute(kern, x, x2) 2 | 3 | % GAUSSIANVXGAUSSIANKERNCOMPUTE Derivative of velocity of the Gaussian 4 | % kernel 5 | % FORMAT 6 | % DESC computes the kernel matrix for the kernel formed when taking the 7 | % derivative of the Gaussian kernel for the first argument. Only works when 8 | % x and x2 have dimension one. 9 | % RETURN K : the kernel matrix computed at the given points. 10 | % ARG kern : the kernel structure for which the matrix is computed. 11 | % ARG X : the input matrix associated with the rows of the kernel. 12 | % ARG X2 : the input matrix associated with the columns of the kernel. 13 | % 14 | % FORMAT 15 | % DESC computes the kernel matrix for the kernel formed when taking the 16 | % derivative of the Gaussian kernel for the first argument. Only works when 17 | % x have dimension one. 18 | % RETURN K : the kernel matrix computed at the given points. 19 | % ARG kern : the kernel structure for which the matrix is computed. 20 | % ARG x : input data matrix in the form of a design matrix. 
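%
% (Added note: for 1-D inputs the code below evaluates
%    K(x, x') = -sigma2Latent * precisionU * (x - x') * exp(-0.5 * precisionU * (x - x')^2),
% i.e. the derivative with respect to the first argument of the Gaussian
% kernel sigma2Latent * exp(-0.5 * precisionU * (x - x')^2).)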
21 | % 22 | % SEEALSO : gaussianKernParamInit, kernCompute, kernCreate, gaussianKernDiagCompute 23 | % 24 | % COPYRIGHT : Mauricio A. Alvarez, 2010 25 | 26 | % KERN 27 | 28 | if size(x,2) > 1 29 | error('The current version of this kernel only works for 1 D inputs') 30 | end 31 | 32 | if nargin > 2 33 | if size(x2,2) > 1 34 | error('The current version of this kernel only works for 1 D inputs ') 35 | end 36 | end 37 | 38 | if nargin < 3 39 | x2 = x; 40 | end 41 | 42 | n2 = dist2(x, x2); 43 | Kbase = exp(-0.5*kern.precisionU*n2); 44 | 45 | X = x(:, ones(1, size(x2,1))); 46 | pX2 = x2'; 47 | X2 = pX2(ones(size(x,1),1), :); 48 | X_X2 = X - X2; 49 | 50 | K = - kern.sigma2Latent*kern.precisionU*X_X2.*Kbase; 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /matlab/ggKernGradTransfer.m: -------------------------------------------------------------------------------- 1 | function kern = ggKernGradTransfer(kern, kernOut, localGrad, whichOut, whichLat) 2 | 3 | % GGKERNGRADTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | params = kern.funcNames.extractOut(kernOut); 10 | gradInvWithLatent = localGrad(1:kern.lfParamsTemplate).*params(1:kern.lfParamsTemplate); 11 | gradInvWidthOutput = localGrad(kern.lfParamsTemplate+1:kern.lfParamsTemplate+kern.outParamsTemplate)... 12 | .*params(kern.lfParamsTemplate+1:kern.lfParamsTemplate+kern.outParamsTemplate); 13 | 14 | gradSensitivity = localGrad(end); 15 | 16 | kern.grad.precisionU(:, whichLat) = kern.grad.precisionU(:, whichLat) + gradInvWithLatent'; 17 | if kern.tieOutputParams 18 | kern.grad.precisionG(:, whichOut) = kern.grad.precisionG(:, whichOut) + gradInvWidthOutput; 19 | else 20 | kern.grad.precisionG(:, whichOut, whichLat) = kern.grad.precisionG(:, whichOut, whichLat) + gradInvWidthOutput; 21 | end 22 | 23 | kern.grad.sensitivity(whichOut, whichLat) = kern.grad.sensitivity(whichOut, whichLat) + gradSensitivity; 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /matlab/ggKernParamTransfer.m: -------------------------------------------------------------------------------- 1 | function ggKern = ggKernParamTransfer(kern, ggKern, whichOutput, whichLatent) 2 | 3 | % GGKERNPARAMTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | ggKern.precisionU = kern.precisionU(:,whichLatent); 10 | if kern.tieOutputParams 11 | ggKern.precisionG = kern.precisionG(:,whichOutput); 12 | else 13 | ggKern.precisionG = kern.precisionG(:,whichOutput, whichLatent); 14 | end 15 | ggKern.sensitivity = kern.sensitivity(whichOutput, whichLatent); 16 | 17 | -------------------------------------------------------------------------------- /matlab/ggMultigpKernOptions.m: -------------------------------------------------------------------------------- 1 | function model = ggMultigpKernOptions(model, options) 2 | 3 | % GGMULTIGPKERNOPTIONS Changes the default options for GG kernels 4 | % FORMAT 5 | % DESC Changes default options for the GG kernel and GAUSSIAN kernels 6 | % RETURN model : model with kernels modified 7 | % ARG model : model created 8 | % ARG options : options for particular kernel 9 | % 10 | % COPYRIGHT : Mauricio A. 
Alvarez, 2009 11 | 12 | % MULTIGP 13 | 14 | if isfield(options, 'isArd') && ~isempty(options.isArd) 15 | fhandle1 = str2func([model.kern.comp{1}.comp{1}.type 'KernParamInit']); 16 | fhandle2 = str2func([model.kern.comp{1}.comp{model.nlf+1}.type 'KernParamInit']); 17 | nParamsKernInit = model.kern.nParams; 18 | nParamsInit = 0; 19 | nParamsKern = 0; 20 | for k=1:model.nlf 21 | nParams = 0; 22 | model.kern.comp{k}.comp{k} = fhandle1(... 23 | model.kern.comp{k}.comp{k}, options.isArd); 24 | nParams = nParams + model.kern.comp{k}.comp{k}.nParams; 25 | for j=1:model.nout, 26 | model.kern.comp{k}.comp{model.nlf+j} = fhandle2(... 27 | model.kern.comp{k}.comp{model.nlf+j}, options.isArd); 28 | nParams = nParams + model.kern.comp{k}.comp{model.nlf+j}.nParams; 29 | end 30 | nParamsInit = nParamsInit + model.kern.comp{k}.nParams; 31 | model.kern.comp{k}.nParams = nParams; 32 | model.kern.comp{k}.paramGroups = speye(nParams); 33 | nParamsKern = nParamsKern + nParams; 34 | end 35 | model.kern.nParams = nParamsKernInit - nParamsInit + nParamsKern; 36 | model.kern.paramGroups = speye(nParamsKernInit - nParamsInit + nParamsKern); 37 | end 38 | -------------------------------------------------------------------------------- /matlab/ggglobalKernExpandParam.m: -------------------------------------------------------------------------------- 1 | function kern = ggglobalKernExpandParam(kern, params) 2 | 3 | % GGGLOBALKERNEXPANDPARAM 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if kern.isArd 10 | nParamsLat = kern.inputDimension*kern.nlf; 11 | kern.precisionU = reshape(params(1:nParamsLat), kern.inputDimension, kern.nlf); 12 | if kern.tieOutputParams 13 | nParamsOut = kern.inputDimension*kern.nout; 14 | kern.precisionG = reshape(params(nParamsLat+1:nParamsLat+nParamsOut), kern.inputDimension, kern.nout); 15 | else 16 | nParamsOut = kern.inputDimension*kern.nout*kern.nlf; 17 | kern.precisionG = reshape(params(nParamsLat+1:nParamsLat+nParamsOut), kern.inputDimension, kern.nout, kern.nlf); 18 | end 19 | else 20 | nParamsLat = kern.nlf; 21 | kern.precisionU = reshape(params(1:nParamsLat), 1, kern.nlf); 22 | if kern.tieOutputParams 23 | nParamsOut = kern.nout; 24 | kern.precisionG = reshape(params(nParamsLat+1:nParamsLat+nParamsOut), 1, kern.nout); 25 | else 26 | nParamsOut = kern.nout*kern.nlf; 27 | kern.precisionG = reshape(params(nParamsLat+1:nParamsLat+nParamsOut), 1, kern.nout, kern.nlf); 28 | end 29 | end 30 | 31 | kern.sensitivity = reshape(params(nParamsLat+nParamsOut+1:end), kern.nout, kern.nlf); -------------------------------------------------------------------------------- /matlab/ggglobalKernGradCat.m: -------------------------------------------------------------------------------- 1 | function g = ggglobalKernGradCat(kern) 2 | 3 | % GGGLOBALKERNGRADCAT 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | g = [kern.grad.precisionU(:)' kern.grad.precisionG(:)' kern.grad.sensitivity(:)']; 10 | 11 | -------------------------------------------------------------------------------- /matlab/ggglobalKernGradInit.m: -------------------------------------------------------------------------------- 1 | function kern = ggglobalKernGradInit(kern) 2 | 3 | % GGGLOBALKERNGRADINIT 4 | % 5 | % COPYRIGHT : Mauricio A.
Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | kern.grad.precisionU = zeros(size(kern.precisionU)); 10 | kern.grad.precisionG = zeros(size(kern.precisionG)); 11 | kern.grad.sensitivity = zeros(size(kern.sensitivity)); 12 | -------------------------------------------------------------------------------- /matlab/helperCreateNames.m: -------------------------------------------------------------------------------- 1 | function helperCreateNames(numberDims, numberInducing) 2 | 3 | % create Names 4 | if exist('gaussianwhiteNames.txt', 'file') 5 | delete('gaussianwhiteNames.txt') 6 | end 7 | if exist('Xunames.txt', 'file') 8 | delete('Xunames.txt') 9 | end 10 | fidGaussianwhite = fopen('gaussianwhiteNames.txt','a'); 11 | fidXu = fopen('Xunames.txt','a'); 12 | for k=1:numberInducing, 13 | message =['VIK ' num2str(k) ' inverse width latent']; 14 | fprintf(fidGaussianwhite,'%s\n',message); 15 | end 16 | fclose(fidGaussianwhite); 17 | for i = 1:numberInducing 18 | for j = 1:numberDims 19 | message = ['X_u (' num2str(i) ', ' num2str(j) ')']; 20 | fprintf(fidXu,'%s\n',message); 21 | end 22 | end 23 | fclose(fidXu); -------------------------------------------------------------------------------- /matlab/lfmKernGradTransfer.m: -------------------------------------------------------------------------------- 1 | function kern = lfmKernGradTransfer(kern, kernOut, localGrad, whichOut, whichLat) 2 | 3 | % LFMKERNGRADTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | params = kern.funcNames.extractOut(kernOut); 10 | 11 | if ~kern.isMassFixed 12 | gradMass = localGrad(1)*params(1); 13 | kern.grad.massVector(whichOut) = kern.grad.massVector(whichOut) + gradMass; 14 | end 15 | 16 | gradSpring = localGrad(2)*params(2); 17 | gradDamper = localGrad(3)*params(3); 18 | gradInvWidth = localGrad(4)*params(4); 19 | kern.grad.springVector(whichOut) = kern.grad.springVector(whichOut) + gradSpring; 20 | kern.grad.damperVector(whichOut) = kern.grad.damperVector(whichOut) + gradDamper; 21 | kern.grad.inverseWidthVector(whichLat) = kern.grad.inverseWidthVector(whichLat) + gradInvWidth; 22 | 23 | gradSensitivity = localGrad(5); 24 | kern.grad.sensitivity(whichOut, whichLat) = kern.grad.sensitivity(whichOut, whichLat) + gradSensitivity; 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /matlab/lfmKernParamTransfer.m: -------------------------------------------------------------------------------- 1 | function lfmKern = lfmKernParamTransfer(kern, lfmKern, whichOutput, whichLatent) 2 | 3 | % LFMKERNPARAMTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | if kern.isMassFixed 9 | lfmKern.mass = kern.massFixedVal; 10 | else 11 | lfmKern.mass = kern.massVector(whichOutput); 12 | end 13 | lfmKern.spring = kern.springVector(whichOutput); 14 | lfmKern.damper = kern.damperVector(whichOutput); 15 | lfmKern.inverseWidth = kern.inverseWidthVector(whichLatent); 16 | lfmKern.sensitivity = kern.sensitivity(whichOutput, whichLatent); 17 | 18 | -------------------------------------------------------------------------------- /matlab/lfmMeanCompute.m: -------------------------------------------------------------------------------- 1 | function m = lfmMeanCompute(meanFunction, X, varargin) 2 | 3 | % LFMMEANCOMPUTE Give the output of the lfm mean function model for given X. 4 | % FORMAT 5 | % DESC gives the output of the lfm mean function model for a given input X. 6 | % ARG model : structure specifying the model. 
7 | % ARG X : input location(s) for which output is to be computed. 8 | % RETURN Y : output location(s) corresponding to given input 9 | % locations. 10 | % 11 | % SEEALSO : lfmMeanCreate 12 | % 13 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 14 | 15 | 16 | % MULTIGP 17 | 18 | nlf = varargin{1}; 19 | startVal=1; 20 | endVal=0; 21 | for i =1:nlf, 22 | endVal = endVal + size(X{i}, 1); 23 | m(startVal:endVal, 1) = zeros(length(X{i}),1); 24 | startVal = endVal+1; 25 | end 26 | 27 | for i = nlf+1:length(X), 28 | endVal = endVal + size(X{i}, 1); 29 | m(startVal:endVal, 1) = meanFunction.basal(i-nlf)/meanFunction.spring(i-nlf) * ... 30 | ones(length(X{i}),1); 31 | startVal = endVal+1; 32 | end -------------------------------------------------------------------------------- /matlab/lfmMeanCreate.m: -------------------------------------------------------------------------------- 1 | function meanFunction = lfmMeanCreate(q, d, varargin) 2 | 3 | % LFMMEANCREATE creates the mean function for a multi output GP 4 | % model based in the LFM kernel (second order differential equation) 5 | % The outputs of the model are generated according to 6 | % 7 | % mean_q = B_q/D_q 8 | % 9 | % where mean_q is an output constant corresponding to the mean of the 10 | % output function q, B_q is basal transcription and D_q is the spring 11 | % constant. 12 | % 13 | % FORMAT 14 | % DESC returns a structure for the mean function for the multiple output 15 | % Gaussian process model that uses the LFM kernel. 16 | % RETURN model : the structure for the multigp model 17 | % ARG q : input dimension size. 18 | % ARG d : output dimension size. 19 | % ARG options : contains the options for the MEAN of the MULTIGP model. 20 | % 21 | % SEE ALSO: lfmKernParamInit, lfmKernCompute 22 | % 23 | % COPYRIGHT : Mauricio A. Alvarez and Neil D. Lawrence, 2008 24 | 25 | % MULTIGP 26 | 27 | if q > 1 28 | error('LFM MEAN FUNCTION only valid for one-D input.') 29 | end 30 | 31 | meanFunction.type = 'lfm'; 32 | meanFunction.basal = ones(d,1); 33 | meanFunction.spring = ones(d,1); 34 | meanFunction.transforms.index = d+1:2*d; 35 | meanFunction.transforms.type = optimiDefaultConstraint('positive'); 36 | % Only the parameters of basal rates are counted. The springs are already 37 | % counted in the kernel 38 | meanFunction.nParams = 2*d; -------------------------------------------------------------------------------- /matlab/lfmMeanExpandParam.m: -------------------------------------------------------------------------------- 1 | function meanFunction = lfmMeanExpandParam(meanFunction, params) 2 | 3 | % LFMMEANEXPANDPARAM Extract the parameters of the vector parameter and put 4 | % them back in the mean function structure for the LFM model. 5 | % DESC returns a mean function lfm structure filled with the 6 | % parameters in the given vector. This is used as a helper function to 7 | % enable parameters to be optimised in, for example, the NETLAB 8 | % optimisation functions. 9 | % ARG meanFunction : the meanFunction structure in which the parameters are to be 10 | % placed. 11 | % ARG param : vector of parameters which are to be placed in the 12 | % kernel structure. 13 | % RETURN meanFunction : mean function structure with the given parameters in the 14 | % relevant locations. 15 | % 16 | % SEEALSO : lfmMeanCreate, lfmMeanExtractParam, kernExpandParam 17 | % 18 | % COPYRIGHT : Mauricio Alvarez and Neil D. 
Lawrence, 2008 19 | 20 | % MULTIGP 21 | 22 | meanFunction.basal = params(1:meanFunction.nParams/2)'; 23 | meanFunction.spring = params(meanFunction.nParams/2+1:meanFunction.nParams)'; 24 | 25 | -------------------------------------------------------------------------------- /matlab/lfmMeanExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = lfmMeanExtractParam(meanFunction) 2 | 3 | % LFMMEANEXTRACTPARAM Extract parameters from the LFM MEAN function structure. 4 | % FORMAT 5 | % DESC Extract parameters from the mean funtion structure of the lfm model 6 | % into a vector of parameters for optimisation. 7 | % ARG meanFunction : the mean function structure containing the parameters to be 8 | % extracted. 9 | % RETURN param : vector of parameters extracted from the kernel. 10 | % 11 | % FORMAT 12 | % DESC Extract parameters and their names from mean funtion structure of 13 | % the lfm model 14 | % ARG meanFunction : the mean function structure containing the parameters to be 15 | % extracted. 16 | % RETURN param : vector of parameters extracted from the kernel. 17 | % RETURN names : cell array of strings containing parameter names. 18 | % 19 | % SEEALSO lfmMeanCreate, lfmMeanExpandParam, lfmKernCreate, 20 | % lfmkernExtractParam 21 | % 22 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 23 | 24 | % MULTIGP 25 | 26 | params = [meanFunction.basal' meanFunction.spring']; 27 | if nargout > 1 28 | names = cell(1, 2*meanFunction.nParams/2); 29 | for i=1:meanFunction.nParams/2 30 | names{i} = ['lfm ' num2str(i) ' basal']; 31 | end 32 | for i=meanFunction.nParams/2+1:2*meanFunction.nParams/2 33 | names{i} = ['lfm ' num2str(i-meanFunction.nParams/2) ' spring']; 34 | end 35 | end 36 | 37 | 38 | -------------------------------------------------------------------------------- /matlab/lfmMeanGradient.m: -------------------------------------------------------------------------------- 1 | function g = lfmMeanGradient(meanFunction, varargin) 2 | 3 | % LFMMEANGRADIENT Gradient of the parameters of the mean function in the 4 | % multigp model with LFM kernel 5 | % FORMAT 6 | % DESC gives the gradient of the objective function for the parameters of 7 | % the mean function in the multigp model with LFM kernel (second order 8 | % differential equation). 9 | % ARG meanFunction : mean function structure to optimise. 10 | % ARG P1, P2, P3 ... : optional additional arguments. 11 | % RETURN g : the gradient of the error function to be minimised. 12 | % 13 | % SEEALSO : lfmMeanCreate, lfmMeanOut 14 | % 15 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 16 | 17 | % MULTIGP 18 | 19 | gmu = varargin{1}'; 20 | gB = gmu./meanFunction.spring; 21 | gD = -gmu.*meanFunction.basal./(meanFunction.spring.*meanFunction.spring); 22 | g = [gB' gD']; 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /matlab/lfmMultigpFixParam.m: -------------------------------------------------------------------------------- 1 | function model = lfmMultigpFixParam(model, options) 2 | 3 | % LFMMULTIGPFIXPARAM Fix the parameters for a multigp model with LFM kernel 4 | % FORMAT 5 | % DESC Fix the parameters for a multigp model that uses LFM kernel. 6 | % RETURN model : model with fixed parameters included 7 | % ARG model : model before fixing the parameters 8 | % ARG options : options for fixing parameters 9 | % 10 | % COPYRIGHT : Mauricio A. 
Alvarez, David Luengo 2009 11 | 12 | % MULTIGP 13 | 14 | % This code fixes masses and latent variances to 1. 15 | index = paramNameRegularExpressionLookup(model, ['multi ' ... 16 | '[0-9]+ rbf [0-9]+ variance']); 17 | count = 0; 18 | for k=1:length(index); 19 | count = count + 1; 20 | model.fix(count).index = index(k); 21 | model.fix(count).value = expTransform(1, 'xtoa'); 22 | end 23 | % Fix the masses of the lfm kernels. 24 | index = paramNameRegularExpressionLookup(model, ['multi ' ... 25 | '[0-9]+ lfm [0-9]+ mass']); 26 | for k=1:length(index); 27 | count = count + 1; 28 | model.fix(count).index = index(k); 29 | model.fix(count).value = expTransform(1, 'xtoa'); 30 | end 31 | 32 | if ~strcmp(model.approx, 'ftc') && options.includeNoise 33 | % In the approximations fitc, pitc and dtc, this function is 34 | % accomplished by the parameter beta 35 | % If there is noise then is at the end and it's not necessary to look 36 | % for them using paramNameRegularExpressionLookUp 37 | nParamsKern = 0; 38 | nToLook = options.nlf + options.includeInd; 39 | for k =1:nToLook, 40 | nParamsKern = nParamsKern + model.kern.comp{k}.nParams; 41 | end 42 | index = (nParamsKern + options.nlf + 1):... 43 | (nParamsKern + options.nlf +model.nout); 44 | for k=1:length(index); 45 | count = count + 1; 46 | model.fix(count).index = index(k); 47 | model.fix(count).value = expTransform(1e-9, 'xtoa'); 48 | end 49 | end -------------------------------------------------------------------------------- /matlab/lfmMultigpTieParam.m: -------------------------------------------------------------------------------- 1 | function tieInd = lfmMultigpTieParam(model, options) 2 | 3 | % LFMMULTIGPTIEPARAM Tie the parameters for a multigp model with LFM kernel 4 | % FORMAT 5 | % DESC Tie the parameters for a multigp model that uses LFM kernel. 6 | % RETURN tieInd : cell with elements containing the indexes of parameters 7 | % to tie. 8 | % ARG model : model created 9 | % ARG options : options for tying parameters 10 | % 11 | % COPYRIGHT : Mauricio A. Alvarez, David Luengo 2009 12 | 13 | % MULTIGP 14 | 15 | for i = 1:options.nlf 16 | tieInd{i} = paramNameRegularExpressionLookup(model, ['multi ' num2str(i) ... 17 | ' .* inverse width']); 18 | end 19 | for i = 1:model.nout 20 | tieInd{end+1} = paramNameRegularExpressionLookup(model, ['. lfm ' num2str(i) ' spring']); 21 | tieInd{end+1} = paramNameRegularExpressionLookup(model, ['multi ' ... 22 | '[0-9]+ lfm ' num2str(i) ' damper']); 23 | end -------------------------------------------------------------------------------- /matlab/lfmglobalKernExpandParam.m: -------------------------------------------------------------------------------- 1 | function kern = lfmglobalKernExpandParam(kern, params) 2 | 3 | % LFMGLOBALKERNEXPANDPARAM 4 | % 5 | % COPYRIGHT : Mauricio A. 
Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if kern.isMassFixed 10 | kern.springVector = params(1:kern.nout); 11 | kern.damperVector = params(kern.nout+1:2*kern.nout); 12 | kern.inverseWidthVector = params((2*kern.nout)+1:(2*kern.nout)+kern.nlf); 13 | kern.sensitivity = reshape(params(kern.nlf+2*kern.nout+1:end), kern.nout, kern.nlf); 14 | massVector = kern.massFixedVal*ones(1, kern.nout); 15 | else 16 | kern.massVector = params(1:kern.nout); 17 | kern.springVector = params(kern.nout+1:2*kern.nout); 18 | kern.damperVector = params((2*kern.nout)+1:3*kern.nout); 19 | kern.inverseWidthVector = params((3*kern.nout)+1:(3*kern.nout)+kern.nlf); 20 | kern.sensitivity = reshape(params(kern.nlf+3*kern.nout+1:end), kern.nout, kern.nlf); 21 | massVector = kern.massVector; 22 | end 23 | 24 | kern.alphaVector = kern.damperVector./(2*massVector); 25 | kern.omegaVector = sqrt(kern.springVector./massVector-kern.alphaVector.^2); 26 | kern.gammaVector = kern.alphaVector + 1i*kern.omegaVector; 27 | 28 | kern.zetaVector = kern.damperVector./(2*sqrt(massVector.*kern.springVector)); 29 | kern.omega_0Vector = sqrt(kern.springVector./massVector); 30 | -------------------------------------------------------------------------------- /matlab/lfmglobalKernExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = lfmglobalKernExtractParam(kern) 2 | 3 | % LFMGLOBALKERNEXTRACTPARAM 4 | % 5 | % COPYRIGHT 6 | 7 | % MULTIGP 8 | 9 | if kern.isMassFixed 10 | params = [kern.springVector kern.damperVector ... 11 | kern.inverseWidthVector kern.sensitivity(:)']; 12 | else 13 | params = [kern.massVector kern.springVector kern.damperVector ... 14 | kern.inverseWidthVector kern.sensitivity(:)']; 15 | end 16 | 17 | if nargout > 1 18 | namesSpring = cell(kern.nout,1); 19 | namesDamper = cell(kern.nout,1); 20 | namesInvWidth = cell(kern.nlf,1); 21 | for i=1:kern.nout 22 | namesSpring{i} = ['spring ' num2str(i) '.']; 23 | namesDamper{i} = ['damper ' num2str(i) '.']; 24 | end 25 | for i=1:kern.nlf 26 | namesInvWidth{i} = ['inverse width ' num2str(i) '.']; 27 | end 28 | if ~kern.isMassFixed 29 | namesMass = cell(kern.nout,1); 30 | for i=1:kern.nout 31 | namesMass{i} = ['mass ' num2str(i) '.']; 32 | end 33 | end 34 | if kern.isMassFixed 35 | names = [namesSpring(:)' namesDamper(:)' namesInvWidth(:)']; 36 | else 37 | names = [namesMass(:)' namesSpring(:)' namesDamper(:)' namesInvWidth(:)']; 38 | end 39 | namesSensitivity = cell(kern.nout, kern.nlf); 40 | for i=1:kern.nout 41 | output = num2str(i); 42 | for j=1:kern.nlf 43 | force = num2str(j); 44 | namesSensitivity{i,j} = ['sensitivity output ' output ' force ' force '.']; 45 | end 46 | end 47 | names = [names(:)' namesSensitivity(:)']; 48 | end 49 | -------------------------------------------------------------------------------- /matlab/lfmglobalKernGradCat.m: -------------------------------------------------------------------------------- 1 | function g = lfmglobalKernGradCat(kern) 2 | 3 | % LFMGLOBALKERNGRADCAT 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if kern.isMassFixed 10 | g = [kern.grad.springVector kern.grad.damperVector ... 11 | kern.grad.inverseWidthVector]; 12 | else 13 | g = [kern.grad.massVector kern.grad.springVector kern.grad.damperVector ... 
14 | kern.grad.inverseWidthVector]; 15 | end 16 | g = [g kern.grad.sensitivity(:)']; 17 | 18 | -------------------------------------------------------------------------------- /matlab/lfmglobalKernGradInit.m: -------------------------------------------------------------------------------- 1 | function kern = lfmglobalKernGradInit(kern) 2 | 3 | % LFMGLOBALKERNGRADINIT 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if ~kern.isMassFixed 10 | kern.grad.massVector = zeros(size(kern.massVector)); 11 | end 12 | kern.grad.springVector = zeros(size(kern.springVector)); 13 | kern.grad.damperVector = zeros(size(kern.damperVector)); 14 | kern.grad.inverseWidthVector = zeros(size(kern.inverseWidthVector)); 15 | kern.grad.sensitivity = zeros(size(kern.sensitivity)); 16 | 17 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMeanCompute.m: -------------------------------------------------------------------------------- 1 | function m = lfmwhiteMeanCompute(meanFunction, X, varargin) 2 | 3 | % LFMWHITEMEANCOMPUTE Give the output of the LFM-WHITE mean function model 4 | % for given X. 5 | % FORMAT 6 | % DESC gives the output of the LFM-WHITE mean function model for a given 7 | % input X. 8 | % ARG model : structure specifying the model. 9 | % ARG X : input location(s) for which output is to be computed. 10 | % RETURN Y : output location(s) corresponding to given input locations. 11 | % 12 | % SEEALSO : lfmwhiteMeanCreate 13 | % 14 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 15 | % 16 | % MODIFICATIONS : David Luengo, 2009 17 | 18 | % MULTIGP 19 | 20 | nlf = varargin{1}; 21 | startVal=1; 22 | endVal=0; 23 | for i =1:nlf, 24 | endVal = endVal + size(X{i}, 1); 25 | m(startVal:endVal, 1) = zeros(length(X{i}),1); 26 | startVal = endVal+1; 27 | end 28 | 29 | for i = nlf+1:length(X), 30 | endVal = endVal + size(X{i}, 1); 31 | m(startVal:endVal, 1) = meanFunction.basal(i-nlf)/meanFunction.spring(i-nlf) * ... 32 | ones(length(X{i}),1); 33 | startVal = endVal+1; 34 | end 35 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMeanCreate.m: -------------------------------------------------------------------------------- 1 | function meanFunction = lfmwhiteMeanCreate(q, d, varargin) 2 | 3 | % LFMWHITEMEANCREATE returns a structure for the mean function LFM-WHITE kernel. 4 | % FORMAT 5 | % DESC creates the mean function for a multi output 6 | % GP model based in the LFM-WHITE kernel (second order ODE excited by a 7 | % white noise input process). The outputs of the model are generated 8 | % according to 9 | % 10 | % mean_q = B_q/D_q 11 | % 12 | % where mean_q is an output constant corresponding to the mean of the 13 | % output function q, B_q is basal transcription and D_q is the spring 14 | % constant. 15 | % RETURN model : the structure for the multigp model 16 | % ARG q : input dimension size. 17 | % ARG d : output dimension size. 18 | % ARG options : contains the options for the MEAN of the MULTIGP model. 19 | % 20 | % SEE ALSO: lfmwhiteKernParamInit, lfmwhiteKernCompute 21 | % 22 | % COPYRIGHT : Mauricio Alvarez and Neil D. 
Lawrence, 2008 23 | % 24 | % MODIFICATIONS : David Luengo, 2009 25 | 26 | % MULTIGP 27 | 28 | if q > 1 29 | error('LFM-WHITE MEAN FUNCTION only valid for one-D input.') 30 | end 31 | 32 | meanFunction.type = 'lfmwhite'; 33 | meanFunction.basal = ones(d,1); 34 | meanFunction.spring = ones(d,1); 35 | meanFunction.transforms.index = d+1:2*d; 36 | meanFunction.transforms.type = optimiDefaultConstraint('positive'); 37 | % Only the parameters of basal rates are counted. The springs are already 38 | % counted in the kernel 39 | meanFunction.nParams = 2*d; 40 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMeanExpandParam.m: -------------------------------------------------------------------------------- 1 | function meanFunction = lfmwhiteMeanExpandParam(meanFunction, params) 2 | 3 | % LFMWHITEMEANEXPANDPARAM Extract the parameters of the vector parameter 4 | % and put them back in the mean function structure for the LFM-WHITE model. 5 | % FORMAT 6 | % DESC returns a mean function LFM-WHITE structure filled with the 7 | % parameters in the given vector. This is used as a helper function to 8 | % enable parameters to be optimised in, for example, the NETLAB 9 | % optimisation functions. 10 | % ARG meanFunction : the meanFunction structure in which the parameters are 11 | % to be placed. 12 | % ARG param : vector of parameters which are to be placed in the kernel 13 | % structure. 14 | % RETURN meanFunction : mean function structure with the given parameters 15 | % in the relevant locations. 16 | % 17 | % SEEALSO : lfmwhiteMeanCreate, lfmwhiteMeanExtractParam, kernExpandParam 18 | % 19 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 20 | % 21 | % MODIFICATIONS : David Luengo, 2009 22 | 23 | % MULTIGP 24 | 25 | meanFunction.basal = params(1:meanFunction.nParams/2)'; 26 | meanFunction.spring = params(meanFunction.nParams/2+1:meanFunction.nParams)'; 27 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMeanExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = lfmwhiteMeanExtractParam(meanFunction) 2 | 3 | % LFMWHITEMEANEXTRACTPARAM Extract parameters from the LFM-WHITE mean function structure. 4 | % FORMAT 5 | % DESC Extract parameters from the mean funtion structure of the LFM-WHITE 6 | % model into a vector of parameters for optimisation. 7 | % ARG meanFunction : the mean function structure containing the parameters 8 | % to be extracted. 9 | % RETURN param : vector of parameters extracted from the kernel. 10 | % 11 | % FORMAT 12 | % DESC Extract parameters and their names from mean funtion structure of 13 | % the LFM-WHIE model 14 | % ARG meanFunction : the mean function structure containing the parameters 15 | % to be extracted. 16 | % RETURN param : vector of parameters extracted from the kernel. 17 | % RETURN names : cell array of strings containing parameter names. 18 | % 19 | % SEEALSO lfmwhiteMeanCreate, lfmwhiteMeanExpandParam, lfmwhiteKernCreate, 20 | % lfmwhiteKernExtractParam 21 | % 22 | % COPYRIGHT : Mauricio Alvarez and Neil D. 
Lawrence, 2008 23 | % 24 | % MODIFICATIONS : David Luengo, 2009 25 | 26 | % MULTIGP 27 | 28 | params = [meanFunction.basal' meanFunction.spring']; 29 | if nargout > 1 30 | names = cell(1, 2*meanFunction.nParams/2); 31 | for i=1:meanFunction.nParams/2 32 | names{i} = ['lfmwhite ' num2str(i) ' basal']; 33 | end 34 | for i=meanFunction.nParams/2+1:2*meanFunction.nParams/2 35 | names{i} = ['lfmwhite ' num2str(i-meanFunction.nParams/2) ' spring']; 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMeanGradient.m: -------------------------------------------------------------------------------- 1 | function g = lfmwhiteMeanGradient(meanFunction, varargin) 2 | 3 | % LFMWHITEMEANGRADIENT Gradient of the parameters of the mean function in 4 | % the MULTIGP model with LFM-WHITE kernel 5 | % FORMAT 6 | % DESC gives the gradient of the objective function for the parameters of 7 | % the mean function in the multigp model with LFM-WHITE kernel (second order 8 | % differential equation). 9 | % ARG meanFunction : mean function structure to optimise. 10 | % ARG P1, P2, P3 ... : optional additional arguments. 11 | % RETURN g : the gradient of the error function to be minimised. 12 | % 13 | % SEEALSO : lfmwhiteMeanCreate, lfmwhiteMeanOut 14 | % 15 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 16 | % 17 | % MODIFICATIONS : David Luengo, 2009 18 | 19 | % MULTIGP 20 | 21 | gmu = varargin{1}'; 22 | gB = gmu./meanFunction.spring; 23 | gD = -gmu.*meanFunction.basal./(meanFunction.spring.*meanFunction.spring); 24 | g = [gB' gD']; 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /matlab/lfmwhiteMultigpFixParam.m: -------------------------------------------------------------------------------- 1 | function model = lfmwhiteMultigpFixParam(model, options) 2 | 3 | % LFMWHITEMULTIGPFIXPARAM Fix parameters for a multigp with LFMWHITE kernel 4 | % FORMAT 5 | % DESC Fix the parameters for a multigp model that uses LFMWHITE kernel. 6 | % RETURN model : model with fixed parameters included 7 | % ARG model : model before fixing the parameters 8 | % ARG options : options for fixing parameters 9 | % 10 | % COPYRIGHT : Mauricio A. Alvarez, David Luengo 2009 11 | 12 | % MULTIGP 13 | 14 | % This code fixes the latent variances to 1. 15 | index = paramNameRegularExpressionLookup(model, ... 16 | ['multi [1-' num2str(model.nlf) '] .* variance']); 17 | count = 0; 18 | for k=1:length(index); 19 | count = count + 1; 20 | model.fix(count).index = index(k); 21 | model.fix(count).value = expTransform(1, 'xtoa'); 22 | end 23 | % Fix the masses of the lfm kernels. 24 | index = paramNameRegularExpressionLookup(model, ['multi ' ... 25 | '[0-9]+ lfmwhite [0-9]+ mass']); 26 | for k=1:length(index); 27 | count = count + 1; 28 | model.fix(count).index = index(k); 29 | model.fix(count).value = expTransform(1, 'xtoa'); 30 | end -------------------------------------------------------------------------------- /matlab/lfmwhiteMultigpTieParam.m: -------------------------------------------------------------------------------- 1 | function tieInd = lfmwhiteMultigpTieParam(model, options) 2 | 3 | % LFMWHITEMULTIGPTIEPARAM Tie parameters for a multigp with LFMWHITE kernel 4 | % FORMAT 5 | % DESC Tie the parameters for a multigp model that uses LFMWHITE kernel. 6 | % RETURN tieInd : cell with elements containing the indexes of parameters 7 | % to tie. 
8 | % ARG model : model created 9 | % ARG options : options for tying parameters 10 | % 11 | % COPYRIGHT : Mauricio A. Alvarez, David Luengo 2009 12 | 13 | % MULTIGP 14 | 15 | for i = 1:options.nlf 16 | tieInd{i} = paramNameRegularExpressionLookup(model, ['multi ' num2str(i) ... 17 | ' .* variance']); 18 | end 19 | for i = 1:model.nout 20 | % tieInd{end+1} = paramNameRegularExpressionLookup(model, ['multi ' ... 21 | % '[0-9]+ lfm ' num2str(i) ' spring']); 22 | tieInd{end+1} = paramNameRegularExpressionLookup(model, ['. lfmwhite ' num2str(i) ' spring']); 23 | tieInd{end+1} = paramNameRegularExpressionLookup(model, ['multi ' ... 24 | '[0-9]+ lfmwhite ' num2str(i) ' damper']); 25 | end -------------------------------------------------------------------------------- /matlab/lmcMultigpFixParam.m: -------------------------------------------------------------------------------- 1 | function model = lmcMultigpFixParam(model, options) 2 | 3 | % LMCMULTIGPFIXPARAM Fix the parameters for a multigp model with LMC kernel 4 | % FORMAT 5 | % DESC Fix the parameters for a multigp model that uses LMC kernel. 6 | % RETURN model : model with fixed parameters included 7 | % ARG model : model before fixing the parameters 8 | % ARG options : options for fixing parameters 9 | % 10 | % COPYRIGHT : Mauricio A. Alvarez, 2010 11 | 12 | % MULTIGP 13 | 14 | 15 | index = paramNameRegularExpressionLookup(model, 'multi .* variance latent'); 16 | count = 0; 17 | for k=1:length(index); 18 | count = count + 1; 19 | model.fix(count).index = index(k); 20 | model.fix(count).value = expTransform(1, 'xtoa'); 21 | end 22 | 23 | % index = paramNameRegularExpressionLookup(model, 'Beta .*'); 24 | % for k=1:length(index); 25 | % count = count + 1; 26 | % model.fix(count).index = index(k); 27 | % model.fix(count).value = expTransform(1e-2, 'xtoa'); 28 | % end 29 | 30 | 31 | 32 | % if ~strcmp(model.approx, 'ftc') 33 | % % In the approximations fitc, pitc and dtc, this function is 34 | % % accomplished by the parameter beta 35 | % % If there is noise then is at the end and it's not necessary to look 36 | % % for them using paramNameRegularExpressionLookUp 37 | % nParamsKern = 0; 38 | % nToLook = options.nlf + options.includeInd; 39 | % for k =1:nToLook, 40 | % nParamsKern = nParamsKern + model.kern.comp{k}.nParams; 41 | % end 42 | % index = (nParamsKern + options.nlf + 1):... 43 | % (nParamsKern + options.nlf +model.nout); 44 | % for k=1:length(index); 45 | % count = count + 1; 46 | % model.fix(count).index = index(k); 47 | % model.fix(count).value = expTransform(1e-9, 'xtoa'); 48 | % end 49 | % end -------------------------------------------------------------------------------- /matlab/meanCompute.m: -------------------------------------------------------------------------------- 1 | function m = meanCompute(meanFunction, X, varargin) 2 | 3 | % MEANCOMPUTE Give the output of the lfm mean function model for given X. 4 | % FORMAT 5 | % DESC gives the output of a mean function model for a given input X. 6 | % ARG meanFunction : structure specifying the model. 7 | % ARG X : input location(s) for which output is to be computed. 8 | % RETURN Y : output location(s) corresponding to given input 9 | % locations. 10 | % 11 | % SEEALSO : meanCreate 12 | % 13 | % COPYRIGHT : Mauricio Alvarez and Neil D. 
Lawrence, 2008 14 | 15 | % MULTIGP 16 | 17 | fhandle = str2func([meanFunction.type 'MeanCompute']); 18 | m = fhandle(meanFunction, X, varargin{:}); 19 | -------------------------------------------------------------------------------- /matlab/meanCreate.m: -------------------------------------------------------------------------------- 1 | function model = meanCreate(q, d, X, y, options) 2 | 3 | % MEANCREATE creates the mean function for a multi output GP 4 | % 5 | % FORMAT 6 | % DESC returns a structure for the mean function for the multiple output 7 | % Gaussian process model 8 | % RETURN model : the structure for the mean function of the multigp model 9 | % ARG q : input dimension size. 10 | % ARG d : output dimension size. 11 | % ARG X : set of training inputs 12 | % ARG y : set of training observations 13 | % ARG options : contains the options for the MEAN of the MULTIGP model. 14 | % 15 | % SEE ALSO: meanCompute 16 | % 17 | % COPYRIGHT : Mauricio A. Alvarez and Neil D. Lawrence, 2008 18 | 19 | % MULTIGP 20 | 21 | fhandle = str2func([options.type 'MeanCreate' ]); 22 | model = fhandle(q , d, options); 23 | model.paramGroups = speye(model.nParams); -------------------------------------------------------------------------------- /matlab/meanExpandParam.m: -------------------------------------------------------------------------------- 1 | function meanFunction = meanExpandParam(meanFunction, params) 2 | 3 | % MEANEXPANDPARAM Extract the parameters of the vector parameter and put 4 | % them back in a mean function structure. 5 | % DESC returns a mean function structure filled with the 6 | % parameters in the given vector. This is used as a helper function to 7 | % enable parameters to be optimised in, for example, the NETLAB 8 | % optimisation functions. 9 | % ARG meanFunction : the meanFunction structure in which the parameters are to be 10 | % placed. 11 | % ARG param : vector of parameters which are to be placed in the 12 | % kernel structure. 13 | % RETURN meanFunction : mean function structure with the given parameters in the 14 | % relevant locations. 15 | % 16 | % SEEALSO : meanCreate, meanExtractParam, meanExpandParam 17 | % 18 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 19 | 20 | % MULTIGP 21 | 22 | if isfield(meanFunction, 'paramGroups') 23 | params = params*meanFunction.paramGroups'; 24 | end 25 | 26 | % Check if parameters are being optimised in a transformed space. 27 | if ~isempty(meanFunction.transforms) 28 | for i = 1:length(meanFunction.transforms) 29 | index = meanFunction.transforms(i).index; 30 | fhandle = str2func([meanFunction.transforms(i).type 'Transform']); 31 | params(index) = fhandle(params(index), 'atox'); 32 | end 33 | end 34 | fhandle = str2func([meanFunction.type 'MeanExpandParam']); 35 | meanFunction = fhandle(meanFunction, params); 36 | 37 | 38 | -------------------------------------------------------------------------------- /matlab/meanExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = meanExtractParam(meanFunction) 2 | 3 | % MEANEXTRACTPARAM Extract parameters from a MEAN FUNCTION structure. 4 | % FORMAT 5 | % DESC Extract parameters from a mean funtion structure 6 | % into a vector of parameters for optimisation. 7 | % ARG model : the mean function structure containing the parameters to be 8 | % extracted. 9 | % RETURN param : vector of parameters extracted from the mean function. 
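%
% A minimal usage sketch (values are illustrative; meanCreate does not use
% its X and y arguments for the lfm mean, so they are passed empty here):
%   options.type = 'lfm';
%   meanFunction = meanCreate(1, 3, [], [], options);
%   [params, names] = meanExtractParam(meanFunction);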
10 | % 11 | % FORMAT 12 | % DESC Extract parameters and their names from a mean funtion structure 13 | % ARG model : the mean function structure containing the parameters to be 14 | % extracted. 15 | % RETURN param : vector of parameters extracted from the mean function. 16 | % RETURN names : cell array of strings containing parameter names. 17 | % 18 | % SEEALSO meanCreate, meanExpandParam, kernCreate, 19 | % kernExtractParam 20 | % 21 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 22 | 23 | % MULTIGP 24 | 25 | fhandle = str2func([meanFunction.type 'MeanExtractParam']); 26 | 27 | if nargout > 1 28 | [params, names] = fhandle(meanFunction); 29 | else 30 | params = fhandle(meanFunction); 31 | end 32 | 33 | % Check if parameters are being optimised in a transformed space. 34 | if ~isempty(meanFunction.transforms) 35 | for i = 1:length(meanFunction.transforms) 36 | index = meanFunction.transforms(i).index; 37 | fhandle = str2func([meanFunction.transforms(i).type 'Transform']); 38 | params(index) = fhandle(params(index), 'xtoa'); 39 | end 40 | end 41 | 42 | params = params*meanFunction.paramGroups; 43 | -------------------------------------------------------------------------------- /matlab/meanFactors.m: -------------------------------------------------------------------------------- 1 | function factors = meanFactors(meanFunction, factorType) 2 | 3 | % MEANFACTORS Extract factors associated with transformed optimisation space. 4 | 5 | % MULTIGP 6 | 7 | factors.index = []; 8 | factors.val = []; 9 | if ~isempty(meanFunction.transforms) 10 | fhandle = str2func([meanFunction.type 'MeanExtractParam']); 11 | params = fhandle(meanFunction); 12 | for i = 1:length(meanFunction.transforms) 13 | index = meanFunction.transforms(i).index; 14 | factors.index = [factors.index index]; 15 | fhandle = str2func([meanFunction.transforms(i).type 'Transform']); 16 | factors.val = [factors.val ... 17 | fhandle(params(index), factorType)]; 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /matlab/meanGradient.m: -------------------------------------------------------------------------------- 1 | function g = meanGradient(meanFunction, varargin) 2 | 3 | % MEANGRADIENT Gradient of the parameters of the mean function in the 4 | % multigp model 5 | % FORMAT 6 | % DESC gives the gradient of the objective function for the parameters of 7 | % the mean function in the multigp model 8 | % ARG meanFunction : mean function structure to optimise. 9 | % ARG P1, P2, P3 ... : optional additional arguments. 10 | % RETURN g : the gradient of the error function to be minimised. 11 | % 12 | % SEEALSO : meanCreate, meanCompute 13 | % 14 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 15 | 16 | % MULTIGP 17 | 18 | fhandle = str2func([meanFunction.type 'MeanGradient']); 19 | g = fhandle( meanFunction, varargin{:}); 20 | 21 | factors = meanFactors(meanFunction, 'gradfact'); 22 | g(factors.index) = g(factors.index).*factors.val; 23 | 24 | -------------------------------------------------------------------------------- /matlab/multigpComputeAlpha.m: -------------------------------------------------------------------------------- 1 | function alpha = multigpComputeAlpha(model, m) 2 | 3 | % MULTIGPCOMPUTEALPHA Update the vector `alpha' for computing posterior mean quickly. 4 | % FORMAT 5 | % DESC updates the vectors that are known as `alpha' in the support 6 | % vector machine, in other words invK*y, where y is the target values. 
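% In the full 'ftc' case this is alpha = model.invK*m (see below), so a
% posterior mean can later be obtained as a product of the form
% K_* * alpha without refactorising K (cf. multigpPosteriorMeanVar).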
7 | % ARG model : the model for which the alphas are going to be 8 | % updated. 9 | % ARG m : the values of m for which the updates will be made. 10 | % RETURN model : the model with the updated alphas. 11 | % 12 | % SEEALSO : multigpCreate, multigpUpdateAD, multigpUpdateKernels 13 | % 14 | % COPYRIGHT : Neil D. Lawrence, 2008 15 | 16 | % MULTIGP 17 | 18 | if nargin < 2 19 | m = model.m; 20 | end 21 | 22 | switch model.approx 23 | case 'ftc' 24 | alpha = model.invK*m; 25 | case {'dtc','fitc','pitc', 'dtcvar'} 26 | if isfield(model, 'beta') && ~isempty(model.beta) 27 | alpha = model.AinvKuyDinvy; 28 | else 29 | alpha =[]; 30 | end 31 | otherwise 32 | error('Alpha update not yet implemented for sparse kernels'); 33 | end -------------------------------------------------------------------------------- /matlab/multigpGradient.m: -------------------------------------------------------------------------------- 1 | function g = multigpGradient(params, model) 2 | 3 | % MULTIGPGRADIENT Gradient wrapper for a MULTIGP model. 4 | 5 | % MULTIGP 6 | 7 | model = modelExpandParam(model, params); 8 | g = - modelLogLikeGradients(model); 9 | -------------------------------------------------------------------------------- /matlab/multigpLogLikelihood.m: -------------------------------------------------------------------------------- 1 | function ll = multigpLogLikelihood( model) 2 | 3 | % MULTIGPLOGLIKELIHOOD Compute the log likelihood of a MULTIGP. 4 | 5 | % COPYRIGHT : Mauricio A Alvarez, 2008 6 | 7 | % MULTIGP 8 | 9 | switch model.approx 10 | case 'ftc' 11 | dim = size(model.m, 1); 12 | ll = -dim*log(2*pi) - model.logDetK - model.m'*model.invK*model.m; 13 | ll = ll*0.5; 14 | % MAP optimization for gpsim with more than two latent functions 15 | % if strcmp(model.kernType, 'sim') && (model.nlf>1) && ... 16 | % strcmp(model.inference, 'map') 17 | % ll = ll - sum(log(model.S)); 18 | % end 19 | case {'dtc','fitc','pitc','dtcvar'} 20 | ll = spmultigpLogLikelihood( model); 21 | end 22 | 23 | -------------------------------------------------------------------------------- /matlab/multigpObjective.m: -------------------------------------------------------------------------------- 1 | function f = multigpObjective(params, model) 2 | 3 | % MULTIGPOBJECTIVE Wrapper function for MULTIGPOPTIMISE objective. 4 | 5 | % MULTIGP 6 | 7 | model = modelExpandParam(model, params); 8 | f = - multigpLogLikelihood(model); 9 | -------------------------------------------------------------------------------- /matlab/multigpObjectiveGradient.m: -------------------------------------------------------------------------------- 1 | function [f, g] = multigpObjectiveGradient(params, model) 2 | 3 | % MULTIGPOBJECTIVEGRADIENT Wrapper for MULTIGP objective and gradient. 4 | % FORMAT 5 | % DESC returns the negative log likelihood of a Gaussian process 6 | % model given the model structure and a vector of parameters. This 7 | % allows the use of NETLAB minimisation functions to find the model 8 | % parameters. 9 | % ARG params : the parameters of the model for which the objective 10 | % will be evaluated. 11 | % ARG model : the model structure for which the objective will be 12 | % evaluated. 13 | % RETURN f : the negative log likelihood of the GP model. 14 | % RETURN g : the gradient of the negative log likelihood of the GP 15 | % model with respect to the parameters. 16 | % 17 | % SEEALSO : minimize, gpCreate, gpGradient, gpLogLikelihood, gpOptimise 18 | % 19 | % COPYRIGHT : Neil D. 
Lawrence, 2005, 2006 20 | 21 | % MULTIGP 22 | 23 | % Check how the optimiser has given the parameters 24 | if size(params, 1) > size(params, 2) 25 | % As a column vector ... transpose everything. 26 | transpose = true; 27 | model = modelExpandParam(model, params'); 28 | else 29 | transpose = false; 30 | model = modelExpandParam(model, params); 31 | end 32 | 33 | f = - multigpLogLikelihood(model); 34 | if nargout > 1 35 | g = - modelLogLikeGradients(model); 36 | end 37 | if transpose 38 | g = g'; 39 | end -------------------------------------------------------------------------------- /matlab/multigpOptimise.m: -------------------------------------------------------------------------------- 1 | function [model, params] = multigpOptimise(model, display, iters) 2 | 3 | % MULTIGPOPTIMISE Optimise the inducing variable multigp based kernel. 4 | % FORMAT 5 | % DESC optimises the Gaussian 6 | % process model for a given number of iterations. 7 | % RETURN model : the optimised model. 8 | % RETURN params : the optimised parameter vector. 9 | % ARG model : the model to be optimised. 10 | % ARG display : whether or not to display while optimisation proceeds, 11 | % set to 2 for the most verbose and 0 for the least verbose. 12 | % ARG iters : number of iterations for the optimisation. 13 | % 14 | % SEEALSO : scg, conjgrad, multigpOptimiseCreate, 15 | % multigpOptimiseGradient, multigpOptimiseObjective 16 | % 17 | % COPYRIGHT : Neil D. Lawrence, 2005, 2006 18 | % 19 | % MODIFIED : Mauricio A. Alvarez, 2008 20 | 21 | % MULTIGP 22 | 23 | if nargin < 3 24 | iters = 1000; 25 | if nargin < 2 26 | display = 1; 27 | end 28 | end 29 | 30 | params = modelExtractParam(model); 31 | 32 | options = optOptions; 33 | if display 34 | options(1) = 1; 35 | if length(params) <= 100 && display > 1 36 | options(9) = 1; 37 | end 38 | end 39 | options(14) = iters; 40 | 41 | if isfield(model, 'optimiser') 42 | optim = str2func(model.optimiser); 43 | else 44 | optim = str2func('conjgrad'); 45 | end 46 | 47 | if strcmp(func2str(optim), 'optimiMinimize') 48 | % Carl Rasmussen's minimize function 49 | params = optim('multigpObjectiveGradient', params, options, model); 50 | else 51 | % NETLAB style optimization. 52 | params = optim('multigpObjective', params, options, ... 53 | 'multigpGradient', model); 54 | end 55 | 56 | %model = multigpExpandParam(model, params); 57 | 58 | model = modelExpandParam(model, params); 59 | -------------------------------------------------------------------------------- /matlab/multigpToolboxes.m: -------------------------------------------------------------------------------- 1 | % MULTIGPTOOLBOXES Load in the relevant toolboxes for MULTIGP. 2 | 3 | importLatest('drawing') 4 | importLatest('netlab') 5 | importLatest('optimi') 6 | importTool('mltools') 7 | importLatest('kern') 8 | importLatest('ndlutil') 9 | importLatest('gp') 10 | importLatest('mocap') 11 | importLatest('datasets') 12 | importLatest('voicebox') -------------------------------------------------------------------------------- /matlab/rbfKernGradTransfer.m: -------------------------------------------------------------------------------- 1 | function kern = rbfKernGradTransfer(kern, kernLat, localGrad, whichLat) 2 | 3 | % RBFKERNGRADTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | params = kern.funcNames.extractLat(kernLat); 10 | 11 | gradInvWith = localGrad(1)*params(1); 12 | 13 | kern.grad.inverseWidthVector(whichLat) = kern.grad.inverseWidthVector(whichLat) ... 
14 | + gradInvWith'; 15 | 16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /matlab/rbfKernParamTransfer.m: -------------------------------------------------------------------------------- 1 | function rbfKern = rbfKernParamTransfer(kern, rbfKern, whichLatent) 2 | 3 | % RBFKERNPARAMTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | rbfKern.inverseWidth = kern.inverseWidthVector(whichLatent); 10 | 11 | -------------------------------------------------------------------------------- /matlab/readme.txt: -------------------------------------------------------------------------------- 1 | The multigp toolbox is a toolbox for multiple output Gaussian 2 | processes. Version 0.1 is the first official release. An older 3 | release, 0.001, which was used to publish the results in our NIPS 4 | paper, is available on request. 5 | 6 | The toolbox allows the use of multioutput Gaussian processes in 7 | combination with the "multi" kern type, first developed for use with 8 | the gpsim code. The aim of this toolbox is to be more 9 | general than the gpsim code. In particular it should allow for sparse 10 | approximations for multiple output GPs. 11 | 12 | The multioutput GPs are constructed through convolution processes. For 13 | more details see our NIPS paper and work by Dave Higdon and the NIPS 14 | paper by Boyle and Frean. 15 | 16 | 17 | Version 0.13 18 | ------------ 19 | 20 | Contains updates to the code for the technical report. 21 | 22 | 23 | Version 0.11 24 | ------------ 25 | 26 | Updated version which allows for variational outputs and includes a financial data example. 27 | 28 | Version 0.1 29 | ----------- 30 | 31 | First version of the software with implementation of the 2008 NIPS paper. 32 | -------------------------------------------------------------------------------- /matlab/robotGgIND.m: -------------------------------------------------------------------------------- 1 | % ROBOTGGIND Runs independent GPs (GG and white kernels, one model per output) on the robot wireless data. 2 | 3 | 4 | 5 | clc 6 | clear 7 | 8 | rand('twister', 1e5); 9 | randn('state', 1e5); 10 | addToolboxes(0,1) 11 | dataSetName = 'robotWireless'; 12 | experimentNo = 3; 13 | 14 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 15 | % Configuration of data 16 | q = size(XTemp{1},2); 17 | d = size(yTemp, 2); 18 | nout = size(yTemp, 2); 19 | 20 | options.type = 'gp'; 21 | options.numModels = nout; 22 | options.compOptions = gpOptions('ftc'); 23 | options.compOptions.optimiser = 'scg'; 24 | options.compOptions.kern = {'gg', 'white'}; 25 | options.separate = []; 26 | options.optimiser = 'scg'; 27 | iters = 1000; 28 | display = 1; 29 | 30 | X = cell(1,size(yTemp, 2)); 31 | y = cell(1,size(yTemp, 2)); 32 | 33 | for i = 1:size(yTemp, 2) 34 | X{i} = XTemp{i}; 35 | y{i} = yTemp{i}; 36 | end 37 | % Configuration of parameters 38 | 39 | model = multimodelCreate(q, 1, X, y, options); 40 | params = modelExtractParam(model); 41 | options.separate = 1:length(params); 42 | model = multimodelCreate(q, 1, X, y, options); 43 | 44 | params = modelExtractParam(model); 45 | index = paramNameRegularExpressionLookup(model, 'multimodel .* inverse .*'); 46 | params(index) = log(100); 47 | 48 | model = modelExpandParam(model, params); 49 | 50 | model = modelOptimise(model, [], [], display, iters); 51 | 52 | % Save the results.
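% The lines below build the results file name by capitalising the first
% letter of the data set name: with dataSetName = 'robotWireless' and
% experimentNo = 3 this gives 'demSpmgpRobotWireless3.mat'.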
53 | capName = dataSetName; 54 | capName(1) = upper(capName(1)); 55 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 56 | 57 | -------------------------------------------------------------------------------- /matlab/robotGgPITC.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | dataSetName ='robotWireless'; 7 | nlf =[1 2 3 4]; 8 | iters = 1000; 9 | options = multigpOptions('dtcvar'); 10 | options.kernType = 'gg'; 11 | options.optimiser = 'scg'; 12 | options.nlf = 1; 13 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 14 | options.isArd = false; 15 | options.fixInducing = 0; 16 | options.numActive = 50; 17 | options.beta = 1e-1; 18 | options.normalise = false; 19 | 20 | [maerror, elapsed_time] = robotSparseCore(dataSetName, options, nlf, iters); 21 | 22 | save([dataSetName upper(options.approx)], 'maerror', 'elapsed_time') 23 | -------------------------------------------------------------------------------- /matlab/robotWiFiResults.m: -------------------------------------------------------------------------------- 1 | function meanAbsError = robotWiFiResults(model, XTest, yTest) 2 | 3 | % MULTIGP 4 | 5 | 6 | 7 | 8 | if strcmp(model.type, 'multimodel') 9 | meanAbsError = zeros(model.numModels,1); 10 | for k=1:model.numModels, 11 | muP = gpPosteriorMeanVar(model.comp{k}, XTest{k}); 12 | meanAbsError(k) = mean(abs(muP - yTest{k})); 13 | end 14 | else 15 | meanAbsError = zeros(model.nout,1); 16 | for k=1:model.nout, 17 | if strcmp(model.approx, 'ftc') 18 | XtestMod = XTest{k}; 19 | else 20 | XtestMod = XTest{k}; 21 | end 22 | Ytest = yTest{k}; 23 | muP = multigpPosteriorMeanVar(model, XtestMod); 24 | kk = muP{model.nlf+k}; 25 | meanAbsError(k) = mean(abs(Ytest - kk)); 26 | end 27 | end -------------------------------------------------------------------------------- /matlab/schoolGgDTC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGDTC Runs the SCHOOL DATA EXPERIMENT with DTC and GG kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('dtc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.beta = 1e-2; 17 | options.noiseOpt = 1; 18 | options.tieOptions.selectMethod = 'nofree'; 19 | options.isArd = false; 20 | options.fixInducing = false; 21 | 22 | numActive = [5 20 50]; 23 | 24 | % Options for the batch 25 | typeOfInit = 0; 26 | isScaled = 1; 27 | numberSeed = 1e3; 28 | isBatch = 1; 29 | 30 | display = 1; 31 | iters = 5; 32 | totFolds = 5; 33 | 34 | [totalError, elapsed_time_train, totalErrorSMSE, totalErrorSMLL] = schoolSparseCore(dataSetName, options, ... 
35 | numActive, display, iters, totFolds, typeOfInit, isScaled, numberSeed, isBatch); 36 | 37 | save('schoolGgDTC.mat') 38 | -------------------------------------------------------------------------------- /matlab/schoolGgDTCVAR.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | dataSetName = 'schoolData2'; 7 | % Configuration of options 8 | options = multigpOptions('dtcvar'); 9 | options.kernType = 'gg'; 10 | options.optimiser = 'scg'; 11 | options.nlf = 1; 12 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 13 | options.beta = 1e3; 14 | options.noiseOpt = 1; 15 | options.tieOptions.selectMethod = 'nofree'; 16 | options.isArd = true; 17 | options.fixInducing = false; 18 | 19 | numActive = [ 5 20 50 100]; 20 | 21 | 22 | display = 0; 23 | iters = 200; 24 | totFolds = 10; 25 | 26 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 27 | numActive, display, iters, totFolds); 28 | 29 | save('schoolGgDTCVAR.mat') -------------------------------------------------------------------------------- /matlab/schoolGgFITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGFITC Runs the SCHOOL DATA EXPERIMENT with FITC and GG kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e6) 8 | randn('state', 1e6) 9 | dataSetName = 'schoolData2'; 10 | % Configuration of options 11 | options = multigpOptions('fitc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.beta = 1e3; 17 | options.noiseOpt = 1; 18 | options.tieOptions.selectMethod = 'nofree'; 19 | options.isArd = true; 20 | options.fixInducing = false; 21 | 22 | numActive = [ 5 20 50]; 23 | 24 | 25 | display = 0; 26 | iters = 200; 27 | totFolds = 10; 28 | 29 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 30 | numActive, display, iters, totFolds); 31 | 32 | save('schoolGgFITC.mat') 33 | -------------------------------------------------------------------------------- /matlab/schoolGgPITC.m: -------------------------------------------------------------------------------- 1 | % SCHOOLGGPITC Runs the SCHOOL DATA EXPERIMENT with PITC and GG kernel 2 | 3 | % MULTIGP 4 | 5 | clear 6 | clc 7 | rand('twister', 1e7) 8 | randn('state', 1e7) 9 | 10 | dataSetName = 'schoolData2'; 11 | % Configuration of options 12 | options = multigpOptions('pitc'); 13 | options.kernType = 'gg'; 14 | options.optimiser = 'scg'; 15 | options.nlf = 1; 16 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 17 | options.beta = 1e-3; 18 | options.noiseOpt = 1; 19 | options.tieOptions.selectMethod = 'nofree'; 20 | options.isArd = true; 21 | options.fixInducing = false; 22 | 23 | numActive = [ 5 20 50]; 24 | 25 | 26 | display = 0; 27 | iters = 500; 28 | totFolds = 10; 29 | 30 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 31 | numActive, display, iters, totFolds); 32 | 33 | save('schoolGgPITC.mat') 34 | -------------------------------------------------------------------------------- /matlab/schoolGgwhiteDTCVAR.m: -------------------------------------------------------------------------------- 1 | % Runs the SCHOOL DATA EXPERIMENT with DTCVAR and GGWHITE kernel. One 2 | % inducing kernel. 
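% (Compare with schoolGgwhiteDTCVARSeveralVIKs.m, which uses one inducing
% kernel per inducing input and calls schoolSparseCoreDTCVAR instead of
% schoolSparseCore.)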
3 | 4 | % MULTIGP 5 | 6 | clear 7 | clc 8 | rand('twister', 1e7) 9 | randn('state', 1e7) 10 | 11 | dataSetName = 'schoolData2'; 12 | % Configuration of options 13 | options = multigpOptions('dtcvar'); 14 | options.kernType = 'ggwhite'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 18 | options.beta = 1e-3; 19 | options.noiseOpt = 1; 20 | options.tieOptions.selectMethod = 'nofree'; 21 | options.isArd = true; 22 | options.fixInducing = false; 23 | options.nVIKs = 1; 24 | 25 | numActive = [5 20 50]; 26 | 27 | display = 0; 28 | iters = 500; 29 | totFolds = 10; 30 | 31 | [totalError, elapsed_time_train] = schoolSparseCore(dataSetName, options, ... 32 | numActive, display, iters, totFolds); 33 | 34 | save('schoolGgwhiteDTCVAR.mat') 35 | -------------------------------------------------------------------------------- /matlab/schoolGgwhiteDTCVARSeveralVIKs.m: -------------------------------------------------------------------------------- 1 | % Runs the SCHOOL DATA EXPERIMENT with DTCVAR and GGWHITE kernel. One 2 | % inducing kernel per inducing input. 3 | 4 | % MULTIGP 5 | 6 | 7 | clear 8 | clc 9 | rand('twister', 1e7) 10 | randn('state', 1e7) 11 | 12 | dataSetName = 'schoolData2'; 13 | % Configuration of options 14 | options = multigpOptions('dtcvar'); 15 | options.kernType = 'ggwhite'; 16 | options.optimiser = 'scg'; 17 | options.nlf = 1; 18 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 19 | options.beta = 1e-3; 20 | options.noiseOpt = 1; 21 | options.tieOptions.selectMethod = 'nofree'; 22 | options.isArd = true; 23 | options.fixInducing = false; 24 | options.nVIKs = 1; 25 | 26 | numActive = [5 20 50]; 27 | 28 | 29 | display = 0; 30 | iters = 500; 31 | totFolds = 10; 32 | 33 | [totalError, elapsed_time_train] = schoolSparseCoreDTCVAR(dataSetName, options, ... 34 | numActive, display, iters, totFolds); 35 | 36 | save('schoolGgwhiteDTCVARSeveralVIKs.mat') 37 | -------------------------------------------------------------------------------- /matlab/schoolResults.m: -------------------------------------------------------------------------------- 1 | function CC = schoolResults(model, XTest, yTest) 2 | 3 | % SCHOOLRESULTS description. 4 | 5 | % MULTIGP 6 | 7 | CC = zeros(1, model.nout); 8 | 9 | for k= 1:model.nout, 10 | if strcmp(model.approx, 'ftc') 11 | XtestMod = XTest{k}; 12 | else 13 | XtestMod = XTest{k}; 14 | end 15 | Ytest = yTest{k}; 16 | muP = multigpPosteriorMeanVar(model, XtestMod); 17 | kk = muP{model.nlf+k}; 18 | cR2 = corrcoef(Ytest,kk); 19 | CC(k) = cR2(2,1)^2; 20 | end 21 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGg.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | 4 | file = {'Cd', 'Co', 'Cu', 'Pb'}; 5 | numActive =[10 50 100 200 500]; 6 | approx = 'pitc'; 7 | nlf = 1; 8 | numFolds = 10; 9 | experimentNo = 2; 10 | iters = 1000; 11 | kernType = 'gg'; 12 | initialPosition = 'randomComplete'; 13 | 14 | 15 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, numActive, approx, nlf, ... 
16 | numFolds, experimentNo, iters, kernType, initialPosition); -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgFullCd.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cd'; 7 | nlf = 1; 8 | numFolds = 10; 9 | iters = 100; 10 | options = multigpOptions('ftc'); 11 | options.kernType = 'gg'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.beta = 1e3; 15 | options.isArd = false; 16 | [maerror, elapsed_time] = demJuraBatch(file, options, numFolds, iters); 17 | 18 | save('scriptBatchJuraGgFullCdIsotopic' , 'maerror', 'elapsed_time') -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCd1.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cd'; 7 | isScaled = 1; 8 | isBatch = 1; 9 | numberSeed = 1e3; 10 | typeOfInit = 1; 11 | numActive = [50 100 200]; 12 | numFolds = 5; 13 | iters = 5; 14 | options = multigpOptions('pitc'); 15 | options.kernType = 'gg'; 16 | options.optimiser = 'scg'; 17 | options.nlf = 2; 18 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 19 | options.isArd = 1; 20 | options.tieInducing = 1; 21 | options.fixInducing = 0; 22 | 23 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, ... 24 | iters, numFolds, typeOfInit, isScaled, numberSeed, isBatch); 25 | 26 | save('scriptBatchJuraGgSpmgpCd1', 'maerror', 'elapsed_time') 27 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCd2.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cd'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('fitc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCd2', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCd3.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cd'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('dtc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCd3', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCd4.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 
1e6) 5 | addToolboxes(0,1) 6 | file = 'Cd'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCd4', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCu1.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cu'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('pitc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCu1', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCu2.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cu'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('fitc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCu2', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCu3.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cu'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('dtc'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCu3', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgSpmgpCu4.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | rand('twister', 1e6) 4 | randn('state', 1e6) 5 | addToolboxes(0,1) 6 | file = 'Cu'; 7 | numActive =[50 100 200 500 600]; 8 | nlf = 1; 9 | numFolds = 10; 10 | iters = 1000; 11 | options = multigpOptions('dtcvar'); 12 | options.kernType = 'gg'; 13 | options.optimiser = 'scg'; 14 | options.nlf = 1; 15 | options.initialInducingPositionMethod = 'kmeansHeterotopic'; 16 | options.isArd = 1; 17 | 
options.fixInducing = 0; 18 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 19 | save('scriptBatchJuraGgSpmgpCu4', 'maerror', 'elapsed_time') 20 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgwhiteFull.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | 4 | file = {'Cd', 'Co'}; 5 | nlf = 1; 6 | numFolds = 10; 7 | experimentNo = 1; 8 | iters = 1000; 9 | kernType = 'ggwhite'; 10 | 11 | [maerror, elapsed_time] = demJuraBatch(file, nlf, numFolds, experimentNo,... 12 | iters, kernType); -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgwhiteFullCd.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | addToolboxes(0,1) 4 | file = 'Cd'; 5 | nlf = 1; 6 | numFolds = 10; 7 | iters = 1000; 8 | options = multigpOptions('ftc'); 9 | options.kernType = 'ggwhite'; 10 | options.optimiser = 'scg'; 11 | options.nlf = 1; 12 | options.beta = 1e3; 13 | options.isArd = 1; 14 | [maerror, elapsed_time] = demJuraBatch(file, options, numFolds, iters); 15 | 16 | save('scriptBatchJuraGgwhiteFullCd' , 'maerror', 'elapsed_time') -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgwhiteSpmgpCd1.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | addToolboxes(0,1) 4 | file = 'Cd'; 5 | numActive =[50 100 200 500 600]; 6 | approx = 'dtc'; 7 | nlf = 1; 8 | numFolds = 10; 9 | iters = 1000; 10 | options = multigpOptions('dtc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.initialInducingPositionMethod = 'randomDataIsotopic'; 15 | options.isArd = 1; 16 | options.nVIKs = 1; 17 | options.flagVIKs = false; 18 | options.fixInducing = 0; 19 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 20 | save('scriptBatchJuraGgwhiteSpmgpCd1', 'maerror', 'elapsed_time') 21 | -------------------------------------------------------------------------------- /matlab/scriptBatchJuraGgwhiteSpmgpCd2.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | addToolboxes(0,1) 4 | file = 'Cd'; 5 | numActive =[50 100 200 500 600]; 6 | approx = 'dtc'; 7 | nlf = 1; 8 | numFolds = 10; 9 | iters = 1000; 10 | options = multigpOptions('dtc'); 11 | options.kernType = 'ggwhite'; 12 | options.optimiser = 'scg'; 13 | options.nlf = 1; 14 | options.initialInducingPositionMethod = 'randomDataIsotopic'; 15 | options.isArd = 1; 16 | options.nVIKs = 1; 17 | options.flagVIKs = true; 18 | options.fixInducing = 0; 19 | [maerror, elapsed_time] = demSpmgpJuraBatch(file, options, numActive, iters, numFolds); 20 | save('scriptBatchJuraGgwhiteSpmgpCd2', 'maerror', 'elapsed_time') 21 | -------------------------------------------------------------------------------- /matlab/simKernGradTransfer.m: -------------------------------------------------------------------------------- 1 | function kern = simKernGradTransfer(kern, kernOut, localGrad, whichOut, whichLat) 2 | 3 | % SIMKERNGRADTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. 
Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | params = kern.funcNames.extractOut(kernOut); 10 | 11 | gradDecay = localGrad(1)*params(1); 12 | gradInvWidth = localGrad(2)*params(2); 13 | 14 | kern.grad.decayVector(whichOut) = kern.grad.decayVector(whichOut) + gradDecay; 15 | kern.grad.inverseWidthVector(whichLat) = kern.grad.inverseWidthVector(whichLat) + gradInvWidth; 16 | 17 | if ~kern.isVarS 18 | if kern.isNegativeS 19 | gradSensitivity = localGrad(3); 20 | else 21 | gradSensitivity = localGrad(3)*params(3); 22 | end 23 | kern.grad.sensitivity(whichOut, whichLat) = kern.grad.sensitivity(whichOut, whichLat) + gradSensitivity; 24 | end 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /matlab/simKernParamTransfer.m: -------------------------------------------------------------------------------- 1 | function simKern = simKernParamTransfer(kern, simKern, whichOutput, whichLatent) 2 | 3 | % SIMKERNPARAMTRANSFER 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | simKern.decay = kern.decayVector(whichOutput); 10 | simKern.inverseWidth = kern.inverseWidthVector(whichLatent); 11 | 12 | if ~kern.isVarS 13 | if kern.isNegativeS 14 | simKern.sensitivity = kern.sensitivity(whichOutput, whichLatent); 15 | else 16 | simKern.variance = kern.sensitivity(whichOutput, whichLatent); 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /matlab/simMeanCompute.m: -------------------------------------------------------------------------------- 1 | function m = simMeanCompute(meanFunction, X, varargin) 2 | 3 | % SIMMEANCOMPUTE Give the output of the SIM mean function model for given X. 4 | % FORMAT 5 | % DESC gives the output of the sim mean function model for a given input X. 6 | % ARG model : structure specifying the model. 7 | % ARG X : input location(s) for which output is to be computed. 8 | % RETURN Y : output location(s) corresponding to given input 9 | % locations. 10 | % 11 | % SEEALSO : simMeanCreate 12 | % 13 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 14 | 15 | 16 | % MULTIGP 17 | 18 | nlf = varargin{1}; 19 | startVal=1; 20 | endVal=0; 21 | for i =1:nlf, 22 | endVal = endVal + size(X{i}, 1); 23 | m(startVal:endVal, 1) = zeros(length(X{i}),1); 24 | startVal = endVal+1; 25 | end 26 | 27 | for i = nlf+1:length(X), 28 | endVal = endVal + size(X{i}, 1); 29 | m(startVal:endVal, 1) = meanFunction.basal(i-nlf)/meanFunction.decay(i-nlf) * ... 30 | ones(length(X{i}),1); 31 | startVal = endVal+1; 32 | end -------------------------------------------------------------------------------- /matlab/simMeanCreate.m: -------------------------------------------------------------------------------- 1 | function meanFunction = simMeanCreate(q, d, options) 2 | 3 | % SIMMEANCREATE returns a structure for the SIM mean function. 4 | % FORMAT 5 | % DESC creates the mean function for a multi output GP 6 | % model based in the SIM kernel (first order differential equation) 7 | % The outputs of the model are generated according to 8 | % 9 | % mean_q = B_q/D_q 10 | % 11 | % where mean_q is an output constant corresponding to the mean of the 12 | % output function q, B_q is basal transcription and D_q is the decay 13 | % constant. 14 | % RETURN model : the structure for the multigp model 15 | % ARG q : input dimension size. 16 | % ARG d : output dimension size. 17 | % ARG options : contains the options for the MEAN of the MULTIGP model. 
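%
% EXAMPLE (editor's sketch, not in the original help): with one latent force
% and two outputs, the mean returned by simMeanCompute is the constant B_q/D_q
% on each output block. Passing an empty struct as options is an assumption
% made for illustration; the function body below only fills basal and decay
% with ones, which the sketch then overwrites.
%
%   q = 1; d = 2;
%   mf = simMeanCreate(q, d, struct());
%   mf.basal = [2; 4];                          % B_q
%   mf.decay = [1; 2];                          % D_q
%   X = {zeros(1, q); (0:0.1:1)'; (0:0.1:1)'};  % latent input block, then d outputs
%   m = simMeanCompute(mf, X, 1);               % output blocks equal B_q/D_q = 2 and 2
%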
18 | % 19 | % SEE ALSO: simKernParamInit, simKernCompute 20 | % 21 | % COPYRIGHT : Mauricio A. Alvarez and Neil D. Lawrence, 2008 22 | 23 | % MULTIGP 24 | 25 | if q > 1 26 | error('SIM MEAN FUNCTION only valid for one-D input.') 27 | end 28 | 29 | meanFunction.type = 'sim'; 30 | meanFunction.basal = ones(d,1); 31 | meanFunction.decay = ones(d,1); 32 | meanFunction.transforms.index = 1:2*d; 33 | meanFunction.transforms.type = optimiDefaultConstraint('positive'); 34 | % Only the parameters of basal rates are counted. The springs are already 35 | % counted in the kernel 36 | meanFunction.nParams = 2*d; -------------------------------------------------------------------------------- /matlab/simMeanExpandParam.m: -------------------------------------------------------------------------------- 1 | function meanFunction = simMeanExpandParam(meanFunction, params) 2 | 3 | % SIMMEANEXPANDPARAM Extract the parameters of the vector parameter and put 4 | % them back in the mean function structure for the SIM model. 5 | % DESC returns a mean function sim structure filled with the 6 | % parameters in the given vector. This is used as a helper function to 7 | % enable parameters to be optimised in, for example, the NETLAB 8 | % optimisation functions. 9 | % ARG meanFunction : the meanFunction structure in which the parameters are to be 10 | % placed. 11 | % ARG param : vector of parameters which are to be placed in the 12 | % kernel structure. 13 | % RETURN meanFunction : mean function structure with the given parameters in the 14 | % relevant locations. 15 | % 16 | % SEEALSO : simMeanCreate, simMeanExtractParam, kernExpandParam 17 | % 18 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 19 | 20 | % MULTIGP 21 | 22 | meanFunction.basal = params(1:meanFunction.nParams/2)'; 23 | meanFunction.decay = params(meanFunction.nParams/2+1:meanFunction.nParams)'; 24 | 25 | -------------------------------------------------------------------------------- /matlab/simMeanExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = simMeanExtractParam(meanFunction) 2 | 3 | % SIMMEANEXTRACTPARAM Extract parameters from the SIM MEAN FUNCTION structure. 4 | % FORMAT 5 | % DESC Extract parameters from the mean funtion structure of the sim model 6 | % into a vector of parameters for optimisation. 7 | % ARG meanFunction : the mean function structure containing the parameters to be 8 | % extracted. 9 | % RETURN param : vector of parameters extracted from the kernel. 10 | % 11 | % FORMAT 12 | % DESC Extract parameters and their names from mean funtion structure of 13 | % the sim model 14 | % ARG meanFunction : the mean function structure containing the parameters to be 15 | % extracted. 16 | % RETURN param : vector of parameters extracted from the kernel. 17 | % RETURN names : cell array of strings containing parameter names. 18 | % 19 | % SEEALSO simMeanCreate, simMeanExpandParam, simKernCreate, 20 | % simkernExtractParam 21 | % 22 | % COPYRIGHT : Mauricio Alvarez and Neil D. 
Lawrence, 2008 23 | 24 | % MULTIGP 25 | 26 | params = [meanFunction.basal' meanFunction.decay']; 27 | if nargout > 1 28 | names = cell(1, 2*meanFunction.nParams/2); 29 | for i=1:meanFunction.nParams/2 30 | names{i} = ['sim ' num2str(i) ' basal']; 31 | end 32 | for i=meanFunction.nParams/2+1:2*meanFunction.nParams/2 33 | names{i} = ['sim ' num2str(i-meanFunction.nParams/2) ' decay']; 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /matlab/simMeanGradient.m: -------------------------------------------------------------------------------- 1 | function g = simMeanGradient(meanFunction, varargin) 2 | 3 | % SIMMEANGRADIENT Gradient of the parameters of the mean function in the 4 | % multigp model with SIM kernel 5 | % FORMAT 6 | % DESC gives the gradient of the objective function for the parameters of 7 | % the mean function in the multigp model with SIM kernel (first order 8 | % differential equation). 9 | % ARG meanFunction : mean function structure to optimise. 10 | % ARG P1, P2, P3 ... : optional additional arguments. 11 | % RETURN g : the gradient of the error function to be minimised. 12 | % 13 | % SEEALSO : simMeanCreate, simMeanOut 14 | % 15 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 16 | 17 | % MULTIGP 18 | 19 | gmu = varargin{1}'; 20 | gB = gmu./meanFunction.decay; 21 | gD = -gmu.*meanFunction.basal./(meanFunction.decay.*meanFunction.decay); 22 | g = [gB' gD']; 23 | -------------------------------------------------------------------------------- /matlab/simMultigpKernOptions.m: -------------------------------------------------------------------------------- 1 | function model = simMultigpKernOptions(model, options) 2 | 3 | % SIMMULTIGPKERNOPTIONS Changes the default options for SIM kernels 4 | % FORMAT 5 | % DESC Changes default options for the SIM kernel and RBF kernels 6 | % RETURN model : model with kernels modified 7 | % ARG model : model created 8 | % ARG options : options for particular kernel 9 | % 10 | % COPYRIGHT : David Luengo, Mauricio A. Alvarez, 2009 11 | 12 | % MULTIGP 13 | 14 | if isfield(options, 'isNormalised') && ~isempty(options.isNormalised) 15 | for i=1:model.nlf 16 | for j=1:model.nlf 17 | model.kern.comp{i}.comp{j}.isNormalised = options.isNormalised; 18 | end 19 | for j=1:model.nout 20 | model.kern.comp{i}.comp{model.nlf + j}.isNormalised = options.isNormalised; 21 | end 22 | end 23 | end 24 | 25 | if isfield(options, 'isStationary') && ~isempty(options.isStationary) 26 | for i=1:model.nlf 27 | for j=1:model.nlf 28 | model.kern.comp{i}.comp{j}.isStationary = options.isStationary; 29 | end 30 | for j=1:model.nout 31 | model.kern.comp{i}.comp{model.nlf+j}.isStationary = options.isStationary; 32 | end 33 | end 34 | end 35 | 36 | -------------------------------------------------------------------------------- /matlab/simMultimodelFixParam.m: -------------------------------------------------------------------------------- 1 | function model = simMultimodelFixParam(model, options) 2 | 3 | % SIMMULTIMODELFIXPARAM Fix parameters for a sparse multi model with SIM 4 | % FORMAT 5 | % DESC Fix the parameters for a sparse multi model that uses SIM kernel. 6 | % RETURN model : model with fixed parameters included 7 | % ARG model : model before fixing the parameters 8 | % ARG options : options for fixing parameters 9 | % 10 | % COPYRIGHT : Mauricio A. 
Alvarez, 2009 11 | 12 | % MULTIGP 13 | 14 | if isfield(options, 'typeLf') && ~isempty(options.typeLf) 15 | 16 | else 17 | [params, names] = modelExtractParam(model); 18 | index = findExpression(names, '.* rbf .* variance'); 19 | count = 0; 20 | for k=1:length(index); 21 | count = count + 1; 22 | model.fix(count).index = index(k); 23 | model.fix(count).value = expTransform(1, 'xtoa'); 24 | end 25 | end 26 | 27 | function ind = findExpression(names, pattern) 28 | ind = []; 29 | for i = 1:length(names) 30 | if(regexp(names{i}, pattern)) 31 | ind = [ind i]; 32 | end 33 | end -------------------------------------------------------------------------------- /matlab/simglobalKernCompute.m: -------------------------------------------------------------------------------- 1 | function [Kyy, Kyu, Kuu] = simglobalKernCompute(kern, outX, latX, gamma) 2 | 3 | % SIMGLOBALKERNCOMPUTE 4 | % 5 | % COPYRIGTH : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if nargin < 4 10 | gamma = []; 11 | end 12 | 13 | Kuu = cell(kern.nlf,1); 14 | Kyu = cell(kern.nout, kern.nlf); 15 | Kyy = cell(kern.nout, kern.nlf); 16 | 17 | 18 | kernLat = kern.template.latent; 19 | kernOut = kern.template.output; 20 | 21 | % Compute Kuu 22 | for k = 1:kern.nlf 23 | % First we need to expand the parameters in the vector to the local 24 | % kernel 25 | kernLat.inverseWidth = kern.inverseWidthVector(k); 26 | Kuu{k} = real(kern.funcNames.computeLat(kernLat, latX{k})); 27 | if ~isempty(gamma) 28 | Kuu{k} = Kuu{k} + gamma(k)*eye(size(Kuu{k})); 29 | end 30 | end 31 | for i = 1:kern.nout, 32 | % Expand the parameter decay 33 | kernOut.decay = kern.decayVector(i); 34 | for j = 1: kern.nlf 35 | % Expand the parameter inverseWidth 36 | kernOut.inverseWidth = kern.inverseWidthVector(j); 37 | kernLat.inverseWidth = kern.inverseWidthVector(j); 38 | % Compute Kff 39 | Kyy{i,j} = real(kern.funcNames.computeOut(kernOut, outX{i})); 40 | % Compute Kfu, which corresponds to K_{\hat{f}}u, really. 41 | Kyu{i,j} = real(kern.funcNames.computeCross(kernOut, kernLat, outX{i}, latX{j})); 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /matlab/simglobalKernComputeTest.m: -------------------------------------------------------------------------------- 1 | function [Kyy, Kyu, Kuu] = simglobalKernComputeTest(kern, latX, outXs, latXs, gamma) 2 | 3 | % SIMGLOBALKERNCOMPUTETEST 4 | % 5 | % COPYRIGTH : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | if nargin < 5 10 | gamma = []; 11 | end 12 | 13 | Kuu = cell(kern.nlf,1); 14 | Kyu = cell(kern.nout, kern.nlf); 15 | Kyy = cell(kern.nout, kern.nlf); 16 | 17 | 18 | kernLat = kern.template.latent; 19 | kernOut = kern.template.output; 20 | 21 | % Compute Kuu 22 | for k = 1:kern.nlf 23 | % First we need to expand the parameters in the vector to the local 24 | % kernel 25 | kernLat.inverseWidth = kern.inverseWidthVector(k); 26 | Kuu{k} = real(kern.funcNames.computeLat(kernLat, latXs, latX{k})); 27 | if ~isempty(gamma) 28 | Kuu{k} = Kuu{k} + gamma(k)*eye(size(Kuu{k})); 29 | end 30 | end 31 | for i = 1:kern.nout, 32 | % Expand the parameter decay 33 | kernOut.decay = kern.decayVector(i); 34 | for j = 1: kern.nlf 35 | % Expand the parameter inverseWidth 36 | kernOut.inverseWidth = kern.inverseWidthVector(j); 37 | kernLat.inverseWidth = kern.inverseWidthVector(j); 38 | % Compute Kff 39 | Kyy{i,j} = real(kern.funcNames.computeOut(kernOut, outXs{i})); 40 | % Compute Kfu, which corresponds to K_{\hat{f}}u, really. 
41 | Kyu{i,j} = real(kern.funcNames.computeCross(kernOut, kernLat, outXs{i}, latX{j})); 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /matlab/simglobalKernExpandParam.m: -------------------------------------------------------------------------------- 1 | function kern = simglobalKernExpandParam(kern, params) 2 | 3 | % SIMGLOBALKERNEXPANDPARAM 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | kern.decayVector = params(1:kern.nout); 10 | kern.inverseWidthVector = params(kern.nout+1:kern.nout+kern.nlf); 11 | if ~kern.isVarS 12 | kern.sensitivity = reshape(params(kern.nlf+kern.nout+1:end), kern.nout, kern.nlf); 13 | end 14 | -------------------------------------------------------------------------------- /matlab/simglobalKernExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = simglobalKernExtractParam(kern) 2 | 3 | % SIMGLOBALKERNEXTRACTPARAM 4 | % 5 | % COPYRIGHT 6 | 7 | % MULTIGP 8 | 9 | if kern.isVarS 10 | params = [kern.decayVector kern.inverseWidthVector]; 11 | else 12 | params = [kern.decayVector kern.inverseWidthVector kern.sensitivity(:)']; 13 | end 14 | 15 | if nargout > 1 16 | namesDecay = cell(kern.nout,1); 17 | namesInvWidth = cell(kern.nlf,1); 18 | for i=1:kern.nout 19 | namesDecay{i} = ['decay ' num2str(i) '.']; 20 | end 21 | for i=1:kern.nlf 22 | namesInvWidth{i} = ['inverse width ' num2str(i) '.']; 23 | end 24 | names = [namesDecay(:)' namesInvWidth(:)']; 25 | if ~kern.isVarS 26 | namesSensitivity = cell(kern.nout, kern.nlf); 27 | for i=1:kern.nout 28 | output = num2str(i); 29 | for j=1:kern.nlf 30 | force = num2str(j); 31 | namesSensitivity{i,j} = ['sensitivity output ' output ' force ' force '.']; 32 | end 33 | end 34 | names = [names(:)' namesSensitivity(:)']; % appended only when sensitivities are free parameters 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /matlab/simglobalKernGradCat.m: -------------------------------------------------------------------------------- 1 | function g = simglobalKernGradCat(kern) 2 | 3 | % SIMGLOBALKERNGRADCAT 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | g = [kern.grad.decayVector kern.grad.inverseWidthVector]; 10 | 11 | if ~kern.isVarS 12 | g = [g kern.grad.sensitivity(:)']; 13 | end 14 | -------------------------------------------------------------------------------- /matlab/simglobalKernGradInit.m: -------------------------------------------------------------------------------- 1 | function kern = simglobalKernGradInit(kern) 2 | 3 | % SIMGLOBALKERNGRADINIT 4 | % 5 | % COPYRIGHT : Mauricio A. Alvarez, 2010 6 | 7 | % MULTIGP 8 | 9 | kern.grad.inverseWidthVector = zeros(size(kern.inverseWidthVector)); 10 | kern.grad.decayVector = zeros(size(kern.decayVector)); 11 | 12 | if ~kern.isVarS 13 | kern.grad.sensitivity = zeros(size(kern.sensitivity)); 14 | end 15 | -------------------------------------------------------------------------------- /matlab/simglobalMultimodelTieParam.m: -------------------------------------------------------------------------------- 1 | function tieInd = simglobalMultimodelTieParam(model) 2 | 3 | % SIMGLOBALMULTIMODELTIEPARAM Tie parameters for a sparse multimodel 4 | % FORMAT 5 | % DESC Tie the parameters for a sparse multimodel that uses SIM kernel. 6 | % RETURN tieInd : cell with elements containing the indexes of parameters 7 | % to tie. 8 | % ARG model : model created 9 | % 10 | % COPYRIGHT : Mauricio A. 
Alvarez, 2009, 2010 11 | 12 | % MULTIGP 13 | 14 | [params, names] = modelExtractParam(model); 15 | 16 | % Tie decays 17 | indexDecaysKernel = findExpression(names, 'kernel .* decay'); 18 | indexDecaysMean = findExpression(names, 'mean Func .* decay'); 19 | indexDecays = [indexDecaysKernel' indexDecaysMean']; 20 | tieInd = (mat2cell(indexDecays, ones(1, model.nout), 2))'; 21 | 22 | function ind = findExpression(names, pattern) 23 | ind = []; 24 | for i = 1:length(names) 25 | if(regexp(names{i}, pattern)) 26 | ind = [ind i]; 27 | end 28 | end -------------------------------------------------------------------------------- /matlab/simwhiteMeanCompute.m: -------------------------------------------------------------------------------- 1 | function m = simwhiteMeanCompute(meanFunction, X, varargin) 2 | 3 | % SIMWHITEMEANCOMPUTE Give the output of the SIM-WHITE mean function model 4 | % for given X. 5 | % FORMAT 6 | % DESC gives the output of the SIM-WHITE mean function model for a given 7 | % input X. 8 | % ARG model : structure specifying the model. 9 | % ARG X : input location(s) for which output is to be computed. 10 | % RETURN Y : output location(s) corresponding to given input locations. 11 | % 12 | % SEEALSO : simwhiteMeanCreate 13 | % 14 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 15 | % 16 | % MODIFICATIONS : David Luengo, 2009 17 | 18 | 19 | % MULTIGP 20 | 21 | nlf = varargin{1}; 22 | startVal=1; 23 | endVal=0; 24 | for i =1:nlf, 25 | endVal = endVal + size(X{i}, 1); 26 | m(startVal:endVal, 1) = zeros(length(X{i}),1); 27 | startVal = endVal+1; 28 | end 29 | 30 | for i = nlf+1:length(X), 31 | endVal = endVal + size(X{i}, 1); 32 | m(startVal:endVal, 1) = meanFunction.basal(i-nlf)/meanFunction.decay(i-nlf) * ... 33 | ones(length(X{i}),1); 34 | startVal = endVal+1; 35 | end 36 | -------------------------------------------------------------------------------- /matlab/simwhiteMeanCreate.m: -------------------------------------------------------------------------------- 1 | function meanFunction = simwhiteMeanCreate(q, d, options) 2 | 3 | % SIMWHITEMEANCREATE mean function structure for the SIM-WHITE kernel. 4 | % FORMAT 5 | % DESC creates the mean function for a multi output 6 | % GP model based in the SIM-WHITE kernel (first order differential equation 7 | % with white noise input process). The outputs of the model are generated 8 | % according to 9 | % 10 | % mean_q = B_q/D_q 11 | % 12 | % where mean_q is an output constant corresponding to the mean of the 13 | % output function q, B_q is basal transcription and D_q is the decay 14 | % constant. 15 | % RETURN model : the structure for the multigp model 16 | % ARG q : input dimension size. 17 | % ARG d : output dimension size. 18 | % ARG options : contains the options for the MEAN of the MULTIGP model. 19 | % 20 | % SEE ALSO: simwhiteKernParamInit, simwhiteKernCompute 21 | % 22 | % COPYRIGHT : Mauricio A. Alvarez and Neil D. Lawrence, 2008 23 | % 24 | % MODIFICATIONS : David Luengo, 2009 25 | 26 | % MULTIGP 27 | 28 | if q > 1 29 | error('SIM-WHITE MEAN FUNCTION only valid for one-D input.') 30 | end 31 | 32 | meanFunction.type = 'simwhite'; 33 | meanFunction.basal = ones(d,1); 34 | meanFunction.decay = ones(d,1); 35 | meanFunction.transforms.index = d+1:2*d; 36 | meanFunction.transforms.type = optimiDefaultConstraint('positive'); 37 | % Only the parameters of basal rates are counted. 
The springs are already 38 | % counted in the kernel 39 | meanFunction.nParams = 2*d; 40 | -------------------------------------------------------------------------------- /matlab/simwhiteMeanExpandParam.m: -------------------------------------------------------------------------------- 1 | function meanFunction = simwhiteMeanExpandParam(meanFunction, params) 2 | 3 | % SIMWHITEMEANEXPANDPARAM Extract the parameters of the vector parameter 4 | % and put them back in the mean function structure for the SIM-WHITE model. 5 | % DESC returns a mean function SIM-WHITE structure filled with the 6 | % parameters in the given vector. This is used as a helper function to 7 | % enable parameters to be optimised in, for example, the NETLAB 8 | % optimisation functions. 9 | % ARG meanFunction : the meanFunction structure in which the parameters are 10 | % to be placed. 11 | % ARG param : vector of parameters which are to be placed in the 12 | % kernel structure. 13 | % RETURN meanFunction : mean function structure with the given parameters 14 | % in the relevant locations. 15 | % 16 | % SEEALSO : simwhiteMeanCreate, simwhiteMeanExtractParam, kernExpandParam 17 | % 18 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 19 | % 20 | % MODIFICATIONS : David Luengo, 2009 21 | 22 | % MULTIGP 23 | 24 | meanFunction.basal = params(1:meanFunction.nParams/2)'; 25 | meanFunction.decay = params(meanFunction.nParams/2+1:meanFunction.nParams)'; 26 | 27 | -------------------------------------------------------------------------------- /matlab/simwhiteMeanExtractParam.m: -------------------------------------------------------------------------------- 1 | function [params, names] = simwhiteMeanExtractParam(meanFunction) 2 | 3 | % SIMWHITEMEANEXTRACTPARAM Extract parameters from the SIM-WHITE mean function structure. 4 | % FORMAT 5 | % DESC Extract parameters from the mean funtion structure of the SIM-WHITE 6 | % model into a vector of parameters for optimisation. 7 | % ARG meanFunction : the mean function structure containing the parameters 8 | % to be extracted. 9 | % RETURN param : vector of parameters extracted from the kernel. 10 | % 11 | % DESC Extract parameters and their names from mean funtion structure of 12 | % the SIM-WHITE model 13 | % ARG meanFunction : the mean function structure containing the parameters 14 | % to be extracted. 15 | % RETURN param : vector of parameters extracted from the kernel. 16 | % RETURN names : cell array of strings containing parameter names. 17 | % 18 | % SEEALSO simwhiteMeanCreate, simwhiteMeanExpandParam, simwhiteKernCreate, 19 | % simwhiteKernExtractParam 20 | % 21 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 22 | % 23 | % MODIFICATIONS : David Luengo, 2009 24 | 25 | % MULTIGP 26 | 27 | params = [meanFunction.basal' meanFunction.decay']; 28 | if nargout > 1 29 | names = cell(1, 2*meanFunction.nParams/2); 30 | for i=1:meanFunction.nParams/2 31 | names{i} = ['simwhite ' num2str(i) ' basal']; 32 | end 33 | for i=meanFunction.nParams/2+1:2*meanFunction.nParams/2 34 | names{i} = ['simwhite ' num2str(i-meanFunction.nParams/2) ' decay']; 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /matlab/simwhiteMeanGradient.m: -------------------------------------------------------------------------------- 1 | function g = simwhiteMeanGradient(meanFunction, varargin) 2 | 3 | % SIMWHITEMEANGRADIENT Gradient of the parameters of the mean function in 4 | % the multigp model with SIM-WHITE kernel. 
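% (Editor's note: since the mean of output q is m_q = B_q/D_q, the gradients
% assembled below follow from the chain rule as dL/dB_q = (dL/dm_q)/D_q and
% dL/dD_q = -(dL/dm_q)*B_q/D_q^2, where dL/dm_q is supplied as the first
% additional argument, gmu.)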
5 | % FORMAT 6 | % DESC gives the gradient of the objective function for the parameters of 7 | % the mean function in the multigp model with SIM-WHITE kernel (second order 8 | % differential equation with white noise process input). 9 | % ARG meanFunction : mean function structure to optimise. 10 | % ARG P1, P2, P3 ... : optional additional arguments. 11 | % RETURN g : the gradient of the error function to be minimised. 12 | % 13 | % SEEALSO : simwhiteMeanCreate, simwhiteMeanOut 14 | % 15 | % COPYRIGHT : Mauricio Alvarez and Neil D. Lawrence, 2008 16 | % 17 | % MODIFICATIONS : David Luengo, 2009 18 | 19 | % MULTIGP 20 | 21 | gmu = varargin{1}'; 22 | gB = gmu./meanFunction.decay; 23 | gD = -gmu.*meanFunction.basal./(meanFunction.decay.*meanFunction.decay); 24 | g = [gB' gD']; 25 | -------------------------------------------------------------------------------- /matlab/simwhiteMultigpFixParam.m: -------------------------------------------------------------------------------- 1 | function model = simwhiteMultigpFixParam(model, options) 2 | 3 | % SIMWHITEMULTIGPFIXPARAM Fix parameters for a multigp with SIMWHITE kernel 4 | % FORMAT 5 | % DESC Fix the parameters for a multigp model that uses SIMWHITE kernel. 6 | % RETURN model : model with fixed parameters included 7 | % ARG model : model before fixing the parameters 8 | % ARG options : options for fixing parameters 9 | % 10 | % COPYRIGHT : Mauricio A. Alvarez, David Luengo 2009 11 | 12 | % MULTIGP 13 | 14 | % This code fixes the latent variances to (1/2)^i (i=0, 1, 2, ..., nlf-1). 15 | count = 0; 16 | for i = 1:model.nlf 17 | index = paramNameRegularExpressionLookup(model, ... 18 | ['multi [' num2str(i) '] .* variance']); 19 | for k=1:length(index); 20 | count = count + 1; 21 | model.fix(count).index = index(k); 22 | model.fix(count).value = expTransform(2^(-i+1), 'xtoa'); 23 | end 24 | end 25 | % This code fixes the variance of the noise in the latent forces to 1e-9. 26 | for i = 1:model.nlf 27 | index = paramNameRegularExpressionLookup(model, ... 28 | ['multi [' num2str(model.nlf+1) '] white [' num2str(i) '] .*']); 29 | for k=1:length(index); 30 | count = count + 1; 31 | model.fix(count).index = index(k); 32 | model.fix(count).value = expTransform(1e-9, 'xtoa'); 33 | end 34 | end -------------------------------------------------------------------------------- /matlab/simwhiteMultigpKernOptions.m: -------------------------------------------------------------------------------- 1 | function model = simwhiteMultigpKernOptions(model, options) 2 | 3 | % SIMWHITEMULTIGPKERNOPTIONS Changes the default options for SIMWHITE kernels 4 | % FORMAT 5 | % DESC Changes default options for the SIMWHITE kernel and RBF kernels in the 6 | % multigp structure. 7 | % RETURN model : model with kernels modified 8 | % ARG model : model created 9 | % ARG options : options for particular kernel 10 | % 11 | % COPYRIGHT : David Luengo, Mauricio A. 
Alvarez, 2010 12 | 13 | % MULTIGP 14 | 15 | 16 | if isfield(options, 'positiveTime') && ~isempty(options.positiveTime) 17 | for i=1:model.nlf 18 | for j=1:model.nlf 19 | model.kern.comp{i}.comp{j}.positiveTime = options.positiveTime; 20 | end 21 | for j=1:model.nout 22 | model.kern.comp{i}.comp{model.nlf + j}.positiveTime = options.positiveTime; 23 | end 24 | end 25 | end 26 | 27 | if isfield(options, 'isStationary') && ~isempty(options.isStationary) 28 | for i=1:model.nlf 29 | for j=1:model.nlf 30 | model.kern.comp{i}.comp{j}.isStationary = options.isStationary; 31 | end 32 | for j=1:model.nout 33 | model.kern.comp{i}.comp{model.nlf+j}.isStationary = options.isStationary; 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /matlab/simwhiteMultigpTieParam.m: -------------------------------------------------------------------------------- 1 | function tieInd = simwhiteMultigpTieParam(model, options) 2 | 3 | % SIMWHITEMULTIGPTIEPARAM Tie parameters for a multigp with SIMWHITE kernel 4 | % FORMAT 5 | % DESC Tie the parameters for a multigp model that uses SIMWHITE kernel. 6 | % RETURN tieInd : cell with elements containing the indexes of parameters 7 | % to tie. 8 | % ARG model : model created 9 | % ARG options : options for tying parameters 10 | % 11 | % COPYRIGHT : Mauricio A. Alvarez, David Luengo 2009 12 | 13 | % MULTIGP 14 | 15 | for i = 1:options.nlf 16 | tieInd{i} = paramNameRegularExpressionLookup(model, ['multi ' num2str(i) ... 17 | ' .* variance']); 18 | end 19 | for i = 1:model.nout 20 | tieInd{end+1} = paramNameRegularExpressionLookup(model, ['.* ' num2str(i) ' decay']); 21 | end -------------------------------------------------------------------------------- /matlab/skelForWalkingWithLfm.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/skelForWalkingWithLfm.mat -------------------------------------------------------------------------------- /matlab/spmultigpCreate.m: -------------------------------------------------------------------------------- 1 | function model = spmultigpCreate(model, options) 2 | 3 | % SPMULTIGPCREATE 4 | % DESC incorporates into de model options retaled with the sparse methods 5 | % RETURN model : the structure for the sparse multigp model 6 | % ARG model : input sparse model. 7 | % ARG options : contains the options for the sparse multigp model 8 | 9 | % COPYRIGHT : Mauricio Alvarez 2008 10 | 11 | % MODIFICATIONS : Mauricio Alvarez 2009 12 | 13 | % MULTIGP 14 | 15 | switch options.approx 16 | case {'dtc','fitc', 'pitc', 'dtcvar'} 17 | % Sub-sample inducing variables. 18 | model.k = options.numActive; 19 | model.fixInducing = options.fixInducing; 20 | model.X_u = model.X{1}; 21 | for k=2:options.nlf 22 | model.X_u = [model.X_u; model.X{k}]; 23 | end 24 | end 25 | if isfield(options, 'tieInducing') && ~isempty(options.tieInducing) && ... 26 | options.tieInducing 27 | effPS = 1; 28 | else 29 | effPS = sum(model.k); 30 | end 31 | if effPS>model.N 32 | error('Number of active points cannot be greater than number of data.') 33 | end 34 | -------------------------------------------------------------------------------- /matlab/spmultigpExpandParam.m: -------------------------------------------------------------------------------- 1 | function model = spmultigpExpandParam(model, params) 2 | 3 | % SPMULTIGPEXPANDPARAM Expand a parameter vector into a SPMULTIGP model. 
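% (Editor's note: for the sparse model the vector handled here contains only
% the inducing inputs. It is reshaped into the sum(model.k)-by-model.q matrix
% model.X_u, and consecutive blocks of model.k(i) rows are copied back into
% model.X{i} for each of the model.nlf latent forces. As an illustrative
% sketch with assumed sizes, nlf = 2, model.k = [3 3] and q = 1 would map
% params(1:3) to model.X{1} and params(4:6) to model.X{2}.)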
4 | % FORMAT 5 | % DESC expands the model parameters to a structure containing 6 | % the information about a sparse multi-output Gaussian process. 7 | % ARG model : the sparse model structure containing the information about 8 | % the model. 9 | % ARG params : a vector of parameters from the model. 10 | % RETURN model : the model structure containing the information about 11 | % the sparse model updated with the new parameter vector. 12 | % 13 | % SEEALSO : multigpCreate, spmultigpExtractParam, multigpExtractParam, 14 | % 15 | % 16 | % COPYRIGHT : Mauricio Alvarez, 2008 17 | 18 | 19 | % MULTIGP 20 | 21 | model.X_u = reshape(params, sum(model.k), model.q); 22 | startVal = 1; 23 | endVal = 0; 24 | for i = 1:model.nlf 25 | endVal = endVal + model.k(i); 26 | model.X{i} = model.X_u(startVal:endVal,:); 27 | startVal = endVal + 1; 28 | end 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /matlab/spmultigpTiePseudoInputs.m: -------------------------------------------------------------------------------- 1 | function tieInd = spmultigpTiePseudoInputs(model) 2 | 3 | % SPMULTIGPTIEPSEUDOINPUTS Gives the indeces to tie the pseudo inputs 4 | % DESC Gives the indexes to tie the pseudo points 5 | % ARG model : input sparse model. 6 | % ARG tieInd : a cell where each component refers to the elements of the 7 | % pseudo inputs to be tied. 8 | % 9 | % COPYRIGHT : Mauricio A. Alvarez, 2010 10 | 11 | % MULTIGP 12 | 13 | [params, names] = modelExtractParam(model); 14 | indexes = zeros(model.k(1)*model.q, model.nlf); 15 | 16 | for k=1:model.nlf 17 | indexes(:,k) = findExpression(names, ['X_u .* Force ' num2str(k) '\.']); 18 | end 19 | 20 | tieInd = mat2cell(indexes, ones(1, model.k(1)*model.q), model.nlf)'; 21 | 22 | function ind = findExpression(names, pattern) 23 | ind = []; 24 | for i = 1:length(names) 25 | if(regexp(names{i}, pattern)) 26 | ind = [ind i]; 27 | end 28 | end -------------------------------------------------------------------------------- /matlab/spmultigpUpdateKernels.m: -------------------------------------------------------------------------------- 1 | function model = spmultigpUpdateKernels(model) 2 | 3 | % SPMULTIGPUPDATEKERNELS 4 | % FORMAT 5 | % DESC Update the kernels that are needed for the sparse multigp 6 | % ARG model : the model structure containing the model parameters 7 | % RETURN model : the model structure with updated kernels. 8 | % 9 | % COPYRIGHT : Mauricio A. Alvarez, 2008, 2009 10 | 11 | % MULTIGP 12 | 13 | switch model.approx 14 | case {'dtc','fitc', 'pitc','dtcvar'} 15 | model = sparseKernCompute(model); 16 | otherwise 17 | end 18 | 19 | if isfield(model, 'beta') && ~isempty(model.beta) 20 | model = spmultigpUpdateAD(model); 21 | end -------------------------------------------------------------------------------- /matlab/spmultimodelGetParameters.m: -------------------------------------------------------------------------------- 1 | function [S, D] = spmultimodelGetParameters(model, tf, dirSaveYeastResults, saveFigures) 2 | 3 | % SPMULTIMODELGETPARAMETERS Sensitivities and decays in spmultimodel struc. 4 | % FORMAT 5 | % DESC 6 | % Returns the sensitivities and decays of a spmultimodel structure and 7 | % plots the histogram of the sensitivities, the decays and the parameter 8 | % formed as the sensitivity over the decay. 9 | % ARG model : the spmultimodel structure. 10 | % ARG tf : the transcription factor for which asscociated sensitivities and 11 | % decays are needed. 
12 | % ARG dirSaveYeastResults : path to the directory where the plots of the 13 | % histograms will be saved. 14 | % ARG saveFigures : indicates whether the plots are saved or not. 15 | % RETURN S : values for the sensitivties associated to the 'tf' 16 | % RETURN D : value of the decays of the outputs asscociated to the 'tf' 17 | % 18 | % COPYRIGHT : Mauricio A Alvarez, 2009 19 | 20 | % MULTIGP 21 | 22 | 23 | S = zeros(1, model.numOutGivenLat(tf)); 24 | D = zeros(1, model.numOutGivenLat(tf)); 25 | 26 | 27 | 28 | for i=1:model.numOutGivenLat(tf) 29 | S(i) = model.kern.comp{tf}.comp{i+1}.variance; 30 | D(i) = model.kern.comp{tf}.comp{i+1}.decay; 31 | end 32 | 33 | figure 34 | hist(S, 18) 35 | title('Histogram of sensitivities', 'fontsize', 20) 36 | if saveFigures 37 | print('-dpng', [dirSaveYeastResults '/histS']); 38 | end 39 | figure 40 | hist(D, 18) 41 | title('Histogram of decays', 'fontsize', 20) 42 | if saveFigures 43 | print('-dpng', [dirSaveYeastResults '/histD']); 44 | end 45 | figure 46 | hist(S./D, 18) 47 | title('Histogram of S/D', 'fontsize', 20) 48 | if saveFigures 49 | print('-dpng', [dirSaveYeastResults '/histSD']); 50 | end -------------------------------------------------------------------------------- /matlab/spmultimodelGradient.m: -------------------------------------------------------------------------------- 1 | function g = spmultimodelGradient(params, model) 2 | 3 | % SPMULTIMODELGRADIENT Gradient wrapper for a SPMULTIMODEL model. 4 | 5 | % MULTIGP 6 | 7 | model = modelExpandParam(model, params); 8 | g = - modelLogLikeGradients(model); 9 | 10 | -------------------------------------------------------------------------------- /matlab/spmultimodelObjective.m: -------------------------------------------------------------------------------- 1 | function f = spmultimodelObjective(params, model) 2 | 3 | % SPMULTIMODELOBJECTIVE Wrapper function for MODELOPTIMISE objective. 4 | 5 | % MULTIGP 6 | 7 | model = modelExpandParam(model, params); 8 | f = - modelLogLikelihood(model); 9 | -------------------------------------------------------------------------------- /matlab/spmultimodelPlotSensitivities.m: -------------------------------------------------------------------------------- 1 | function spmultimodelPlotSensitivities(dataSetName, figToPlot, approx, tf, options, saveFigures) 2 | 3 | % SPMULTIMODELPLOTSENSITIVTIES Plot the histograms for sensitivities. 
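% (Editor's note: the function expects a file ['S2' tf '.mat'] on the path
% containing the sensitivities S and a matrix gS2, and it histograms the ratio
% SNR = S ./ sqrt(-1./diag(gS2))'. Reading diag(gS2) as second derivatives of
% the objective with respect to S, so that -1./diag(gS2) acts as an
% approximate variance, is the editor's interpretation and is not documented
% in this repository.)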
4 | 5 | % MULTIGP 6 | 7 | load(['S2' tf '.mat']); 8 | varS = -1./diag(gS2); 9 | stdS = sqrt(varS)'; 10 | SNR = S./stdS; 11 | figure 12 | hist(SNR, 15, 'Color', 'none') 13 | h = findobj(gca,'Type','patch'); 14 | set(h,'FaceColor','k','EdgeColor','w') 15 | g = xlabel(options.xlabel, 'fontsize', options.fontsizeLegend); 16 | prop = get(g); 17 | posXlabel = prop.Position; 18 | posXlabel(2) = posXlabel(2) -1; 19 | xlabel(options.xlabel, 'fontsize', options.fontsizeLegend, 'Position', posXlabel); 20 | ylabel(options.ylabel, 'fontsize', options.fontsizeLegend) 21 | set(gca, 'fontname', 'arial') 22 | set(gca, 'ylim', options.ylim, 'xlim', options.xlim, 'fontsize', options.fontsize) 23 | set(gca, 'position', options.position) 24 | high = get(0, 'screensize'); 25 | set(gcf, 'position', high) 26 | set(gcf, 'PaperPositionMode', 'auto'); 27 | box on 28 | 29 | if saveFigures==1 30 | fileName = [dataSetName 'Hist' approx num2str(figToPlot)]; 31 | print('-depsc', ['./resultsYeast/' fileName]); 32 | saveas(gcf,['./resultsYeast/' fileName],'fig'); 33 | print('-dpng', ['./resultsYeast/' fileName]) 34 | print('-dpdf', ['./resultsYeast/' fileName]) 35 | end -------------------------------------------------------------------------------- /matlab/spmultimodelVarSInit.m: -------------------------------------------------------------------------------- 1 | function model = spmultimodelVarSInit(model) 2 | 3 | % SIMMULTIMODELVARSINIT Initialize variational distribution for S 4 | % FORMAT 5 | % DESC Initialize the variational distribution of sensitivities for a 6 | % sparse multi model. 7 | % RETURN model : model with initialized distribution. 8 | % ARG model : model before initializing distribution. 9 | % 10 | % COPYRIGHT : Mauricio A. Alvarez, 2010 11 | 12 | % MULTIGP 13 | 14 | 15 | for k=1:model.nout; 16 | if isfield(model, 'connect') && ~isempty(model.connect) 17 | model.qs.mean(k,:) = model.connect(k,:); 18 | else 19 | model.qs.mean(k,:) = ones(model.nlf,1); 20 | end 21 | %A = rand(model.nlf); 22 | %B = A*A'; 23 | %model.qs.Sigma(:,:,k) = B - diag(diag(B)) + eye(model.nlf); 24 | model.qs.Sigma(:,:,k) = 1e-2*eye(model.nlf); 25 | %model.qs.Sigma(:,:,k) = zeros(model.nlf); 26 | end 27 | 28 | -------------------------------------------------------------------------------- /matlab/toy1DGgDTCExample.m: -------------------------------------------------------------------------------- 1 | % TOY1DGGDTCEXAMPLE Sparse multigp on TOY data using DTC 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'ggToyTrainTest'; 9 | experimentNo = 5; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('dtc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | display = 1; 42 | iters = 2000; 43 | 44 | % Train the model 45 | init_time = cputime; 46 | model = 
multigpOptimise(model, display, iters); 47 | elapsed_time = cputime - init_time; 48 | 49 | % Save the results. 50 | capName = dataSetName; 51 | capName(1) = upper(capName(1)); 52 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model', 'initialLoc'); 53 | 54 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 55 | 56 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 57 | XGT, fGT); 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /matlab/toy1DGgDTCMissing.m: -------------------------------------------------------------------------------- 1 | % TOY1DGGDTCMISSING Sparse multigp on TOY data using DTC with missing data 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'ggToyMissing'; 9 | experimentNo = 1; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('dtc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | display = 1; 42 | iters = 2000; 43 | 44 | % Train the model 45 | init_time = cputime; 46 | model = multigpOptimise(model, display, iters); 47 | elapsed_time = cputime - init_time; 48 | 49 | % Save the results. 50 | capName = dataSetName; 51 | capName(1) = upper(capName(1)); 52 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model'); 53 | 54 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 55 | 56 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 
57 | XGT, fGT); 58 | 59 | -------------------------------------------------------------------------------- /matlab/toy1DGgFITCExample.m: -------------------------------------------------------------------------------- 1 | % TOY1DGGFITCEXAMPLE Sparse multigp on TOY data using FITC 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'ggToyTrainTest'; 9 | experimentNo = 4; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('fitc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = 1e-3*ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | display = 1; 42 | iters = 2000; 43 | 44 | % Train the model 45 | init_time = cputime; 46 | model = multigpOptimise(model, display, iters); 47 | elapsed_time = cputime - init_time; 48 | 49 | % Save the results. 50 | capName = dataSetName; 51 | capName(1) = upper(capName(1)); 52 | save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model', 'initialLoc'); 53 | 54 | [XGT, void, void, fGT] = mapLoadData('ggToy'); 55 | 56 | ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ... 57 | XGT, fGT); 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /matlab/toy1DGgFITCMissing.m: -------------------------------------------------------------------------------- 1 | % TOY1DGGFITCMISSING Sparse multigp on TOY data using FITC with missing Data 2 | 3 | % MULTIGP 4 | 5 | rand('twister', 1e5); 6 | randn('state', 1e5); 7 | 8 | dataSetName = 'ggToyMissing'; 9 | experimentNo = 2; 10 | 11 | [XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName); 12 | 13 | options = multigpOptions('fitc'); 14 | options.kernType = 'gg'; 15 | options.optimiser = 'scg'; 16 | options.nlf = 1; 17 | options.initialInducingPositionMethod = 'espaced'; 18 | options.numActive = 30; 19 | options.beta = ones(1, size(yTemp, 2)); 20 | options.fixInducing = false; 21 | 22 | X = cell(size(yTemp, 2),1); 23 | y = cell(size(yTemp, 2),1); 24 | 25 | for i = 1:size(yTemp, 2) 26 | y{i} = yTemp{i}; 27 | X{i} = XTemp{i}; 28 | end 29 | 30 | q = 1; 31 | d = size(yTemp, 2); 32 | 33 | % Creates the model 34 | model = multigpCreate(q, d, X, y, options); 35 | 36 | params = modelExtractParam(model); 37 | index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*'); 38 | params(index) = log(100); 39 | model = modelExpandParam(model, params); 40 | 41 | display = 1; 42 | iters = 2000; 43 | 44 | % Train the model 45 | init_time = cputime; 46 | model = multigpOptimise(model, display, iters); 47 | elapsed_time = cputime - init_time; 48 | 49 | % Save the results. 
--------------------------------------------------------------------------------
/matlab/toy1DGgFTCExample.m:
--------------------------------------------------------------------------------
% TOY1DGGFTCEXAMPLE Demo of full multi output GP with Gaussian kernel.
% FORMAT
% DESC Demo of Full Multi Output Gaussian Process.

% MULTIGP

clc
clear
rand('twister',1e5);
randn('state',1e5);
%
dataSetName = 'ggToyTrainTest';
experimentNo = 1;

[XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName);

options = multigpOptions('ftc');
options.kernType = 'gg';
options.optimiser = 'scg';
options.nlf = 1;

q = 1; % Input dimension
d = size(yTemp, 2) + options.nlf;

X = cell(size(yTemp, 2)+options.nlf,1);
y = cell(size(yTemp, 2)+options.nlf,1);

for j=1:options.nlf
    y{j} = [];
    X{j} = zeros(1, q);
end
for i = 1:size(yTemp, 2)
    y{i+options.nlf} = yTemp{i};
    X{i+options.nlf} = XTemp{i};
end

% Creates the model
model = multigpCreate(q, d, X, y, options);

params = modelExtractParam(model);
index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*');
params(index) = log(100);
model = modelExpandParam(model, params);

display = 1;
iters = 1000;

% Trains the model
init_time = cputime;
model = multigpOptimise(model, display, iters);
elapsed_time = cputime - init_time;

% Save the results.
capName = dataSetName;
capName(1) = upper(capName(1));
save(['dem' capName num2str(experimentNo) '.mat'], 'model');

% Load complete data to plot the ground truth

[XGT, void, void, fGT] = mapLoadData('ggToy');

ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT, fGT);
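In the 'ftc' demos the first options.nlf cells of X and y act as placeholders for the latent functions: empty observations and a single dummy input, with the observed outputs shifted by options.nlf. The construction can be factored into a small helper so the same two loops are not repeated in every script; the function name below is hypothetical and not part of the toolbox.

function [X, y, d] = buildFtcCellArrays(XTemp, yTemp, nlf, q)
% BUILDFTCCELLARRAYS Hypothetical helper mirroring the cell-array
% construction used in the 'ftc' demos: placeholders for the nlf latent
% functions followed by the observed outputs.
nout = size(yTemp, 2);
d = nout + nlf;            % total number of functions in the model
X = cell(d, 1);
y = cell(d, 1);
for j = 1:nlf
    y{j} = [];             % latent functions carry no observations
    X{j} = zeros(1, q);    % dummy input so the cell is not empty
end
for i = 1:nout
    y{i + nlf} = yTemp{i};
    X{i + nlf} = XTemp{i};
end
end

With such a helper, the two loops above reduce to [X, y, d] = buildFtcCellArrays(XTemp, yTemp, options.nlf, q);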
--------------------------------------------------------------------------------
/matlab/toy1DGgFTCMissing.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/matlab/toy1DGgFTCMissing.m

--------------------------------------------------------------------------------
/matlab/toy1DGgINDMissing.m:
--------------------------------------------------------------------------------
% TOY1DGGINDMISSING Independent GP with missing data

% MULTIGP

clc
clear
rand('twister',1e5);
randn('state',1e5);
%
dataSetName = 'ggToyMissing';
experimentNo = 15;

[XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName);

% Just the fourth output

XTemp = XTemp(4);
yTemp = yTemp(4);

options = multigpOptions('ftc');
options.kernType = 'gg';
options.optimiser = 'scg';
options.nlf = 1;

q = 1; % Input dimension
d = size(yTemp, 2) + options.nlf;

X = cell(size(yTemp, 2)+options.nlf,1);
y = cell(size(yTemp, 2)+options.nlf,1);

for j=1:options.nlf
    y{j} = [];
    X{j} = zeros(1, q);
end
for i = 1:size(yTemp, 2)
    y{i+options.nlf} = yTemp{i};
    X{i+options.nlf} = XTemp{i};
end

% Creates the model
model = multigpCreate(q, d, X, y, options);

params = modelExtractParam(model);
index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*');
params(index) = log(100);
model = modelExpandParam(model, params);

display = 1;
iters = 1000;

% Trains the model
init_time = cputime;
model = multigpOptimise(model, display, iters);
elapsed_time = cputime - init_time;

% Save the results.
capName = dataSetName;
capName(1) = upper(capName(1));
save(['dem' capName num2str(experimentNo) '.mat'], 'model');

[XGT, void, void, fGT] = mapLoadData('ggToy');

ggToyResults(dataSetName, experimentNo, XTemp, yTemp, XGT(4), fGT(4));
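toy1DGgINDMissing.m fits an independent GP to the fourth output only. To obtain an independent baseline for every output, the same construction can be wrapped in a loop; the sketch below reuses the options, q, display and iters defined in the script, reloads the data under new names, and uses only calls that already appear above (the loop structure and variable names are additions).

% Hedged sketch: independent single-output baselines for every output.
[XTempAll, yTempAll] = mapLoadData('ggToyMissing');
nout = size(yTempAll, 2);
indModels = cell(nout, 1);
for k = 1:nout
    Xk = XTempAll(k);
    yk = yTempAll(k);
    X = cell(1 + options.nlf, 1);
    y = cell(1 + options.nlf, 1);
    for j = 1:options.nlf
        y{j} = [];
        X{j} = zeros(1, q);
    end
    y{1 + options.nlf} = yk{1};
    X{1 + options.nlf} = Xk{1};
    indModels{k} = multigpCreate(q, 1 + options.nlf, X, y, options);
    indModels{k} = multigpOptimise(indModels{k}, display, iters);
end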
--------------------------------------------------------------------------------
/matlab/toy1DGgPITCExample.m:
--------------------------------------------------------------------------------
% TOY1DGGPITCEXAMPLE Sparse multigp on TOY data using PITC

% MULTIGP

rand('twister', 1e5);
randn('state', 1e5);

dataSetName = 'ggToyTrainTest';
experimentNo = 3;

warning('off', 'multiKernParamInit:noCrossKernel')
[XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName);

options = multigpOptions('pitc');
options.kernType = 'gg';
options.optimiser = 'scg';
options.nlf = 1;
options.initialInducingPositionMethod = 'espaced';
options.numActive = 30;
%options.beta = 1e-1*ones(1, size(yTemp, 2));
options.beta = 1e-1;
options.fixInducing = false;

X = cell(size(yTemp, 2),1);
y = cell(size(yTemp, 2),1);

for i = 1:size(yTemp, 2)
    y{i} = yTemp{i};
    X{i} = XTemp{i};
end

% for i = 1:50
%     y{i} = yTemp{1};
%     X{i} = XTemp{1};
% end

q = 1;
d = size(yTemp, 2);

% q = 1;
% d = 50;

% Creates the model
model = multigpCreate(q, d, X, y, options);

params = modelExtractParam(model);
index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*');
params(index) = log(100);
model = modelExpandParam(model, params);

display = 1;
iters = 20;

% Train the model
init_time = cputime;
model = multigpOptimise(model, display, iters);
elapsed_time = cputime - init_time;

% Save the results.
capName = dataSetName;
capName(1) = upper(capName(1));
save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model');

[XGT, void, void, fGT] = mapLoadData('ggToy');

ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ...
    XGT, fGT);
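The DTC, FITC and PITC examples differ only in the string passed to multigpOptions and in a few settings such as the noise precision beta and the number of iterations. A compact way to run the three sparse approximations side by side on the same toy data is sketched below; the shared option values and the single beta setting are choices made here for the comparison, not values prescribed by the toolbox.

% Hedged sketch: train the three sparse approximations on the same data.
approximations = {'dtc', 'fitc', 'pitc'};
models = cell(size(approximations));
trainingTime = zeros(size(approximations));
[XTemp, yTemp] = mapLoadData('ggToyTrainTest');
for a = 1:numel(approximations)
    options = multigpOptions(approximations{a});
    options.kernType = 'gg';
    options.optimiser = 'scg';
    options.nlf = 1;
    options.initialInducingPositionMethod = 'espaced';
    options.numActive = 30;
    options.beta = 1e-1*ones(1, size(yTemp, 2));
    options.fixInducing = false;
    X = cell(size(yTemp, 2), 1);
    y = cell(size(yTemp, 2), 1);
    for i = 1:size(yTemp, 2)
        y{i} = yTemp{i};
        X{i} = XTemp{i};
    end
    t0 = cputime;
    models{a} = multigpCreate(1, size(yTemp, 2), X, y, options);
    models{a} = multigpOptimise(models{a}, 1, 200);
    trainingTime(a) = cputime - t0;
end
disp(trainingTime)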
--------------------------------------------------------------------------------
/matlab/toy1DGgPITCMissing.m:
--------------------------------------------------------------------------------
% TOY1DGGPITCMISSING Sparse multigp on TOY data using PITC with missing data

% MULTIGP

rand('twister', 1e5);
randn('state', 1e5);

dataSetName = 'ggToyMissing';
experimentNo = 3;

[XTemp, yTemp, XTestTemp, yTestTemp] = mapLoadData(dataSetName);

options = multigpOptions('pitc');
options.kernType = 'gg';
options.optimiser = 'scg';
options.nlf = 1;
options.initialInducingPositionMethod = 'espaced';
options.numActive = 30;
options.beta = 1e3*ones(1, size(yTemp, 2));
options.fixInducing = false;

X = cell(size(yTemp, 2),1);
y = cell(size(yTemp, 2),1);

for i = 1:size(yTemp, 2)
    y{i} = yTemp{i};
    X{i} = XTemp{i};
end

q = 1;
d = size(yTemp, 2);

% Creates the model
model = multigpCreate(q, d, X, y, options);

params = modelExtractParam(model);
index = paramNameRegularExpressionLookup(model, 'multi .* inverse .*');
params(index) = log(100);
model = modelExpandParam(model, params);

display = 1;
iters = 2000;

% Train the model
init_time = cputime;
model = multigpOptimise(model, display, iters);
elapsed_time = cputime - init_time;

% Save the results.
capName = dataSetName;
capName(1) = upper(capName(1));
save(['demSpmgp' capName num2str(experimentNo) '.mat'], 'model');

[XGT, void, void, fGT] = mapLoadData('ggToy');

ggSpmgpToyResults(dataSetName, experimentNo, XTemp, yTemp, ...
    XGT, fGT);

--------------------------------------------------------------------------------
/matlab/walkSamplePlayData.m:
--------------------------------------------------------------------------------
function walkSamplePlayData(skelStruct, channels, limits, frameLength)

% WALKSAMPLEPLAYDATA Play sampled walking motion.
% FORMAT
% DESC plays channels from a motion capture skeleton and channels.
% ARG skelStruct : the skeleton for the motion.
% ARG channels : the channels for the motion.
% ARG limits : axis limits for the plot.
% ARG frameLength : the frame length for the motion.
%
% COPYRIGHT : Mauricio A. Alvarez, Neil D. Lawrence, 2010
%
% SEEALSO : skelPlayData, acclaimPlayData

% MULTIGP

if nargin < 4
    frameLength = 1/120;
end

clf
handle = skelVisualise(channels(1, :), skelStruct);

xlim = [limits(1,1) limits(1,2)];
ylim = [limits(2,1) limits(2,2)];
zlim = [limits(3,1) limits(3,2)];
set(gca, 'xlim', xlim, ...
    'ylim', ylim, ...
    'zlim', zlim);
title('Walking motion', 'FontSize', 15);

% Play the motion
for j = 1:size(channels, 1)
    pause(frameLength)
    skelModify(handle, channels(j, :), skelStruct);
end

--------------------------------------------------------------------------------
/matlab/walkingPlayData.m:
--------------------------------------------------------------------------------
function walkingPlayData(skelStruct, channels, limits, motion, subject, frameLength)

% WALKINGPLAYDATA Play walking motion capture data.
% FORMAT
% DESC plays channels from a motion capture skeleton and channels.
% ARG skelStruct : the skeleton for the motion.
% ARG channels : the channels for the motion.
% ARG limits : axis limits for the plot.
% ARG motion : number ID for the motion to be displayed in the plot.
% ARG subject : number of the subject to be displayed in the plot.
% ARG frameLength : the frame length for the motion.
%
% COPYRIGHT : Mauricio Alvarez, Neil D. Lawrence, 2012
%
% SEEALSO : skelPlayData, acclaimPlayData

% MULTIGP

% frameLength is the sixth argument, so default it when fewer than six
% arguments are supplied.
if nargin < 6
    frameLength = 1/120;
end

clf
handle = skelVisualise(channels(1, :), skelStruct);

xlim = [limits(1,1) limits(1,2)];
ylim = [limits(2,1) limits(2,2)];
zlim = [limits(3,1) limits(3,2)];
set(gca, 'xlim', xlim, ...
    'ylim', ylim, ...
    'zlim', zlim);
title(['Subject ' num2str(subject) ' Motion ' num2str(motion)], 'FontSize', 15);

% Play the motion
for j = 1:size(channels, 1)
    pause(frameLength)
    skelModify(handle, channels(j, :), skelStruct);
end
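Both playback helpers expect a skeleton structure, a channel matrix and a 3-by-2 matrix of axis limits. A hedged usage sketch is shown below; skel and channels are placeholders for data obtained from the mocap utilities used by the walking demos, and the limits, motion and subject values are illustrative only.

% Hedged usage sketch for the playback helpers. skel and channels are
% placeholder variables; the axis limits are illustrative values.
limits = [-20 20; 0 30; -20 20];   % [xmin xmax; ymin ymax; zmin zmax]
walkingPlayData(skel, channels, limits, 1, 35, 1/120);
% walkSamplePlayData takes the same first four arguments:
% walkSamplePlayData(skel, channels, limits, 1/120);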
--------------------------------------------------------------------------------
/toy1DDTC4TrainTest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toy1DDTC4TrainTest.png

--------------------------------------------------------------------------------
/toy1DFITC4TrainTest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toy1DFITC4TrainTest.png

--------------------------------------------------------------------------------
/toy1DFTC4TrainTest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toy1DFTC4TrainTest.png

--------------------------------------------------------------------------------
/toy1DPITC4TrainTest.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toy1DPITC4TrainTest.png

--------------------------------------------------------------------------------
/toyPredictionFITC2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toyPredictionFITC2.png

--------------------------------------------------------------------------------
/toyPredictionFITC5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toyPredictionFITC5.png

--------------------------------------------------------------------------------
/toyPredictionPITC2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toyPredictionPITC2.png

--------------------------------------------------------------------------------
/toyPredictionPITC5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/toyPredictionPITC5.png

--------------------------------------------------------------------------------
/yeastSpellmanPITC2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/yeastSpellmanPITC2.png

--------------------------------------------------------------------------------
/yeastSpellmanPITC2GeneExpression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/yeastSpellmanPITC2GeneExpression.png

--------------------------------------------------------------------------------
/yeastSpellmanPITC93.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/yeastSpellmanPITC93.png

--------------------------------------------------------------------------------
/yeastSpellmanPITC93GeneExpression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SheffieldML/multigp/c5c1d8eedeafd123367e2c0902ebdd6693f6878b/yeastSpellmanPITC93GeneExpression.png

--------------------------------------------------------------------------------