├── README.md
├── ch2_codes
├── channelEq
│ ├── PART1.m
│ └── PART2.m
├── mg_prediction
│ ├── KLMS1.m
│ ├── KLMS1_LC.m
│ ├── KLMS3.m
│ ├── LMS1.m
│ ├── MK30.mat
│ ├── PART1.m
│ ├── PART10.m
│ ├── PART2.m
│ ├── PART3.m
│ ├── PART4.m
│ ├── PART5.m
│ ├── PART6.m
│ ├── PART7.m
│ ├── PART8.m
│ ├── PART9.m
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── regularizationNetwork.m
│ └── sparseKLMS1.m
└── regularization_function
│ └── regularizationfuntion.m
├── ch3_codes
├── channelEq
│ ├── APA1.m
│ ├── APA1s.m
│ ├── LMS1.m
│ ├── LMS1s.m
│ ├── LMS2.m
│ ├── PART1.m
│ ├── PART2.m
│ ├── PART3.m
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── sparseKAPA1.m
│ ├── sparseKAPA1s.m
│ ├── sparseKAPA2.m
│ ├── sparseKAPA2s.m
│ ├── sparseKLMS1.m
│ └── sparseKLMS1s.m
├── mg_prediction
│ ├── KAPA1.m
│ ├── KAPA2.m
│ ├── KLMS1.m
│ ├── KRLS.m
│ ├── LMS1.m
│ ├── MK30.mat
│ ├── PART1.m
│ ├── PART2.m
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── slidingWindowKRLS.m
│ ├── sparseKAPA1.m
│ ├── sparseKAPA2.m
│ └── sparseKLMS1.m
└── noiseCancelation
│ ├── LMS2.m
│ ├── PART1.m
│ ├── PART2.m
│ ├── fmri.mat
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── sparseKAPA2.m
│ └── sparseKLMS1.m
├── ch4_codes
├── channelEq
│ ├── KRLS_ALDs.m
│ ├── PART1.asv
│ ├── PART1.m
│ ├── PART3.asv
│ ├── PART3.m
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── sparseKLMS1.m
│ ├── sparseKLMS1s.asv
│ └── sparseKLMS1s.m
├── gpr
│ ├── PART1.m
│ ├── PART2.m
│ └── gpml-matlab
│ │ └── gpml
│ │ ├── Contents.m
│ │ ├── Copyright
│ │ ├── Makefile
│ │ ├── approxEP.m
│ │ ├── approxLA.m
│ │ ├── approximations.m
│ │ ├── binaryEPGP.m
│ │ ├── binaryGP.m
│ │ ├── binaryLaplaceGP.m
│ │ ├── covConst.m
│ │ ├── covFunctions.m
│ │ ├── covLINard.m
│ │ ├── covLINone.m
│ │ ├── covMatern3iso.m
│ │ ├── covMatern5iso.m
│ │ ├── covNNone.m
│ │ ├── covNoise.m
│ │ ├── covPeriodic.m
│ │ ├── covProd.m
│ │ ├── covRQard.m
│ │ ├── covRQiso.m
│ │ ├── covSEard.m
│ │ ├── covSEiso.m
│ │ ├── covSum.m
│ │ ├── cumGauss.m
│ │ ├── gauher.m
│ │ ├── gpr.m
│ │ ├── gprSRPP.m
│ │ ├── likelihoods.m
│ │ ├── logistic.m
│ │ ├── minimize.m
│ │ ├── solve_chol.c
│ │ ├── solve_chol.m
│ │ ├── sq_dist.c
│ │ └── sq_dist.m
└── mg_prediction
│ ├── KRLS_ALD.m
│ ├── KRLS_ENC.m
│ ├── KRLS_NC.m
│ ├── MK30.mat
│ ├── PART1.m
│ ├── PART2.asv
│ ├── PART2.m
│ └── ker_eval.m
├── ch5_codes
├── Lorenz
│ ├── EX_KRLS.m
│ ├── EX_KRLS_ALD_2.m
│ ├── EX_RLS.m
│ ├── KRLS.m
│ ├── KRLS_ALD.m
│ ├── LMS2.m
│ ├── PART1.m
│ ├── PART2.m
│ ├── PART3.m
│ ├── PART4.m
│ ├── RLS.m
│ ├── SWKRLS.m
│ ├── SWKRLS2.m
│ ├── gramMatrix.m
│ ├── ker_eval.m
│ ├── kernelparameter.fig
│ ├── lorenz.m
│ ├── lorenz.mat
│ ├── lorenz_data_display.m
│ ├── nlG.m
│ └── slidingWindowKRLS.m
└── RayleighChannelTracking
│ ├── AOGR_ALD.m
│ ├── EX_KRLS.m
│ ├── EX_KRLS_ALD.m
│ ├── EX_KRLS_ALD_2.m
│ ├── EX_RLS.m
│ ├── KRLS.m
│ ├── KRLS_ALD.asv
│ ├── KRLS_ALD.m
│ ├── LMS2.m
│ ├── MLE.m
│ ├── PART1.m
│ ├── PART2.m
│ ├── PART3.m
│ ├── PART4.m
│ ├── RLS.m
│ ├── SIR.m
│ ├── genesam.m
│ ├── ker_eval.m
│ ├── kernel.m
│ ├── ls.m
│ ├── maxp.m
│ ├── nlG.m
│ ├── normw.m
│ ├── particlefilter.m
│ ├── rayleigh.m
│ ├── rayleighTracking.m
│ ├── resample.m
│ ├── silverKerWidth.m
│ ├── sparseKLMS1.m
│ ├── sparseKLMS1s.m
│ ├── systemdata.m
│ └── testChannel.m
├── ch6_codes
├── CO2
│ ├── AOGR.m
│ ├── AOGR1.m
│ ├── AOGR2.m
│ ├── AOGR_ALD.m
│ ├── AOGR_CI.m
│ ├── CO2_data.mat
│ ├── CO2forecasting.m
│ ├── SCKLMS.m
│ ├── co2.mat
│ ├── co2_PART1.m
│ ├── co2_PART2.m
│ ├── co2_PART3.m
│ ├── co2_PART4.m
│ ├── co2_PART5.m
│ ├── co2_reshaped.mat
│ ├── co2_shortterm.m
│ ├── data_analysis.m
│ └── ker_eval.m
├── mg_prediction
│ ├── AOGR.m
│ ├── AOGR1.m
│ ├── AOGR2.m
│ ├── AOGR_ALD.m
│ ├── AOGR_CI.m
│ ├── CCKLMS1.m
│ ├── LMS1.m
│ ├── MK30.mat
│ ├── PART1.m
│ ├── PART2.m
│ ├── PART3.asv
│ ├── PART3.m
│ ├── RAN.m
│ ├── SCKLMS.m
│ ├── ker_eval.m
│ └── sparseKLMS1.m
└── regression
│ ├── AOGR.m
│ ├── AOGR1.m
│ ├── AOGR2.m
│ ├── KRLS_old.m
│ ├── PART1.m
│ ├── PART2.m
│ ├── PART3.m
│ ├── ch5-6.fig
│ ├── ker_eval.m
│ └── nlG.m
├── images
└── cover.jpg
└── samples
├── chapter1.pdf
├── contents.pdf
└── preface.pdf
/README.md:
--------------------------------------------------------------------------------
1 | # Kernel Adaptive Filtering: A Comprehensive Introduction
2 |
3 |
4 | [Weifeng Liu](http://cnel.ufl.edu/people/people.php?name=wfliu), [Jose C. Principe](http://cnel.ufl.edu/people/people.php?name=principe), Simon Haykin.
5 |
6 | John Wiley, 2010
7 |
8 | ## Code by chapter
9 | 1. [Background and Preview](./samples/chapter1.pdf) (sample chapter)
10 | 2. [Kernel Least-Mean-Square Algorithm](./ch2_codes)
11 | 3. [Kernel Affine Projection Algorithms](./ch3_codes)
12 | 4. [Kernel Recursive Least-Squares Algorithm](./ch4_codes)
13 | 5. [Extended Kernel Recursive Least-Squares](./ch5_codes)
14 | 6. [Designing Sparse Kernel Adaptive Filters](./ch6_codes)
15 |
16 | ## Links
17 | * The [book](http://www.amazon.com/gp/product/0470447532?ie=UTF8&tag=weiswebsit-20&linkCode=as2&camp=1789&creative=390957&creativeASIN=0470447532) at Amazon
18 | * A Python implementation of the kernel adaptive filtering algorithms, following scikit-learn's API, is being developed [here](https://github.com/EderSantana/adaptive_kernel_methods).
19 |
--------------------------------------------------------------------------------
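
Each chapter directory is self-contained: the PART*.m scripts reproduce individual figures and tables from the book, and the remaining .m files implement the algorithms they use. A minimal session (hypothetical, assuming the repository root is the current MATLAB folder):

    cd ch2_codes/mg_prediction
    PART1    % LMS vs. KLMS learning curves for Mackey-Glass one-step prediction
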
/ch2_codes/channelEq/PART1.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | %Copyright Aaron Liu
3 | %CNEL
4 | %July 1, 2008
5 | %
6 | %description:
7 | %compare the performance of LMS and KLMS in nonlinear channel equalization
8 | %learning curve
9 | %
10 | %Usage:
11 | %ch2, nonlinear channel equalization, figure 2-5
12 | %
13 | %Outside functions called:
14 | %None
15 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
16 |
17 |
18 | clear all,
19 | close all
20 | clc
21 | %======filter config=======
22 | %time delay (embedding) length
23 | TD = 5;
24 | D = 2;%equalization time lag
25 | h = .1;%kernel size
26 | %======end of config=======
27 |
28 | %=========data===============
29 | % Generate binary data
30 | u = randn(1,2500)>0;
31 | u = 2*u-1;
32 |
33 | % Nonlinear channel
34 | z = u+0.5*[0,u(1:end-1)];
35 | % Channel noise
36 | ns = 0.4*randn(1,length(u));
37 | % Output of the nonlinear channel
38 | y = z - 0.9*z.^2 + ns;
39 |
40 | %data size
41 | N_tr = 1000;
42 | N_te = 50;
43 |
44 | %data embedding
45 | X = zeros(TD,N_tr);
46 | for k=1:N_tr
47 | X(:,k) = y(k:k+TD-1)';
48 | end
49 | % Test data
50 | X_te = zeros(TD,N_te);
51 | for k=1:N_te
52 | X_te(:,k) = y(k+N_tr:k+TD-1+N_tr)';
53 | end
54 |
55 | % Desired signal
56 | T = zeros(N_tr,1);
57 | for ii=1:N_tr
58 | T(ii) = u(D+ii);
59 | end
60 |
61 | T_te = zeros(N_te,1);
62 | for ii=1:N_te
63 | T_te(ii) = u(D+ii+N_tr);
64 | end
65 | %======end of data===========
66 |
67 | %=======init================
68 | mse_te = zeros(1,N_tr);
69 | mse_te_k = zeros(1,N_tr);
70 | %=======end of init=========
71 |
72 | %=========Linear LMS===================
73 | %learning rate (step size)
74 | lr = .01;%learning rate
75 | w1 = zeros(1,TD);
76 | e_l = zeros(N_tr,1);
77 | for n=1:N_tr
78 | y = w1*X(:,n);
79 | e_l(n) = T(n) - y;
80 | w1 = w1 + lr*e_l(n)*X(:,n)';
81 | %testing
82 | err = T_te'-(w1*X_te);
83 | mse_te(n) = mean(err.^2);
84 | end
85 | %=========end of Linear LMS================
86 |
87 | %=========Kernel LMS===================
88 | lr_k = .2;
89 | %init
90 | e_k = zeros(N_tr,1);
91 | y = zeros(N_tr,1);
92 | y_te = zeros(N_te,1);
93 | % n=1 init
94 | e_k(1) = T(1);
95 | y(1) = 0;
96 | mse_te_k(1) = mean(T_te.^2);
97 | % start
98 | for n=2:N_tr
99 | %training
100 | ii = 1:n-1;
101 | y(n) = lr_k*e_k(ii)'*(exp(-sum((X(:,n)*ones(1,n-1)-X(:,ii)).^2)*h))';
102 |
103 | e_k(n) = T(n) - y(n);
104 |
105 | %testing
106 | y_te = zeros(N_te,1);
107 | for jj = 1:N_te
108 | ii = 1:n;
109 | y_te(jj) = lr_k*e_k(ii)'*(exp(-sum((X_te(:,jj)*ones(1,n)-X(:,ii)).^2)*h))';
110 | end
111 | err = T_te - y_te;
112 | mse_te_k(n) = mean(err.^2);
113 | end
114 | %=========end of Kernel LMS================
115 |
116 | %==========plot and test===================
117 | figure
118 | plot(mse_te,'b-','LineWidth',2)
119 | hold on
120 | plot(mse_te_k,'r--','LineWidth',2)
121 | set(gca, 'FontSize', 14);
122 | set(gca, 'FontName', 'Arial');
123 |
124 | legend('LMS','KLMS')
125 | xlabel('iteration')
126 | ylabel('testing MSE')
127 |
128 |
129 |
--------------------------------------------------------------------------------
/ch2_codes/mg_prediction/KLMS1.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,weightVector,biasTerm,learningCurve] = ...
2 | KLMS1(trainInput,trainTarget,typeKernel,paramKernel,stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Input:
5 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
6 | % trainSize is the number of training data
7 | %trainTarget: desired signal for training trainSize*1
8 | %
9 | %typeKernel: 'Gauss', 'Poly'
10 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
11 | %
12 | %stepSizeFeatureVector: learning rate for kernel part
13 | %stepSizeWeightVector: learning rate for linear part, set to zero to disable
14 | %stepSizeBias: learning rate for bias term, set to zero to disable
15 | %
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 | %Output:
18 | %expansionCoefficient: consisting of coefficients of the kernel expansion
19 | %weightVector: the linear weight vector
20 | %biasTerm: the bias term
21 | %learningCurve: trainSize*1 used for learning curve
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Notes: none.
24 |
25 |
26 | % memory initialization
27 | trainSize = length(trainTarget);
28 | expansionCoefficient = zeros(trainSize,1);
29 | learningCurve = zeros(trainSize,1);
30 |
31 | % n=1 init
32 | aprioriErr = trainTarget(1);
33 | weightVector = stepSizeWeightVector*aprioriErr*trainInput(:,1);
34 | biasTerm = stepSizeBias*aprioriErr;
35 | expansionCoefficient(1) = stepSizeFeatureVector*aprioriErr;
36 | learningCurve(1) = aprioriErr^2;
37 |
38 | % start
39 | for n = 2:trainSize
40 | % training
41 | % filtering
42 | ii = 1:n-1;
43 | networkOutput = expansionCoefficient(ii)'*ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
44 | aprioriErr = trainTarget(n) - networkOutput;
45 | % updating
46 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n);
47 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
48 | expansionCoefficient(n) = stepSizeFeatureVector*aprioriErr;
49 |
50 | learningCurve(n) = aprioriErr^2;
51 | end
52 |
53 | return
54 |
55 |
--------------------------------------------------------------------------------
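
A usage sketch for KLMS1 above (hypothetical data, not from the book's experiments): a Gaussian kernel of size 1, kernel step size 0.2, and the linear and bias terms disabled by zero step sizes.

    X = randn(10, 200); T = randn(200, 1);   % placeholder data, inputDimension*N and N*1
    [expansionCoefficient, weightVector, biasTerm, learningCurve] = KLMS1(X, T, 'Gauss', 1, 0.2, 0, 0);
    plot(learningCurve)                      % squared a priori error at each training step
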
/ch2_codes/mg_prediction/KLMS1_LC.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,weightVector,biasTerm,learningCurve] = ...
2 | KLMS1_LC(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Input:
5 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
6 | % trainSize is the number of training data
7 | %trainTarget: desired signal for training trainSize*1
8 | %
9 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
10 | %testTarget: desired signal for testing testSize*1
11 | %
12 | %typeKernel: 'Gauss', 'Poly'
13 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
14 | %
15 | %stepSizeFeatureVector: learning rate for kernel part
16 | %stepSizeWeightVector: learning rate for linear part, set to zero to disable
17 | %stepSizeBias: learning rate for bias term, set to zero to disable
18 | %
19 | %flagLearningCurve: control if calculating the learning curve
20 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
21 | %Output:
22 | %expansionCoefficient: consisting of coefficients of the kernel expansion
23 | %weightVector: the linear weight vector
24 | %biasTerm: the bias term
25 | %learningCurve: trainSize*1 used for learning curve
26 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
27 | %Notes: none.
28 |
29 |
30 | % memory initialization
31 | trainSize = length(trainTarget);
32 | testSize = length(testTarget);
33 |
34 | expansionCoefficient = zeros(trainSize,1);
35 |
36 | if flagLearningCurve
37 | learningCurve = zeros(trainSize,1);
38 | learningCurve(1) = mean(testTarget.^2);
39 | else
40 | learningCurve = [];
41 | end
42 |
43 | % n=1 init
44 | aprioriErr = trainTarget(1);
45 | weightVector = stepSizeWeightVector*aprioriErr*trainInput(:,1);
46 | biasTerm = stepSizeBias*aprioriErr;
47 | expansionCoefficient(1) = stepSizeFeatureVector*aprioriErr;
48 | % start
49 | for n = 2:trainSize
50 | % training
51 | % filtering
52 | ii = 1:n-1;
53 | networkOutput = expansionCoefficient(ii)'*ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
54 | aprioriErr = trainTarget(n) - networkOutput;
55 | % updating
56 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n);
57 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
58 | expansionCoefficient(n) = stepSizeFeatureVector*aprioriErr;
59 |
60 | if flagLearningCurve
61 | % testing
62 | y_te = zeros(testSize,1);
63 | for jj = 1:testSize
64 | ii = 1:n;
65 | y_te(jj) = expansionCoefficient(ii)'*ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
66 | end
67 | err = testTarget - y_te;
68 | learningCurve(n) = mean(err.^2);
69 | end
70 | end
71 |
72 | return
73 |
74 |
--------------------------------------------------------------------------------
/ch2_codes/mg_prediction/KLMS3.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,weightVector,biasTerm,learningCurve] = ...
2 | KLMS3(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,paramRegularization,stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KLMS3
5 | %leaky KLMS
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %paramRegularization: regularization parameter in the cost function
19 | %
20 | %stepSizeFeatureVector: learning rate for kernel part
21 | %stepSizeWeightVector: learning rate for linear part, set to zero to disable
22 | %stepSizeBias: learning rate for bias term, set to zero to disable
23 | %
24 | %flagLearningCurve: control if calculating the learning curve
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
26 | %Output:
27 | %expansionCoefficient: consisting of coefficients of the kernel
28 | % expansion
29 | %weightVector: the linear weight vector
30 | %biasTerm: the bias term
31 | %learningCurve: trainSize*1 used for learning curve
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 | %Notes: none.
34 |
35 |
36 | % memory initialization
37 | trainSize = length(trainTarget);
38 | testSize = length(testTarget);
39 |
40 | expansionCoefficient = zeros(trainSize,1);
41 |
42 | % forgetting term
43 | forgettingTerm = 1 - paramRegularization*stepSizeFeatureVector;
44 | if abs(forgettingTerm)>1
45 |     warning('forgetting term magnitude exceeds 1: reduce paramRegularization or stepSizeFeatureVector, otherwise the leaky KLMS recursion diverges');
46 | end
47 |
48 | if flagLearningCurve
49 | learningCurve = zeros(trainSize,1);
50 | learningCurve(1) = mean(testTarget.^2);
51 | else
52 | learningCurve = [];
53 | end
54 |
55 | % n=1 init
56 | aprioriErr = trainTarget(1);
57 | weightVector = stepSizeWeightVector*aprioriErr*trainInput(:,1);
58 | biasTerm = stepSizeBias*aprioriErr;
59 | expansionCoefficient(1) = stepSizeFeatureVector*aprioriErr;
60 |
61 | % start
62 | for n = 2:trainSize
63 | % training
64 |
65 | % filtering
66 | ii = 1:n-1;
67 | networkOutput = expansionCoefficient(ii)'*...
68 | ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
69 | aprioriErr = trainTarget(n) - networkOutput;
70 | % updating
71 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n);
72 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
73 |
74 | expansionCoefficient(ii) = expansionCoefficient(ii)*forgettingTerm;
75 | expansionCoefficient(n) = stepSizeFeatureVector*aprioriErr;
76 |
77 | if flagLearningCurve
78 | % testing
79 | y_te = zeros(testSize,1);
80 | for jj = 1:testSize
81 | ii = 1:n;
82 | y_te(jj) = expansionCoefficient(ii)'*ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
83 | end
84 | err = testTarget - y_te;
85 | learningCurve(n) = mean(err.^2);
86 | end
87 | end
88 |
89 | return
90 |
91 |
--------------------------------------------------------------------------------
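
KLMS3 differs from KLMS1 only in the leak: before each new coefficient is appended, the existing expansion coefficients are scaled by forgettingTerm = 1 - paramRegularization*stepSizeFeatureVector. A usage sketch (hypothetical data), with a small regularization parameter so that scaling stays inside (0,1):

    X = randn(10, 200); T = randn(200, 1);         % placeholder training data
    X_te = randn(10, 50); T_te = randn(50, 1);     % placeholder test data
    [expansionCoefficient, weightVector, biasTerm, learningCurve] = ...
        KLMS3(X, T, X_te, T_te, 'Gauss', 1, 0.01, 0.2, 0, 0, 1);
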
/ch2_codes/mg_prediction/LMS1.m:
--------------------------------------------------------------------------------
1 | function [weightVector,biasTerm,learningCurve]= ...
2 | LMS1(trainInput,trainTarget,stepSizeWeightVector,stepSizeBias)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS1:
5 | %Normal least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
14 | %stepSizeBias: learning rate for bias term, set to zero to disable
15 | %
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 | %Output:
18 | %weightVector: the linear coefficients
19 | %biasTerm: the bias term
20 | %learningCurve: trainSize*1 used for learning curve
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | %Notes: none.
23 |
24 | % memory initialization
25 | [inputDimension,trainSize] = size(trainInput);
26 |
27 | learningCurve = zeros(trainSize,1);
28 |
29 | weightVector = zeros(inputDimension,1);
30 | biasTerm = 0;
31 |
32 | % training
33 | for n = 1:trainSize
34 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
35 | aprioriErr = trainTarget(n) - networkOutput;
36 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n);
37 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
38 | learningCurve(n) = aprioriErr^2;
39 | end
40 |
41 | return
42 |
--------------------------------------------------------------------------------
/ch2_codes/mg_prediction/MK30.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch2_codes/mg_prediction/MK30.mat
--------------------------------------------------------------------------------
/ch2_codes/mg_prediction/PART1.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | %Copyright Weifeng Liu
3 | %CNEL
4 | %July 1, 2008
5 | %
6 | %description:
7 | %compare the performance of LMS and KLMS for Mackey Glass time series
8 | %one step prediction
9 | %Learning curves
10 | %
11 | %Usage:
12 | %ch2, m-g prediction
13 | %
14 | %Outside functions called:
15 | %none
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | clear all,
19 | close all
20 | clc
21 | %======filter config=======
22 | %time delay (embedding) length
23 | TD = 10;
24 | %kernel parameter
25 | a = 1;%fixed
26 | %noise std
27 | np =.04;
28 | %data size
29 | N_tr = 500;
30 | N_te = 100;%
31 | %%======end of config=======
32 |
33 |
34 | disp('Learning curves are being generated. Please wait...');
35 |
36 | %======data formatting===========
37 | load MK30 %MK30 5000*1
38 | MK30 = MK30+np*randn(size(MK30));
39 | MK30 = MK30 - mean(MK30);
40 |
41 | %500 training data
42 | train_set = MK30(1501:4500);
43 |
44 | %100 testing data
45 | test_set = MK30(4601:4900);
46 |
47 | %data embedding
48 | X = zeros(TD,N_tr);
49 | for k=1:N_tr
50 | X(:,k) = train_set(k:k+TD-1)';
51 | end
52 | T = train_set(TD+1:TD+N_tr);
53 |
54 | X_te = zeros(TD,N_te);
55 | for k=1:N_te
56 | X_te(:,k) = test_set(k:k+TD-1)';
57 | end
58 | T_te = test_set(TD+1:TD+N_te);
59 | %======end of data formatting===========
60 |
61 | %
62 | mse_te_l = zeros(N_tr,1);
63 |
64 | %=========Linear LMS===================
65 | %learning rate (step size)
66 | lr_l = .2;%learning rate
67 | w1 = zeros(1,TD);
68 | e_l = zeros(N_tr,1);
69 | for n=1:N_tr
70 | y = w1*X(:,n);
71 | e_l(n) = T(n) - y;
72 | w1 = w1 + lr_l*e_l(n)*X(:,n)';
73 |
74 | %testing MSE for learning curve
75 | err_te = T_te'-(w1*X_te);
76 | mse_te_l(n) = mean(err_te.^2);
77 | end
78 | %=========end of Linear LMS================
79 |
80 | %=========Kernel LMS===================
81 |
82 | %learning rate (adjustable)
83 | % lr_k = .1;
84 | lr_k = .2;
85 | % lr_k = .6;
86 |
87 | %init
88 | e_k = zeros(N_tr,1);
89 | y = zeros(N_tr,1);
90 | mse_te_k = zeros(N_tr,1);
91 |
92 | % n=1 init
93 | e_k(1) = T(1);
94 | y(1) = 0;
95 | mse_te_k(1) = mean(T_te.^2);
96 | % start
97 | for n=2:N_tr
98 | %training
99 | ii = 1:n-1;
100 | y(n) = lr_k*e_k(ii)'*(exp(-sum((X(:,n)*ones(1,n-1)-X(:,ii)).^2)))';
101 | e_k(n) = T(n) - y(n);
102 |
103 | %testing MSE
104 | y_te = zeros(N_te,1);
105 | for jj = 1:N_te
106 | y_te(jj) = lr_k*e_k(1:n)'*(exp(-sum((X_te(:,jj)*ones(1,n)-X(:,1:n)).^2)))';
107 | end
108 | err = T_te - y_te;
109 | mse_te_k(n) = mean(err.^2);
110 |
111 | end
112 |
113 | %=========end of Kernel LMS================
114 |
115 | figure
116 | plot(mse_te_l,'k-','LineWidth',2);
117 | hold on
118 | plot(mse_te_k,'k--','LineWidth',2);
119 |
120 | set(gca, 'FontSize', 14);
121 | set(gca, 'FontName', 'Arial');
122 |
123 | legend('LMS', 'KLMS')
124 | xlabel('iteration')
125 | ylabel('MSE')
--------------------------------------------------------------------------------
/ch2_codes/mg_prediction/PART3.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | %Copyright Weifeng Liu
3 | %CNEL
4 | %July 1, 2008
5 | %
6 | %description:
7 | %compare the solution norm of KLMS and RN for Mackey Glass time series
8 | %one step prediction
9 | %Monte Carlo simulation
10 | %
11 | %Usage:
12 | %ch2, m-g prediction, table 2-3
13 | %
14 | %Outside functions called:
15 | %none
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 |
18 | clear all,
19 | close all
20 | clc
21 | %======filter config=======
22 | %time delay (embedding) length
23 | TD = 10;
24 | %kernel parameter
25 | a = 1;%fixed
26 | %noise standard deviation
27 | np =.04;
28 | %data size
29 | N_tr = 500;
30 | N_te = 100;%
31 | %%======end of config=======
32 |
33 | %======monte carlo init =======
34 | load MK30 %MK30 5000*1
35 |
36 | MK30_tmp = MK30;
37 |
38 | MC = 50;
39 |
40 | solutionNorm_klms = zeros(MC,1);
41 | solutionNorm_rbf = zeros(MC,1);
42 |
43 | disp([num2str(MC), ' Monte Carlo simulations. Please wait...']);
44 |
45 | for mc = 1:MC
46 | disp(mc);
47 |
48 | %======data formatting===========
49 | MK30_tmp = MK30; %restore
50 |
51 | MK30_tmp = MK30_tmp + np*randn(size(MK30));
52 | MK30_tmp = MK30_tmp - mean(MK30_tmp);
53 |
54 | %500 training data
55 | train_set = MK30_tmp(1501:4500);
56 |
57 | %100 testing data
58 | test_set = MK30_tmp(4601:4900);
59 |
60 | %data embedding
61 | X = zeros(TD,N_tr);
62 | for k=1:N_tr
63 | X(:,k) = train_set(k:k+TD-1)';
64 | end
65 | T = train_set(TD+1:TD+N_tr);
66 |
67 | X_te = zeros(TD,N_te);
68 | for k=1:N_te
69 | X_te(:,k) = test_set(k:k+TD-1)';
70 | end
71 | T_te = test_set(TD+1:TD+N_te);
72 | %======end of data formatting===========
73 |
74 | %=========Kernel LMS===================
75 |
76 | %learning rate (adjustable)
77 | % lr_k = .1;
78 | lr_k = .2;
79 | % lr_k = .6;
80 |
81 | %init
82 | e_k = zeros(N_tr,1);
83 | y = zeros(N_tr,1);
84 |
85 | % n=1 init
86 | e_k(1) = T(1);
87 | y(1) = 0;
88 | mse_te_k(1) = mean(T_te.^2);
89 | % start
90 | for n=2:N_tr
91 | %training
92 | ii = 1:n-1;
93 | y(n) = lr_k*e_k(ii)'*(exp(-sum((X(:,n)*ones(1,n-1)-X(:,ii)).^2)))';
94 | e_k(n) = T(n) - y(n);
95 | end
96 |
97 | solutionNorm_klms(mc) = lr_k*norm(e_k);
98 |
99 | %=========end of Kernel LMS================
100 |
101 | %=========RBF============================
102 | %regularization parameter (adjustable)
103 | lam = 1;
104 |
105 | G = zeros(N_tr,N_tr);
106 | for i=1:N_tr-1
107 | j=i+1:N_tr;
108 | G(i,j)=exp(-sum((X(:,i)*ones(1,N_tr-i)-X(:,j)).^2));
109 | G(j,i)=G(i,j)';
110 | end
111 | G = G + eye(N_tr);
112 | G_lam =G + lam*eye(N_tr);
113 |     a = G_lam\T;
114 |
115 | solutionNorm_rbf(mc) = norm(a);
116 |
117 | %=========End of RBF===================
118 | end%mc
119 |
120 | disp('solution norm')
121 | disp(['Noise power: ',num2str(np)]);
122 | disp(['KLMS: ',num2str(mean(solutionNorm_klms)),'    RN: ',num2str(mean(solutionNorm_rbf))]); %reconstructed: the original line was garbled
--------------------------------------------------------------------------------
/ch2_codes/regularization_function/regularizationfuntion.m:
--------------------------------------------------------------------------------
33 |
34 |
35 | figure(10),
36 | lineWid = 3;
37 | plot(x,rf_LMS,'k-','LineWidth', lineWid)
38 | hold on
39 | plot(x,rf_Tik,'k-.','LineWidth', lineWid);
40 | %
41 | plot(x,rf_Tru,'k:','LineWidth', lineWid)
42 | %
43 | %
44 | set(gca, 'FontSize', 14);
45 | set(gca, 'FontName', 'Arial');
46 | legend('KLMS','Tikhonov','PCA')
47 | hold off
48 | xlabel('singular value')
49 | ylabel('reg-function')
50 | grid on
51 |
52 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
53 | %comparison on gradient descent with different step size
54 | N = 500;
55 |
56 | eta = .01;
57 | rf_LMS_1 = 1-(1-eta*x.^2/N).^N;
58 |
59 | eta = .1;
60 | rf_LMS_2 = 1-(1-eta*x.^2/N).^N;
61 |
62 | eta = 1;
63 | rf_LMS_3 = 1-(1-eta*x.^2/N).^N;
64 |
65 | figure(11),
66 | lineWid = 3;
67 | plot(x,rf_LMS_1,'k-','LineWidth', lineWid)
68 | hold on
69 | plot(x,rf_LMS_2,'k--','LineWidth', lineWid);
70 | %
71 | plot(x,rf_LMS_3,'k-.','LineWidth', lineWid)
72 | %
73 | %
74 | set(gca, 'FontSize', 14);
75 | set(gca, 'FontName', 'Arial');
76 | legend('\eta=.01','\eta=.1','\eta=1')
77 | hold off
78 | xlabel('singular value')
79 | ylabel('reg-function')
80 | grid on
81 |
82 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
83 | %comparison on gradient descent with different data size
84 | eta = .1;
85 |
86 | N = 100;
87 | rf_LMS_1 = 1-(1-eta*x.^2/N).^N;
88 |
89 | N = 500;
90 | rf_LMS_2 = 1-(1-eta*x.^2/N).^N;
91 |
92 | N = 1000;
93 | rf_LMS_3 = 1-(1-eta*x.^2/N).^N;
94 |
95 | figure(12),
96 | lineWid = 3;
97 | plot(x,rf_LMS_1,'k-','LineWidth', lineWid)
98 | hold on
99 | plot(x,rf_LMS_2,'k--','LineWidth', lineWid);
100 | %
101 | plot(x,rf_LMS_3,'k-.','LineWidth', lineWid)
102 | %
103 | set(gca, 'FontSize', 14);
104 | set(gca, 'FontName', 'Arial');
105 | legend('N = 100','N = 500','N = 1000')
106 | hold off
107 | xlabel('singular value')
108 | ylabel('reg-function')
109 | grid on
110 |
111 |
--------------------------------------------------------------------------------
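
The listing above begins at its line 33, so the quantities it plots are defined earlier in the file and are not shown here. A minimal sketch of definitions consistent with the gradient-descent reg-function used later in the same file; the Tikhonov and truncated-SVD (PCA) forms, the cutoff, and the parameter values are standard choices rather than the missing original lines:

    x = 0:.01:2;                       % singular-value axis
    N = 500; eta = .1;                 % data size and step size
    rf_LMS = 1-(1-eta*x.^2/N).^N;      % KLMS (gradient descent) reg-function
    lam = .1;
    rf_Tik = x.^2./(x.^2+lam);         % Tikhonov reg-function
    rf_Tru = double(x >= .3);          % truncated SVD (PCA) reg-function
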
/ch3_codes/channelEq/APA1.m:
--------------------------------------------------------------------------------
1 | function [weightVector,biasTerm,learningCurve]= ...
2 | APA1(K,trainInput,trainTarget,testInput,testTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function APA1:
5 | %Affine projection algorithm (APA-1)
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %K: the number of most recent observations used in each update
10 | %      (the order of the affine projection)
11 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
12 | % is the number of training data
13 | %trainTarget: desired signal for training trainSize*1
14 | %
15 | %testInput: testing input for calculating the learning curve,
16 | % inputDimension*testSize, testSize is the number of test data
17 | %testTarget: desired signal for testing testSize*1
18 | %
19 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
20 | %stepSizeBias: learning rate for bias term, set to zero to disable
21 | %
22 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
23 | %
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %weightVector: the linear coefficients
27 | %biasTerm: the bias term
28 | %learningCurve: trainSize*1 used for learning curve
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 | %Notes: none.
31 |
32 | % memory initialization
33 | [inputDimension,trainSize] = size(trainInput);
34 |
35 | if flagLearningCurve
36 | learningCurve = zeros(trainSize,1);
37 | learningCurve(1:K) = mean(testTarget.^2)*ones(K,1);
38 | else
39 | learningCurve = [];
40 | end
41 |
42 | weightVector = zeros(inputDimension,1);
43 | biasTerm = 0;
44 |
45 | % training
46 | for n = 1:trainSize-K+1
47 | networkOutput = trainInput(:,n:n+K-1)'*weightVector + biasTerm;
48 | aprioriErr = trainTarget(n:n+K-1) - networkOutput;
49 | weightVector = weightVector + stepSizeWeightVector*trainInput(:,n:n+K-1)*aprioriErr;
50 | biasTerm = biasTerm + stepSizeBias*sum(aprioriErr);
51 | if flagLearningCurve
52 | % testing
53 | err = testTarget -(testInput'*weightVector + biasTerm);
54 | learningCurve(n+K-1) = mean(err.^2);
55 | end
56 | end
57 |
58 | return
59 |
--------------------------------------------------------------------------------
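
A usage sketch for APA1 above (hypothetical data): K sets how many of the most recent input/target pairs enter each update, and the learning curve is evaluated on the test set.

    X = randn(5, 300); T = randn(300, 1);        % placeholder training data
    X_te = randn(5, 50); T_te = randn(50, 1);    % placeholder test data
    K = 10;
    [weightVector, biasTerm, learningCurve] = APA1(K, X, T, X_te, T_te, 0.05, 0, 1);
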
/ch3_codes/channelEq/APA1s.m:
--------------------------------------------------------------------------------
1 | function [weightVector,biasTerm,learningCurve]= ...
2 | APA1s(K,trainInput,trainTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function APA1s:
5 | %Affine projection algorithm (APA-1); the learning curve is computed from the a priori errors
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %K: the number of most recent observations used in each update
10 | %      (the order of the affine projection)
11 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
12 | % is the number of training data
13 | %trainTarget: desired signal for training trainSize*1
14 | %
15 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
16 | %stepSizeBias: learning rate for bias term, set to zero to disable
17 | %
18 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
19 | %
20 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
21 | %Output:
22 | %weightVector: the linear coefficients
23 | %biasTerm: the bias term
24 | %learningCurve: trainSize*1 used for learning curve
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
26 | %Notes: none.
27 |
28 | % memory initialization
29 | [inputDimension,trainSize] = size(trainInput);
30 |
31 | if flagLearningCurve
32 | learningCurve = zeros(trainSize,1);
33 | learningCurve(1:K) = mean(trainTarget.^2)*ones(K,1);
34 | else
35 | learningCurve = [];
36 | end
37 |
38 | weightVector = zeros(inputDimension,1);
39 | biasTerm = 0;
40 |
41 | % training
42 | for n = 1:trainSize-K+1
43 | networkOutput = trainInput(:,n:n+K-1)'*weightVector + biasTerm;
44 | aprioriErr = trainTarget(n:n+K-1) - networkOutput;
45 | weightVector = weightVector + stepSizeWeightVector*trainInput(:,n:n+K-1)*aprioriErr;
46 | biasTerm = biasTerm + stepSizeBias*sum(aprioriErr);
47 | if flagLearningCurve
48 | learningCurve(n+K-1) = mean(aprioriErr.^2);
49 | end
50 | end
51 | return
52 |
--------------------------------------------------------------------------------
/ch3_codes/channelEq/LMS1.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS1(trainInput,trainTarget,testInput,testTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS1:
5 | %Normal least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %testInput: testing input for calculating the learning curve,
14 | % inputDimension*testSize, testSize is the number of test data
15 | %testTarget: desired signal for testing testSize*1
16 | %
17 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
18 | %stepSizeBias: learning rate for bias term, set to zero to disable
19 | %
20 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
21 | %
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Output:
24 | %aprioriErr: apriori error
25 | %weightVector: the linear coefficients
26 | %biasTerm: the bias term
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: none.
30 |
31 | % memory initialization
32 | [inputDimension,trainSize] = size(trainInput);
33 |
34 | if flagLearningCurve
35 | learningCurve = zeros(trainSize,1);
36 | else
37 | learningCurve = [];
38 | end
39 |
40 | weightVector = zeros(inputDimension,1);
41 | biasTerm = 0;
42 | aprioriErr = zeros(trainSize,1);
43 |
44 | % training
45 | for n = 1:trainSize
46 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
47 | aprioriErr(n) = trainTarget(n) - networkOutput;
48 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*trainInput(:,n);
49 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
50 | if flagLearningCurve
51 | % testing
52 | err = testTarget -(testInput'*weightVector + biasTerm);
53 | learningCurve(n) = mean(err.^2);
54 | end
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/ch3_codes/channelEq/LMS1s.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS1s(trainInput,trainTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS1s:
5 | %Normal least mean square algorithms
6 | %The learning curve is computed from the a priori error
7 | %
8 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
9 | %Input:
10 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
11 | % is the number of training data
12 | %trainTarget: desired signal for training trainSize*1
13 | %
14 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
15 | %stepSizeBias: learning rate for bias term, set to zero to disable
16 | %
17 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
18 | %
19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
20 | %Output:
21 | %aprioriErr: apriori error
22 | %weightVector: the linear coefficients
23 | %biasTerm: the bias term
24 | %learningCurve: trainSize*1 used for learning curve
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
26 | %Notes: none.
27 |
28 | % memory initialization
29 | [inputDimension,trainSize] = size(trainInput);
30 |
31 | weightVector = zeros(inputDimension,1);
32 | biasTerm = 0;
33 | aprioriErr = zeros(trainSize,1);
34 |
35 | % training
36 | for n = 1:trainSize
37 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
38 | aprioriErr(n) = trainTarget(n) - networkOutput;
39 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*trainInput(:,n);
40 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
41 | end
42 |
43 | if flagLearningCurve
44 | learningCurve = aprioriErr.^2;
45 | else
46 | learningCurve = [];
47 | end
48 | return
49 |
--------------------------------------------------------------------------------
/ch3_codes/channelEq/LMS2.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS2(trainInput,trainTarget,testInput,testTarget,regularizationFactor,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS2:
5 | %Normalized least mean square algorithm
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %testInput: testing input for calculating the learning curve,
14 | % inputDimension*testSize, testSize is the number of test data
15 | %testTarget: desired signal for testing testSize*1
16 | %
17 | %regularizationFactor: regularization factor in Newton's recursion
18 | %
19 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
20 | %stepSizeBias: learning rate for bias term, set to zero to disable
21 | %
22 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
23 | %
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %aprioriErr: apriori error
27 | %weightVector: the linear coefficients
28 | %biasTerm: the bias term
29 | %learningCurve: trainSize*1 used for learning curve
30 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
31 | %Notes: none.
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 |
36 | if flagLearningCurve
37 | learningCurve = zeros(trainSize,1);
38 | else, learningCurve = []; end
39 |
40 | weightVector = zeros(inputDimension,1);
41 | biasTerm = 0;
42 | aprioriErr = zeros(trainSize,1);
43 |
44 | % training
45 | for n = 1:trainSize
46 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
47 | aprioriErr(n) = trainTarget(n) - networkOutput;
48 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*trainInput(:,n)/(sum(trainInput(:,n).^2) + regularizationFactor);
49 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
50 | if flagLearningCurve
51 | % testing
52 | err = testTarget -(testInput'*weightVector + biasTerm);
53 | learningCurve(n) = mean(err.^2);
54 | end
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/ch3_codes/channelEq/gramMatrix.m:
--------------------------------------------------------------------------------
1 | function G = gramMatrix(data,typeKernel,paramKernel)
2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 | %Function gramMatrix
4 | %Calculate the gram matrix of data
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %inputs:
7 | %data: inputDimension*dataSize, the output matrix will be
8 | % dataSize-by-dataSize
9 | %typeKernel: 'Gauss','Poly'
10 | %paramKernel: parameter used in kernel
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %outputs:
13 | %G: GramMatrix of data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | %Notes: none.
16 |
17 | [inputDimension,dataSize] = size(data);
18 | G = zeros(dataSize,dataSize);
19 |
20 | for ii = 1:dataSize
21 | jj = ii:dataSize;
22 | G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
23 | G(jj,ii) = G(ii,jj);
24 | end
25 | return
--------------------------------------------------------------------------------
/ch3_codes/channelEq/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
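%ker_eval: evaluate the kernel between the columns of X1 and X2.
%X1, X2: inputDimension*N1 and inputDimension*N2; either N1 == N2 (evaluated
%        column by column) or one of the two is a single column (evaluated
%        against every column of the other).
%ker_type: 'Gauss' or 'Poly'
%ker_param: kernel size h for 'Gauss', y = exp(-h*||x1-x2||^2); order p for
%        'Poly', y = (1 + x1'*x2)^p
%y: column vector of kernel values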
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
13 | else
14 |         warning('ker_eval: dimension mismatch between X1 and X2')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 |         warning('ker_eval: dimension mismatch between X1 and X2')
26 | end
27 | end
28 | return
29 |
--------------------------------------------------------------------------------
/ch3_codes/mg_prediction/KLMS1.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,weightVector,biasTerm,learningCurve] = ...
2 | KLMS1(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Input:
5 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
6 | % trainSize is the number of training data
7 | %trainTarget: desired signal for training trainSize*1
8 | %
9 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
10 | %testTarget: desired signal for testing testSize*1
11 | %
12 | %typeKernel: 'Gauss', 'Poly'
13 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
14 | %
15 | %stepSizeFeatureVector: learning rate for kernel part
16 | %stepSizeWeightVector: learning rate for linear part, set to zero to disable
17 | %stepSizeBias: learning rate for bias term, set to zero to disable
18 | %
19 | %flagLearningCurve: control if calculating the learning curve
20 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
21 | %Output:
22 | %expansionCoefficient: consisting of coefficients of the kernel expansion
23 | %weightVector: the linear weight vector
24 | %biasTerm: the bias term
25 | %learningCurve: trainSize*1 used for learning curve
26 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
27 | %Notes: none.
28 |
29 |
30 | % memory initialization
31 | trainSize = length(trainTarget);
32 | testSize = length(testTarget);
33 |
34 | expansionCoefficient = zeros(trainSize,1);
35 |
36 | if flagLearningCurve
37 | learningCurve = zeros(trainSize,1);
38 | learningCurve(1) = mean(testTarget.^2);
39 | else
40 | learningCurve = [];
41 | end
42 |
43 | % n=1 init
44 | aprioriErr = trainTarget(1);
45 | weightVector = stepSizeWeightVector*aprioriErr*trainInput(:,1);
46 | biasTerm = stepSizeBias*aprioriErr;
47 | expansionCoefficient(1) = stepSizeFeatureVector*aprioriErr;
48 | % start
49 | for n = 2:trainSize
50 | % training
51 | % filtering
52 | ii = 1:n-1;
53 | networkOutput = expansionCoefficient(ii)'*ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
54 | aprioriErr = trainTarget(n) - networkOutput;
55 | % updating
56 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n);
57 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
58 | expansionCoefficient(n) = stepSizeFeatureVector*aprioriErr;
59 |
60 | if flagLearningCurve
61 | % testing
62 | y_te = zeros(testSize,1);
63 | for jj = 1:testSize
64 | ii = 1:n;
65 | y_te(jj) = expansionCoefficient(ii)'*ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
66 | end
67 | err = testTarget - y_te;
68 | learningCurve(n) = mean(err.^2);
69 | end
70 | end
71 |
72 | return
73 |
74 |
--------------------------------------------------------------------------------
/ch3_codes/mg_prediction/KRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | KRLS(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %Kernel recursive least squares with exponential weighting
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %regularizationFactor: regularization parameter in the regularized least-squares cost
19 | %
20 | %forgettingFactor: exponential weighting (forgetting) factor
21 | %
22 | %flagLearningCurve: control if calculating the learning curve
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %baseDictionary: dictionary stores all the bases centers
26 | %expansionCoefficient: coefficients of the kernel expansion
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: Since bases are by default all the training data, it is skipped
30 | % here.
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 | testSize = length(testTarget);
36 |
37 | expansionCoefficient = zeros(trainSize,1);
38 |
39 | if flagLearningCurve
40 | learningCurve = zeros(trainSize,1);
41 | learningCurve(1) = mean(testTarget.^2);
42 | else
43 | learningCurve = [];
44 | end
45 |
46 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
47 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
48 | % start training
49 | for n = 2:trainSize
50 | ii = 1:n-1;
51 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
52 | f_vector = Q_matrix*k_vector;
53 | s = 1/(regularizationFactor*forgettingFactor^(n)+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
54 | Q_tmp = zeros(n,n);
55 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
56 | Q_tmp(ii,n) = -f_vector*s;
57 | Q_tmp(n,ii) = Q_tmp(ii,n)';
58 | Q_tmp(n,n) = s;
59 | Q_matrix = Q_tmp;
60 |
61 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
62 |
63 | % updating
64 | expansionCoefficient(n) = s*error;
65 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
66 |
67 | if flagLearningCurve
68 | % testing
69 | y_te = zeros(testSize,1);
70 | for jj = 1:testSize
71 | ii = 1:n;
72 | y_te(jj) = expansionCoefficient(ii)'*...
73 | ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel);
74 | end
75 | err = testTarget - y_te;
76 | learningCurve(n) = mean(err.^2);
77 | end
78 | end
79 |
80 | return
--------------------------------------------------------------------------------
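
A usage sketch for KRLS above (hypothetical data): a regularization factor of 0.01 and a forgetting factor of 1 give the plain, unweighted kernel RLS recursion.

    X = randn(5, 200); T = randn(200, 1);        % placeholder training data
    X_te = randn(5, 50); T_te = randn(50, 1);    % placeholder test data
    [expansionCoefficient, learningCurve] = KRLS(X, T, X_te, T_te, 'Gauss', 1, 0.01, 1, 1);
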
/ch3_codes/mg_prediction/LMS1.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS1(trainInput,trainTarget,testInput,testTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS1:
5 | %Normal least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %testInput: testing input for calculating the learning curve,
14 | % inputDimension*testSize, testSize is the number of test data
15 | %testTarget: desired signal for testing testSize*1
16 | %
17 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
18 | %stepSizeBias: learning rate for bias term, set to zero to disable
19 | %
20 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
21 | %
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Output:
24 | %aprioriErr: apriori error
25 | %weightVector: the linear coefficients
26 | %biasTerm: the bias term
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: none.
30 |
31 | % memory initialization
32 | [inputDimension,trainSize] = size(trainInput);
33 |
34 | if flagLearningCurve
35 | learningCurve = zeros(trainSize,1);
36 | else
37 | learningCurve = [];
38 | end
39 |
40 | weightVector = zeros(inputDimension,1);
41 | biasTerm = 0;
42 | aprioriErr = zeros(trainSize,1);
43 |
44 | % training
45 | for n = 1:trainSize
46 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
47 | aprioriErr(n) = trainTarget(n) - networkOutput;
48 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*trainInput(:,n);
49 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
50 | if flagLearningCurve
51 | % testing
52 | err = testTarget -(testInput'*weightVector + biasTerm);
53 | learningCurve(n) = mean(err.^2);
54 | end
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/ch3_codes/mg_prediction/MK30.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch3_codes/mg_prediction/MK30.mat
--------------------------------------------------------------------------------
/ch3_codes/mg_prediction/gramMatrix.m:
--------------------------------------------------------------------------------
1 | function G = gramMatrix(data,typeKernel,paramKernel)
2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 | %Function gramMatrix
4 | %Calculate the gram matrix of data
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %inputs:
7 | %data: inputDimension*dataSize, the output matrix will be
8 | % dataSize-by-dataSize
9 | %typeKernel: 'Gauss','Poly'
10 | %paramKernel: parameter used in kernel
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %outputs:
13 | %G: GramMatrix of data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | %Notes: none.
16 |
17 | [inputDimension,dataSize] = size(data);
18 | G = zeros(dataSize,dataSize);
19 |
20 | for ii = 1:dataSize
21 | jj = ii:dataSize;
22 | G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
23 | G(jj,ii) = G(ii,jj);
24 | end
25 | return
--------------------------------------------------------------------------------
/ch3_codes/mg_prediction/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
13 | else
14 |         warning('ker_eval: dimension mismatch between X1 and X2')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 |         warning('ker_eval: dimension mismatch between X1 and X2')
26 | end
27 | end
28 | return
29 |
--------------------------------------------------------------------------------
/ch3_codes/noiseCancelation/LMS2.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS2(trainInput,trainTarget,feedbackDimension,regularizationFactor,stepSizeWeightVector,stepSizeBias)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS2:
5 | %Normalized least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize
10 | % BUT you have to add the feedback dimension onto it to form
11 | % the real regressors!!
12 | % data format according to the problem, say the input may
13 | % accept feedback from the output in adaptive noise
14 | % cancellation!!!
15 | %feedbackDimension: the number of output feedback as the input
16 | %trainTarget: desired signal for training trainSize*1
17 | %
18 | %regularizationFactor: regularization factor in Newton's recursion
19 | %
20 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
21 | %stepSizeBias: learning rate for bias term, set to zero to disable
22 | %
23 | %(the learning curve is always computed from the squared a priori error)
24 | %
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
26 | %Output:
27 | %aprioriErr: apriori error
28 | %weightVector: the linear coefficients
29 | %biasTerm: the bias term
30 | %learningCurve: trainSize*1 used for learning curve
31 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
32 | %Notes: none.
33 |
34 | % memory initialization
35 | [inputDimension,trainSize] = size(trainInput);
36 | trainInput(:,trainSize+1) = zeros(inputDimension,1);
37 |
38 | learningCurve = zeros(trainSize,1);
39 |
40 | weightVector = zeros(inputDimension+feedbackDimension,1);
41 | biasTerm = 0;
42 |
43 | input = [trainInput(:,1);zeros(feedbackDimension,1)];
44 | networkOutput = zeros(trainSize,1);
45 | aprioriErr = zeros(trainSize,1);
46 | % training
47 | for n = 1:trainSize
48 | networkOutput(n) = weightVector'*input + biasTerm;
49 | aprioriErr(n) = trainTarget(n) - networkOutput(n);
50 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*input/(sum(input.^2) + regularizationFactor);
51 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
52 | learningCurve(n) = aprioriErr(n)^2;
53 | if feedbackDimension > n
54 | input = [trainInput(:,n+1);networkOutput(n:-1:1);zeros(feedbackDimension-n,1)];
55 | else
56 | input = [trainInput(:,n+1);networkOutput(n:-1:n-feedbackDimension+1)];
57 | end
58 | end
59 |
60 | return
61 |
--------------------------------------------------------------------------------
/ch3_codes/noiseCancelation/fmri.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch3_codes/noiseCancelation/fmri.mat
--------------------------------------------------------------------------------
/ch3_codes/noiseCancelation/gramMatrix.m:
--------------------------------------------------------------------------------
1 | function G = gramMatrix(data,typeKernel,paramKernel)
2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 | %Function gramMatrix
4 | %Calculate the gram matrix of data
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %inputs:
7 | %data: inputDimension*dataSize, the output matrix will be
8 | % dataSize-by-dataSize
9 | %typeKernel: 'Gauss','Poly'
10 | %paramKernel: parameter used in kernel
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %outputs:
13 | %G: GramMatrix of data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | %Notes: none.
16 |
17 | [inputDimension,dataSize] = size(data);
18 | G = zeros(dataSize,dataSize);
19 |
20 | for ii = 1:dataSize
21 | jj = ii:dataSize;
22 | G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
23 | G(jj,ii) = G(ii,jj);
24 | end
25 | return
--------------------------------------------------------------------------------
/ch3_codes/noiseCancelation/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
13 | else
14 |         warning('ker_eval: dimension mismatch between X1 and X2')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 |         warning('ker_eval: dimension mismatch between X1 and X2')
26 | end
27 | end
28 | return
29 |
--------------------------------------------------------------------------------
/ch3_codes/noiseCancelation/sparseKLMS1.m:
--------------------------------------------------------------------------------
1 | function [predictionError,expansionCoefficient,dictionary,weightVector,biasTerm,learningCurve] = ...
2 | sparseKLMS1(trainInput,trainTarget,feedbackDimension,typeKernel,paramKernel,...
3 | stepSizeFeatureVector,stepSizeWeightVector,stepSizeBias,toleranceDistance,tolerancePredictError)
4 | %Function sparseKLMS1: kernel least mean square with novel criteria
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %Input:
7 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize is the number of
8 | % training data
9 | %trainTarget: desired signal for training trainSize*1
10 | %typeKernel: 'Gauss', 'Poly'
11 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
12 | %stepSizeFeatureVector: learning rate for kernel part
13 | %stepSizeWeightVector: learning rate for linear part, set to zero to disable
14 | %stepSizeBias: learning rate for bias term, set to zero to disable
15 | %feedbackDimension: number of past network outputs fed back as part of the input
16 | %toleranceDistance: tolerance for the closeness of the new data to the dictionary
17 | %tolerancePredictError: tolerance for the apriori error
18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
19 | %Output:
20 | %expansionCoefficient: coefficients of the kernel expansion (each one is
21 | %            stepSizeFeatureVector times the corresponding prediction error)
22 | %dictionary: bases used in the kernel expansion in
23 | % the training set
24 | %weightVector: the linear coefficients
25 | %biasTerm: the bias term
26 | %learningCurve: trainSize*1 used for learning curve
27 |
28 | % memory initialization
29 | trainSize = length(trainTarget);
30 |
31 | learningCurve = zeros(trainSize,1);
32 | learningCurve(1) = trainTarget(1)^2;
33 |
34 | input = [trainInput(:,1);zeros(feedbackDimension,1)];
35 | predictionError = zeros(trainSize,1); % preallocate the a-priori error sequence
36 | % n=1 init
37 | predictionError(1) = trainTarget(1);
38 | expansionCoefficient = stepSizeFeatureVector*predictionError(1);
39 | weightVector = stepSizeWeightVector*predictionError(1)*input;
40 | biasTerm = stepSizeBias*predictionError(1);
41 | networkOutput = zeros(trainSize,1);
42 | toleranceDistance = toleranceDistance^2;
43 |
44 | % dictionary
45 | dictionary = input;
46 | dictSize = 1;
47 |
48 | % start
49 | for n=2:trainSize
50 | % training
51 |
52 | % comparing the distance between trainInput(:,n) and the dictionary
53 | if feedbackDimension > n
54 | input = [trainInput(:,n);networkOutput(n-1:-1:1);zeros(feedbackDimension-n+1,1)];
55 | else
56 | input = [trainInput(:,n);networkOutput(n-1:-1:n-feedbackDimension)];
57 | end
58 |
59 | distance2dictionary = min(sum((input*ones(1,dictSize) - dictionary).^2));
60 | networkOutput(n) = expansionCoefficient*ker_eval(input,dictionary,typeKernel,paramKernel) + weightVector'*input + biasTerm;
61 | predictionError(n) = trainTarget(n) - networkOutput(n);
62 |
63 | if (distance2dictionary < toleranceDistance)
64 | learningCurve(n) = learningCurve(n-1);
65 | continue;
66 | end
67 | if (abs(predictionError(n)) < tolerancePredictError)
68 | learningCurve(n) = learningCurve(n-1);
69 | continue;
70 | end
71 | % updating
72 | dictSize = dictSize + 1;
73 | dictionary(:,dictSize) = input;
74 | expansionCoefficient(dictSize) = stepSizeFeatureVector*predictionError(n);
75 |
76 | weightVector = weightVector + stepSizeWeightVector*predictionError(n)*input;
77 | biasTerm = biasTerm + stepSizeBias*predictionError(n);
78 |
79 | learningCurve(n) = predictionError(n)^2;
80 |
81 | end
82 |
83 | return
84 |
85 |
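A hypothetical call for the noise-cancellation setting; the variable names u and d and every numerical value below are placeholders, not the settings used in the chapter's experiments:

    % u: inputDimension-by-N reference noise, d: N-by-1 corrupted signal
    [err, coeff, dict, w, b, mse] = sparseKLMS1(u, d, 5, 'Gauss', 1, ...
        0.5, 0.01, 0.01, 0.05, 0.01);
    fprintf('final dictionary size: %d\n', size(dict, 2));
    plot(10*log10(mse)); xlabel('iteration'); ylabel('squared error (dB)');

The two tolerances implement the novelty criterion: a sample is added to the dictionary only if it is far enough from the existing centers and its a-priori error is large enough.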
--------------------------------------------------------------------------------
/ch4_codes/channelEq/gramMatrix.m:
--------------------------------------------------------------------------------
1 | function G = gramMatrix(data,typeKernel,paramKernel)
2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 | %Function gramMatrix
4 | %Calculate the gram matrix of data
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %inputs:
7 | %data: inputDimension*dataSize, the output matrix will be
8 | % dataSize-by-dataSize
9 | %typeKernel: 'Gauss','Poly'
10 | %paramKernel: parameter used in kernel
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %outputs:
13 | %G: GramMatrix of data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | %Notes: none.
16 |
17 | [inputDimension,dataSize] = size(data);
18 | G = zeros(dataSize,dataSize);
19 |
20 | for ii = 1:dataSize
21 | jj = ii:dataSize;
22 | G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
23 | G(jj,ii) = G(ii,jj);
24 | end
25 | return
--------------------------------------------------------------------------------
/ch4_codes/channelEq/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
13 | else
14 | warning('ker_eval: incompatible input dimensions')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('ker_eval: incompatible input dimensions')
26 | end
27 | end
28 | return
29 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/PART1.m:
--------------------------------------------------------------------------------
1 | %Copyright
2 | %Weifeng Liu CNEL
3 | %July 4 2008
4 | %
5 | %Description:
6 | %Illustrate GPR
7 | %
8 | %Usage:
9 | %Ch4, model selection
10 | %
11 |
12 | addpath('./gpml-matlab/gpml') % forward slashes resolve on all platforms
13 |
14 | close all
15 | clear
16 | clc
17 |
18 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
19 | % generate the data
20 | n = 100;
21 | %rand('state',18);
22 | %randn('state',20);
23 | covfunc = {'covSum', {'covSEiso','covNoise'}};
24 | loghyper = [log(1.0); log(1.0); log(0.1)];
25 | x = 30*(rand(n,1)-0.5);
26 | y = chol(feval(covfunc{:}, loghyper, x))'*randn(n,1); % Cholesky decomp.
27 |
28 | figure(1)
29 | plot(x, y, 'k+', 'MarkerSize', 10);
30 | set(gca, 'FontSize', 14);
31 | set(gca, 'FontName', 'Arial');
32 | xlabel('x')
33 | ylabel('y')
34 |
35 | xstar = linspace(-15, 15, 201)';
36 |
37 |
38 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
39 | % estimate the parameters and plot the regression result
40 | loghyper = [log(1.0); log(1.0); log(0.1)] + 0.05*randn(3,1);
41 | loghyper = minimize(loghyper, 'gpr', -100, covfunc, x, y);
42 | disp(exp(loghyper))
43 | [mu S2] = gpr(loghyper, covfunc, x, y, xstar);
44 | S2 = S2 - exp(2*loghyper(3));
45 | f = [mu+2*sqrt(S2);flipdim(mu-2*sqrt(S2),1)];
46 | figure
47 | fill([xstar; flipdim(xstar,1)], f, [7 7 7]/8, 'EdgeColor', [7 7 7]/8);
48 | hold on
49 | plot(xstar,mu,'k-','LineWidth',2);
50 | plot(x, y, 'k+', 'MarkerSize', 17);
51 | set(gca, 'FontSize', 14);
52 | set(gca, 'FontName', 'Arial');
53 | xlabel('x')
54 | ylabel('f')
55 |
56 |
57 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
58 | % plot the contour of marginal likelihood in terms of kernel size and noise
59 | % variance
60 | gridSize = 40; % avoid shadowing the built-in function "length"
61 | ell_vector = linspace(0.5, 1.2, gridSize);
62 | nv_vector = linspace(0.08,0.12,gridSize);
63 | nlml = zeros(gridSize, gridSize);
64 |
65 | for ii = 1:gridSize
66 |     for jj = 1:gridSize
67 |         nlml(ii,jj) = gpr([log(ell_vector(ii)); log(1.0); log(nv_vector(jj))], covfunc, x, y);
68 |     end
69 | end
70 |
71 | figure
72 | contour(ell_vector, nv_vector, exp(-nlml)', 10) % transpose so rows index the noise-variance (y) axis
73 | set(gca, 'FontSize', 14);
74 | set(gca, 'FontName', 'Arial');
75 | xlabel('kernel size')
76 | ylabel('noise variance')
77 | [c1, i1] = min(nlml);
78 | [c2, i2] = min(c1);
79 | hold on
80 | plot(ell_vector(i1(i2)), nv_vector(i2),'k+','MarkerSize',10) % i1(i2): kernel-size index, i2: noise-variance index of the minimum
--------------------------------------------------------------------------------
/ch4_codes/gpr/PART2.m:
--------------------------------------------------------------------------------
1 | %Copyright
2 | %Weifeng Liu CNEL
3 | %July 4 2008
4 | %
5 | %Description:
6 | %Evaluate the effectiveness of Maximum marginal likelihood
7 | %
8 | %Usage:
9 | %Ch4, model selection
10 | %
11 |
12 | addpath('./gpml-matlab/gpml') % forward slashes resolve on all platforms
13 |
14 | close all
15 | clear
16 | clc
17 |
18 | MC = 100;
19 |
20 | theta = zeros(3,MC); % semicolon added to avoid echoing the 3-by-100 matrix
21 | n = 100;
22 | covfunc = {'covSum', {'covSEiso','covNoise'}};
23 | for mc = 1:MC
24 |
25 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
26 | % generate the data
27 |
28 |
29 | loghyper = [log(1.0); log(1.0); log(0.1)];
30 | x = 30*(rand(n,1)-0.5);
31 | y = chol(feval(covfunc{:}, loghyper, x))'*randn(n,1); % Cholesky decomp.
32 |
33 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
34 | % estimate the parameters and plot the regression result
35 | loghyper = [log(1.0); log(1.0); log(0.1)] + 0.05*randn(3,1);
36 | loghyper = minimize(loghyper, 'gpr', -100, covfunc, x, y);
37 | theta(:, mc) = exp(loghyper);
38 | end
39 |
40 | mean(theta(1,:))
41 | std(theta(1,:))
42 |
43 | mean(theta(2,:))
44 | std(theta(2,:))
45 |
46 | mean(theta(3,:))
47 | std(theta(3,:))
48 |
49 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/Contents.m:
--------------------------------------------------------------------------------
1 | % gpml: code from Rasmussen & Williams: Gaussian Processes for Machine Learning
2 | % date: 2007-07-25.
3 | %
4 | % approxEP.m - the approximation method for Expectation Propagation
5 | % approxLA.m - the approximation method for Laplace's approximation
6 | % approximations.m - help for approximation methods
7 | % binaryEPGP.m - outdated, the EP approx for binary GP classification
8 | % binaryGP.m - binary Gaussian process classification
9 | % binaryLaplaceGP.m - outdated, Laplace's approx for binary GP classification
10 | %
11 | % covConst.m - covariance for constant functions
12 | % covFunctions.m - help file with overview of covariance functions
13 | % covLINard.m - linear covariance function with ard
14 | % covLINone.m - linear covariance function
15 | % covMatern3iso.m - Matern covariance function with nu=3/2
16 | % covMatern5iso.m - Matern covariance function with nu=5/2
17 | % covNNone.m - neural network covariance function
18 | % covNoise.m - independent covariance function (ie white noise)
19 | % covPeriodic.m - covariance for smooth periodic function, with unit period
20 | % covProd.m - function for multiplying other covariance functions
21 | % covRQard.m - rational quadratic covariance function with ard
22 | % covRQiso.m - isotropic rational quadratic covariance function
23 | % covSEard.m - squared exponential covariance function with ard
24 | % covSEiso.m - isotropic squared exponential covariance function
25 | % covSum.m - function for adding other covariance functions
26 | %
27 | % cumGauss.m - cumulative Gaussian likelihood function
28 | % gpr.m - Gaussian process regression with general covariance
29 | % function
30 | % gprSRPP.m - Implements SR and PP approximations to GPR
31 | % likelihoods.m - help function for classification likelihoods
32 | % logistic.m - logistic likelihood function
33 | % minimize.m - Minimize a differentiable multivariate function
34 | % solve_chol.c - Solve linear equations from the Cholesky factorization
35 | % should be compiled into a mex file
36 | % solve_chol.m - A matlab implementation of the above, used only in case
37 | % the mex file wasn't generated (not very efficient)
38 | % sq_dist.c - Compute a matrix of all pairwise squared distances
39 | % should be compiled into a mex file
40 | % sq_dist.m - A matlab implementation of the above, used only in case
41 | % the mex file wasn't generated (not very efficient)
42 | %
43 | % See also the help for the demonstration scripts in the gpml-demo directory
44 | %
45 | % Copyright (c) 2005, 2006 by Carl Edward Rasmussen and Chris Williams
46 |
47 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/Copyright:
--------------------------------------------------------------------------------
1 |
2 | Software that implements
3 |
4 | GAUSSIAN PROCESS REGRESSION AND CLASSIFICATION
5 |
6 | Copyright (c) 2005 - 2007 by Carl Edward Rasmussen and Chris Williams
7 |
8 | Permission is granted for anyone to copy, use, or modify these programs for
9 | purposes of research or education, provided this copyright notice is retained,
10 | and note is made of any changes that have been made.
11 |
12 | These programs are distributed without any warranty, express or
13 | implied. As these programs were written for research purposes only, they
14 | have not been tested to the degree that would be advisable in any
15 | important application. All use of these programs is entirely at the
16 | user's own risk.
17 |
18 | The code and associated documentation are available from
19 |
20 | http://www.GaussianProcess.org/gpml/code
21 |
22 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/Makefile:
--------------------------------------------------------------------------------
1 | all: sq_dist.mexglx solve_chol.mexglx
2 |
3 | sq_dist.mexglx: sq_dist.c
4 | mex sq_dist.c
5 |
6 | solve_chol.mexglx: solve_chol.c
7 | mex solve_chol.c
8 |
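The .mexglx targets are specific to 32-bit Linux; on other platforms the same sources can be compiled directly from the MATLAB prompt in this directory:

    mex sq_dist.c
    mex solve_chol.c

If no MEX files are built, the plain sq_dist.m and solve_chol.m implementations are used instead (as noted in Contents.m), at some cost in speed.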
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/approxLA.m:
--------------------------------------------------------------------------------
1 | function [alpha, sW, L, nlZ, dnlZ] = approxLA(hyper, covfunc, lik, x, y)
2 |
3 | % Laplace approximation to the posterior Gaussian Process.
4 | % The function takes a specified covariance function (see covFunction.m) and
5 | % likelihood function (see likelihoods.m), and is designed to be used with
6 | % binaryGP.m. See also approximations.m.
7 | %
8 | % Copyright (c) 2006, 2007 Carl Edward Rasmussen and Hannes Nickisch 2007-03-29
9 |
10 | persistent best_alpha best_nlZ % copy of the best alpha and its obj value
11 | tol = 1e-6; % tolerance for when to stop the Newton iterations
12 |
13 | n = size(x,1);
14 | K = feval(covfunc{:}, hyper, x); % evaluate the covariance matrix
15 |
16 | if any(size(best_alpha) ~= [n,1]) % find a good starting point for alpha and f
17 | f = zeros(n,1); alpha = f; % start at zero
18 | [lp,dlp,d2lp] = feval(lik,y,f,'deriv'); W=-d2lp;
19 | Psi_new = lp; best_nlZ = Inf;
20 | else
21 | alpha = best_alpha; f = K*alpha; % try best so far
22 | [lp,dlp,d2lp] = feval(lik,y,f,'deriv'); W=-d2lp;
23 | Psi_new = -alpha'*f/2 + lp;
24 | if Psi_new < -n*log(2) % if zero is better ..
25 | f = zeros(n,1); alpha = f; % .. go back
26 | [lp,dlp,d2lp] = feval(lik,y,f,'deriv'); W=-d2lp;
27 | Psi_new = -alpha'*f/2 + lp;
28 | end
29 | end
30 | Psi_old = -Inf; % make sure while loop starts
31 |
32 | while Psi_new - Psi_old > tol % begin Newton's iterations
33 | Psi_old = Psi_new; alpha_old = alpha;
34 | sW = sqrt(W);
35 | L = chol(eye(n)+sW*sW'.*K); % L'*L=B=eye(n)+sW*K*sW
36 | b = W.*f+dlp;
37 | alpha = b - sW.*solve_chol(L,sW.*(K*b));
38 | f = K*alpha;
39 | [lp,dlp,d2lp,d3lp] = feval(lik,y,f,'deriv'); W=-d2lp;
40 |
41 | Psi_new = -alpha'*f/2 + lp;
42 | i = 0;
43 | while i < 10 && Psi_new < Psi_old % if objective didn't increase
44 | alpha = (alpha_old+alpha)/2; % reduce step size by half
45 | f = K*alpha;
46 | [lp,dlp,d2lp,d3lp] = feval(lik,y,f,'deriv'); W=-d2lp;
47 | Psi_new = -alpha'*f/2 + lp;
48 | i = i+1;
49 | end
50 | end % end Newton's iterations
51 |
52 | sW = sqrt(W); % recalculate L
53 | L = chol(eye(n)+sW*sW'.*K); % L'*L=B=eye(n)+sW*K*sW
54 | nlZ = alpha'*f/2 - lp + sum(log(diag(L))); % approx neg log marg likelihood
55 |
56 | if nlZ < best_nlZ % if best so far ..
57 | best_alpha = alpha; best_nlZ = nlZ; % .. then remember for next call
58 | end
59 |
60 | if nargout >= 4 % do we want derivatives?
61 | dnlZ = zeros(size(hyper)); % allocate space for derivatives
62 | Z = repmat(sW,1,n).*solve_chol(L, diag(sW));
63 | C = L'\(repmat(sW,1,n).*K);
64 | s2 = 0.5*(diag(K)-sum(C.^2,1)').*d3lp;
65 | for j=1:length(hyper)
66 | dK = feval(covfunc{:}, hyper, x, j);
67 | s1 = alpha'*dK*alpha/2-sum(sum(Z.*dK))/2;
68 | b = dK*dlp;
69 | s3 = b-K*(Z*b);
70 | dnlZ(j) = -s1-s2'*s3;
71 | end
72 | end
73 |
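For orientation, the while loop above is the Newton iteration for locating the posterior mode in the Laplace approximation (Algorithm 3.1 of Rasmussen and Williams, 2006): with W = -d2lp it repeatedly evaluates

    f_new = K * inv(I + W*K) * (W*f + dlp)

applying the inverse through the Cholesky factor L of B = I + sqrt(W)*K*sqrt(W), and it halves the step whenever the objective Psi = -alpha'*f/2 + log p(y|f) fails to increase.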
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/approximations.m:
--------------------------------------------------------------------------------
1 | % approximations: Exact inference for Gaussian process classification is
2 | % intractable, and approximations are necessary. Different approximation
3 | % techniques have been implemented, which all rely on a Gaussian approximation
4 | % to the non-Gaussian posterior:
5 | %
6 | % approxEP the Expectation Propagation (EP) algorithm
7 | % approxLA Laplace's method
8 | %
9 | % which are used by the Gaussian process classification function binaryGP.m.
10 | % The interface to the approximation methods is the following:
11 | %
12 | % function [alpha, sW, L, nlZ, dnlZ] = approx..(hyper, covfunc, lik, x, y)
13 | %
14 | % where:
15 | %
16 | % hyper is a column vector of hyperparameters
17 | % covfunc is the name of the covariance function (see covFunctions.m)
18 | % lik is the name of the likelihood function (see likelihoods.m)
19 | % x is a n by D matrix of training inputs
20 | % y is a (column) vector (of size n) of binary +1/-1 targets
21 | % nlZ is the returned value of the negative log marginal likelihood
22 | % dnlZ is a (column) vector of partial derivatives of the negative
23 | % log marginal likelihood wrt each hyperparameter
24 | % alpha is a (sparse or full column vector) containing inv(K)*m, where K
25 | % is the prior covariance matrix and m the approx posterior mean
26 | % sW is a (sparse or full column) vector containing diagonal of sqrt(W)
27 | % the approximate posterior covariance matrix is inv(inv(K)+W)
28 | % L is a (sparse or full) matrix, L = chol(sW*K*sW+eye(n))
29 | %
30 | % Usually, the approximate posterior to be returned admits the form
31 | % N(m=K*alpha, V=inv(inv(K)+W)), where alpha is a vector and W is diagonal;
32 | % if not, then L contains instead -inv(K+inv(W)), and sW is unused.
33 | %
34 | % For more information on the individual approximation methods and their
35 | % implementations, see the separate approx??.m files. See also binaryGP.m
36 | %
37 | % Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2007-06-25.
38 |
39 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/binaryEPGP.m:
--------------------------------------------------------------------------------
1 | function varargout = binaryEPGP(hyper, covfunc, varargin)
2 |
3 | % binaryEPGP - The Expectation Propagation approximation for binary Gaussian
4 | % process classification. Two modes are possible: training or testing: if no
5 | % test cases are supplied, then the approximate negative log marginal
6 | % likelihood and its partial derivatives wrt the hyperparameters is computed;
7 | % this mode is used to fit the hyperparameters. If test cases are given, then
8 | % the test set predictive probabilities are returned. The program is flexible
9 | % in allowing a multitude of covariance functions.
10 | %
11 | % usage: [nlZ, dnlZ ] = binaryEPGP(hyper, covfunc, x, y);
12 | % or: [p, mu, s2, nlZ] = binaryEPGP(hyper, covfunc, x, y, xstar);
13 | %
14 | % where:
15 | %
16 | % hyper is a (column) vector of hyperparameters
17 | % covfunc is the name of the covariance function (see below)
18 | % lik is the name of the likelihood function (see below)
19 | % x is a n by D matrix of training inputs
20 | % y is a (column) vector (of size n) of binary +1/-1 targets
21 | % xstar is a nn by D matrix of test inputs
22 | % nlZ is the returned value of the negative log marginal likelihood
23 | % dnlZ is a (column) vector of partial derivatives of the negative
24 | % log marginal likelihood wrt each log hyperparameter
25 | % p is a (column) vector (of length nn) of predictive probabilities
26 | % mu is a (column) vector (of length nn) of predictive latent means
27 | % s2 is a (column) vector (of length nn) of predictive latent variances
28 | %
29 | % The length of the vector of hyperparameters depends on the covariance
30 | % function, as specified by the "covfunc" input to the function, specifying the
31 | % name of a covariance function. A number of different covariance function are
32 | % implemented, and it is not difficult to add new ones. See "help covFunctions"
33 | % for the details
34 | %
35 | % The function can conveniently be used with the "minimize" function to train
36 | % a Gaussian process, eg:
37 | %
38 | % [hyper, fX, i] = minimize(hyper, 'binaryEPGP', length, 'covSEiso',
39 | % 'logistic', x, y);
40 | %
41 | % Copyright (c) 2004, 2005, 2006, 2007 Carl Edward Rasmussen, 2007-02-19.
42 |
43 | if nargin<4 || nargin>5
44 | disp('Usage: [nlZ, dnlZ ] = binaryEPGP(hyper, covfunc, x, y);')
45 | disp(' or: [p, mu, s2, nlZ] = binaryEPGP(hyper, covfunc, x, y, xstar);')
46 | return
47 | end
48 |
49 | % Note, this function is just a wrapper provided for backward compatibility,
50 | % the functionality is now provided by the more general binaryGP function.
51 |
52 | varargout = cell(nargout, 1); % allocate the right number of output arguments
53 | [varargout{:}] = binaryGP(hyper, 'approxEP', covfunc, 'cumGauss', varargin{:});
54 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/binaryLaplaceGP.m:
--------------------------------------------------------------------------------
1 | function varargout = binaryLaplaceGP(hyper, covfunc, lik, varargin)
2 |
3 | % binaryLaplaceGP - Laplace's approximation for binary Gaussian process
4 | % classification. Two modes are possible: training or testing: if no test
5 | % cases are supplied, then the approximate negative log marginal likelihood
6 | % and its partial derivatives wrt the hyperparameters is computed; this mode is
7 | % used to fit the hyperparameters. If test cases are given, then the test set
8 | % predictive probabilities are returned. The program is flexible in allowing
9 | % several different likelihood functions and a multitude of covariance
10 | % functions.
11 | %
12 | % usage: [nlZ, dnlZ ] = binaryLaplaceGP(hyper, covfunc, lik, x, y);
13 | % or: [p, mu, s2, nlZ] = binaryLaplaceGP(hyper, covfunc, lik, x, y, xstar);
14 | %
15 | % where:
16 | %
17 | % hyper is a (column) vector of hyperparameters
18 | % covfunc is the name of the covariance function (see below)
19 | % lik is the name of the likelihood function (see below)
20 | % x is a n by D matrix of training inputs
21 | % y is a (column) vector (of size n) of binary +1/-1 targets
22 | % xstar is a nn by D matrix of test inputs
23 | % nlZ is the returned value of the negative log marginal likelihood
24 | % dnlZ is a (column) vector of partial derivatives of the negative
25 | % log marginal likelihood wrt each log hyperparameter
26 | % p is a (column) vector (of length nn) of predictive probabilities
27 | % mu is a (column) vector (of length nn) of predictive latent means
28 | % s2 is a (column) vector (of length nn) of predictive latent variances
29 | %
30 | % The length of the vector of log hyperparameters depends on the covariance
31 | % function, as specified by the "covfunc" input to the function, specifying the
32 | % name of a covariance function. A number of different covariance function are
33 | % implemented, and it is not difficult to add new ones. See "help covFunctions"
34 | % for the details.
35 | %
36 | % The shape of the likelihood function is given by the "lik" input to the
37 | % function, specifying the name of the likelihood function. The two implemented
38 | % likelihood functions are:
39 | %
40 | % logistic the logistic function: 1/(1+exp(-x))
41 | % cumGauss the cumulative Gaussian (error function)
42 | %
43 | % The function can conveniently be used with the "minimize" function to train
44 | % a Gaussian process, eg:
45 | %
46 | % [hyper, fX, i] = minimize(hyper, 'binaryLaplaceGP', length, 'covSEiso',
47 | % 'logistic', x, y);
48 | %
49 | % Copyright (c) 2004, 2005, 2006, 2007 by Carl Edward Rasmussen, 2007-02-19.
50 |
51 | if nargin<5 || nargin>6
52 | disp('Usage: [nlZ, dnlZ ] = binaryLaplaceGP(hyper, covfunc, lik, x, y);')
53 | disp(' or: [p, mu, s2, nlZ] = binaryLaplaceGP(hyper, covfunc, lik, x, y, xstar);')
54 | return
55 | end
56 |
57 | % Note, this function is just a wrapper provided for backward compatibility,
58 | % the functionality is now provided by the more general binaryGP function.
59 |
60 | varargout = cell(nargout, 1); % allocate the right number of output arguments
61 | [varargout{:}] = binaryGP(hyper, 'approxLA', covfunc, lik, varargin{:});
62 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covConst.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covConst(logtheta, x, z);
2 |
3 | % covariance function for a constant function. The covariance function is
4 | % parameterized as:
5 | %
6 | % k(x^p,x^q) = 1/s2;
7 | %
8 | % The scalar hyperparameter is:
9 | %
10 | % logtheta = [ log(sqrt(s2)) ]
11 | %
12 | % For more help on design of covariance functions, try "help covFunctions".
13 | %
14 | % (C) Copyright 2006 by Carl Edward Rasmussen (2007-07-24)
15 |
16 | if nargin == 0, A = '1'; return; end % report number of parameters
17 |
18 | is2 = exp(-2*logtheta); % s2 inverse
19 |
20 | if nargin == 2
21 | A = is2;
22 | elseif nargout == 2 % compute test set covariances
23 | A = is2;
24 | B = is2;
25 | else % compute derivative matrix
26 | A = -2*is2*ones(size(x,1));
27 | end
28 |
29 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covLINard.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covLINard(logtheta, x, z);
2 |
3 | % Linear covariance function with Automatic Relevance Determination (ARD). The
4 | % covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = x^p'*inv(P)*x^q
7 | %
8 | % where the P matrix is diagonal with ARD parameters ell_1^2,...,ell_D^2, where
9 | % D is the dimension of the input space. The hyperparameters are:
10 | %
11 | % logtheta = [ log(ell_1)
12 | % log(ell_2)
13 | % .
14 | % log(ell_D) ]
15 | %
16 | % Note that there is no bias term; use covConst to add a bias.
17 | %
18 | % For more help on design of covariance functions, try "help covFunctions".
19 | %
20 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-24)
21 |
22 | if nargin == 0, A = 'D'; return; end % report number of parameters
23 |
24 | ell = exp(logtheta);
25 | x = x*diag(1./ell);
26 |
27 | if nargin == 2
28 | A = x*x';
29 | elseif nargout == 2 % compute test set covariances
30 | z = z*diag(1./ell);
31 | A = sum(z.*z,2);
32 | B = x*z';
33 | else % compute derivative matrices
34 | A = -2*x(:,z)*x(:,z)';
35 | end
36 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covLINone.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covLINone(logtheta, x, z);
2 |
3 | % Linear covariance function with a single hyperparameter. The covariance
4 | % function is parameterized as:
5 | %
6 | % k(x^p,x^q) = x^p'*inv(P)*x^q + 1./t2;
7 | %
8 | % where the P matrix is t2 times the unit matrix. The second term plays the
9 | % role of the bias. The hyperparameter is:
10 | %
11 | % logtheta = [ log(sqrt(t2)) ]
12 | %
13 | % For more help on design of covariance functions, try "help covFunctions".
14 | %
15 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-27)
16 |
17 | if nargin == 0, A = '1'; return; end % report number of parameters
18 |
19 | it2 = exp(-2*logtheta); % t2 inverse
20 |
21 | if nargin == 2 % compute covariance
22 | A = it2*(1+x*x');
23 | elseif nargout == 2 % compute test set covariances
24 | A = it2*(1+sum(z.*z,2));
25 | B = it2*(1+x*z');
26 | else % compute derivative matrix
27 | A = -2*it2*(1+x*x');
28 | end
29 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covMatern3iso.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covMatern3iso(loghyper, x, z)
2 |
3 | % Matern covariance function with nu = 3/2 and isotropic distance measure. The
4 | % covariance function is:
5 | %
6 | % k(x^p,x^q) = sf2 * (1 + sqrt(3)*d(x^p,x^q)) * exp(-sqrt(3)*d(x^p,x^q))
7 | %
8 | % where d(x^p,x^q) is the distance sqrt((x^p-x^q)'*inv(P)*(x^p-x^q)), P is ell^2
9 | % times the unit matrix and sf2 is the signal variance. The hyperparameters
10 | % are:
11 | %
12 | % loghyper = [ log(ell)
13 | % log(sqrt(sf2)) ]
14 | %
15 | % For more help on design of covariance functions, try "help covFunctions".
16 | %
17 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-24)
18 |
19 | if nargin == 0, A = '2'; return; end
20 |
21 | persistent K;
22 | [n, D] = size(x);
23 | ell = exp(loghyper(1));
24 | sf2 = exp(2*loghyper(2));
25 |
26 | x = sqrt(3)*x/ell;
27 |
28 | if nargin == 2 % compute covariance matrix
29 | A = sqrt(sq_dist(x'));
30 | K = sf2*exp(-A).*(1+A);
31 | A = K;
32 | elseif nargout == 2 % compute test set covariances
33 | z = sqrt(3)*z/ell;
34 | A = sf2;
35 | B = sqrt(sq_dist(x',z'));
36 | B = sf2*exp(-B).*(1+B);
37 | else % compute derivative matrices
38 | if z == 1
39 | A = sf2*sq_dist(x').*exp(-sqrt(sq_dist(x')));
40 | else
41 | % check for correct dimension of the previously calculated kernel matrix
42 | if any(size(K)~=n)
43 | K = sqrt(sq_dist(x'));
44 | K = sf2*exp(-K).*(1+K);
45 | end
46 | A = 2*K;
47 | clear K;
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covMatern5iso.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covMatern5iso(loghyper, x, z)
2 |
3 | % Matern covariance function with nu = 5/2 and isotropic distance measure. The
4 | % covariance function is:
5 | %
6 | % k(x^p,x^q) = sf2 * (1 + sqrt(5)*d + 5*d^2/3) * exp(-sqrt(5)*d)
7 | %
8 | % where d is the distance sqrt((x^p-x^q)'*inv(P)*(x^p-x^q)), P is ell^2 times
9 | % the unit matrix and sf2 is the signal variance. The hyperparameters are:
10 | %
11 | % loghyper = [ log(ell)
12 | % log(sqrt(sf2)) ]
13 | %
14 | % For more help on design of covariance functions, try "help covFunctions".
15 | %
16 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-24)
17 |
18 | if nargin == 0, A = '2'; return; end
19 |
20 | persistent K;
21 | [n, D] = size(x);
22 | ell = exp(loghyper(1));
23 | sf2 = exp(2*loghyper(2));
24 |
25 | x = sqrt(5)*x/ell;
26 |
27 | if nargin == 2 % compute covariance matrix
28 | A = sq_dist(x');
29 | K = sf2*exp(-sqrt(A)).*(1+sqrt(A)+A/3);
30 | A = K;
31 | elseif nargout == 2 % compute test set covariances
32 | z = sqrt(5)*z/ell;
33 | A = sf2;
34 | B = sq_dist(x',z');
35 | B = sf2*exp(-sqrt(B)).*(1+sqrt(B)+B/3);
36 | else % compute derivative matrices
37 | if z == 1
38 | A = sq_dist(x');
39 | A = sf2*(A+sqrt(A).^3).*exp(-sqrt(A))/3;
40 | else
41 | % check for correct dimension of the previously calculated kernel matrix
42 | if any(size(K)~=n)
43 | K = sq_dist(x');
44 | K = sf2*exp(-sqrt(K)).*(1+sqrt(K)+K/3);
45 | end
46 | A = 2*K;
47 | clear K;
48 | end
49 | end
50 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covNNone.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covNNone(loghyper, x, z)
2 |
3 | % Neural network covariance function with a single parameter for the distance
4 | % measure. The covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = sf2 * asin(x^p'*P*x^q / sqrt[(1+x^p'*P*x^p)*(1+x^q'*P*x^q)])
7 | %
8 | % where the x^p and x^q vectors on the right hand side have an added extra bias
9 | % entry with unit value. P is ell^-2 times the unit matrix and sf2 controls the
10 | % signal variance. The hyperparameters are:
11 | %
12 | % loghyper = [ log(ell)
13 | % log(sqrt(sf2)) ]
14 | %
15 | % For more help on design of covariance functions, try "help covFunctions".
16 | %
17 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-24)
18 |
19 | if nargin == 0, A = '2'; return; end % report number of parameters
20 |
21 | persistent Q K;
22 | [n D] = size(x);
23 | ell = exp(loghyper(1)); em2 = ell^(-2);
24 | sf2 = exp(2*loghyper(2));
25 | x = x/ell;
26 |
27 | if nargin == 2 % compute covariance
28 | Q = x*x';
29 | K = (em2+Q)./(sqrt(1+em2+diag(Q))*sqrt(1+em2+diag(Q)'));
30 | A = sf2*asin(K);
31 | elseif nargout == 2 % compute test set covariances
32 | z = z/ell;
33 | A = sf2*asin((em2+sum(z.*z,2))./(1+em2+sum(z.*z,2)));
34 | B = sf2*asin((em2+x*z')./sqrt((1+em2+sum(x.*x,2))*(1+em2+sum(z.*z,2)')));
35 | else % compute derivative matrix
36 | % check for correct dimension of the previously calculated kernel matrix
37 | if any(size(Q)~=n)
38 | Q = x*x';
39 | end
40 | % check for correct dimension of the previously calculated kernel matrix
41 | if any(size(K)~=n)
42 | K = (em2+Q)./(sqrt(1+em2+diag(Q))*sqrt(1+em2+diag(Q)'));
43 | end
44 | if z == 1 % first parameter
45 | v = (em2+sum(x.*x,2))./(1+em2+diag(Q));
46 | A = -2*sf2*((em2+Q)./(sqrt(1+em2+diag(Q))*sqrt(1+em2+diag(Q)'))- ...
47 | K.*(repmat(v,1,n)+repmat(v',n,1))/2)./sqrt(1-K.^2);
48 | clear Q;
49 | else % second parameter
50 | A = 2*sf2*asin(K);
51 | clear K;
52 | end
53 | end
54 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covNoise.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covNoise(logtheta, x, z);
2 |
3 | % Independent covariance function, ie "white noise", with specified variance.
4 | % The covariance function is specified as:
5 | %
6 | % k(x^p,x^q) = s2 * \delta(p,q)
7 | %
8 | % where s2 is the noise variance and \delta(p,q) is a Kronecker delta function
9 | % which is 1 iff p=q and zero otherwise. The hyperparameter is
10 | %
11 | % logtheta = [ log(sqrt(s2)) ]
12 | %
13 | % For more help on design of covariance functions, try "help covFunctions".
14 | %
15 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-03-24.
16 |
17 | if nargin == 0, A = '1'; return; end % report number of parameters
18 |
19 | s2 = exp(2*logtheta); % noise variance
20 |
21 | if nargin == 2 % compute covariance matrix
22 | A = s2*eye(size(x,1));
23 | elseif nargout == 2 % compute test set covariances
24 | A = s2;
25 | B = 0; % zeros cross covariance by independence
26 | else % compute derivative matrix
27 | A = 2*s2*eye(size(x,1));
28 | end
29 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covPeriodic.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covPeriodic(logtheta, x, z);
2 |
3 | % covariance function for a smooth periodic function, with unit period. The
4 | % covariance function is:
5 | %
6 | % k(x^p, x^q) = sf2 * exp(-2*sin^2(pi*(x_p-x_q))/ell^2)
7 | %
8 | % where the hyperparameters are:
9 | %
10 | % logtheta = [ log(ell)
11 | % log(sqrt(sf2)) ]
12 | %
13 | % For more help on design of covariance functions, try "help covFunctions".
14 | %
15 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-04-07)
16 |
17 | if nargin == 0, A = '2'; return; end
18 |
19 | [n D] = size(x);
20 | ell = exp(logtheta(1));
21 | sf2 = exp(2*logtheta(2));
22 |
23 | if nargin == 2
24 | A = sf2*exp(-2*(sin(pi*(repmat(x,1,n)-repmat(x',n,1)))/ell).^2);
25 | elseif nargout == 2 % compute test set covariances
26 | [nn D] = size(z);
27 | A = sf2*ones(nn,1);
28 | B = sf2*exp(-2*(sin(pi*(repmat(x,1,nn)-repmat(z',n,1)))/ell).^2);
29 | else % compute derivative matrices
30 | if z == 1
31 | r = (sin(pi*(repmat(x,1,n)-repmat(x',n,1)))/ell).^2;
32 | A = 4*sf2*exp(-2*r).*r;
33 | else
34 | A = 2*sf2*exp(-2*(sin(pi*(repmat(x,1,n)-repmat(x',n,1)))/ell).^2);
35 | end
36 | end
37 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covProd.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covProd(covfunc, logtheta, x, z);
2 |
3 | % covProd - compose a covariance function as the product of other covariance
4 | % functions. This function doesn't actually compute very much on its own, it
5 | % merely does some bookkeeping, and calls other covariance functions to do the
6 | % actual work.
7 | %
8 | % For more help on design of covariance functions, try "help covFunctions".
9 | %
10 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-04-06.
11 |
12 | for i = 1:length(covfunc) % iterate over covariance functions
13 | f = covfunc(i);
14 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
15 | j(i) = cellstr(feval(f{:}));
16 | end
17 |
18 | if nargin == 1, % report number of parameters
19 | A = char(j(1)); for i=2:length(covfunc), A = [A, '+', char(j(i))]; end
20 | return
21 | end
22 |
23 | [n, D] = size(x);
24 |
25 | v = []; % v vector indicates to which covariance parameters belong
26 | for i = 1:length(covfunc), v = [v repmat(i, 1, eval(char(j(i))))]; end
27 |
28 | switch nargin
29 | case 3 % compute covariance matrix
30 | A = ones(n, n); % allocate space for covariance matrix
31 | for i = 1:length(covfunc) % iteration over factor functions
32 | f = covfunc(i);
33 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
34 | A = A .* feval(f{:}, logtheta(v==i), x); % multiply covariances
35 | end
36 |
37 | case 4 % compute derivative matrix or test set covariances
38 | if nargout == 2 % compute test set covariances
39 | A = ones(size(z,1),1); B = ones(size(x,1),size(z,1)); % allocate space
40 | for i = 1:length(covfunc)
41 | f = covfunc(i);
42 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
43 | [AA BB] = feval(f{:}, logtheta(v==i), x, z); % compute test covariances
44 | A = A .* AA; B = B .* BB; % and accumulate
45 | end
46 | else % compute derivative matrices
47 | A = ones(n, n);
48 | ii = v(z); % which covariance function
49 | j = sum(v(1:z)==ii); % which parameter in that covariance
50 | for i = 1:length(covfunc)
51 | f = covfunc(i);
52 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
53 | if i == ii
54 | A = A .* feval(f{:}, logtheta(v==i), x, j); % multiply derivative
55 | else
56 | A = A .* feval(f{:}, logtheta(v==i), x); % multiply covariance
57 | end
58 | end
59 | end
60 |
61 | end
62 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covRQard.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covRQard(loghyper, x, z)
2 |
3 | % Rational Quadratic covariance function with Automatic Relevance Determination
4 | % (ARD) distance measure. The covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = sf2 * [1 + (x^p - x^q)'*inv(P)*(x^p - x^q)/(2*alpha)]^(-alpha)
7 | %
8 | % where the P matrix is diagonal with ARD parameters ell_1^2,...,ell_D^2, where
9 | % D is the dimension of the input space, sf2 is the signal variance and alpha
10 | % is the shape parameter for the RQ covariance. The hyperparameters are:
11 | %
12 | % loghyper = [ log(ell_1)
13 | % log(ell_2)
14 | % .
15 | % log(ell_D)
16 | % log(sqrt(sf2))
17 | % log(alpha) ]
18 | %
19 | % For more help on design of covariance functions, try "help covFunctions".
20 | %
21 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-09-08)
22 |
23 | if nargin == 0, A = '(D+2)'; return; end
24 |
25 | persistent K;
26 | [n D] = size(x);
27 | ell = exp(loghyper(1:D));
28 | sf2 = exp(2*loghyper(D+1));
29 | alpha = exp(loghyper(D+2));
30 |
31 | if nargin == 2
32 | K = (1+0.5*sq_dist(diag(1./ell)*x')/alpha);
33 | A = sf2*(K.^(-alpha));
34 | elseif nargout == 2 % compute test set covariances
35 | A = sf2*ones(size(z,1),1);
36 | B = sf2*((1+0.5*sq_dist(diag(1./ell)*x',diag(1./ell)*z')/alpha).^(-alpha));
37 | else % compute derivative matrix
38 | % check for correct dimension of the previously calculated kernel matrix
39 | if any(size(K)~=n)
40 | K = (1+0.5*sq_dist(diag(1./ell)*x')/alpha);
41 | end
42 | if z <= D % length scale parameters
43 | A = sf2*K.^(-alpha-1).*sq_dist(x(:,z)'/ell(z));
44 | elseif z == D+1 % magnitude parameter
45 | A = 2*sf2*(K.^(-alpha));
46 | else
47 | A = sf2*K.^(-alpha).*(0.5*sq_dist(diag(1./ell)*x')./K - alpha*log(K));
48 | clear K;
49 | end
50 | end
51 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covRQiso.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covRQiso(loghyper, x, z)
2 |
3 | % Rational Quadratic covariance function with isotropic distance measure. The
4 | % covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = sf2 * [1 + (x^p - x^q)'*inv(P)*(x^p - x^q)/(2*alpha)]^(-alpha)
7 | %
8 | % where the P matrix is ell^2 times the unit matrix, sf2 is the signal
9 | % variance and alpha is the shape parameter for the RQ covariance. The
10 | % hyperparameters are:
11 | %
12 | % loghyper = [ log(ell)
13 | % log(sqrt(sf2))
14 | % log(alpha) ]
15 | %
16 | % For more help on design of covariance functions, try "help covFunctions".
17 | %
18 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-09-08)
19 |
20 | if nargin == 0, A = '3'; return; end
21 |
22 | [n, D] = size(x);
23 |
24 | persistent K;
25 | ell = exp(loghyper(1));
26 | sf2 = exp(2*loghyper(2));
27 | alpha = exp(loghyper(3));
28 |
29 | if nargin == 2 % compute covariance matrix
30 | K = (1+0.5*sq_dist(x'/ell)/alpha);
31 | A = sf2*(K.^(-alpha));
32 | elseif nargout == 2 % compute test set covariances
33 | A = sf2*ones(size(z,1),1);
34 | B = sf2*((1+0.5*sq_dist(x'/ell,z'/ell)/alpha).^(-alpha));
35 | else % compute derivative matrices
36 | % check for correct dimension of the previously calculated kernel matrix
37 | if any(size(K)~=n)
38 | K = (1+0.5*sq_dist(x'/ell)/alpha);
39 | end
40 | if z == 1 % length scale parameters
41 | A = sf2*K.^(-alpha-1).*sq_dist(x'/ell);
42 | elseif z == 2 % magnitude parameter
43 | A = 2*sf2*(K.^(-alpha));
44 | else
45 | A = sf2*K.^(-alpha).*(0.5*sq_dist(x'/ell)./K - alpha*log(K));
46 | clear K;
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covSEard.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covSEard(loghyper, x, z)
2 |
3 | % Squared Exponential covariance function with Automatic Relevance Determination
4 | % (ARD) distance measure. The covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = sf2 * exp(-(x^p - x^q)'*inv(P)*(x^p - x^q)/2)
7 | %
8 | % where the P matrix is diagonal with ARD parameters ell_1^2,...,ell_D^2, where
9 | % D is the dimension of the input space and sf2 is the signal variance. The
10 | % hyperparameters are:
11 | %
12 | % loghyper = [ log(ell_1)
13 | % log(ell_2)
14 | % .
15 | % log(ell_D)
16 | % log(sqrt(sf2)) ]
17 | %
18 | % For more help on design of covariance functions, try "help covFunctions".
19 | %
20 | % (C) Copyright 2006 by Carl Edward Rasmussen (2006-03-24)
21 |
22 | if nargin == 0, A = '(D+1)'; return; end % report number of parameters
23 |
24 | persistent K;
25 |
26 | [n D] = size(x);
27 | ell = exp(loghyper(1:D)); % characteristic length scale
28 | sf2 = exp(2*loghyper(D+1)); % signal variance
29 |
30 | if nargin == 2
31 | K = sf2*exp(-sq_dist(diag(1./ell)*x')/2);
32 | A = K;
33 | elseif nargout == 2 % compute test set covariances
34 | A = sf2*ones(size(z,1),1);
35 | B = sf2*exp(-sq_dist(diag(1./ell)*x',diag(1./ell)*z')/2);
36 | else % compute derivative matrix
37 |
38 | % check for correct dimension of the previously calculated kernel matrix
39 | if any(size(K)~=n)
40 | K = sf2*exp(-sq_dist(diag(1./ell)*x')/2);
41 | end
42 |
43 | if z <= D % length scale parameters
44 | A = K.*sq_dist(x(:,z)'/ell(z));
45 | else % magnitude parameter
46 | A = 2*K;
47 | clear K;
48 | end
49 | end
50 |
51 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covSEiso.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covSEiso(loghyper, x, z);
2 |
3 | % Squared Exponential covariance function with isotropic distance measure. The
4 | % covariance function is parameterized as:
5 | %
6 | % k(x^p,x^q) = sf2 * exp(-(x^p - x^q)'*inv(P)*(x^p - x^q)/2)
7 | %
8 | % where the P matrix is ell^2 times the unit matrix and sf2 is the signal
9 | % variance. The hyperparameters are:
10 | %
11 | % loghyper = [ log(ell)
12 | % log(sqrt(sf2)) ]
13 | %
14 | % For more help on design of covariance functions, try "help covFunctions".
15 | %
16 | % (C) Copyright 2006 by Carl Edward Rasmussen (2007-06-25)
17 |
18 | if nargin == 0, A = '2'; return; end % report number of parameters
19 |
20 | [n D] = size(x);
21 | ell = exp(loghyper(1)); % characteristic length scale
22 | sf2 = exp(2*loghyper(2)); % signal variance
23 |
24 | if nargin == 2
25 | A = sf2*exp(-sq_dist(x'/ell)/2);
26 | elseif nargout == 2 % compute test set covariances
27 | A = sf2*ones(size(z,1),1);
28 | B = sf2*exp(-sq_dist(x'/ell,z'/ell)/2);
29 | else % compute derivative matrix
30 | if z == 1 % first parameter
31 | A = sf2*exp(-sq_dist(x'/ell)/2).*sq_dist(x'/ell);
32 | else % second parameter
33 | A = 2*sf2*exp(-sq_dist(x'/ell)/2);
34 | end
35 | end
36 |
37 |
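Note the relation to the 'Gauss' kernel in ker_eval.m used elsewhere in these chapters: with unit signal variance (sf2 = 1), covSEiso computes exp(-||x-x'||^2/(2*ell^2)), so the two parameterizations are linked by paramKernel = 1/(2*ell^2); for example, ell = 1 here corresponds to paramKernel = 0.5 in ker_eval(...,'Gauss',paramKernel).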
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/covSum.m:
--------------------------------------------------------------------------------
1 | function [A, B] = covSum(covfunc, logtheta, x, z);
2 |
3 | % covSum - compose a covariance function as the sum of other covariance
4 | % functions. This function doesn't actually compute very much on its own, it
5 | % merely does some bookkeeping, and calls other covariance functions to do the
6 | % actual work.
7 | %
8 | % For more help on design of covariance functions, try "help covFunctions".
9 | %
10 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-03-20.
11 |
12 | for i = 1:length(covfunc) % iterate over covariance functions
13 | f = covfunc(i);
14 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
15 | j(i) = cellstr(feval(f{:}));
16 | end
17 |
18 | if nargin == 1, % report number of parameters
19 | A = char(j(1)); for i=2:length(covfunc), A = [A, '+', char(j(i))]; end
20 | return
21 | end
22 |
23 | [n, D] = size(x);
24 |
25 | v = []; % v vector indicates to which covariance parameters belong
26 | for i = 1:length(covfunc), v = [v repmat(i, 1, eval(char(j(i))))]; end
27 |
28 | switch nargin
29 | case 3 % compute covariance matrix
30 | A = zeros(n, n); % allocate space for covariance matrix
31 | for i = 1:length(covfunc) % iteration over summand functions
32 | f = covfunc(i);
33 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
34 | A = A + feval(f{:}, logtheta(v==i), x); % accumulate covariances
35 | end
36 |
37 | case 4 % compute derivative matrix or test set covariances
38 | if nargout == 2 % compute test set covariances
39 | A = zeros(size(z,1),1); B = zeros(size(x,1),size(z,1)); % allocate space
40 | for i = 1:length(covfunc)
41 | f = covfunc(i);
42 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
43 | [AA BB] = feval(f{:}, logtheta(v==i), x, z); % compute test covariances
44 | A = A + AA; B = B + BB; % and accumulate
45 | end
46 | else % compute derivative matrices
47 | i = v(z); % which covariance function
48 | j = sum(v(1:z)==i); % which parameter in that covariance
49 | f = covfunc(i);
50 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary
51 | A = feval(f{:}, logtheta(v==i), x, j); % compute derivative
52 | end
53 |
54 | end
55 |
56 |
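As in PART1.m earlier in this chapter, a composite covariance is specified as a nested cell array, and its log hyperparameters are simply concatenated in the order of the summands (values below are illustrative):

    covfunc = {'covSum', {'covSEiso','covNoise'}};
    % covSEiso takes [log(ell); log(sqrt(sf2))] and covNoise takes [log(sqrt(s2))],
    % so the composite expects the 3-vector [log(ell); log(sqrt(sf2)); log(sqrt(s2))]
    K = feval(covfunc{:}, [log(1.0); log(1.0); log(0.1)], randn(10,1));  % 10-by-10 covariance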
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/cumGauss.m:
--------------------------------------------------------------------------------
1 | function [out1, out2, out3, out4] = cumGauss(y, f, var)
2 |
3 | % cumGauss - Cumulative Gaussian likelihood function. The expression for the
4 | % likelihood is cumGauss(t) = normcdf(t) = (1+erf(t/sqrt(2)))/2.
5 | %
6 | % Three modes are provided, for computing likelihoods, derivatives and moments
7 | % respectively, see likelihoods.m for the details. In general, care is taken
8 | % to avoid numerical issues when the arguments are extreme. The
9 | % moments \int f^k cumGauss(y,f) N(f|mu,var) df are calculated analytically.
10 | %
11 | % Copyright (c) 2007 Carl Edward Rasmussen and Hannes Nickisch, 2007-03-29.
12 |
13 | if nargin>1, y=sign(y); end % allow only +/- 1 as values
14 |
15 | if nargin == 2 % (log) likelihood evaluation
16 |
17 | if numel(y)>0, yf = y.*f; else yf = f; end % product of latents and labels
18 |
19 | out1 = (1+erf(yf/sqrt(2)))/2; % likelihood
20 | if nargout>1
21 | out2 = zeros(size(f));
22 | b = 0.158482605320942; % quadratic asymptotics approximated at -6
23 | c = -1.785873318175113;
24 | ok = yf>-6; % normal evaluation for larger values
25 | out2( ok) = log(out1(ok));
26 | out2(~ok) = -yf(~ok).^2/2 + b*yf(~ok) + c; % log of sigmoid
27 | end
28 |
29 | elseif nargin == 3
30 |
31 | if strcmp(var,'deriv') % derivatives of the log
32 |
33 | if numel(y)==0, y=1; end
34 | yf = y.*f; % product of latents and labels
35 | [p,lp] = cumGauss(y,f);
36 | out1 = sum(lp);
37 |
38 | if nargout>1 % dlp, derivative of log likelihood
39 |
40 | n_p = zeros(size(f)); % safely compute Gaussian over cumulative Gaussian
41 | ok = yf>-5; % normal evaluation for large values of yf
42 | n_p(ok) = (exp(-yf(ok).^2/2)/sqrt(2*pi))./p(ok);
43 |
44 | bd = yf<-6; % tight upper bound evaluation
45 | n_p(bd) = sqrt(yf(bd).^2/4+1)-yf(bd)/2;
46 |
47 | interp = ~ok & ~bd; % linearly interpolate between both of them
48 | tmp = yf(interp);
49 | lam = -5-yf(interp);
50 | n_p(interp) = (1-lam).*(exp(-tmp.^2/2)/sqrt(2*pi))./p(interp) + ...
51 | lam .*(sqrt(tmp.^2/4+1)-tmp/2);
52 |
53 | out2 = y.*n_p; % dlp, derivative of log likelihood
54 | if nargout>2 % d2lp, 2nd derivative of log likelihood
55 | out3 = -n_p.^2 - yf.*n_p;
56 | if nargout>3 % d3lp, 3rd derivative of log likelihood
57 | out4 = 2*y.*n_p.^3 +3*f.*n_p.^2 +y.*(f.^2-1).*n_p;
58 | end
59 | end
60 | end
61 |
62 | else % compute moments
63 |
64 | mu = f; % 2nd argument is the mean of a Gaussian
65 | z = mu./sqrt(1+var);
66 | if numel(y)>0, z=z.*y; end
67 | out1 = cumGauss([],z); % zeroth raw moment
68 |
69 | [dummy,n_p] = cumGauss([],z,'deriv'); % Gaussian over cumulative Gaussian
70 |
71 | if nargout>1
72 | if numel(y)==0, y=1; end
73 | out2 = mu + y.*var.*n_p./sqrt(1+var); % 1st raw moment
74 | if nargout>2
75 | out3 = 2*mu.*out2 -mu.^2 +var -z.*var.^2.*n_p./(1+var); % 2nd raw moment
76 | out3 = out3.*out1;
77 | end
78 | out2 = out2.*out1;
79 | end
80 |
81 | end
82 |
83 | else
84 | error('No valid input provided.')
85 | end
86 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/gauher.m:
--------------------------------------------------------------------------------
1 | % compute abscissas and weight factors for Gaussian-Hermite quadrature
2 | %
3 | % CALL: [x,w]=gauher(N)
4 | %
5 | % x = base points (abscissas)
6 | % w = weight factors
7 | % N = number of base points (abscissas) (integrates a (2N-1)th order
8 | % polynomial exactly)
9 | %
10 | % p(x)=exp(-x^2/2)/sqrt(2*pi), a =-Inf, b = Inf
11 | %
12 | % The Gaussian Quadrature integrates a (2n-1)th order
13 | % polynomial exactly and the integral is of the form
14 | % b N
15 | % Int ( p(x)* F(x) ) dx = Sum ( w_j* F( x_j ) )
16 | % a j=1
17 | %
18 | % this procedure uses the coefficients a(j), b(j) of the
19 | % recurrence relation
20 | %
21 | % b p (x) = (x - a ) p (x) - b p (x)
22 | % j j j j-1 j-1 j-2
23 | %
24 | % for the various classical (normalized) orthogonal polynomials,
25 | % and the zero-th moment
26 | %
27 | % 1 = integral w(x) dx
28 | %
29 | % of the given polynomial's weight function w(x). Since the
30 | % polynomials are orthonormalized, the tridiagonal matrix is
31 | % guaranteed to be symmetric.
32 |
33 | function [x,w]=gauher(N)
34 | if N==20 % return precalculated values
35 | x=[ -7.619048541679757;-6.510590157013656;-5.578738805893203;
36 | -4.734581334046057;-3.943967350657318;-3.18901481655339 ;
37 | -2.458663611172367;-1.745247320814127;-1.042945348802751;
38 | -0.346964157081356; 0.346964157081356; 1.042945348802751;
39 | 1.745247320814127; 2.458663611172367; 3.18901481655339 ;
40 | 3.943967350657316; 4.734581334046057; 5.578738805893202;
41 | 6.510590157013653; 7.619048541679757];
42 | w=[ 0.000000000000126; 0.000000000248206; 0.000000061274903;
43 | 0.00000440212109 ; 0.000128826279962; 0.00183010313108 ;
44 | 0.013997837447101; 0.061506372063977; 0.161739333984 ;
45 | 0.260793063449555; 0.260793063449555; 0.161739333984 ;
46 | 0.061506372063977; 0.013997837447101; 0.00183010313108 ;
47 | 0.000128826279962; 0.00000440212109 ; 0.000000061274903;
48 | 0.000000000248206; 0.000000000000126 ];
49 | else
50 | b = sqrt( (1:N-1)/2 )';
51 | [V,D] = eig( diag(b,1) + diag(b,-1) );
52 | w = V(1,:)'.^2;
53 | x = sqrt(2)*diag(D);
54 | end
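A quick sanity check of the quadrature rule (illustrative): the nodes and weights approximate expectations under a standard normal, e.g.

    [x, w] = gauher(20);
    approx = sum(w .* cos(x));   % close to exp(-1/2), the exact value of E[cos(z)], z ~ N(0,1)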
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/gpr.m:
--------------------------------------------------------------------------------
1 | function [out1, out2] = gpr(logtheta, covfunc, x, y, xstar);
2 |
3 | % gpr - Gaussian process regression, with a named covariance function. Two
4 | % modes are possible: training and prediction: if no test data are given, the
5 | % function returns minus the log likelihood and its partial derivatives with
6 | % respect to the hyperparameters; this mode is used to fit the hyperparameters.
7 | % If test data are given, then (marginal) Gaussian predictions are computed,
8 | % whose mean and variance are returned. Note that in cases where the covariance
9 | % function has noise contributions, the variance returned in S2 is for noisy
10 | % test targets; if you want the variance of the noise-free latent function, you
11 | % must subtract the noise variance.
12 | %
13 | % usage: [nlml dnlml] = gpr(logtheta, covfunc, x, y)
14 | % or: [mu S2] = gpr(logtheta, covfunc, x, y, xstar)
15 | %
16 | % where:
17 | %
18 | % logtheta is a (column) vector of log hyperparameters
19 | % covfunc is the covariance function
20 | % x is a n by D matrix of training inputs
21 | % y is a (column) vector (of size n) of targets
22 | % xstar is a nn by D matrix of test inputs
23 | % nlml is the returned value of the negative log marginal likelihood
24 | % dnlml is a (column) vector of partial derivatives of the negative
25 | % log marginal likelihood wrt each log hyperparameter
26 | % mu is a (column) vector (of size nn) of predicted means
27 | % S2 is a (column) vector (of size nn) of predicted variances
28 | %
29 | % For more help on covariance functions, see "help covFunctions".
30 | %
31 | % (C) copyright 2006 by Carl Edward Rasmussen (2006-03-20).
32 |
33 | if ischar(covfunc), covfunc = cellstr(covfunc); end % convert to cell if needed
34 | [n, D] = size(x);
35 | if eval(feval(covfunc{:})) ~= size(logtheta, 1)
36 | error('Error: Number of parameters do not agree with covariance function')
37 | end
38 |
39 | K = feval(covfunc{:}, logtheta, x); % compute training set covariance matrix
40 |
41 | L = chol(K)'; % cholesky factorization of the covariance
42 | alpha = solve_chol(L',y);
43 |
44 | if nargin == 4 % if no test cases, compute the negative log marginal likelihood
45 |
46 | out1 = 0.5*y'*alpha + sum(log(diag(L))) + 0.5*n*log(2*pi);
47 |
48 | if nargout == 2 % ... and if requested, its partial derivatives
49 | out2 = zeros(size(logtheta)); % set the size of the derivative vector
50 | W = L'\(L\eye(n))-alpha*alpha'; % precompute for convenience
51 | for i = 1:length(out2)
52 | out2(i) = sum(sum(W.*feval(covfunc{:}, logtheta, x, i)))/2;
53 | end
54 | end
55 |
56 | else % ... otherwise compute (marginal) test predictions ...
57 |
58 | [Kss, Kstar] = feval(covfunc{:}, logtheta, x, xstar); % test covariances
59 |
60 | out1 = Kstar' * alpha; % predicted means
61 |
62 | if nargout == 2
63 | v = L\Kstar;
64 | out2 = Kss - sum(v.*v)';
65 | end
66 |
67 | end
68 |
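For reference, line 46 is the usual Gaussian-process negative log marginal likelihood

    nlml = 0.5*y'*inv(K)*y + 0.5*log(det(K)) + 0.5*n*log(2*pi)

evaluated stably through the Cholesky factor K = L*L', since log(det(K)) = 2*sum(log(diag(L))) and inv(K)*y = solve_chol(L',y). A direct, less numerically careful check, assuming K and L from lines 39 and 41:

    nlml_direct = 0.5*(y'*(K\y)) + sum(log(diag(L))) + 0.5*numel(y)*log(2*pi);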
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/gprSRPP.m:
--------------------------------------------------------------------------------
1 | function [mu, S2SR, S2PP] = gprSRPP(logtheta, covfunc, x, INDEX, y, xstar);
2 |
3 | % gprSRPP - Carries out approximate Gaussian process regression prediction
4 | % using the subset of regressors (SR) or projected process approximation (PP)
5 | % and the active set specified by INDEX.
6 | %
7 | % Usage
8 | %
9 | % [mu, S2SR, S2PP] = gprSRPP(logtheta, covfunc, x, INDEX, y, xstar)
10 | %
11 | % where
12 | %
13 | % logtheta is a (column) vector of log hyperparameters
14 | % covfunc is the covariance function, which is assumed to
15 | % be a covSum, and the last entry of the sum is covNoise
16 | % x is a n by D matrix of training inputs
17 | % INDEX is a vector of length m <= n used to specify which
18 | % inputs are used in the active set
19 | % y is a (column) vector (of size n) of targets
20 | % xstar is a nstar by D matrix of test inputs
21 | % mu     is a (column) vector (of size nstar) of predicted means
22 | % S2SR   is a (column) vector (of size nstar) of predicted variances under SR
23 | % S2PP   is a (column) vector (of size nstar) of predicted variances under PP
24 | %
25 | % where D is the dimension of the input.
26 | %
27 | % For more help on covariance functions, see "help covFunctions".
28 | %
29 | % (C) copyright 2005, 2006 by Chris Williams (2006-03-29).
30 |
31 | if ischar(covfunc), covfunc = cellstr(covfunc); end % convert to cell if needed
32 | [n, D] = size(x);
33 | if eval(feval(covfunc{:})) ~= size(logtheta, 1)
34 | error('Error: Number of parameters do not agree with covariance function')
35 | end
36 |
37 | % we check that the covfunc cell array is a covSum, with last entry 'covNoise'
38 | if length(covfunc) ~= 2 | ~strcmp(covfunc(1), 'covSum') | ...
39 | ~strcmp(covfunc{2}(end), 'covNoise')
40 | error('The covfunc must be "covSum" whose last summand must be "covNoise"')
41 | end
42 |
43 | sigma2n = exp(2*logtheta(end)); % noise variance
44 | [nstar, D] = size(xstar); % number of test cases and dimension of input space
45 | m = length(INDEX); % size of subset
46 |
47 | % note, that in the following Kmm is computed by extracting the relevant part
48 | % of Knm, thus it will be the "noise-free" covariance (although the covfunc
49 | % specification does include noise).
50 |
51 | [v, Knm] = feval(covfunc{:}, logtheta, x, x(INDEX,:));
52 | Kmm = Knm(INDEX,:); % Kmm is a noise-free covariance matrix
53 | jitter = 1e-9*trace(Kmm);
54 | Kmm = Kmm + jitter*eye(m); % as suggested in code of jqc
55 |
56 | % a is cov between active set and test points and vstar is variances at test
57 | % points, incl noise variance
58 |
59 | [vstar, a] = feval(covfunc{:}, logtheta, x(INDEX,:), xstar);
60 |
61 | mu = a'*((sigma2n*Kmm + Knm'*Knm)\(Knm'*y)); % pred mean eq. (8.14) and (8.26)
62 |
63 | e = (sigma2n*Kmm + Knm'*Knm) \ a;
64 |
65 | S2SR = sigma2n*sum(a.*e,1)'; % noise-free SR variance, eq. 8.15
66 | S2PP = vstar-sum(a.*(Kmm\a),1)'+S2SR; % PP variance eq. (8.27) including noise
67 | S2SR = S2SR + sigma2n;                 % SR variance including noise
68 |
69 |
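A usage sketch for the SR/PP approximation, continuing the illustrative setup above; as the code itself checks, covfunc must be a covSum whose last summand is covNoise:

    m = 5;                                       % active-set size (illustrative)
    perm = randperm(length(y));  INDEX = perm(1:m);
    [mu, S2SR, S2PP] = gprSRPP(logtheta, covfunc, x, INDEX, y, xstar);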
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/likelihoods.m:
--------------------------------------------------------------------------------
1 | % likelihood: likelihood functions are provided to be used by the binaryGP
2 | % function, for binary Gaussian process classification. Two likelihood
3 | % functions are provided:
4 | %
5 | % logistic
6 | % cumGauss
7 | %
8 | % The likelihood functions have three possible modes, the mode being selected
9 | % as follows (where "lik" stands for any likelihood function):
10 | %
11 | % (log) likelihood evaluation: [p, lp] = lik(y, f)
12 | %
13 | % where y are the targets, f the latent function values, p the probabilities
14 | % and lp the log probabilities. All vectors are the same size.
15 | %
16 | % derivatives (of the log): [lp, dlp, d2lp, d3lp] = lik(y, f, 'deriv')
17 | %
18 | % where lp is a number (sum of the log probabilities for each case) and the
19 | % derivatives (up to order 3) of the logs wrt the latent values are vectors
20 | % (as the likelihood factorizes there are no mixed terms).
21 | %
22 | % moments wrt Gaussian measure: [m0, m1, m2] = lik(y, mu, var)
23 | %
24 | % where mk is the k'th moment: \int f^k lik(y,f) N(f|mu,var) df, and if y is
25 | % empty, it is assumed to be a vector of ones.
26 | %
27 | % See the help for the individual likelihood for the computations specific to
28 | % each likelihood function.
29 | %
30 | % Copyright (c) 2007 Carl Edward Rasmussen and Hannes Nickisch 2007-04-11.
31 |
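The three modes described above can be exercised with either bundled likelihood, e.g. cumGauss (values illustrative):

    y = [1; -1; 1];  f = [0.5; -0.2; 2.0];             % binary targets, latent values
    [p, lp] = cumGauss(y, f);                          % (log) likelihood evaluation
    [lp, dlp, d2lp, d3lp] = cumGauss(y, f, 'deriv');   % derivatives of the log likelihood
    [m0, m1, m2] = cumGauss(y, zeros(3,1), ones(3,1)); % moments wrt a Gaussian measure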
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/solve_chol.c:
--------------------------------------------------------------------------------
1 | /* solve_chol - solve a linear system A*X = B using the cholesky factorization
2 |    of A (where A is square, symmetric and positive definite).
3 |
4 | Copyright (c) 2004 Carl Edward Rasmussen. 2004-10-19. */
5 |
6 | #include "mex.h"
7 | #include <string.h>
8 |
9 | extern int dpotrs_(char *, int *, int *, double *, int *, double *, int *, int *);
10 |
11 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
12 | {
13 | double *C;
14 | int n, m, q;
15 |
16 | if (nrhs != 2 || nlhs > 1) /* check the input */
17 | mexErrMsgTxt("Usage: X = solve_chol(R, B)");
18 | n = mxGetN(prhs[0]);
19 | if (n != mxGetM(prhs[0]))
20 | mexErrMsgTxt("Error: First argument matrix must be square");
21 | if (n != mxGetM(prhs[1]))
22 | mexErrMsgTxt("Error: First and second argument matrices must have same number of rows");
23 | m = mxGetN(prhs[1]);
24 |
25 | plhs[0] = mxCreateDoubleMatrix(n, m, mxREAL); /* allocate space for output */
26 | C = mxGetPr(plhs[0]);
27 |
28 | if (n==0) return; /* if argument was empty matrix, do no more */
29 | memcpy(C,mxGetPr(prhs[1]),n*m*sizeof(double)); /* copy argument matrix */
30 | dpotrs_("U", &n, &m, mxGetPr(prhs[0]), &n, C, &n, &q); /* solve system */
31 | if (q > 0)
32 | mexErrMsgTxt("Error: illegal input to solve_chol");
33 | }
34 |
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/solve_chol.m:
--------------------------------------------------------------------------------
1 | % solve_chol - solve linear equations from the Cholesky factorization.
2 | % Solve A*X = B for X, where A is square, symmetric, positive definite. The
3 | % input to the function is R the Cholesky decomposition of A and the matrix B.
4 | % Example: X = solve_chol(chol(A),B);
5 | %
6 | % NOTE: The program code is written in the C language for efficiency and is
7 | % contained in the file solve_chol.c, and should be compiled using Matlab's mex
8 | % facility. However, this file also contains a (less efficient) Matlab
9 | % implementation, supplied only as a help to people unfamiliar with mex. If
10 | % the C code has been properly compiled and is available, it automatically
11 | % takes precedence over the Matlab code in this file.
12 | %
13 | % Copyright (c) 2004, 2005, 2006 by Carl Edward Rasmussen. 2006-02-08.
14 |
15 | function x = solve_chol(A, B);
16 |
17 | if nargin ~= 2 | nargout > 1
18 | error('Wrong number of arguments.');
19 | end
20 |
21 | if size(A,1) ~= size(A,2) | size(A,1) ~= size(B,1)
22 | error('Wrong sizes of matrix arguments.');
23 | end
24 |
25 | x = A\(A'\B);
26 |
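A quick sanity check of the documented call, with a random symmetric positive definite matrix (illustrative):

    A = randn(5);  A = A*A' + 5*eye(5);     % random SPD matrix
    B = randn(5,2);
    X = solve_chol(chol(A), B);             % equivalent to A\B, reusing the Cholesky factor
    norm(A*X - B)                           % should be close to machine precision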
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/sq_dist.c:
--------------------------------------------------------------------------------
1 | /* sq_dist - a mex function to compute a matrix of all pairwise squared
2 | distances between two sets of vectors, stored in the columns of the two
3 | matrices that are arguments to the function. The length of the vectors must
4 | agree. If only a single argument is given, the missing argument is taken to
5 | be identical to the first. If an optional third matrix argument Q is given,
6 | it must be of the same size as the output, but in this case a vector of the
7 | traces of the product of Q and the coordinatewise squared distances is
8 | returned.
9 |
10 | Copyright (c) 2003, 2004 Carl Edward Rasmussen. 2003-04-22. */
11 |
12 | #include "mex.h"
13 | #include <math.h>
14 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
15 | {
16 | double *a, *b, *C, *Q, z, t;
17 | int D, n, m, i, j, k;
18 | if (nrhs < 1 || nrhs > 3 || nlhs > 1)
19 | mexErrMsgTxt("Usage: C = sq_dist(a,b)\n or: C = sq_dist(a)\n or: c = sq_dist(a,b,Q)\nwhere the b matrix may be empty.");
20 | a = mxGetPr(prhs[0]);
21 | m = mxGetN(prhs[0]);
22 | D = mxGetM(prhs[0]);
23 | if (nrhs == 1 || mxIsEmpty(prhs[1])) {
24 | b = a;
25 | n = m;
26 | } else {
27 | b = mxGetPr(prhs[1]);
28 | n = mxGetN(prhs[1]);
29 | if (D != mxGetM(prhs[1]))
30 | mexErrMsgTxt("Error: column lengths must agree");
31 | }
32 | if (nrhs < 3) {
33 | plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL);
34 | C = mxGetPr(plhs[0]);
[listing truncated here: the remaining body of sq_dist.c, i.e. the loops that fill C with the pairwise squared distances and the weighted three-argument branch, is not reproduced]
--------------------------------------------------------------------------------
/ch4_codes/gpr/gpml-matlab/gpml/sq_dist.m:
--------------------------------------------------------------------------------
[listing truncated: the help comment and function declaration of sq_dist.m are not reproduced]
30 | if nargin < 1 | nargin > 3 | nargout > 1
31 | error('Wrong number of arguments.');
32 | end
33 |
34 | if nargin == 1 | isempty(b) % input arguments are taken to be
35 | b = a; % identical if b is missing or empty
36 | end
37 |
38 | [D, n] = size(a);
39 | [d, m] = size(b);
40 | if d ~= D
41 | error('Error: column lengths must agree.');
42 | end
43 |
44 | if nargin < 3
45 | C = zeros(n,m);
46 | for d = 1:D
47 | C = C + (repmat(b(d,:), n, 1) - repmat(a(d,:)', 1, m)).^2;
48 | end
49 | % C = repmat(sum(a.*a)',1,m)+repmat(sum(b.*b),n,1)-2*a'*b could be used to
50 | % replace the 3 lines above; it would be faster, but numerically less stable.
51 | else
52 | if [n m] == size(Q)
53 | C = zeros(D,1);
54 | for d = 1:D
55 | C(d) = sum(sum((repmat(b(d,:), n, 1) - repmat(a(d,:)', 1, m)).^2.*Q));
56 | end
57 | else
58 | error('Third argument has wrong size.');
59 | end
60 | end
61 |
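A small check of sq_dist against the vectorized one-liner quoted in the comment above (illustrative data):

    a = randn(3,4);  b = randn(3,6);        % columns are the input vectors
    C1 = sq_dist(a, b);                     % 4-by-6 matrix of squared distances
    C2 = repmat(sum(a.*a)',1,6) + repmat(sum(b.*b),4,1) - 2*a'*b;
    max(max(abs(C1 - C2)))                  % small, up to numerical error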
--------------------------------------------------------------------------------
/ch4_codes/mg_prediction/MK30.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch4_codes/mg_prediction/MK30.mat
--------------------------------------------------------------------------------
/ch4_codes/mg_prediction/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2,1)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2,1)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2,1)*ker_param))';
13 | else
14 | warning('error dimension--')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('error dimension--')
26 | end
27 | end
28 | if strcmp(ker_type,'CO2')
29 |
30 | % if N1 == N2
31 | % y = (66^2*exp(-(X1-X2).^2/(2*67^2)) + ...
32 | % 2.4^2*exp(-(X1-X2).^2/(2*90^2) - 2*(sin(pi*(X1-X2))).^2/(1.3^2))+...
33 | % 0.66^2*(1+(X1-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
34 | % 0.18^2*exp(-(X1-X2).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2))';
35 | % elseif N1 == 1
36 | % y = (66^2*exp(-(X1*ones(1,N2)-X2).^2/(2*67^2)) + ...
37 | % 2.4^2*exp(-(X1*ones(1,N2)-X2).^2/(2*90^2) - 2*(sin(pi*(X1*ones(1,N2)-X2))).^2/(1.3^2))+...
38 | % 0.66^2*(1+(X1*ones(1,N2)-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
39 | % 0.18^2*exp(-(X1*ones(1,N2)-X2).^2/(2*0.1333^2)) + 0.19^2*(X1*ones(1,N2) == X2))';
40 | % elseif N2 == 1
41 | % y = (66^2*exp(-(X1-X2*ones(1,N1)).^2/(2*67^2)) + ...
42 | % 2.4^2*exp(-(X1-X2*ones(1,N1)).^2/(2*90^2) - 2*(sin(pi*(X1-X2*ones(1,N1)))).^2/(1.3^2))+...
43 | % 0.66^2*(1+(X1-X2*ones(1,N1)).^2/(2*0.78*1.2^2)).^(-0.78) +...
44 | % 0.18^2*exp(-(X1-X2*ones(1,N1)).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2*ones(1,N1)))';
45 | % else
46 | % warning('error dimension--')
47 | % end
48 | if N1 == N2
49 | y = (4356*exp(-(X1-X2).^2*1.1138e-004) + ...
50 | 5.76*exp(-(X1-X2).^2*6.1728e-005 - (sin(pi*(X1-X2))).^2*1.1834)+...
51 | 0.4356*(1+(X1-X2).^2*0.4452).^(-0.78) +...
52 | 0.0324*exp(-(X1-X2).^2*28.1391) + 0.0361*(X1 == X2))';
53 | elseif N1 == 1
54 | y = (4356*exp(-(X1*ones(1,N2)-X2).^2*1.1138e-004) + ...
55 | 5.76*exp(-(X1*ones(1,N2)-X2).^2*6.1728e-005 - (sin(pi*(X1*ones(1,N2)-X2))).^2*1.1834)+...
56 | 0.4356*(1+(X1*ones(1,N2)-X2).^2*0.4452).^(-0.78) +...
57 | 0.0324*exp(-(X1*ones(1,N2)-X2).^2*28.1391) + 0.0361*(X1*ones(1,N2) == X2))';
58 | elseif N2 == 1
59 | y = (4356*exp(-(X1-X2*ones(1,N1)).^2*1.1138e-004) + ...
60 | 5.76*exp(-(X1-X2*ones(1,N1)).^2*6.1728e-005 - (sin(pi*(X1-X2*ones(1,N1)))).^2*1.1834)+...
61 | 0.4356*(1+(X1-X2*ones(1,N1)).^2*0.4452).^(-0.78) +...
62 | 0.0324*exp(-(X1-X2*ones(1,N1)).^2*28.1391) + 0.0361*(X1 == X2*ones(1,N1)))';
63 | else
64 | warning('error dimension--')
65 | end
66 |
67 | end
68 |
69 | return
70 |
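A short illustration for the Gaussian kernel; note that ker_param multiplies the squared distance directly, i.e. it plays the role of 1/(2h^2) for a kernel size h:

    X1 = randn(5,1);                           % a single 5-dimensional input (illustrative)
    X2 = randn(5,10);                          % ten centers
    y  = ker_eval(X1, X2, 'Gauss', 1);         % 10-by-1 vector of exp(-||X1-X2||^2)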
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/EX_KRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | EX_KRLS(trainInput,trainTarget,typeKernel,paramKernel,alphaParameter,regularizationFactor,forgettingFactor,qFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %extended Kernel recursive least squares
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %alphaParameter: alpha parameter in the state-space model
19 | %regularizationFactor: regularization parameter in Newton's recursion
20 | %
21 | %forgettingFactor: exponentially weighted value
22 | %
23 | %flagLearningCurve: control if calculating the learning curve
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %baseDictionary: dictionary stores all the bases centers
27 | %expansionCoefficient: coefficients of the kernel expansion
28 | %learningCurve: trainSize*1 used for learning curve
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 | %Notes: Since bases are by default all the training data, it is skipped
31 | % here.
32 |
33 |
34 | % memory initialization
35 | [inputDimension,trainSize] = size(trainInput);
36 |
37 | expansionCoefficient = zeros(trainSize,1);
38 |
39 | learningCurve = zeros(trainSize,1);
40 |
41 | Q_matrix = 0;
42 | roe = 1/(forgettingFactor*regularizationFactor);
43 |
44 | % start training
45 | for n = 1:trainSize
46 | ii = 1:n-1;
47 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
48 | f_vector = Q_matrix*k_vector;
49 | r_e = (forgettingFactor^(n)+ roe*ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
50 | s = 1/r_e;
51 |
52 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
53 | % updating
54 | expansionCoefficient(n) = alphaParameter*roe*s*error;
55 | expansionCoefficient(ii) = alphaParameter*(expansionCoefficient(ii) - f_vector*s*error);
56 |
57 | Q_tmp = zeros(n,n);
58 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
59 | Q_tmp(ii,n) = -roe*f_vector*s;
60 | Q_tmp(n,ii) = Q_tmp(ii,n)';
61 | Q_tmp(n,n) = roe^2*s;
62 | Q_matrix = alphaParameter^2*Q_tmp;
63 |
64 | roe = alphaParameter^2*roe + forgettingFactor^(n)*qFactor;
65 |
66 | learningCurve(n) = error^2;
67 |
68 | end
69 |
70 | return
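A minimal usage sketch for EX_KRLS; the parameter values mirror those in PART3.m and the data are illustrative:

    inputDimension = 5;  trainSize = 500;
    u = randn(1, trainSize + inputDimension);        % illustrative scalar series
    trainInput = zeros(inputDimension, trainSize);
    for n = 1:trainSize
        trainInput(:,n) = u(n:n+inputDimension-1)';  % time-delay embedding
    end
    trainTarget = randn(trainSize, 1);               % illustrative desired signal
    [coef, mse] = EX_KRLS(trainInput, trainTarget, 'Gauss', 1, 1, 0.001, 0.99, 0.01);
    plot(10*log10(mse)), xlabel('iteration'), ylabel('MSE (dB)')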
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/EX_RLS.m:
--------------------------------------------------------------------------------
1 | function [weightVector,learningCurve]= ...
2 | EX_RLS(trainInput,trainTarget,pInitial,forgettingFactor,alphaParameter,qFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function EX_RLS:
5 | %extended recursive least squares
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %
14 | %pInitial: the initial value for the P matrix, for example diagonal matrix
15 | %
16 | %forgettingFactor: the exponentially weighted value, very close to 1
17 | %
18 | %alphaParameter: alpha parameter is a scalar state transition factor,
19 | % usually close to 1
20 | %
21 | %qFactor: modeling the variation in the state, providing a tradeoff between
22 | %         the measurement noise and modeling variation, usually small
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %weightVector: the linear coefficients
26 | %biasTerm: the bias term
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: none.
30 |
31 | % memory initialization
32 | [inputDimension,trainSize] = size(trainInput);
33 |
34 | learningCurve = zeros(trainSize,1);
35 |
36 | weightVector = zeros(inputDimension,1);
37 |
38 | iForgettingFactor = 1/forgettingFactor;
39 | P_matrix = pInitial;
40 |
41 | % training
42 | for n = 1:trainSize
43 | u = trainInput(:,n);
44 | iGamma = 1+iForgettingFactor*u'*P_matrix*u;
45 | gamma = 1/iGamma;
46 | gain = alphaParameter*iForgettingFactor*P_matrix*u*gamma;
47 | error = trainTarget(n) - u'*weightVector;
48 | weightVector = alphaParameter*weightVector + gain*error;
49 | P_matrix = iForgettingFactor*alphaParameter^2*P_matrix - gain*gain'*iGamma + qFactor;
50 | learningCurve(n) = error^2;
51 |
52 | end
53 |
54 | return
55 |
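A corresponding usage sketch for the linear EX_RLS tracker (data and parameter values illustrative):

    trainInput = randn(5, 500);  trainTarget = randn(500, 1);   % illustrative data
    pInitial = eye(5);                                          % initial P matrix
    [w, mse] = EX_RLS(trainInput, trainTarget, pInitial, 0.99, 1, 0.01);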
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/KRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | KRLS(trainInput,trainTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %Kernel recursive least squares with exponential weighting
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %regularizationFactor: regularization parameter in Newton's recursion
19 | %
20 | %forgettingFactor: exponentially weighted value
21 | %
22 | %flagLearningCurve: control if calculating the learning curve
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %baseDictionary: dictionary stores all the bases centers
26 | %expansionCoefficient: coefficients of the kernel expansion
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: Since bases are by default all the training data, it is skipped
30 | % here.
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 |
36 | expansionCoefficient = zeros(trainSize,1);
37 |
38 | learningCurve = zeros(trainSize,1);
39 | learningCurve(1) = trainTarget(1)^2;
40 |
41 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
42 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
43 | % start training
44 | for n = 2:trainSize
45 | ii = 1:n-1;
46 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
47 | f_vector = Q_matrix*k_vector;
48 | s = 1/(regularizationFactor*forgettingFactor^(n)+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
49 | Q_tmp = zeros(n,n);
50 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
51 | Q_tmp(ii,n) = -f_vector*s;
52 | Q_tmp(n,ii) = Q_tmp(ii,n)';
53 | Q_tmp(n,n) = s;
54 | Q_matrix = Q_tmp;
55 |
56 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
57 |
58 | % updating
59 | expansionCoefficient(n) = s*error;
60 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
61 |
62 | learningCurve(n) = error^2;
63 |
64 | end
65 |
66 | return
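A usage sketch for KRLS; since every training input serves as a center, predictions at a new point combine the stored inputs with the expansion coefficients (data illustrative):

    trainInput = randn(5, 200);  trainTarget = randn(200, 1);   % illustrative data
    [coef, mse] = KRLS(trainInput, trainTarget, 'Gauss', 1, 0.001, 1);
    xnew = randn(5, 1);
    yhat = ker_eval(xnew, trainInput, 'Gauss', 1)' * coef;      % prediction at xnew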
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/LMS2.m:
--------------------------------------------------------------------------------
1 | function [weightVector,biasTerm,learningCurve]= ...
2 | LMS2(trainInput,trainTarget,regularizationFactor,stepSizeWeightVector,stepSizeBias)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS2:
5 | %Normalized least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %
14 | %regularizationFactor: regularization factor in Newton's recursion
15 | %
16 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
17 | %stepSizeBias: learning rate for bias term, set to zero to disable
18 | %
19 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
20 | %
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | %Output:
23 | %aprioriErr: apriori error
24 | %weightVector: the linear coefficients
25 | %biasTerm: the bias term
26 | %learningCurve: trainSize*1 used for learning curve
27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
28 | %Notes: none.
29 |
30 | % memory initialization
31 | [inputDimension,trainSize] = size(trainInput);
32 |
33 | learningCurve = zeros(trainSize,1);
34 |
35 | weightVector = zeros(inputDimension,1);
36 | biasTerm = 0;
37 |
38 | % training
39 | for n = 1:trainSize
40 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
41 | aprioriErr = trainTarget(n) - networkOutput;
42 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n)/(sum(trainInput(:,n).^2) + regularizationFactor);
43 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
44 | learningCurve(n) = aprioriErr^2;
45 | end
46 |
47 | return
48 |
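A usage sketch for the normalized LMS routine (data and step sizes illustrative):

    trainInput = randn(5, 200);  trainTarget = randn(200, 1);   % illustrative data
    [w, b, mse] = LMS2(trainInput, trainTarget, 1e-3, 0.2, 0.01);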
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/PART3.m:
--------------------------------------------------------------------------------
1 | % Lorenz model tracking
2 | % Weifeng Liu
3 | % Jul. 2008.
4 | %
5 | % Description:
6 | % kernel size in EX-KRLS in Lorenz signal modeling
7 | % kernel size
8 | %
9 | % Usage
10 | % Ch 4
11 | %
12 | % Outside functions used
13 | % EX-KRLS
14 |
15 |
16 | close all
17 | clear all
18 | clc
19 |
20 | load lorenz.mat
21 | %lorenz2 50000*1 double
22 | lorenz2 = lorenz2 - mean(lorenz2);
23 | lorenz2 = lorenz2/std(lorenz2);
24 |
25 | trainSize = 500;
26 | inputDimension = 5;
27 |
28 | predictionHorizon = 10;
29 |
30 | %Kernel parameters
31 | typeKernel = 'Gauss';
32 | paramKernel_v = logspace(-1,1,10);
33 | %paramKernel = 1;
34 |
35 | L = 20;
36 |
37 | for h=1:length(paramKernel_v)
38 |
39 | paramKernel = paramKernel_v(h);
40 | disp([num2str(paramKernel),' -> paramKernel'])
41 | disp([num2str(L),' Monte Carlo simulations. Please wait...'])
42 | ensembleLearningCurveExkrls = zeros(trainSize,1);
43 |
44 | for k = 1:L
45 | disp(k);
46 |
47 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
48 | %
49 | % Data Formatting
50 | %
51 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
52 | % Input training signal with data embedding
53 | inputSignal = lorenz2(k*trainSize:k*trainSize+trainSize+inputDimension+predictionHorizon+1);
54 |
55 | trainInput = zeros(inputDimension,trainSize);
56 | for kk = 1:trainSize
57 | trainInput(:,kk) = inputSignal(kk:kk+inputDimension-1);
58 | end
59 |
60 | % Desired training signal
61 | trainTarget = zeros(trainSize,1);
62 | for ii=1:trainSize
63 | trainTarget(ii) = inputSignal(ii+inputDimension+predictionHorizon-1);
64 | end
65 |
66 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
67 | %
68 | % Ex-KRLS
69 | %
70 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
71 | alphaParameterExkrls = 1;
72 | regularizationFactorExkrls = 0.001;
73 | forgettingFactorExkrls = 0.99;
74 | qFactorExkrls = 0.01;
75 | % flagLearningCurve = 1;
76 |
77 | [expansionCoefficientExkrls,learningCurveExkrls] = ...
78 | EX_KRLS(trainInput,trainTarget,typeKernel,paramKernel,alphaParameterExkrls,regularizationFactorExkrls,forgettingFactorExkrls,qFactorExkrls);
79 | %=========end of Ex_KRLS================
80 |
81 | ensembleLearningCurveExkrls = ensembleLearningCurveExkrls + learningCurveExkrls;
82 |
83 | end
84 | signalPower = std(inputSignal)^2;
85 |
86 | %%
87 | ensembleLearningCurveExkrls_dB = 10*log10(ensembleLearningCurveExkrls/L/signalPower);
88 |
89 | disp('====================================')
90 |
91 | mseMean(h) = mean(ensembleLearningCurveExkrls_dB(end-100:end));
92 | mseStd(h) = std(ensembleLearningCurveExkrls_dB(end-100:end));
93 | disp([num2str(mseMean(h)),'+/-',num2str(mseStd(h))]);
94 |
95 | disp('====================================')
96 | end
97 |
98 |
99 | %%
100 | errorbar(paramKernel_v,mseMean,mseStd)
101 | set(gca, 'FontSize', 14);
102 |
103 | set(gca, 'FontName', 'Arial');
104 | xlabel('kernel parameter'),ylabel('MSE (dB)')
105 | grid on
106 | axis tight
107 |
108 |
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/RLS.m:
--------------------------------------------------------------------------------
1 | function [weightVector,learningCurve]= ...
2 | RLS(trainInput,trainTarget,pInitial,forgettingFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function RLS:
5 | %exponentially weighted recursive least squares
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %pInitial: the initial value for the P matrix, for example diagonal matrix
14 | %
15 | %forgettingFactor: the exponentially weighted value, very close to 1
16 | %
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 | %Output:
19 | %weightVector: the linear coefficients
20 | %biasTerm: the bias term
21 | %learningCurve: trainSize*1 used for learning curve
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Notes: none.
24 |
25 | % memory initialization
26 | [inputDimension,trainSize] = size(trainInput);
27 |
28 | learningCurve = zeros(trainSize,1);
29 |
30 | weightVector = zeros(inputDimension,1);
31 |
32 | iForgettingFactor = 1/forgettingFactor;
33 | P = pInitial;
34 |
35 | % training
36 | for n = 1:trainSize
37 | u = trainInput(:,n);
38 | iGamma = 1+iForgettingFactor*u'*P*u;
39 | gamma = 1/iGamma;
40 | gain = iForgettingFactor*P*u*gamma;
41 | error = trainTarget(n) - u'*weightVector;
42 | weightVector = weightVector + gain*error;
43 | P = iForgettingFactor*P - gain*gain'*iGamma;
44 |
45 | learningCurve(n) = error^2;
46 |
47 | end
48 |
49 | return
50 |
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/SWKRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | SWKRLS(K,trainInput,trainTarget,typeKernel,paramKernel,paramRegularization)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function sliding window kernel recursive least squares
5 | %
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %typeKernel: 'Gauss', 'Poly'
13 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
14 | %
15 | %paramRegularization: regularization parameter in cost function
16 | %
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 | %Output:
19 | %expansionCoefficient: coefficients of the kernel expansion
20 | %learningCurve: trainSize*1 used for learning curve
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | %Notes: Since bases are by default all the training data, it is skipped
23 | % here.
24 |
25 | % memory initialization
26 | [inputDimension,trainSize] = size(trainInput);
27 |
28 | expansionCoefficient = zeros(K,1);
29 |
30 | learningCurve = zeros(trainSize,1);
31 | learningCurve(1) = trainTarget(1)^2;
32 |
33 | Q_matrix = 1/(paramRegularization + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
34 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
35 | % start training
36 | for n = 2:K
37 | ii = 1:n-1;
38 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
39 | f_vector = Q_matrix*k_vector;
40 | s = 1/(paramRegularization+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
41 | Q_tmp = zeros(n,n);
42 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
43 | Q_tmp(ii,n) = -f_vector*s;
44 | Q_tmp(n,ii) = Q_tmp(ii,n)';
45 | Q_tmp(n,n) = s;
46 | Q_matrix = Q_tmp;
47 |
48 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
49 |
50 | % updating
51 | expansionCoefficient(n) = s*error;
52 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
53 |
54 | learningCurve(n) = error^2;
55 | end
56 |
57 | % start training
58 | for n = K+1:trainSize
59 |
60 | k_vector = ker_eval(trainInput(:,n),trainInput(:,n-K:n-1),typeKernel,paramKernel);
61 | error = trainTarget(n) - k_vector'*expansionCoefficient;
62 |
63 | % updating
64 | expansionCoefficient = inv(paramRegularization*eye(K) + gramMatrix(trainInput(:,n-K+1:n),typeKernel,paramKernel))*trainTarget(n-K+1:n);
65 |
66 | learningCurve(n) = error^2;
67 | end
68 |
69 | return
70 |
71 |
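A usage sketch for the sliding-window KRLS; K is the window length, and the returned coefficients correspond to the last K training inputs (values illustrative):

    trainInput = randn(5, 500);  trainTarget = randn(500, 1);   % illustrative data
    K = 50;                                                     % window length
    [coef, mse] = SWKRLS(K, trainInput, trainTarget, 'Gauss', 1, 0.01);
    xnew = randn(5, 1);
    yhat = ker_eval(xnew, trainInput(:, end-K+1:end), 'Gauss', 1)' * coef;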
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/SWKRLS2.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | SWKRLS2(K,trainInput,trainTarget,typeKernel,paramKernel,paramRegularization)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function sliding window kernel recursive least squares
5 | %
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %typeKernel: 'Gauss', 'Poly'
13 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
14 | %
15 | %paramRegularization: regularization parameter in cost function
16 | %
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 | %Output:
19 | %expansionCoefficient: coefficients of the kernel expansion
20 | %learningCurve: trainSize*1 used for learning curve
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | %Notes: Since bases are by default all the training data, it is skipped
23 | % here.
24 |
25 | % memory initialization
26 | [inputDimension,trainSize] = size(trainInput);
27 |
28 | expansionCoefficient = zeros(K,1);
29 |
30 | learningCurve = zeros(trainSize,1);
31 | learningCurve(1) = trainTarget(1)^2;
32 |
33 | Q_matrix = 1/(paramRegularization + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
34 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
35 | % start training
36 | for n = 2:K
37 | ii = 1:n-1;
38 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
39 | f_vector = Q_matrix*k_vector;
40 | s = 1/(paramRegularization+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
41 | Q_tmp = zeros(n,n);
42 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
43 | Q_tmp(ii,n) = -f_vector*s;
44 | Q_tmp(n,ii) = Q_tmp(ii,n)';
45 | Q_tmp(n,n) = s;
46 | Q_matrix = Q_tmp;
47 |
48 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
49 |
50 | % updating
51 | expansionCoefficient(n) = s*error;
52 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
53 |
54 | learningCurve(n) = error^2;
55 | end
56 |
57 | % start training
58 | for n = K+1:trainSize
59 |
60 | k_vector = ker_eval(trainInput(:,n),trainInput(:,n-K:n-1),typeKernel,paramKernel);
61 | error = trainTarget(n) - k_vector'*expansionCoefficient;
62 |
63 | % updating
64 | f_vector = Q_matrix*k_vector;
65 | s = 1/(paramRegularization+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
66 | Q_tmp = zeros(K+1,K+1);
67 | Q_tmp(1:K,1:K) = Q_matrix + f_vector*f_vector'*s;
68 | Q_tmp(1:K,K+1) = -f_vector*s;
69 | Q_tmp(K+1,1:K) = Q_tmp(1:K,K+1)';
70 | Q_tmp(K+1,K+1) = s;
71 |
72 | Q_matrix = Q_tmp(2:K+1,2:K+1) - Q_tmp(2:K+1,1)*Q_tmp(2:K+1,1)'/Q_tmp(1,1);
73 |
74 | expansionCoefficient = Q_matrix*trainTarget(n-K+1:n);
75 |
76 | learningCurve(n) = error^2;
77 | end
78 |
79 | return
80 |
81 |
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/gramMatrix.m:
--------------------------------------------------------------------------------
1 | function G = gramMatrix(data,typeKernel,paramKernel)
2 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
3 | %Function gramMatrix
4 | %Calculate the gram matrix of data
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %inputs:
7 | %data: inputDimension*dataSize, the output matrix will be
8 | % dataSize-by-dataSize
9 | %typeKernel: 'Gauss','Poly'
10 | %paramKernel: parameter used in kernel
11 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
12 | %outputs:
13 | %G: GramMatrix of data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | %Notes: none.
16 |
17 | [inputDimension,dataSize] = size(data);
18 | G = zeros(dataSize,dataSize);
19 |
20 | for ii = 1:dataSize
21 | jj = ii:dataSize;
22 | G(ii,jj) = ker_eval(data(:,ii),data(:,jj),typeKernel,paramKernel);
23 | G(jj,ii) = G(ii,jj);
24 | end
25 | return
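A small usage sketch; the result is a symmetric dataSize-by-dataSize kernel matrix:

    data = randn(3, 10);                 % 10 points in 3 dimensions (illustrative)
    G = gramMatrix(data, 'Gauss', 1);    % 10-by-10 Gram matrix
    isequal(G, G')                       % symmetric by construction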
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2)*ker_param))';
13 | else
14 | warning('error dimension--')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('error dimension--')
26 | end
27 | end
28 | return
29 |
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/kernelparameter.fig:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch5_codes/Lorenz/kernelparameter.fig
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/lorenz.m:
--------------------------------------------------------------------------------
1 | close all
2 | clear all
3 |
4 | num = 10000;
5 |
6 | A = [ -8/3 0 0; 0 -10 10; 0 28 -1 ];
7 |
8 | y = zeros(3,num+1);
9 | y(:,1) = [35 -10 -7]';
10 |
11 | h = 0.01;
12 | p = plot3(y(1),y(2),y(3),'.', ...
13 | 'EraseMode','none','MarkerSize',5); % Set EraseMode to none
14 | axis([0 50 -25 25 -25 25])
15 | hold on
16 | for i=1:num
17 | A(1,3) = y(2,i);
18 | A(3,1) = -y(2,i);
19 | ydot = A*y(:,i);
20 | y(:,i+1) = y(:,i) + h*ydot;
21 | % Change coordinates
22 | % set(p,'XData',y(1,i),'YData',y(2,i),'ZData',y(3,i))
23 | % drawnow
24 | % i=i+1;
25 | end
26 |
27 | figure,plot(y(1,:))
28 | figure,plot(y(2,:))
29 | figure,plot(y(3,:))
30 |
31 | lorenz2 = y(1,:);
32 | save lorenz.mat lorenz2
33 |
34 | %%
35 | figure
36 | p = plot3(y(1),y(2),y(3),'.', ...
37 | 'EraseMode','none','MarkerSize',5); % Set EraseMode to none
38 | axis([0 50 -25 25 -25 25])
39 | hold on
40 | for i=1:2000
41 | % Change coordinates
42 | set(p,'XData',y(1,i),'YData',y(2,i),'ZData',y(3,i))
43 | drawnow
44 | end
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/lorenz.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch5_codes/Lorenz/lorenz.mat
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/lorenz_data_display.m:
--------------------------------------------------------------------------------
1 | close all
2 | clear all
3 |
4 | num = 5000;
5 |
6 | A = [ -8/3 0 0; 0 -10 10; 0 28 -1 ];
7 |
8 | y = zeros(3,num+1);
9 | y(:,1) = [35 -10 -7]';
10 |
11 | h = 0.01;
12 | for i=1:num
13 | A(1,3) = y(2,i);
14 | A(3,1) = -y(2,i);
15 | ydot = A*y(:,i);
16 | y(:,i+1) = y(:,i) + h*ydot;
17 | % Change coordinates
18 | end
19 |
20 | figure,plot(y(1,:),'lineWidth',2)
21 | set(gca, 'FontSize', 14);
22 | set(gca, 'FontName', 'Arial');
23 | xlabel('time')
24 | ylabel('x')
25 | axis tight
26 |
27 | figure,plot(y(2,:))
28 | figure,plot(y(3,:))
29 |
30 | %%
31 | figure
32 | p = plot3(y(1),y(2),y(3),'.', ...
33 | 'EraseMode','none','MarkerSize',5); % Set EraseMode to none
34 | axis([0 50 -25 25 -25 25])
35 | set(gca, 'FontSize', 14);
36 | set(gca, 'FontName', 'Arial');
37 | xlabel('x')
38 | ylabel('y')
39 | zlabel('z')
40 |
41 | hold on
42 | for i=1:2000
43 | % Change coordinates
44 | set(p,'XData',y(1,i),'YData',y(2,i),'ZData',y(3,i))
45 | drawnow
46 | end
47 |
48 | %%
49 | figure
50 | axis([0 50 -25 25 -25 25])
51 | set(gca, 'FontSize', 14);
52 | set(gca, 'FontName', 'Arial');
53 | xlabel('x')
54 | ylabel('y')
55 | zlabel('z')
56 |
57 | hold on
58 | for i=1:2000
59 | plot3(y(1,i),y(2,i),y(3,i),'.','MarkerSize',5);
60 | end
61 |
62 |
63 |
--------------------------------------------------------------------------------
/ch5_codes/Lorenz/nlG.m:
--------------------------------------------------------------------------------
1 | function out = nlG(input,param,flag)
2 |
3 | switch flag
4 | case 0
5 | out = param*input;
6 | case 1
7 | out = (1-exp(-param*input))./(1+exp(-param*input));
8 | case 2
9 | out = (1-param)*input + param*input.^2;
10 | case 3
11 | out = sin(param*input);
12 | case 4 %threshold cut off
13 | out = input;
14 | out(find(out>param)) = param;
15 | out(find(out<-param)) = -param;
16 | otherwise
17 | warning('nlG');
18 | end
19 | return
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/EX_KRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | EX_KRLS(trainInput,trainTarget,typeKernel,paramKernel,alphaParameter,regularizationFactor,forgettingFactor,qFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %extended Kernel recursive least squares
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %alphaParameter: alpha parameter in the state-space model
19 | %regularizationFactor: regularization parameter in Newton's recursion
20 | %
21 | %forgettingFactor: exponentially weighted value
22 | %
23 | %flagLearningCurve: control if calculating the learning curve
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %baseDictionary: dictionary stores all the bases centers
27 | %expansionCoefficient: coefficients of the kernel expansion
28 | %learningCurve: trainSize*1 used for learning curve
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 | %Notes: Since bases are by default all the training data, it is skipped
31 | % here.
32 |
33 |
34 | % memory initialization
35 | [inputDimension,trainSize] = size(trainInput);
36 |
37 | expansionCoefficient = zeros(trainSize,1);
38 |
39 | learningCurve = zeros(trainSize,1);
40 |
41 | Q_matrix = 0;
42 | roe = 1/(forgettingFactor*regularizationFactor);
43 |
44 | % start training
45 | for n = 1:trainSize
46 | ii = 1:n-1;
47 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
48 | f_vector = Q_matrix*k_vector;
49 | r_e = (forgettingFactor^(n)+ roe*ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
50 | s = 1/r_e;
51 |
52 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
53 | % updating
54 | expansionCoefficient(n) = alphaParameter*roe*s*error;
55 | expansionCoefficient(ii) = alphaParameter*(expansionCoefficient(ii) - f_vector*s*error);
56 |
57 | Q_tmp = zeros(n,n);
58 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
59 | Q_tmp(ii,n) = -roe*f_vector*s;
60 | Q_tmp(n,ii) = Q_tmp(ii,n)';
61 | Q_tmp(n,n) = roe^2*s;
62 | Q_matrix = alphaParameter^2*Q_tmp;
63 |
64 | roe = alphaParameter^2*roe + forgettingFactor^(n)*qFactor;
65 |
66 | learningCurve(n) = error^2;
67 |
68 | end
69 |
70 | return
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/EX_RLS.m:
--------------------------------------------------------------------------------
1 | function [weightVector,learningCurve]= ...
2 | EX_RLS(trainInput,trainTarget,pInitial,forgettingFactor,alphaParameter,qFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function EX_RLS:
5 | %extended recursive least squares
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %
14 | %pInitial: the initial value for the P matrix, for example diagonal matrix
15 | %
16 | %forgettingFactor: the exponentially weighted value, very close to 1
17 | %
18 | %alphaParameter: alpha parameter is a scalar state transition factor,
19 | % usually close to 1
20 | %
21 | %qFactor: modeling the variation in the state, providing a tradeoff between
22 | %         the measurement noise and modeling variation, usually small
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %weightVector: the linear coefficients
26 | %biasTerm: the bias term
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: none.
30 |
31 | % memory initialization
32 | [inputDimension,trainSize] = size(trainInput);
33 |
34 | learningCurve = zeros(trainSize,1);
35 |
36 | weightVector = zeros(inputDimension,1);
37 |
38 | iForgettingFactor = 1/forgettingFactor;
39 | P_matrix = pInitial;
40 |
41 | % training
42 | for n = 1:trainSize
43 | u = trainInput(:,n);
44 | iGamma = 1+iForgettingFactor*u'*P_matrix*u;
45 | gamma = 1/iGamma;
46 | gain = alphaParameter*iForgettingFactor*P_matrix*u*gamma;
47 | error = trainTarget(n) - u'*weightVector;
48 | weightVector = alphaParameter*weightVector + gain*error;
49 | P_matrix = iForgettingFactor*alphaParameter^2*P_matrix - gain*gain'*iGamma + qFactor;
50 | learningCurve(n) = error^2;
51 |
52 | end
53 |
54 | return
55 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/KRLS.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | KRLS(trainInput,trainTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %Kernel recursive least squares with exponential weighting
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %regularizationFactor: regularization parameter in Newton's recursion
19 | %
20 | %forgettingFactor: exponentially weighted value
21 | %
22 | %flagLearningCurve: control if calculating the learning curve
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %baseDictionary: dictionary stores all the bases centers
26 | %expansionCoefficient: coefficients of the kernel expansion
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: Since bases are by default all the training data, it is skipped
30 | % here.
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 |
36 | expansionCoefficient = zeros(trainSize,1);
37 |
38 | learningCurve = zeros(trainSize,1);
39 | learningCurve(1) = trainTarget(1)^2;
40 |
41 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
42 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
43 | % start training
44 | for n = 2:trainSize
45 | ii = 1:n-1;
46 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
47 | f_vector = Q_matrix*k_vector;
48 | s = 1/(regularizationFactor*forgettingFactor^(n)+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
49 | Q_tmp = zeros(n,n);
50 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
51 | Q_tmp(ii,n) = -f_vector*s;
52 | Q_tmp(n,ii) = Q_tmp(ii,n)';
53 | Q_tmp(n,n) = s;
54 | Q_matrix = Q_tmp;
55 |
56 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
57 |
58 | % updating
59 | expansionCoefficient(n) = s*error;
60 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
61 |
62 | learningCurve(n) = error^2;
63 |
64 | end
65 |
66 | return
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/KRLS_ALD.asv:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,dictionaryIndex,learningCurve,CI] = ...
2 | KRLS_ALD(trainInput,trainTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor,th1)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function kernel recursive least squares approximate linear dependency
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %Input:
7 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
8 | % trainSize is the number of training data
9 | %trainTarget: desired signal for training trainSize*1
10 | %
11 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
12 | %testTarget: desired signal for testing testSize*1
13 | %
14 | %typeKernel: 'Gauss', 'Poly'
15 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
16 | %
17 | %regularizationFactor: regularization parameter in Newton's recursion
18 | %
19 | %forgettingFactor: exponentially weighted value
20 | %
21 | %th1,th2: thresholds to categorize data into redundant, learnable, abnormal
22 | %
23 | %flagLearningCurve: control if calculating the learning curve
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %baseDictionary: dictionary stores all the bases centers
27 | %expansionCoefficient: coefficients of the kernel expansion
28 | %learningCurve: trainSize*1 used for learning curve
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 |
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 |
36 | learningCurve = zeros(trainSize,1);
37 | learningCurve(1) = trainTarget(1)^2;
38 |
39 |
40 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
41 |
42 | expansionCoefficient = Q_matrix*trainTarget(1);
43 | % dictionary
44 | dictionaryIndex = 1;
45 | dictSize = 1;
46 |
47 | predictionVar = regularizationFactor*forgettingFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel);
48 |
49 | CI = zeros(trainSize,1);
50 |
51 | CI(1) = log(predictionVar)/2;
52 |
53 | % start training
54 | for n = 2:trainSize
55 |
56 | %calc the Conditional Information
57 | k_vector = ker_eval(trainInput(:,n),trainInput(:,dictionaryIndex),typeKernel,paramKernel);
58 | networkOutput = expansionCoefficient*k_vector;
59 | predictionError = trainTarget(n) - networkOutput;
60 | f_vector = Q_matrix*k_vector;
61 |
62 | predictionVar = regularizationFactor*forgettingFactor^(n) + ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) -...
63 | k_vector'*f_vector;
64 |
65 | CI(n) = log (predictionVar)/2;
66 |
67 | if (CI(n) > th1)
68 |
69 |
70 | %update Q_matrix
71 | s = 1/predictionVar;
72 | Q_tmp = zeros(dictSize+1,dictSize+1);
73 | Q_tmp(1:dictSize,1:dictSize) = Q_matrix + f_vector*f_vector'*s;
74 | Q_tmp(1:dictSize,dictSize+1) = -f_vector*s;
75 | Q_tmp(dictSize+1,1:dictSize) = Q_tmp(1:dictSize,dictSize+1)';
76 | Q_tmp(dictSize+1,dictSize+1) = s;
77 | Q_matrix = Q_tmp;
78 |
79 | % updating coefficients
80 | dictSize = dictSize + 1;
81 | dictionaryIndex(dictSize) = n;
82 | expansionCoefficient(dictSize) = s*predictionError;
83 | expansionCoefficient(1:dictSize-1) = expansionCoefficient(1:dictSize-1) - f_vector'*expansionCoefficient(dictSize);
84 |
85 | learningCurve(n) = predictionError^2;
86 |
87 | else %redundant and abnormal
88 | learningCurve(n) = learningCurve(n-1);
89 |
90 | end
91 | end
92 |
93 | return
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/KRLS_ALD.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,dictionaryIndex,learningCurve,CI] = ...
2 | KRLS_ALD(trainInput,trainTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor,th1)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function kernel recursive least squares approximate linear dependency
5 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
6 | %Input:
7 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
8 | % trainSize is the number of training data
9 | %trainTarget: desired signal for training trainSize*1
10 | %
11 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
12 | %testTarget: desired signal for testing testSize*1
13 | %
14 | %typeKernel: 'Gauss', 'Poly'
15 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
16 | %
17 | %regularizationFactor: regularization parameter in Newton's recursion
18 | %
19 | %forgettingFactor: exponentially weighted value
20 | %
21 | %th1,th2: thresholds to categorize data into redundant, learnable, abnormal
22 | %
23 | %flagLearningCurve: control if calculating the learning curve
24 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
25 | %Output:
26 | %baseDictionary: dictionary stores all the bases centers
27 | %expansionCoefficient: coefficients of the kernel expansion
28 | %learningCurve: trainSize*1 used for learning curve
29 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
30 |
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 |
36 | learningCurve = zeros(trainSize,1);
37 | learningCurve(1) = trainTarget(1)^2;
38 |
39 |
40 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
41 |
42 | expansionCoefficient = Q_matrix*trainTarget(1);
43 | % dictionary
44 | dictionaryIndex = 1;
45 | dictSize = 1;
46 |
47 | predictionVar = regularizationFactor*forgettingFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel);
48 |
49 | CI = zeros(trainSize,1);
50 |
51 | CI(1) = log(predictionVar)/2;
52 |
53 | % start training
54 | for n = 2:trainSize
55 |
56 | %calc the Conditional Information
57 | k_vector = ker_eval(trainInput(:,n),trainInput(:,dictionaryIndex),typeKernel,paramKernel);
58 | networkOutput = expansionCoefficient*k_vector;
59 | predictionError = trainTarget(n) - networkOutput;
60 | f_vector = Q_matrix*k_vector;
61 |
62 | predictionVar = regularizationFactor*forgettingFactor^(n) + ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) -...
63 | k_vector'*f_vector;
64 |
65 | CI(n) = log (predictionVar)/2;
66 |
67 | if (CI(n) > th1)
68 |
69 |
70 | %update Q_matrix
71 | s = 1/predictionVar;
72 | Q_tmp = zeros(dictSize+1,dictSize+1);
73 | Q_tmp(1:dictSize,1:dictSize) = Q_matrix + f_vector*f_vector'*s;
74 | Q_tmp(1:dictSize,dictSize+1) = -f_vector*s;
75 | Q_tmp(dictSize+1,1:dictSize) = Q_tmp(1:dictSize,dictSize+1)';
76 | Q_tmp(dictSize+1,dictSize+1) = s;
77 | Q_matrix = Q_tmp;
78 |
79 | % updating coefficients
80 | dictSize = dictSize + 1;
81 | dictionaryIndex(dictSize) = n;
82 | expansionCoefficient(dictSize) = s*predictionError;
83 | expansionCoefficient(1:dictSize-1) = expansionCoefficient(1:dictSize-1) - f_vector'*expansionCoefficient(dictSize);
84 |
85 | learningCurve(n) = predictionError^2;
86 |
87 | else %redundant and abnormal
88 | learningCurve(n) = learningCurve(n-1);
89 |
90 | end
91 | end
92 |
93 | return
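A usage sketch for the ALD-sparsified KRLS; th1 thresholds the conditional information CI, so only sufficiently novel inputs are admitted to the dictionary (data and threshold illustrative):

    trainInput = randn(5, 500);  trainTarget = randn(500, 1);   % illustrative data
    [coef, dictIndex, mse, CI] = ...
        KRLS_ALD(trainInput, trainTarget, 'Gauss', 1, 0.001, 1, -1);
    length(dictIndex)                                           % final dictionary size
    xnew = randn(5, 1);
    yhat = coef * ker_eval(xnew, trainInput(:, dictIndex), 'Gauss', 1);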
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/LMS2.m:
--------------------------------------------------------------------------------
1 | function [weightVector,biasTerm,learningCurve]= ...
2 | LMS2(trainInput,trainTarget,regularizationFactor,stepSizeWeightVector,stepSizeBias)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS2:
5 | %Normalized least mean square algorithms
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %
14 | %regularizationFactor: regularization factor in Newton's recursion
15 | %
16 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
17 | %stepSizeBias: learning rate for bias term, set to zero to disable
18 | %
19 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
20 | %
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | %Output:
23 | %aprioriErr: apriori error
24 | %weightVector: the linear coefficients
25 | %biasTerm: the bias term
26 | %learningCurve: trainSize*1 used for learning curve
27 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
28 | %Notes: none.
29 |
30 | % memory initialization
31 | [inputDimension,trainSize] = size(trainInput);
32 |
33 | learningCurve = zeros(trainSize,1);
34 |
35 | weightVector = zeros(inputDimension,1);
36 | biasTerm = 0;
37 |
38 | % training
39 | for n = 1:trainSize
40 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
41 | aprioriErr = trainTarget(n) - networkOutput;
42 | weightVector = weightVector + stepSizeWeightVector*aprioriErr*trainInput(:,n)/(sum(trainInput(:,n).^2) + regularizationFactor);
43 | biasTerm = biasTerm + stepSizeBias*aprioriErr;
44 | learningCurve(n) = aprioriErr^2;
45 | end
46 |
47 | return
48 |
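
Usage sketch (illustrative, not part of the repository): a hypothetical call to LMS2 on synthetic system-identification data; the data sizes, noise level, and step sizes below are assumptions.

    % identify a 5-tap linear system from 500 noisy samples (toy data)
    inputDimension = 5; trainSize = 500;
    trainInput  = randn(inputDimension, trainSize);
    trueWeights = randn(inputDimension, 1);
    trainTarget = (trueWeights'*trainInput)' + 0.01*randn(trainSize, 1);
    [weightVector, biasTerm, learningCurve] = ...
        LMS2(trainInput, trainTarget, 1e-3, 0.5, 0.05);
    % plot(10*log10(learningCurve)) shows the squared apriori error decaying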
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/MLE.m:
--------------------------------------------------------------------------------
1 | function [Pmle,p,sigma]=MLE(x,w) % MLE estimation
2 | %x is xn by D, w is xn by 1, Pmle is 1 by D, sigma is 1 by D given by silverKerWidth
3 | [xn,D]=size(x);
4 | sigma=zeros(1,D);
5 | Pmle=zeros(1,D);
6 | p=zeros(xn,D); %smoothed pdf by kernel, xn by D
7 | for i=1:D
8 | sigma(i)=silverKerWidth(x(:,i)); % Silver Kernel Width
9 | [h,p(:,i)]=kernel(x(:,i),x(:,i),sigma(i),w); % kernel smooth
10 | end
11 | [PM,IM]=maxp(p); % find the max pdf and index for each column of p
12 | Pmle=x(IM);
13 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/RLS.m:
--------------------------------------------------------------------------------
1 | function [weightVector,learningCurve]= ...
2 | RLS(trainInput,trainTarget,pInitial,forgettingFactor)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function RLS:
5 | %Exponentially weighted recursive least squares (RLS)
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %pInitial: the initial value for the P matrix, for example diagonal matrix
14 | %
15 | %forgettingFactor: the exponential weighting (forgetting) factor, typically close to 1
16 | %
17 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
18 | %Output:
19 | %weightVector: the linear coefficients
20 | %(no bias term is estimated by this function)
21 | %learningCurve: trainSize*1 used for learning curve
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Notes: none.
24 |
25 | % memory initialization
26 | [inputDimension,trainSize] = size(trainInput);
27 |
28 | learningCurve = zeros(trainSize,1);
29 |
30 | weightVector = zeros(inputDimension,1);
31 |
32 | iForgettingFactor = 1/forgettingFactor;
33 | P = pInitial;
34 |
35 | % training
36 | for n = 1:trainSize
37 | u = trainInput(:,n);
38 | iGamma = 1+iForgettingFactor*u'*P*u;
39 | gamma = 1/iGamma;
40 | gain = iForgettingFactor*P*u*gamma;
41 | error = trainTarget(n) - u'*weightVector;
42 | weightVector = weightVector + gain*error;
43 | P = iForgettingFactor*P - gain*gain'*iGamma;
44 |
45 | learningCurve(n) = error^2;
46 |
47 | end
48 |
49 | return
50 |
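
Usage sketch (illustrative, not part of the repository): a hypothetical RLS run on synthetic data; the sizes, pInitial, and forgettingFactor values are assumptions chosen to match the header's guidance.

    % fit a 3-tap linear model from 200 noisy samples (toy data)
    inputDimension = 3; trainSize = 200;
    trainInput  = randn(inputDimension, trainSize);
    trueWeights = randn(inputDimension, 1);
    trainTarget = (trueWeights'*trainInput)' + 0.01*randn(trainSize, 1);
    pInitial = 100*eye(inputDimension);   % large initial P, i.e. a weak prior on the weights
    forgettingFactor = 0.995;             % close to 1, as the header suggests
    [weightVector, learningCurve] = RLS(trainInput, trainTarget, pInitial, forgettingFactor);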
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/SIR.m:
--------------------------------------------------------------------------------
1 | function [Xexp,Xmle,Xrecord]=SIR(x0,u,z,Q,R,F,paramNonlinear,typeNonlinear)
2 |
3 | [xn,D]=size(x0);
4 | T=length(z);
5 |
6 | X=x0;
7 | XA=X;
8 | Xrecord=[reshape(x0,xn*D,1),zeros(xn*D,T-1)];
9 | Xexp=zeros(D,T);
10 | Xmle=zeros(D,T);
11 |
12 |
13 | %compute
14 | for k=2:T
15 |     if (rem(k, ceil(T/5)) == 0) % progress display at every 20% of the run
16 | disp([num2str(k/T*100),'%...'])
17 | end
18 | XA=genesam(X,F,Q); % generate new approximate samples
19 | WA=normw(XA,u(:,k),z(k),R,paramNonlinear,typeNonlinear); % get normalized approximate weight
20 | [X,W]=resample(XA,WA); % resample to reduce degeneracy
21 | Xrecord(:,k)=reshape(X,xn*D,1);
22 | Xexp(:,k)=mean(X);
23 | Xmle(:,k)=MLE(X,W);
24 | XA=X;
25 | end
26 |
27 |
28 |
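
Usage sketch (illustrative, not part of the repository): a hypothetical toy run of the SIR particle filter on a scalar AR(1) state with a sinusoidal observation model; all sizes, noise variances, and the choice paramNonlinear = 2, typeNonlinear = 3 (which makes nlG compute sin(2*x)) are assumptions. It assumes the helper functions in this folder (genesam, normw, resample, MLE, nlG) are on the path.

    xn = 500; D = 1; T = 100;                 % 500 particles, scalar state, 100 steps
    F = 0.95; Q = 0.1; R = 0.01;              % transition gain, process and observation noise
    x = zeros(1, T);
    for k = 2:T, x(k) = F*x(k-1) + sqrt(Q)*randn; end
    z = sin(2*x) + sqrt(R)*randn(1, T);       % observations matching nlG(., 2, 3)
    u = ones(1, T);                           % unit input so x*u(:,k) equals x inside normw
    x0 = randn(xn, D);                        % initial particle cloud
    [Xexp, Xmle] = SIR(x0, u, z, Q, R, F, 2, 3);
    % Xexp(1,:) is the posterior-mean track, Xmle(1,:) the kernel-smoothed MAP track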
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/genesam.m:
--------------------------------------------------------------------------------
1 | function X=genesam(x,F,Q) %generate new samples
2 | [xn,D]=size(x); % x is an xn*D matrix of samples, one sample per row
3 | v=zeros(xn,D);
4 | for i=1:D
5 | v(:,i)= normrnd(0,sqrt(Q(i))*ones(xn,1));
6 | end
7 | X=x*F+v; % new samples generated by system fk(.,.)
8 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 |         y = (exp(-sum((X1-X2).^2,1)*ker_param))';
9 |     elseif N1 == 1
10 |         y = (exp(-sum((X1*ones(1,N2)-X2).^2,1)*ker_param))';
11 |     elseif N2 == 1
12 |         y = (exp(-sum((X1-X2*ones(1,N1)).^2,1)*ker_param))';
13 | else
14 | warning('error dimension--')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('error dimension--')
26 | end
27 | end
28 | return
29 |
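
Quick sanity-check sketch (illustrative, not part of the repository): shapes and meaning of the two kernel types; the input dimension, number of centers, and kernel parameters are assumptions. Note that in the Gaussian branch ker_param multiplies the squared distance directly, i.e. it plays the role of 1/(2*h^2).

    X1 = randn(2, 1);                        % one input sample (N1 == 1)
    X2 = randn(2, 10);                       % ten dictionary centers
    kGauss = ker_eval(X1, X2, 'Gauss', 2);   % 10x1 vector, exp(-2*||X1 - X2(:,j)||^2)
    kPoly  = ker_eval(X1, X2, 'Poly', 3);    % 10x1 vector, (1 + X1'*X2(:,j))^3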
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/kernel.m:
--------------------------------------------------------------------------------
1 | function [h,p,gp,Pspk_x]=kernel(x,mu,sigma,w,spk)
2 | % kernel smoothing weighted (w) particles (x) with kernel centered at mu
3 | % and kernel size (sigma). spk is the spike train, used when the
4 | % spike-triggered x distribution needs to be calculated
5 | % output h -- histogram
6 | % p -- pdf
7 | % x is m*1 sample column vector at time k; w is m*1 weight column vector at time k
8 | % p is posterior density of all samples at time k, m*1 column vector
9 | % gp is the Gaussian kernel matrix, m by n
10 | if nargin<5
11 | m=length(x); % x has m evaluation points, m by 1
12 | n=length(mu); %mu and w each hold n samples as columns, n by 1
13 | if n<=1e4
14 | X=x*ones(1,n);
15 | Mu=ones(m,1)*mu';
16 | if prod(size(sigma))~=1
17 | sigma=ones(m,1)*sigma';
18 | end
19 | gp=normpdf(X,Mu,sigma);%/normpdf(1,1,(sigma));
20 | h=gp*w;
21 | p=h/sum(w);%divide by N samples if w are all ones.
22 | else
23 | matrixN=floor(n/1e4);
24 | gp=[];
25 | X=x*ones(1,1e4);
26 | for i=1:matrixN
27 | Mu=ones(m,1)*mu((i-1)*1e4+[1:1e4])';
28 | if prod(size(sigma))~=1
29 | sigma=ones(m,1)*sigma((i-1)*1e4+[1:1e4])';
30 | end
31 | gp=[gp,normpdf(X,Mu,sigma)];
32 | end
33 | h=sum(gp,2);
34 | p=mean(gp,2);
35 | end
36 | else
37 | m=length(x); % x has m evaluation points, m by 1
38 | n=length(mu); %mu and w each hold n samples as columns, n by 1
39 | if n<=1e4
40 | X=x*ones(1,n);
41 | Mu=ones(m,1)*mu';
42 | if prod(size(sigma))~=1
43 | sigma=ones(m,1)*sigma';
44 | end
45 | gp=normpdf(X,Mu,sigma);%/normpdf(1,1,(sigma));
46 | h=gp*w;
47 | p=h/sum(w);%divide by N samples if w are all ones.
48 | Pspk_x=gp*spk;
49 | else
50 | matrixN=floor(n/1e4);
51 | gp=zeros(m,1e4);
52 | Pspk_x=zeros(m,size(spk,2));
53 | X=x*ones(1,1e4);
54 | for i=1:matrixN
55 | Mu=ones(m,1)*mu((i-1)*1e4+[1:1e4])';
56 | if prod(size(sigma))~=1
57 | sigma=ones(m,1)*sigma((i-1)*1e4+[1:1e4])';
58 | end
59 | tmp=normpdf(X,Mu,sigma);
60 | gp=gp+tmp;
61 | Pspk_x=Pspk_x+tmp*spk((i-1)*1e4+[1:1e4],:);
62 | end
63 | h=sum(gp,2);
64 | p=mean(gp,2)/matrixN;
65 | Pspk_x=sum(Pspk_x,2);
66 | end
67 | end
68 |
69 |
70 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/ls.m:
--------------------------------------------------------------------------------
1 | function [W,y,e,R,P]=ls(x,d,alpha) % least squares algorithm
2 | [L,T]=size(x); % L-dimension, T-time duration
3 | [N,T]=size(d);
4 | W=zeros(L,N);
5 | R=zeros(L,L);
6 |
7 | % for i=1:L
8 | % for j=1:i
9 | % R(i,j)=mean(x(i,:).*x(j,:));
10 | % end
11 | % P(i)=mean(d(k,:).*x(i,:));
12 | % end
13 | % R=R+R'-diag(diag(R));
14 | R=x*x'/T; % auto correlation matrix
15 | P=x*d'/T; % cross correlation matrix
16 | if nargin < 3
17 | alpha=0;
18 | end
19 | W=inv(R+alpha*eye(size(R)))*P; %W- transfer function / weight
20 |
21 |
22 |
23 | y=W'*x; % prediction
24 | e=d-y; % error
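
Usage sketch (illustrative, not part of the repository): a hypothetical batch least-squares fit; the filter length, data size, and ridge term alpha are assumptions.

    x = randn(4, 1000);                            % L = 4 taps, T = 1000 snapshots
    d = [1 -0.5 0.2 0.1]*x + 0.1*randn(1, 1000);   % N = 1 desired signal (toy data)
    [W, y, e] = ls(x, d, 1e-3);                    % regularized normal-equation solution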
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/maxp.m:
--------------------------------------------------------------------------------
1 | function [PM,IM]=maxp(p) % p is N*T posterior density
2 | [N,T]=size(p);
3 | PM=zeros(1,T);
4 | IM=zeros(1,T);
5 | [PM,IM]=max(p);
6 | IM=[0:(T-1)]*N+IM;
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/nlG.m:
--------------------------------------------------------------------------------
1 | function out = nlG(input,param,flag)
2 |
3 | switch flag
4 | case 0
5 | out = param*input;
6 | case 1
7 | out = (1-exp(-param*input))./(1+exp(-param*input));
8 | case 2
9 | out = (1-param)*input + param*input.^2;
10 | case 3
11 | out = sin(param*input);
12 | case 4 %threshold cut off
13 | out = input;
14 | out(find(out>param)) = param;
15 | out(find(out<-param)) = -param;
16 | otherwise
17 | warning('nlG');
18 | end
19 | return
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/normw.m:
--------------------------------------------------------------------------------
1 | function w=normw(x,u,z,r,paramNonlinear,typeNonlinear)
2 | % get normalized weight xn*1 column vector at time k
3 | % x is xn*D sample matrix; u is D*1 column vector, system input.
4 | % z is 1*1 observation.
5 | [xn,D]=size(x);
6 | if isempty(paramNonlinear)==1 && isempty(typeNonlinear)==1
7 | w=normpdf(z*ones(xn,1),x.^2/20,sqrt(r)*ones(xn,1)); %p(zk|xk(i))~Gaussian
8 | else
9 | w=normpdf(z*ones(xn,1),nlG((x*u),paramNonlinear,typeNonlinear),sqrt(r)*ones(xn,1)); %p(zk|xk(i))~Gaussian
10 | end
11 |
12 | if sum(w)==0
13 | w=1/xn*ones(xn,1);
14 | % disp('warning: weights are all zeros');
15 | else
16 | w=w/sum(w);
17 | end
18 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/particlefilter.m:
--------------------------------------------------------------------------------
1 |
2 | function [ChannelExp,ChannelMle,Xrecord]=particlefilter(channel0,targetInput,targetOutput,Q,R,xn,F,paramNonlinear,typeNonlinear)
3 |
4 | u=targetInput;
5 | z=targetOutput;
6 |
7 | x0=channel0;
8 | [ChannelExp,ChannelMle,Xrecord]=SIR(x0,u,z,Q,R,F,paramNonlinear,typeNonlinear); % SIR particle filter
9 | % %draw(X,p,0); % draw all particles, using linear gray colormap
10 | % [XS1,p]=draw(X,1,W,sigma); % get the estimate state by maxlikehood (option 1)
11 | % XS2=draw(X,2,W,sigma); % get the estimate state by expectation (option 2)
12 | % % H=draw(X,3,W,sigma);
13 | % pdfobserv(X,p,XS1,XS2,10,xr,X0);
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/rayleigh.m:
--------------------------------------------------------------------------------
1 |
2 | function r = rayleigh(T_s,Ns,F_d)
3 |
4 | % Generates a Rayleigh fading channel
5 | %
6 | % T_s: symbol period
7 | % Ns : number of symbols
8 | % F_d: maximum Doppler spread. It can be a scalar or a vector.
9 | % If F_d is a scalar, then it corresponds to a constant
10 | % mobile speed over the simulation time. If F_d is a vector
11 | % whose length is equal to Ns, then it corresponds to a mobile
12 | % speed that is varying over the simulation time.
13 | %
14 |
15 | N = 20; % Assumed number of scatterers
16 |
17 | if (max(size(F_d))==1)
18 | f = (T_s*F_d)*[0:Ns-1];
19 | else
20 | f = (T_s*F_d).*[0:Ns-1];
21 | end
22 |
23 | phi = (2*pi)*rand(1,N);
24 | C = (randn(1,N)+i*randn(1,N))/sqrt(2*N);
25 | r = zeros(1,Ns);
26 | for j=1:N
27 | r = r+exp(i*2*pi*cos(phi(j))*f)*C(j);
28 | end
29 |
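
Usage sketch (illustrative, not part of the repository): a hypothetical fading realization; the symbol period, number of symbols, and Doppler spread are assumed values.

    T_s = 1e-3;                      % 1 ms symbol period
    Ns  = 1000;                      % number of symbols
    F_d = 50;                        % 50 Hz maximum Doppler spread
    r = rayleigh(T_s, Ns, F_d);      % 1 x Ns complex fading gains
    % abs(r) gives the Rayleigh-distributed envelope, angle(r) the phase rotation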
--------------------------------------------------------------------------------
/ch5_codes/RayleighChannelTracking/resample.m:
--------------------------------------------------------------------------------
1 | function [X,W,In]=resample(x,w)
2 |
3 | % resampling
4 |
5 | % input:  x- m*D sample matrix at time k;
6 | %         w- m*1 weight column vector at time k;
7 | % output: X- m*D sample matrix at time k after resampling;
8 | %         W- m*1 weight column vector at time k after resampling;
9 | [m,D]=size(x);
10 | X=zeros(m,D);
11 | W=zeros(m,1);
12 | In=zeros(m,1);
13 | c=cumsum(w); % cdf
14 | i=1;
15 | u1=rand(1)/m; % draw u1 uniformly distributed in [0, 1/m]
16 | u=zeros(m,1);
17 | for j=1:m
18 | u(j)=u1+(j-1)/m;
19 |     while (u(j)>c(i) && i<m)
20 |         i=i+1;
21 |     end
22 |     X(j,:)=x(i,:);     % select the i-th particle
23 |     W(j)=1/m;          % weights are uniform after resampling
24 |     In(j)=i;           % record the selected index
25 | end
58 | if flagLearningCurve, learningCurve(n) = learningCurve(n-1); end
59 | continue;
60 | end
61 | networkOutput = expansionCoefficient*ker_eval(trainInput(:,n),trainInput(:,dictionaryIndex),typeKernel,paramKernel) + weightVector'*trainInput(:,n) + biasTerm;
62 | predictionError = trainTarget(n) - networkOutput;
63 |
64 | % updating
65 | dictSize = dictSize + 1;
66 | dictionaryIndex(dictSize) = n;
67 | expansionCoefficient(dictSize) = stepSizeFeatureVector*predictionError;
68 |
69 | weightVector = weightVector + stepSizeWeightVector*predictionError*trainInput(:,n);
70 | biasTerm = biasTerm + stepSizeBias*predictionError;
71 |
72 | if flagLearningCurve == 1
73 | % testing
74 | y_te = zeros(testSize,1);
75 | for jj = 1:testSize
76 | %ii = 1:dictSize;
77 | y_te(jj) = expansionCoefficient*ker_eval(testInput(:,jj),trainInput(:,dictionaryIndex),typeKernel,paramKernel) + weightVector'*testInput(:,jj) + biasTerm;
78 | end
79 | err = testTarget - y_te;
80 | learningCurve(n) = mean(err.^2);
81 | end
82 | end
83 |
84 | return
85 |
86 |
--------------------------------------------------------------------------------
/ch6_codes/mg_prediction/LMS1.m:
--------------------------------------------------------------------------------
1 | function [aprioriErr,weightVector,biasTerm,learningCurve]= ...
2 | LMS1(trainInput,trainTarget,testInput,testTarget,stepSizeWeightVector,stepSizeBias,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function LMS1:
5 | %Standard (un-normalized) least mean square algorithm
6 | %
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | %Input:
9 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and trainSize
10 | % is the number of training data
11 | %trainTarget: desired signal for training trainSize*1
12 | %
13 | %testInput: testing input for calculating the learning curve,
14 | % inputDimension*testSize, testSize is the number of test data
15 | %testTarget: desired signal for testing testSize*1
16 | %
17 | %stepSizeWeightVector: learning rate for weight part, set to zero to disable
18 | %stepSizeBias: learning rate for bias term, set to zero to disable
19 | %
20 | %flagLearningCurve: A FLAG to indicate if learning curve is needed
21 | %
22 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
23 | %Output:
24 | %aprioriErr: apriori error
25 | %weightVector: the linear coefficients
26 | %biasTerm: the bias term
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: none.
30 |
31 | % memory initialization
32 | [inputDimension,trainSize] = size(trainInput);
33 |
34 | if flagLearningCurve
35 | learningCurve = zeros(trainSize,1);
36 | else
37 | learningCurve = [];
38 | end
39 |
40 | weightVector = zeros(inputDimension,1);
41 | biasTerm = 0;
42 | aprioriErr = zeros(trainSize,1);
43 |
44 | % training
45 | for n = 1:trainSize
46 | networkOutput = weightVector'*trainInput(:,n) + biasTerm;
47 | aprioriErr(n) = trainTarget(n) - networkOutput;
48 | weightVector = weightVector + stepSizeWeightVector*aprioriErr(n)*trainInput(:,n);
49 | biasTerm = biasTerm + stepSizeBias*aprioriErr(n);
50 | if flagLearningCurve
51 | % testing
52 | err = testTarget -(testInput'*weightVector + biasTerm);
53 | learningCurve(n) = mean(err.^2);
54 | end
55 | end
56 |
57 | return
58 |
--------------------------------------------------------------------------------
/ch6_codes/mg_prediction/MK30.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch6_codes/mg_prediction/MK30.mat
--------------------------------------------------------------------------------
/ch6_codes/mg_prediction/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2,1)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2,1)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2,1)*ker_param))';
13 | else
14 | warning('error dimension--')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('error dimension--')
26 | end
27 | end
28 | if strcmp(ker_type,'CO2')
29 |
30 | % if N1 == N2
31 | % y = (66^2*exp(-(X1-X2).^2/(2*67^2)) + ...
32 | % 2.4^2*exp(-(X1-X2).^2/(2*90^2) - 2*(sin(pi*(X1-X2))).^2/(1.3^2))+...
33 | % 0.66^2*(1+(X1-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
34 | % 0.18^2*exp(-(X1-X2).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2))';
35 | % elseif N1 == 1
36 | % y = (66^2*exp(-(X1*ones(1,N2)-X2).^2/(2*67^2)) + ...
37 | % 2.4^2*exp(-(X1*ones(1,N2)-X2).^2/(2*90^2) - 2*(sin(pi*(X1*ones(1,N2)-X2))).^2/(1.3^2))+...
38 | % 0.66^2*(1+(X1*ones(1,N2)-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
39 | % 0.18^2*exp(-(X1*ones(1,N2)-X2).^2/(2*0.1333^2)) + 0.19^2*(X1*ones(1,N2) == X2))';
40 | % elseif N2 == 1
41 | % y = (66^2*exp(-(X1-X2*ones(1,N1)).^2/(2*67^2)) + ...
42 | % 2.4^2*exp(-(X1-X2*ones(1,N1)).^2/(2*90^2) - 2*(sin(pi*(X1-X2*ones(1,N1)))).^2/(1.3^2))+...
43 | % 0.66^2*(1+(X1-X2*ones(1,N1)).^2/(2*0.78*1.2^2)).^(-0.78) +...
44 | % 0.18^2*exp(-(X1-X2*ones(1,N1)).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2*ones(1,N1)))';
45 | % else
46 | % warning('error dimension--')
47 | % end
48 | if N1 == N2
49 | y = (4356*exp(-(X1-X2).^2*1.1138e-004) + ...
50 | 5.76*exp(-(X1-X2).^2*6.1728e-005 - (sin(pi*(X1-X2))).^2*1.1834)+...
51 | 0.4356*(1+(X1-X2).^2*0.4452).^(-0.78) +...
52 | 0.0324*exp(-(X1-X2).^2*28.1391) + 0.0361*(X1 == X2))';
53 | elseif N1 == 1
54 | y = (4356*exp(-(X1*ones(1,N2)-X2).^2*1.1138e-004) + ...
55 | 5.76*exp(-(X1*ones(1,N2)-X2).^2*6.1728e-005 - (sin(pi*(X1*ones(1,N2)-X2))).^2*1.1834)+...
56 | 0.4356*(1+(X1*ones(1,N2)-X2).^2*0.4452).^(-0.78) +...
57 | 0.0324*exp(-(X1*ones(1,N2)-X2).^2*28.1391) + 0.0361*(X1*ones(1,N2) == X2))';
58 | elseif N2 == 1
59 | y = (4356*exp(-(X1-X2*ones(1,N1)).^2*1.1138e-004) + ...
60 | 5.76*exp(-(X1-X2*ones(1,N1)).^2*6.1728e-005 - (sin(pi*(X1-X2*ones(1,N1)))).^2*1.1834)+...
61 | 0.4356*(1+(X1-X2*ones(1,N1)).^2*0.4452).^(-0.78) +...
62 | 0.0324*exp(-(X1-X2*ones(1,N1)).^2*28.1391) + 0.0361*(X1 == X2*ones(1,N1)))';
63 | else
64 | warning('error dimension--')
65 | end
66 |
67 | end
68 |
69 | return
70 |
--------------------------------------------------------------------------------
/ch6_codes/regression/KRLS_old.m:
--------------------------------------------------------------------------------
1 | function [expansionCoefficient,learningCurve] = ...
2 | KRLS_old(trainInput,trainTarget,testInput,testTarget,typeKernel,paramKernel,regularizationFactor,forgettingFactor,flagLearningCurve)
3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
4 | %Function KRLS
5 | %Kernel recursive least squares with exponential weighting
6 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
7 | %Input:
8 | %trainInput: input signal inputDimension*trainSize, inputDimension is the input dimension and
9 | % trainSize is the number of training data
10 | %trainTarget: desired signal for training trainSize*1
11 | %
12 | %testInput: testing input, inputDimension*testSize, testSize is the number of the test data
13 | %testTarget: desired signal for testing testSize*1
14 | %
15 | %typeKernel: 'Gauss', 'Poly'
16 | %paramKernel: h (kernel size) for Gauss and p (order) for poly
17 | %
18 | %regularizationFactor: regularization parameter in Newton's recursion
19 | %
20 | %forgettingFactor: exponential weighting (forgetting) factor
21 | %
22 | %flagLearningCurve: control if calculating the learning curve
23 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
24 | %Output:
25 | %baseDictionary: dictionary storing all the basis centers (not returned; see Notes)
26 | %expansionCoefficient: coefficients of the kernel expansion
27 | %learningCurve: trainSize*1 used for learning curve
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | %Notes: since the bases are by default all the training data, the
30 | %       dictionary output is omitted here.
31 |
32 |
33 | % memory initialization
34 | [inputDimension,trainSize] = size(trainInput);
35 | testSize = length(testTarget);
36 |
37 | expansionCoefficient = zeros(trainSize,1);
38 |
39 | if flagLearningCurve
40 | learningCurve = zeros(trainSize,1);
41 | learningCurve(1) = mean(testTarget.^2);
42 | else
43 | learningCurve = [];
44 | end
45 |
46 | Q_matrix = 1/(forgettingFactor*regularizationFactor + ker_eval(trainInput(:,1),trainInput(:,1),typeKernel,paramKernel));
47 | expansionCoefficient(1) = Q_matrix*trainTarget(1);
48 | % start training
49 | for n = 2:trainSize
50 | ii = 1:n-1;
51 | k_vector = ker_eval(trainInput(:,n),trainInput(:,ii),typeKernel,paramKernel);
52 | f_vector = Q_matrix*k_vector;
53 | s = 1/(regularizationFactor*forgettingFactor^(n)+ ker_eval(trainInput(:,n),trainInput(:,n),typeKernel,paramKernel) - k_vector'*f_vector);
54 | Q_tmp = zeros(n,n);
55 | Q_tmp(ii,ii) = Q_matrix + f_vector*f_vector'*s;
56 | Q_tmp(ii,n) = -f_vector*s;
57 | Q_tmp(n,ii) = Q_tmp(ii,n)';
58 | Q_tmp(n,n) = s;
59 | Q_matrix = Q_tmp;
60 |
61 | error = trainTarget(n) - k_vector'*expansionCoefficient(ii);
62 |
63 | % updating
64 | expansionCoefficient(n) = s*error;
65 | expansionCoefficient(ii) = expansionCoefficient(ii) - f_vector*expansionCoefficient(n);
66 |
67 | if flagLearningCurve
68 | % testing
69 | y_te = zeros(testSize,1);
70 | for jj = 1:testSize
71 | ii = 1:n;
72 | y_te(jj) = expansionCoefficient(ii)'*...
73 | ker_eval(testInput(:,jj),trainInput(:,ii),typeKernel,paramKernel);
74 | end
75 | err = testTarget - y_te;
76 | learningCurve(n) = mean(err.^2);
77 | end
78 | end
79 |
80 | return
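
Usage sketch (illustrative, not part of the repository): a hypothetical KRLS_old run on a toy 1-D regression problem; the data, Gaussian kernel size, regularization, and forgetting factor are assumptions. Prediction at a new point reuses ker_eval.m from this folder with all training inputs as centers.

    trainSize = 100; testSize = 50;
    trainInput  = rand(1, trainSize);
    trainTarget = sin(2*pi*trainInput') + 0.05*randn(trainSize, 1);
    testInput   = rand(1, testSize);
    testTarget  = sin(2*pi*testInput');
    [expansionCoefficient, learningCurve] = KRLS_old(trainInput, trainTarget, ...
        testInput, testTarget, 'Gauss', 5, 0.01, 1, 1);
    % predict at a new input x = 0.3 using the full training set as centers
    yNew = expansionCoefficient'*ker_eval(0.3, trainInput, 'Gauss', 5);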
--------------------------------------------------------------------------------
/ch6_codes/regression/ch5-6.fig:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/ch6_codes/regression/ch5-6.fig
--------------------------------------------------------------------------------
/ch6_codes/regression/ker_eval.m:
--------------------------------------------------------------------------------
1 | function y = ker_eval(X1,X2,ker_type,ker_param)
2 |
3 | N1 = size(X1,2);
4 | N2 = size(X2,2);
5 |
6 | if strcmp(ker_type,'Gauss')
7 | if N1 == N2
8 | y = (exp(-sum((X1-X2).^2,1)*ker_param))';
9 | elseif N1 == 1
10 | y = (exp(-sum((X1*ones(1,N2)-X2).^2,1)*ker_param))';
11 | elseif N2 == 1
12 | y = (exp(-sum((X1-X2*ones(1,N1)).^2,1)*ker_param))';
13 | else
14 | warning('error dimension--')
15 | end
16 | end
17 | if strcmp(ker_type,'Poly')
18 | if N1 == N2
19 | y = ((1 + sum(X1.*X2)).^ker_param)';
20 | elseif N1 == 1
21 | y = ((1 + X1'*X2).^ker_param)';
22 | elseif N2 == 1
23 | y = ((1 + X2'*X1).^ker_param)';
24 | else
25 | warning('error dimension--')
26 | end
27 | end
28 | if strcmp(ker_type,'CO2')
29 |
30 | % if N1 == N2
31 | % y = (66^2*exp(-(X1-X2).^2/(2*67^2)) + ...
32 | % 2.4^2*exp(-(X1-X2).^2/(2*90^2) - 2*(sin(pi*(X1-X2))).^2/(1.3^2))+...
33 | % 0.66^2*(1+(X1-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
34 | % 0.18^2*exp(-(X1-X2).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2))';
35 | % elseif N1 == 1
36 | % y = (66^2*exp(-(X1*ones(1,N2)-X2).^2/(2*67^2)) + ...
37 | % 2.4^2*exp(-(X1*ones(1,N2)-X2).^2/(2*90^2) - 2*(sin(pi*(X1*ones(1,N2)-X2))).^2/(1.3^2))+...
38 | % 0.66^2*(1+(X1*ones(1,N2)-X2).^2/(2*0.78*1.2^2)).^(-0.78) +...
39 | % 0.18^2*exp(-(X1*ones(1,N2)-X2).^2/(2*0.1333^2)) + 0.19^2*(X1*ones(1,N2) == X2))';
40 | % elseif N2 == 1
41 | % y = (66^2*exp(-(X1-X2*ones(1,N1)).^2/(2*67^2)) + ...
42 | % 2.4^2*exp(-(X1-X2*ones(1,N1)).^2/(2*90^2) - 2*(sin(pi*(X1-X2*ones(1,N1)))).^2/(1.3^2))+...
43 | % 0.66^2*(1+(X1-X2*ones(1,N1)).^2/(2*0.78*1.2^2)).^(-0.78) +...
44 | % 0.18^2*exp(-(X1-X2*ones(1,N1)).^2/(2*0.1333^2)) + 0.19^2*(X1 == X2*ones(1,N1)))';
45 | % else
46 | % warning('error dimension--')
47 | % end
48 | if N1 == N2
49 | y = (4356*exp(-(X1-X2).^2*1.1138e-004) + ...
50 | 5.76*exp(-(X1-X2).^2*6.1728e-005 - (sin(pi*(X1-X2))).^2*1.1834)+...
51 | 0.4356*(1+(X1-X2).^2*0.4452).^(-0.78) +...
52 | 0.0324*exp(-(X1-X2).^2*28.1391) + 0.0361*(X1 == X2))';
53 | elseif N1 == 1
54 | y = (4356*exp(-(X1*ones(1,N2)-X2).^2*1.1138e-004) + ...
55 | 5.76*exp(-(X1*ones(1,N2)-X2).^2*6.1728e-005 - (sin(pi*(X1*ones(1,N2)-X2))).^2*1.1834)+...
56 | 0.4356*(1+(X1*ones(1,N2)-X2).^2*0.4452).^(-0.78) +...
57 | 0.0324*exp(-(X1*ones(1,N2)-X2).^2*28.1391) + 0.0361*(X1*ones(1,N2) == X2))';
58 | elseif N2 == 1
59 | y = (4356*exp(-(X1-X2*ones(1,N1)).^2*1.1138e-004) + ...
60 | 5.76*exp(-(X1-X2*ones(1,N1)).^2*6.1728e-005 - (sin(pi*(X1-X2*ones(1,N1)))).^2*1.1834)+...
61 | 0.4356*(1+(X1-X2*ones(1,N1)).^2*0.4452).^(-0.78) +...
62 | 0.0324*exp(-(X1-X2*ones(1,N1)).^2*28.1391) + 0.0361*(X1 == X2*ones(1,N1)))';
63 | else
64 | warning('error dimension--')
65 | end
66 |
67 | end
68 |
69 | return
70 |
--------------------------------------------------------------------------------
/ch6_codes/regression/nlG.m:
--------------------------------------------------------------------------------
1 | function out = nlG(input,param,flag)
2 |
3 | switch flag
4 | case 0
5 | out = param*input;
6 | case 1
7 | out = (1-exp(-param*input))./(1+exp(-param*input));
8 | case 2
9 | out = (1-param)*input + param*input.^2 + sin(input);
10 | case 3
11 | out = sin(param*input);
12 | case 4 %threshold cut off
13 | out = input;
14 | out(find(out>param)) = param;
15 | out(find(out<-param)) = -param;
16 | case 5
17 | out = input.^2 + sin(input) + log (abs(input)+1);
18 | otherwise
19 | warning('nlG');
20 | end
21 | return
--------------------------------------------------------------------------------
/images/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/images/cover.jpg
--------------------------------------------------------------------------------
/samples/chapter1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/samples/chapter1.pdf
--------------------------------------------------------------------------------
/samples/contents.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/samples/contents.pdf
--------------------------------------------------------------------------------
/samples/preface.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cnel/KernelAdaptiveFiltering/2e1ad753bf98aeee4397b91d151926efc031916b/samples/preface.pdf
--------------------------------------------------------------------------------