├── doc ├── kaf_diagram.pdf ├── kaf_diagram.png ├── kafbox_doc.pdf ├── fig │ ├── kafbox_logo.pdf │ └── unican_logo.pdf ├── .gitignore └── extra.bib ├── demo ├── .gitignore ├── fig │ └── kafbox_sample_run.gif ├── literature │ ├── liu2010kernel │ │ ├── liu2010kernel.bib │ │ └── fig2_12.m │ ├── richard2009online │ │ ├── richard2009online.bib │ │ ├── generate_richardbench.m │ │ ├── generate_doddbench.m │ │ ├── fig1.m │ │ └── fig2.m │ ├── yukawa2012multikernel │ │ ├── yukawa2012multikernel.bib │ │ └── fig1a.m │ ├── vanvaerenbergh2006sliding │ │ ├── vanvaerenbergh2006sliding.bib │ │ └── fig2.m │ └── vanvaerenbergh2012kernel │ │ ├── vanvaerenbergh2012kernel.bib │ │ └── fig3.m ├── demo_prediction.m ├── demo_regression_2d.m ├── demo_sinc.m ├── demo_prediction_kstep.m ├── demo_template_system_identification.m ├── demo_parameter_estimation_nonstationary.m ├── demo_prediction_kstep_split.m ├── run_all_demos.m ├── demo_parameter_estimation_lorenz.m ├── demo_prediction_mackey_glass.m ├── demo_sinc_all.m ├── demo_reconverge_all.m └── demo_profiler_prediction_lorenz.m ├── .gitignore ├── lib ├── base │ ├── linear_filter.m │ ├── kernel_adaptive_filter.m │ ├── base_estimator.m │ └── kernel.m ├── util │ ├── kernel_adaptive_filter.m │ ├── gpml │ │ ├── readme.md │ │ ├── Copyright │ │ ├── kafbox_solve_chol.m │ │ ├── kafbox_covNoise.m │ │ ├── kafbox_covLambda.m │ │ ├── kafbox_covSEiso.m │ │ ├── kafbox_covSEiso2.m │ │ ├── kafbox_sq_dist.c │ │ ├── kafbox_sq_dist.m │ │ ├── kafbox_covSum.m │ │ ├── kafbox_covProd.m │ │ └── kafbox_gpr.m │ ├── kafbox_predictionloop.m │ ├── kafbox_quickrun.m │ └── kafbox_parameter_estimation.m ├── profiler │ ├── struct2str.m │ ├── kafbox_profiler_plotconvergence.m │ ├── nlms_profiler.m │ ├── kflops.m │ ├── lkapa_profiler.m │ ├── nlkapa_profiler.m │ ├── kafbox_template_profiler.m │ ├── kafbox_profiler_msecurves.m │ ├── rls_profiler.m │ ├── kafbox_profiler.m │ ├── klms_profiler.m │ ├── knlms_profiler.m │ ├── norma_profiler.m │ ├── kafbox_profiler_convergence_analysis.m │ ├── qklms_profiler.m │ ├── kafbox_profiler_storet.m │ ├── kap_profiler.m │ ├── exkrls_profiler.m │ ├── kafbox_profiler_simulation.m │ ├── swkrls_profiler.m │ ├── fbkrls_profiler.m │ ├── krls_profiler.m │ └── krlst_profiler.m ├── memory_cell.m ├── nlms.m ├── lms.m ├── rls.m ├── klms.m ├── problms.m ├── readme.md ├── rffklms.m ├── kafbox_template.m ├── norma.m ├── qklms.m ├── knlms.m ├── fbklms.m ├── krls.m ├── kalman.m ├── kap.m ├── exkrls.m ├── lkapa.m ├── swkrls.m ├── fbkrls.m ├── nlkapa.m ├── klms_csl1.m ├── mknlms_cs.m └── klms_csal1.m ├── install.m ├── data ├── kafbox_data_lorenz.m ├── kafbox_data_mg30.m ├── kafbox_data.m ├── generate_channel_switch.m └── kafbox_data_channel_switch.m └── LICENSE.txt /doc/kaf_diagram.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/doc/kaf_diagram.pdf -------------------------------------------------------------------------------- /doc/kaf_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/doc/kaf_diagram.png -------------------------------------------------------------------------------- /doc/kafbox_doc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/doc/kafbox_doc.pdf -------------------------------------------------------------------------------- /demo/.gitignore: 
-------------------------------------------------------------------------------- 1 | ################# 2 | ## Figures 3 | ################# 4 | 5 | *.png 6 | *.pdf 7 | -------------------------------------------------------------------------------- /doc/fig/kafbox_logo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/doc/fig/kafbox_logo.pdf -------------------------------------------------------------------------------- /doc/fig/unican_logo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/doc/fig/unican_logo.pdf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ################# 2 | ## Matlab 3 | ################# 4 | 5 | *~ 6 | *.asv 7 | *.bak 8 | *.mat 9 | -------------------------------------------------------------------------------- /demo/fig/kafbox_sample_run.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steven2358/kafbox/HEAD/demo/fig/kafbox_sample_run.gif -------------------------------------------------------------------------------- /lib/base/linear_filter.m: -------------------------------------------------------------------------------- 1 | % Superclass for linear filters. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef linear_filter < base_estimator 7 | 8 | end 9 | -------------------------------------------------------------------------------- /lib/base/kernel_adaptive_filter.m: -------------------------------------------------------------------------------- 1 | % Superclass for kernel adaptive filters. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef kernel_adaptive_filter < base_estimator 7 | 8 | end 9 | -------------------------------------------------------------------------------- /demo/literature/liu2010kernel/liu2010kernel.bib: -------------------------------------------------------------------------------- 1 | @book{liu2010kernel, 2 | title={Kernel Adaptive Filtering: A Comprehensive Introduction}, 3 | publisher={Wiley}, 4 | author={Liu, Weifeng and Pr\'incipe, Jos\'e C. and Haykin, Simon}, 5 | year={2010}, 6 | pages={209} 7 | } 8 | -------------------------------------------------------------------------------- /lib/util/kernel_adaptive_filter.m: -------------------------------------------------------------------------------- 1 | % Superclass for kernel adaptive filters. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef kernel_adaptive_filter < matlab.mixin.Copyable 7 | 8 | end 9 | -------------------------------------------------------------------------------- /demo/literature/richard2009online/richard2009online.bib: -------------------------------------------------------------------------------- 1 | @ARTICLE{richard2009online, 2 | author={Richard, C{\'e}dric and Bermudez, Jos{\'e} Carlos M. and Honeine, Paul}, 3 | journal={IEEE Transactions on Signal Processing}, 4 | title={Online Prediction of Time Series Data With Kernels}, 5 | year={2009}, 6 | month=mar, 7 | volume={57}, 8 | number={3}, 9 | pages={1058--1067}, 10 | } 11 | -------------------------------------------------------------------------------- /demo/literature/yukawa2012multikernel/yukawa2012multikernel.bib: -------------------------------------------------------------------------------- 1 | @article{yukawa2012multikernel, 2 | author = {Yukawa, Masahiro}, 3 | journal = {IEEE Transactions on Signal Processing}, 4 | title = {Multikernel Adaptive Filtering}, 5 | year = {2012}, 6 | volume = {60}, 7 | number = {9}, 8 | pages = {4672--4682}, 9 | doi = {10.1109/TSP.2012.2200889}, 10 | ISSN = {1053-587X}, 11 | } 12 | -------------------------------------------------------------------------------- /install.m: -------------------------------------------------------------------------------- 1 | % Installation file. Adds local folders to path. 2 | 3 | fprintf('Adding KAFBOX folders to Matlab path... ') 4 | 5 | addpath(genpath(fullfile(pwd,'data'))); % add data folder with subfolders 6 | addpath(genpath(fullfile(pwd,'lib'))); % add lib folder with subfolders 7 | 8 | % addpath(fullfile(pwd,'demo')); % add demo folder without subfolders 9 | 10 | fprintf('done.\n') 11 | disp('Type "savepath" if you wish to store the changes.') 12 | % savepath; 13 | -------------------------------------------------------------------------------- /data/kafbox_data_lorenz.m: -------------------------------------------------------------------------------- 1 | % KAFBOX_DATA_LORENZ Data loader for Lorenz data set 2 | 3 | function [X,Y,X_test,Y_test] = kafbox_data_lorenz(data_opts) 4 | 5 | data = load('lorenz.dat'); 6 | 7 | % prediction horizon 8 | horizon = 1; 9 | if isfield(data_opts,'horizon') 10 | horizon = data_opts.horizon; 11 | end 12 | 13 | % construct signal 14 | X = data(1:end-horizon); % input 15 | Y = data(1+horizon:end); % desired output 16 | 17 | X_test = []; 18 | Y_test = []; 19 | -------------------------------------------------------------------------------- /data/kafbox_data_mg30.m: -------------------------------------------------------------------------------- 1 | % KAFBOX_DATA_MG30 Data loader for Mackey-Glass 30 data set 2 | 3 | function [X,Y,X_test,Y_test] = kafbox_data_mg30(data_opts) 4 | 5 | data = load('mg30.dat'); 6 | 7 | % prediction horizon 8 | horizon = 1; 9 | if isfield(data_opts,'horizon') 10 | horizon = data_opts.horizon; 11 | end 12 | 13 | % construct signal 14 | X = data(1:end-horizon); % input 15 | Y = data(1+horizon:end); % desired output 16 | 17 | X_test = []; 18 | Y_test = []; 19 | -------------------------------------------------------------------------------- /demo/literature/vanvaerenbergh2006sliding/vanvaerenbergh2006sliding.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{vanvaerenbergh2006sliding, 2 | title={A sliding-window kernel {RLS} algorithm and its application to nonlinear channel identification}, 3 | author={Van
Vaerenbergh, Steven and V\'ia, Javier and Santamar\'ia, Ignacio}, 4 | booktitle={2006 IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, 5 | year = {2006}, 6 | address = {Toulouse, France}, 7 | month = may, 8 | pages = {789--792}, 9 | volume = 5, 10 | } 11 | -------------------------------------------------------------------------------- /demo/literature/vanvaerenbergh2012kernel/vanvaerenbergh2012kernel.bib: -------------------------------------------------------------------------------- 1 | @ARTICLE{vanvaerenbergh2012kernel, 2 | title={Kernel Recursive Least-Squares Tracker for Time-Varying Regression}, 3 | author={Van Vaerenbergh, Steven and L{\'a}zaro-Gredilla, Miguel and Santamar{\'i}a, Ignacio}, 4 | journal={IEEE Transactions on Neural Networks and Learning Systems}, 5 | year={2012}, 6 | month=aug, 7 | volume={23}, 8 | number={8}, 9 | pages={1313--1326}, 10 | keywords={}, 11 | doi={10.1109/TNNLS.2012.2200500}, 12 | ISSN={2162-237X}, 13 | } 14 | -------------------------------------------------------------------------------- /doc/.gitignore: -------------------------------------------------------------------------------- 1 | ################# 2 | ## Latex 3 | ################# 4 | 5 | *.acn 6 | *.acr 7 | *.alg 8 | *.aux 9 | *.backup 10 | *.bbl 11 | *.blg 12 | *.brf 13 | *.dvi 14 | *.fdb_latexmk 15 | *.fls 16 | *.glg 17 | *.glo 18 | *.gls 19 | *.idx 20 | *.ilg 21 | *.ind 22 | *.ist 23 | *.loa 24 | *.lof 25 | *.log 26 | *.lol 27 | *.lot 28 | *.maf 29 | *.mtc 30 | *.mtc0 31 | *.nav 32 | *.nlo 33 | *.out 34 | *.pdfsync 35 | *.ps 36 | *.snm 37 | *.synctex 38 | *.synctex.gz 39 | *.tdo 40 | *.toc 41 | *.vrb 42 | *.xdy 43 | *.dpth 44 | *.auxlock 45 | 46 | *.md5 47 | -------------------------------------------------------------------------------- /lib/util/gpml/readme.md: -------------------------------------------------------------------------------- 1 | GPML in KAFBOX 2 | ============== 3 | 4 | This folder contains excerpts from the GPML toolbox v2.0. The full toolbox is available at http://gaussianprocess.org/gpml/code/matlab/release/oldcode.html 5 | 6 | Prefixes have been added to the files in order to avoid interference with newer versions of GPML located on the same system. 7 | 8 | The files included here are used for the parameter estimation of kernel adaptive filters. Demos can be found in `demo/demo_parameter_estimation_lorenz.m` and `demo/demo_parameter_estimation_nonstationary.m`. 9 | -------------------------------------------------------------------------------- /lib/util/kafbox_predictionloop.m: -------------------------------------------------------------------------------- 1 | % Wrapper program for time series prediction. Performs progression of time 2 | % and calls adaptive filter during each iteration. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
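% Example (a hypothetical usage sketch, not part of the original file;
% data and filter options are borrowed from demo_prediction.m):
%   [X,y] = kafbox_data(struct('name','Lorenz','embedding',6));
%   kaf = krls(struct('nu',1E-4,'kerneltype','gauss','kernelpar',32));
%   [e,kaf] = kafbox_predictionloop(kaf,X,y); % one evaluate/train per step
%   fprintf('MSE: %.2fdB\n',10*log10(mean(e.^2)));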
5 | % https://github.com/steven2358/kafbox/ 6 | 7 | function [e,kaf] = kafbox_predictionloop(kaf,X,y,vb) 8 | 9 | if nargin<4 10 | vb = 1; 11 | end 12 | 13 | N = size(X,1); 14 | e = zeros(N,1); 15 | 16 | for n=1:N 17 | if ~mod(n,floor(N/10)) && vb 18 | fprintf('.'); % progress indicator (10 dots) 19 | end 20 | 21 | y_est = kaf.evaluate(X(n,:)); % predict the next output 22 | e(n) = y(n)-y_est; % store error 23 | kaf.train(X(n,:),y(n)); % train with one input-output pair 24 | end 25 | 26 | if vb 27 | fprintf('\n'); 28 | end 29 | -------------------------------------------------------------------------------- /lib/util/gpml/Copyright: -------------------------------------------------------------------------------- 1 | 2 | Software that implements 3 | 4 | GAUSSIAN PROCESS REGRESSION AND CLASSIFICATION 5 | 6 | Copyright (c) 2005 - 2007 by Carl Edward Rasmussen and Chris Williams 7 | 8 | Permission is granted for anyone to copy, use, or modify these programs for 9 | purposes of research or education, provided this copyright notice is retained, 10 | and note is made of any changes that have been made. 11 | 12 | These programs are distributed without any warranty, express or 13 | implied. As these programs were written for research purposes only, they 14 | have not been tested to the degree that would be advisable in any 15 | important application. All use of these programs is entirely at the 16 | user's own risk. 17 | 18 | The code and associated documentation are avaiable from 19 | 20 | http://www.GaussianProcess.org/gpml/code 21 | 22 | -------------------------------------------------------------------------------- /lib/profiler/struct2str.m: -------------------------------------------------------------------------------- 1 | % convert a structure to a string 2 | function str = struct2str(my_struct) 3 | fields = sortrows(fieldnames(my_struct)); % avoid permutations 4 | str = ''; 5 | for i=1:length(fields) 6 | fn = fields{i}; 7 | fv = my_struct.(fn); 8 | switch class(fv) 9 | case 'char' 10 | str = sprintf('%s %s=%s',str,fn,fv); 11 | case 'double' 12 | if (round(fv)==fv) 13 | str = sprintf('%s %s=%d',str,fn,fv); 14 | else 15 | found = 0; 16 | for j=0:10 17 | if (round(10^j*fv)==10^j*fv) && ~found 18 | str = sprintf(sprintf('%%s %%s=%%.%df',j),str,fn,fv); 19 | found = 1; 20 | end 21 | end 22 | end 23 | otherwise 24 | error('unknown field class'); 25 | end 26 | end 27 | str = str(2:end); 28 | -------------------------------------------------------------------------------- /lib/memory_cell.m: -------------------------------------------------------------------------------- 1 | % Dummy prediction method that always returns the last seen output. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
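% Example (a hypothetical usage sketch, not part of the original file):
%   mc = memory_cell();
%   mc.train([],3);                % remember the last seen output
%   y_est = mc.evaluate(ones(2,1)) % returns [3;3], one copy per input row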
4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef memory_cell < base_estimator 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') % variables 9 | y_mem; 10 | end 11 | 12 | methods 13 | % dummy constructor 14 | function obj = memory_cell(~) 15 | end 16 | 17 | % evaluate the algorithm 18 | function y_est = evaluate(obj,x) 19 | if ~isempty(obj.y_mem) 20 | y_est = repmat(obj.y_mem,size(x,1),1); 21 | else 22 | y_est = zeros(size(x,1),1); 23 | end 24 | end 25 | 26 | % train the algorithm 27 | function train(obj,~,y) 28 | obj.y_mem = y; 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /demo/demo_prediction.m: -------------------------------------------------------------------------------- 1 | % 1-step ahead prediction on Lorenz attractor time-series data 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | [X,Y] = kafbox_data(struct('name','Lorenz','embedding',6)); 7 | 8 | % make a kernel adaptive filter object of class krls with options: 9 | % ALD threshold 1E-4, Gaussian kernel, and kernel width 32 10 | kaf = krls(struct('nu',1E-4,'kerneltype','gauss','kernelpar',32)); 11 | 12 | %% RUN ALGORITHM 13 | N = size(X,1); 14 | Y_est = zeros(N,1); 15 | for i=1:N 16 | if ~mod(i,floor(N/10)), fprintf('.'); end % progress indicator, 10 dots 17 | Y_est(i) = kaf.evaluate(X(i,:)); % predict the next output 18 | kaf.train(X(i,:),Y(i)); % train with one input-output pair 19 | end 20 | fprintf('\n'); 21 | SE = (Y-Y_est).^2; % test error 22 | 23 | %% OUTPUT 24 | fprintf('MSE after first 1000 samples: %.2fdB\n\n',10*log10(mean(SE(1001:end)))); 25 | -------------------------------------------------------------------------------- /demo/demo_regression_2d.m: -------------------------------------------------------------------------------- 1 | % Wobbly field regression demo. Also used in lib/test/unit_test.m 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox 5 | 6 | close all 7 | clear 8 | 9 | %% PARAMETERS 10 | 11 | algorithm = 'krlst'; % algorithm class (choose from lib/ folder) 12 | opts = struct(); % algorithm options go here (kernel type, parameters, etc) 13 | 14 | %% PROGRAM 15 | 16 | kaf = feval(algorithm, opts); %#ok 17 | 18 | % generate some data 19 | c = 5; 20 | N = 1000; 21 | x = rand(N,2)*c; 22 | y = sin(3*x(:,1)).*cos(x(:,1)+x(:,2)); 23 | 24 | fprintf('Training') 25 | for i=1:N 26 | if ~mod(i,floor(N/10)), fprintf('.'); end 27 | kaf.train(x(i,:),y(i)); 28 | % y_test = kaf.evaluate(x(i+1,:)); 29 | end 30 | fprintf('\n') 31 | 32 | %% OUTPUT 33 | 34 | [x1,x2] = meshgrid(0:.2:c, 0:.2:c); 35 | yt = kaf.evaluate([x1(:) x2(:)]); 36 | 37 | z = reshape(yt,size(x1,1),size(x2,1)); 38 | figure; 39 | surf(x1,x2,z); 40 | -------------------------------------------------------------------------------- /demo/literature/richard2009online/generate_richardbench.m: -------------------------------------------------------------------------------- 1 | function [u,d,dref] = generate_richardbench(N) 2 | % Generate RICHARDBENCH signal. 3 | % 4 | % Benchmark signal introduced in C. Richard, J.C.M. Bermudez, P. Honeine, 5 | % "Online Prediction of Time Series Data With Kernels," IEEE Transactions 6 | % on Signal Processing, vol.57, no.3, pp.1058-1067, March 2009.
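% The generating model, as implemented below, is
%   v(t) = 1.1*exp(-|v(t-1)|) + u(t),  d(t) = v(t)^2 + n(t),
% where u(t) is drawn from N(0,0.25^2) and n(t) is unit-variance noise.
% Example (a usage sketch, not part of the original file):
%   [u,d,dref] = generate_richardbench(1000);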
7 | % 8 | % Comment: copyright Cedric Richard, http://cedric-richard.fr/ 9 | % 10 | % Input: N: number of data points 11 | % 12 | % Outputs: u: input sequence (1-dimensional sequence [u(:,1)]) 13 | % d: noisy desired output (1-dimensional sequence) 14 | % dref: noise-free desired output 15 | % 16 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 17 | % https://github.com/steven2358/kafbox/ 18 | 19 | v = zeros(1,N+1); 20 | v(1) = 0.5; 21 | u = 0.25*randn(N+1,1); 22 | dref = zeros(1,N+1); 23 | 24 | for t=2:N+1 25 | v(t) = 1.1*exp(-abs(v(t-1)))+u(t); 26 | dref(t) = v(t)^2; 27 | end 28 | d = dref+randn(1,N+1); 29 | 30 | d(1) = []; 31 | dref(1) = []; 32 | u(1) = []; 33 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_solve_chol.m: -------------------------------------------------------------------------------- 1 | % solve_chol - solve linear equations from the Cholesky factorization. 2 | % Solve A*X = B for X, where A is square, symmetric, positive definite. The 3 | % input to the function is R, the Cholesky decomposition of A, and the matrix B. 4 | % Example: X = solve_chol(chol(A),B); 5 | % 6 | % NOTE: The program code is written in the C language for efficiency and is 7 | % contained in the file solve_chol.c, and should be compiled using Matlab's mex 8 | % facility. However, this file also contains a (less efficient) Matlab 9 | % implementation, supplied only as a help to people unfamiliar with mex. If 10 | % the C code has been properly compiled and is available, it automatically 11 | % takes precedence over the Matlab code in this file. 12 | % 13 | % Copyright (c) 2004, 2005, 2006 by Carl Edward Rasmussen. 2006-02-08. 14 | 15 | function x = kafbox_solve_chol(A, B) 16 | 17 | if nargin ~= 2 || nargout > 1 18 | error('Wrong number of arguments.'); 19 | end 20 | 21 | if size(A,1) ~= size(A,2) || size(A,1) ~= size(B,1) 22 | error('Wrong sizes of matrix arguments.'); 23 | end 24 | 25 | x = A\(A'\B); 26 | -------------------------------------------------------------------------------- /demo/literature/richard2009online/generate_doddbench.m: -------------------------------------------------------------------------------- 1 | function [v,d,dref] = generate_doddbench(N) 2 | % Generate DODDBENCH signal. 3 | % 4 | % Benchmark signal introduced in Dodd, T.J., Kadirkamanathan, V. and 5 | % Harrison, R.F., "Function estimation in Hilbert space using sequential 6 | % projections," Proc. of the IFAC Conf. on Intelligent Control Systems and 7 | % Signal Processing, 113-118, 2003. 8 | % 9 | % Comment: copyright Cedric Richard, http://cedric-richard.fr/ 10 | % 11 | % Input: N: number of data points 12 | % 13 | % Outputs: v: input sequence (2-dimensional sequence [v(:,1);v(:,2)]) 14 | % d: noisy desired output (1-dimensional sequence) 15 | % dref: noise-free desired output 16 | % 17 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 18 | % https://github.com/steven2358/kafbox/ 19 | 20 | dref = zeros(1,N+2); 21 | dref(1:2)=[0.1 0.1]; 22 | 23 | for t=3:N+2 24 | dref(t) = (0.8-0.5*exp(-dref(t-1)^2))*dref(t-1) - ...
25 | (0.3+0.9*exp(-dref(t-1)^2))*dref(t-2)+0.1*sin(pi*dref(t-1)); 26 | end 27 | d = dref + 0.1*randn(1,N+2); 28 | v = [d(1:N); d(2:N+1)]'; 29 | 30 | d(1:2)=[]; 31 | dref(1:2)=[]; 32 | -------------------------------------------------------------------------------- /lib/util/kafbox_quickrun.m: -------------------------------------------------------------------------------- 1 | % Quick-run an algorithm on a dataset. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % http://sourceforge.net/projects/kafbox/ 5 | 6 | function kafbox_quickrun(kafname,datasetname,kafopt,dataopt) 7 | 8 | rng(1) 9 | 10 | t1 = tic; 11 | if nargin > 2 12 | kaf = feval(kafname,kafopt); 13 | else 14 | kaf = feval(kafname); 15 | end 16 | 17 | % get data 18 | opt = struct('N_test',50); 19 | [X,y,y_ref,X_test,y_test] = generate_channel_switch(opt); %#ok 20 | 21 | N = size(X,1); 22 | 23 | N_switch = 500; 24 | 25 | MSE = zeros(N,1); 26 | for n=1:N 27 | if ~mod(n,floor(N/10)) 28 | fprintf('.'); % progress indicator (10 dots) 29 | end 30 | 31 | y_est = kaf.evaluate(X_test); 32 | if n<=N_switch 33 | MSE(n) = mean((y_test(:,1)-y_est).^2); 34 | else 35 | MSE(n) = mean((y_test(:,2)-y_est).^2); 36 | end 37 | 38 | kaf.train(X(n,:),y(n)); % train with one input-output pair 39 | end 40 | fprintf(' %.2fs. Final MSE=%3.2fdB.\n',toc(t1),10*log10(mean(MSE(N-500:N,1)))); 41 | 42 | %% OUTPUT 43 | 44 | figure; 45 | plot(10*log10(MSE(:))) 46 | legend(kafname) 47 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_profiler_plotconvergence.m: -------------------------------------------------------------------------------- 1 | function [f,h] = kafbox_profiler_plotconvergence(algorithms,... 2 | mse_curves,resinds) 3 | 4 | figure; hold all 5 | set(gcf,'Position',[200, 200, 500 300]) 6 | 7 | titles = cell(length(algorithms),1); 8 | 9 | for i=1:size(resinds,1) 10 | algo = algorithms{resinds(i,1)}; 11 | 12 | ls = '-'; % line style 13 | if isfield(algo.figstyle,'line') 14 | ls = algo.figstyle.line; 15 | end 16 | 17 | lw = 1; % line width 18 | if isfield(algo.figstyle,'linewidth') 19 | lw = algo.figstyle.linewidth; 20 | end 21 | 22 | curve = mse_curves{resinds(i,1)}{resinds(i,2)}; 23 | xs= ~isnan(curve); 24 | inds = 1:length(mse_curves{resinds(i,1)}{resinds(i,2)}); 25 | plot(inds(xs),10*log10(curve(xs)),'color',algo.figstyle.color,... 26 | 'LineWidth',lw,'LineStyle',ls) 27 | 28 | titles{i} = algo.name; 29 | end 30 | 31 | h = legend(titles); 32 | grid on; box on 33 | xlabel('iteration'); 34 | ylabel('(N)MSE'); 35 | 36 | % Expand figure axes 37 | f = gcf; 38 | style = hgexport('factorystyle'); style.Bounds = 'tight'; 39 | hgexport(f,'-clipboard',style,'applystyle', true); 40 | drawnow; 41 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covNoise.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covNoise(logtheta, x, z); 2 | 3 | % Independent covariance function, ie "white noise", with specified variance. 4 | % The covariance function is specified as: 5 | % 6 | % k(x^p,x^q) = s2 * \delta(p,q) 7 | % 8 | % where s2 is the noise variance and \delta(p,q) is a Kronecker delta function 9 | % which is 1 iff p=q and zero otherwise. The hyperparameter is 10 | % 11 | % logtheta = [ log(sqrt(s2)) ] 12 | % 13 | % For more help on design of covariance functions, try "help covFunctions". 14 | % 15 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-03-24. 
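% Example (a hypothetical usage sketch, not part of the original file):
%   X = randn(5,2);
%   K = kafbox_covNoise(log(0.1),X); % 5x5 noise covariance with s2 = 0.01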
16 | 17 | if nargin == 0, A = '1'; return; end % report number of parameters 18 | 19 | s2 = exp(2*logtheta); % noise variance 20 | 21 | if nargin == 2 % compute covariance matrix 22 | A = s2*eye(size(x,1)); 23 | elseif nargout == 2 % compute test set covariances 24 | A = s2; 25 | B = 0; % zero cross-covariance by independence 26 | else % compute derivative matrix 27 | A = 2*s2*eye(size(x,1)); 28 | end 29 | -------------------------------------------------------------------------------- /lib/base/base_estimator.m: -------------------------------------------------------------------------------- 1 | % Base class for all estimators in KAFBOX. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef base_estimator < matlab.mixin.Copyable 7 | methods 8 | % get parameter names for the estimator 9 | function names = get_param_names(obj) 10 | names = fieldnames(obj); 11 | end 12 | 13 | % get parameters 14 | function params = get_params(obj) 15 | params = struct; 16 | for fn = fieldnames(obj)' 17 | params.(fn{1}) = obj.(fn{1}); 18 | end 19 | end 20 | 21 | % set parameters 22 | function set_params(obj,params) 23 | if (nargin > 1) % copy valid parameters; obj counts as an argument 24 | for fn = fieldnames(params)' 25 | if ismember(fn{1},fieldnames(obj)) 26 | values = params.(fn{1}); 27 | obj.(fn{1}) = values; 28 | else 29 | warning('Unknown parameter: %s.',fn{1}); 30 | end 31 | end 32 | end 33 | end 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /demo/demo_sinc.m: -------------------------------------------------------------------------------- 1 | % Demo: learn a sinc. Run one algorithm using its default parameters. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | close all 7 | clear 8 | rng('default'); rng(1) 9 | 10 | %% PARAMETERS 11 | 12 | N = 1000; % number of training data 13 | N_test = 500; % number of test data 14 | SNR = 20; % SNR in dB 15 | 16 | algorithm = 'krls'; 17 | 18 | %% GENERATE DATA 19 | x = randn(N,1); 20 | x_test = linspace(min(x),max(x),N_test)'; 21 | y_ref = sinc([x;x_test]); 22 | y = y_ref + sqrt(10^(-SNR/10)*var(y_ref))*randn(N+N_test,1); 23 | y_test = y_ref(N+1:N+N_test); 24 | 25 | %% RUN ALGORITHM 26 | fprintf('%s: ',upper(algorithm)); 27 | Y_est = zeros(N_test,1); 28 | kaf = feval(algorithm); %#ok 29 | t1 = tic; 30 | for i=1:N 31 | if ~mod(i,floor(N/10)), fprintf('.'); end 32 | kaf.train(x(i),y(i)); 33 | end 34 | y_est = kaf.evaluate(x_test); 35 | MSE = mean((y_test-y_est).^2); 36 | 37 | %% OUTPUT 38 | 39 | fprintf(' %.2fs. MSE=%3.2fdB\n',toc(t1),10*log10(MSE)) 40 | 41 | figure; hold all 42 | plot(x,y(1:N),'.') 43 | 44 | plot(x_test,y_est,'LineWidth',2) 45 | legend({'data',strrep(upper(algorithm),'_','-')}) 46 | axis([min(x)-0.5 max(x)+0.5 min(y)-0.5 max(y)+0.5]); 47 | -------------------------------------------------------------------------------- /demo/demo_prediction_kstep.m: -------------------------------------------------------------------------------- 1 | % Demo: k-step ahead prediction on Lorenz attractor time-series data 2 | 3 | clear 4 | close all; 5 | 6 | %% PARAMETERS 7 | 8 | k = 10; % prediction horizon 9 | 10 | % load data using helper function 11 | [X,Y] = kafbox_data(struct('name','Lorenz','horizon',k,...
12 | 'embedding',6,'N',5000)); 13 | 14 | % Make a kernel adaptive filter object 15 | kaf = krlst(struct('lambda',1,'M',100,'sn2',1E-6,'kerneltype','gauss','kernelpar',32)); 16 | % kaf = qklms(struct('eta',0.5,'epsu',.1,'kerneltype','gauss','kernelpar',32)); 17 | 18 | %% RUN ALGORITHM 19 | N = size(X,1); 20 | Y_est = zeros(N,1); 21 | for i=k:N 22 | if ~mod(i,floor(N/10)), fprintf('.'); end % progress indicator, 10 dots 23 | Y_est(i) = kaf.evaluate(X(i,:)); % predict the next output 24 | 25 | % Train on input-output data. Use the pair from (horizon-1) steps ago 26 | % since the newest output available at step i is Y(i-horizon+1). 27 | kaf.train(X(i-k+1,:),Y(i-k+1)); 28 | end 29 | fprintf('\n'); 30 | SE = (Y-Y_est).^2; % test error 31 | 32 | %% OUTPUT 33 | fprintf('MSE after first 1000 samples: %.2fdB\n\n',... 34 | 10*log10(mean(SE(1001:end)))); 35 | 36 | figure; hold all; plot(Y); plot(Y_est); 37 | legend('original','prediction'); 38 | title(sprintf('%d-step ahead prediction %s on Lorenz time series',... 39 | k,upper(class(kaf)))); 40 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2017, Steven Van Vaerenbergh. 2 | All rights reserved. 3 | 4 | The code is released under the FreeBSD License. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 2. Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 19 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /lib/profiler/nlms_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Normalized Least Mean Squares algorithm 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef nlms_profiler < nlms 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | end 11 | 12 | methods 13 | 14 | function obj = nlms_profiler(parameters) % constructor 15 | if nargin<1, parameters = struct(); end 16 | obj = obj@nlms(parameters); 17 | end 18 | 19 | function flops = lastflops(obj) % flops for last iteration 20 | m = size(obj.w,1); 21 | floptions = struct(... 22 | 'sum', 3*m, ... 
23 | 'mult', 3*m, ... 24 | 'div', 1); 25 | flops = kflops(floptions); 26 | end 27 | 28 | %% flops breakdown 29 | 30 | % obj.w = obj.w + obj.mu/(obj.eps + x*x')*x'*(y-x*obj.w); 31 | % sum: 3*m 32 | % mult: 3*m 33 | % div: 1 34 | 35 | %% 36 | 37 | function train_profiled(obj,x,y) 38 | t1 = tic; 39 | obj.train(x,y); 40 | t2 = toc(t1); 41 | obj.elapsed = obj.elapsed + t2; 42 | end 43 | 44 | function bytes = lastbytes(obj) % bytes used in last iteration 45 | m = size(obj.w,1); 46 | bytes = 8*m; % 8 bytes for double precision 47 | % w 48 | end 49 | 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /lib/nlms.m: -------------------------------------------------------------------------------- 1 | % Normalized Least Mean Squares algorithm 2 | % 3 | % From A. H. Sayed, "Fundamentals of Adaptive Filtering", Wiley-IEEE 4 | % Press, 2003, Chapter 5. 5 | % 6 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 7 | % https://github.com/steven2358/kafbox/ 8 | 9 | classdef nlms < linear_filter 10 | 11 | properties (GetAccess = 'public', SetAccess = 'private') 12 | mu = .9; % step size 13 | eps = 1E-6; % regularization 14 | end 15 | 16 | properties (GetAccess = 'public', SetAccess = 'private') 17 | w = []; 18 | end 19 | 20 | methods 21 | function obj = nlms(parameters) % constructor 22 | if (nargin > 0) % copy valid parameters 23 | for fn = fieldnames(parameters)' 24 | if ismember(fn,fieldnames(obj)) 25 | obj.(fn{1}) = parameters.(fn{1}); 26 | end 27 | end 28 | end 29 | end 30 | 31 | function y_est = evaluate(obj,x) % evaluate the algorithm 32 | if numel(obj.w)>0 33 | y_est = x*obj.w; 34 | else 35 | y_est = zeros(size(x,1),1); 36 | end 37 | end 38 | 39 | function train(obj,x,y) % train the algorithm 40 | if numel(obj.w)==0 % initialize 41 | obj.w = zeros(length(x),1); 42 | end 43 | 44 | % Algorithm 5.6.1 in reference 45 | err = (y-x*obj.w); 46 | obj.w = obj.w + obj.mu/(obj.eps + x*x')*x'*err; 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/profiler/kflops.m: -------------------------------------------------------------------------------- 1 | function fl = kflops(operations) 2 | % Calculate the number of FLOPS needed to perform the specified operations. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | % values for x86 processor, change as desired 8 | flops_sum = 1; 9 | flops_mult = 1; 10 | flops_div = 8; 11 | flops_sqrt = 8; 12 | flops_exp = 20; 13 | 14 | fl = 0; 15 | f = fields(operations); 16 | for op = f' 17 | num = operations.(op{1}); 18 | switch op{1} 19 | case 'sum' 20 | fl = fl + num*flops_sum; 21 | case 'mult' 22 | fl = fl + num*flops_mult; 23 | case 'div' 24 | fl = fl + num*flops_div; 25 | case 'exp' 26 | fl = fl + num*flops_exp; 27 | case 'sqrt' 28 | fl = fl + num*flops_sqrt; 29 | case 'gauss_kernel' 30 | n1 = operations.(op{1})(1); 31 | n2 = operations.(op{1})(2); 32 | m = operations.(op{1})(3); 33 | 34 | fl = fl + kflops(struct('sum',n1*n2*(2*m-1),... 35 | 'mult',n1*n2*(m+1),'exp',n1*n2)); 36 | 37 | % breakdown: 38 | % fl = kaf_flops('sum',N1*N2*M) + ... % all X1(i,:)-X2(j,:) 39 | % kaf_flops('mult',N1*N2*M) + ... % square all elements 40 | % kaf_flops('sum',N1*N2*(M-1)) + ... % sum M-1 elements per entry 41 | % kaf_flops('mult',N1*N2) + ...
% multiply by 1/(2*sgm^2) 42 | % kaf_flops('exp',N1*N2); 43 | 44 | otherwise 45 | error('unknown option') 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /lib/lms.m: -------------------------------------------------------------------------------- 1 | % Least Mean Squares algorithm 2 | % 3 | % From A. H. Sayed, "Fundamentals of Adaptive Filtering", Wiley-IEEE 4 | % Press, 2003, Chapter 5. 5 | % 6 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 7 | % https://github.com/steven2358/kafbox/ 8 | 9 | classdef lms < linear_filter 10 | 11 | properties (GetAccess = 'public', SetAccess = 'private') 12 | mu = 0.001; % learning rate 13 | end 14 | 15 | properties (GetAccess = 'public', SetAccess = 'private') 16 | w = []; % filter coefficients 17 | end 18 | 19 | methods 20 | function obj = lms(parameters) % constructor 21 | if (nargin > 0) % copy valid parameters 22 | for fn = fieldnames(parameters)' 23 | if ismember(fn,fieldnames(obj)) 24 | obj.(fn{1}) = parameters.(fn{1}); 25 | end 26 | end 27 | end 28 | end 29 | 30 | function y_est = evaluate(obj,x) % evaluate the algorithm 31 | if numel(obj.w)>0 32 | y_est = x*obj.w; 33 | else 34 | y_est = zeros(size(x,1),1); 35 | end 36 | end 37 | 38 | function train(obj,x,y) % train the algorithm 39 | if numel(obj.w)==0 % initialize 40 | obj.w = zeros(length(x),1); 41 | end 42 | 43 | % Algorithm 5.2.1 in reference 44 | err = y - x*obj.w; % instantaneous error 45 | obj.w = obj.w + obj.mu*x'*err; % update filter coefficients 46 | end 47 | 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /data/kafbox_data.m: -------------------------------------------------------------------------------- 1 | % KAFBOX_DATA Data handler. Returns input-output data specified in the 2 | % options. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
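% Example (a usage sketch borrowed from demo_prediction.m; the 'name' field
% selects the loader kafbox_data_<name>.m and 'embedding' sets the number
% of time-delayed input columns):
%   [X,Y] = kafbox_data(struct('name','Lorenz','embedding',6));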
5 | % https://github.com/steven2358/kafbox/ 6 | 7 | function [X_train,Y_train,X_test,Y_test] = kafbox_data(data_options) 8 | 9 | if isfield(data_options,'class') 10 | data_name = lower(data_options.class); 11 | elseif isfield(data_options,'name') 12 | data_name = lower(data_options.name); 13 | else 14 | error('Unknown data.'); 15 | end 16 | 17 | dataset_handle = str2func(['kafbox_data_' data_name]); 18 | 19 | [X,Y,X_test,Y_test] = feval(dataset_handle, data_options); 20 | 21 | % time embedding 22 | L = 0; 23 | if isfield(data_options,'embedding') 24 | L = data_options.embedding; 25 | X = time_embedding(X,L); 26 | end 27 | 28 | % apply offset 29 | if isfield(data_options,'offset') 30 | X = X(1+data_options.offset:end,:); 31 | Y = Y(1+data_options.offset:end); 32 | end 33 | 34 | % crop training data 35 | N = length(Y); 36 | if isfield(data_options,'N') 37 | N = min(data_options.N,N); 38 | end 39 | X_train = X(1:N,:); 40 | Y_train = Y(1:N); 41 | 42 | if isfield(data_options,'n_train') && isfield(data_options,'n_test') 43 | n_train = data_options.n_train; 44 | n_test = data_options.n_test; 45 | X_train = X(L:n_train+L-1,:); 46 | Y_train = Y(L:n_train+L-1); 47 | X_test = X(n_train+L:n_train+L+n_test-1,:); 48 | Y_test = Y(n_train+L:n_train+L+n_test-1); 49 | end 50 | 51 | 52 | function X_embedded = time_embedding(X,L) 53 | N = size(X,1); 54 | X_embedded = zeros(N,L); % first L-1 vectors are zero-padded 55 | for i = 1:L 56 | X_embedded(i:N,i) = X(1:N-i+1,1); % time embedding 57 | end 58 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covLambda.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covLambda(logtheta, xt, z) 2 | % KAFBOX_COVLAMBDA covariance function for an AR(1) process with fixed 3 | % power, parameterized as k(x^(t),x^(t+n)) = lambda^|n|. Uses the 4 | % covariance function constructors from GPML toolbox version 2.0. 5 | % 6 | % INPUT: - logtheta = [ log(lambda/(1-lambda))] % hyperparameter 7 | % - xt: input data, each row is a data point. First column 8 | % contains temporal indices, other columns are "spatial" data. 9 | % - z: test set 10 | % OUTPUT: - A, B: depend on the number of input arguments 11 | % USAGE: [A, B] = kafbox_covLambda(logtheta, x, z) 12 | % 13 | % Author: Miguel Lazaro Gredilla, 2012 14 | % 15 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
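% Example (a hypothetical usage sketch, not part of the original file):
%   lambda = 0.99;
%   xt = [(1:100)' randn(100,1)]; % first column holds the temporal indices
%   K = kafbox_covLambda(log(lambda/(1-lambda)),xt);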
16 | % https://github.com/steven2358/kafbox/ 17 | 18 | if nargin == 0, A = '1'; return; end % report number of parameters 19 | 20 | t = xt(:,1); % calculate covariance function only on temporal indices 21 | 22 | lambda = exp(logtheta(1))/(1+exp(logtheta)); 23 | sf2 = 1; 24 | n = size(t,1); 25 | 26 | if nargin == 2 27 | A = sf2*lambda.^abs(repmat(t,1,n)-repmat(t',n,1)); 28 | elseif nargout == 2 % compute test set covariances 29 | z = z(:,1); 30 | ntst = size(z,1); 31 | A = sf2*ones(ntst,1); 32 | B = sf2*lambda.^abs(repmat(t,1,ntst)-repmat(z',n,1)); 33 | else % compute derivative matrix 34 | if z == 1 % wrt lambda 35 | absn = abs(repmat(t,1,n)-repmat(t',n,1)); 36 | A = sf2*absn.*lambda.^absn.*(1-lambda); 37 | elseif z == 2 % wrt sf2 38 | display('It is fixed, you should not be trying to compute this.') 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /demo/demo_template_system_identification.m: -------------------------------------------------------------------------------- 1 | % Online system identification with a kernel adaptive filtering algorithm. 2 | % Author: Steven Van Vaerenbergh, 2013 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | close all; 8 | clear; 9 | 10 | %% PARAMETERS 11 | % Instructions: 1. Uncomment one datafile and one algorithm; 2. Execute. 12 | 13 | datafile = 'mimotestbed8K.dat'; L = 4; 14 | % kaf = krlst(struct('lambda',.995,'M',100,'sn2',0.015,'kerneltype','gauss','kernelpar',3.1)); % achieves -10.41 dB 15 | kaf = swkrls(struct('c',0.015,'M',70,'kerneltype','gauss','kernelpar',3.1)); % achieves -9.13 dB 16 | % kaf = norma(struct('lambda',1E-4,'tau',500,'eta',0.4,'kerneltype','gauss','kernelpar',3.1)); % achieves -8.58 dB 17 | 18 | %% PROGRAM 19 | tic 20 | 21 | data = load(datafile); % input and output data are 2 columns 22 | N = size(data,1); 23 | X = zeros(N,L); 24 | for i = 1:L, X(i:N,i) = data(1:N-i+1); end % time-embedding 25 | Y = data(:,2); % desired output 26 | 27 | fprintf('Running system identification algorithm') 28 | Y_est = zeros(N,1); 29 | for i=1:N 30 | if ~mod(i,floor(N/10)), fprintf('.'); end 31 | 32 | Y_est(i) = kaf.evaluate(X(i,:)); % make prediction 33 | kaf.train(X(i,:),Y(i)); % train 34 | end 35 | fprintf('\n'); 36 | 37 | SE = (Y-Y_est).^2; % test error 38 | 39 | toc 40 | %% OUTPUT 41 | 42 | fprintf('MSE after first 1000: %.2fdB\n\n',10*log10(mean(SE(1001:end)))); 43 | 44 | figure; plot(10*log10(SE)); xlabel('samples'); ylabel('squared error (dB)'); 45 | title(sprintf('%s on %s',upper(class(kaf)),datafile)); 46 | 47 | figure; hold all; plot(Y); plot(Y_est); 48 | legend('original','prediction'); 49 | title(sprintf('%s on %s',upper(class(kaf)),datafile)); 50 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covSEiso.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covSEiso(loghyper, x, z) 2 | 3 | % Squared Exponential covariance function with isotropic distance measure. The 4 | % covariance function is parameterized as: 5 | % 6 | % k(x^p,x^q) = sf2 * exp(-(x^p - x^q)'*inv(P)*(x^p - x^q)/2) 7 | % 8 | % where the P matrix is ell^2 times the unit matrix and sf2 is the signal 9 | % variance. The hyperparameters are: 10 | % 11 | % loghyper = [ log(ell) 12 | % log(sqrt(sf2)) ] 13 | % 14 | % For more help on design of covariance functions, try "help covFunctions". 
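% Example (a hypothetical usage sketch, not part of the original file):
%   X = randn(10,3);
%   loghyper = [log(2); log(1)]; % ell = 2, sf2 = 1
%   K = kafbox_covSEiso(loghyper,X);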
15 | % 16 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 17 | % https://github.com/steven2358/kafbox/ 18 | % 19 | % Original covSEiso.m (C) Copyright 2006 by Carl Edward Rasmussen as part 20 | % of the GPML toolbox version 2.0. The code and associated documentation 21 | % are available from http://www.GaussianProcess.org/gpml/code 22 | 23 | if nargin == 0, A = '2'; return; end % report number of parameters 24 | 25 | ell = exp(loghyper(1)); % characteristic length scale 26 | sf2 = exp(2*loghyper(2)); % signal variance 27 | 28 | if nargin == 2 29 | A = sf2*exp(-kafbox_sq_dist(x'/ell)/2); 30 | elseif nargout == 2 % compute test set covariances 31 | A = sf2*ones(size(z,1),1); 32 | B = sf2*exp(-kafbox_sq_dist(x'/ell,z'/ell)/2); 33 | else % compute derivative matrix 34 | if z == 1 % first parameter 35 | A = sf2*exp(-kafbox_sq_dist(x'/ell)/2).*kafbox_sq_dist(x'/ell); 36 | else % second parameter 37 | A = 2*sf2*exp(-kafbox_sq_dist(x'/ell)/2); 38 | end 39 | end 40 | -------------------------------------------------------------------------------- /lib/profiler/lkapa_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Leaky Kernel Affine Projection Algorithm 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef lkapa_profiler < lkapa 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | end 11 | 12 | methods 13 | 14 | function kaf = lkapa_profiler(parameters) % constructor 15 | if nargin<1, parameters = struct(); end 16 | kaf = kaf@lkapa(parameters); 17 | end 18 | 19 | function flops = lastflops(kaf) % flops for last iteration 20 | % numbers of operations 21 | m1 = 1; 22 | m2 = 1; 23 | m3 = 1; 24 | m4 = 1; 25 | 26 | floptions = struct(... 27 | 'sum', m1, ... 28 | 'mult', m2, ... 29 | 'div', m3, ... 30 | sprintf('%s_kernel',kaf.kerneltype), [m4,1,size(kaf.dict,2)]); 31 | 32 | flops = nan; % dummy. kflops(floptions); 33 | end 34 | 35 | %% flops breakdown 36 | 37 | % [space for remarks on number of operations used above] 38 | 39 | %% 40 | 41 | function train_profiled(kaf,x,y) 42 | t1 = tic; 43 | kaf.train(x,y); 44 | t2 = toc(t1); 45 | kaf.elapsed = kaf.elapsed + t2; 46 | end 47 | 48 | function bytes = lastbytes(kaf) % bytes used in last iteration 49 | m = size(kaf.dict,1); 50 | m2 = 1; 51 | bytes = nan; % dummy. (m2 + m*size(kaf.dict,2)); % 8 bytes for double precision 52 | % [list variables] 53 | end 54 | 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /demo/demo_parameter_estimation_nonstationary.m: -------------------------------------------------------------------------------- 1 | % DEMO_PARAMETER_ESTIMATION_NONSTATIONARY Estimation of the parameters of 2 | % the KRLS-T algorithm in a non-stationary setting. 3 | % 4 | % This demo generates data as y = ft(x) where ft is a time-varying 5 | % function. Then it estimates the optimal parameters of the KRLS-T. The 6 | % estimated parameters are: forgetting factor lambda, regularization c and 7 | % Gaussian kernel width. Kernels other than the Gaussian can be used by 8 | % modifying kafbox_parameter_estimation.m. 9 | % 10 | % Author: Steven Van Vaerenbergh, 2013. 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
13 | % https://github.com/steven2358/kafbox/ 14 | 15 | close all 16 | clear 17 | 18 | %% PARAMETERS 19 | 20 | N = 500; % number of data 21 | sigma = 1; % spatial kernel width 22 | lambda = .995; % forgetting factor (temporal kernel lambda) 23 | c = 1E-2; % AWGN variance 24 | 25 | %% PROGRAM 26 | tic 27 | 28 | fprintf('\nGenerating time-varying data with spatio-temporal\n'); 29 | fprintf('AR(1) covariance...\n'); 30 | 31 | X = randn(N,1); % 1D input data 32 | time_ind = 0:N-1; 33 | T = toeplitz(time_ind)/2; 34 | Kt = lambda.^T; 35 | K = kernel(X,X,'gauss',sigma); 36 | Kn = c*eye(N); 37 | K = Kt.*K + Kn; 38 | Z = randn(N,1); % GP generator 39 | M = zeros(N,1); % output data mean 40 | Y = chol(K)'*Z + M; % output data 41 | 42 | fprintf('Estimating parameters of KRLS-T for these data...\n\n'); 43 | [sigma_est,c_est,lambda_est] = kafbox_parameter_estimation(X,Y); 44 | 45 | toc 46 | %% OUTPUT 47 | 48 | fprintf('\n'); 49 | fprintf(' True Estimated\n'); 50 | fprintf('sigma: %.4f %.4f\n',sigma,sigma_est) 51 | fprintf('c: %.4f %.4f\n',c,c_est) 52 | fprintf('lambda: %.4f %.4f\n\n',lambda,lambda_est) 53 | fprintf('\n'); 54 | 55 | % plot data 56 | figure; plot3(time_ind,X(:,1),Y,'+') 57 | xlabel('time'); ylabel('x'); zlabel('y'); 58 | -------------------------------------------------------------------------------- /lib/profiler/nlkapa_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Normalized Leaky Kernel Affine Projection 2 | % Algorithm 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | classdef nlkapa_profiler < nlkapa 8 | 9 | properties (GetAccess = 'public', SetAccess = 'private') 10 | elapsed = 0; % elapsed time 11 | end 12 | 13 | methods 14 | 15 | function kaf = nlkapa_profiler(parameters) % constructor 16 | if nargin<1, parameters = struct(); end 17 | kaf = kaf@nlkapa(parameters); 18 | end 19 | 20 | function flops = lastflops(kaf) % flops for last iteration 21 | % numbers of operations 22 | m1 = 1; 23 | m2 = 1; 24 | m3 = 1; 25 | m4 = 1; 26 | 27 | floptions = struct(... 28 | 'sum', m1, ... 29 | 'mult', m2, ... 30 | 'div', m3, ... 31 | sprintf('%s_kernel',kaf.kerneltype), [m4,1,size(kaf.dict,2)]); 32 | 33 | flops = nan; % dummy. kflops(floptions); 34 | end 35 | 36 | %% flops breakdown 37 | 38 | % [space for remarks on number of operations used above] 39 | 40 | %% 41 | 42 | function train_profiled(kaf,x,y) 43 | t1 = tic; 44 | kaf.train(x,y); 45 | t2 = toc(t1); 46 | kaf.elapsed = kaf.elapsed + t2; 47 | end 48 | 49 | function bytes = lastbytes(kaf) % bytes used in last iteration 50 | m = size(kaf.dict,1); 51 | m2 = 1; 52 | bytes = nan; % dummy. (m2 + m*size(kaf.dict,2)); % 8 bytes for double precision 53 | % [list variables] 54 | end 55 | 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_template_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension template. 2 | % 3 | % Profiler extension for [algorithm] 4 | % 5 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
6 | % https://github.com/steven2358/kafbox/ 7 | 8 | classdef kafbox_template_profiler < kafbox_template 9 | 10 | properties (GetAccess = 'public', SetAccess = 'private') 11 | elapsed = 0; % elapsed time 12 | end 13 | 14 | methods 15 | 16 | function kaf = kafbox_template_profiler(parameters) % constructor 17 | if nargin<1, parameters = struct(); end 18 | kaf = kaf@kafbox_template(parameters); 19 | end 20 | 21 | function flops = lastflops(kaf) % flops for last iteration 22 | % numbers of operations 23 | m1 = 1; 24 | m2 = 1; 25 | m3 = 1; 26 | m4 = 1; 27 | 28 | floptions = struct(... 29 | 'sum', m1, ... 30 | 'mult', m2, ... 31 | 'div', m3, ... 32 | sprintf('%s_kernel',kaf.kerneltype), [m4,1,size(kaf.dict,2)]); 33 | 34 | flops = kflops(floptions); 35 | end 36 | 37 | %% flops breakdown 38 | 39 | % [space for remarks on number of operations used above] 40 | 41 | %% 42 | 43 | function train_profiled(kaf,x,y) 44 | t1 = tic; 45 | kaf.train(x,y); 46 | t2 = toc(t1); 47 | kaf.elapsed = kaf.elapsed + t2; 48 | end 49 | 50 | function bytes = lastbytes(kaf) % bytes used in last iteration 51 | m = size(kaf.dict,1); 52 | m2 = 1; 53 | bytes = (m2 + m*size(kaf.dict,2)); % 8 bytes for double precision 54 | % [list variables] 55 | end 56 | 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_profiler_msecurves.m: -------------------------------------------------------------------------------- 1 | % Process MSE results into MSE curves: average out and/or limit to a 2 | % subset of temporal indices. 3 | % 4 | % Input: 5 | % - results: a configresults cell produced by kafbox_profiler. 6 | % - inds: array of indices for which to calculate output 7 | % 8 | % Output: 9 | % - MSE_avg_setups: cell containing averaged out MSE curves, structure 10 | % corresponds to "results" cell structure. 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 13 | % https://github.com/steven2358/kafbox/ 14 | 15 | function MSE_avg_setups = kafbox_profiler_msecurves(results,inds) 16 | 17 | num_setups = length(results); 18 | MSE_avg_setups = cell(num_setups,1); 19 | 20 | for setup_ind = 1:num_setups 21 | setup_results = results{setup_ind}; 22 | 23 | num_configs = length(setup_results); 24 | MSE_avg_configs = cell(num_configs,1); 25 | 26 | for config_ind = 1:num_configs 27 | config_results = setup_results{config_ind}; 28 | 29 | if isfield(config_results{1},'NMSE') 30 | N = length(config_results{1}.NMSE); % temporary 31 | else 32 | N = length(config_results{1}.MSE); 33 | end 34 | MSE = zeros(N,1); 35 | 36 | num_sim = length(config_results); 37 | for sim_ind = 1:num_sim 38 | simresults = config_results{sim_ind}; 39 | 40 | if isfield(simresults,'NMSE') % temporary 41 | simresults.MSE = simresults.NMSE; 42 | end 43 | if nargin<2 44 | inds = 1:length(simresults.MSE); 45 | end 46 | 47 | mm = min(length(simresults.MSE(inds)),N); 48 | MSE = MSE(1:mm) + simresults.MSE(inds(1:mm))/num_sim; 49 | end 50 | MSE_avg_configs{config_ind} = MSE; 51 | end 52 | MSE_avg_setups{setup_ind} = MSE_avg_configs; 53 | end 54 | -------------------------------------------------------------------------------- /lib/rls.m: -------------------------------------------------------------------------------- 1 | % Recursive Least-Squares Algorithm with exponential weighting 2 | % 3 | % From S. Haykin, "Adaptive Filtering Theory (3rd Ed.)", Prentice Hall, 4 | % Chapter 13. 5 | % 6 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
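% The recursions implemented in train() below are, in compact form,
%   g = P*x'/(lambda + x*P*x');  w = w + g*(y - x*w);  P = (P - g*x*P)/lambda;
% Example (a hypothetical usage sketch, not part of the original file):
%   f = rls(struct('lambda',0.99,'c',1E-4));
%   f.train(randn(1,4),1); y_est = f.evaluate(randn(1,4));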
7 | % https://github.com/steven2358/kafbox/ 8 | 9 | classdef rls < linear_filter 10 | 11 | properties (GetAccess = 'public', SetAccess = 'private') 12 | lambda = .99; % forgetting factor 13 | c = 1E-4; % regularization 14 | end 15 | 16 | properties (GetAccess = 'public', SetAccess = 'private') 17 | P = []; % inverse autocorrelation matrix 18 | w = []; % filter coefficients 19 | end 20 | 21 | methods 22 | 23 | function obj = rls(parameters) % constructor 24 | if (nargin > 0) % copy valid parameters 25 | for fn = fieldnames(parameters)' 26 | if ismember(fn,fieldnames(obj)) 27 | obj.(fn{1}) = parameters.(fn{1}); 28 | end 29 | end 30 | end 31 | end 32 | 33 | function y_est = evaluate(obj,x) % evaluate the algorithm 34 | if numel(obj.w)>0 35 | y_est = x*obj.w; 36 | else 37 | y_est = zeros(size(x,1),1); 38 | end 39 | end 40 | 41 | function train(obj,x,y) % train the algorithm 42 | if numel(obj.w)==0 % initialize 43 | m = length(x); 44 | obj.w = zeros(m,1); 45 | obj.P = obj.c\eye(m); 46 | end 47 | 48 | g = obj.P*x'/(obj.lambda+x*obj.P*x'); % gain vector 49 | err = y - x*obj.w; % instantaneous error 50 | obj.w = obj.w + g*err; % update filter coefficients 51 | obj.P = obj.lambda\(obj.P - g*x*obj.P); % update inv. autocorr. 52 | end 53 | 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /demo/demo_prediction_kstep_split.m: -------------------------------------------------------------------------------- 1 | % Demo: k-step ahead prediction on Lorenz attractor time-series data. 2 | % This script trains and tests on separate splits of the data. 3 | 4 | clear 5 | close all; 6 | 7 | %% PARAMETERS 8 | 9 | k = 10; % prediction horizon 10 | N_train = 1000; % number of training data 11 | N_test = 1000; % number of test data, max 5000 - N_train 12 | seed = 1; % random seed for reproducibility 13 | 14 | % load data using helper function 15 | [X,Y] = kafbox_data(struct('name','Lorenz','horizon',k,'embedding',6,'N',5000)); 16 | 17 | % Make a kernel adaptive filter object 18 | kaf = krlst(struct('lambda',1,'M',100,'sn2',1E-4,'kerneltype','gauss','kernelpar',32)); 19 | % kaf = qklms(struct('eta',0.1,'epsu',.1,'kerneltype','gauss','kernelpar',32)); 20 | 21 | %% RUN ALGORITHM 22 | 23 | % init random generator 24 | rng('default') 25 | rng(seed); 26 | 27 | % corrupt outputs with noise 28 | Y = Y + 3*randn(size(Y)); 29 | 30 | % split 31 | X_train = X(1:N_train,:); 32 | Y_train = Y(1:N_train); 33 | X_test = X(N_train+1:N_train+N_test,:); 34 | Y_test = Y(N_train+1:N_train+N_test); 35 | 36 | Y_test_MSE = zeros(N_train,1); 37 | for i=1:N_train 38 | % train on data set 1 39 | if ~mod(i,floor(N_train/10)), fprintf('.'); end % progress indicator, 10 dots 40 | kaf.train(X(i,:),Y(i)); % train with one input-output pair 41 | 42 | % test on data set 2 43 | Y_test_est = kaf.evaluate(X_test); 44 | 45 | % store MSE 46 | SE = (Y_test-Y_test_est).^2; % out-of-sample error 47 | Y_test_MSE(i) = mean(SE); 48 | end 49 | fprintf('\n'); 50 | 51 | %% OUTPUT 52 | 53 | % learning curve 54 | figure 55 | plot(10*log10(Y_test_MSE)) 56 | title('Learning curve') 57 | 58 | % errors for final prediction 59 | figure; hold all; plot(Y_test); plot(Y_test_est); 60 | legend('original','prediction'); 61 | title(sprintf('%d-step ahead prediction result for %s on test set after %d training steps',... 
62 | k,upper(class(kaf)),N_train)); 63 | 64 | fprintf('Final MSE: %.2f\n',Y_test_MSE(N_train)); 65 | -------------------------------------------------------------------------------- /lib/base/kernel.m: -------------------------------------------------------------------------------- 1 | % Calculate the kernel matrix for two data sets. 2 | % Author: Steven Van Vaerenbergh, 2013 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | function K = kernel(X1,X2,ktype,kpar) 8 | N1 = size(X1,1); 9 | N2 = size(X2,1); 10 | 11 | switch ktype 12 | case {'gauss','rbf'} % RBF kernel 13 | norms1 = sum(X1.^2,2); 14 | norms2 = sum(X2.^2,2); 15 | 16 | mat1 = repmat(norms1,1,N2); 17 | mat2 = repmat(norms2',N1,1); 18 | 19 | dist2 = mat1 + mat2 - 2*X1*X2'; % full distance matrix 20 | K = exp(-dist2/(2*kpar^2)); 21 | 22 | case 'laplace' % Laplace kernel 23 | norms1 = sum(X1.^2,2); 24 | norms2 = sum(X2.^2,2); 25 | 26 | mat1 = repmat(norms1,1,N2); 27 | mat2 = repmat(norms2',N1,1); 28 | 29 | dist2 = mat1 + mat2 - 2*X1*X2'; % full distance matrix 30 | K = exp(-sqrt(dist2)/(2*kpar^2)); 31 | 32 | case 'gauss-diag' % diagonal of RBF kernel 33 | K = exp(-sum((X1-X2).^2,2)/(2*kpar^2)); 34 | 35 | case 'gauss-aniso' % anisotropic RBF kernel 36 | D = kpar; % anisotropic scaling 37 | 38 | X1 = X1*D; 39 | X2 = X2*D; 40 | 41 | K = kernel(X1,X2,'gauss',1); 42 | 43 | case 'poly' % polynomial kernel 44 | p = kpar(1); % polynomial order 45 | c = kpar(2); % additive constant 46 | 47 | K = (X1*X2' + c).^p; 48 | 49 | case 'linear' % linear kernel 50 | K = X1*X2'; 51 | 52 | case 'sum' 53 | a = kpar.a; 54 | ktype1 = kpar.ktype1; 55 | kpar1 = kpar.kpar1; 56 | b = kpar.b; 57 | ktype2 = kpar.ktype2; 58 | kpar2 = kpar.kpar2; 59 | 60 | K = a*kernel(X1,X2,ktype1,kpar1) + b*kernel(X1,X2,ktype2,kpar2); 61 | 62 | otherwise % default case 63 | error('unknown kernel type') 64 | end 65 | -------------------------------------------------------------------------------- /lib/profiler/rls_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Recursive Least-Squares algorithm 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef rls_profiler < rls 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | end 11 | 12 | methods 13 | 14 | function obj = rls_profiler(parameters) % constructor 15 | if nargin<1, parameters = struct(); end 16 | obj = obj@rls(parameters); 17 | end 18 | 19 | function flops = lastflops(obj) % flops for last iteration 20 | m = size(obj.w,1); 21 | floptions = struct(... 22 | 'sum', 2*m^2 - m + m + 2*m - 1 + 2*m^2 - m, ... 23 | 'mult', m + m + m + 3*m^2, ...
24 | 'div', 1 + 1); 25 | flops = kflops(floptions); 26 | end 27 | 28 | %% flops breakdown 29 | 30 | % k = obj.P*x'/(obj.lambda+x*obj.P*x'); 31 | % sum: 2*m^2 - m 32 | % mult: m 33 | % div: 1 34 | 35 | % z = y - x*obj.w; 36 | % sum: m 37 | % mult: m 38 | 39 | % obj.w = obj.w + k*z; 40 | % sum: 2*m - 1 41 | % mult: m 42 | 43 | % obj.P = obj.lambda\(obj.P - k*x*obj.P); 44 | % sum: 2*m^2 - m 45 | % mult: 3*m^2 46 | % div: 1 47 | 48 | %% 49 | 50 | function train_profiled(obj,x,y) % measures elapsed time of training 51 | t1 = tic; 52 | obj.train(x,y); 53 | t2 = toc(t1); 54 | obj.elapsed = obj.elapsed + t2; 55 | end 56 | 57 | function bytes = lastbytes(obj) % bytes used in last iteration 58 | m = size(obj.w,1); 59 | bytes = 8*(m + m^2); % 8 bytes for double precision 60 | % w, P 61 | end 62 | 63 | end 64 | end 65 | -------------------------------------------------------------------------------- /demo/run_all_demos.m: -------------------------------------------------------------------------------- 1 | % Script to run all demos consecutively. 2 | % 3 | % Author: Steven Van Vaerenbergh, 2015. 4 | % 5 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 6 | % https://github.com/steven2358/kafbox 7 | 8 | clc 9 | close all 10 | clear 11 | 12 | % get list of demo scripts 13 | fdir = fileparts(which('run_all_demos.m')); 14 | files_demo = dir(fullfile(fdir,'demo_*.m')); 15 | files = files_demo; 16 | folders = repmat({fdir},length(files),1); 17 | 18 | % get list of literature scripts 19 | folders_literature = dir(fullfile(fdir,'literature','*2*')); 20 | for i=1:length(folders_literature) 21 | folder_i = fullfile(fdir,'literature',folders_literature(i).name); 22 | files_i = dir(fullfile(folder_i,'fig*.m')); 23 | folders_i = repmat({folder_i},length(files_i),1); 24 | 25 | files = [files; files_i]; %#ok 26 | folders = [folders; folders_i]; %#ok 27 | end 28 | 29 | [~,files] = cellfun(@fileparts, {files.name}, 'UniformOutput',0); 30 | 31 | t1 = tic; 32 | fprintf('\n') 33 | for i=1:length(files) 34 | close all 35 | clear eval 36 | save(fullfile(tempdir,'temp.mat'),'i','folders','files','fdir',... 37 | 't1'); % memory map 38 | 39 | try 40 | % run script 41 | cd(folders{i}) 42 | fname_demo = files{i}; 43 | fprintf('\nRunning %s\n',fname_demo); 44 | eval(fname_demo); 45 | catch err 46 | % return to demo folder 47 | load(fullfile(tempdir,'temp.mat')); 48 | cd(fdir); 49 | 50 | % report error 51 | me = err.stack(1); 52 | error(['Error in ',... 53 | '',... 54 | '%s at line %d (%s):\n',... 55 | '%s'],... 56 | me.file, me.line, me.name,... 57 | err.message); 58 | end 59 | 60 | load(fullfile(tempdir,'temp.mat')); 61 | cd(fdir); 62 | end 63 | delete(fullfile(tempdir,'temp.mat')); 64 | toc(t1) 65 | 66 | close all 67 | 68 | fprintf('\nAll tests finished.\n') 69 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covSEiso2.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covSEiso2(loghyper, xt, z) 2 | % KAFBOX_COVSEISO2 Squared Exponential covariance function with isotropic 3 | % distance measure. The covariance function is parameterized as: 4 | % k(x^p,x^q) = sf2 * exp(-(x^p - x^q)'*inv(P)*(x^p - x^q)/2), where the P 5 | % matrix is ell^2 times the unit matrix and sf2 is the signal variance. 6 | % 7 | % INPUT: - loghyper = [log(ell); log(sqrt(sf2))] % hyperparameters 8 | % - xt: input data, each row is a data point.
First column 9 | % contains temporal indices, other columns are "spatial" data. 10 | % - z: test set 11 | % OUTPUT: - A, B: depend on the number of input arguments 12 | % USAGE: [A, B] = kafbox_covSEiso2(loghyper, xt, z) 13 | % 14 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 15 | % https://github.com/steven2358/kafbox/ 16 | % 17 | % Original covSEiso.m (C) Copyright 2006 by Carl Edward Rasmussen as part 18 | % of the GPML toolbox version 2.0. The code and associated documentation 19 | % are available from http://www.GaussianProcess.org/gpml/code 20 | 21 | if nargin == 0, A = '2'; return; end % report number of parameters 22 | 23 | x = xt(:,2:end); % calculate covariance function only on spatial data 24 | 25 | ell = exp(loghyper(1)); % characteristic length scale 26 | sf2 = exp(2*loghyper(2)); % signal variance 27 | 28 | if nargin == 2 29 | A = sf2*exp(-kafbox_sq_dist(x'/ell)/2); 30 | elseif nargout == 2 % compute test set covariances 31 | z = z(:,2:end); 32 | A = sf2*ones(size(z,1),1); 33 | B = sf2*exp(-kafbox_sq_dist(x'/ell,z'/ell)/2); 34 | else % compute derivative matrix 35 | if z == 1 % first parameter 36 | A = sf2*exp(-kafbox_sq_dist(x'/ell)/2).*kafbox_sq_dist(x'/ell); 37 | else % second parameter 38 | A = 2*sf2*exp(-kafbox_sq_dist(x'/ell)/2); 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_profiler.m: -------------------------------------------------------------------------------- 1 | % KAFBOX_PROFILER Profiler program. Runs through different algorithms, each 2 | % with a number of configurations, each for a number of simulations. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | function [data,algorithms,results] = ... 8 | kafbox_profiler(data,sim_opts,algorithms,outputdir) 9 | 10 | num_algo = length(algorithms); 11 | numsim = 1; 12 | if isfield(sim_opts,'numsim') 13 | numsim = sim_opts.numsim; 14 | end 15 | 16 | % pre-allocate structure 17 | results = cell(num_algo,1); 18 | for algo_ind = 1:num_algo 19 | num_config = length(algorithms{algo_ind}.options.sweep_val); 20 | algo_results = cell(num_config,1); 21 | for config_ind = 1:num_config 22 | algo_results{config_ind} = cell(numsim,1); 23 | end 24 | results{algo_ind} = algo_results; 25 | end 26 | 27 | for sim_ind = 1:numsim % process simulations 28 | fprintf('SIM %2d\n',sim_ind); 29 | 30 | for algo_ind = 1:num_algo % process algorithms 31 | 32 | algo = algorithms{algo_ind}; 33 | sweep_par = algo.options.sweep_par; 34 | sweep_val = algo.options.sweep_val; 35 | num_config = length(sweep_val); % number of iterations in sweep 36 | 37 | for config_ind = 1:num_config % process configurations 38 | algo_config = algo; 39 | fprintf('%9s %10s: ',algo_config.name,... 40 | struct2str(struct(sweep_par,sweep_val(config_ind)))); 41 | 42 | % set sweep parameter value 43 | eval(sprintf('algo_config.options.%s = sweep_val(config_ind);',sweep_par)); 44 | 45 | % perform one simulation for this configuration 46 | simresults = kafbox_profiler_simulation(data,sim_opts,...
47 | algo_config,sim_ind,outputdir); 48 | 49 | results{algo_ind}{config_ind}{sim_ind} = simresults; 50 | 51 | fprintf(' %.2fs',simresults.elapsed); 52 | fprintf('\n'); 53 | end 54 | end 55 | fprintf('\n'); 56 | end 57 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_sq_dist.c: -------------------------------------------------------------------------------- 1 | /* sq_dist - a mex function to compute a matrix of all pairwise squared 2 | distances between two sets of vectors, stored in the columns of the two 3 | matrices that are arguments to the function. The length of the vectors must 4 | agree. If only a single argument is given, the missing argument is taken to 5 | be identical to the first. If an optional third matrix argument Q is given, 6 | it must be of the same size as the output, but in this case a vector of the 7 | traces of the product of Q and the coordinatewise squared distances is 8 | returned. 9 | 10 | Copyright (c) 2003, 2004 Carl Edward Rasmussen. 2003-04-22. */ 11 | 12 | #include "mex.h" 13 | #include 14 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 15 | { 16 | double *a, *b, *C, *Q, z, t; 17 | int D, n, m, i, j, k; 18 | if (nrhs < 1 || nrhs > 3 || nlhs > 1) 19 | mexErrMsgTxt("Usage: C = sq_dist(a,b)\n or: C = sq_dist(a)\n or: c = sq_dist(a,b,Q)\nwhere the b matrix may be empty."); 20 | a = mxGetPr(prhs[0]); 21 | m = mxGetN(prhs[0]); 22 | D = mxGetM(prhs[0]); 23 | if (nrhs == 1 || mxIsEmpty(prhs[1])) { 24 | b = a; 25 | n = m; 26 | } else { 27 | b = mxGetPr(prhs[1]); 28 | n = mxGetN(prhs[1]); 29 | if (D != mxGetM(prhs[1])) 30 | mexErrMsgTxt("Error: column lengths must agree"); 31 | } 32 | if (nrhs < 3) { 33 | plhs[0] = mxCreateDoubleMatrix(m, n, mxREAL); 34 | C = mxGetPr(plhs[0]); 35 | for (i=0; i 0) % copy valid parameters 29 | for fn = fieldnames(parameters)' 30 | if ismember(fn,fieldnames(kaf)) 31 | kaf.(fn{1}) = parameters.(fn{1}); 32 | end 33 | end 34 | end 35 | end 36 | 37 | function y_est = evaluate(kaf,x) % evaluate the algorithm 38 | if size(kaf.dict,1)>0 39 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 40 | y_est = k'*kaf.alpha; 41 | else 42 | y_est = zeros(size(x,1),1); % zeros if not initialized 43 | end 44 | end 45 | 46 | function train(kaf,x,y) % train the algorithm 47 | if (size(kaf.dict,1) 0) % copy valid parameters 27 | for fn = fieldnames(parameters)' 28 | if ismember(fn,fieldnames(obj)) 29 | obj.(fn{1}) = parameters.(fn{1}); 30 | end 31 | end 32 | end 33 | end 34 | 35 | function y_est = evaluate(obj,x) % evaluate the algorithm 36 | if numel(obj.w)>0 37 | y_est = x*obj.w; 38 | else 39 | y_est = zeros(size(x,1),1); 40 | end 41 | end 42 | 43 | function train(obj,x,y) % train the algorithm 44 | m = length(x); 45 | if numel(obj.w)==0 % initialize 46 | obj.w = zeros(m,1); 47 | end 48 | 49 | x_norm2 = x*x'; 50 | K_k = (obj.sigma2_k + obj.sigma2_d) / ... 51 | ((obj.sigma2_k + obj.sigma2_d) * x_norm2 + obj.sigma2_n); 52 | 53 | err = y - obj.lambda*x*obj.w; 54 | obj.w = obj.lambda*obj.w + K_k*x'*err; 55 | obj.sigma2_k = (1 - K_k*x_norm2/m)*... 56 | (obj.sigma2_k + obj.sigma2_d); 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/profiler/klms_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Kernel Least-Mean-Square algorithm 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
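%
% Profiler classes add bookkeeping only: train_profiled wraps the base
% algorithm's train method with a timer, while lastflops and lastbytes
% report the cost of the last iteration. A usage sketch (x and y are
% hypothetical input/output samples):
%
%   kaf = klms_profiler(struct('eta',0.5));
%   kaf.train_profiled(x,y); % trains and accumulates kaf.elapsed
%   f = kaf.lastflops(); % flop count of the last iteration
%   b = kaf.lastbytes(); % memory used by the last iteration
%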
4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef klms_profiler < klms 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | prev_dict_size = 0; % previous dictionary size for growth check 11 | end 12 | 13 | methods 14 | 15 | function kaf = klms_profiler(parameters) % constructor 16 | if nargin<1, parameters = struct(); end 17 | kaf = kaf@klms(parameters); 18 | end 19 | 20 | function flops = lastflops(kaf) % flops for last iteration 21 | m = size(kaf.dict,1); 22 | if kaf.prev_dict_size < m % growing 23 | m1 = m - 1; 24 | floptions = struct(... 25 | 'sum', m1 - 1, ... 26 | 'mult', m1 + 1, ... 27 | sprintf('%s_kernel',kaf.kerneltype), [m1, 1, size(kaf.dict,2)]); 28 | 29 | flops = kflops(floptions); 30 | else 31 | flops = 0; 32 | end 33 | end 34 | 35 | %% flops breakdown 36 | 37 | % k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 38 | % kernel: m1 39 | 40 | % y_est = k'*kaf.alpha; 41 | % sum: m1 - 1 42 | % mult: m1 43 | 44 | % err = y - y_est; 45 | % sum: 1 46 | 47 | % kaf.alpha = [kaf.alpha; kaf.mu*err]; 48 | % mult: 1 49 | 50 | %% 51 | 52 | function train_profiled(kaf,x,y) 53 | kaf.prev_dict_size = size(kaf.dict,1); 54 | t1 = tic; 55 | kaf.train(x,y); 56 | t2 = toc(t1); 57 | kaf.elapsed = kaf.elapsed + t2; 58 | end 59 | 60 | function bytes = lastbytes(kaf) % bytes used in last iteration 61 | m = size(kaf.dict,1); 62 | bytes = 8*(m + m*size(kaf.dict,2)); % 8 bytes for double precision 63 | % alpha, dict 64 | end 65 | 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /demo/demo_parameter_estimation_lorenz.m: -------------------------------------------------------------------------------- 1 | % DEMO_PARAMETER_ESTIMATION_LORENZ Estimation of the parameters of the 2 | % KRLS-T algorithm for predicting the Lorenz attractor time-series. 3 | % 4 | % This demo estimates the optimal parameters of the KRLS-T algorithm when 5 | % applied to predict the Lorenz attractor time-series. The estimated 6 | % parameters are: forgetting factor lambda, regularization c and Gaussian 7 | % kernel width. Kernels other than the Gaussian can be used by modifying 8 | % kafbox_parameter_estimation.m. 9 | % 10 | % Author: Steven Van Vaerenbergh, 2013. 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 13 | % https://github.com/steven2358/kafbox/ 14 | 15 | close all 16 | clear 17 | 18 | %% PARAMETERS 19 | 20 | horizon = 1; % prediction horizon 21 | embedding = 6; % time-embedding 22 | N = 500; % number of data 23 | 24 | %% PROGRAM 25 | tic 26 | 27 | fprintf('Loading Lorenz attractor time-series...\n') 28 | [X,Y] = kafbox_data(struct('name','lorenz','horizon',horizon,... 29 | 'embedding',embedding,'N',N)); 30 | 31 | fprintf('Estimating KRLS-T parameters for %d-step prediction...\n\n',... 32 | horizon) 33 | [sigma_est,reg_est,lambda_est] = kafbox_parameter_estimation(X,Y); 34 | 35 | fprintf('Running KRLS-T with estimated parameters...\n') 36 | Y_est = zeros(N,1); 37 | kaf = krlst(struct('lambda',lambda_est,'M',100,'sn2',reg_est,... 
38 | 'kerneltype','gauss','kernelpar',sigma_est)); 39 | 40 | for i=1:N 41 | if ~mod(i,floor(N/10)), fprintf('.'); end % progress indicator, 10 dots 42 | 43 | Y_est(i) = kaf.evaluate(X(i,:)); % predict the next output 44 | kaf.train(X(i,:),Y(i)); % train with one input-output pair 45 | 46 | end 47 | fprintf('\n'); 48 | SE = (Y-Y_est).^2; % square error 49 | 50 | toc 51 | %% OUTPUT 52 | 53 | fprintf('\n'); 54 | fprintf(' Estimated\n'); 55 | fprintf('sigma: %.4f\n',sigma_est) 56 | fprintf('c: %e\n',reg_est) 57 | fprintf('lambda: %.4f\n\n',lambda_est) 58 | 59 | fprintf('Average MSE after first 100 samples: %.2fdB\n\n',... 60 | 10*log10(mean(SE(101:end)))); 61 | 62 | figure; hold all; plot(Y); plot(Y_est); 63 | legend({'original','prediction'},'Location','SE'); 64 | title(sprintf('%d-step ahead prediction %s on Lorenz time series',... 65 | horizon,upper(class(kaf)))); 66 | -------------------------------------------------------------------------------- /lib/readme.md: -------------------------------------------------------------------------------- 1 | Algorithm code structure 2 | ------------------------ 3 | - Each algorithm is contained in a single file in the `/lib` folder. 4 | - Each algorithm is implemented as an object in Matlab, using the `classdef` syntax. 5 | - Each algorithm has two basic public methods: `evaluate` and `train`. 6 | - The object code contains only one iteration of the algorithm. The for-loop over the time index that governs the online operation should go in an external script (see the minimal usage example below). 7 | 8 | 9 | Quick steps to code an algorithm in the toolbox's format 10 | -------------------------------------------------------- 11 | 1. Make a copy of `lib/kafbox_template.m` and name it `MY_ALGORITHM.m`. 12 | 2. Open the file and replace all mentions of "kafbox_template" with MY_ALGORITHM. 13 | 3. Clean up the file header by replacing all text between square brackets. 14 | 4. In the first properties section, fill in the parameter values used by the algorithm with their corresponding default values. Default values should be chosen such that the algorithm performs well on a whitened input signal (see `demo/demo_sinc_all.m`). 15 | 5. In the second properties section, fill in the variables that will be calculated by the algorithm. 16 | 6. [optional] Adjust the `evaluate` method. 17 | 7. Add the code for training the algorithm to the `train` method. It may be useful to rely on "helper" functions to keep the code modular. For coding style, see below. 18 | 8. Check if the code is formatted correctly by running the script `unit_test('MY_ALGORITHM')`. 19 | 20 | 21 | How to contribute an algorithm to the toolbox 22 | --------------------------------------------- 23 | Option 1: email it to me (steven.vanvaerenbergh@unican.es). 24 | 25 | Option 2: fork the toolbox on GitHub (https://github.com/steven2358/kafbox), push your change to a named branch, then send me a pull request. 26 | 27 | 28 | Coding style 29 | ------------ 30 | **Code** should be 31 | 1. as human-readable as possible, in the first place; 32 | 2. short and structured, in the second place. 33 | 34 | **Algorithm structure** should follow the pseudocode from the corresponding publication. 35 | 36 | **Variable naming** should correspond to the nomenclature used in the corresponding publication whenever possible. 37 | 38 | **Comments** should be used sparingly. Document the design and purpose of the code rather than its mechanics.
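

Minimal usage example
---------------------
A sketch of the external online-learning loop described above, here with `klms` (`X` and `y` are placeholder data, one input vector per row of `X`):

    kaf = klms(struct('eta',0.5,'kerneltype','gauss','kernelpar',1));
    for i=1:size(X,1)
        y_est = kaf.evaluate(X(i,:)); % predict before updating
        kaf.train(X(i,:),y(i)); % then train on the new pair
    end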
39 | -------------------------------------------------------------------------------- /lib/rffklms.m: -------------------------------------------------------------------------------- 1 | % Random Fourier Feature Kernel Least Mean Square algorithm. 2 | % 3 | % Abhishek Singh, Narendra Ahuja and Pierre Moulin, "Online learning with 4 | % kernels: Overcoming the growing sum problem," 2012 IEEE International 5 | % Workshop on Machine Learning for Signal Processing (MLSP), Sept. 2012. 6 | % http://dx.doi.org/10.1109/MLSP.2012.6349811 7 | % 8 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 9 | % https://github.com/steven2358/kafbox/ 10 | 11 | classdef rffklms < kernel_adaptive_filter 12 | 13 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 14 | seed = 1; 15 | mu = .9; % step size 16 | D = 1000; % RFF dimension 17 | kerneltype = 'gauss'; % kernel type 18 | kernelpar = 1; % kernel parameter 19 | end 20 | 21 | properties (GetAccess = 'public', SetAccess = 'private') % variables 22 | omega = []; % 23 | Omega = []; % weight vector 24 | b = 0; % 25 | end 26 | 27 | methods 28 | function kaf = rffklms(parameters) % constructor 29 | if (nargin > 0) % copy valid parameters 30 | for fn = fieldnames(parameters)' 31 | if ismember(fn,fieldnames(kaf)) 32 | kaf.(fn{1}) = parameters.(fn{1}); 33 | end 34 | end 35 | end 36 | end 37 | 38 | function y_est = evaluate(kaf,x) % evaluate the algorithm 39 | if numel(kaf.omega) 40 | Psi = cos(kaf.omega*x' + repmat(kaf.b,1,size(x,1))); 41 | y_est = Psi'*kaf.Omega/kaf.D; 42 | else 43 | y_est = zeros(size(x,1),1); 44 | end 45 | end 46 | 47 | function train(kaf,x,y) % train the algorithm 48 | if ~numel(kaf.omega) 49 | rng('default'); 50 | rng(kaf.seed); 51 | kaf.omega = 1/kaf.kernelpar*randn(kaf.D,size(x,2)); 52 | kaf.b = 2*pi*rand(kaf.D,1); 53 | kaf.Omega = zeros(kaf.D,1); 54 | end 55 | 56 | Psi = cos(kaf.omega*x' + kaf.b); 57 | y_est = kaf.Omega'*Psi/kaf.D; 58 | err = y - y_est; 59 | kaf.Omega = kaf.Omega + kaf.mu*err*Psi; 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /lib/kafbox_template.m: -------------------------------------------------------------------------------- 1 | % This is the template used for kernel adaptive filtering algorithms in 2 | % the kernel adaptive filtering toolbox. [Delete this line.] 3 | % 4 | % [The name of the algorithm goes here] 5 | % 6 | % [A reference to the original publication of the algorithm goes here, 7 | % including a link to its DOI url: http://dx.doi.org/xxx] 8 | % 9 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
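%
% The constructor below copies every field of the "parameters" struct
% that matches a public property and silently ignores all other fields,
% e.g. (hypothetical values):
%
%   kaf = kafbox_template(struct('param1',3,'kernelpar',0.5));
%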
10 | % https://github.com/steven2358/kafbox/ 11 | 12 | classdef kafbox_template < kernel_adaptive_filter 13 | 14 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 15 | param1 = 1; 16 | param2 = 2; 17 | kerneltype = 'gauss'; % kernel type 18 | kernelpar = 1; % kernel parameter 19 | end 20 | 21 | properties (GetAccess = 'public', SetAccess = 'private') % variables 22 | dict = []; % dictionary 23 | alpha = []; % expansion coefficients 24 | end 25 | 26 | methods 27 | function kaf = kafbox_template(parameters) % constructor 28 | if (nargin > 0) % copy valid parameters 29 | for fn = fieldnames(parameters)' 30 | if ismember(fn,fieldnames(kaf)) 31 | kaf.(fn{1}) = parameters.(fn{1}); 32 | end 33 | end 34 | end 35 | end 36 | 37 | function y_est = evaluate(kaf,x) % evaluate the algorithm 38 | if ~isempty(kaf.dict) 39 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 40 | y_est = k'*kaf.alpha; 41 | else 42 | y_est = zeros(size(x,1),1); 43 | end 44 | end 45 | 46 | function train(kaf,x,y) % train the algorithm 47 | if isempty(kaf.dict) % initialize 48 | kaf.dict = x; 49 | kaf.alpha = 0; 50 | else 51 | 52 | % [main algorithm training goes here] 53 | 54 | % [example of a helper function] 55 | z = kaf.helper1(x,y); 56 | end 57 | 58 | end 59 | end 60 | 61 | methods (Static = true) % [helper functions go here] 62 | function z = helper1(x,y) 63 | z = x*y; 64 | % operations 65 | end 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /demo/literature/liu2010kernel/fig2_12.m: -------------------------------------------------------------------------------- 1 | % Reproduces figure 2.12 from "Kernel Adaptive Filtering: A Comprehensive 2 | % Introduction". 3 | % 4 | % Comparison of the performance of LMS and KLMS in nonlinear channel 5 | % equalization. Execution time: < 1 minute (Intel Pentium Core2 Duo). 6 | % 7 | % Weifeng Liu, Jose C. Principe and Simon Haykin, "Kernel Adaptive 8 | % Filtering: A Comprehensive Introduction", Wiley, 2010. 9 | % 10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 11 | % https://github.com/steven2358/kafbox/ 12 | 13 | clear 14 | close all 15 | 16 | %% PARAMETERS 17 | embedding = 5; % input embedding 18 | delay = 2; % equalization delay 19 | 20 | N_tr = 1000; % number of training data 21 | N_te = 50; % number of testing data 22 | 23 | setups{1} = lms(struct('mu',.01)); %#ok 24 | setups{2} = klms(struct('eta',0.2,'M',N_tr,'kerneltype','gauss','kernelpar',1)); 25 | 26 | %% PREPARE DATA 27 | 28 | fprintf('Fig. 
2.12 from "Kernel Adaptive Filtering: A Comprehensive\n'); 29 | fprintf('Introduction".\n') 30 | 31 | u = randn(N_tr+N_te+embedding-1,1)>0; 32 | u = 2*u-1; % binary input 33 | 34 | z = u + 0.5*[0;u(1:end-1)]; % output of linear channel 35 | ns = 0.4*randn(length(u),1); % channel noise 36 | y = z - 0.9*z.^2 + ns; % output of nonlinear channel 37 | 38 | X_all = zeros(N_tr+N_te,embedding); % time-embedding 39 | for k=1:embedding 40 | X_all(:,k) = y(k:N_tr+N_te+k-1); 41 | end 42 | 43 | X_tr = X_all(1:N_tr,:); % training input data 44 | X_te = X_all(N_tr+1:N_tr+N_te,:); % test input data 45 | 46 | T_tr = u(delay:delay+N_tr-1); % training desired output 47 | T_te = u(delay+N_tr:delay+N_tr+N_te-1); % training desired output 48 | 49 | %% RUN ALGORITHMS 50 | 51 | num_setup = length(setups); 52 | 53 | MSE = zeros(N_tr,num_setup); 54 | 55 | for setup_ind = 1:num_setup 56 | kaf = setups{setup_ind}; 57 | 58 | for n=1:N_tr 59 | if ~mod(n,floor(N_tr/10)), fprintf('.'); end % progress indicator, 10 dots 60 | 61 | t_te = kaf.evaluate(X_te); % test on test set 62 | err = T_te - t_te; 63 | MSE(n,setup_ind) = mean(err.^2); 64 | 65 | kaf.train(X_tr(n,:),T_tr(n)); % train with one input-output pair 66 | end 67 | fprintf('\n'); 68 | end 69 | 70 | %% OUTPUT 71 | 72 | figure 73 | plot(MSE,'LineWidth',2) 74 | 75 | legend('LMS','KLMS') 76 | xlabel('iteration') 77 | ylabel('testing MSE') 78 | -------------------------------------------------------------------------------- /lib/norma.m: -------------------------------------------------------------------------------- 1 | % Naive Online regularized Risk Minimization Algorithm 2 | % 3 | % J. Kivinen, A.J. Smola, and R.C. Williamson, "Online learning with 4 | % kernels," IEEE Transactions on Signal Processing, vol. 52, no. 8, 5 | % pp. 2165-2176, Aug. 2004, http://dx.doi.org/10.1109/TSP.2004.830991 6 | % 7 | % Remark: using squared loss function 8 | % 9 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
10 | % https://github.com/steven2358/kafbox/ 11 | 12 | classdef norma < kernel_adaptive_filter 13 | 14 | properties (GetAccess = 'public', SetAccess = 'private') 15 | tau = 500; % memory size (terms retained in truncation) 16 | lambda = 1E-2; % regularization parameter 17 | eta = .5; % learning rate 18 | tcoeff = 0; % learning rate coefficient: eta_t = eta * t^tcoeff 19 | kerneltype = 'gauss'; % kernel type 20 | kernelpar = 1; % kernel parameter 21 | end 22 | 23 | properties (GetAccess = 'public', SetAccess = 'private') 24 | dict = []; % dictionary 25 | alpha = []; % expansion coefficients 26 | end 27 | 28 | methods 29 | function kaf = norma(parameters) % constructor 30 | if (nargin > 0) % copy valid parameters 31 | for fn = fieldnames(parameters)' 32 | if ismember(fn,fieldnames(kaf)) 33 | kaf.(fn{1}) = parameters.(fn{1}); 34 | end 35 | end 36 | end 37 | end 38 | 39 | function y_est = evaluate(kaf,x) % evaluate the algorithm 40 | if size(kaf.dict,1)>0 41 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 42 | y_est = k'*kaf.alpha; 43 | else 44 | y_est = zeros(size(x,1),1); 45 | end 46 | end 47 | 48 | function train(kaf,x,y) % train the algorithm 49 | kaf.alpha = (1-kaf.lambda*kaf.eta)*kaf.alpha; 50 | 51 | y_est = kaf.evaluate(x); 52 | err = y - y_est; 53 | 54 | kaf.alpha = [kaf.alpha; kaf.eta*err]; % grow 55 | kaf.dict = [kaf.dict; x]; % grow 56 | if length(kaf.alpha)>kaf.tau 57 | kaf.alpha(1) = []; % prune 58 | kaf.dict(1,:) = []; % prune 59 | end 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /lib/profiler/knlms_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Kernel Normalized Least-Mean-Square algorithm with 2 | % Coherence Criterion 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | classdef knlms_profiler < knlms 8 | 9 | properties (GetAccess = 'public', SetAccess = 'private') 10 | elapsed = 0; % elapsed time 11 | prev_dict_size = 0; % previous dictionary size for growth check 12 | end 13 | 14 | methods 15 | 16 | function kaf = knlms_profiler(parameters) % constructor 17 | if nargin<1, parameters = struct(); end 18 | kaf = kaf@knlms(parameters); 19 | end 20 | 21 | function flops = lastflops(kaf) % flops for last iteration 22 | m = size(kaf.dict,1); 23 | if kaf.prev_dict_size < m % growing 24 | m1 = m - 1; 25 | n2 = 1; 26 | else 27 | m1 = m; 28 | n2 = 0; 29 | end 30 | m3 = m; 31 | floptions = struct(... 32 | 'sum', 3*m3, ... 33 | 'mult', 3*m3 + 1, ... 
'div', 1, ... 34 | sprintf('%s_kernel',kaf.kerneltype), [m1+n2, 1, size(kaf.dict,2)]); 35 | 36 | flops = kflops(floptions); 37 | end 38 | 39 | %% flops breakdown 40 | 41 | % k = kernel(x,kaf.dict,kaf.kerneltype,kaf.kernelpar); 42 | % kernel: m1 43 | 44 | % h = kernel(x,kaf.dict,kaf.kerneltype,kaf.kernelpar); 45 | % kernel: n2 % 1 element when growing, 0 when not 46 | 47 | % kaf.alpha = kaf.alpha + kaf.eta / (kaf.eps + h*h') * (y - h*kaf.alpha) * h'; 48 | % sum: 3*m3 49 | % mult: 3*m3 + 1 50 | % div: 1 51 | 52 | %% 53 | 54 | function train_profiled(kaf,x,y) 55 | kaf.prev_dict_size = size(kaf.dict,1); 56 | t1 = tic; 57 | kaf.train(x,y); 58 | t2 = toc(t1); 59 | kaf.elapsed = kaf.elapsed + t2; 60 | end 61 | 62 | function bytes = lastbytes(kaf) % bytes used in last iteration 63 | m = size(kaf.dict,1); 64 | bytes = 8*(m + m*size(kaf.dict,2)); % 8 bytes for double precision 65 | % alpha, dict 66 | end 67 | 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /demo/literature/richard2009online/fig1.m: -------------------------------------------------------------------------------- 1 | % Partially reproduces figure 1 from "Online Prediction of Time Series Data 2 | % With Kernels". (3 algorithms, 25 MC simulations.) 3 | % 4 | % Learning curves for KNLMS, NORMA, and KRLS on a nonlinear system. 5 | % Execution time: 2.5 minutes (Intel Pentium Core2 Duo). 6 | % 7 | % C. Richard, J.C.M. Bermudez, and P. Honeine, "Online Prediction of Time 8 | % Series Data With Kernels," IEEE Transactions on Signal Processing, 9 | % vol. 57, no. 3, pp. 1058-1067, March 2009, 10 | % http://dx.doi.org/10.1109/TSP.2008.2009895 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 13 | % https://github.com/steven2358/kafbox/ 14 | 15 | clear 16 | close all 17 | 18 | %% PARAMETERS 19 | 20 | N = 3000; % number of training data points 21 | ktype = 'gauss'; 22 | kpar = 1/sqrt(2*3.73); 23 | 24 | setups{1} = norma(struct('lambda',0.98,'tau',38,'eta',1,'tcoeff',-1/2,'kerneltype',ktype,'kernelpar',kpar)); 25 | setups{2} = knlms(struct('eta',.09,'eps',0.03,'mu0',0.5,'kerneltype',ktype,'kernelpar',kpar)); 26 | setups{3} = krls(struct('nu',.6,'kerneltype',ktype,'kernelpar',kpar)); 27 | 28 | numsim = 25; 29 | 30 | %% RUN ALGORITHMS 31 | t1 = tic; 32 | fprintf('Fig.
1 from "Online Prediction of Time Series Data With Kernels".\n'); 33 | 34 | num_setup = length(setups); 35 | MSE = zeros(N,num_setup); 36 | titles = cell(num_setup,1); 37 | 38 | for sim_ind = 1:numsim 39 | fprintf('SIM %d:\n',sim_ind) 40 | 41 | % Generate the data 42 | [X,y,yref] = generate_doddbench(N); 43 | 44 | for setup_ind = 1:num_setup 45 | kaf = setups{setup_ind}; 46 | titles{setup_ind} = upper(class(kaf)); 47 | 48 | for n=1:N 49 | if ~mod(n,floor(N/10)), fprintf('.'); end % progress indicator, 10 dots 50 | 51 | y_est = kaf.evaluate(X(n,:)); % test on test set 52 | err = yref(n) - y_est; 53 | MSE(n,setup_ind) = MSE(n,setup_ind) + err.^2/numsim; 54 | 55 | kaf.train(X(n,:),y(n)); % train with one input-output pair 56 | end 57 | fprintf('\n'); 58 | end 59 | end 60 | 61 | toc(t1) 62 | %% OUTPUT 63 | 64 | % MSE smoothing by moving average for visualization 65 | MSE_smooth = filter(1/20*ones(20,1),1,MSE); 66 | 67 | figure 68 | plot(10*log10(MSE_smooth)); 69 | title('Learning curves') 70 | grid on 71 | 72 | xlabel('iteration') 73 | ylabel('MSE (dB)') 74 | legend(titles) 75 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_sq_dist.m: -------------------------------------------------------------------------------- 1 | % sq_dist - a function to compute a matrix of all pairwise squared distances 2 | % between two sets of vectors, stored in the columns of the two matrices, a 3 | % (of size D by n) and b (of size D by m). If only a single argument is given 4 | % or the second matrix is empty, the missing matrix is taken to be identical 5 | % to the first. 6 | % 7 | % Special functionality: If an optional third matrix argument Q is given, it 8 | % must be of size n by m, and in this case a vector of the traces of the 9 | % product of Q' and the coordinatewise squared distances is returned. 10 | % 11 | % NOTE: The program code is written in the C language for efficiency and is 12 | % contained in the file sq_dist.c, and should be compiled using matlabs mex 13 | % facility. However, this file also contains a (less efficient) matlab 14 | % implementation, supplied only as a help to people unfamiliar with mex. If 15 | % the C code has been properly compiled and is avaiable, it automatically 16 | % takes precendence over the matlab code in this file. 17 | % 18 | % Usage: C = sq_dist(a, b) 19 | % or: C = sq_dist(a) or equiv.: C = sq_dist(a, []) 20 | % or: c = sq_dist(a, b, Q) 21 | % where the b matrix may be empty. 22 | % 23 | % where a is of size D by n, b is of size D by m (or empty), C and Q are of 24 | % size n by m and c is of size D by 1. 25 | % 26 | % Copyright (c) 2003, 2004, 2005 and 2006 Carl Edward Rasmussen. 2006-03-09. 27 | 28 | function C = kafbox_sq_dist(a, b, Q); 29 | 30 | if nargin < 1 | nargin > 3 | nargout > 1 31 | error('Wrong number of arguments.'); 32 | end 33 | 34 | if nargin == 1 | isempty(b) % input arguments are taken to be 35 | b = a; % identical if b is missing or empty 36 | end 37 | 38 | [D, n] = size(a); 39 | [d, m] = size(b); 40 | if d ~= D 41 | error('Error: column lengths must agree.'); 42 | end 43 | 44 | if nargin < 3 45 | C = zeros(n,m); 46 | for d = 1:D 47 | C = C + (repmat(b(d,:), n, 1) - repmat(a(d,:)', 1, m)).^2; 48 | end 49 | % C = repmat(sum(a.*a)',1,m)+repmat(sum(b.*b),n,1)-2*a'*b could be used to 50 | % replace the 3 lines above; it would be faster, but numerically less stable. 
51 | else 52 | if [n m] == size(Q) 53 | C = zeros(D,1); 54 | for d = 1:D 55 | C(d) = sum(sum((repmat(b(d,:), n, 1) - repmat(a(d,:)', 1, m)).^2.*Q)); 56 | end 57 | else 58 | error('Third argument has wrong size.'); 59 | end 60 | end 61 | -------------------------------------------------------------------------------- /demo/demo_prediction_mackey_glass.m: -------------------------------------------------------------------------------- 1 | % 1-step ahead prediction on Mackey Glass time series. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | close all; clear 7 | 8 | %% PARAMETERS 9 | 10 | h = 1; % prediction horizon 11 | L = 10; % embedding 12 | n_train = 200; % train data 13 | n_test = 100; % test data 14 | 15 | sigma = 1; % kernel parameter 16 | lms_lr = 0.2; % lms learning rate 17 | klms_lr = 0.2; % klms learning rate 18 | krls_nu = 1E-4; % krls precision parameter 19 | 20 | % select algorithms 21 | i=1; 22 | % algos{i} = lms(struct('mu',lms_lr)); i=i+1; 23 | algos{i} = klms(struct('M',Inf,'kerneltype','gauss','kernelpar',sigma,'eta',klms_lr)); i=i+1; 24 | % algos{i} = krls(struct('nu',krls_nu,'kerneltype','gauss','kernelpar',sigma)); i=i+1; 25 | 26 | %% PREPARE DATA 27 | 28 | % load data 29 | [X,y] = kafbox_data(struct('name','mg30','embedding',L,'horizon',h,... 30 | 'N',n_train+n_test)); 31 | 32 | % split data 33 | X_train = X(1:n_train,:); 34 | y_train = y(1:n_train); 35 | X_test = X(n_train+1:n_train+n_test,:); 36 | y_test = y(n_train+1:n_train+n_test); 37 | 38 | %% RUN ALGORITHMS 39 | n_algos = length(algos); 40 | MSE = zeros(n_train,n_algos); 41 | y_est_final = zeros(n_test,n_algos); 42 | titles = cell(n_algos,1); 43 | for j=1:n_algos 44 | kaf = algos{j}; 45 | titles{j} = upper(class(kaf)); % store algorithm name 46 | fprintf('Training %s',titles{j}) 47 | for i=1:n_train 48 | if ~mod(i,floor(n_train/10)), fprintf('.'); end % progress indicator 49 | 50 | kaf.train(X_train(i,:),y_train(i)); % train with one input-output pair 51 | 52 | y_est = kaf.evaluate(X_test); % evaluate on test data 53 | MSE(i,j) = mean((y_test-y_est).^2); % test error 54 | end 55 | fprintf('\n'); 56 | y_est_final(:,j) = y_est; 57 | end 58 | 59 | %% OUTPUT 60 | figure; 61 | plot(10*log10(MSE)); 62 | title('Learning curves') 63 | legend(titles); 64 | xlabel('iteration'); ylabel('MSE (db)'); 65 | 66 | fprintf('Final MSE: %.2fdB\n\n',10*log10(MSE(end))); 67 | 68 | figure; hold all; 69 | plot(y_test,'LineWidth',2); 70 | titles2 = {'original'}; 71 | line_styles = {'--','-.'}; 72 | for j=1:n_algos 73 | plot(y_est_final(:,j),'LineWidth',2,'LineStyle',line_styles{j}); 74 | end 75 | titles2(2:n_algos+1) = titles; 76 | legend(titles2); 77 | title(sprintf('%d-step ahead prediction on Mackey Glass time series',h)); 78 | 79 | % export_fig('test.pdf') 80 | -------------------------------------------------------------------------------- /lib/profiler/norma_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Naive Online regularized Risk Minimization 2 | % Algorithm 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
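%
% Operation counts are turned into a single cost figure by kflops, which
% receives a struct of per-operation totals; kernel evaluations are passed
% as [number of evaluations, 1, input dimension]. A sketch with
% hypothetical counts:
%
%   flops = kflops(struct('sum',99,'mult',102,'exp',1,...
%       'gauss_kernel',[100,1,5]));
%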
5 | % https://github.com/steven2358/kafbox/ 6 | 7 | classdef norma_profiler < norma 8 | 9 | properties (GetAccess = 'public', SetAccess = 'private') 10 | elapsed = 0; % elapsed time 11 | prev_dict_size = 0; % previous memory size for prune check 12 | end 13 | 14 | methods 15 | 16 | function kaf = norma_profiler(parameters) % constructor 17 | if nargin<1, parameters = struct(); end 18 | kaf = kaf@norma(parameters); 19 | end 20 | 21 | function flops = lastflops(kaf) % flops for last iteration 22 | 23 | m = size(kaf.dict,1); 24 | if kaf.prev_dict_size < m % growing (no pruning) 25 | m1 = m-1; 26 | else 27 | m1 = m; 28 | end 29 | floptions = struct(... 30 | 'sum', m1 - 1 + 1 + 1, ... 31 | 'mult', 2*m1 + 3 + 1, ... 32 | 'exp', 1, ... 33 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 34 | 35 | flops = kflops(floptions); 36 | end 37 | 38 | %% flops breakdown 39 | 40 | % k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 41 | % kernel: m1 42 | 43 | % y_est = k'*(kaf.alpha.*kaf.beta(length(kaf.alpha):-1:1)); 44 | % sum: m1 - 1 45 | % mult: 2*m1 46 | 47 | % err = y - y_est; 48 | % sum: 1 49 | 50 | % kaf.alpha = (1-kaf.lambda*kaf.eta*kaf.t^kaf.tcoeff)*kaf.alpha; 51 | % sum: 1 52 | % mult: 3 53 | % exp: 1 54 | 55 | % kaf.alpha = [kaf.alpha; kaf.mu*err]; 56 | % mult: 1 57 | 58 | %% 59 | 60 | function train_profiled(kaf,x,y) 61 | kaf.prev_dict_size = size(kaf.dict,1); 62 | t1 = tic; 63 | kaf.train(x,y); 64 | t2 = toc(t1); 65 | kaf.elapsed = kaf.elapsed + t2; 66 | end 67 | 68 | function bytes = lastbytes(kaf) % bytes used in last iteration 69 | m = size(kaf.dict,1); 70 | bytes = 4 + 8*(m + m*size(kaf.dict,2)); % 8 bytes for double precision, 4 for uint32 71 | % t, alpha, dict 72 | end 73 | 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /lib/util/kafbox_parameter_estimation.m: -------------------------------------------------------------------------------- 1 | function [sigma,reg,lambda] = ... 2 | kafbox_parameter_estimation(X,Y,time_ind,epochs) 3 | % KAFBOX_PARAMETER_ESTIMATION Gaussian-process based estimation of the 4 | % parameters of the KRLS-T algorithm. 5 | % 6 | % Steven Van Vaerenbergh, Ignacio Santamaria, and Miguel Lazaro-Gredilla, 7 | % "Estimation of the forgetting factor in kernel recursive least squares." 8 | % 2012 IEEE International Workshop on Machine Learning for Signal 9 | % Processing (MLSP), 2012. http://dx.doi.org/10.1109/MLSP.2012.6349749 10 | % 11 | % INPUT: - X: input data, each row is a data point 12 | % - Y: output data, one column. 13 | % - time_ind: temporal indices of points 14 | % - epochs: number of epochs for minimization 15 | % OUTPUT: - sigma: estimated kernel length scale (width) 16 | % - reg: estimated regularization 17 | % - lambda: estimated forgetting factor. 18 | % USAGE: [sigma,reg,lambda] = 19 | % kafbox_parameter_estimation(X,Y,time_ind,epochs) 20 | % 21 | % Author: Steven Van Vaerenbergh, 2013. 22 | % 23 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 24 | % https://github.com/steven2358/kafbox/ 25 | 26 | N = size(X,1); 27 | if nargin<4 28 | epochs = 15; 29 | end 30 | if nargin<3 31 | time_ind = (0:N-1)'; % assume that samples are taken on synchronous time instants 32 | end 33 | Xt = [time_ind X]; 34 | 35 | % initialization values 36 | noisevar0 = mean(Y)^2/4; 37 | lambda0 = 0.99; 38 | ell0 = 1; 39 | sf20 = 1; 40 | 41 | % The standard covariance function in kernel adaptive filtering is a 42 | % Gaussian kernel with additive noise. 
Forgetting is introduced through an 43 | % AR(1) process with forgetting factor lambda. See publication for details. 44 | covfunc = {'kafbox_covSum',{{'kafbox_covProd',... 45 | {'kafbox_covLambda', 'kafbox_covSEiso2'}},'kafbox_covNoise'}}; 46 | loghyper0 = [log(lambda0/(1-lambda0)); log(ell0); log(sqrt(sf20)); 0.5*log(noisevar0)]; 47 | 48 | % Minimize negative log maximal likelihood using GPML toolbox 49 | loghyper1 = kafbox_minimize(loghyper0,'kafbox_gpr',epochs,covfunc,Xt,Y); 50 | 51 | lambda_est = exp(loghyper1(1))/(1+exp(loghyper1(1))); 52 | lambda_est = lambda_est^2; % correspondance between st-gp and krls-t 53 | ell_est = exp(loghyper1(2)); 54 | sf2_est = exp(2*loghyper1(3)); 55 | noisevar_est = exp(2*loghyper1(4)); 56 | 57 | sigma = ell_est; % length-scale 58 | reg = noisevar_est/sf2_est; % noise-to-signal parameter (regularization) 59 | lambda = lambda_est; % forgetting factor 60 | -------------------------------------------------------------------------------- /lib/qklms.m: -------------------------------------------------------------------------------- 1 | % Quantized Kernel Least Mean Square algorithm 2 | % 3 | % B. Chen, S. Zhao, P. Zhu, and J.C. Principe, "Quantized Kernel Least Mean 4 | % Square Algorithm," IEEE Transactions on Neural Networks and Learning 5 | % Systems, vol. 23, no. 1, pp. 22-32, Jan. 2012, 6 | % http://dx.doi.org/10.1109/TNNLS.2011.2178446 7 | % 8 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 9 | % https://github.com/steven2358/kafbox/ 10 | 11 | classdef qklms < kernel_adaptive_filter 12 | 13 | properties (GetAccess = 'public', SetAccess = 'private') 14 | eta = .9; % learning rate 15 | epsu = .1; 16 | kerneltype = 'gauss'; % kernel type 17 | kernelpar = 1; % kernel parameter 18 | end 19 | 20 | properties (GetAccess = 'public', SetAccess = 'private') 21 | dict = []; % codebook (dictionary) 22 | alpha = []; % expansion coefficients 23 | end 24 | 25 | methods 26 | function kaf = qklms(parameters) % constructor 27 | if (nargin > 0) % copy valid parameters 28 | for fn = fieldnames(parameters)' 29 | if ismember(fn,fieldnames(kaf)) 30 | kaf.(fn{1}) = parameters.(fn{1}); 31 | end 32 | end 33 | end 34 | end 35 | 36 | function y_est = evaluate(kaf,x) % evaluate the algorithm 37 | if size(kaf.dict,1)>0 38 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 39 | y_est = k'*kaf.alpha; 40 | else 41 | y_est = zeros(size(x,1),1); % zeros if not initialized 42 | end 43 | end 44 | 45 | function train(kaf,x,y) % train the algorithm 46 | y_est = kaf.evaluate(x); % evaluate function output 47 | err = y - y_est; % instantaneous error 48 | 49 | m = size(kaf.dict,1); 50 | if m==0 51 | d2 = kaf.epsu^2 + 1; % force addition of initial base 52 | else % find distance to closest dictionary element 53 | [d2,j] = min(sum((kaf.dict - repmat(x,m,1)).^2,2)); 54 | end 55 | if d2 <= kaf.epsu^2 % reduced coefficient update 56 | kaf.alpha(j) = kaf.alpha(j) + kaf.eta*err; 57 | else 58 | kaf.dict = [kaf.dict; x]; % add base to dictionary 59 | kaf.alpha = [kaf.alpha; kaf.eta*err]; % add new coefficient 60 | end 61 | end 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covSum.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covSum(covfunc, logtheta, x, z); 2 | 3 | % covSum - compose a covariance function as the sum of other covariance 4 | % functions. 
This function doesn't actually compute very much on its own, it 5 | % merely does some bookkeeping, and calls other covariance functions to do the 6 | % actual work. 7 | % 8 | % For more help on design of covariance functions, try "help covFunctions". 9 | % 10 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-03-20. 11 | 12 | for i = 1:length(covfunc) % iterate over covariance functions 13 | f = covfunc(i); 14 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 15 | j(i) = cellstr(feval(f{:})); 16 | end 17 | 18 | if nargin == 1, % report number of parameters 19 | A = char(j(1)); for i=2:length(covfunc), A = [A, '+', char(j(i))]; end 20 | return 21 | end 22 | 23 | [n, D] = size(x); 24 | 25 | v = []; % v vector indicates to which covariance parameters belong 26 | for i = 1:length(covfunc), v = [v repmat(i, 1, eval(char(j(i))))]; end 27 | 28 | switch nargin 29 | case 3 % compute covariance matrix 30 | A = zeros(n, n); % allocate space for covariance matrix 31 | for i = 1:length(covfunc) % iteration over summand functions 32 | f = covfunc(i); 33 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 34 | A = A + feval(f{:}, logtheta(v==i), x); % accumulate covariances 35 | end 36 | 37 | case 4 % compute derivative matrix or test set covariances 38 | if nargout == 2 % compute test set cavariances 39 | A = zeros(size(z,1),1); B = zeros(size(x,1),size(z,1)); % allocate space 40 | for i = 1:length(covfunc) 41 | f = covfunc(i); 42 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 43 | [AA BB] = feval(f{:}, logtheta(v==i), x, z); % compute test covariances 44 | A = A + AA; B = B + BB; % and accumulate 45 | end 46 | else % compute derivative matrices 47 | i = v(z); % which covariance function 48 | j = sum(v(1:z)==i); % which parameter in that covariance 49 | f = covfunc(i); 50 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 51 | A = feval(f{:}, logtheta(v==i), x, j); % compute derivative 52 | end 53 | 54 | end 55 | -------------------------------------------------------------------------------- /demo/literature/richard2009online/fig2.m: -------------------------------------------------------------------------------- 1 | % Partially reproduces figure 2 from "Online Prediction of Time Series Data 2 | % With Kernels". (5 algorithms, 20 MC simulations.) 3 | % 4 | % Learning curves for KAP, KNLMS, NORMA and KRLS on a nonlinear system. 5 | % Execution time: 3.5 minutes (Intel Pentium Core2 Duo). 6 | % 7 | % C. Richard, J.C.M. Bermudez, and P. Honeine, "Online Prediction of Time 8 | % Series Data With Kernels," IEEE Transactions on Signal Processing, 9 | % vol. 57, no. 3, pp. 
1058-1067, March 2009, 10 | % http://dx.doi.org/10.1109/TSP.2008.2009895 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 13 | % https://github.com/steven2358/kafbox/ 14 | 15 | clear 16 | close all 17 | 18 | %% PARAMETERS 19 | 20 | N = 3000; % number of training data points 21 | ktype = 'laplace'; 22 | kpar = 0.35; 23 | 24 | setups{1} = norma(struct('lambda',0.09,'tau',35,'eta',0.09,'tcoeff',-1/2,'kerneltype',ktype,'kernelpar',kpar)); 25 | setups{2} = knlms(struct('eta',.01,'eps',9E-4,'mu0',0.3,'kerneltype',ktype,'kernelpar',kpar)); 26 | setups{3} = kap(struct('p',2,'eta',.009,'eps',7e-2,'mu0',0.3,'kerneltype',ktype,'kernelpar',kpar)); 27 | setups{4} = kap(struct('p',3,'eta',.01,'eps',7e-2,'mu0',0.3,'kerneltype',ktype,'kernelpar',kpar)); 28 | setups{5} = krls(struct('nu',.7,'kerneltype',ktype,'kernelpar',kpar)); 29 | 30 | numsim = 20; 31 | 32 | %% RUN ALGORITHMS 33 | t1 = tic; 34 | fprintf('Fig. 2 from "Online Prediction of Time Series Data With Kernels".\n'); 35 | 36 | num_setup = length(setups); 37 | MSE = zeros(N,num_setup); 38 | titles = cell(num_setup,1); 39 | 40 | for sim_ind = 1:numsim 41 | fprintf('SIM %d:\n',sim_ind) 42 | 43 | % Generate the data 44 | [X,y,yref] = generate_richardbench(N); 45 | 46 | for setup_ind = 1:num_setup 47 | kaf = setups{setup_ind}; 48 | titles{setup_ind} = upper(class(kaf)); 49 | 50 | for n=1:N 51 | if ~mod(n,floor(N/10)), fprintf('.'); end % progress indicator, 10 dots 52 | 53 | y_est = kaf.evaluate(X(n,:)); % test on test set 54 | err = yref(n) - y_est; 55 | MSE(n,setup_ind) = MSE(n,setup_ind) + err.^2/numsim; 56 | 57 | kaf.train(X(n,:),y(n)); % train with one input-output pair 58 | end 59 | fprintf('\n'); 60 | end 61 | end 62 | 63 | toc(t1) 64 | %% OUTPUT 65 | 66 | % MSE smoothing by moving average for visualization 67 | MSE_smooth = filter(1/20*ones(20,1),1,MSE); 68 | 69 | figure 70 | plot(10*log10(MSE_smooth)); 71 | title('Learning curves') 72 | grid on 73 | 74 | xlabel('iteration') 75 | ylabel('MSE (dB)') 76 | legend(titles) 77 | -------------------------------------------------------------------------------- /demo/demo_sinc_all.m: -------------------------------------------------------------------------------- 1 | % Demo: learn a sinc. Run and compare all algorithms using their default 2 | % parameters. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
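%
% The additive noise level below is derived from the target SNR: its
% variance is var(y_ref)*10^(-SNR/10), so the default SNR of 20 dB
% corresponds to a noise variance of 0.01*var(y_ref), for example:
%
%   noise = sqrt(10^(-20/10)*var(y_ref))*randn(size(y_ref));
%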
5 | % https://github.com/steven2358/kafbox/ 6 | 7 | close all 8 | clear 9 | rng('default'); rng(1) 10 | 11 | %% PARAMETERS 12 | 13 | N = 1000; % number of training data 14 | N_test = 500; % number of test data 15 | SNR = 20; % SNR in dB 16 | 17 | %% PROGRAM 18 | 19 | % generate data 20 | x = randn(N,1); 21 | x_test = linspace(min(x),max(x),N_test)'; 22 | y_ref = sinc([x;x_test]); 23 | y = y_ref + sqrt(10^(-SNR/10)*var(y_ref))*randn(N+N_test,1); 24 | y_test = y_ref(N+1:N+N_test); 25 | 26 | % get list of kernel adaptive filters in 'lib' folder 27 | fdir = fileparts(which('kafbox_template.m')); 28 | files = dir(fullfile(fdir,'*.m')); 29 | [~,algorithms] = cellfun(@fileparts, {files.name}, 'UniformOutput',0); 30 | for i=length(algorithms):-1:1 31 | if ~exist(algorithms{i},'class') 32 | algorithms(i) = []; % remove files that do not represent classes 33 | end 34 | end 35 | 36 | % perform online learning for each algorithm 37 | fprintf('\n') 38 | num_alg = length(algorithms); 39 | titles = cell(num_alg,1); 40 | MSE = zeros(num_alg,1); 41 | Y_est = zeros(N_test,num_alg); 42 | for algo_ind=1:num_alg 43 | t1 = tic; 44 | algorithm = algorithms{algo_ind}; 45 | fprintf('%2d. %9s: ',algo_ind,upper(algorithm)); 46 | titles{algo_ind} = strrep(upper(algorithm),'_','\_'); 47 | 48 | kaf = feval(algorithm); 49 | for i=1:N 50 | if ~mod(i,floor(N/10)), fprintf('.'); end 51 | kaf.train(x(i),y(i)); 52 | end 53 | y_est = kaf.evaluate(x_test); 54 | Y_est(:,algo_ind) = y_est; 55 | MSE(algo_ind) = mean((y_test-y_est).^2); 56 | 57 | fprintf(' %.2fs. MSE=%3.2fdB\n',toc(t1),10*log10(MSE(algo_ind))) 58 | end 59 | 60 | %% OUTPUT 61 | 62 | % plot results in different "leagues" 63 | [MSE_sorted,ind] = sort(MSE,'descend'); 64 | num_fig = ceil(num_alg/5); 65 | 66 | remaining = num_alg; 67 | titles{num_alg+1} = 'data'; 68 | for fig_ind=num_fig:-1:1 69 | figure; hold all 70 | plot(x,y(1:N),'.') 71 | 72 | rm = rem(remaining,5); 73 | num_in_league = (rm==0)*5 + rm; 74 | % plot the results for the num_in_league worst results 75 | league_inds = num_alg-remaining+num_in_league:-1:num_alg-remaining+1; 76 | for i=league_inds 77 | plot(x_test,Y_est(:,ind(i)),'LineWidth',2) 78 | end 79 | title(sprintf('League %d',fig_ind)) 80 | legend(titles([num_alg+1; ind(league_inds)])) 81 | 82 | axis([min(x)-0.5 max(x)+0.5 min(y)-0.5 max(y)+0.5]); 83 | remaining = remaining - num_in_league; 84 | end 85 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_profiler_convergence_analysis.m: -------------------------------------------------------------------------------- 1 | % Convergence analysis. Returns steady state MSE, numbers of iterations to 2 | % reach specified MSE value and MSE reached after specified number of 3 | % iterations. 4 | % 5 | % Input: 6 | % - MSE_curve: array containing MSE curve 7 | % - target_mse: target MSE value 8 | % - target_it: target number of iterations 9 | % 10 | % Output: 11 | % - ss: steady-state MSE level calculated over the last 1000 samples 12 | % - it_reached: number of iterations it takes to reach the specified MSE 13 | % - mse_reached: mse reached after the specified number of iterations 14 | % 15 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 16 | % https://github.com/steven2358/kafbox/ 17 | 18 | function [ss,it_reached,mse_reached] = ... 
19 | kafbox_profiler_convergence_analysis(MSE_curve, target_mse, target_it) 20 | 21 | xs = find(~isnan(MSE_curve)); 22 | 23 | % calculate steady state as MSE over last 1000 points 24 | ss = 10*log10(mean(MSE_curve(xs(xs>length(MSE_curve)-1000)))); 25 | 26 | if nargin > 1 27 | % check when error_measure reaches certain MSE 28 | xsi = find(10*log10(MSE_curve(xs)) < target_mse); 29 | if numel(xsi)>0 30 | if xsi(1)==1 31 | index = xsi(1); 32 | else 33 | % linear interpolation to find value 34 | sti = xsi(1)-1; 35 | ndi = xsi(1); 36 | stv = 10*log10(MSE_curve(xs(sti))); 37 | ndv = 10*log10(MSE_curve(xs(ndi))); 38 | index = xs(sti)+(xs(ndi) - xs(sti))*(stv-target_mse)/(stv-ndv); 39 | % plot(index,target_mse,'ro') 40 | % plot([0 index],[target_mse target_mse],'r','LineWidth',2) 41 | % ylim = get(gca,'YLim'); 42 | % plot([index index],[ylim(1) target_mse],'r') 43 | end 44 | else 45 | % fprintf('No convergence to %d dB.\n',target_mse); 46 | index = nan; 47 | end 48 | it_reached = index; 49 | end 50 | 51 | if nargin > 2 52 | % check what error is reached after certain number of iterations 53 | xsi = find((xs) >= target_it); 54 | if xsi(1)==1 55 | value = 10*log10(MSE_curve(xs(xsi(1)))); 56 | else 57 | % linear interpolation to find value 58 | sti = xsi(1)-1; 59 | ndi = xsi(1); 60 | stv = 10*log10(MSE_curve(xs(sti))); 61 | ndv = 10*log10(MSE_curve(xs(ndi))); 62 | value = stv + (ndv - stv)*(xs(sti)-target_it)/(xs(sti)-xs(ndi)); 63 | % plot(target_it,value,'ko') 64 | % plot([0 target_it],[value value],'k') 65 | % ylim = get(gca,'YLim'); 66 | % plot([target_it target_it],[ylim(1) value],'k','LineWidth',2) 67 | end 68 | mse_reached = value; 69 | end 70 | -------------------------------------------------------------------------------- /lib/util/gpml/kafbox_covProd.m: -------------------------------------------------------------------------------- 1 | function [A, B] = kafbox_covProd(covfunc, logtheta, x, z); 2 | 3 | % covProd - compose a covariance function as the product of other covariance 4 | % functions. This function doesn't actually compute very much on its own, it 5 | % merely does some bookkeeping, and calls other covariance functions to do the 6 | % actual work. 7 | % 8 | % For more help on design of covariance functions, try "help covFunctions". 9 | % 10 | % (C) Copyright 2006 by Carl Edward Rasmussen, 2006-04-06.
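%
% A composition sketch, following the pattern used in
% kafbox_parameter_estimation.m elsewhere in this toolbox: a product of
% covariance functions is wrapped in a sum with a noise term, and the
% composed function is evaluated through feval:
%
%   covfunc = {'kafbox_covSum',{{'kafbox_covProd',...
%       {'kafbox_covLambda','kafbox_covSEiso2'}},'kafbox_covNoise'}};
%   K = feval(covfunc{:}, loghyper, Xt); % full covariance matrix
%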
11 | 12 | for i = 1:length(covfunc) % iterate over covariance functions 13 | f = covfunc(i); 14 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 15 | j(i) = cellstr(feval(f{:})); 16 | end 17 | 18 | if nargin == 1, % report number of parameters 19 | A = char(j(1)); for i=2:length(covfunc), A = [A, '+', char(j(i))]; end 20 | return 21 | end 22 | 23 | [n, D] = size(x); 24 | 25 | v = []; % v vector indicates to which covariance parameters belong 26 | for i = 1:length(covfunc), v = [v repmat(i, 1, eval(char(j(i))))]; end 27 | 28 | switch nargin 29 | case 3 % compute covariance matrix 30 | A = ones(n, n); % allocate space for covariance matrix 31 | for i = 1:length(covfunc) % iteration over factor functions 32 | f = covfunc(i); 33 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 34 | A = A .* feval(f{:}, logtheta(v==i), x); % multiply covariances 35 | end 36 | 37 | case 4 % compute derivative matrix or test set covariances 38 | if nargout == 2 % compute test set cavariances 39 | A = ones(size(z,1),1); B = ones(size(x,1),size(z,1)); % allocate space 40 | for i = 1:length(covfunc) 41 | f = covfunc(i); 42 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 43 | [AA BB] = feval(f{:}, logtheta(v==i), x, z); % compute test covariances 44 | A = A .* AA; B = B .* BB; % and accumulate 45 | end 46 | else % compute derivative matrices 47 | A = ones(n, n); 48 | ii = v(z); % which covariance function 49 | j = sum(v(1:z)==ii); % which parameter in that covariance 50 | for i = 1:length(covfunc) 51 | f = covfunc(i); 52 | if iscell(f{:}), f = f{:}; end % dereference cell array if necessary 53 | if i == ii 54 | A = A .* feval(f{:}, logtheta(v==i), x, j); % multiply derivative 55 | else 56 | A = A .* feval(f{:}, logtheta(v==i), x); % multiply covariance 57 | end 58 | end 59 | end 60 | 61 | end 62 | -------------------------------------------------------------------------------- /data/generate_channel_switch.m: -------------------------------------------------------------------------------- 1 | function [x_embed,y,y_ref,x_test_embed,y_test_ref,H] = ... 2 | generate_channel_switch(opt) 3 | % Generate CHANNEL_SWITCH data set: input-output data of a nonlinear 4 | % channel whose linear part is changed abruptly at a chosen point. The 5 | % nonlinear channel is a Wiener system with a randomly chosen linear part. 6 | % 7 | % Input options: "opt" is a structure with the following fields: 8 | % - N: total number of training data points 9 | % - N_switch: iteration after which the channel switch occurs 10 | % - N_test: number of test data points (before and after switch) 11 | % - sigpower: input signal power 12 | % - chlen: linear channel length 13 | % - fun: nonlinear function 14 | % - SNR: signal-to-noise ratio of additve output noise 15 | % 16 | % Outputs: 17 | % - x_embed: system input with time embedding (each datum is a row) 18 | % - y: system output 19 | % - y_ref: noiseless system output 20 | % - x_test_embed: test system input with time embedding 21 | % - y_test_ref: noiseless test system output, one column per channel 22 | % - H: channel matrix (each row is a channel impulse response) 23 | % 24 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 25 | % https://github.com/steven2358/kafbox/ 26 | 27 | %% DEFAULT PARAMETER VALUES 28 | 29 | options = struct('N',1500,'N_test',500,'N_switch',500,'chlen',5,... 
30 | 'sigpower',1,'fun','tanh(x)','SNR',20);
31 | 
32 | %% CUSTOM PARAMETER VALUES
33 | if nargin >= 1,
34 | for opt_name = fieldnames(opt)',
35 | if ismember(opt_name,fieldnames(options)),
36 | options.(opt_name{1}) = opt.(opt_name{1});
37 | end
38 | end
39 | end
40 | 
41 | N = options.N;
42 | N_test = options.N_test;
43 | N_switch = options.N_switch;
44 | sigpower = options.sigpower;
45 | chlen = options.chlen;
46 | fun = options.fun;
47 | SNR = options.SNR;
48 | 
49 | %% PROGRAM
50 | 
51 | H = [ones(2,1) randn(2,chlen-1)]; % linear channel coefficients
52 | 
53 | f = inline(fun); % Wiener system nonlinearity
54 | 
55 | N_all = N+N_test+chlen-1;
56 | x_all = sqrt(sigpower)*randn(N_all,1);
57 | x_all_embed = zeros(N_all,chlen);
58 | for i = 1:chlen,
59 | x_all_embed(i:N_all,i) = x_all(1:N_all-i+1); % time-embedding
60 | end
61 | x_all_embed = x_all_embed(chlen:N_all,:);
62 | 
63 | x_embed = x_all_embed(1:N,:);
64 | x_test_embed = x_all_embed(N+1:N+N_test,:);
65 | 
66 | % linear filtering
67 | xp1 = x_embed(1:N_switch,:)*H(1,:)';
68 | xp2 = x_embed(N_switch+1:N,:)*H(2,:)';
69 | 
70 | % apply nonlinearity
71 | y_ref = f([xp1;xp2]); % no noise yet
72 | 
73 | % add noise
74 | noisevar = 10^(-SNR/10)*var(y_ref);
75 | noise = sqrt(noisevar)*randn(N,1);
76 | 
77 | y = y_ref + noise;
78 | 
79 | % get test outputs: 2 columns, no noise
80 | xp3 = x_test_embed*H(1,:)';
81 | xp4 = x_test_embed*H(2,:)';
82 | y_test_ref = f([xp3 xp4]);
83 | 
84 | % figure;plot([xp1;xp2],y,'.')
85 | 
--------------------------------------------------------------------------------
/lib/profiler/qklms_profiler.m:
--------------------------------------------------------------------------------
1 | % Profiler extension for Quantized Kernel Least-Mean-Square algorithm
2 | %
3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
4 | % https://github.com/steven2358/kafbox/
5 | 
6 | classdef qklms_profiler < qklms
7 | 
8 | properties (GetAccess = 'public', SetAccess = 'private')
9 | elapsed = 0; % elapsed time
10 | prev_dict_size = 0; % previous dictionary size for growth check
11 | end
12 | 
13 | methods
14 | 
15 | function kaf = qklms_profiler(parameters) % constructor
16 | if nargin<1, parameters = struct(); end
17 | kaf = kaf@qklms(parameters);
18 | end
19 | 
20 | function flops = lastflops(kaf) % flops for last iteration
21 | m = size(kaf.dict,1);
22 | 
23 | if kaf.prev_dict_size < m % growing
24 | m1 = m-1;
25 | n4 = 1;
26 | n5 = 1;
27 | else
28 | m1 = m;
29 | n4 = 1;
30 | n5 = 0;
31 | end
32 | 
33 | if strcmp(kaf.kerneltype,'gauss')
34 | m2 = 0; m3 = 0; % quantization criterion can use values calculated by kernel
35 | else
36 | m2 = m1; m3 = size(kaf.dict,2);
37 | end
38 | 
39 | floptions = struct(...
40 | 'sum', m1 - 1 + 1 + (2*m2-1)*m3 + n4 + n5, ...
41 | 'mult', m1 + m2*m3 + n4 + n5, ...
42 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]);
43 | 
44 | flops = kflops(floptions);
45 | end
46 | 
47 | %% flops breakdown
48 | 
49 | % k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
50 | % kernel: m1
51 | 
52 | % y_est = k'*kaf.alpha;
53 | % sum: m1 - 1
54 | % mult: m1
55 | 
56 | % err = y - y_est;
57 | % sum: 1
58 | 
59 | % [d2,j] = min(sum((kaf.dict - repmat(x,m,1)).^2,2));
60 | % sum: (2*m2-1)*m3
61 | % mult: m2*m3
62 | 
63 | % kaf.alpha(j) = kaf.alpha(j) + kaf.eta*err;
64 | % sum: n4
65 | % mult: n5
66 | 
67 | % kaf.alpha = [kaf.alpha; kaf.eta*err];
68 | % mult: n4
69 | 
70 | %% 
71 | 
72 | function train_profiled(kaf,x,y)
73 | kaf.prev_dict_size = size(kaf.dict,1);
74 | t1 = tic;
75 | kaf.train(x,y);
76 | t2 = toc(t1);
77 | kaf.elapsed = kaf.elapsed + t2;
78 | end
79 | 
80 | function bytes = lastbytes(kaf) % bytes used in last iteration
81 | m = size(kaf.dict,1);
82 | bytes = 8*(m + m*size(kaf.dict,2)); % 8 bytes for double precision
83 | % alpha, dict
84 | end
85 | 
86 | end
87 | end
88 | 
--------------------------------------------------------------------------------
/lib/profiler/kafbox_profiler_storet.m:
--------------------------------------------------------------------------------
1 | % Handles storage and retrieval of profiler results.
2 | %
3 | % Checks if the results for a given configuration were stored before, and
4 | % saves new results if necessary.
5 | %
6 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
7 | % https://github.com/steven2358/kafbox/
8 | 
9 | function results = kafbox_profiler_storet(data,config,output_dir,results)
10 | 
11 | if nargin<4
12 | option = 'check';
13 | else
14 | option = 'save';
15 | end
16 | 
17 | dataset = lower(data.name);
18 | if isfield(data,'class')
19 | dataset = sprintf('%s_%s',lower(data.class),dataset);
20 | end
21 | dataset_algo = [dataset '_' config.class];
22 | results_path = fullfile(output_dir,dataset_algo);
23 | index_path = fullfile(results_path,'index.mat');
24 | if ~exist(results_path,'file')
25 | mkdir(results_path);
26 | end
27 | 
28 | % gather all relevant values in one structure
29 | my_config = config.options;
30 | fn = fieldnames(data);
31 | for i = 1:length(fn)
32 | if ~strcmp(fn{i},'name')
33 | my_config.(sprintf('data_%s',fn{i})) = data.(fn{i});
34 | end
35 | end
36 | 
37 | skiplist = {'sweep_par','sweep_val','data_numsim'};
38 | for i = 1:length(skiplist)
39 | if isfield(my_config,skiplist{i})
40 | my_config = rmfield(my_config,skiplist{i});
41 | end
42 | end
43 | 
44 | switch option
45 | case 'check'
46 | if exist(index_path,'file')==2
47 | load(index_path); % loads "configs"
48 | results = find_results(my_config,configs,results_path); %#ok
49 | else
50 | results = [];
51 | end
52 | case 'save'
53 | if exist(index_path,'file')==2
54 | load(index_path); % loads "configs"
55 | ind = length(configs)+1; %#ok
56 | else
57 | ind = 1;
58 | end
59 | id = datestr(now,30);
60 | configs{ind}.id = id;
61 | configs{ind}.cstr = struct2str(my_config);
62 | save(index_path,'configs');
63 | save_results(results,results_path,id)
64 | otherwise
65 | error('unknown option')
66 | end
67 | 
68 | 
69 | % check if config was already processed and return corresponding results
70 | function r = find_results(my_config,configs,results_path)
71 | 
72 | str = struct2str(my_config);
73 | 
74 | for i=1:length(configs)
75 | stri = configs{i}.cstr;
76 | if strcmp(str,stri)
77 | id = configs{i}.id;
78 | r = load_results(results_path,id);
79 | return
80 | end
81 | end
82 | r = [];
83 | 
84 | 
85 | function 
save_results(results,results_path,id) 86 | fname = sprintf('%s/%s',results_path,id); 87 | save(fname,'results'); 88 | 89 | 90 | function results = load_results(results_path,id) 91 | fname = sprintf('%s/%s.mat',results_path,id); 92 | if exist(fname,'file')==2 93 | r = load(fname); 94 | results = r.results; 95 | else 96 | results = []; 97 | end 98 | -------------------------------------------------------------------------------- /lib/knlms.m: -------------------------------------------------------------------------------- 1 | % Kernel Normalized Least-Mean-Square algorithm with Coherence Criterion 2 | % 3 | % C. Richard, J.C.M. Bermudez, and P. Honeine, "Online Prediction of Time 4 | % Series Data With Kernels," IEEE Transactions on Signal Processing, 5 | % vol. 57, no. 3, pp. 1058-1067, March 2009, 6 | % http://dx.doi.org/10.1109/TSP.2008.2009895 7 | % 8 | % Remark: memories are initialized empty in this implementation 9 | % 10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 11 | % https://github.com/steven2358/kafbox/ 12 | 13 | classdef knlms < kernel_adaptive_filter 14 | 15 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 16 | eta = .5; % step size 17 | mu0 = .95; % coherence criterion threshold 18 | eps = 1E-2; % regularization 19 | kerneltype = 'gauss'; % kernel type 20 | kernelpar = 1; % kernel parameter 21 | end 22 | 23 | properties (GetAccess = 'public', SetAccess = 'private') % variables 24 | dict = []; % dictionary 25 | modict = []; % modulus of the dictionary elements 26 | alpha = []; % expansion coefficients 27 | end 28 | 29 | methods 30 | function kaf = knlms(parameters) % constructor 31 | if (nargin > 0) % copy valid parameters 32 | for fn = fieldnames(parameters)' 33 | if ismember(fn,fieldnames(kaf)) 34 | kaf.(fn{1}) = parameters.(fn{1}); 35 | end 36 | end 37 | end 38 | end 39 | 40 | function y_est = evaluate(kaf,x) % evaluate the algorithm 41 | if size(kaf.dict,1)>0 42 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 43 | y_est = k'*kaf.alpha; 44 | else 45 | y_est = zeros(size(x,1),1); 46 | end 47 | end 48 | 49 | function train(kaf,x,y) % train the algorithm 50 | if size(kaf.dict,2)==0 % initialize 51 | k = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 52 | kaf.dict = x; 53 | kaf.modict = sqrt(k); 54 | kaf.alpha = 0; 55 | else 56 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 57 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 58 | C = k./(sqrt(kx)*kaf.modict); % coherence 59 | if (max(C) <= kaf.mu0) % coherence criterion 60 | kaf.dict = [kaf.dict; x]; % order increase 61 | kaf.modict = [kaf.modict; sqrt(kx)]; 62 | kaf.alpha = [kaf.alpha; 0]; % reserve spot 63 | end 64 | end 65 | 66 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 67 | kaf.alpha = kaf.alpha + ... % update coefficients 68 | kaf.eta / (kaf.eps + k'*k) * (y - k'*kaf.alpha) * k; 69 | end 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /demo/demo_reconverge_all.m: -------------------------------------------------------------------------------- 1 | % Demo: learn a nonlinear channel with an abrupt change. Run and compare 2 | % all algorithms using their default parameters. 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
5 | % https://github.com/steven2358/kafbox/ 6 | 7 | close all 8 | clear 9 | rng('default'); rng(1); % for reproducibility 10 | 11 | %% PARAMETERS 12 | 13 | N = 1500; % number of training data 14 | N_switch = 500; % iteration after which the channel switch occurs 15 | N_test = 100; % number of test data 16 | SNR = 30; % SNR in dB 17 | sigpower = 0.5^2; % input signal power 18 | 19 | %% PROGRAM 20 | 21 | w = who; 22 | for i=1:length(w) % copy all parameters to option structure 23 | eval(sprintf('opt.%s = %s;',w{i},w{i})) 24 | end 25 | 26 | % generate data 27 | [X,y,y_ref,X_test,y_test] = generate_channel_switch(opt); 28 | 29 | % get list of kernel adaptive filters in 'lib' folder 30 | fdir = fileparts(which('kafbox_template.m')); 31 | files = dir(fullfile(fdir,'*.m')); 32 | [~,algorithms] = cellfun(@fileparts, {files.name}, 'UniformOutput',0); 33 | for i=length(algorithms):-1:1 34 | if ~exist(algorithms{i},'class') 35 | algorithms(i) = []; % remove files that do not represent classes 36 | end 37 | end 38 | 39 | % perform online learning for each algorithm 40 | fprintf('\n') 41 | num_alg = length(algorithms); 42 | titles = cell(num_alg,1); 43 | MSE = zeros(N,num_alg); 44 | MSE_final = zeros(1,num_alg); 45 | for algo_ind=1:num_alg 46 | t1 = tic; 47 | algorithm = algorithms{algo_ind}; 48 | fprintf('%2d. %9s: ',algo_ind,upper(algorithm)); 49 | titles{algo_ind} = strrep(upper(algorithm),'_','\_'); 50 | 51 | kaf = feval(algorithm); 52 | for i=1:N 53 | if ~mod(i,floor(N/10)), fprintf('.'); end 54 | 55 | y_est = kaf.evaluate(X_test); 56 | if i<=N_switch 57 | MSE(i,algo_ind) = mean((y_test(:,1)-y_est).^2); 58 | else 59 | MSE(i,algo_ind) = mean((y_test(:,2)-y_est).^2); 60 | end 61 | 62 | kaf.train(X(i,:),y(i)); 63 | end 64 | MSE_final(algo_ind) = mean(MSE(N-500:N,algo_ind)); 65 | 66 | fprintf(' %.2fs. Final MSE=%.2fdB\n',toc(t1),... 67 | 10*log10(MSE_final(algo_ind))) 68 | end 69 | fprintf('\n'); 70 | 71 | %% OUTPUT 72 | 73 | % plot results in different "leagues" 74 | [MSE_final_sorted,ind] = sort(MSE_final,'descend'); 75 | num_fig = ceil(num_alg/5); 76 | 77 | remaining = num_alg; 78 | for fig_ind=num_fig:-1:1 79 | figure; hold all 80 | rm = rem(remaining,5); 81 | num_in_league = (rm==0)*5 + rm; 82 | % plot the results for the num_in_league worst results 83 | league_inds = num_alg-remaining+num_in_league:-1:num_alg-remaining+1; 84 | for i=league_inds 85 | plot(10*log10(MSE(:,ind(i))),'LineWidth',1) 86 | end 87 | title(sprintf('League %d',fig_ind)) 88 | legend(titles(ind(league_inds))) 89 | 90 | axis([0 N 5*floor(min(10*log10(MSE(:)))/5) 0]); 91 | remaining = remaining - num_in_league; 92 | end 93 | -------------------------------------------------------------------------------- /demo/literature/vanvaerenbergh2006sliding/fig2.m: -------------------------------------------------------------------------------- 1 | % Reproduces an experiment similar to the one from figure 2 in "A 2 | % sliding-window kernel RLS algorithm and its application to nonlinear 3 | % channel identification". 4 | % 5 | % MSE performance of sliding-window kernel RLS for two different window 6 | % sizes. Execution time: < 1 minute (Intel Pentium Core2 Duo). 7 | % 8 | % S. Van Vaerenbergh, J. Via, and I. 
Santamaria, "A Sliding-Window Kernel 9 | % RLS Algorithm and Its Application to Nonlinear Channel Identification," 10 | % 2006 IEEE International Conference on Acoustics, Speech and Signal 11 | % Processing (ICASSP), May 2006, 12 | % 13 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 14 | % https://github.com/steven2358/kafbox/ 15 | 16 | clear 17 | close all 18 | 19 | %% PARAMETERS 20 | opt.chlen = 4; % channel length, and input embedding 21 | opt.N = 1500; % number of training data 22 | opt.N_switch = 500; % iteration after which the channel switch occurs 23 | opt.N_test = 100; % number of test data 24 | opt.SNR = 20; % SNR in dB 25 | 26 | num_sim = 10; 27 | 28 | setups{1} = struct('M',75,'c',1E-2,'kerneltype','gauss','kernelpar',5); 29 | setups{2} = struct('M',150,'c',1E-2,'kerneltype','gauss','kernelpar',5); 30 | 31 | titles = {'SW-KRLS, M=75','SW-KRLS, M=150'}; 32 | 33 | %% PREPARE DATA 34 | fprintf('Fig. 2 from "A sliding-window kernel RLS algorithm and its \n'); 35 | fprintf('application to nonlinear channel identification".\n'); 36 | 37 | % perform online learning for each algorithm 38 | fprintf('\n') 39 | N = opt.N; 40 | N_switch = opt.N_switch; 41 | num_alg = length(setups); 42 | MSE = zeros(N,num_alg); 43 | MSE_final = zeros(1,num_alg); 44 | for sim_ind = 1:num_sim 45 | % generate data 46 | [X,y,y_ref,X_test,y_test] = generate_channel_switch(opt); 47 | 48 | fprintf('SIM %d:\n',sim_ind) 49 | for algo_ind = 1:num_alg 50 | t1 = tic; 51 | 52 | kaf = swkrls(setups{algo_ind}); 53 | 54 | fprintf('%9s M=%3d: ',upper(class(kaf)),kaf.M); 55 | 56 | mse = zeros(N,1); 57 | for i=1:N 58 | if ~mod(i,floor(N/10)), fprintf('.'); end 59 | 60 | y_est = kaf.evaluate(X_test); 61 | if i<=N_switch 62 | mse(i) = mean((y_test(:,1)-y_est).^2); 63 | else 64 | mse(i) = mean((y_test(:,2)-y_est).^2); 65 | end 66 | 67 | kaf.train(X(i,:),y(i)); 68 | end 69 | MSE(:,algo_ind) = MSE(:,algo_ind) + mse/num_sim; 70 | mse_final = mean(mse(N-500:N)); 71 | 72 | fprintf(' %.2fs. Final MSE=%3.2fdB\n',toc(t1),... 73 | 10*log10(mse_final)) 74 | end 75 | fprintf('\n'); 76 | end 77 | fprintf('\n'); 78 | 79 | %% OUTPUT 80 | 81 | figure; hold all 82 | for algo_ind=1:num_alg 83 | plot(10*log10(MSE(:,algo_ind)),'LineWidth',1) 84 | 85 | axis([0 N 5*floor(min(10*log10(MSE(:)))/5) 0]); 86 | end 87 | xlabel('iteration') 88 | ylabel('MSE (dB)') 89 | legend(titles) 90 | -------------------------------------------------------------------------------- /lib/profiler/kap_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Kernel Affine Projection algorithm with Coherence 2 | % Criterion 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | classdef kap_profiler < kap 8 | 9 | properties (GetAccess = 'public', SetAccess = 'private') 10 | elapsed = 0; % elapsed time 11 | prev_dict_size = 0; % previous dictionary size for growth check 12 | end 13 | 14 | methods 15 | 16 | function kaf = kap_profiler(parameters) % constructor 17 | if nargin<1, parameters = struct(); end 18 | kaf = kaf@kap(parameters); 19 | end 20 | 21 | function flops = lastflops(kaf) % flops for last iteration 22 | m = size(kaf.dict,1); 23 | if kaf.prev_dict_size < m % growing 24 | m1 = m - 1; 25 | m2 = m - 1; 26 | else 27 | m1 = m; 28 | m2 = m; 29 | end 30 | m3 = size(kaf.memx,1); 31 | p2 = size(kaf.memx,1); 32 | 33 | floptions = struct(... 
34 | 'sum', m2 + p2^2*m2 - p2^2 + m2*(p2-1) + p2*m2 + (p2-1)*p2*(2*p2-1)/6 - (p2-1)*p2/2, ...
35 | 'mult', m2*p2 + p2^2*m2 + p2*p2 + p2*m2 + (p2-1)*p2*(2*p2-1)/6 - (p2-1)*p2/2, ...
36 | 'div', p2 - 1, ...
37 | sprintf('%s_kernel',kaf.kerneltype), [m1+m3*m2, 1, size(kaf.dict,2)]);
38 | 
39 | flops = kflops(floptions);
40 | end
41 | 
42 | %% flops breakdown
43 | 
44 | % k = kernel(x,kaf.dict,kaf.kerneltype,kaf.kernelpar);
45 | % kernel: m1
46 | 
47 | % kernel(kaf.memx,kaf.dict,kaf.kerneltype,kaf.kernelpar);
48 | % kernel: m3*m2
49 | 
50 | % kaf.alpha = kaf.alpha + ...
51 | % kaf.eta*H'/...
52 | % (kaf.eps*eye(size(H,1)) + H*H')*...
53 | % (kaf.memy - H*kaf.alpha);
54 | % sum: m2 + p2^2*m2 - p2^2 + m2*(p2-1) + p2*m2 % without division
55 | % mult: m2*p2 + p2^2*m2 + p2*p2 + p2*m2 % without division
56 | 
57 | % matrix division in previous operation, assuming Gaussian elimination
58 | % sum: (p2-1)*p2*(2*p2-1)/6 - (p2-1)*p2/2
59 | % mult: (p2-1)*p2*(2*p2-1)/6 - (p2-1)*p2/2
60 | % div: p2 - 1
61 | 
62 | %% 
63 | 
64 | function train_profiled(kaf,x,y)
65 | kaf.prev_dict_size = size(kaf.dict,1);
66 | t1 = tic;
67 | kaf.train(x,y);
68 | t2 = toc(t1);
69 | kaf.elapsed = kaf.elapsed + t2;
70 | end
71 | 
72 | function bytes = lastbytes(kaf) % bytes used in last iteration
73 | m = size(kaf.dict,1);
74 | bytes = 8*(m + m*size(kaf.dict,2) + kaf.p + kaf.p*size(kaf.memx,2)); % 8 bytes for double precision
75 | % alpha, dict, memy, memx
76 | end
77 | 
78 | end
79 | end
80 | 
--------------------------------------------------------------------------------
/lib/util/gpml/kafbox_gpr.m:
--------------------------------------------------------------------------------
1 | function [out1, out2] = kafbox_gpr(logtheta, covfunc, x, y, xstar)
2 | 
3 | % gpr - Gaussian process regression, with a named covariance function. Two
4 | % modes are possible: training and prediction. If no test data are given, the
5 | % function returns minus the log likelihood and its partial derivatives with
6 | % respect to the hyperparameters; this mode is used to fit the hyperparameters.
7 | % If test data are given, then (marginal) Gaussian predictions are computed,
8 | % whose mean and variance are returned. Note that in cases where the covariance
9 | % function has noise contributions, the variance returned in S2 is for noisy
10 | % test targets; if you want the variance of the noise-free latent function, you
11 | % must subtract the noise variance.
12 | %
13 | % usage: [nlml dnlml] = kafbox_gpr(logtheta, covfunc, x, y)
14 | % or: [mu S2] = kafbox_gpr(logtheta, covfunc, x, y, xstar)
15 | %
16 | % where:
17 | %
18 | % logtheta is a (column) vector of log hyperparameters
19 | % covfunc is the covariance function
20 | % x is a n by D matrix of training inputs
21 | % y is a (column) vector (of size n) of targets
22 | % xstar is a nn by D matrix of test inputs
23 | % nlml is the returned value of the negative log marginal likelihood
24 | % dnlml is a (column) vector of partial derivatives of the negative
25 | % log marginal likelihood wrt each log hyperparameter
26 | % mu is a (column) vector (of size nn) of predicted means
27 | % S2 is a (column) vector (of size nn) of predicted variances
28 | %
29 | % For more help on covariance functions, see "help covFunctions".
30 | %
31 | % (C) copyright 2006 by Carl Edward Rasmussen (2006-03-20).
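32 | %
33 | % Example usage (sketch; assumes the kafbox_covSum, kafbox_covSEiso and
34 | % kafbox_covNoise variants in this folder, with logtheta stacking their
35 | % log hyperparameters in order, e.g. [log(ell); log(sf); log(sn)]):
36 | % covfunc = {'kafbox_covSum', {'kafbox_covSEiso','kafbox_covNoise'}};
37 | % [mu, S2] = kafbox_gpr(logtheta, covfunc, x, y, xstar);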
38 | 
39 | if ischar(covfunc), covfunc = cellstr(covfunc); end % convert to cell if needed
40 | [n, D] = size(x); %#ok
41 | if eval(feval(covfunc{:})) ~= size(logtheta, 1)
42 | error('Error: Number of parameters does not agree with covariance function')
43 | end
44 | 
45 | K = feval(covfunc{:}, logtheta, x); % compute training set covariance matrix
46 | 
47 | L = chol(K)'; % cholesky factorization of the covariance
48 | alpha = kafbox_solve_chol(L',y);
49 | 
50 | if nargin == 4 % if no test cases, compute the negative log marginal likelihood
51 | 
52 | out1 = 0.5*y'*alpha + sum(log(diag(L))) + 0.5*n*log(2*pi);
53 | 
54 | if nargout == 2 % ... and if requested, its partial derivatives
55 | out2 = zeros(size(logtheta)); % set the size of the derivative vector
56 | W = L'\(L\eye(n))-alpha*alpha'; % precompute for convenience
57 | for i = 1:length(out2)
58 | out2(i) = sum(sum(W.*feval(covfunc{:}, logtheta, x, i)))/2;
59 | end
60 | end
61 | 
62 | else % ... otherwise compute (marginal) test predictions ...
63 | 
64 | [Kss, Kstar] = feval(covfunc{:}, logtheta, x, xstar); % test covariances
65 | 
66 | out1 = Kstar' * alpha; % predicted means
67 | 
68 | if nargout == 2
69 | v = L\Kstar;
70 | out2 = Kss - sum(v.*v)';
71 | end
72 | 
73 | end
74 | 
--------------------------------------------------------------------------------
/lib/profiler/exkrls_profiler.m:
--------------------------------------------------------------------------------
1 | % Profiler extension for Extended Kernel Recursive Least Squares algorithm
2 | %
3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
4 | % https://github.com/steven2358/kafbox/
5 | 
6 | classdef exkrls_profiler < exkrls
7 | 
8 | properties (GetAccess = 'public', SetAccess = 'private')
9 | elapsed = 0; % elapsed time
10 | prev_dict_size = 0; % previous dictionary size for growth check
11 | end
12 | 
13 | methods
14 | 
15 | function kaf = exkrls_profiler(parameters) % constructor
16 | if nargin<1, parameters = struct(); end
17 | kaf = kaf@exkrls(parameters);
18 | end
19 | 
20 | function flops = lastflops(kaf) % flops for last iteration
21 | m = size(kaf.dict,1);
22 | if kaf.prev_dict_size < m % growing
23 | m1 = m;
24 | m2 = m - 1;
25 | floptions = struct(...
26 | 'sum', 1 + m2 - 1 + m2 + 1 + m2 + 2*m2 - 1 + 1 + m2^2 + m2 - 1, ...
27 | 'mult', m2 + m2 + 2 + m2 + m2 + 2 + 4 + 2*m2^2 + m2 + 2, ...
28 | 'div', 1 + 1, ...
29 | sprintf('%s_kernel',kaf.kerneltype), [m1, 1, size(kaf.dict,2)]); 30 | else 31 | floptions = struct('sum', 1); % no kernel calculation 32 | end 33 | flops = kflops(floptions); 34 | end 35 | 36 | %% flops breakdown 37 | 38 | % k = kernel([kaf.dict; x],x,kaf.kerneltype,kaf.kernelpar); 39 | % kernel: m1 40 | 41 | % kaf.i = kaf.i + 1; 42 | % sum: 1 43 | 44 | % z = kaf.Q*kt; 45 | % sum: m2 - 1 46 | % mult: m2 47 | 48 | % r = kaf.beta^kaf.i*kaf.rho + ktt - kt'*z; 49 | % sum: m2 + 1 50 | % mult: m2 + 2 % assumes previous kaf.beta^kaf.i is stored 51 | 52 | % err = y - kt'*kaf.alpha; 53 | % sum: m2 54 | % mult: m2 55 | 56 | % kaf.alpha = kaf.alphaf*[kaf.alpha - z*err/r; err/r]; 57 | % sum: 2*m2 - 1 58 | % mult: m2 + 2 59 | % div: 1 60 | 61 | % dummy = kaf.alphaf^2 + kaf.beta^kaf.i*kaf.q*kaf.rho; 62 | % sum: 1 63 | % mult: 4 64 | 65 | % kaf.rho = kaf.rho/dummy; 66 | % div: 1 67 | 68 | % kaf.Q = kaf.alphaf^2/(r*dummy)*[kaf.Q*r + z*z', -z; -z', 1]; 69 | % sum: m2^2 + m2 - 1 70 | % mult: 2*m2^2 + m2 + 2 71 | % div: 1 72 | 73 | %% 74 | 75 | function train_profiled(kaf,x,y) 76 | kaf.prev_dict_size = size(kaf.dict,1); 77 | t1 = tic; 78 | kaf.train(x,y); 79 | t2 = toc(t1); 80 | kaf.elapsed = kaf.elapsed + t2; 81 | end 82 | 83 | function bytes = lastbytes(kaf) % bytes used in last iteration 84 | m = size(kaf.dict,1); 85 | bytes = 8*(m^2 + m + 1 + m*size(kaf.dict,2)) + 4; % 8 bytes for double precision, 4 for uint32 86 | % Q, alpha, rho, dict, i 87 | end 88 | 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /demo/literature/yukawa2012multikernel/fig1a.m: -------------------------------------------------------------------------------- 1 | % Almost reproduces figure 1a from "Multikernel Adaptive Filtering". 2 | % 3 | % Comparison of the performance of LMS, KNLMS and MKNLMS-CS in nonlinear 4 | % channel equalization. Execution time: < 1 minute (Intel Pentium Core2 5 | % Duo). 6 | % 7 | % Masahiro Yukawa, "Multikernel Adaptive Filtering", IEEE Transactions on 8 | % Signal Processing, vol.60, no.9, pp.4672-4682, Sept. 2012. 9 | % 10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 11 | % https://github.com/steven2358/kafbox/ 12 | 13 | clear 14 | close all 15 | 16 | %% PARAMETERS 17 | N_tr = 3000; % number of training data 18 | N_te = 500; % number of testing data 19 | SNR = 20; 20 | 21 | embedding = 5; % input embedding 22 | delay = 2; % equalization delay 23 | 24 | mk_thresh = 0.36; % threshold value from paper 25 | % mk_thresh = 0.9; % alternative value 26 | 27 | mk_kernelpars = [.2 2]; % kernel parameter values from paper 28 | % mk_kernelpars = [.2 .5 1 2 5 10 20 50]; % alternative values 29 | 30 | % remark 1 from paper determines mu0 31 | setups{1} = lms(struct('mu',.01)); %#ok 32 | setups{2} = knlms(struct('mu0',mk_thresh^(.2/min(mk_kernelpars)),'eta',0.2,'eps',3E-2,'kerneltype','gauss','kernelpar',1/sqrt(.2))); 33 | setups{3} = knlms(struct('mu0',mk_thresh^(.5/min(mk_kernelpars)),'eta',0.2,'eps',3E-2,'kerneltype','gauss','kernelpar',1/sqrt(.5))); 34 | setups{4} = mknlms_cs(struct('delta',mk_thresh,'eta',0.2,'rho',6E-2,'kerneltype','gauss','kernelpars',1./sqrt(mk_kernelpars))); 35 | 36 | %% PREPARE DATA 37 | 38 | fprintf('Fig. 
1a from "Multikernel Adaptive Filtering".\n')
39 | 
40 | u = randn(N_tr+N_te+embedding-1,1)>0;
41 | u = 2*u-1; % binary input
42 | 
43 | z = u + 0.5*[0;u(1:end-1)]; % output of linear channel
44 | varz = var(z);
45 | noisevar = 10^(-SNR/10)*varz;
46 | ns = sqrt(noisevar)*randn(length(u),1); % channel noise
47 | y = z - 0.9*z.^2 + ns; % output of nonlinear channel
48 | 
49 | X_all = zeros(N_tr+N_te,embedding); % time-embedding
50 | for k=1:embedding
51 | X_all(:,k) = y(k:N_tr+N_te+k-1);
52 | end
53 | 
54 | X_tr = X_all(1:N_tr,:); % training input data
55 | X_te = X_all(N_tr+1:N_tr+N_te,:); % test input data
56 | 
57 | T_tr = u(delay:delay+N_tr-1); % training desired output
58 | T_te = u(delay+N_tr:delay+N_tr+N_te-1); % test desired output
59 | 
60 | %% RUN ALGORITHMS
61 | tic
62 | 
63 | num_setup = length(setups);
64 | MSE = zeros(N_tr,num_setup);
65 | 
66 | for setup_ind = 1:num_setup
67 | kaf = setups{setup_ind};
68 | 
69 | for n=1:N_tr
70 | if ~mod(n,floor(N_tr/10)), fprintf('.'); end % progress indicator, 10 dots
71 | 
72 | t_te = kaf.evaluate(X_te); % test on test set
73 | err = T_te - t_te;
74 | MSE(n,setup_ind) = mean(err.^2);
75 | 
76 | kaf.train(X_tr(n,:),T_tr(n)); % train with one input-output pair
77 | end
78 | if setup_ind == 1
79 | fprintf('\n');
80 | else
81 | fprintf(' Dict. size: %d\n',size(kaf.dict,1));
82 | end
83 | end
84 | 
85 | toc
86 | %% OUTPUT
87 | 
88 | figure
89 | semilogy(MSE,'LineWidth',2); grid on
90 | 
91 | legend('LMS','KNLMS 1','KNLMS 2','MKNLMS-CS')
92 | xlabel('Iteration number')
93 | ylabel('MSE')
94 | 
95 | 
--------------------------------------------------------------------------------
/data/kafbox_data_channel_switch.m:
--------------------------------------------------------------------------------
1 | function [x_embed,y,x_test_embed,y_test_ref,H] = ...
2 | kafbox_data_channel_switch(opt)
3 | 
4 | % KAFBOX_DATA_CHANNEL_SWITCH Data generator for channel switch data
5 | %
6 | % CHANNEL_SWITCH data set: input-output data of a nonlinear
7 | % channel whose linear part is changed abruptly at a chosen point. The
8 | % nonlinear channel is a Wiener system with a randomly chosen linear part.
9 | %
10 | % Input options: "opt" is a structure with the following fields:
11 | % - N: total number of training data points
12 | % - N_switch: iteration after which the channel switch occurs
13 | % - N_test: number of test data points (before and after switch)
14 | % - sigpower: input signal power
15 | % - chlen: linear channel length
16 | % - fun: nonlinear function
17 | % - SNR: signal-to-noise ratio of additive output noise
18 | % - H: the channels to be used (each row is a channel impulse response).
19 | % If not provided these channels are generated randomly.
20 | %
21 | % Outputs:
22 | % - x_embed: system input with time embedding (each datum is a row)
23 | % - y: system output
24 | % - x_test_embed: test system input with time embedding
25 | % - y_test_ref: noiseless test system output, one column per channel
26 | % (before-switch and after-switch channel)
27 | % - H: channel matrix (each row is a channel impulse response)
28 | %
29 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
30 | % https://github.com/steven2358/kafbox/
31 | 
32 | %% DEFAULT PARAMETER VALUES
33 | 
34 | options = struct('N',1500,'N_test',500,'N_switch',500,'chlen',5,...
35 | 'sigpower',1,'fun','tanh(x)','SNR',20);%,'H',0); 36 | 37 | %% CUSTOM PARAMETER VALUES 38 | if nargin >= 1, 39 | for opt_name = fieldnames(opt)', 40 | if ismember(opt_name,fieldnames(options)), 41 | options.(opt_name{1}) = opt.(opt_name{1}); 42 | end 43 | end 44 | end 45 | 46 | N = options.N; 47 | N_test = options.N_test; 48 | N_switch = options.N_switch; 49 | sigpower = options.sigpower; 50 | chlen = options.chlen; 51 | fun = options.fun; 52 | SNR = options.SNR; 53 | % H = options.H; 54 | 55 | %% PROGRAM 56 | 57 | % if numel(H)==1 58 | % if assert(H,0) 59 | H = [ones(2,1) randn(2,chlen-1)]; % linear channel coefficients 60 | % end 61 | % end 62 | 63 | f = inline(fun); % Wiener system nonlinearity 64 | 65 | N_all = N+N_test+chlen-1; 66 | x_all = sqrt(sigpower)*randn(N_all,1); 67 | x_all_embed = zeros(N_all,chlen); 68 | for i = 1:chlen, 69 | x_all_embed(i:N_all,i) = x_all(1:N_all-i+1); % time-embedding 70 | end 71 | x_all_embed = x_all_embed(chlen:N_all,:); 72 | 73 | x_embed = x_all_embed(1:N,:); 74 | x_test_embed = x_all_embed(N+1:N+N_test,:); 75 | 76 | % linear filtering 77 | xp1 = x_embed(1:N_switch,:)*H(1,:)'; 78 | xp2 = x_embed(N_switch+1:N,:)*H(2,:)'; 79 | 80 | % apply nonlinearity 81 | y_ref = f([xp1;xp2]); % no noise yet 82 | 83 | % add noise 84 | noisevar = 10^(-SNR/10)*var(y_ref); 85 | noise = sqrt(noisevar)*randn(N,1); 86 | 87 | y = y_ref + noise; 88 | 89 | % get test outputs: 2 columns, no noise 90 | xp3 = x_test_embed*H(1,:)'; 91 | xp4 = x_test_embed*H(2,:)'; 92 | y_test_ref = f([xp3 xp4]); 93 | 94 | % figure;plot([xp1;xp2],y,'.') 95 | -------------------------------------------------------------------------------- /lib/profiler/kafbox_profiler_simulation.m: -------------------------------------------------------------------------------- 1 | % Perform a single profiler simulation. 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | function simresults = kafbox_profiler_simulation(data,sim_opts,... 
7 | algo_config,sim_ind,output_dir)
8 | 
9 | simdata = data;
10 | 
11 | if isfield(data,'offset')
12 | simdata.offset = (sim_ind-1)*data.offset;
13 | elseif isfield(data,'permutation')
14 | simdata.permutation = (sim_ind-1)*data.permutation; % start with 0 = no permutation
15 | else
16 | % fix random seed per simulation
17 | rs = sim_ind;
18 | rng('default');
19 | rng(sim_ind);
20 | simdata.rs = rs;
21 | end
22 | 
23 | % check if results for this simulation have been stored before
24 | simresults = kafbox_profiler_storet(simdata,algo_config,output_dir);
25 | 
26 | if isempty(simresults) % perform simulation
27 | 
28 | [X,Y,X_test,Y_test] = kafbox_data(simdata); % load data
29 | 
30 | every = 1;
31 | if isfield(data,'test_every')
32 | every = data.test_every;
33 | end
34 | 
35 | N = size(X,1);
36 | all_fl = zeros(N,1); % flops per iteration
37 | all_bytes = zeros(N,1); % bytes per iteration
38 | erm = sim_opts.error_measure; % error measure
39 | eval(sprintf('%s = nan*zeros(N,1);',erm));
40 | var_test = var(Y_test); % for NMSE
41 | 
42 | kaf = feval(sprintf('%s_profiler',algo_config.class),algo_config.options);
43 | 
44 | results2store = {'elapsed','flops','bytes',erm};
45 | 
46 | y_test_ind = ones(N,1);
47 | if isfield(data,'N_switch')
48 | y_test_ind(data.N_switch:end) = 2;
49 | end
50 | 
51 | for i=1:N
52 | Y_est = kaf.evaluate(X(i,:)); % predict
53 | kaf.train_profiled(X(i,:),Y(i)); % train
54 | 
55 | all_fl(i) = kaf.lastflops();
56 | all_bytes(i) = kaf.lastbytes();
57 | 
58 | if isfield(data,'test_every_conv')
59 | de = data.test_every_conv;
60 | if i<10*data.test_every_conv
61 | % grow the test interval exponentially, from 1 at i=1
62 | % to "every" at i=10*every
63 | every = max(1,round(10^(log10(de)/10/de*i)));
64 | else
65 | every = data.test_every_conv;
66 | end
67 | end
68 | if mod(i,every) == 0
69 | if isempty(X_test)
70 | Y_test = Y(i); % test prediction
71 | else
72 | Y_est = kaf.evaluate(X_test); % test on multiple data
73 | end
74 | erm_val = mean((Y_test(:,y_test_ind(i)) - Y_est).^2);
75 | if strcmp(erm,'NMSE')
76 | erm_val = erm_val/var_test; %#ok
77 | end
78 | eval(sprintf('%s(i) = erm_val;',erm))
79 | end
80 | end
81 | 
82 | elapsed = kaf.elapsed; %#ok
83 | flops = max(all_fl); %#ok
84 | bytes = max(all_bytes); %#ok
85 | 
86 | for j=1:length(results2store)
87 | eval(sprintf('simresults.%s = %s;',results2store{j},results2store{j}))
88 | end
89 | 
90 | kafbox_profiler_storet(simdata,algo_config,output_dir,simresults);
91 | 
92 | fprintf('calculated');
93 | else
94 | fprintf('retrieved ');
95 | end
96 | 
--------------------------------------------------------------------------------
/lib/profiler/swkrls_profiler.m:
--------------------------------------------------------------------------------
1 | % Profiler extension for Sliding-Window Kernel Recursive Least Squares
2 | %
3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
4 | % https://github.com/steven2358/kafbox/
5 | 
6 | classdef swkrls_profiler < swkrls
7 | 
8 | properties (GetAccess = 'public', SetAccess = 'private')
9 | elapsed = 0; % elapsed time
10 | prev_dict_size = 0; % previous dictionary size for growth check
11 | end
12 | 
13 | methods
14 | 
15 | function kaf = swkrls_profiler(parameters) % constructor
16 | if nargin<1, parameters = struct(); end
17 | kaf = kaf@swkrls(parameters);
18 | end
19 | 
20 | function flops = lastflops(kaf) % flops for last iteration
21 | m = size(kaf.dict,1);
22 | if kaf.prev_dict_size < m % growing
23 | m1 = m;
24 | m2 = m - 1;
25 | floptions = struct(...
26 | 'sum', m2^2 + m2^2 - m2 + m2^2 + m^2 - m, ... 27 | 'mult', m2^2 + m2 + m2^2 + m^2, ... 28 | 'div', 1, ... 29 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 30 | else 31 | m1 = m + 1; 32 | m2 = m; 33 | m3 = m; 34 | floptions = struct(... 35 | 'sum', m2^2 + m2^2 - m2 + m2^2 + m3^2 + m^2 - m, ... 36 | 'mult', m2^2 + m2 + m2^2 + m3^2 + m3 + m^2, ... 37 | 'div', 1 + 1, ... 38 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 39 | end 40 | 41 | flops = kflops(floptions); 42 | end 43 | 44 | %% flops breakdown 45 | 46 | % k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); % grow Kinv 47 | % kernel: m1 48 | 49 | % d = k(end) + kaf.c; % grow Kinv 50 | % sum: 1 51 | 52 | % g_inv = d - b'*kaf.Kinv*b; % grow Kinv 53 | % sum: m2^2 54 | % mult: m2^2 + m2 55 | 56 | % g = 1/g_inv; % grow Kinv 57 | % div: 1 58 | 59 | % f = -kaf.Kinv*b*g; % grow Kinv 60 | % sum: m2^2 - m2 61 | % mult: m2^2 + m2 62 | 63 | % E = kaf.Kinv - kaf.Kinv*b*f'; % grow Kinv 64 | % sum: m2^2 65 | % mult: m2^2 66 | 67 | % kaf.Kinv = G - f*f'/e; % prune Kinv 68 | % sum: m3^2 69 | % mult: m3^2 + m3 70 | % div: 1 71 | 72 | % kaf.alpha = kaf.Kinv*kaf.dicty; % end of training 73 | % sum: m^2 - m 74 | % prod: m^2 75 | 76 | %% 77 | 78 | function train_profiled(kaf,x,y) 79 | kaf.prev_dict_size = size(kaf.dict,1); 80 | t1 = tic; 81 | kaf.train(x,y); 82 | t2 = toc(t1); 83 | kaf.elapsed = kaf.elapsed + t2; 84 | end 85 | 86 | function bytes = lastbytes(kaf) % bytes used in last iteration 87 | m = size(kaf.dict,1); 88 | bytes = 8*(m^2 + m + m + m*size(kaf.dict,2)); % 8 bytes for double precision 89 | % Kinv, alpha, dicty, dict 90 | end 91 | 92 | end 93 | end 94 | -------------------------------------------------------------------------------- /lib/fbklms.m: -------------------------------------------------------------------------------- 1 | % Fixed-budget kernel least mean squares (FB-KLMS) algorithm. 2 | % 3 | % D. Rzepka, "Fixed-budget kernel least mean squares," 2012 IEEE 17th 4 | % Conference on Emerging Technologies & Factory Automation (ETFA), Krakow, 5 | % Poland, Sept. 2012, http://dx.doi.org/10.1109/ETFA.2012.6489767 6 | % 7 | % Remark: code contributed by Dominik Rzepka 8 | % 9 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
10 | % https://github.com/steven2358/kafbox/ 11 | 12 | classdef fbklms < kernel_adaptive_filter 13 | 14 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 15 | nu = .05; % growth criterion threshold 16 | M = 500; % dictionary size 17 | eta = .5; % learning rate 18 | kerneltype = 'gauss'; % kernel type 19 | kernelpar = 1; % kernel parameter 20 | end 21 | 22 | properties (GetAccess = 'private', SetAccess = 'private') % variables 23 | dict = []; % dictionary 24 | diagkdict = []; % diagonal of kernel matrix for dictionary 25 | alpha = []; % expansion coefficients 26 | end 27 | 28 | methods 29 | function kaf = fbklms(parameters) % constructor 30 | if (nargin > 0) % copy valid parameters 31 | for fn = fieldnames(parameters)' 32 | if ismember(fn,fieldnames(kaf)) 33 | kaf.(fn{1}) = parameters.(fn{1}); 34 | end 35 | end 36 | end 37 | end 38 | 39 | function y_est = evaluate(kaf,x) % evaluate the algorithm 40 | if size(kaf.dict,1)>0 41 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 42 | y_est = k'*kaf.alpha; 43 | else 44 | y_est = zeros(size(x,1),1); 45 | end 46 | end 47 | 48 | function train(kaf,x,y) % train the algorithm 49 | if size(kaf.dict,2)==0 % initialize 50 | kaf.dict = x; 51 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 52 | kaf.diagkdict(1) = k; 53 | kaf.alpha = kaf.eta*y*k/(k'*k); 54 | else 55 | 56 | kt = kernel([kaf.dict;x],x,kaf.kerneltype,kaf.kernelpar); 57 | k = kt(1:end-1); 58 | y_est = k'*kaf.alpha; 59 | e = y - y_est; 60 | 61 | kaf.alpha = kaf.alpha + kaf.eta*e*k/(k'*k); 62 | 63 | % growth criterion 64 | kx = kt(end); 65 | dependency = kx - k./kaf.diagkdict; 66 | if min(dependency) >= kaf.nu % expand dictionary 67 | kaf.dict = [kaf.dict; x]; 68 | kaf.diagkdict = [kaf.diagkdict; kx]; 69 | kaf.alpha = [kaf.alpha; kaf.eta*e/(k'*k)]; 70 | 71 | if length(kaf.alpha) > kaf.M % prune dictionary 72 | [~, id] = min(abs(kaf.alpha)); 73 | kaf.dict(id,:) = []; 74 | kaf.diagkdict(id) = []; 75 | kaf.alpha(id) = []; 76 | end 77 | end 78 | end 79 | end 80 | end 81 | end 82 | -------------------------------------------------------------------------------- /lib/krls.m: -------------------------------------------------------------------------------- 1 | % Kernel Recursive Least Squares algorithm with Approximate Linear 2 | % Dependency criterion 3 | % 4 | % Y. Engel, S. Mannor, and R. Meir, "The kernel recursive least-squares 5 | % algorithm," IEEE Transactions on Signal Processing, vol. 52, no. 8, pp. 6 | % 2275-2285, Aug. 2004, http://dx.doi.org/10.1109/TSP.2004.830985 7 | % 8 | % Remark: implementation includes a maximum dictionary size M 9 | % 10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
11 | % https://github.com/steven2358/kafbox/
12 | 
13 | classdef krls < kernel_adaptive_filter
14 | 
15 | properties (GetAccess = 'public', SetAccess = 'private')
16 | nu = 1E-4; % ALD threshold
17 | M = 1000; % maximum dictionary size
18 | kerneltype = 'gauss'; % kernel type
19 | kernelpar = 1; % kernel parameter
20 | end
21 | 
22 | properties (GetAccess = 'public', SetAccess = 'private')
23 | dict = []; % dictionary
24 | alpha = []; % expansion coefficients
25 | P = []; % inverse A'*A
26 | Kinv = []; % inverse kernel matrix
27 | end
28 | 
29 | methods
30 | 
31 | function kaf = krls(parameters) % constructor
32 | if (nargin > 0) % copy valid parameters
33 | for fn = fieldnames(parameters)'
34 | if ismember(fn,fieldnames(kaf))
35 | kaf.(fn{1}) = parameters.(fn{1});
36 | end
37 | end
38 | end
39 | end
40 | 
41 | function y_est = evaluate(kaf,x) % evaluate the algorithm
42 | if size(kaf.dict,1)>0
43 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
44 | y_est = k'*kaf.alpha;
45 | else
46 | y_est = zeros(size(x,1),1);
47 | end
48 | end
49 | 
50 | function train(kaf,x,y) % train the algorithm
51 | k = kernel([kaf.dict; x],x,kaf.kerneltype,kaf.kernelpar);
52 | kt = k(1:end-1);
53 | ktt = k(end);
54 | if numel(kt)==0 % initialize
55 | kaf.Kinv = 1/ktt;
56 | kaf.alpha = y/ktt;
57 | kaf.P = 1;
58 | kaf.dict = x;
59 | else
60 | at = kaf.Kinv*kt; % coefficients of best linear combination
61 | delta = ktt - kt'*at; % residual of linear approximation
62 | 
63 | if (delta>kaf.nu && size(kaf.dict,1)<kaf.M) % expand dictionary
64 | kaf.dict = [kaf.dict; x];
65 | kaf.Kinv = 1/delta*[delta*kaf.Kinv + at*at', -at; -at', 1];
66 | Z = zeros(size(kaf.P,1),1);
67 | kaf.P = [kaf.P Z; Z' 1];
68 | ode = 1/delta*(y - kt'*kaf.alpha);
69 | kaf.alpha = [kaf.alpha - at*ode; ode];
70 | else % only update the expansion coefficients
71 | q = kaf.P*at/(1 + at'*kaf.P*at);
72 | kaf.P = kaf.P - q*(at'*kaf.P);
73 | kaf.alpha = kaf.alpha + kaf.Kinv*q*(y - kt'*kaf.alpha);
74 | end
75 | end
76 | end
77 | end
78 | end
79 | 
--------------------------------------------------------------------------------
/lib/kalman.m:
--------------------------------------------------------------------------------
1 | % Kalman filter algorithm
2 | %
3 | % R.E. Kalman, "A New Approach to Linear Filtering and Prediction
4 | % Problems," Journal of Basic Engineering, vol. 82, no. 1, pp. 35-45,
5 | % 1960.
6 | %
7 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
8 | % https://github.com/steven2358/kafbox/
9 | 
10 | classdef kalman < linear_filter
11 | 
12 | properties (GetAccess = 'public', SetAccess = 'private') % parameters
13 | F = []; % state transition model
14 | Q = []; % covariance of the process noise
15 | B = []; % control input model
16 | R = []; % covariance of the measurement noise
17 | end
18 | 
19 | properties (GetAccess = 'public', SetAccess = 'private') % variables
20 | x_mean = []; % mean of the state estimate
21 | P = []; % covariance of the state estimate
22 | end
23 | 
24 | methods
25 | function obj = kalman(parameters) % constructor
26 | if (nargin > 0) % copy valid parameters
27 | for fn = fieldnames(parameters)'
28 | if ismember(fn,fieldnames(obj))
29 | obj.(fn{1}) = parameters.(fn{1});
30 | end
31 | end
32 | end
33 | end
34 | 
35 | function [mean_test, var_test] = evaluate(obj,H) % evaluate
36 | if ~isempty(obj.x_mean)
37 | mean_test = H*obj.x_mean;
38 | var_test = H*obj.P*H'; % predictive variance
39 | else
40 | mean_test = zeros(size(H,1),1);
41 | var_test = nan(size(H,1),1); % signal scale is unknown
42 | end
43 | end
44 | 
45 | function train(obj,H,z,u) % train the algorithm
46 | if isempty(obj.x_mean) % initialize
47 | [n,m] = size(H); % measurement and state dimensions
48 | obj.x_mean = zeros(m,1); % filter coefficients
49 | obj.P = eye(m);
50 | 
51 | if isempty(obj.F)
52 | % default state transition: random walk
53 | obj.F = eye(m);
54 | obj.Q = 0.1*eye(m);
55 | % default control input
56 | obj.B = eye(m);
57 | % default measurement noise covariance
58 | obj.R = 1E-2*eye(n);
59 | end
60 | end
61 | 
62 | if nargin<4
63 | u = zeros(size(H,2),1); % no control input
64 | end
65 | 
66 | % state prediction equations
67 | obj.x_mean = obj.F*obj.x_mean + obj.B*u;
68 | obj.P = obj.F*obj.P*obj.F' + obj.Q;
69 | 
70 | % calculate the kalman gain
71 | K = obj.P*H'/(H*obj.P*H' + obj.R);
72 | 
73 | % measurement update equations
74 | obj.x_mean = obj.x_mean + K*(z - H*obj.x_mean); % mean
75 | obj.P = obj.P - K*H*obj.P; % covariance
76 | end
77 | end
78 | end
79 | 
--------------------------------------------------------------------------------
/lib/kap.m:
--------------------------------------------------------------------------------
1 | % Kernel Affine Projection (KAP) algorithm with coherence criterion
2 | %
3 | % C. Richard, J.C.M. Bermudez, and P. Honeine, "Online Prediction of Time
4 | % Series Data With Kernels," IEEE Transactions on Signal Processing,
5 | % vol. 57, no. 3, pp. 
1058-1067, March 2009,
6 | % http://dx.doi.org/10.1109/TSP.2008.2009895
7 | %
8 | % Remark: memories are initialized empty in this implementation
9 | %
10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
11 | % https://github.com/steven2358/kafbox/
12 | 
13 | classdef kap < kernel_adaptive_filter
14 | 
15 | properties (GetAccess = 'public', SetAccess = 'private')
16 | mu0 = .95; % coherence criterion threshold
17 | eta = .5; % step size
18 | eps = 1E-2; % regularization
19 | p = 20; % memory length
20 | kerneltype = 'gauss'; % kernel type
21 | kernelpar = 1; % kernel parameter
22 | end
23 | 
24 | properties (GetAccess = 'public', SetAccess = 'private')
25 | memx = []; % input memory
26 | memy = []; % output memory
27 | dict = []; % dictionary
28 | modict = []; % modulus of the dictionary elements
29 | alpha = []; % expansion coefficients
30 | end
31 | 
32 | methods
33 | function kaf = kap(parameters) % constructor
34 | if (nargin > 0) % copy valid parameters
35 | for fn = fieldnames(parameters)'
36 | if ismember(fn,fieldnames(kaf))
37 | kaf.(fn{1}) = parameters.(fn{1});
38 | end
39 | end
40 | end
41 | end
42 | 
43 | function y_est = evaluate(kaf,x) % evaluate the algorithm
44 | if size(kaf.dict,1)>0
45 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
46 | y_est = k'*kaf.alpha;
47 | else
48 | y_est = zeros(size(x,1),1);
49 | end
50 | end
51 | 
52 | function train(kaf,x,y) % train the algorithm
53 | if (length(kaf.memy) < kaf.p)
54 | kaf.memx = [kaf.memx; x]; % grow the memory
55 | kaf.memy = [kaf.memy; y]; % grow the memory
56 | else
57 | kaf.memx = [kaf.memx(2:end,:); x]; % sliding memory
58 | kaf.memy = [kaf.memy(2:end); y]; % sliding memory
59 | end
60 | 
61 | if size(kaf.dict,2)==0 % initialize
62 | k = kernel(x,x,kaf.kerneltype,kaf.kernelpar);
63 | kaf.dict = x;
64 | kaf.modict = sqrt(k);
65 | kaf.alpha = 0;
66 | else
67 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
68 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar);
69 | C = k./(sqrt(kx)*kaf.modict); % coherence
70 | if (max(C) <= kaf.mu0) % coherence criterion
71 | kaf.dict = [kaf.dict; x]; % order increase
72 | kaf.modict = [kaf.modict; sqrt(kx)]; % order increase
73 | kaf.alpha = [kaf.alpha; 0]; % order increase
74 | end
75 | end
76 | 
77 | H = kernel(kaf.memx,kaf.dict,kaf.kerneltype,kaf.kernelpar);
78 | kaf.alpha = kaf.alpha + ...
79 | kaf.eta*H'/...
80 | (kaf.eps*eye(size(H,1)) + H*H')*...
81 | (kaf.memy - H*kaf.alpha);
82 | end
83 | end
84 | end
85 | 
--------------------------------------------------------------------------------
/lib/exkrls.m:
--------------------------------------------------------------------------------
1 | % Extended Kernel Recursive Least Squares algorithm
2 | %
3 | % W. Liu, I.M. Park, Y. Wang, and J.C. Principe, "Extended Kernel Recursive
4 | % Least Squares Algorithm," IEEE Transactions on Signal Processing, vol.
5 | % 57, no. 10, pp. 3801-3814, Oct. 2009,
6 | % http://dx.doi.org/10.1109/TSP.2009.2022007
7 | %
8 | % Remark: implementation of the tracking model, includes a maximum
9 | % dictionary size M
10 | %
11 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
12 | % https://github.com/steven2358/kafbox/
13 | 
14 | classdef exkrls < kernel_adaptive_filter
15 | 
16 | properties (GetAccess = 'public', SetAccess = 'private')
17 | alphaf = .999; % state forgetting factor, "alpha" in publication
18 | beta = .995; % data forgetting factor
19 | lambda = 1E-2; % regularization
20 | q = 1E-3; % trade-off between modeling variation and measurement disturbance
21 | M = 500; % maximum dictionary size
22 | kerneltype = 'gauss'; % kernel type
23 | kernelpar = 1; % kernel parameter
24 | end
25 | 
26 | properties (GetAccess = 'public', SetAccess = 'private')
27 | dict = []; % dictionary
28 | rho = [];
29 | Q = [];
30 | i = 0; % iteration number;
31 | alpha = []; % expansion coefficients, "a" in publication
32 | end
33 | 
34 | methods
35 | function kaf = exkrls(parameters) % constructor
36 | allpars = {'alphaf','lambda','beta','q','kerneltype','kernelpar','M'};
37 | if (nargin > 0)
38 | for j=1:length(allpars)
39 | p = allpars{j};
40 | if isfield(parameters,p), kaf.(p) = parameters.(p); end
41 | end
42 | end
43 | end
44 | 
45 | function y_est = evaluate(kaf,x) % evaluate the algorithm
46 | if size(kaf.dict,1)>0
47 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
48 | y_est = k'*kaf.alpha;
49 | else
50 | y_est = zeros(size(x,1),1);
51 | end
52 | end
53 | 
54 | function train(kaf,x,y) % train the algorithm
55 | kaf.i = kaf.i + 1;
56 | k = kernel([kaf.dict; x],x,kaf.kerneltype,kaf.kernelpar);
57 | kt = k(1:end-1);
58 | ktt = k(end);
59 | if numel(kt)==0 % initialize
60 | kaf.alpha = kaf.alphaf*y/(kaf.lambda*kaf.beta+ktt);
61 | kaf.rho = kaf.lambda*kaf.beta/(kaf.alphaf^2*kaf.beta + kaf.lambda*kaf.q);
62 | kaf.Q = kaf.alphaf^2/((kaf.beta*kaf.lambda+ktt)*(kaf.alphaf^2+kaf.beta*kaf.lambda*kaf.q));
63 | kaf.dict = x;
64 | else
65 | if (size(kaf.dict,1)<kaf.M) % avoid infinite dictionary growth
66 | z = kaf.Q*kt;
67 | r = kaf.beta^kaf.i*kaf.rho + ktt - kt'*z;
68 | err = y - kt'*kaf.alpha;
69 | kaf.alpha = kaf.alphaf*[kaf.alpha - z*err/r; err/r];
70 | dummy = kaf.alphaf^2 + kaf.beta^kaf.i*kaf.q*kaf.rho;
71 | kaf.rho = kaf.rho/dummy;
72 | kaf.Q = kaf.alphaf^2/(r*dummy)*[kaf.Q*r + z*z', -z; -z', 1];
73 | kaf.dict = [kaf.dict; x];
74 | end
75 | end
76 | end
77 | end
78 | end
79 | 
--------------------------------------------------------------------------------
/lib/lkapa.m:
--------------------------------------------------------------------------------
1 | % Leaky Kernel Affine Projection Algorithm
2 | %
3 | % W. Liu and J.C. Principe, "Kernel Affine Projection Algorithms", EURASIP
4 | % Journal on Advances in Signal Processing, Volume 2008, Article ID 784292,
5 | % 12 pages. http://dx.doi.org/10.1155/2008/784292
6 | %
7 | % Remark: This implementation includes a maximum dictionary size M. With
8 | % M=Inf this algorithm is equivalent to KAPA-3 from the publication. With
9 | % M=Inf and lambda=0 it is equivalent to KAPA-1.
10 | %
11 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
12 | % https://github.com/steven2358/kafbox/
13 | 
14 | classdef lkapa < kernel_adaptive_filter
15 | 
16 | properties (GetAccess = 'public', SetAccess = 'private') % parameters
17 | eta = .05; % learning rate
18 | lambda = 1E-2; % Tikhonov regularization
19 | M = 1000; % maximum dictionary size
20 | P = 20; % number of regressors
21 | kerneltype = 'gauss'; % kernel type
22 | kernelpar = 1; % kernel parameter
23 | end
24 | 
25 | properties (GetAccess = 'protected', SetAccess = 'private') % variables
26 | xmem = []; % input memory
27 | ymem = []; % output memory
28 | dict = []; % dictionary
29 | alpha = []; % expansion coefficients
30 | end
31 | 
32 | methods
33 | function kaf = lkapa(parameters) % constructor
34 | if (nargin > 0) % copy valid parameters
35 | for fn = fieldnames(parameters)'
36 | if ismember(fn,fieldnames(kaf))
37 | kaf.(fn{1}) = parameters.(fn{1});
38 | end
39 | end
40 | end
41 | end
42 | 
43 | function y_est = evaluate(kaf,x) % evaluate the algorithm
44 | if size(kaf.dict,1)>0
45 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
46 | y_est = k'*kaf.alpha;
47 | else
48 | y_est = zeros(size(x,1),1);
49 | end
50 | end
51 | 
52 | function train(kaf,x,y) % train the algorithm
53 | if size(kaf.dict,2)==0 % initialize
54 | kaf.dict = x;
55 | kaf.alpha = kaf.eta*y;
56 | kaf.xmem = x;
57 | kaf.ymem = y;
58 | else
59 | if size(kaf.dict,1) < kaf.M
60 | if size(kaf.xmem,1)<kaf.P
61 | kaf.xmem = [kaf.xmem; x]; % grow the memory
62 | kaf.ymem = [kaf.ymem; y];
63 | else
64 | kaf.xmem = [kaf.xmem(2:end,:); x]; % sliding memory
65 | kaf.ymem = [kaf.ymem(2:end); y];
66 | end
67 | kaf.dict = [kaf.dict; x]; % grow the dictionary
68 | kaf.alpha = [kaf.alpha; 0];
69 | H = kernel(kaf.xmem,kaf.dict,kaf.kerneltype,kaf.kernelpar);
70 | kaf.alpha = (1 - kaf.eta*kaf.lambda)*kaf.alpha + ...
71 | kaf.eta*H'*(kaf.ymem - H*kaf.alpha);
72 | end
73 | end
74 | end
75 | end
76 | end
77 | 
--------------------------------------------------------------------------------
/lib/swkrls.m:
--------------------------------------------------------------------------------
1 | % Sliding-Window Kernel Recursive Least Squares algorithm
2 | %
3 | % S. Van Vaerenbergh, J. Via, and I. Santamaria, "A Sliding-Window Kernel
4 | % RLS Algorithm and Its Application to Nonlinear Channel Identification,"
5 | % 2006 IEEE International Conference on Acoustics, Speech and Signal
6 | % Processing (ICASSP), May 2006
7 | %
8 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
9 | % https://github.com/steven2358/kafbox/
10 | 
11 | classdef swkrls < kernel_adaptive_filter
12 | 
13 | properties (GetAccess = 'public', SetAccess = 'private') % parameters
14 | M = 250; % dictionary size
15 | c = 1E-4; % regularization parameter
16 | kerneltype = 'gauss'; % kernel type
17 | kernelpar = 1; % kernel parameter
18 | end
19 | 
20 | properties (GetAccess = 'public', SetAccess = 'private') % variables
21 | dict = []; % dictionary
22 | dicty = []; % output dictionary
23 | alpha = []; % expansion coefficients
24 | Kinv = []; % inverse kernel matrix
25 | end
26 | 
27 | methods
28 | function kaf = swkrls(parameters) % constructor
29 | if (nargin > 0) % copy valid parameters
30 | for fn = fieldnames(parameters)'
31 | if ismember(fn,fieldnames(kaf))
32 | kaf.(fn{1}) = parameters.(fn{1});
33 | end
34 | end
35 | end
36 | end
37 | 
38 | function y_est = evaluate(kaf,x) % evaluate the algorithm
39 | if size(kaf.dict,1)>0
40 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
41 | y_est = k'*kaf.alpha;
42 | else
43 | y_est = zeros(size(x,1),1);
44 | end
45 | end
46 | 
47 | function train(kaf,x,y) % train the algorithm
48 | kaf.dict = [kaf.dict; x]; % add base to dictionary
49 | kaf.dicty = [kaf.dicty; y]; % add y to output dictionary
50 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
51 | kaf.Kinv = kaf.grow_kernel_matrix(kaf.Kinv,k,kaf.c); % grow
52 | 
53 | if (size(kaf.dict,1) > kaf.M) % prune
54 | kaf.dict(1,:) = []; % remove oldest base from dictionary
55 | kaf.dicty(1) = []; % prune output dictionary
56 | kaf.Kinv = kaf.prune_kernel_matrix(kaf.Kinv); % prune
57 | end
58 | 
59 | kaf.alpha = kaf.Kinv*kaf.dicty;
60 | end
61 | end
62 | 
63 | methods (Static = true)
64 | function Kinv = 
grow_kernel_matrix(Kinv,k,c)
65 | % calculate inverse of the expanded kernel matrix K_new = [K b; b' d]
66 | b = k(1:end-1);
67 | d = k(end) + c; % add regularization
68 | if numel(b)>0
69 | g_inv = d - b'*Kinv*b;
70 | g = 1/g_inv;
71 | f = -Kinv*b*g;
72 | E = Kinv - Kinv*b*f';
73 | Kinv = [E f;f' g];
74 | else
75 | Kinv = 1/d;
76 | end
77 | end
78 | 
79 | function Kinv = prune_kernel_matrix(Kinv)
80 | % calculate inverse of the pruned kernel matrix Kp, where K = [a b'; b Kp]
81 | m = size(Kinv,1);
82 | G = Kinv(2:m,2:m);
83 | f = Kinv(2:m,1);
84 | e = Kinv(1,1);
85 | Kinv = G - f*f'/e;
86 | end
87 | end
88 | end
89 | 
--------------------------------------------------------------------------------
/lib/fbkrls.m:
--------------------------------------------------------------------------------
1 | % Fixed-Budget Kernel Recursive Least Squares algorithm
2 | %
3 | % S. Van Vaerenbergh, I. Santamaria, W. Liu, and J.C. Principe, "Fixed-
4 | % budget kernel recursive least-squares," 2010 IEEE International
5 | % Conference on Acoustics Speech and Signal Processing (ICASSP), pp. 1882-
6 | % 1885, 14-19 March 2010, http://dx.doi.org/10.1109/ICASSP.2010.5495350
7 | %
8 | % Remark: label update is not implemented (mu=0)
9 | %
10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
11 | % https://github.com/steven2358/kafbox/
12 | 
13 | classdef fbkrls < kernel_adaptive_filter
14 | 
15 | properties (GetAccess = 'public', SetAccess = 'private')
16 | M = 100; % dictionary size
17 | lambda = 1E-2; % regularization parameter
18 | kerneltype = 'gauss'; % kernel type
19 | kernelpar = 1; % kernel parameter
20 | end
21 | 
22 | properties (GetAccess = 'public', SetAccess = 'private')
23 | dict = []; % dictionary
24 | dicty = []; % output dictionary
25 | alpha = []; % expansion coefficients
26 | Kinv = []; % inverse kernel matrix
27 | end
28 | 
29 | methods
30 | function kaf = fbkrls(parameters) % constructor
31 | if (nargin > 0) % copy valid parameters
32 | for fn = fieldnames(parameters)'
33 | if ismember(fn,fieldnames(kaf))
34 | kaf.(fn{1}) = parameters.(fn{1});
35 | end
36 | end
37 | end
38 | end
39 | 
40 | function y_est = evaluate(kaf,x) % evaluate the algorithm
41 | if size(kaf.dict,1)>0
42 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
43 | y_est = k'*kaf.alpha;
44 | else
45 | y_est = zeros(size(x,1),1);
46 | end
47 | end
48 | 
49 | function train(kaf,x,y) % train the algorithm
50 | kaf.dict = [kaf.dict; x]; % grow
51 | kaf.dicty = [kaf.dicty; y]; % grow
52 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar);
53 | kaf.Kinv = kaf.grow_kernel_matrix(kaf.Kinv,k,kaf.lambda); % grow
54 | 
55 | kaf.alpha = kaf.Kinv*kaf.dicty;
56 | if (size(kaf.dict,1) > kaf.M)
57 | ape = abs(kaf.alpha)./diag(kaf.Kinv); % a posteriori error
58 | [~,ind] = min(ape);
59 | 
60 | kaf.dict(ind,:) = []; % prune
61 | kaf.dicty(ind) = []; % prune
62 | kaf.Kinv = kaf.prune_kernel_matrix(kaf.Kinv,ind); % prune
63 | end
64 | 
65 | kaf.alpha = kaf.Kinv*kaf.dicty;
66 | end
67 | end
68 | 
69 | methods (Static = true)
70 | function Kinv = grow_kernel_matrix(Kinv,k,c)
71 | % calculate inverse of the expanded kernel matrix K_new = [K b; b' d]
72 | b = k(1:end-1);
73 | d = k(end) + c; % add regularization
74 | if numel(b)>0
75 | g_inv = d - b'*Kinv*b;
76 | g = 1/g_inv;
77 | f = -Kinv*b*g;
78 | E = Kinv - Kinv*b*f';
79 | Kinv = [E f;f' g];
80 | else
81 | Kinv = 1/d;
82 | end
83 | end
84 | 
85 | function Kinv = prune_kernel_matrix(Kinv,ind)
86 | % calculate inverse of the pruned kernel matrix
87 | m = size(Kinv,1);
88 | noind = 1:m;
89 | noind(ind) = [];
90 | G = 
Kinv(noind,noind);
91 | f = Kinv(noind,ind);
92 | e = Kinv(ind,ind);
93 | Kinv = G - f*f'/e;
94 | end
95 | end
96 | end
97 | 
--------------------------------------------------------------------------------
/doc/extra.bib:
--------------------------------------------------------------------------------
1 | @STRING{ieee-mlsp = {IEEE International Workshop on Machine Learning for Signal Processing (MLSP)}}
2 | @STRING{ieee-icassp = {IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}}
3 | @STRING{ieee-ijcnn = {IEEE International Joint Conference on Neural Networks (IJCNN)}}
4 | 
5 | @INPROCEEDINGS{singh2012random,
6 | author = {Singh, Abhishek and Ahuja, Narendra and Moulin, Pierre},
7 | booktitle = {2012 } # ieee-mlsp,
8 | title = {Online learning with kernels: Overcoming the growing sum problem},
9 | year = {2012},
10 | month = sep,
11 | pages = {1-6},
12 | doi = {10.1109/MLSP.2012.6349811},
13 | }
14 | 
15 | @INPROCEEDINGS{takizawa2013efficient,
16 | author = {Takizawa, M.-A. and Yukawa, M.},
17 | booktitle = {2013 } # ieee-icassp,
18 | title = {An efficient data-reusing kernel adaptive filtering algorithm based on Parallel HYperslab Projection along Affine Subspaces},
19 | year = {2013},
20 | month = may,
21 | pages = {3557-3561},
22 | doi = {10.1109/ICASSP.2013.6638320}
23 | }
24 | 
25 | @INPROCEEDINGS{gao2013kernel,
26 | author = {Wei Gao and Jie Chen and Richard, C. and Jianguo Huang and Flamary, R.},
27 | booktitle = {2013 } # ieee-icassp,
28 | title = {Kernel LMS algorithm with forward-backward splitting for dictionary learning},
29 | year = {2013},
30 | month = may,
31 | pages = {5735-5739},
32 | doi = {10.1109/ICASSP.2013.6638763}
33 | }
34 | 
35 | @INPROCEEDINGS{pokharel2013mixture,
36 | author = {Pokharel, R. and Seth, S. 
and Principe, J.C.}, 37 | booktitle = {2013 } # ieee-ijcnn, 38 | title = {Mixture kernel least mean square}, 39 | year = {2013}, 40 | month = aug, 41 | pages = {1-7}, 42 | doi = {10.1109/IJCNN.2013.6706867} 43 | } 44 | 45 | @misc{wiki:online_machine_learning, 46 | author = "Wikipedia", 47 | title = "Online machine learning --- Wikipedia{,} The Free Encyclopedia", 48 | year = "2014", 49 | url = "http://en.wikipedia.org/w/index.php?title=Online_machine_learning&oldid=610152751", 50 | note = "[Online; accessed 26-May-2014]" 51 | } 52 | 53 | @article{wilson2014best, 54 | title = {Best practices for scientific computing}, 55 | author = {Wilson, Greg and Aruliah, DA and Brown, C Titus and Hong, Neil P Chue and Davis, Matt and Guy, Richard T and Haddock, Steven HD and Huff, Kathryn D and Mitchell, Ian M and Plumbley, Mark D and others}, 56 | journal = {PLoS biology}, 57 | volume = {12}, 58 | number = {1}, 59 | pages = {e1001745}, 60 | year = {2014}, 61 | publisher = {Public Library of Science} 62 | } 63 | 64 | @incollection{blum1998line, 65 | title = {On-line algorithms in machine learning}, 66 | booktitle = {Online Algorithms: The State of the Art}, 67 | chapter = 14, 68 | author = {Blum, Avrim}, 69 | year = {1998}, 70 | publisher = {Springer Berlin Heidelberg}, 71 | series = {Lecture Notes in Computer Science}, 72 | volume={1442}, 73 | editor = {Fiat, Amos and Woeginger, Gerhard J.}, 74 | doi = {10.1007/BFb0029575}, 75 | pages={306-325} 76 | } 77 | 78 | @INPROCEEDINGS{fernandez2015probabilistic, 79 | author={Fern\'andez-Bes, Jesus and Elvira, V\'ictor and Van Vaerenbergh, Steven}, 80 | booktitle = {2015 } # ieee-icassp, 81 | title={A probabilistic least-mean-squares filter}, 82 | year={2015}, 83 | month = apr, 84 | pages={2199--2203}, 85 | doi={10.1109/ICASSP.2015.7178361}, 86 | ISSN={1520-6149} 87 | } 88 | 89 | @article{kalman1960new, 90 | title={A new approach to linear filtering and prediction problems}, 91 | author={Kalman, Rudolph Emil}, 92 | journal={Journal of basic Engineering}, 93 | volume={82}, 94 | number={1}, 95 | pages={35--45}, 96 | year={1960}, 97 | publisher={American Society of Mechanical Engineers} 98 | } 99 | -------------------------------------------------------------------------------- /lib/profiler/fbkrls_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Fixed-Budget Kernel Recursive Least Squares 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef fbkrls_profiler < fbkrls 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | prev_dict_size = 0; % previous dictionary size for growth check 11 | end 12 | 13 | methods 14 | 15 | function kaf = fbkrls_profiler(parameters) % constructor 16 | if nargin<1, parameters = struct(); end 17 | kaf = kaf@fbkrls(parameters); 18 | end 19 | 20 | function flops = lastflops(kaf) % flops for last iteration 21 | m = size(kaf.dict,1); 22 | if kaf.prev_dict_size < m % growing 23 | m1 = m; 24 | m2 = m - 1; 25 | m4 = m; 26 | floptions = struct(... 27 | 'sum', m2^2 + m2^2 - m2 + m2^2 + m^2 - m + m4^2 - m4, ... 28 | 'mult', m2^2 + m2 + m2^2 + m^2 + m4^2, ... 29 | 'div', 1, ... 30 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 31 | else 32 | m1 = m + 1; 33 | m2 = m; 34 | m3 = m; 35 | m4 = m + 1; 36 | m5 = m + 1; 37 | floptions = struct(... 38 | 'sum', m2^2 + m2^2 - m2 + m2^2 + m3^2 + m^2 - m + m4^2 - m4, ... 
39 | 'mult', m2^2 + m2 + m2^2 + m3^2 + m3 + m^2 + m4^2, ... 40 | 'div', 1 + 1 + m5, ... 41 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 42 | end 43 | 44 | flops = kflops(floptions); 45 | end 46 | 47 | %% flops breakdown 48 | 49 | % k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); % grow Kinv 50 | % kernel: m1 51 | 52 | % d = k(end) + kaf.c; % grow Kinv 53 | % sum: 1 54 | 55 | % g_inv = d - b'*kaf.Kinv*b; % grow Kinv 56 | % sum: m2^2 57 | % mult: m2^2 + m2 58 | 59 | % g = 1/g_inv; % grow Kinv 60 | % div: 1 61 | 62 | % f = -kaf.Kinv*b*g; % grow Kinv 63 | % sum: m2^2 - m2 64 | % mult: m2^2 + m2 65 | 66 | % E = kaf.Kinv - kaf.Kinv*b*f'; % grow Kinv 67 | % sum: m2^2 68 | % mult: m2^2 69 | 70 | % kaf.alpha = kaf.Kinv*kaf.dicty; % before prune check 71 | % sum: m4^2 - m4 72 | % mult: m4^2 73 | 74 | % err_ap = abs(kaf.alpha)./diag(kaf.Kinv); % finding index to prune 75 | % div: m5 76 | 77 | % kaf.Kinv = G - f*f'/e; % prune Kinv 78 | % sum: m3^2 79 | % mult: m3^2 + m3 80 | % div: 1 81 | 82 | % kaf.alpha = kaf.Kinv*kaf.dicty; % end of training 83 | % sum: m^2 - m 84 | % mult: m^2 85 | 86 | %% 87 | 88 | function train_profiled(kaf,x,y) 89 | kaf.prev_dict_size = size(kaf.dict,1); 90 | t1 = tic; 91 | kaf.train(x,y); 92 | t2 = toc(t1); 93 | kaf.elapsed = kaf.elapsed + t2; 94 | end 95 | 96 | function bytes = lastbytes(kaf) % bytes used in last iteration 97 | m = size(kaf.dict,1); 98 | bytes = 8*(m^2 + m + m + m*size(kaf.dict,2)); % 8 bytes for double precision 99 | % Kinv, alpha, dicty, dict 100 | end 101 | 102 | end 103 | end 104 | -------------------------------------------------------------------------------- /lib/nlkapa.m: -------------------------------------------------------------------------------- 1 | % Normalized Leaky Kernel Affine Projection Algorithm 2 | % 3 | % W. Liu and J.C. Principe, "Kernel Affine Projection Algorithms", EURASIP 4 | % Journal on Advances in Signal Processing, Volume 2008, Article ID 784292, 5 | % 12 pages. http://dx.doi.org/10.1155/2008/784292 6 | % 7 | % Remark: This implementation includes a maximum dictionary size M. With 8 | % M=Inf this algorithm is equivalent to KAPA-4 from the publication. With 9 | % M=Inf and lambda=0 it is equivalent to KAPA-2. 10 | % 11 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
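%
% Usage sketch (a minimal illustration added by the editor; the parameter
% values and the data X, y are assumptions, not recommendations): per the
% remark above, setting M=Inf and lambda=0 recovers KAPA-2.
%
%   kaf = nlkapa(struct('eta',.05,'lambda',0,'M',Inf,'P',20));
%   for n = 1:size(X,1)
%       y_est = kaf.evaluate(X(n,:)); % predict before training
%       kaf.train(X(n,:),y(n)); % train with one input-output pair
%   end
%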
12 | % https://github.com/steven2358/kafbox/ 13 | 14 | classdef nlkapa < kernel_adaptive_filter 15 | 16 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 17 | eta = .05; % learning rate 18 | eps = 1E-4; % Newton regularization 19 | lambda = 1E-2; % Tikhonov regularization 20 | M = 1000; % maximum dictionary size 21 | P = 20; % number of regressors 22 | kerneltype = 'gauss'; % kernel type 23 | kernelpar = 1; % kernel parameter 24 | end 25 | 26 | properties (GetAccess = 'protected', SetAccess = 'private') % variables 27 | xmem = []; % input memory 28 | ymem = []; % output memory 29 | dict = []; % dictionary 30 | alpha = []; % expansion coefficients 31 | end 32 | 33 | methods 34 | function kaf = nlkapa(parameters) % constructor 35 | if (nargin > 0) % copy valid parameters 36 | for fn = fieldnames(parameters)' 37 | if ismember(fn,fieldnames(kaf)) 38 | kaf.(fn{1}) = parameters.(fn{1}); 39 | end 40 | end 41 | end 42 | end 43 | 44 | function y_est = evaluate(kaf,x) % evaluate the algorithm 45 | if size(kaf.dict,1)>0 46 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 47 | y_est = k'*kaf.alpha; 48 | else 49 | y_est = zeros(size(x,1),1); 50 | end 51 | end 52 | 53 | function train(kaf,x,y) % train the algorithm 54 | if size(kaf.dict,2)==0 % initialize 55 | kaf.dict = x; 56 | kaf.alpha = kaf.eta*y; 57 | kaf.xmem = x; 58 | kaf.ymem = y; 59 | else 60 | if size(kaf.dict,1) < kaf.M 61 | if size(kaf.xmem,1) 86 | % prefer inv to \ to avoid instability 87 | end 88 | end 89 | end 90 | end 91 | end 92 | -------------------------------------------------------------------------------- /lib/klms_csl1.m: -------------------------------------------------------------------------------- 1 | % Kernel Least Mean Squares algorithm with Coherence-Sparsification 2 | % criterion and L1-norm regularization 3 | % 4 | % Wei Gao, Jie Chen, Cedric Richard, Jianguo Huang, and Remi Flamary, 5 | % "Kernel LMS algorithm with forward-backward splitting for dictionary 6 | % learning," 2013 IEEE International Conference on Acoustics, Speech, and 7 | % Signal Processing (ICASSP 2013), Vancouver, Canada, May 2013. 8 | % http://dx.doi.org/10.1109/ICASSP.2013.6638763 9 | % 10 | % Remark: Code contributed by Wei Gao and Cedric Richard. 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab.
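%
% The pruning step relies on the static prox method below, which is plain
% l1 soft-thresholding: each coefficient shrinks toward zero by lambda and
% is removed from the dictionary once it reaches exactly zero. A small
% worked example (values are illustrative, added by the editor):
%
%   klms_csl1.prox(.1, [-.3; .05; .2]) % returns [-.2; 0; .1]
%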
13 | % https://github.com/steven2358/kafbox/ 14 | 15 | classdef klms_csl1 < kernel_adaptive_filter 16 | 17 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 18 | eta = .1; % step-size 19 | mu0 = .95; % threshold for coherence criterion 20 | lambda = 5E-4; % sparsification threshold 21 | kerneltype = 'gauss'; % kernel type 22 | kernelpar = .5; % kernel parameter 23 | end 24 | 25 | properties (GetAccess = 'public', SetAccess = 'private') % variables 26 | dict = []; % dictionary 27 | modict = []; % modulus of the dictionary elements 28 | alpha = []; % expansion coefficients 29 | end 30 | 31 | methods 32 | function kaf = klms_csl1(parameters) % constructor 33 | if (nargin > 0) % copy valid parameters 34 | for fn = fieldnames(parameters)' 35 | if ismember(fn,fieldnames(kaf)) 36 | kaf.(fn{1}) = parameters.(fn{1}); 37 | end 38 | end 39 | end 40 | end 41 | 42 | function y_est = evaluate(kaf,x) % evaluate the algorithm 43 | if size(kaf.dict,1)>0 44 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 45 | y_est = k'*kaf.alpha; 46 | else 47 | y_est = zeros(size(x,1),1); 48 | end 49 | end 50 | 51 | function train(kaf,x,y) % train the algorithm 52 | if size(kaf.dict,2)==0 % initialize 53 | kd = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 54 | kaf.dict = x; 55 | kaf.modict = sqrt(kd); 56 | kaf.alpha = 0; 57 | else 58 | kd = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 59 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 60 | C = kd./(sqrt(kx)*kaf.modict); % coherence 61 | 62 | if (max(C) <= kaf.mu0) % coherence criterion 63 | kaf.dict = [kaf.dict; x]; % order increase 64 | kaf.modict = [kaf.modict; sqrt(kx)]; 65 | kaf.alpha = [kaf.alpha; 0]; % order increase 66 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 67 | kd = [kd; kx]; 68 | end 69 | end 70 | 71 | y_est = kaf.evaluate(x); 72 | e = y - y_est; 73 | kaf.alpha = kaf.alpha + kaf.eta * kd * e; 74 | 75 | kaf.alpha = kaf.prox(kaf.lambda,kaf.alpha); 76 | 77 | % prune 78 | idx = find(kaf.alpha == 0); 79 | if ~isempty(idx) 80 | kaf.dict(idx,:) = []; 81 | kaf.modict(idx,:) = []; 82 | kaf.alpha(idx,:) = []; 83 | end 84 | end 85 | 86 | end 87 | 88 | methods (Static = true) 89 | 90 | % proximal operator for l1 norm 91 | function alphap = prox(lambda,alpha) 92 | alphap = sign(alpha).*max(abs(alpha) - lambda, 0); 93 | end 94 | 95 | end 96 | end 97 | -------------------------------------------------------------------------------- /lib/mknlms_cs.m: -------------------------------------------------------------------------------- 1 | % Multikernel Normalized Least Mean Square algorithm With Coherence-Based 2 | % Sparsification (MKNLMS-CS) 3 | % 4 | % M. Yukawa, "Multikernel Adaptive Filtering," IEEE Transactions on Signal 5 | % Processing, vol.60, no.9, pp.4672,4682, Sept. 2012, 6 | % http://dx.doi.org/10.1109/TSP.2012.2200889 7 | % 8 | % Remark: version01, August 2013 9 | % 10 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 
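%
% Usage sketch (a minimal illustration added by the editor; the parameter
% values are assumptions): a single shared dictionary is combined with M
% Gaussian kernels of different bandwidths, so alpha holds one coefficient
% column per kernel.
%
%   kaf = mknlms_cs(struct('delta',.9,'eta',.5,'kernelpars',[.5 1 2]));
%   kaf.train(x,y); % after training, alpha is (dictionary size) x 3
%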
11 | % https://github.com/steven2358/kafbox/ 12 | 13 | classdef mknlms_cs < kernel_adaptive_filter 14 | 15 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 16 | delta = .95; % coherence criterion threshold 17 | eta = .5; % step size 18 | rho = 1E-2; % regularization 19 | kerneltype = 'gauss'; % kernel type 20 | kernelpars = .5:.5:2; % kernel parameters 21 | end 22 | properties (GetAccess = 'public', SetAccess = 'private') % variables 23 | dict = []; % dictionary 24 | alpha = []; % expansion coefficients, H in the original article 25 | end 26 | 27 | methods 28 | 29 | function kaf = mknlms_cs(parameters) % constructor 30 | if (nargin > 0) % copy valid parameters 31 | for fn = fieldnames(parameters)' 32 | if ismember(fn,fieldnames(kaf)) 33 | kaf.(fn{1}) = parameters.(fn{1}); 34 | end 35 | end 36 | end 37 | end 38 | 39 | function y_est = evaluate(kaf,x) % evaluate the algorithm 40 | N = size(x,1); 41 | if size(kaf.dict,1)>0 42 | K = multikernel_dict(kaf,x); 43 | y_est = reshape(K,N,[])*kaf.alpha(:); 44 | else 45 | y_est = zeros(size(x,1),1); 46 | end 47 | end 48 | 49 | function train(kaf,x,y) % train the algorithm 50 | M = length(kaf.kernelpars); % number of distinct kernels 51 | if size(kaf.dict,2)==0 % initialize 52 | kaf.dict = x; 53 | kaf.alpha = zeros(1,M); % row with coefficients for all kernels 54 | else 55 | K = multikernel_dict(kaf,x); 56 | if (max(K(:)) <= kaf.delta) % coherence criterion 57 | kaf.dict = [kaf.dict; x]; % order increase 58 | kaf.alpha = [kaf.alpha; zeros(1,M)]; % order increase 59 | end 60 | end 61 | K = multikernel_dict(kaf,x); 62 | kaf.alpha = kaf.alpha + kaf.eta /... 63 | (kaf.rho + K(:)'*K(:)) * (y - K(:)'*kaf.alpha(:)) * ... 64 | reshape(K,size(kaf.dict,1),M); 65 | end 66 | 67 | function K = multikernel_dict(kaf,X) % multikernel for dictionary 68 | M = length(kaf.kernelpars); % number of distinct kernels 69 | N = size(X,1); 70 | D = size(kaf.dict,1); 71 | K = zeros(N,D,M); 72 | switch kaf.kerneltype 73 | case 'gauss' % RBF kernel 74 | norms1 = sum(X.^2,2); 75 | norms2 = sum(kaf.dict.^2,2); 76 | mat1 = repmat(norms1,1,D); 77 | mat2 = repmat(norms2',N,1); 78 | 79 | d2 = mat1 + mat2 - 2*X*kaf.dict'; % distance matrix 80 | Kalpha = -1./(2*kaf.kernelpars.^2); 81 | for m=1:M 82 | K(:,:,m) = exp(d2*Kalpha(m)); 83 | end 84 | otherwise % default case 85 | for m=1:M 86 | k = kernel(X,kaf.dict,kaf.kerneltype,kaf.kernelpars(m)); 87 | K(:,:,m) = k; 88 | end 89 | end 90 | end 91 | end 92 | end 93 | -------------------------------------------------------------------------------- /lib/profiler/krls_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Kernel Recursive Least Squares with Approximate 2 | % Linear Dependency 3 | % 4 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 5 | % https://github.com/steven2358/kafbox/ 6 | 7 | classdef krls_profiler < krls 8 | 9 | properties (GetAccess = 'public', SetAccess = 'private') 10 | elapsed = 0; % elapsed time 11 | prev_dict_size = 0; % previous dictionary size for growth check 12 | end 13 | 14 | methods 15 | 16 | function kaf = krls_profiler(parameters) % constructor 17 | if nargin<1, parameters = struct(); end 18 | kaf = kaf@krls(parameters); 19 | end 20 | 21 | function flops = lastflops(kaf) % flops for last iteration 22 | m = size(kaf.dict,1); 23 | if kaf.prev_dict_size < m % growing 24 | m1 = m; 25 | m2 = m - 1; 26 | m3 = m - 1; 27 | floptions = struct(... 28 | 'sum', m2^2 - m2 + m2 + m3^2 + m3 + m3, ... 
29 | 'mult', m2^2 + m2 + m3^2 + 2*m3 + m3 + 1 + m3, ... 30 | 'div', 1, ... 31 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 32 | else 33 | m1 = m + 1; 34 | m2 = m; 35 | m4 = m; 36 | floptions = struct(... 37 | 'sum', m2^2 - m2 + m2 + m4^2 + m4^2 + m4^2 + m4, ... 38 | 'mult', m2^2 + m2 + m4^2 + m4 + m4^2 + m4^2 + 2*m4, ... 39 | 'div', 1, ... 40 | sprintf('%s_kernel',kaf.kerneltype), [m1,1,size(kaf.dict,2)]); 41 | end 42 | % note: an update that grows the dictionary takes roughly 4*m^2 operations, 43 | % while an update that leaves the dictionary unchanged takes roughly 8*m^2. 44 | 45 | flops = kflops(floptions); 46 | end 47 | 48 | %% flops breakdown 49 | 50 | % k = kernel([kaf.dict; x],x,kaf.kerneltype,kaf.kernelpar); 51 | % kernel: m1 52 | 53 | % at = kaf.Kinv*kt; % check linear dependency 54 | % sum: m2^2 - m2 55 | % mult: m2^2 56 | 57 | % delta = ktt - kt'*at; % check linear dependency 58 | % sum: m2 59 | % mult: m2 60 | 61 | % kaf.Kinv = 1/delta*[delta*kaf.Kinv + at*at', -at; -at', 1]; % grow Kinv 62 | % sum: m3^2 63 | % mult: m3^2 + 2*m3 + 1 64 | % div: 1 65 | 66 | % ode = 1/delta*(y-kt'*kaf.alpha); % grow Kinv 67 | % sum: m3 68 | % mult: m3 + 1 69 | 70 | % kaf.alpha = [kaf.alpha - at*ode; ode]; % grow Kinv 71 | % sum: m3 72 | % mult: m3 73 | 74 | % q = kaf.P*at/(1+at'*kaf.P*at); % only update alpha 75 | % sum: m4^2 76 | % mult: m4^2 + m4 77 | % div: 1 78 | 79 | % kaf.P = kaf.P - q*(at'*kaf.P); % only update alpha 80 | % sum: m4^2 81 | % mult: m4^2 82 | 83 | % kaf.alpha = kaf.alpha + kaf.Kinv*q*(y-kt'*kaf.alpha); % only update alpha 84 | % sum: m4^2 + m4 85 | % mult: m4^2 + 2*m4 86 | 87 | %% 88 | 89 | function train_profiled(kaf,x,y) 90 | kaf.prev_dict_size = size(kaf.dict,1); 91 | t1 = tic; 92 | kaf.train(x,y); 93 | t2 = toc(t1); 94 | kaf.elapsed = kaf.elapsed + t2; 95 | end 96 | 97 | function bytes = lastbytes(kaf) % bytes used in last iteration 98 | m = size(kaf.dict,1); 99 | bytes = 8*(m^2 + m^2 + m + m*size(kaf.dict,2)); % 8 bytes for double precision 100 | % Kinv, P, alpha, dict 101 | end 102 | 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /demo/literature/vanvaerenbergh2012kernel/fig3.m: -------------------------------------------------------------------------------- 1 | % Partially reproduces figure 3 from "Kernel Recursive Least-Squares 2 | % Tracker for Time-Varying Regression". (Only 3 algorithms, only 1 MC 3 | % simulation.) 4 | % 5 | % MSE performance comparison of different tracking algorithms on a 6 | % communications channel that shows an abrupt change at iteration 500. 7 | % Execution time: < 1 minute (Intel Core 2 Duo). 8 | % 9 | % S. Van Vaerenbergh, M. Lazaro-Gredilla, and I. Santamaria, "Kernel 10 | % Recursive Least-Squares Tracker for Time-Varying Regression," IEEE 11 | % Transactions on Neural Networks and Learning Systems, vol. 23, no. 8, pp. 12 | % 1313-1326, Aug.
2012, http://dx.doi.org/10.1109/TNNLS.2012.2200500 13 | % 14 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab 15 | % https://github.com/steven2358/kafbox/ 16 | 17 | clear 18 | close all 19 | 20 | %% PARAMETERS 21 | 22 | N = 1500; % number of training data points 23 | N_test = 500; % number of test data points 24 | Nswitch = 500; % switch from model 1 to model 2 after Nswitch iterations 25 | B1 = [1.0000, -0.3817, -0.1411, 0.5789, 0.191]; % model 1 linear filter 26 | B2 = [1.0000, -0.0870, 0.9852, -0.2826, -0.1711]; % model 2 linear filter 27 | f = @(x) tanh(x); % Wiener system nonlinearity 28 | SNR = 20; % SNR in dB 29 | 30 | embedding = 5; % time-embedding 31 | 32 | setups{1} = swkrls(struct('c',1E-2,'M',50,'kerneltype','gauss','kernelpar',1)); 33 | setups{2} = krlst(struct('lambda',.999,'M',50,'sn2',1E-2,'kerneltype','gauss','kernelpar',1)); 34 | setups{3} = norma(struct('lambda',1E-2,'tau',1500,'eta',0.1,'kerneltype','gauss','kernelpar',1)); % large tau -> very slow 35 | 36 | %% PREPARE DATA 37 | 38 | fprintf('Fig. 3 from "Kernel Recursive Least-Squares Tracker for\n'); 39 | fprintf('Time-Varying Regression".\n') 40 | 41 | % generate uniform random input data s 42 | s = rand(N+N_test,1); 43 | s_mem = zeros(N+N_test,embedding); 44 | for i = 1:embedding 45 | s_mem(i:N+N_test,i) = s(1:N+N_test-i+1); % time-embedding 46 | end 47 | s = s_mem(1:N+N_test,:); % input data, stored in columns 48 | s_train = s_mem(1:N,:); % input train data, stored in columns 49 | s_test = s_mem(N+1:N+N_test,:); % input test data, stored in columns 50 | 51 | % generate internal data x and output y 52 | X1 = s_mem(1:Nswitch,:)*B1'; 53 | X2 = s_mem(Nswitch+1:N,:)*B2'; 54 | X = [X1;X2]; 55 | Y_nn = f(X); % noiseless Y 56 | vary = var(Y_nn); 57 | noisevar = 10^(-SNR/10)*vary; 58 | noise = sqrt(noisevar)*randn(N,1); 59 | Y = Y_nn + noise; % noisy output data 60 | 61 | X_test1 = s_mem(N+1:N+N_test,:)*B1'; 62 | X_test2 = s_mem(N+1:N+N_test,:)*B2'; 63 | noise_test1 = sqrt(noisevar)*randn(N_test,1); 64 | noise_test2 = sqrt(noisevar)*randn(N_test,1); 65 | Y_test1 = f(X_test1) + noise_test1; % noisy output test data, model 1 66 | Y_test2 = f(X_test2) + noise_test2; % noisy output test data, model 2 67 | 68 | %% RUN ALGORITHMS 69 | t1 = tic; 70 | 71 | num_setup = length(setups); 72 | MSE = zeros(N,num_setup); 73 | titles = cell(num_setup,1); 74 | 75 | for setup_ind=1:length(setups) 76 | t2 = tic; 77 | kaf = setups{setup_ind}; 78 | 79 | titles{setup_ind} = upper(class(kaf)); 80 | fprintf('%s\t',titles{setup_ind}); 81 | for n=1:N 82 | if ~mod(n,round(N/10)), fprintf('.'); end 83 | if n<=Nswitch 84 | Y_test = Y_test1; 85 | else 86 | Y_test = Y_test2; 87 | end 88 | Y_est = kaf.evaluate(s_test); % test on test set 89 | err = Y_test - Y_est; 90 | MSE(n,setup_ind) = mean(err.^2); 91 | 92 | kaf.train(s_train(n,:),Y(n)); % train with one input-output pair 93 | end 94 | fprintf(' %.2f seconds\n',toc(t2)); 95 | end 96 | 97 | toc(t1) 98 | %% OUTPUT 99 | 100 | figure 101 | plot(10*log10(MSE),'LineWidth',2); grid on 102 | xlabel('iteration') 103 | ylabel('MSE (dB)') 104 | legend(titles) 105 | -------------------------------------------------------------------------------- /lib/klms_csal1.m: -------------------------------------------------------------------------------- 1 | % Kernel Least Mean Squares algorithm with Coherence-Sparsification 2 | % criterion and Adaptive L1-norm regularization 3 | % 4 | % Wei Gao, Jie Chen, Cedric Richard, Jianguo Huang, and Remi Flamary, 5 | % "Kernel LMS algorithm with forward-backward
splitting for dictionary 6 | % learning," 2013 IEEE International Conference on Acoustics, Speech, and 7 | % Signal Processing (ICASSP 2013), Vancouver, Canada, May 2013. 8 | % http://dx.doi.org/10.1109/ICASSP.2013.6638763 9 | % 10 | % Remark: Code contributed by Wei Gao and Cedric Richard. 11 | % 12 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 13 | % https://github.com/steven2358/kafbox/ 14 | 15 | classdef klms_csal1 < kernel_adaptive_filter 16 | 17 | properties (GetAccess = 'public', SetAccess = 'private') % parameters 18 | eta = .1; % step-size 19 | mu0 = .95; % threshold for coherence criterion 20 | lambda = 5E-4; % sparsification threshold 21 | eps_alpha = 1E-6; % constant to prevent denominator from vanishing 22 | kerneltype = 'gauss'; % kernel type 23 | kernelpar = .5; % kernel parameter 24 | end 25 | 26 | properties (GetAccess = 'public', SetAccess = 'private') % variables 27 | dict = []; % dictionary 28 | modict = []; % modulus of the dictionary elements 29 | alpha = []; % expansion coefficients 30 | end 31 | 32 | methods 33 | function kaf = klms_csal1(parameters) % constructor 34 | if (nargin > 0) % copy valid parameters 35 | for fn = fieldnames(parameters)' 36 | if ismember(fn,fieldnames(kaf)) 37 | kaf.(fn{1}) = parameters.(fn{1}); 38 | end 39 | end 40 | end 41 | end 42 | 43 | function y_est = evaluate(kaf,x) % evaluate the algorithm 44 | if size(kaf.dict,1)>0 45 | k = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 46 | y_est = k'*kaf.alpha; 47 | else 48 | y_est = zeros(size(x,1),1); 49 | end 50 | end 51 | 52 | function train(kaf,x,y) % train the algorithm 53 | if size(kaf.dict,2)==0 % initialize 54 | kd = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 55 | kaf.dict = x; 56 | kaf.modict = sqrt(kd); 57 | kaf.alpha = y/kd; 58 | w = 1./(abs(kaf.alpha) + kaf.eps_alpha); % for sparsification 59 | else 60 | w = 1./(abs(kaf.alpha) + kaf.eps_alpha); % for sparsification 61 | 62 | kd = kernel(kaf.dict,x,kaf.kerneltype,kaf.kernelpar); 63 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 64 | C = kd./(sqrt(kx)*kaf.modict); % coherence 65 | 66 | if (max(C) <= kaf.mu0) % coherence criterion 67 | kaf.dict = [kaf.dict; x]; % order increase 68 | kaf.modict = [kaf.modict; sqrt(kx)]; 69 | kaf.alpha = [kaf.alpha; 0]; % order increase 70 | kx = kernel(x,x,kaf.kerneltype,kaf.kernelpar); 71 | kd = [kd; kx]; 72 | w = [w; 1]; 73 | end 74 | end 75 | 76 | y_est = kaf.evaluate(x); 77 | e = y - y_est; 78 | kaf.alpha = kaf.alpha + kaf.eta * kd * e; 79 | 80 | kaf.alpha = kaf.prox(kaf.lambda,kaf.alpha,w); 81 | 82 | % prune 83 | idx = find(kaf.alpha == 0); 84 | if ~isempty(idx) 85 | kaf.dict(idx,:) = []; 86 | kaf.modict(idx,:) = []; 87 | kaf.alpha(idx,:) = []; 88 | end 89 | end 90 | end 91 | 92 | methods (Static = true) 93 | % proximal operator for l1 norm 94 | function alphap = prox(lambda,alpha,w) 95 | alphap = sign(alpha).*max(abs(alpha) - lambda*w, 0); 96 | end 97 | end 98 | end 99 | -------------------------------------------------------------------------------- /demo/demo_profiler_prediction_lorenz.m: -------------------------------------------------------------------------------- 1 | % Demo: demonstration of the kernel adaptive filter algorithm profiler. 2 | % Compares the cost vs prediction error tradeoffs and convergence speeds 3 | % for several algorithms on the Lorenz data set.
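%
% Note on the setups below: each algorithm's options include a
% 'sweep_par'/'sweep_val' pair, and the profiler evaluates each value in
% 'sweep_val' separately, tracing out a cost-versus-error curve per
% algorithm. A minimal one-algorithm setup, as an illustrative sketch
% added by the editor (values are assumptions), could look like:
%
%   algorithms{1}.name = 'QKLMS';
%   algorithms{1}.class = 'qklms';
%   algorithms{1}.options = struct('eta',.5,'sweep_par','epsu',...
%       'sweep_val',[1 5 10],'kerneltype','gauss','kernelpar',32);
%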
4 | 5 | clear 6 | close all 7 | 8 | %% PARAMETERS 9 | 10 | output_dir_default = 'profiler_output'; 11 | 12 | % data and algorithm setup 13 | data.name = 'Lorenz'; 14 | data.N = 10000; % number of data points 15 | data.embedding = 6; % time embedding 16 | data.offset = 50; % apply offset per simulation 17 | 18 | sim_opts.numsim = 5; % 10 minutes per simulation on an Intel Core 2 Duo 19 | sim_opts.error_measure = 'MSE'; 20 | 21 | i=0; % initialize setups 22 | 23 | %% KRLS 24 | i=i+1; 25 | algorithms{i}.name = 'KRLS'; 26 | algorithms{i}.class = 'krls'; 27 | algorithms{i}.figstyle = struct('color',[.75 0 .75],'marker','^'); 28 | algorithms{i}.options = struct('sweep_par','nu','sweep_val',[1E-4 2E-4 1E-3 .01 .05 .1],... 29 | 'kerneltype','gauss','kernelpar',32); 30 | 31 | %% QKLMS 32 | i=i+1; 33 | algorithms{i}.name = 'QKLMS'; 34 | algorithms{i}.class = 'qklms'; 35 | algorithms{i}.figstyle = struct('color',[1 0 0],'marker','o'); 36 | algorithms{i}.options = struct('eta',0.5,'sweep_par','epsu','sweep_val',[1 2 5 10 12 15 18],... 37 | 'kerneltype','gauss','kernelpar',32); 38 | 39 | %% SW-KRLS 40 | i=i+1; 41 | algorithms{i}.name = 'SW-KRLS'; 42 | algorithms{i}.class = 'swkrls'; 43 | algorithms{i}.figstyle = struct('color',[0 .75 .75],'marker','s'); 44 | algorithms{i}.options = struct('c',1E-6,'sweep_par','M','sweep_val',[1 2 3 4 5 10 20 50 100 200],... 45 | 'kerneltype','gauss','kernelpar',32); 46 | 47 | %% FB-KRLS 48 | i=i+1; 49 | algorithms{i}.name = 'FB-KRLS'; 50 | algorithms{i}.class = 'fbkrls'; 51 | algorithms{i}.figstyle = struct('color',[0 .5 0],'marker','d'); 52 | algorithms{i}.options = struct('lambda',1E-6,'sweep_par','M','sweep_val',[10 15 20 25 30 50 100],... 53 | 'kerneltype','gauss','kernelpar',32); 54 | 55 | %% KRLS-T 56 | i=i+1; 57 | algorithms{i}.name = 'KRLS-T'; 58 | algorithms{i}.class = 'krlst'; 59 | algorithms{i}.figstyle = struct('color',[0 0 1],'marker','+'); 60 | algorithms{i}.options = struct('sn2',1E-6,'lambda',1,'sweep_par','M','sweep_val',[10 15 17 20 24 30 50 100 200],...
61 | 'kerneltype','gauss','kernelpar',32); 62 | 63 | %% PROGRAM 64 | 65 | fprintf('Running profiler for %d algorithms on %s data.\n',i,data.name); 66 | % output_dir = input('Folder for storing results (ENTER = subfolder here): ','s'); 67 | % if isempty(output_dir), 68 | % output_dir = output_dir_default; 69 | % fprintf('Using default folder "%s" for storing results.\n', output_dir_default); 70 | % end 71 | output_dir = fullfile(fileparts(mfilename('fullpath')),'results'); 72 | 73 | t1 = tic; 74 | [data,algorithms,results] = kafbox_profiler(data,sim_opts,algorithms,output_dir); 75 | t2 = toc(t1); 76 | 77 | fprintf('Elapsed time: %d seconds\n',ceil(t2)); 78 | 79 | %% OUTPUT 80 | 81 | mse_curves = kafbox_profiler_msecurves(results); 82 | 83 | resinds = [1,1;2,1;3,10;4,7;5,9]; % result indices 84 | [f0,h0] = kafbox_profiler_plotconvergence(algorithms,mse_curves,resinds); 85 | 86 | [f1,h1] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'bytes','flops'}); 87 | object_handle = get(h1); 88 | legend(object_handle.String,'Location','NW'); % move legend 89 | 90 | [f2,h2] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'ssmse','flops'}); 91 | 92 | [f3,h3] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'ssmse','bytes'}); 93 | 94 | [f4,h4] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'ssmse','ssplus'},1); 95 | object_handle = get(h4); 96 | legend(object_handle.String,'Location','SW'); % move legend 97 | 98 | [f5,h5] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'ssmse','timeto'},-20); 99 | object_handle = get(h5); 100 | legend(object_handle.String,'Location','NW'); % move legend 101 | 102 | [f6,h6] = kafbox_profiler_plotresults(algorithms,mse_curves,results,{'ssmse','timetopct'},90); 103 | object_handle = get(h6); 104 | legend(object_handle.String,'Location','SW'); % move legend 105 | -------------------------------------------------------------------------------- /lib/profiler/krlst_profiler.m: -------------------------------------------------------------------------------- 1 | % Profiler extension for Kernel Recursive Least-Squares Tracker algorithm 2 | % 3 | % This file is part of the Kernel Adaptive Filtering Toolbox for Matlab. 4 | % https://github.com/steven2358/kafbox/ 5 | 6 | classdef krlst_profiler < krlst 7 | 8 | properties (GetAccess = 'public', SetAccess = 'private') 9 | elapsed = 0; % elapsed time 10 | end 11 | 12 | methods 13 | 14 | function kaf = krlst_profiler(parameters) % constructor 15 | if nargin<1, parameters = struct(); end 16 | kaf = kaf@krlst(parameters); 17 | end 18 | 19 | function flops = lastflops(kaf) % flops for last iteration 20 | m = size(kaf.dict,1); 21 | if kaf.prune 22 | m1 = m; 23 | else 24 | m1 = m - 1; 25 | end 26 | if kaf.reduced 27 | m3 = 0; 28 | else 29 | m3 = m; 30 | end 31 | m2 = m1 + 1; 32 | 33 | floptions = struct(... 34 | 'sum', m1^2 + m1 + 1 + m1^2 - m1 + m1 - 1 + m1 + m1 - 1 + m1 + 1 + m2^2 + m2 + 1 + m2^2 + 2 + 1 + m2^2 - m2 + m3^2, ... 35 | 'mult', 2*m1^2 + m1^2 + m1 + m1 + m1 + m1 + m2^2 + 1 + 1 + m2^2 + 3 + m2^2 + m3^2, ... 36 | 'div', 1 + 1 + 2 + m2 + m3, ... 
37 | sprintf('%s_kernel',kaf.kerneltype), [m1^2 + 1,1,size(kaf.dict,2)]); 38 | 39 | flops = kflops(floptions); 40 | end 41 | 42 | %% flops breakdown 43 | 44 | % K = kernel(kaf.dict,kaf.dict,kaf.kerneltype,kaf.kernelpar); 45 | % kernel: m1^2 46 | 47 | % kaf.Sigma = kaf.lambda*kaf.Sigma + (1-kaf.lambda)*K + kaf.jitter*eye(m); % forget 48 | % sum: m1^2 + m1 + 1 49 | % mult: 2*m1^2 50 | 51 | % kaf.mu = sqrt(kaf.lambda)*kaf.mu; % square can be pre-calculated 52 | % mult: m1 53 | 54 | % k = kernel([kaf.dict; x],x,kaf.kerneltype,kaf.kernelpar); 55 | % kernel: m1 56 | 57 | % q = kaf.Q*kt; 58 | % sum: m1^2 - m1 59 | % mult: m1^2 60 | 61 | % y_mean = q'*kaf.mu; % predictive mean 62 | % sum: m1 - 1 63 | % mult: m1 64 | 65 | % gamma2 = ktt - kt'*q; gamma2(gamma2<0)=0; % projection uncertainty 66 | % sum: m1 67 | % mult: m1 68 | 69 | % h = kaf.Sigma*q; 70 | % sum: m1 - 1 71 | % mult: m1 72 | 73 | % sf2 = gamma2 + q'*h; sf2(sf2<0)=0; % noiseless prediction variance 74 | % sum: m1 75 | % mult: m1 76 | 77 | % sy2 = kaf.sn2 + sf2; 78 | % sum: 1 79 | 80 | % kaf.Q = [kaf.Q zeros(m,1);zeros(1,m) 0] + 1/gamma2*(p*p'); with p = [q; -1]; 81 | % sum: m2^2 82 | % mult: m2^2 + 1 83 | % div: 1 84 | 85 | % kaf.mu = [kaf.mu; y_mean] + (y - y_mean)/sy2*p; % with p = [h; sf2]; 86 | % sum: m2 + 1 87 | % mult: m2 + 1 88 | % div: 1 89 | 90 | % kaf.Sigma = [kaf.Sigma h; h' sf2] - 1/sy2*(p*p'); % posterior covariance 91 | % sum: m2^2 92 | % mult: m2^2 93 | 94 | % kaf.nums02ML = kaf.nums02ML + kaf.lambda*(y - y_mean)^2/sy2; 95 | % sum: 2 96 | % mult: 3 97 | 98 | % kaf.dens02ML = kaf.dens02ML + kaf.lambda; 99 | % sum: 1 100 | 101 | % kaf.s02 = kaf.nums02ML/kaf.dens02ML; 102 | % div: 1 103 | 104 | % errors = (kaf.Q*kaf.mu)./diag(kaf.Q); % MSE pruning criterion 105 | % sum: m2^2 - m2 106 | % mult: m2^2 107 | % div: m2 108 | 109 | % kaf.Q = kaf.Q - (Qs*Qs')/qs; % if removed element is not the last 110 | % sum: m3^2 111 | % mult: m3^2 112 | % div: m3 113 | 114 | %% 115 | 116 | function train_profiled(kaf,x,y) 117 | % kaf.prev_dict_size = size(kaf.dict,1); 118 | t1 = tic; 119 | kaf.train(x,y); 120 | t2 = toc(t1); 121 | kaf.elapsed = kaf.elapsed + t2; 122 | end 123 | 124 | function bytes = lastbytes(kaf) % bytes used in last iteration 125 | m = size(kaf.dict,1); 126 | bytes = 8*(m^2 + m^2 + m + 2 + m*size(kaf.dict,2)); % 8 bytes for double precision 127 | % Q, Sigma, mu, nums02ML, dens02ML, dict 128 | end 129 | 130 | end 131 | end 132 | --------------------------------------------------------------------------------
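A closing note on the profiler extensions collected above: each *_profiler class subclasses its base algorithm, wraps train in train_profiled to accumulate wall-clock time in the elapsed property, and reports per-iteration cost analytically through lastflops (an operation-count struct handed to kflops) and lastbytes. A minimal driving loop, as an illustrative sketch in which the data X, y and the parameter values are assumptions:

    kaf = krlst_profiler(struct('M',50,'kerneltype','gauss','kernelpar',32));
    N = size(X,1);
    flops_per_step = zeros(N,1);
    bytes_per_step = zeros(N,1);
    for n = 1:N
        kaf.train_profiled(X(n,:),y(n)); % timed training step
        flops_per_step(n) = kaf.lastflops(); % analytic cost of this step
        bytes_per_step(n) = kaf.lastbytes(); % memory footprint after this step
    end
    fprintf('total training time: %.2f seconds\n',kaf.elapsed);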