├── AFEm.m
├── GWO.m
├── MLP_kernel.m
├── RBF_kernel.m
├── README.md
├── bay_errorbar.m
├── bay_initlssvm.m
├── bay_lssvm.m
├── bay_lssvmARD.m
├── bay_modoutClass.m
├── bay_optimize.m
├── bay_rr.m
├── bitreverse32.m
├── changelssvm.m
├── cilssvm.m
├── code.m
├── code_ECOC.m
├── code_MOC.m
├── code_OneVsAll.m
├── code_OneVsOne.m
├── codedist_bay.m
├── codedist_hamming.m
├── codedist_loss.m
├── codelssvm.m
├── crossvalidate.m
├── crossvalidatelssvm.m
├── csa.m
├── demo_fixedclass.m
├── demo_fixedsize.m
├── demo_yinyang.m
├── democlass.m
├── democonfint.m
├── demofun.m
├── demomodel.m
├── demomulticlass.m
├── denoise_kpca.m
├── eign.m
├── gcrossvalidate.m
├── gcrossvalidatelssvm.m
├── gridsearch.m
├── initlssvm.m
├── kentropy.m
├── kernel_matrix.m
├── kernel_matrix2.m
├── kpca.m
├── latentlssvm.m
├── latticeseq_b2.m
├── leaveoneout.m
├── leaveoneoutlssvm.m
├── lin_kernel.m
├── linesearch.m
├── linf.m
├── lssvm.m
├── lssvmMATLAB.m
├── lssvm_fitness.m
├── mae.m
├── main.m
├── medae.m
├── misclass.m
├── mse.m
├── plotlssvm.m
├── poly_kernel.m
├── postlssvm.m
├── predict.m
├── predlssvm.m
├── preimage_rbf.m
├── prelssvm.m
├── progress.m
├── range.m
├── rcrossvalidate.m
├── rcrossvalidatelssvm.m
├── ridgeregress.m
├── robustlssvm.m
├── roc.m
├── rsimplex.m
├── simann.m
├── simlssvm.m
├── simplex.m
├── smootherlssvm.m
├── tbform.m
├── trainlssvm.m
├── trimmedmse.m
├── tunelssvm.m
├── weightingscheme.m
├── windowize.m
├── windowizeNARX.m
└── 数据集.xlsx

/AFEm.m:
--------------------------------------------------------------------------------
function [features,eigvec,eigvals] = AFEm(Xs,kernel, kernel_pars,X,type,nb,eigvec,eigvals)
% Automatic Feature Extraction by the Nystrom method
%
% >> features = AFEm(Xs, kernel, sig2, X)
%
% Description
% Using the Nystrom approximation method, the mapping of data to
% the feature space can be evaluated explicitly. This gives the
% features that one can use for a linear regression or
% classification. The decomposition of the mapping to the feature
% space relies on the eigenvalue decomposition of the kernel
% matrix. The Matlab ('eigs') or Nystrom ('eign') approximation
% using the nb most important eigenvectors/eigenvalues can be
% used. The eigenvalue decomposition is not re-calculated if it is
% passed as an extra argument. This routine internally calls a cmex file.
%
% Full syntax
%
% >> [features, U, lam] = AFEm(Xs, kernel, sig2, X)
% >> [features, U, lam] = AFEm(Xs, kernel, sig2, X, type)
% >> [features, U, lam] = AFEm(Xs, kernel, sig2, X, type, nb)
% >> features = AFEm(Xs, kernel, sig2, X, [],[], U, lam)
%
% Outputs
% features : N x nb matrix with extracted features
% U(*) : Nc x nb matrix with eigenvectors
% lam(*) : nb x 1 vector with eigenvalues
% Inputs
% Xs : Nc x d matrix with the subsample on which the approximation is built
% kernel : Name of the used kernel (e.g. 'RBF_kernel')
% sig2 : parameter of the used kernel
% X : N x d matrix with the data from which the features are extracted
% type(*): 'eig'(*), 'eigs' or 'eign'
% nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation
% U(*) : Nc x nb matrix with eigenvectors
% lam(*) : nb x 1 vector with eigenvalues
%
% See also:
% kernel_matrix, RBF_kernel, demo_fixedsize

% Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
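% Example sketch (assumes data X, Y, a test set Xt and tuned gam, sig2 are
% in the workspace; the subsample size 100 is illustrative): fixed-size
% style regression via Nystrom features plus linear ridge regression,
% reusing the eigendecomposition for the test set:
%
% >> Xs = X(1:100,:); % subsample carrying the approximation
% >> [feats, U, lam] = AFEm(Xs, 'RBF_kernel', sig2, X, 'eig');
% >> [w, b] = ridgeregress(feats, Y, gam); % linear model in feature space
% >> featst = AFEm(Xs, 'RBF_kernel', sig2, Xt, [], [], U, lam);
% >> Yht = featst*w + b; % predictions for the test set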
N = size(X,1);
Nc = size(Xs,1);

eval('type;','type=''eig'';');
if ~(strcmp(type,'eig') || strcmp(type,'eigs') || strcmp(type,'eign') )
    error('Type needs to be ''eig'', ''eigs'' or ''eign''...');
end


% eigenvalue decomposition to do..
if nargin<=7,
    omega = kernel_matrix(Xs, kernel, kernel_pars);
    if strcmp(type,'eig'),
        [eigvec,eigvals] = eig(omega+2*eye(size(omega,1))); % + jitter factor
        eigvals = diag(eigvals);
        clear omega
    elseif strcmp(type,'eigs'),
        eval('nb;','nb=min(size(omega,1),10);'); options.disp = 0;
        [eigvec,eigvals] = eigs(omega+2*eye(size(omega,1)),nb,'lm',options); clear omega % + jitter factor
    elseif strcmp(type,'eign'),
        eval('nb;','nb=min(size(omega,1),10);');
        [eigvec,eigvals] = eign(omega+2*eye(size(omega,1)),nb); clear omega % + jitter factor
    end
    eigvals = (eigvals-2)/Nc;

    peff = eigvals>eps;
    eigvals = eigvals(peff);
    eigvec = eigvec(:,peff); clear peff

end

if strcmp(kernel,'RBF_kernel')
    omegaN = sum(X.^2,2)*ones(1,Nc);
    omegaN = omegaN + ones(N,1)*sum(Xs.^2,2)';
    omegaN = omegaN -2*X*Xs'; clear X Xs
    omegaN = exp(-omegaN./kernel_pars);

elseif strcmp(kernel,'lin_kernel')
    omegaN = X*Xs'; clear X Xs

elseif strcmp(kernel,'poly_kernel')
    omegaN = X*Xs'; clear X Xs
    omegaN = (omegaN + kernel_pars(1)).^kernel_pars(2);
else
    disp('kernel_type unknown')
    return;
end

%A=sqrt(Nc) ./ sqrt(eigvals); clear eigvals
features = omegaN*eigvec; clear omegaN
features = repmat((1 ./ sqrt(eigvals))',N,1).*features;
--------------------------------------------------------------------------------
/GWO.m:
--------------------------------------------------------------------------------
function [gam,sig2]=GWO(SearchAgents_no,Max_iter,lb,ub,dim,inputn_train,outputn_train,inputn_test,outputn_test)
% Grey Wolf Optimizer: tunes the LS-SVM hyperparameters [gam, sig2] by
% minimizing lssvm_fitness over the box [lb, ub].
fobj=@(x)lssvm_fitness(x,inputn_train,outputn_train,inputn_test,outputn_test);

% initialize alpha, beta, and delta_pos
Alpha_pos=zeros(1,dim);
Alpha_score=inf; %change this to -inf for maximization problems

Beta_pos=zeros(1,dim);
Beta_score=inf; %change this to -inf for maximization problems

Delta_pos=zeros(1,dim);
Delta_score=inf; %change this to -inf for maximization problems

%Initialize the positions of search agents
Positions=initialization(SearchAgents_no,dim,ub,lb);

Convergence_curve=zeros(1,Max_iter);

l=0;% Loop counter

% Main loop
while l<Max_iter
    for i=1:size(Positions,1)

        % Return back the search agents that go beyond the boundaries of the search space
        Flag4ub=Positions(i,:)>ub;
        Flag4lb=Positions(i,:)<lb;
        Positions(i,:)=(Positions(i,:).*(~(Flag4ub+Flag4lb)))+ub.*Flag4ub+lb.*Flag4lb;

        % Calculate objective function for each search agent
        fitness=fobj(Positions(i,:));

        % Update Alpha, Beta, and Delta
        if fitness<Alpha_score
            Alpha_score=fitness; % Update alpha
            Alpha_pos=Positions(i,:);
        end

        if fitness>Alpha_score && fitness<Beta_score
            Beta_score=fitness; % Update beta
            Beta_pos=Positions(i,:);
        end

        if fitness>Alpha_score && fitness>Beta_score && fitness<Delta_score
            Delta_score=fitness; % Update delta
            Delta_pos=Positions(i,:);
        end
    end

    a=2-l*((2)/Max_iter); % a decreases linearly from 2 to 0

    % Update the position of the search agents, driven by alpha, beta and delta
    for i=1:size(Positions,1)
        for j=1:size(Positions,2)

            r1=rand(); r2=rand();
            A1=2*a*r1-a;
            C1=2*r2;
            D_alpha=abs(C1*Alpha_pos(j)-Positions(i,j));
            X1=Alpha_pos(j)-A1*D_alpha;

            r1=rand(); r2=rand();
            A2=2*a*r1-a;
            C2=2*r2;
            D_beta=abs(C2*Beta_pos(j)-Positions(i,j));
            X2=Beta_pos(j)-A2*D_beta;

            r1=rand(); r2=rand();
            A3=2*a*r1-a;
            C3=2*r2;
            D_delta=abs(C3*Delta_pos(j)-Positions(i,j));
            X3=Delta_pos(j)-A3*D_delta;

            Positions(i,j)=(X1+X2+X3)/3;
        end
    end

    l=l+1;
    Convergence_curve(l)=Alpha_score;
end

% the alpha wolf carries the best hyperparameters found
gam=Alpha_pos(1);
sig2=Alpha_pos(2);

% This function initializes the first population of search agents
function Positions=initialization(SearchAgents_no,dim,ub,lb)

Boundary_no= size(ub,2); % number of boundaries

% If the boundaries of all variables are equal
% and the user enters a single number for both ub and lb
if Boundary_no==1
    Positions=rand(SearchAgents_no,dim).*(ub-lb)+lb;
end

% If each variable has a different lb and ub
if Boundary_no>1
    for i=1:dim
        ub_i=ub(i);
        lb_i=lb(i);
        Positions(:,i)=rand(SearchAgents_no,1).*(ub_i-lb_i)+lb_i;
    end
end
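% Usage sketch (values are illustrative; inputn_train/inputn_test and the
% corresponding outputs are the normalized splits prepared in main.m,
% rows = samples assumed):
%
% >> lb = [0.01 0.01]; ub = [1000 100]; % search box for [gam, sig2]
% >> [gam, sig2] = GWO(20, 50, lb, ub, 2, inputn_train, outputn_train, ...
% inputn_test, outputn_test);
% >> [alpha, b] = trainlssvm({inputn_train, outputn_train, 'c', gam, sig2, 'RBF_kernel'});
--------------------------------------------------------------------------------
/MLP_kernel.m: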
-------------------------------------------------------------------------------- 1 | function x = MLP_kernel(a,b, par) 2 | % Multi Layer Perceptron kernel function for implicit higher dimension mapping 3 | % 4 | % x = MLP_kernel(a,b,[s,t]) 5 | % 6 | % 'a' can only contain one datapoint in a row, 'b' can contain N 7 | % datapoints of the same dimension as 'a'. 8 | % 9 | % x = tanh(s*a'b+t^2) 10 | % 11 | % see also: 12 | % poly_kernel, lin_kernel, RBF_kernel, trainlssvm, simlssvm 13 | 14 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 15 | 16 | if length(par)==1, par(2) = 1; end 17 | x = zeros(size(b,1),1); 18 | for i=1:size(b,1), 19 | dp = a*b(i,:)'; 20 | x(i,1) = tanh(par(1)*dp + par(2)^2); 21 | end -------------------------------------------------------------------------------- /RBF_kernel.m: -------------------------------------------------------------------------------- 1 | function x = RBF_kernel(a,b, sigma2) 2 | % Radial Basis Function (RBF) kernel function for implicit higher dimension mapping 3 | % 4 | % X = RBF_kernel(a,b,sig2) 5 | % 6 | % 'sig2' contains the SQUARED variance of the RBF function: 7 | % X = exp(-||a-b||.^2/sig2) 8 | % 9 | % 'a' can only contain one datapoint in a row, 'b' can contain N 10 | % datapoints of the same dimension as 'a'. If the row-vector 'sig2' 11 | % contains i=1 to 'dimension' values, each dimension i has a separate 'sig2(i)'. 12 | % 13 | % see also: 14 | % poly_kernel, lin_kernel, MLP_kernel, trainlssvm, simlssvm 15 | 16 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 17 | 18 | 19 | 20 | x = zeros(size(b,1),1); 21 | 22 | % ARD for different dimensions. 23 | if size(sigma2,2) == length(a), 24 | % rescaling ~ dimensionality 25 | [~,d] = size(b); 26 | for i=1:size(b,1), 27 | dif = a-b(i,:); 28 | x(i,1) = exp( -(sum((dif.*dif)./(sigma2.*d))) ); 29 | end 30 | else 31 | % a single kernel parameter or one for every inputvariable 32 | for i=1:size(b,1), 33 | dif = a-b(i,:); 34 | x(i,1) = exp( -(sum((dif.*dif)./sigma2(1,1))) ); 35 | end 36 | end -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Data-classification-prediction-based-on-Grey-Wolf-Optimization-Algorithm-GWO_LSSVM-Adaboost 2 | Data classification prediction based on the Grey Wolf Optimization Algorithm GWO_LSSVM-Adaboost (Mathematical Modeling Competition Code) 3 | 4 | ![image](https://github.com/user-attachments/assets/71a7ad72-3d2b-44f1-a1cf-6e3a330506f0) 5 | ![image](https://github.com/user-attachments/assets/c3fb8060-552a-400d-9605-aa385c0a458b) 6 | ![image](https://github.com/user-attachments/assets/8e3aa4ab-c8a7-4cf9-aaae-1d6c6c44fca2) 7 | ![image](https://github.com/user-attachments/assets/b02462a7-07d0-4554-a69c-1f3b81331e65) 8 | 9 | -------------------------------------------------------------------------------- /bay_errorbar.m: -------------------------------------------------------------------------------- 1 | function [sig_e, bay,model] = bay_errorbar(model,Xt, type, nb, bay) 2 | % Compute the error bars for a one dimensional regression problem 3 | % 4 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2}, Xt) 5 | % >> sig_e = bay_errorbar(model, Xt) 6 | % 7 | % The computation takes into account the estimated noise variance 8 | % and the uncertainty of the model parameters, estimated by 9 | % Bayesian inference. 
sig_e is the estimated standard deviation of 10 | % the error bars of the points Xt. A plot is obtained by replacing 11 | % Xt by the string 'figure'. 12 | % 13 | % 14 | % Full syntax 15 | % 16 | % 1. Using the functional interface: 17 | % 18 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, Xt) 19 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, Xt, type) 20 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, Xt, type, nb) 21 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, 'figure') 22 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, 'figure', type) 23 | % >> sig_e = bay_errorbar({X,Y,'function',gam,sig2,kernel,preprocess}, 'figure', type, nb) 24 | % 25 | % Outputs 26 | % sig_e : Nt x 1 vector with the [$ \sigma^2$] errorbands of the test data 27 | % Inputs 28 | % X : N x d matrix with the inputs of the training data 29 | % Y : N x 1 vector with the inputs of the training data 30 | % type : 'function estimation' ('f') 31 | % gam : Regularization parameter 32 | % sig2 : Kernel parameter 33 | % kernel(*) : Kernel type (by default 'RBF_kernel') 34 | % preprocess(*) : 'preprocess'(*) or 'original' 35 | % Xt : Nt x d matrix with the inputs of the test data 36 | % type(*) : 'svd'(*), 'eig', 'eigs' or 'eign' 37 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 38 | % 39 | % 2. Using the object oriented interface: 40 | % 41 | % >> [sig_e, bay, model] = bay_errorbar(model, Xt) 42 | % >> [sig_e, bay, model] = bay_errorbar(model, Xt, type) 43 | % >> [sig_e, bay, model] = bay_errorbar(model, Xt, type, nb) 44 | % >> [sig_e, bay, model] = bay_errorbar(model, 'figure') 45 | % >> [sig_e, bay, model] = bay_errorbar(model, 'figure', type) 46 | % >> [sig_e, bay, model] = bay_errorbar(model, 'figure', type, nb) 47 | % 48 | % Outputs 49 | % sig_e : Nt x 1 vector with the [$ \sigma^2$] errorbands of the test data 50 | % model(*) : Object oriented representation of the LS-SVM model 51 | % bay(*) : Object oriented representation of the results of the Bayesian inference 52 | % Inputs 53 | % model : Object oriented representation of the LS-SVM model 54 | % Xt : Nt x d matrix with the inputs of the test data 55 | % type(*) : 'svd'(*), 'eig', 'eigs' or 'eign' 56 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 57 | % 58 | % See also: 59 | % bay_lssvm, bay_optimize, bay_modoutClass, plotlssvm 60 | 61 | % Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 62 | 63 | if iscell(model), model = initlssvm(model{:}); end 64 | 65 | 66 | if model.type(1)~='f', 67 | error(['confidence bounds only for function estimation. For' ... 68 | ' classification, use ''bay_modoutClass(...)'' instead;']); 69 | end 70 | 71 | eval('type;','type=''svd'';'); 72 | eval('nb;','nb=model.nb_data;'); 73 | if ~(strcmpi(type,'svd') | strcmpi(type,'eig') | strcmpi(type,'eigs') | strcmpi(type,'eign')), 74 | error('Eigenvalue decomposition via ''svd'', ''eig'', ''eigs'' or ''eign''...'); 75 | end 76 | 77 | if strcmpi(type,'eign') 78 | warning('The resulting errorbars are most probably not very usefull...'); 79 | end 80 | 81 | if ~isstr(Xt), 82 | 83 | eval('[sig_e, bay] = bay_confb(model,Xt,type,nb,bay);',... 
84 | '[sig_e, bay] = bay_confb(model,Xt,type,nb);'); 85 | 86 | else 87 | 88 | grid = 50; 89 | [X,Y] = postlssvm(model,model.xtrain,model.ytrain); 90 | eval('[sig_e, bay] = bay_confb(model,X,type,nb,bay);',... 91 | '[sig_e, bay] = bay_confb(model,X,type,nb);'); 92 | 93 | % plot the curve including confidence bound 94 | sige = sqrt(sig_e); 95 | Yt = simlssvm(model,X); 96 | 97 | figure; 98 | hold on; 99 | title(['LS-SVM_{\gamma=' num2str(model.gam(1)) ', \sigma^2=' num2str(model.kernel_pars(1)) ... 100 | '}^{' model.kernel_type(1:3) '} and its 95% (2\sigma) error bands']); 101 | 102 | if model.x_dim==1, 103 | xlabel('X'); 104 | ylabel('Y'); 105 | [~,si] = sort(X); 106 | plot(X(si),Yt(si),'k'); hold on; 107 | plot(X(si),Yt(si)+2.*sige(si),'-.r'); 108 | plot(X(si),Yt(si)-2.*sige(si),':r'); 109 | plot(X(si),Y(si),'k*'); hold off; 110 | else 111 | xlabel('time'); 112 | ylabel('Y'); 113 | plot(Yt,'k'); hold on; 114 | plot(Yt+2.*sige,'-.r'); 115 | plot(Yt-2.*sige,':r'); 116 | plot(Y,'k*'); hold off; 117 | end 118 | 119 | end 120 | 121 | 122 | 123 | function [sig_e, bay] = bay_confb(model,X,type,nb,bay) 124 | % see formula's thesis TvG blz 126 125 | 126 | 127 | nD = size(X,1); 128 | %tol = .0001; 129 | 130 | % 131 | % calculate the eigenvalues 132 | % 133 | eval('bay;','[c1,c2,c3,bay] = bay_lssvm(model,1,type,nb);'); 134 | omega = kernel_matrix(model.xtrain(model.selector,1:model.x_dim), ... 135 | model.kernel_type, model.kernel_pars); 136 | oo = ones(1,model.nb_data)*omega; 137 | 138 | % kernel values of X 139 | theta = kernel_matrix(model.xtrain(model.selector, 1:model.x_dim), ... 140 | model.kernel_type, model.kernel_pars, X); 141 | for i=1:nD, 142 | kxx(i,1) = feval(model.kernel_type, X(i,:),X(i,:), model.kernel_pars); 143 | end 144 | 145 | 146 | 147 | Zc = eye(model.nb_data) - ones(model.nb_data)./model.nb_data; 148 | 149 | 150 | Hd = (Zc*bay.Rscores); 151 | Hd = Hd*diag(1./bay.mu - (bay.mu+ bay.zeta*bay.eigvals).^-1)*Hd'; 152 | 153 | 154 | % forall x 155 | for i=1:nD, 156 | term1(i,1) = bay.zeta^-1 + kxx(i)/bay.mu - theta(:,i)'*Hd*theta(:,i); 157 | term2(i,1) = 2/model.nb_data*sum(theta(:,i)'*Hd*omega) - 2/bay.mu/model.nb_data* sum(theta(:,i)); 158 | end 159 | 160 | 161 | % once 162 | term3 = 1/(bay.zeta*model.nb_data) ... 163 | + 1/(bay.mu*model.nb_data^2)* sum(oo) ... 164 | -1/(model.nb_data^2)* oo*Hd*oo'; 165 | 166 | sig_e = term1+term2+term3; 167 | 168 | 169 | -------------------------------------------------------------------------------- /bay_initlssvm.m: -------------------------------------------------------------------------------- 1 | function [model, ss] = bay_initlssvm(model) 2 | % Initialize the hyperparameters [$ \gamma$] and [$ \sigma^2$] before optimization with bay_optimize 3 | % 4 | % >> [gam, sig2] = bay_initlssvm({X,Y,type,[],[]}) 5 | % >> model = bay_initlssvm(model) 6 | % 7 | % 8 | % A starting value for sig2 is only given if the model has kernel type 'RBF_kernel'. 9 | % 10 | % 11 | % Full syntax 12 | % 13 | % 1. Using the functional interface: 14 | % 15 | % >> [gam, sig2] = bay_initlssvm({X,Y,type,[],[],kernel}) 16 | % 17 | % Outputs 18 | % gam : Proposed initial regularization parameter 19 | % sig2 : Proposed initial 'RBF_kernel' parameter 20 | % Inputs 21 | % X : N x d matrix with the inputs of the training data 22 | % Y : N x 1 vector with the outputs of the training data 23 | % type : 'function estimation' ('f') or 'classifier' ('c') 24 | % kernel(*) : Kernel type (by default 'RBF_kernel') 25 | % 26 | % 2. 
Using the object oriented interface: 27 | % 28 | % >> model = bay_initlssvm(model) 29 | % 30 | % Outputs 31 | % model : Object oriented representation of the LS-SVM model with initial hyperparameters 32 | % Inputs 33 | % model : Object oriented representation of the LS-SVM model 34 | % 35 | % See also: 36 | % bay_lssvm, bay_optimize 37 | 38 | % -disclaimer 39 | 40 | 41 | if iscell(model), 42 | iscell_model = 1; 43 | model = initlssvm(model{:}); 44 | else 45 | iscell_model = 0; 46 | end 47 | 48 | % start sig2 49 | % sig2 as the std of the fitting Gaussian 50 | if strcmp(model.kernel_type,'RBF_kernel'), 51 | model.kernel_pars = sum(range(model.xtrain))./1.96.*ones(1,model.y_dim); 52 | else 53 | warning('Only usefull for ''RBF_kernel''...'); 54 | if iscell_model, 55 | ss = model.kernel_pars; 56 | model = model.gam; 57 | end 58 | return 59 | end 60 | 61 | % set starting value 62 | if numel(model.gam)~=1, model.gam = 1; end 63 | 64 | 65 | % start gamma 66 | for i=1:10, 67 | gam(i,1) = exp(i-5); 68 | model.gam = gam(i,1); 69 | [~,~,~,bay] = bay_lssvm(model,2,'svd'); 70 | gam(i,2) = bay.Geff; 71 | end 72 | [~,index] = sort(abs(gam(:,1)-gam(:,2).*2)); 73 | model.gam = gam(index(1),1); 74 | 75 | if iscell_model, 76 | ss = model.kernel_pars; 77 | model = model.gam; 78 | end 79 | -------------------------------------------------------------------------------- /bay_optimize.m: -------------------------------------------------------------------------------- 1 | function [model,A,B,C,D] = bay_optimize(model,level, type, nb, bay) 2 | % Optimize the posterior probabilities of model (hyper-) parameters with respect to the different levels in Bayesian inference 3 | % 4 | % One can optimize on the three different inference levels: 5 | % 6 | % - First level: In the first level one optimizes the support values alpha 's and the bias b. 7 | % - Second level: In the second level one optimizes the regularization parameter gam. 8 | % - Third level: In the third level one optimizes the kernel 9 | % parameter. In the case of the common 'RBF_kernel' the kernel 10 | % parameter is the bandwidth sig2. 11 | % This routine is only tested with Matlab version 6 using the corresponding optimization toolbox. 12 | % 13 | % Full syntax 14 | % 15 | % 1. Outputs on the first level: 16 | % 17 | % >> [model, alpha, b] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 1) 18 | % >> [model, alpha, b] = bay_optimize(model, 1) 19 | % 20 | % model : Object oriented representation of the LS-SVM model optimized on the first level 21 | % alpha(*) : Support values optimized on the first level of inference 22 | % b(*) : Bias term optimized on the first level of inference 23 | % 24 | % 25 | % 2. Outputs on the second level: 26 | % 27 | % >> [model,gam] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 2) 28 | % >> [model,gam] = bay_optimize(model, 2) 29 | % 30 | % model : Object oriented representation of the LS-SVM model optimized on the second level of inference 31 | % gam(*) : Regularization parameter optimized on the second level of inference 32 | % 33 | % 34 | % 3. Outputs on the third level: 35 | % 36 | % >> [model, sig2] = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, 3) 37 | % 38 | % model : Object oriented representation of the LS-SVM model optimized on the third level of inference 39 | % sig2(*) : Kernel parameter optimized on the third level of inference 40 | % 41 | % 42 | % 4. 
Inputs using the functional interface 43 | % 44 | % >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level) 45 | % >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level, type) 46 | % >> model = bay_optimize({X,Y,type,gam,sig2,kernel,preprocess}, level, type, nb) 47 | % 48 | % X : N x d matrix with the inputs of the training data 49 | % Y : N x 1 vector with the outputs of the training data 50 | % type : 'function estimation' ('f') or 'classifier' ('c') 51 | % gam : Regularization parameter 52 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 53 | % kernel(*) : Kernel type (by default 'RBF_kernel') 54 | % preprocess(*) : 'preprocess'(*) or 'original' 55 | % level : 1, 2, 3 56 | % type(*) : 'eig', 'svd'(*), 'eigs', 'eign' 57 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 58 | % 59 | % 60 | % 5. Inputs using the object oriented interface 61 | % 62 | % >> model = bay_optimize(model, level) 63 | % >> model = bay_optimize(model, level, type) 64 | % >> model = bay_optimize(model, level, type, nb) 65 | % 66 | % model : Object oriented representation of the LS-SVM model 67 | % level : 1, 2, 3 68 | % type(*) : 'eig', 'svd'(*), 'eigs', 'eign' 69 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 70 | % 71 | % See also: 72 | % bay_lssvm, bay_lssvmARD, bay_modoutClass, bay_errorbar 73 | 74 | 75 | % Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 76 | 77 | vers = version; 78 | if vers(1)<'6', 79 | error(['This routine is only supported currently under MATLAB 6' ... 80 | ' and its corresponding optimization toolbox.']); 81 | end 82 | 83 | if iscell(model), model = initlssvm(model{:}); end 84 | 85 | 86 | if ~(level==1 | level==2 | level==3), 87 | error('level must be 1, 2 or 3.'); 88 | end 89 | 90 | eval('nb;','nb=model.nb_data;'); 91 | 92 | 93 | 94 | if level==1, 95 | 96 | eval('type;','type=''train'';'); 97 | eval('[F1, F2, F3, C , model] = bay_lssvm(model,1,type,nb,bay);',... 98 | '[F1, F2, F3, C , model] = bay_lssvm(model,1,type,nb);'); 99 | A = model.alpha 100 | B = model.b; 101 | 102 | elseif level==2, 103 | 104 | eval('type;','type=''svd'';'); 105 | eval('[model, A,B] = bay_optimize2(model,type,nb,bay); ',... 
106 | '[model, A,B] = bay_optimize2(model,type,nb);') 107 | 108 | elseif level==3, 109 | 110 | 111 | % check fminunc 112 | resp = which('fminunc'); 113 | disp(' '); 114 | if isempty(resp), 115 | error(' ''fminunc'' not available'); 116 | end 117 | 118 | eval('type;','type=''svd'';'); 119 | 120 | %startvalues 121 | model = bay_optimize2(model,type,nb); 122 | % start value given in model: not fixed 'cause updating 123 | % of optimal parameters needs to be possible 124 | start_param = model.kernel_pars; 125 | 126 | opties=optimset('MaxFunEvals', 250, 'TolFun', .001, 'TolX', .001 ); 127 | eval('A = fminunc(@costL3, start_param, opties, model, type, nb);'); 128 | 129 | model = changelssvm(model,'kernel_pars',abs(A)); 130 | [~,B,model] = bay_lssvm(model,3, type, nb); 131 | end 132 | 133 | 134 | 135 | 136 | 137 | function [model, A,B] = bay_optimize2(model,type,nb,bay) 138 | 139 | % check fminunc 140 | resp = which('fminunc'); 141 | disp(' '); 142 | if isempty(resp), 143 | error(' ''fminunc'' not available'); 144 | end 145 | 146 | opties=optimset('TypicalX',model.kernel_pars,'MaxFunEvals', 2000,'GradObj','on','DerivativeCheck', 'off', 'TolFun', .0001, 'TolX', .0001 ); 147 | if nargin<4, 148 | [c,dc,o, bay] = bay_lssvm(model,2,type,nb); 149 | end 150 | eval('gam_opt = fminunc(@costL2, abs(model.gam), opties, model, type, nb,bay);'); 151 | model = changelssvm(model,'gam',abs(gam_opt)); 152 | [D1, D2, D3,B,model] = bay_lssvm(model,2,type, nb, bay); 153 | A = model.gam; 154 | 155 | 156 | 157 | function [cost,Dcost] = costL2(lgam, model, type, nb, bay) 158 | % 159 | model = changelssvm(model,'gam',abs(lgam+1000*eps)); 160 | [cost, Dcost] = bay_lssvm(model,2,type, nb, bay); 161 | 162 | 163 | function cost = costL3(sig2, model, type, nb) 164 | % 165 | model = changelssvm(model,'kernel_pars',abs(sig2)); 166 | cost = bay_lssvm(model,3, type, nb); 167 | disp(['sig2 = ' num2str(model.kernel_pars) ' costL3 = ' num2str(cost) ';']) 168 | -------------------------------------------------------------------------------- /bay_rr.m: -------------------------------------------------------------------------------- 1 | function [A,B,C,D,E,F,G] = bay_rr(X,Y,gam,level,nb,eigvals,eigvec) 2 | % Bayesian inference of the cost on the three levels of linear ridge regression 3 | % 4 | % >> cost = bay_rr(X, Y, gam, level) 5 | % 6 | % This function implements the cost functions related to the 7 | % Bayesian framework of linear ridge Regression [29]. Optimizing 8 | % this criteria results in optimal model parameters W,b, 9 | % hyperparameters. The criterion can also be used for model 10 | % comparison. 11 | % 12 | % The obtained model parameters w and b are optimal on the first 13 | % level w.r.t J = 0.5*w'*w+gam*0.5*sum(Y-X*w-b).^2. 14 | % 15 | % Full syntax 16 | % 17 | % * OUTPUTS on the first level: Cost proportional to the posterior of the model parameters. 18 | % 19 | % >> [costL1, Ed, Ew] = bay_rr(X, Y, gam, 1) 20 | % 21 | % costL1: Cost proportional to the posterior 22 | % Ed(*) : Cost of the fitting error term 23 | % Ew(*) : Cost of the regularization parameter 24 | % 25 | % * OUTPUTS on the second level: Cost proportional to the posterior of gam. 
26 | % 27 | % >> [costL2, DcostL2, Deff, mu, ksi, eigval, eigvec] = bay_rr(X, Y, gam, 2) 28 | % 29 | % costL2 : Cost proportional to the posterior on the second level 30 | % DcostL2(*): Derivative of the cost proportional to the posterior 31 | % Deff(*) : Effective number of parameters 32 | % mu(*) : Relative importance of the fitting error term 33 | % ksi(*) : Relative importance of the regularization parameter 34 | % eigval(*) : Eigenvalues of the covariance matrix 35 | % eigvec(*) : Eigenvectors of the covariance matrix 36 | % 37 | % * OUTPUTS on the third level: The following commands can be 38 | % used to compute the level 3 cost function for different 39 | % models (e.g. models with different selected sets of 40 | % inputs). The best model can then be chosen as the model with 41 | % best level 3 cost (CostL3). 42 | % 43 | % >> [costL3, gam_optimal] = bay_rr(X, Y, gam, 3) 44 | % 45 | % costL3 : Cost proportional to the posterior on the third inference level 46 | % gam_optimal(*) : Optimal regularization parameter obtained from optimizing the second level 47 | % 48 | % * INPUTS: 49 | % 50 | % >> cost = bay_rr(X, Y, gam, level) 51 | % 52 | % X N x d matrix with the inputs of the training data 53 | % Y N x 1 vector with the outputs of the training data 54 | % gam Regularization parameter 55 | % level 1, 2, 3 56 | % 57 | % See also: 58 | % ridgeregress,bay_lssvm 59 | 60 | % -dislaimer- 61 | 62 | 63 | if ~exist('fminunc'), 64 | error('This function needs the optimization function ''fminunc''.'); 65 | end 66 | 67 | [N,d] = size(X); 68 | 69 | if (level==1 || level==2), 70 | [W,b] = ridgeregress(X,Y,gam); 71 | Ew = .5*W'*W; % Ew 72 | Ed = .5*sum((X*W+b-Y).^2); % Ed 73 | Ewgd = Ew+gam*Ed; % Ewgd 74 | A=Ewgd; C=Ed; B=Ew; 75 | 76 | if level==2, 77 | if nargin>=7, 78 | if numel(eigvals)>length(eigvals), v = diag(eigvals); else v=eigvals; end 79 | V = eigvec; 80 | else 81 | eval('[V,v] = eigs(X''*X+eye(d)*2,nb);','[V,v] = eig(X''*X+eye(d)*2);v=diag(v);');v=v-2; 82 | v = v*(N-1)/N; 83 | end 84 | Peff = find(v>1000*eps); Neff=length(Peff); 85 | v = v(Peff); V= V(:,Peff); 86 | vall = zeros(N-1,1);vall(1:Neff)=v; 87 | Deff = 1+sum(gam.*v./(1+gam.*v)); % Deff 88 | CostL2 = sum(log(vall+1./gam))+(N-1)*log(Ewgd); % CostL2 89 | DcostL2 = -sum(1./(gam+vall.*gam^2))+(N-1)*Ed/Ewgd; % DcostL2 90 | mu = 2*Ed/(N-Deff); ksi = mu*gam; % mu and ksi 91 | A=CostL2; B=DcostL2; C=Deff; D=mu; E=ksi;F=v;G=V; 92 | end 93 | 94 | elseif level==3, 95 | 96 | % check fminunc 97 | resp = which('fminunc'); 98 | %disp(' '); 99 | if isempty(resp), 100 | error(' ''fminunc'' not available'); 101 | end 102 | 103 | eval('nb;','nb=''blabla'';'); 104 | opties=optimset('MaxFunEvals', 2000,'GradObj','on', 'DerivativeCheck', 'off', 'TolFun', .0001, 'TolX', .0001, 'Display','off' ); 105 | [CostL2,DCostL2, Deff, mu,ksi,v, V] = bay_rr(X,Y,gam,2,nb); 106 | gam_opt = exp(fminunc(@costL2, log(gam), opties,X,Y,nb,v,V)); 107 | [CostL2,DCostL2, Deff, mu,ksi,v, V] = bay_rr(X,Y,gam_opt,2,nb,v,V); 108 | CostL3 = .5*length(v)*log(mu)+(N-1)*log(ksi) - log(Deff-1)-log(N-Deff) - sum(log(mu+ksi*v)); 109 | A = CostL3; B = gam_opt; 110 | 111 | else 112 | error('level should be ''1'', ''2'' or ''3''.'); 113 | end 114 | 115 | 116 | 117 | 118 | function [C,Dc] = costL2(log_gam,X,Y,nb,v,V) 119 | % 120 | [C,Dc] = bay_rr(X,Y,exp(log_gam),2,nb,v,V); 121 | -------------------------------------------------------------------------------- /bitreverse32.m: -------------------------------------------------------------------------------- 1 | function v = bitreverse32(k) 
2 | % function v = bitreverse32(k) 3 | % 4 | % Reverse the bits of k. 5 | % 6 | % Input: 7 | % k a 32 bit unsigned integer, or an array of such integers, 8 | % note: the input value is automatically converted to this type 9 | % Output: 10 | % v a 32 bit unsigned integer with the bits of the input in reverse 11 | % order, e.g., the MSB is now the LSB and vica versa 12 | % 13 | % See Stanford bit hacks: http://graphics.stanford.edu/~seander/bithacks.html 14 | % 15 | % (w) 2010, Dirk Nuyens, Department of Computer Science, KULeuven, Belgium 16 | 17 | v = uint32(k); 18 | % swap odd and even bits 19 | %v = ((v >> 1) & 0x55555555) | ((v & 0x55555555) << 1); 20 | v = bitxor( bitand( bitshift(v, -1) , 1431655765 ) , ... 21 | bitshift( bitand(v, 1431655765) , 1 ) ); 22 | % swap consecutive pairs 23 | %v = ((v >> 2) & 0x33333333) | ((v & 0x33333333) << 2); 24 | v = bitxor( bitand( bitshift(v, -2), 858993459 ) , ... 25 | bitshift( bitand(v, 858993459), 2 ) ); 26 | % swap nibbles ... 27 | %v = ((v >> 4) & 0x0F0F0F0F) | ((v & 0x0F0F0F0F) << 4); 28 | v = bitxor( bitand( bitshift(v, -4), 252645135 ) , ... 29 | bitshift( bitand(v, 252645135), 4 ) ); 30 | % swap bytes 31 | %v = ((v >> 8) & 0x00FF00FF) | ((v & 0x00FF00FF) << 8); 32 | v = bitxor( bitand( bitshift(v, -8), 16711935 ) , ... 33 | bitshift( bitand(v, 16711935), 8 ) ); 34 | % swap 2-byte long pairs 35 | %v = ( v >> 16 ) | ( v << 16); 36 | v = bitxor( bitshift(v, -16) , ... 37 | bitshift(v, 16) ); -------------------------------------------------------------------------------- /changelssvm.m: -------------------------------------------------------------------------------- 1 | function model = changelssvm(model,option, value) 2 | % Change a field of the object oriented representation of the LS-SVM 3 | % 4 | % 5 | % The different options of the fields are given in following table: 6 | % 7 | % 1. General options representing the kind of model: 8 | % 9 | % type: 'classifier' ,'function estimation' 10 | % implementation: 'CMEX' ,'CFILE' ,'MATLAB' 11 | % status: Status of this model ('trained' or 'changed' ) 12 | % alpha: Support values of the trained LS-SVM model 13 | % b: Bias term of the trained LS-SVM model 14 | % duration: Number of seconds the training lasts 15 | % latent: Returning latent variables ('no' ,'yes' ) 16 | % x_delays: Number of delays of eXogeneous variables (by default 0 ) 17 | % y_delays: Number of delays of responses (by default 0 ) 18 | % steps: Number of steps to predict (by default 1 ) 19 | % gam: Regularisation parameter 20 | % kernel_type: Kernel function 21 | % kernel_pars: Extra parameters of the kernel function 22 | % 23 | % 24 | % 2. Fields used to specify the used training data: 25 | % 26 | % x_dim: Dimension of input space 27 | % y_dim: Dimension of responses 28 | % nb_data: Number of training data 29 | % xtrain: (preprocessed) inputs of training data 30 | % ytrain: (preprocessed,coded) outputs of training data 31 | % selector: Indexes of training data effectively used during training 32 | % 33 | % 34 | % 3. Options used in the Conjugate Gradient (CG) algorithm: 35 | % 36 | % cga_max_itr: Maximum number of iterations in CG 37 | % cga_eps: Stopcriterium of CG, largest allowed error 38 | % cga_fi_bound: Stopcriterium of CG, smallest allowed improvement 39 | % cga_show: Show the results of the CG algorithm (1 or 0) 40 | % cga_startvalues: Starting values of the CG algorithm 41 | % 42 | % 43 | % 4. 
Fields with the information for pre- and post-processing (only given if appropriate): 44 | % 45 | % preprocess: 'preprocess' or 'original' 46 | % schemed: Status of the preprocessing 47 | % ('coded' ,'original' or 'schemed' ) 48 | % pre_xscheme: Scheme used for preprocessing the input data 49 | % pre_yscheme: Scheme used for preprocessing the output data 50 | % pre_xmean: Mean of the input data 51 | % pre_xstd: Standard deviation of the input data 52 | % pre_ymean: Mean of the responses 53 | % pre_ystd: Standard deviation of the reponses 54 | % 55 | % 56 | % 5. The specifications of the used encoding (only given if appropriate): 57 | % 58 | % code: Status of the coding 59 | % ('original' ,'changed' or 'encoded') 60 | % codetype: Used function for constructing the encoding 61 | % for multiclass classification (by default 'none') 62 | % codetype_args: Arguments of the codetype function 63 | % codedist_fct: Function used to calculate to which class a 64 | % coded result belongs 65 | % codedist_args: Arguments of the codedist function 66 | % codebook2: Codebook of the new coding 67 | % codebook1: Codebook of the original coding 68 | % 69 | % Full syntax 70 | % 71 | % >> model = changelssvm(model, field, value) 72 | % 73 | % Outputs 74 | % model(*) : Obtained object oriented representation of the LS-SVM model 75 | % Inputs 76 | % model : Original object oriented representation of the LS-SVM model 77 | % field : Field of the model one wants to change (e.g. 'preprocess') 78 | % value : New value of the field of the model one wants to change 79 | % 80 | % See also: 81 | % trainlssvm, initlssvm, simlssvm, plotlssvm. 82 | 83 | % Copyright (c) 2010, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 84 | 85 | 86 | % 87 | % alias sigma^2 88 | % 89 | if (strcmpi(option,'sig2')) option = 'kernel_pars'; end 90 | 91 | % 92 | % selector -> nb_data 93 | % nb_data -> selector 94 | % 95 | if strcmp(option,'selector'), 96 | model.nb_data = length(value); 97 | end 98 | if strcmp(option,'nb_data'), 99 | model.selector = 1:value; 100 | end 101 | 102 | % 103 | % xtrain 104 | % 105 | if strcmp(option,'xtrain'), 106 | [nb,model.x_dim] = size(value); 107 | model.nb_data = nb;%min(nb,model.nb_data); 108 | model.selector = 1:model.nb_data; 109 | if length(model.gam)>model.y_dim & length(model.gam)~=size(value,1), 110 | warning('Discarting different gamma''s...'); 111 | model.gam = max(model.gam); 112 | end 113 | eval('value=prelssvm(model,value);',... 114 | 'warning(''new trainings inputdata not comform with used preprocessing'');'); 115 | end 116 | 117 | 118 | % 119 | % ytrain 120 | % 121 | if strcmp(option,'ytrain'), 122 | if size(value,2)~=size(model.ytrain,2), 123 | model.y_dim = size(value,2); 124 | end 125 | eval('value = codelssvm(model,[],value);',... 126 | 'warning(''new trainings outputdata not comform with used encoding;'');'); 127 | eval('[ff,value] = prelssvm(model,[],value);',... 128 | 'warning(''new trainings outputdata not comform with used preprocessing;'');'); 129 | [nb,model.y_dim] = size(value); 130 | model.nb_data = min(nb,model.nb_data); 131 | model.selector = 1:model.nb_data; 132 | end 133 | 134 | 135 | 136 | % 137 | % switch between preprocessing - original data 138 | % model.prestatus = {'changed','ok'} 139 | % 140 | if (strcmpi(option,'preprocess')) & model.preprocess(1)~=value(1), 141 | model.prestatus = 'changed'; 142 | end 143 | 144 | 145 | 146 | 147 | % 148 | % change coding 149 | % 150 | if strcmpi(option,'codetype') | strcmpi(option,'codebook2') | ... 
151 | strcmpi(option, 'codeargs') | strcmpi(option, 'codedistfct'), 152 | model.code = 'changed'; 153 | elseif strcmpi(option,'codebook1'), 154 | warning('change original format of the classifier; the toolbox will be unable to return results in the original format'); 155 | end 156 | 157 | 158 | % 159 | % final change 160 | % 161 | eval(['old_value = model.' lower(option) ';'],'old_value=[];'); 162 | eval(['model.' lower(option) '=value;']); 163 | 164 | if (isempty(value) | isempty(old_value)), 165 | different = 1; 166 | else 167 | eval('different = any(old_value~=value);','different=1;'); 168 | end 169 | 170 | if different & ~strcmpi(option,'implementation'), 171 | model.status = 'changed'; 172 | end 173 | 174 | -------------------------------------------------------------------------------- /cilssvm.m: -------------------------------------------------------------------------------- 1 | function ci = cilssvm(model,alpha,conftype) 2 | % 3 | % Construction of bias corrected 100(1-\alpha)% pointwise or 4 | % simultaneous confidence intervals 5 | % 6 | % >> ci = cilssvm({X,Y,type,gam,kernel_par,kernel,preprocess},alpha,conftype) 7 | % >> ci = cilssvm(model,alpha,conftype) 8 | % 9 | % This function calculates bias corrected 100(1-\alpha)% pointwise or 10 | % simultaneous confidence intervals. The procedure support homoscedastic 11 | % data sets as well heteroscedastic data sets. The construction of the 12 | % confidence intervals are based on the central limit theorem for linear 13 | % smoothers combined with bias correction and variance estimation. 14 | % 15 | % 1. Using the functional interface: 16 | % 17 | % 18 | % >> ci = cilssvm({X,Y,type,gam,kernel_par,kernel,preprocess}) 19 | % >> ci = cilssvm({X,Y,type,gam,kernel_par,kernel,preprocess}, alpha) 20 | % >> ci = cilssvm({X,Y,type,gam,kernel_par,kernel,preprocess}, alpha, conftype) 21 | % 22 | % 23 | % Outputs 24 | % ci : N x 2 matrix containing the lower and upper confidence intervals 25 | % 26 | % Inputs 27 | % X : N x d matrix with the inputs of the training data 28 | % Y : N x 1 vector with the outputs of the training data 29 | % type : 'function estimation' ('f') or 'classifier' ('c') 30 | % gam : Regularization parameter 31 | % sig2 : Kernel parameter(s) (bandwidth in the case of the 'RBF_kernel') 32 | % kernel(*) : Kernel type (by default 'RBF_kernel') 33 | % preprocess(*) : 'preprocess'(*) or 'original' 34 | % alpha(*) : Significance level (by default 5%) 35 | % conftype(*) : Type of confidence interval 'pointwise' or 'simultaneous' (by default 'simultaneous') 36 | % 37 | % 2. 
Using the object oriented interface: 38 | % 39 | % 40 | % >> ci = cilssvm(model) 41 | % >> ci = cilssvm(model, alpha) 42 | % >> ci = cilssvm(model, alpha, conftype) 43 | % 44 | % 45 | % Outputs 46 | % ci : N x 2 matrix containing the lower and upper confidence intervals 47 | % 48 | % Inputs 49 | % model : Object oriented representation of the LS-SVM model 50 | % alpha : Significance level (by default 5%) 51 | % conftype : Type of confidence interval 'pointwise' or 'simultaneous' (by default 'simultaneous') 52 | % 53 | % 54 | % See also: 55 | % trainlssvm, simlssvm, predlssvm 56 | 57 | 58 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 59 | 60 | if iscell(model) 61 | model = initlssvm(model{:}); 62 | end 63 | 64 | if nargin <= 1 65 | alpha = 0.05; 66 | conftype = 'simul'; 67 | 68 | elseif nargin <= 2 69 | conftype = 'simul'; 70 | end 71 | 72 | if model.preprocess(1)=='p' 73 | error('Please use original data to compute confidence intervals...') 74 | end 75 | 76 | x = model.xtrain; y = model.ytrain; 77 | 78 | % train model 79 | if isempty(model.gam) && isempty(model.kernel.pars) 80 | error('Please tune model first with ''tunelssvm'' to obtain tuning parameters'); 81 | end 82 | model = trainlssvm(model); 83 | 84 | s = smootherlssvm(model); 85 | Yhat = simlssvm(model,x); 86 | 87 | % bias: double smoothing with fourt order kernel RBF4 88 | modelb = initlssvm(x,y,'f',[],[],'RBF4_kernel','o'); 89 | modelb = tunelssvm(modelb,'simplex','crossvalidatelssvm',{10,'mse'}); 90 | modelb = trainlssvm(modelb); 91 | 92 | biascorr = (s-eye(size(x,1)))*simlssvm(modelb,x); 93 | 94 | % construct approximate 100(1-alpha)% confidence interval 95 | %1) estimate variance nonparametrically 96 | sigma2 = varest(model); 97 | 98 | %2) calculate var-cov matrix 99 | s = s*diag(sigma2)*s'; 100 | 101 | %2b) find standardized absolute maxbias 102 | delta = max(abs(biascorr./sqrt(diag(s)))); 103 | 104 | %3) pointwise or simultaneous? 105 | if conftype(1)=='s' 106 | z = tbform(model,alpha) + delta; 107 | elseif conftype(1)=='p' 108 | z = norminv(alpha/2); 109 | Yhat = Yhat - biascorr; 110 | else 111 | error('Wrong type of confidence interval. 
Please choose ''pointwise'' or ''simultaneous'''); 112 | end 113 | 114 | ci = [Yhat+z*sqrt(diag(s)) Yhat-z*sqrt(diag(s))]; 115 | 116 | function [var,modele] = varest(model) 117 | 118 | % if preprocessed data, construct original data 119 | if model.preprocess(1)=='p' 120 | [x,y] = postlssvm(model,model.xtrain,model.ytrain); 121 | else 122 | x = model.xtrain; y = model.ytrain; 123 | end 124 | 125 | model = trainlssvm(model); 126 | 127 | Yh = simlssvm(model,x); 128 | 129 | % Squared normalized residuals 130 | e2 = (y-Yh).^2; 131 | 132 | % Make variance model 133 | if model.nb_data <= 200 134 | costfun = 'leaveoneoutlssvm'; costargs = {'mae'}; 135 | else 136 | costfun = 'crossvalidatelssvm'; costargs = {10,'mae'}; 137 | end 138 | modele = initlssvm(x,e2,'f',[],[],'RBF_kernel'); 139 | modele = tunelssvm(modele,'simplex',costfun,costargs); 140 | modele = trainlssvm(modele); 141 | 142 | % variance model 143 | var = max(simlssvm(modele,x),0); 144 | 145 | % make estimate of var unbiased in homoscedastic case if regression 146 | % estimate is unbiased 147 | L = smootherlssvm(model); 148 | S = smootherlssvm(modele); 149 | 150 | var = var./(ones(size(x,1),1)+S*diag(L*L'-L-L')); -------------------------------------------------------------------------------- /code.m: -------------------------------------------------------------------------------- 1 | function [nsignals, codebook, oldcodebook, scheme] = code(signals,codetype,codetype_args,oldcodebook,fctdist,fctdist_args) 2 | % Encode and decode a multi-class classification task into multiple binary classifiers 3 | % 4 | % >> Yc = code(Y, codebook) 5 | % 6 | % The coding is defined by the codebook. The codebook is 7 | % represented by a matrix where the columns represent all different 8 | % classes and the rows indicate the result of the binary 9 | % classifiers. An example is given: the 3 classes with original 10 | % labels [1 2 3] can be encoded in the following codebook (using Minimal Output Encoding): 11 | % 12 | % >> codebook 13 | % = [-1 -1 1; 14 | % 1 -1 1] 15 | % 16 | % For this codebook, a member of the first class is found if the 17 | % first binary classifier is negative and the second classifier is 18 | % positive. A don't care is represented by eps. By default it is 19 | % assumed that the original classes are represented as different 20 | % numerical labels. One can overrule this by passing the 21 | % old_codebook which contains information about the old representation. 22 | % 23 | % Different encoding schemes are available: 24 | % 25 | % 1. Minimum Output Coding (code_MOC) 26 | % 2. Error Correcting Output Code (code_ECOC) 27 | % This coding scheme uses redundant bits. 28 | % 3. One versus All Coding (code_OneVsAll) 29 | % 4. One Versus One Coding (code_OneVsOns) 30 | % 31 | % Different decoding schemes are implemented: 32 | % 33 | % 1. Hamming Distance (codedist_hamming) 34 | % 2. Bayesian Distance Measure (codedist_bay) 35 | % 36 | % 37 | % Full syntax 38 | % 39 | % 1. For encoding: 40 | % 41 | % >> [Yc, codebook, old_codebook] = code(Y, codefct) 42 | % >> [Yc, codebook, old_codebook] = code(Y, codefct, codefct_args) 43 | % >> Yc = code(Y, given_codebook) 44 | % 45 | % Outputs 46 | % Yc : N x nbits encoded output classifier 47 | % codebook(*) : nbits*nc matrix representing the used encoding 48 | % old_codebook(*) : d*nc matrix representing the original encoding 49 | % Inputs 50 | % Y : N x d matrix representing the original classifier 51 | % codefct(*) : Function to generate a new codebook (e.g. 
code_MOC) 52 | % codefct_args(*) : Extra arguments for codefct 53 | % given_codebook(*): nbits*nc matrix representing the encoding to use 54 | % 55 | % 2. For decoding: 56 | % 57 | % >> Yd = code(Yc, codebook,[], old_codebook) 58 | % >> Yd = code(Yc, codebook,[], old_codebook, codedist_fct) 59 | % >> Yd = code(Yc, codebook,[], old_codebook, codedist_fct, codedist_args) 60 | % 61 | % Outputs 62 | % Yd : N x nc decoded output classifier 63 | % Inputs 64 | % Y : N x d matrix representing the original classifier 65 | % codebook : d*nc matrix representing the original encoding 66 | % old_codebook : bits*nc matrix representing the encoding of the given classifier 67 | % codedist_fct : Function to calculate the distance between to encoded classifiers (e.g. codedist_hamming) 68 | % codedist_args(*) : Extra arguments of codedist_fct 69 | % 70 | % 71 | % see also 72 | % code_ECOC, code_MOC, code_OneVsAll, code_OneVsOne, codedist_hamming 73 | 74 | % Copyright (c) 2010, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 75 | 76 | 77 | 78 | % 79 | % default handling; 80 | % 81 | eval('fctdist(1,1);','fctdist = ''codedist_hamming'';'); 82 | eval('if isempty(oldcodebook),ss = sort(signals(:,1)); oldcodebook = ss([1;find(ss(2:end)~=ss(1:end-1))+1])'';end ',... 83 | 'ss = sort(signals(:,1)); oldcodebook = ss([1;find(ss(2:end)~=ss(1:end-1))+1])'';'); 84 | 85 | n = size(signals,1); 86 | mc = size(oldcodebook,2); 87 | 88 | 89 | % codebook or codetype 90 | % initialise the new scheme used for preprocessing 91 | % Binary,Ctu,cAtegorical,Original 92 | if isstr(codetype), 93 | eval('[codebook,scheme] = feval(codetype, mc, codetype_args{:});',... 94 | '[codebook,scheme] = feval(codetype, mc);'); 95 | else 96 | codebook = codetype; 97 | scheme=[]; for t=1:size(codebook,2),scheme=[scheme 'b']; end 98 | end 99 | 100 | 101 | % 102 | % convert from old coding towards new coding 103 | % 104 | if nargin==6, 105 | dist = feval(fctdist, signals, oldcodebook,fctdist_args{:}); 106 | else 107 | dist = feval(fctdist, signals, oldcodebook); 108 | end 109 | 110 | for t = 1:n, 111 | [m,mi] = min(dist(t,:)); 112 | m2 = min(dist(t,[1:(mi-1) (mi+1):end])); 113 | if m==m2, 114 | nsignals(t,:) = -inf+codebook(:,mi)'; 115 | else 116 | nsignals(t,:) = codebook(:,mi)'; 117 | end 118 | 119 | end 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | -------------------------------------------------------------------------------- /code_ECOC.m: -------------------------------------------------------------------------------- 1 | function [codebook,scheme] = code_ECOC(m,dist,distfct) 2 | % Generate the codebook for multiclass classification with Error Correcting Output encoding if feasible. 3 | % 4 | % function coding the multiple classes of this classification 5 | % model, using the Error Correcting Output Coding; 6 | % 7 | % codebook = code_ECOC(m) 8 | % codebook = code_ECOC(m, dist) 9 | % codebook = code_ECOC(m, dist, distfct) 10 | % 11 | % a codebook is found such that the minimal distances 12 | % between the 'm' different classes is larger than 'dist' according 13 | % to the distance measure of 'distfct'. The default is 'dist' 2 14 | % for the 'codedist_hamming' distance. Besides the minimal distance 15 | % between class representations, similar binary classifiers are 16 | % also avoided as these do not add reliability in the context of 17 | % deterministic binary classifiers. 18 | % 19 | % A recursive backtracking implementation looks for a 20 | % representation which fullfills the constraint. 
It can decide 21 | % exhaustively if such a representation is feasable. This can take 22 | % lots of memory and time when 'm' becomes large (>50). 23 | % 24 | % 25 | % see also: 26 | % code, code_OneVsOne, code_OneVsAll, code_MOC, codedist_hamming 27 | 28 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 29 | 30 | % default 31 | eval('distfct;','distfct=''codedist_hamming'';'); 32 | eval('dist;','dist=2'';'); 33 | nb = ceil(log2(m*dist)); 34 | codebook =[]; 35 | 36 | 37 | candidates = eps.*ones(nb,1); 38 | while isempty(codebook), 39 | disp(['number of bits ' num2str(nb)]); 40 | if nb>2^(m-1), error('No such code feasable'); end 41 | [codebook,sc] = create_code(candidates, m, dist, distfct,[]); 42 | if isempty(codebook), 43 | nb=nb+1; 44 | candidates = eps.*ones(nb,1); 45 | else 46 | hd=inf; 47 | hdM = 0; 48 | for t1=1:size(codebook,1), for t2=(t1+1):size(codebook,1), 49 | hd = min(hd,feval(distfct,codebook(t1,:), codebook(t2,:))); 50 | hdM = max(hdM,feval(distfct,codebook(t1,:), codebook(t2,:))); 51 | end; end 52 | 53 | if hd==0|hdM==size(codebook,2), 54 | candidates = sc; 55 | codebook=[]; disp('retry'); 56 | end 57 | end 58 | end 59 | 60 | % 61 | % output format, where 'b' stands for binary discriminator 62 | % see also 'code' and 'codelssvm' 63 | scheme = []; for i=1:nb, scheme = [scheme 'b']; end 64 | 65 | 66 | 67 | 68 | function [code,shrunkcandidate,rc] = create_code(candidates, m, dist, distfct,foundcand) 69 | % 70 | % recursive called function 71 | % 72 | 73 | % base case 74 | if isempty(candidates), code=[]; shrunkcandidate=[]; rc=0; return; end 75 | 76 | 77 | % pick a candidate 78 | [nb,nc] = size(candidates); 79 | rc=ceil(rand*nc); 80 | acode = candidates(:,rc); 81 | 82 | 83 | % initate this candidate 84 | % and remove from the candidate list 85 | acode = (acode~=eps).*acode; 86 | aicode = acode +(acode==0).*sign(rand(nb,1)-.5); 87 | if sum(acode==0)==0, 88 | candidates = candidates(:,[1:(rc-1) (rc+1):nc]); 89 | else 90 | while(acode==aicode), 91 | aicode = acode + (acode==0).*sign(rand(nb,1)); 92 | end 93 | end 94 | aicode = aicode+(aicode==0).*eps; 95 | acode = acode+(acode==0).*eps; 96 | 97 | candidates = shrink(candidates, aicode, dist, distfct); 98 | shrunkcandidate = shrink(acode, aicode, dist, distfct); 99 | 100 | % recursion 101 | if m-1>0, 102 | shrunkc = candidates; 103 | 104 | fprintf('R;'); 105 | [newcode,shrunkcandidate2,cc] = create_code(candidates,m-1, dist, distfct,[foundcand aicode]); 106 | fprintf('O;'); 107 | while isempty(newcode), 108 | if isempty(find(shrunkcandidate2)), code=[]; return; end 109 | disp('retry with left candidates'); 110 | shrunkc = [shrunkc(:,1:(cc-1)) shrunkcandidate2 shrunkc(:,(cc+1):end)]; 111 | [newcode,shrunkcandidate2,cc] = create_code(shrunkc, m, dist, distfct,foundcand); 112 | end 113 | code = [aicode newcode]; 114 | else 115 | code = aicode; 116 | end 117 | 118 | shrunkcandidate = candidates; 119 | 120 | 121 | 122 | function shrunkcandidates = shrinkr(candidates, aicode, dist, distfct) 123 | % refine candidates according to dist 124 | % and shrink list of candidates 125 | % 126 | % recursive algorithm: TAKE CARE many recursions needed 127 | 128 | fprintf('r'); 129 | % end of recursion 130 | if isempty(candidates),shrunkcandidates=[]; return; end 131 | if size(candidates,2)==1 &sum(candidates==eps)==0,shrunkcandidates=[]; return; end 132 | 133 | % recursive step 134 | cand = candidates(:,1); 135 | if feval(distfct, aicode', cand)> codebook = code_MOC(m) 5 | % 6 | % see also: 7 | 
% code 8 | 9 | % Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 10 | 11 | nb = ceil(log2(m)); 12 | codebook = -ones(nb,m); 13 | for i=1:m, 14 | code = str2num(num2str(dec2bin(i-1)')).*2-1; 15 | codebook((nb-length(code)+1):nb,i) = code; 16 | end 17 | 18 | % output forat, where 'b' stands for binary discriminator 19 | scheme = []; for i=1:nb, scheme = [scheme 'b']; end -------------------------------------------------------------------------------- /code_OneVsAll.m: -------------------------------------------------------------------------------- 1 | function [codebook,scheme] = code_OneVsAll(m) 2 | % Generate the codebook for multiclass classification with One-Versus-All encoding. 3 | % 4 | % codebook = code_OneVsAll(m) 5 | % 6 | % see also: 7 | % code, codedist_hamming 8 | 9 | % (c) SCD-KULeuven, rights & help @ http://www.esat.kuleuven.be/sista/lssvmlab 10 | 11 | 12 | codebook = eye(m).*2-1; 13 | scheme = []; for i=1:m, scheme = [scheme 'b']; end -------------------------------------------------------------------------------- /code_OneVsOne.m: -------------------------------------------------------------------------------- 1 | function [codebook,scheme] = code_OneVsOne(m) 2 | % Generate the codebook for multiclass classification with One-Versus-One encoding. 3 | % 4 | % codebook = code_OneVsOne(m) 5 | % 6 | % see also: 7 | % codelssvm 8 | 9 | % Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 10 | 11 | 12 | nb = m*(m-1)/2; 13 | codebook = NaN*zeros(nb,m); 14 | t=1; 15 | for i=1:m-1, 16 | for j=i+1:m, 17 | codebook(t,i) = 1; 18 | codebook(t,j) = -1; 19 | t=t+1; 20 | end 21 | end 22 | 23 | % output format, where 'b' stands for binary discriminator 24 | scheme = []; for i=1:nb, scheme = [scheme 'b']; end -------------------------------------------------------------------------------- /codedist_bay.m: -------------------------------------------------------------------------------- 1 | function dist = codedist_bay(C1,C2,Py) 2 | % compute the distance using a Bayesian metric between 'C1' and 'C2' 3 | % 4 | % dist = codedist_bay(C1,C2,{Py}) 5 | % 6 | % 'C1' contains the result code, 7 | % 'C2' contains the codebooks prototype. '0' or an infine small number 'eps' represents the don't care 8 | % 'Py' is a matrix containing the moderated outputs of the binary classifiers 9 | % 10 | % 11 | % An example: 12 | % >> Ye = [1 1; 1 1]; 13 | % >> codebook = [1 2 3]; 14 | % >> old_codebook = [1 1 -1; 1 -1 -1]; 15 | % >> Py = [.9 -.1; -.1 .13]; 16 | % >> code(Ye, codebook, [],old_codebook,'codedist_bay',{Py}) 17 | % in this call, 'Ye' is not used explicitly. 
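% For each datapoint n and codebook column d the code below computes
% dist(n,d) = sum(1 - Py(n,:).*C2(:,d)') - #(don't cares in C2(:,d)),
% so for Py(1,:) = [.9 -.1] the three columns of old_codebook above give
% distances [1.2 1.0 2.8] and the second class wins.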
18 | % 19 | % To use this distance measure in LS-SVMlab, the following 20 | % procedure is to be followed, assume input data 'X' and multiclass 21 | % output 'Y': 22 | % 23 | % >> [Ycode,codebook,old_codebook] = code(Y,'code_MOC'); 24 | % >> [alpha,b] = trainlssvm({X,Yc,'c',gam,sig2}); 25 | % >> Yhc = simlssvm({X,Yc,'c',gam,sig2},{alpha,b},Xt); 26 | % 27 | % The moderated output for the LS-SVM can be computed using the bayesian inference 28 | % framework for the LS-SVM: 29 | % >> Ymod = bay_modoutClass(model,Xt); 30 | % >> Yh = code(Yhc,old_codebook,[],codebook,'codedist_bay',{Ymod}); 31 | % 32 | % see also: 33 | % bay_modoutClass, codedist_hamming, code_ECOC 34 | 35 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 36 | 37 | % % encode for training 38 | % >> model = initlssvm(X,Y,'classification',gam,sig2,'preprocess','RBF_kernel'); 39 | % >> model = changelssvm(model,'codetype','code_MOC'); 40 | % >> model = changelssvm(model,'codedist_fct','codedist_hamming'); 41 | % >> model = trainlssvm(model); 42 | % 43 | % % decode for simulating 44 | % >> model = changelssvm(model,'codedist_fct','codedist_bay'); 45 | % >> model = changelssvm(model,'codedist_args',{bay_modoutClass(model,Xt)}); 46 | % >> Yt = simlssvm(model,Xt); 47 | 48 | 49 | if nargin<3, 50 | error(['moderated output needed as function arguments' ... 51 | '(for LS-SVM, model.codedist_args =' ... 52 | ' bay_modoutClass(model,X)).']); 53 | end 54 | 55 | 56 | [nb,nbin] = size(Py); 57 | [~,dim] = size(C2); 58 | dist = zeros(nb,dim); 59 | 60 | for d = 1:dim, 61 | for n= 1:nb, 62 | dist(n,d) = sum((1-Py(n,:).*C2(:,d)'))-sum(C2(:,d)==eps); 63 | end 64 | end 65 | 66 | 67 | -------------------------------------------------------------------------------- /codedist_hamming.m: -------------------------------------------------------------------------------- 1 | function dist = codedist_hamming(C1,C2) 2 | % Compute the hamming distance between rows of 'C1' and columns of 'C2' 3 | % 4 | % >> distance = codedist_hamming(encoded_data, codebook); 5 | % 6 | % 'encoded_data' contains the resulting codeword per row, n rows are possible 7 | % 'codebook' contains the codebooks prototype per class as columns. 8 | % an infinitesimal number 'eps' represents the don't care 9 | % 10 | % see also: 11 | % code, codelssvm, code_MOC 12 | 13 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 14 | 15 | [nb,nbin] = size(C1); 16 | [nbin,dim] = size(C2); 17 | dist = zeros(nb,dim); 18 | for d = 1:dim, 19 | for n= 1:nb, 20 | dist(n,d) = nbin-sum(C1(n,:)==C2(:,d)' | C1(n,:) < -10000 | C1(n,:) ==eps | C1(n,:) > 10000 | C2(:,dim)'==eps); 21 | end 22 | end -------------------------------------------------------------------------------- /codedist_loss.m: -------------------------------------------------------------------------------- 1 | function dist = codedist_loss(C1,C2,Ylat,loss) 2 | % compute the distance using a loss function metric between 'C1' and 'C2' 3 | % 4 | % dist = codedist_loss(C1,C2,Ylatent) 5 | % dist = codedist_loss(C1,C2,Ylatent, loss_fct) 6 | % 7 | % 'C1' contains the result code, 8 | % 'C2' contains the codebooks prototype. An infine small number 'eps' represents the don't care 9 | % 'loss_fct' is the loss function used in order to compute the loss 10 | % between 2 codewords. By default the sum of squares is used. 11 | % One can do 'winner-takes-all' decoding by using the 12 | % loss function 'max'. 
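% With the default 'sse' loss the decoded class is the column d minimizing
% sum((Ylatent(n,:)-C2(:,d)').^2) over the non-don't-care bits; e.g. a
% latent output [0.8 -0.6] is decoded to prototype [1 -1] (loss 0.2)
% rather than [1 1] (loss 2.6).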
13 | % 14 | % An example, with 'Ylatent' the matrix of latent outputs of the binary classifiers: 15 | % >> Ye = [1 1; 1 1; -1 -1; 1 -1]; 16 | % >> codebook = [1 2 3]; 17 | % >> old_codebook = [1 1 -1; 1 -1 -1]; 18 | % >> code(Ye, codebook, [],old_codebook,'codedist_loss',{Ylatent,'sse'}) 19 | % 20 | % To use this distance measure in LS-SVMlab, the following 21 | % procedure is to be followed, assume input data 'X' and multiclass 22 | % output 'Y' 23 | % 24 | % % encode for training 25 | % >> model = initlssvm(X,Y,'classification',gam,sig2,'preprocess','RBF_kernel'); 26 | % >> model = changelssvm(model,'codetype','code_OneVsOne'); 27 | % >> model = trainlssvm(model); 28 | % 29 | % % decode for simulating 30 | % >> [Yhamming, Ylatent] = simlssvm(model,Xt); 31 | % >> model = changelssvm(model,'codedist_fct','codedist_loss'); 32 | % >> model = changelssvm(model,'codedist_args',{Ylatent,'sse'}); 33 | % >> Yt = simlssvm(model,Xt); 34 | % 35 | % see also: 36 | % bay_modoutClass, codedist_hamming, code_ECOC 37 | 38 | % (c) SCD-KULeuven, rights & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 39 | 40 | 41 | if nargin<3, 42 | warning('The latent variables should be used, proceeding with the binary classifiers...'); 43 | Ylat = C1; 44 | end 45 | eval('loss;','loss=''sse'';'); 46 | 47 | [nb,nbin] = size(Ylat); 48 | [~,dim] = size(C2); 49 | dist = zeros(nb,dim); 50 | 51 | for d = 1:dim, 52 | for n= 1:nb, 53 | nondontcare = find(Ylat(n,:)~=eps & C2(:,d)'~=eps); 54 | dist(n,d) = feval(loss, Ylat(n,nondontcare),C2(nondontcare,d)'); 55 | end 56 | end 57 | 58 | 59 | 60 | function l = sse(X,Y) 61 | l = sum(sum((X-Y).^2)); 62 | 63 | function l = winnertakesall(X,Y) 64 | p = find(Y>0); 65 | l = max(X(p)); -------------------------------------------------------------------------------- /codelssvm.m: -------------------------------------------------------------------------------- 1 | function model = codelssvm(model,Yt,Ytt) 2 | % This function is only for internal LS-SVMlab use. For external coding of the classes, use the function 'code'. 3 | % 4 | % Function to decode & encode the responses of the classification model 5 | % before applying the LS-SVM. 6 | % 7 | % Firstly, the appropriate functions have to be set: 8 | % >> model = changelssvm(model,'codetype','code_MOC'); 9 | % >> model = changelssvm(model,'codedist_fct','codedist_bay'); 10 | % The corresponding encoding is invoked by 11 | % >> model = codelssvm(model) 12 | % 13 | % The 2nd argument is decoded, 14 | % 15 | % Y = codelssvm(model,Y) 16 | % 17 | % The 3rd argument is encoded, 18 | % 19 | % Y = codelssvm(model,[],Ytt) 20 | % 21 | % By default, a one dimensional categorical coding of the 22 | % (multiclass) labels is assumed.
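% A round-trip sketch with the standalone 'code' function (added; it assumes
% categorical labels Y taking values in {1,2,3}):
% >> [Yc, new_cb, orig_cb] = code(Y, 'code_OneVsOne'); % encode as +/-1 codewords
% >> Yd = code(Yc, orig_cb, [], new_cb, 'codedist_hamming'); % decode back to {1,2,3}
% Here 'new_cb' corresponds to model.codebook2 and 'orig_cb' to
% model.codebook1 in the option list below.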
23 | % 24 | % ENCODE OPTIONS in the model: 25 | % codetype: used coding for multiclass classification; 26 | % code: status of the coding {'original','encoded'}; 27 | % codetype: used coding for multiclass classification or 'none'; 28 | % codedist_fct: function used to calculate to which class a 29 | % coded result belongs; 30 | % codetype_args: arguments of the codetype function; 31 | % codedist_args: arguments of the codedist function; 32 | % codebook2: codebook of the new coding 33 | % codebook1: codebook of the original coding 34 | % 35 | % see also: 36 | % code, trainlssvm, simlssvm, code_OneVsAll, code_OneVsOne, 37 | % code_cat, codedist_hamming 38 | 39 | % (c) SCD-KULeuven, rights & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 40 | 41 | 42 | 43 | % default args 44 | eval('model.codedist_args;','model.codedist_args = {};'); 45 | eval('model.codetype_args;','model.codetype_args = {};'); 46 | eval('model.codedist_fct;','model.codedist_fct = ''codedist_hamming'';'); 47 | 48 | 49 | 50 | % 51 | % encode model 52 | % a new rescaling is looked up in case of preprocessing 53 | % 54 | % 55 | if nargin==1, 56 | % CLASSIFICATION % 57 | if model.type(1)=='c' && model.code(1)~='o', % not 'original' 58 | 59 | if model.code(1)=='c', % 'changed' 60 | 61 | % check dimension 62 | eval('if model.y_dim~=size(model.codebook2,1), warning(''Y different dimension than original code;''); end',... 63 | 'if model.y_dim~=1, warning(''Uncoded Y needs to be dimension 1;''); end'); 64 | 65 | % back to original coding 66 | eval('ys = code(model.ytrain, model.codebook1,{}, model.codebook2, model.codedist_fct,model.codedist_args{:});','ys = model.ytrain;'); 67 | if ~strcmpi(model.codetype,'none'), 68 | % convert into new coding 69 | eval(['[ys, model.codebook2, model.codebook1,pre_yscheme] = ' ... 70 | 'code(ys, model.codetype, model.codetype_args, model.codebook1, model.codedist_fct, model.codedist_args{:});'], ... 71 | ['[ys, model.codebook2, model.codebook1,pre_yscheme] = ' ... 72 | 'code(ys, model.codetype, model.codetype_args, [],model.codedist_fct,model.codedist_args{:});']); 73 | end 74 | 75 | % postprocess - set code - preprocess 76 | prepro = model.preprocess; model = postlssvm(model); 77 | model.pre_yscheme = pre_yscheme; 78 | model.ytrain = ys; 79 | model.y_dim = size(ys,2); 80 | model.code = 'encoded'; 81 | model = changelssvm(model,'preprocess',prepro); model = prelssvm(model); 82 | end; 83 | end 84 | 85 | 86 | % 87 | % decode signal 88 | % 89 | elseif nargin==2, 90 | 91 | % CLASSIFICATION % 92 | if model.type(1)=='c' & model.code(1)~='o', % not 'original' 93 | 94 | eval('model.codebook1; model.codebook2;','model = trainlssvm(model);'); 95 | if ~strcmpi(model.codetype,'none'), 96 | eval('model = code(Yt,model.codebook1, {}, model.codebook2, model.codedist_fct, model.codedist_args);',... 97 | 'model = code(Yt,model.codebook1, {}, model.codebook2);'); 98 | else 99 | model=Yt; 100 | end 101 | else 102 | model=Yt; 103 | end 104 | 105 | 106 | % 107 | % encode signal 108 | % 109 | elseif nargin==3, 110 | 111 | % CLASSIFICATION % 112 | if model.type(1)=='c' & model.code(1)~='o', % not 'original' 113 | 114 | eval('model.codebook1; model.codebook2;','model = trainlssvm(model);'); 115 | if ~strcmpi(model.codetype,'none'), 116 | eval('model = code(Ytt,model.codebook2, {}, model.codebook1, model.codedist_fct, model.codedist_args);',... 
117 | 'model = code(Ytt,model.codebook2, {}, model.codebook1);'); 118 | else 119 | model=Ytt; 120 | end 121 | else 122 | model=Ytt; 123 | end 124 | 125 | end 126 | 127 | 128 | -------------------------------------------------------------------------------- /crossvalidate.m: -------------------------------------------------------------------------------- 1 | function [cost,costs] = crossvalidate(model, L, estfct,combinefct) 2 | 3 | % Estimate the model performance of a model with L-fold crossvalidation 4 | % 5 | % CAUTION!! Use this function only to obtain the value of the crossvalidation score 6 | % function given the tuning parameters. Do not use this function together with 7 | % 'tunelssvm', but use 'crossvalidatelssvm' instead. The latter is a faster 8 | % implementation which uses previously computed results. 9 | % 10 | % >> cost = crossvalidate({Xtrain,Ytrain,type,gam,sig2}) 11 | % >> cost = crossvalidate( model) 12 | % 13 | % The data is randomly permuted once, then it is divided into L (by default 10) 14 | % disjoint sets. In the i-th (i=1,...,L) iteration, the i-th set is used to estimate 15 | % the performance ('validation set') of the model trained on the other L-1 sets ('training set'). 16 | % Finally, the L different estimates of the performance are combined (by default by the 'mean'). 17 | % The assumption is made that the input data are independently and identically distributed over the 18 | % input space. As additional output, the costs in the different folds ('costs') of the data are returned: 19 | % 20 | % >> [cost, costs] = crossvalidate(model) 21 | % 22 | % Some commonly used criteria are: 23 | % 24 | % >> cost = crossvalidate(model, 10, 'misclass', 'mean') 25 | % >> cost = crossvalidate(model, 10, 'mse', 'mean') 26 | % >> cost = crossvalidate(model, 10, 'mae', 'median') 27 | % 28 | % Full syntax 29 | % 30 | % 1. Using LS-SVMlab with the functional interface: 31 | % 32 | % >> [cost, costs] = crossvalidate({X,Y,type,gam,sig2,kernel,preprocess}, L, estfct, combinefct) 33 | % 34 | % Outputs 35 | % cost : Cost estimation of the L-fold cross validation 36 | % costs(*) : L x 1 vector with costs estimated on the L different folds 37 | % 38 | % Inputs 39 | % X : Training input data used for defining the LS-SVM and the preprocessing 40 | % Y : Training output data used for defining the LS-SVM and the preprocessing 41 | % type : 'function estimation' ('f') or 'classifier' ('c') 42 | % gam : Regularization parameter 43 | % sig2 : Kernel parameter (squared bandwidth in the case of the 'RBF_kernel') 44 | % kernel(*) : Kernel type (by default 'RBF_kernel') 45 | % preprocess(*) : 'preprocess'(*) or 'original' 46 | % L(*) : Number of folds (by default 10) 47 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 48 | % combinefct(*) : Function combining the estimated costs on the different folds (by default mean) 49 | % 50 | % 51 | % 2.
Using the object oriented interface: 52 | % 53 | % >> [cost, costs] = crossvalidate(model, L, estfct, combinefct) 54 | % 55 | % Outputs 56 | % cost : Cost estimation of the L-fold cross validation 57 | % costs(*) : L x 1 vector with costs estimated on the L different folds 58 | % 59 | % Inputs 60 | % model : Object oriented representation of the LS-SVM model 61 | % L(*) : Number of folds (by default 10) 62 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 63 | % combinefct(*) : Function combining the estimated costs on the different folds (by default mean) 64 | % 65 | % 66 | % 3. Using other modeling techniques: 67 | % 68 | % 69 | % See also: 70 | % leaveoneout, gcrossvalidate, trainlssvm, simlssvm 71 | 72 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 73 | 74 | % LS-SVMlab 75 | eval('model = initlssvm(model{:});',' '); 76 | eval('L;','L=min(ceil(sqrt(model.nb_data)),10);'); 77 | eval('estfct;','estfct=''mse'';'); 78 | eval('combinefct;','combinefct=''mean'';'); 79 | 80 | % 81 | % initialisation and defaults 82 | % 83 | nb_data = size(model.ytrain,1); 84 | d = size(model.xtrain,2); 85 | 86 | if L==nb_data, p = 1:nb_data; else p = randperm(nb_data); end 87 | px = model.xtrain(p,:); 88 | py = model.ytrain(p,:); 89 | 90 | [~,Y] = postlssvm(model,[],py); 91 | 92 | %initialize: no incremental memory allocation 93 | costs = zeros(L,length(model.gam)); 94 | block_size = floor(nb_data/L); 95 | 96 | % calculate matrix for LS-SVM once for the entire data 97 | S = ones(nb_data,1); 98 | Inb = eye(nb_data); 99 | K = kernel_matrix(px,model.kernel_type,model.kernel_pars); 100 | Atot = K+Inb./model.gam; 101 | 102 | % Cholesky factor 103 | try R = chol(Atot); 104 | % Solve full system 105 | q = R\(R'\[py S]); 106 | p = q(:,2); q = q(:,1); 107 | s = 1/sum(p); 108 | bias = s*sum(q); 109 | alpha = q - p*bias; 110 | 111 | % Two expensive steps, yet more efficient than using LINSOLVE on each fold 112 | Ri = R\Inb; 113 | C = Ri*Ri' - s*(p)*p'; 114 | 115 | catch %R = cholinc(sparse(Atot),1e-5); 116 | A = [K+Inb./model.gam S; S' 0]; 117 | C = pinv(A); 118 | alpha = C*[py;0]; 119 | %bias = alpha(nb_data+1); 120 | alpha = alpha(1:nb_data); 121 | end 122 | 123 | % start loop over L validations 124 | for l = 1:L, 125 | % divide data into a validation set and a training set 126 | if l==L, 127 | validation = block_size*(l-1)+1:nb_data; 128 | else 129 | validation = block_size*(l-1)+1:block_size*l; 130 | end 131 | % Submatrix of C to compute residuals for the l-th fold left out 132 | Ckk = C(validation,validation); 133 | % Solution of small linear system (block_size x block_size) 134 | try % faster 135 | Rkk = chol(Ckk+eps); 136 | betak = Rkk\(Rkk'\alpha(validation)); 137 | catch 138 | betak = Ckk\alpha(validation); 139 | end 140 | % latent outputs for validation 141 | yh = py(validation) - betak; 142 | [~,yh] = postlssvm(model,[],yh); 143 | if ~(model.type(1)=='c') 144 | costs(l,1) = feval(estfct,yh - Y(validation,:)); 145 | else 146 | costs(l,1) = feval(estfct,Y(validation,:),sign(yh)); 147 | end 148 | end 149 | cost = feval(combinefct,costs);
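% A usage sketch (added; the toy data and the tuning values gam=10, sig2=0.3
% are chosen for illustration only):
% >> X = (-3:0.2:3)'; Y = sin(X) + 0.1.*randn(length(X),1);
% >> cost = crossvalidate({X,Y,'f',10,0.3,'RBF_kernel'}, 10, 'mse', 'mean')

-------------------------------------------------------------------------------- /crossvalidatelssvm.m: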
-------------------------------------------------------------------------------- 1 | function cost = crossvalidatelssvm(model,Y, L, omega, estfct,combinefct) 2 | % Estimate the model performance of a model with l-fold crossvalidation 3 | % 4 | %%%%%%%%%%%%%%%%%%%%% 5 | % INTERNAL FUNCTION % 6 | %%%%%%%%%%%%%%%%%%%%% 7 | % Estimate the model performance of a model with fast l-fold crossvalidation. 8 | % Implementation based on "De Brabanter et al., Computationsl Statistics & Data Analysis, 2010" 9 | 10 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @% http://www.esat.kuleuven.be/sista/lssvmlab 11 | 12 | % 13 | % See also: 14 | % leaveoneoutlssvm, crossvalidatelssvm, trainlssvm 15 | % Copyright (c) 2002, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.ac.be/sista/lssvmlab 16 | % initialisation and defaults 17 | % 18 | %if size(X,1)~=size(Y,1), error('X and Y have different number of datapoints'); end 19 | nb_data = size(Y,1); 20 | d = size(model.xtrain,2); 21 | % LS-SVMlab 22 | eval('model = initlssvm(model{:});',' '); 23 | model.status = 'changed'; 24 | 25 | eval('L;','L=min(round(sqrt(size(model.xfull,1))),10);'); 26 | eval('estfct;','estfct=''mse'';'); 27 | eval('combinefct;','combinefct=''mean'';'); 28 | 29 | % Y is raw data, non preprocessed 30 | py = Y; 31 | [~,Y] = postlssvm(model,[],Y); 32 | 33 | gams = model.gamcsa; try sig2s = model.kernel_parscsa; catch, sig2s = [];end 34 | 35 | %initialize: no incremental memory allocation 36 | costs = zeros(L,length(gams)); 37 | block_size = floor(nb_data/L); 38 | 39 | % check whether there are more than one gamma or sigma 40 | for j =1:numel(gams) 41 | if strcmp(model.kernel_type,'RBF_kernel') || strcmp(model.kernel_type,'RBF4_kernel') 42 | model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',sig2s(j)); 43 | elseif strcmp(model.kernel_type,'lin_kernel') 44 | model = changelssvm(model,'gam',gams(j)); 45 | elseif strcmp(model.kernel_type,'poly_kernel') 46 | model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',[sig2s(1,j);sig2s(2,j)]); 47 | else 48 | model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',[sig2s(1,j);sig2s(2,j);sig2s(3,j)]); 49 | end 50 | 51 | 52 | % calculate matrix for LS-SVM once for the entire data 53 | S = ones(nb_data,1); 54 | Inb = eye(nb_data); 55 | K = kernel_matrix2(omega,model.kernel_type,model.kernel_pars,d); 56 | Atot = K+Inb./model.gam; 57 | 58 | % Cholesky factor 59 | try R = chol(Atot); 60 | % Solve full system 61 | q = R\(R'\[py S]); 62 | p = q(:,2); q = q(:,1); 63 | s = 1/sum(p); 64 | bias = s*sum(q); 65 | alpha = q - p*bias; 66 | 67 | % Two expensive steps yet more efficient that using LINSOLVE on each fold 68 | Ri = R\Inb; 69 | C = Ri*Ri' - s*(p)*p'; 70 | 71 | catch %R = cholinc(sparse(Atot),1e-5); 72 | A = [K+Inb./model.gam S; S' 0]; 73 | C = pinv(A); 74 | alpha = C*[py;0]; 75 | %bias = alpha(nb_data+1); 76 | alpha = alpha(1:nb_data); 77 | end 78 | 79 | % start loop over l validations 80 | for l = 1:L, 81 | % divide data in validation set and trainings data set 82 | if l==L, 83 | %%train = 1:block_size*(l-1); % not used 84 | validation = block_size*(l-1)+1:nb_data; 85 | else 86 | %%train = [1:block_size*(l-1) block_size*l+1:nb_data]; % not used 87 | validation = block_size*(l-1)+1:block_size*l; 88 | end 89 | % Submatrix of C to compute residuals for the l-th fold left out 90 | Ckk = C(validation,validation); 91 | % Solution of small linear system (block_size x block_size) 92 | try % faster 93 | Rkk = chol(Ckk+eps); 94 | betak = 
Rkk\(Rkk'\alpha(validation)); 95 | catch 96 | betak = Ckk\alpha(validation); 97 | end 98 | % latent outputs for validation 99 | yh = py(validation) - betak; 100 | [~,yh] = postlssvm(model,[],yh); 101 | if ~(model.type(1)=='c') 102 | costs(l,j) = feval(estfct,yh - Y(validation,:)); 103 | else 104 | costs(l,j) = feval(estfct,Y(validation,:),sign(yh)); 105 | end 106 | end 107 | end 108 | cost = feval(combinefct, costs); -------------------------------------------------------------------------------- /csa.m: -------------------------------------------------------------------------------- 1 | function [pfinal,efinal] = csa(pn,herrfunc,varargin) 2 | 3 | % 4 | % Internal function based on 5 | % Xavier-de-Souza S, Suykens JA, Vandewalle J, Bolle D., Coupled Simulated 6 | % Annealing, IEEE Trans Syst Man Cybern B Cybern. 2010 Apr;40(2):320-35. 7 | % 8 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @% http://www.esat.kuleuven.be/sista/lssvmlab 9 | 10 | 11 | %switch length(varargin) 12 | OPT.T0 = 1; OPT.Tac0 = 1; OPT.FEmax = 40; OPT.FTsteps = 20; OPT.etol = 1e-45; OPT.print = 0; 13 | T0 = OPT.T0; % initial temperature 14 | Tac0 = OPT.Tac0; % initial temperature 15 | FEmax = OPT.FEmax; % max # of function evaluations 16 | FTsteps = OPT.FTsteps; % # of steps at fix temperature 17 | etol = OPT.etol; % energy tolerance 18 | 19 | clear OPT 20 | % initializes M 21 | pdim = size(pn,1); 22 | pnum = size(pn,2); 23 | 24 | NT = ceil(FEmax/FTsteps/pnum); % # max. number of cooling cycles 25 | NI = FTsteps; % #steps per temperature 26 | 27 | rand('twister',sum(pn(1)*clock)) 28 | randn('state',sum(pn(2)*clock)) 29 | 30 | e0 = feval(herrfunc,pn,varargin{:}); 31 | %if any(e0<0), etol = -etol*etol^-2;end 32 | 33 | p0 = pn; 34 | [be0,ind] = min(e0); 35 | bp0 = pn(:,ind); 36 | 37 | pblty = zeros(1,pnum); 38 | sgnCR = -1; 39 | CR = 0.1;%0.05; 40 | pvar_est = 0.995; 41 | 42 | Tac = Tac0; 43 | 44 | for k = 1:NT, 45 | C = progress('init',['Determine initial tuning parameters for simplex...',': # cooling cycle(s) ', num2str(k)]); 46 | pbltvar = var(pblty,1); 47 | sgnCR_ant = sgnCR; 48 | sgnCR = 2*((pbltvar > (pvar_est*(pnum-1)/(pnum^2)))-0.5); 49 | Tac = Tac + sgnCR*CR*Tac; 50 | 51 | % T schedules 52 | % T = T0/log(k+1); 53 | T = T0/k; 54 | % T = T0*exp(-k); 55 | % T = T0*exp(-exp(k)); 56 | 57 | % Tac schedules (not needed when variance control is applied) 58 | % Tac = Tac0/log(h+1); 59 | % Tac = Tac0/h; 60 | % Tac = Tac0*exp(-k); 61 | % Tac = Tac0*exp(-exp(k)); 62 | 63 | % TV(k) = Tac; 64 | for l = 1:NI, 65 | % choose new coordinates and compute 66 | % the function to minimize 67 | r = tan(pi*(rand(pdim,pnum)-0.5)); 68 | % ****************** Wrapping *************** 69 | %pn = 2*mod((p0 + r * T + 1)/2,1)-1; 70 | %**************** Non wrapping ************* 71 | pn = p0 + r * T ;%* (diag(e0)./sum(e0)); 72 | indd = find(abs(pn)>10); 73 | while(numel(indd)), 74 | r(indd) = tan(pi*(rand(size(indd))-0.5)); 75 | pn = p0 + r * T ;%* (diag(e0)./sum(e0)); 76 | indd = find(abs(pn)>15); 77 | end 78 | 79 | en = feval(herrfunc,pn,varargin{:}); 80 | Esum = sum(exp((e0-max(e0))./Tac)); 81 | for i=1:pnum 82 | pblty(i) = min(1,exp((e0(i)-max(e0))/Tac)/Esum); 83 | if (en(i)-e0(i)) < 0, 84 | % accept 85 | p0(:,i) = pn(:,i); e0(i) = en(i); 86 | if e0(i) < be0, 87 | be0 = e0(i); 88 | bp0 = p0(:,i); 89 | % if OPT.print == 1, 90 | % fprintf('v%e %d\n',min(e0),k); 91 | % end 92 | end 93 | else 94 | r = rand; 95 | if (pblty(i)) >= r 96 | % accept 97 | p0(:,i) = pn(:,i); e0(i) = en(i); 98 | end 99 | end 100 | end 101 | C = 
progress(C,l/NI); 102 | if any(e0> type demo_fixedclass'); 9 | disp(' '); 10 | disp(' or '); 11 | disp(' '); 12 | disp(' >> edit demo_fixedclass'); 13 | disp(' '); 14 | 15 | 16 | load ripley 17 | 18 | % 19 | % initiate values 20 | type = 'classification'; 21 | gamma = 0.1; 22 | kernel = 'RBF_kernel'; 23 | sigma2 = 1; 24 | sigma2ent = 0.1; 25 | crit_old=-inf; 26 | Nc=20; 27 | Xs=X(1:Nc,:); 28 | Ys=Y(1:Nc,:); 29 | 30 | % 31 | % Initiate grid for plot 32 | grain = 25; 33 | xmin1=min(X(:,1)); 34 | xmax1=max(X(:,1)); 35 | xmin2=min(X(:,2)); 36 | xmax2=max(X(:,2)); 37 | xrange1 = xmin1:(xmax1-xmin1)/grain:xmax1; 38 | xrange2 = xmin2:(xmax2-xmin2)/grain:xmax2; 39 | [XX,YY] = meshgrid(xrange1,xrange2); 40 | Xt = [reshape(XX,numel(XX),1) reshape(YY,numel(YY),1)]; 41 | figure; 42 | 43 | 44 | % 45 | % iterate over data 46 | % 47 | for tel=1:5*length(X) 48 | 49 | 50 | % 51 | % new candidate set 52 | % 53 | Xsp=Xs; Ysp=Ys; 54 | S=ceil(length(X)*rand(1)); 55 | Sc=ceil(Nc*rand(1)); 56 | Xs(Sc,:) = X(S,:); 57 | Ys(Sc,:) = Y(S); 58 | Ncc=Nc; 59 | 60 | % 61 | % automaticly extract features and compute entropy 62 | % 63 | crit = kentropy(Xs,kernel, sigma2ent); 64 | 65 | if crit <= crit_old, 66 | crit = crit_old; 67 | Xs=Xsp; 68 | Ys=Ysp; 69 | else 70 | crit_old = crit; 71 | 72 | % 73 | % ridge regression 74 | features = AFEm(Xs,kernel, sigma2,X); 75 | features_t = AFEm(Xs,kernel, sigma2,Xt); 76 | [w,b,Yht] = ridgeregress(features,Y,gamma,features_t); 77 | Yht = sign(Yht); 78 | 79 | % 80 | % make-a-plot 81 | Ygt = reshape(Yht(:,1),size(XX,1),size(XX,2)); 82 | colormap cool; 83 | [C,h]=contourf(XX,YY,Ygt); 84 | hold on; 85 | n = find(Y<=0); 86 | np = plot(X(n,1),X(n,2),'k.'); 87 | p = find(Y>0); 88 | pp = plot(X(p,1),X(p,2),'k+'); 89 | sv = plot(Xs(:,1),Xs(:,2),'go','Linewidth',7); 90 | xlabel('X_1'); ylabel('X_2'); 91 | title(['Approximation by fixed size LS-SVM based on maximal entropy: ' num2str(crit)]); 92 | legend([np pp sv],'Negative points','Positive points',... 93 | 'Support Vectors'); 94 | 95 | hold off; drawnow 96 | 97 | end 98 | 99 | end 100 | 101 | 102 | -------------------------------------------------------------------------------- /demo_fixedsize.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | 4 | disp(' This demo illustrates the idea of fixed size LS-SVM. '); 5 | disp(' The program consists of 2 steps. In the former, one '); 6 | disp(' constructs a reduced set of support vectors base on '); 7 | disp(' an apropriate criterion on the data. In this case'); 8 | disp(' the measure ''kentropy'' is optimized.'); 9 | disp(' '); 10 | disp(' In the latter step, one constructs the implicit mapping'); 11 | disp(' to feature space based on the eigenvalue decomposition.'); 12 | disp(' A parametric linear regression is executed on the mapped'); 13 | disp(' data'); 14 | disp(' '); 15 | disp(' To see the used cose, use the call'); 16 | disp(' '); 17 | disp('>> type demo_fixedsize '); 18 | disp(' '); 19 | disp(' or '); 20 | disp(' '); 21 | disp('>> edit demo_fixedsize '); 22 | disp(' '); 23 | disp(' A dataset is constructed at first...'); 24 | % 25 | % dataset 26 | %clear 27 | figure; 28 | randn('state',0); 29 | x = sort(2.*randn(2000,1)); 30 | x0 = sort(2.*randn(2000,1)); 31 | %x=(-9.95:0.1:10)'; 32 | %x0=(-9.95:0.05:10)'; 33 | eval('y = sinc(x)+0.05.*randn(length(x),1);',... 
34 | 'y = sin(pi.*x+12345*eps)./(pi*x+12345*eps)+0.05.*randn(length(x),1);'); 35 | 36 | eval('y0 = sinc(x0)+0.05.*randn(length(x0),1);',... 37 | 'y0 = sin(pi.*x0+12345*eps)./(pi*x0+12345*eps)+0.05.*randn(length(x0),1);'); 38 | 39 | disp(' The parameters are initialized...'); 40 | 41 | 42 | % 43 | % initiate values 44 | kernel = 'RBF_kernel'; 45 | sigma2=.75; 46 | gamma=1; 47 | crit_old=-inf; 48 | Nc=15; 49 | Xs=x(1:Nc,:); 50 | Ys=y(1:Nc,:); 51 | 52 | disp(' The optimal reduced set is constructed iteratively: '); 53 | 54 | % 55 | % iterate over data 56 | % 57 | tv = 1; 58 | for tel=1:length(x) 59 | 60 | 61 | % 62 | % new candidate set 63 | % 64 | Xsp=Xs; Ysp=Ys; 65 | S=ceil(length(x)*rand(1)); 66 | Sc=ceil(Nc*rand(1)); 67 | Xs(Sc,:) = x(S,:); 68 | Ys(Sc,:) = y(S); 69 | Ncc=Nc; 70 | 71 | % 72 | % automaticly extract features and compute entropy 73 | % 74 | crit = kentropy(Xs,kernel, sigma2); 75 | 76 | if crit <= crit_old, 77 | crit = crit_old; 78 | Xs=Xsp; 79 | Ys=Ysp; 80 | else 81 | crit_old = crit; 82 | 83 | % 84 | % ridge regression 85 | % 86 | [features,U,lam] = AFEm(Xs,kernel, sigma2,x); 87 | [w,b,Yh] = ridgeregress(features,y,gamma,features); 88 | 89 | % 90 | % make-a-plot 91 | % 92 | plot(x,y,'*'); hold on 93 | plot(x,Yh,'r-') 94 | plot(Xs,Ys,'go','Linewidth',7) 95 | xlabel('X'); ylabel('Y'); 96 | title(['Approximation by fixed size LS-SVM based on maximal entropy: ' num2str(crit)]); 97 | hold off; drawnow 98 | 99 | 100 | end 101 | 102 | % 103 | % validate 104 | % 105 | %Yh0 = AFE(Xs,kernel, sigma2,x0)*w + b; 106 | %val(tv,2) = mse(Yh0-y0); tv=tv+1; 107 | 108 | end 109 | 110 | 111 | 112 | disp(' The parametric linear ridge regression is calculated:'); 113 | 114 | % 115 | % ridge regression 116 | % 117 | features = AFEm(Xs,kernel, sigma2,x); 118 | 119 | % Bayesian inference of the gamma 120 | try, 121 | [CostL3, gamma_optimal] = bay_rr(features,y,gamma,3); 122 | catch, 123 | warning('no Bayesian optimization of the regularization parameter'); 124 | gamma_optimal = gamma; 125 | end 126 | 127 | [w,b] = ridgeregress(features,y,gamma_optimal); 128 | Yh0 = AFEm(Xs,kernel, sigma2,x0)*w+b; 129 | echo off; 130 | 131 | % 132 | % make-a-plot 133 | plot(x,y,'*'); hold on 134 | plot(x0,Yh0,'r-') 135 | plot(Xs,Ys,'go','Linewidth',7) 136 | xlabel('X'); ylabel('Y'); 137 | title(['Approximation by fixed size LS-SVM based on maximal entropy: ' num2str(crit)]); 138 | hold off; 139 | 140 | 141 | 142 | 143 | 144 | -------------------------------------------------------------------------------- /demo_yinyang.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | disp(' This demo illustrates facilities of LS-SVMlab'); 4 | disp(' with respect to unsupervised learning.'); 5 | 6 | disp(' a demo dataset is generated...'); 7 | clear yin yang samplesyin samplesyang mema 8 | % initiate variables and construct the data 9 | nb =200; 10 | sig = .20; 11 | 12 | % construct data 13 | leng = 1; 14 | for t=1:nb, 15 | yin(t,:) = [2.*sin(t/nb*pi*leng) 2.*cos(.61*t/nb*pi*leng) (t/nb*sig)]; 16 | yang(t,:) = [-2.*sin(t/nb*pi*leng) .45-2.*cos(.61*t/nb*pi*leng) (t/nb*sig)]; 17 | samplesyin(t,:) = [yin(t,1)+yin(t,3).*randn yin(t,2)+yin(t,3).*randn]; 18 | samplesyang(t,:) = [yang(t,1)+yang(t,3).*randn yang(t,2)+yang(t,3).*randn]; 19 | end 20 | 21 | % plot the data 22 | figure; hold on; 23 | plot(samplesyin(:,1),samplesyin(:,2),'+','Color',[0.6 0.6 0.6]); 24 | 
plot(samplesyang(:,1),samplesyang(:,2),'+','Color',[0.6 0.6 0.6]); 25 | xlabel('X_1'); 26 | ylabel('X_2'); 27 | title('Structured dataset'); 28 | disp(' (press any key)'); 29 | pause 30 | 31 | % 32 | % kernel based Principal Component Analysis 33 | % 34 | disp(' '); 35 | disp(' extract the principal eigenvectors in feature space'); 36 | disp(' >> nb_pcs=4;'); nb_pcs = 4; 37 | disp(' >> sig2 = .8;'); sig2 = .8; 38 | disp(' >> [lam,U] = kpca([samplesyin;samplesyang],''RBF_kernel'',sig2,[],''eigs'',nb_pcs); '); 39 | [lam,U] = kpca([samplesyin;samplesyang],'RBF_kernel',sig2,[],'eigs',nb_pcs); 40 | disp(' (press any key)'); 41 | pause 42 | 43 | % 44 | % make a grid over the inputspace 45 | % 46 | disp(' '); 47 | disp(' make a grid over the inputspace:'); 48 | disp('>> Xax = -3:0.1:3; Yax = -2.0:0.1:2.5;'); Xax = -3:0.1:3; Yax = -2.0:0.1:2.5; 49 | disp('>> [A,B] = meshgrid(Xax,Yax);'); [A,B] = meshgrid(Xax,Yax); 50 | disp('>> grid = [reshape(A,prod(size(A)),1) reshape(B,1,prod(size(B)))'']; '); 51 | grid = [reshape(A,numel(A),1) reshape(B,1,numel(B))']; 52 | 53 | 54 | % 55 | % compute projections of each point of the inputspace on the 56 | % principal components 57 | % 58 | disp(' '); 59 | disp(' compute projections of each point of the inputspace on the '); 60 | disp(' principal components'); 61 | disp('>> k = kernel_matrix([samplesyin;samplesyang],''RBF_kernel'',sig2,grid)''; '); 62 | k = kernel_matrix([samplesyin;samplesyang],'RBF_kernel',sig2,grid)'; 63 | disp('>> projections = k*U;'); projections = k*U; 64 | disp('>> contour(Xax,Yax,reshape(projections(:,1),length(Yax),length(Xax)));'); 65 | contour(Xax,Yax,reshape(projections(:,1),length(Yax),length(Xax))); 66 | title('Projections onto the first kernel PC'); 67 | disp(' (press any key)'); 68 | pause 69 | 70 | 71 | 72 | % 73 | % Compute the approximate pre-image in the input space 74 | disp(' '); 75 | disp(' Compute the approximate pre-image in the input space'); 76 | 77 | 78 | disp(' For every point, the approximate pre-image is computed using:'); 79 | disp(' ----------------------------------------------------------'); 80 | disp(' '); 81 | disp('>> Xd=preimage_rbf([samplesyin;samplesyang],sig2,U); '); 82 | Xd=preimage_rbf([samplesyin;samplesyang],sig2,U); 83 | figure; hold on; 84 | plot(samplesyin(:,1),samplesyin(:,2),'+','Color',[0.6 0.6 0.6]); 85 | plot(samplesyang(:,1),samplesyang(:,2),'+','Color',[0.6 0.6 0.6]); 86 | xlabel('x_1'); 87 | ylabel('x_2'); 88 | disp('>> plot(Xd(:,1),Xd(:,2),''ko''); '); plot(Xd(:,1),Xd(:,2),'bo'); 89 | disp(' '); 90 | title('Denoising (''o'') by computing an approximate pre-image'); 91 | disp(' '); 92 | disp(' In the last figure, one can see the original datapoints'); 93 | disp('(''*'') and the reconstructed data (''o''). '); 94 | disp(' '); 95 | disp(' '); 96 | disp(' This concludes this demo'); 97 | hold off -------------------------------------------------------------------------------- /democlass.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | 4 | clc; 5 | 6 | disp('A simple example shows how to start using the toolbox for a'); 7 | disp('classification task. We start with constructing a simple example'); 8 | disp('dataset according to the right formatting. 
Data are represented '); 9 | disp('as matrices where each row contains one datapoint: '); 10 | disp(' '); 11 | disp('press key'); pause 12 | disp(' '); 13 | 14 | disp(' >> X = 2.*rand(30,2)-1;'); 15 | X = 2.*rand(30,2)-1; 16 | disp(' >> Y = sign(sin(X(:,1))+X(:,2));'); 17 | Y = sign(sin(X(:,1))+X(:,2)); 18 | disp(' >> X'); 19 | X 20 | 21 | disp(' >> Y'); 22 | Y 23 | 24 | disp(' '); 25 | disp('press key'); pause 26 | disp(' '); 27 | 28 | disp('In order to make an LS-SVM model, we need 2 extra parameters: gamma'); 29 | disp('(gam) is the regularization parameter, determining the trade-off'); 30 | disp('between the fitting error minimization and smoothness. In the'); 31 | disp('common case of the RBF kernel, sigma^2 (sig2) is the bandwidth:'); 32 | disp(' '); 33 | disp(' >> gam = 10;'); 34 | gam = 10; 35 | disp(' >> sig2 = 0.2;'); 36 | sig2 = 0.2; 37 | disp(' >> type = ''classification'';'); 38 | type = 'classification'; 39 | disp(' >> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel''});'); 40 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel'}); 41 | 42 | disp(' '); 43 | disp('press key'); pause 44 | disp(' '); 45 | 46 | disp('The parameters and the variables relevant for the LS-SVM are'); 47 | disp('passed as one cell. This cell allows for consistent default'); 48 | disp('handling of LS-SVM parameters and syntactical grouping of related'); 49 | disp('arguments. This definition should be used consistently throughout'); 50 | disp('the use of that specific LS-SVM model.'); 51 | disp('The corresponding object oriented interface'); 52 | disp('to LS-SVMlab leads to shorter function calls (see demomodel). '); 53 | 54 | disp('By default, the data are preprocessed by application of the function'); 55 | disp('prelssvm to the raw data and the function postlssvm on the'); 56 | disp('predictions of the model. This option can explicitly be switched off in'); 57 | disp('the call: '); 58 | 59 | disp(' '); 60 | disp(' >> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''original''});'); 61 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel','original'}); 62 | disp(' '); 63 | disp('or be switched on (by default):'); 64 | disp(' '); 65 | disp(' >> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''});'); 66 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'}); 67 | 68 | disp(' '); 69 | disp('press key'); pause 70 | disp(' '); 71 | 72 | %disp('Remember to consistently use the same option in all successive calls'); 73 | disp('To evaluate new points for this model, the function'); 74 | disp('simlssvm is used:'); 75 | disp(' '); 76 | disp(' >> Xt = 2.*rand(10,2)-1;'); 77 | Xt = 2.*rand(10,2)-1; 78 | disp(' >> Ytest = simlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''},{alpha,b},Xt);'); 79 | Ytest = simlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'},{alpha,b},Xt); 80 | disp(' '); 81 | disp('The LS-SVM result can be displayed if the dimension of the input'); 82 | disp('data is 2. '); 83 | 84 | disp(' '); 85 | disp('press key'); pause 86 | disp(' '); 87 | 88 | disp(' '); 89 | disp(' >> plotlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''},{alpha,b});'); 90 | figure; plotlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'},{alpha,b}); 91 | disp(' '); 92 | disp('All plotting is done with this simple command. It looks for the'); 93 | disp('best way of displaying the result. 
'); 94 | disp(' '); 95 | disp(' This concludes the demo'); 96 | -------------------------------------------------------------------------------- /democonfint.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | 4 | clc; 5 | 6 | disp('This is a simple demo, solving a simple regression task using'); 7 | disp('LS-SVMlab and constructing confidence intervals. A dataset is constructed in the right formatting. The'); 8 | disp('data are represented as matrices where each row contains one'); 9 | disp('datapoint: '); 10 | disp(' '); 11 | disp('press key'); pause 12 | disp(' '); 13 | 14 | disp('>> X = (-3:0.02:3)'';'); 15 | X = (-3:0.02:3)'; 16 | disp('>> Y = sinc(X)+0.1.*randn(length(X),1);'); 17 | eval('Y = sinc(X)+0.1.*randn(length(X),1);',... 18 | 'Y = sin(pi.*X+12345*eps)./(pi*X+12345*eps)+0.1.*randn(length(X),1);'); 19 | disp('>> X'); 20 | X 21 | 22 | disp('>> Y'); 23 | Y 24 | 25 | disp('In order to make an LS-SVM model, we need 2 extra parameters: gamma'); 26 | disp('(gam) is the regularization parameter, determining the trade-off'); 27 | disp('between the fitting error minimization and smoothness of the'); 28 | disp('estimated function. sigma^2 (sig2) is the kernel function'); 29 | disp('parameter of the RBF kernel. These can be found via cross-validation:'); 30 | disp(' '); 31 | 32 | model = initlssvm(X,Y,'f',[],[],'RBF_kernel','o'); 33 | 34 | disp('>> model = tunelssvm(model,''simplex'',''crossvalidatelssvm'',{10,''mse''});'); 35 | 36 | model = tunelssvm(model,'simplex','crossvalidatelssvm',{10,'mse'}); 37 | disp(' '); 38 | disp('press key'); pause 39 | disp(' '); 40 | 41 | disp('Training the model '); 42 | 43 | disp(' '); 44 | disp('>> model = trainlssvm(model)'); 45 | model = trainlssvm(model); 46 | disp(' '); 47 | 48 | 49 | 50 | disp('Computation of Confidence Intervals '); 51 | disp(' '); 52 | disp('press key'); pause 53 | 54 | 55 | disp('ci = cilssvm(model);') 56 | ci = cilssvm(model); 57 | 58 | 59 | disp('The LS-SVM result and confidence intervals can be displayed if the dimension of the input'); 60 | disp('data is 1 or 2. '); 61 | 62 | disp(' '); 63 | disp('>> plotlssvm(model);'); 64 | figure; plotlssvm(model); 65 | disp(' '); 66 | 67 | hold all 68 | fill([X;flipud(X)],[ci(:,1);flipud(ci(:,2))],'c','FaceAlpha',0.5,'EdgeAlpha',1,'EdgeColor','w') 69 | 70 | disp('All plotting is done with this simple command. It looks for the'); 71 | disp('best way of displaying the result.') 72 | disp(' '); 73 | disp(' This concludes the demo'); -------------------------------------------------------------------------------- /demofun.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | 4 | clc; 5 | 6 | disp('This is a simple demo, solving a simple regression task using'); 7 | disp('LS-SVMlab. A dataset is constructed in the right formatting. The'); 8 | disp('data are represented as matrices where each row contains one'); 9 | disp('datapoint: '); 10 | disp(' '); 11 | disp('press key'); pause 12 | disp(' '); 13 | 14 | disp('>> X = (-3:0.2:3)'';'); 15 | X = (-3:0.2:3)'; 16 | disp('>> Y = sinc(X)+0.1.*randn(length(X),1);'); 17 | eval('Y = sinc(X)+0.1.*randn(length(X),1);',... 
18 | 'Y = sin(pi.*X+12345*eps)./(pi*X+12345*eps)+0.1.*randn(length(X),1);'); 19 | disp('>> X'); 20 | X 21 | 22 | disp('>> Y'); 23 | Y 24 | 25 | disp('In order to make an LS-SVM model, we need 2 extra parameters: gamma'); 26 | disp('(gam) is the regularization parameter, determining the trade-off'); 27 | disp('between the fitting error minimization and smoothness of the'); 28 | disp('estimated function. sigma^2 (sig2) is the kernel function'); 29 | disp('parameter of the RBF kernel:'); 30 | 31 | disp(' '); 32 | disp('>> gam = 10;'); 33 | gam = 10; 34 | disp('>> sig2 = 0.3;'); 35 | sig2 = 0.3; 36 | disp('>> type = ''function estimation'';'); 37 | type = 'function estimation'; 38 | disp('>> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel''});'); 39 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel'}); 40 | disp(' '); 41 | disp('press key'); pause 42 | disp(' '); 43 | 44 | disp('The parameters and the variables relevant for the LS-SVM are'); 45 | disp('passed as one cell. This cell allows for consistent default'); 46 | disp('handling of LS-SVM parameters and syntactical grouping of related'); 47 | disp('arguments. This definition should be used consistently throughout'); 48 | disp('the use of that specific LS-SVM model'); 49 | disp('The object oriented interface to LS-SVMlab leads to'); 50 | disp('shorter function calls (see demomodel). '); 51 | 52 | disp('By default, the data are preprocessed by application of the function'); 53 | disp('prelssvm to the raw data and the function postlssvm on the'); 54 | disp('predictions of the model. This option can explicitly be switched off in'); 55 | disp('the call: '); 56 | 57 | disp(' '); 58 | disp('>> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''original''});'); 59 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel','original'}); 60 | disp(' '); 61 | 62 | disp('or can be switched on (default):'); 63 | 64 | disp(' '); 65 | disp('>> [alpha,b] = trainlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''});'); 66 | [alpha,b] = trainlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'}); 67 | 68 | %disp('Remember to consistently use the same option in all successive calls.'); 69 | disp(' '); 70 | disp('press key'); pause 71 | disp(' '); 72 | 73 | disp('To evaluate new points for this model, the function simlssvm is'); 74 | disp('used. At first, test data is generated: '); 75 | 76 | disp(' '); 77 | disp('>> Xt = 3.*randn(10,1);'); 78 | Xt = 3.*randn(10,1); 79 | disp(' '); 80 | 81 | disp('Then, the obtained model is simulated on the test data:'); 82 | 83 | disp(' '); 84 | disp('>> Yt = simlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''},{alpha,b},Xt);'); 85 | Yt = simlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'},{alpha,b},Xt); 86 | disp(' '); 87 | disp('>> Y'); 88 | Y 89 | disp(' '); 90 | disp('press key'); pause 91 | disp(' '); 92 | 93 | 94 | disp('The LS-SVM result can be displayed if the dimension of the input'); 95 | disp('data is 1 or 2. '); 96 | 97 | disp(' '); 98 | disp('>> plotlssvm({X,Y,type,gam,sig2,''RBF_kernel'',''preprocess''},{alpha,b});'); 99 | figure; plotlssvm({X,Y,type,gam,sig2,'RBF_kernel','preprocess'},{alpha,b}); 100 | disp(' '); 101 | 102 | disp('All plotting is done with this simple command. It looks for the'); 103 | disp('best way of displaying the result. 
When the real function is known,'); 104 | disp('it can be displayed as follows:'); 105 | disp(' '); 106 | disp('>> hold on; plot(min(X):.1:max(X),sinc(min(X):.1:max(X)),''r-.'');'); hold off 107 | Xt = (min(X):.1:max(X))'; 108 | eval('Yt = sinc(Xt);',... 109 | 'Yt = sin(pi.*Xt+12345*eps)./(pi*Xt+12345*eps)+0.1.*randn(length(Xt),1);'); 110 | hold on; plot(Xt,Yt,'r-.'); hold off 111 | disp(' '); 112 | disp(' This concludes the demo'); -------------------------------------------------------------------------------- /demomodel.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | 4 | clc; 5 | disp(' This demo explains the use of the advanced object oriented interface'); 6 | disp(' ''model''. For first users, we recommend the functional interface'); 7 | disp(' as explained in ''democlass'' and ''demofun''.'); 8 | disp(' '); 9 | disp(' The ''model'' is the generic object which collects '); 10 | disp(' all relevant signals, parameters, options and functions '); 11 | disp(' related to an application of the LS-SVM.'); 12 | disp(' This interface is depreciated for the casional users because'); 13 | disp(' of the implicit nature: the distinction between in- and '); 14 | disp(' output vanishes. If one wants to use the full power '); 15 | disp(' of LS-SVMlab, one is recommended to go through this demo.'); 16 | disp(' '); 17 | disp(' We focus on function estimation, however the insights are completely'); 18 | disp( 'equivalent for classification. A dataset is constructed at first:'); 19 | disp(' '); 20 | disp('> X = (-3:.2:3)'''); 21 | X = (-3:.2:3)'; 22 | disp('> Y = X.^3+2.*randn(length(X),1);'); 23 | Y = X.^3+2.*randn(length(X),1); 24 | disp(' '); 25 | disp(' A model is CONSTRUCTED for this data'); 26 | disp(' '); 27 | disp(' >> gam=1; sig2=1;'); gam=1; sig2=1; 28 | disp(' >> model = initlssvm(X,Y,''function'',gam,sig2,''RBF_kernel'');');model = initlssvm(X,Y,'function',gam,sig2,'RBF_kernel'); 29 | disp(' '); 30 | disp('press enter to continue...'); 31 | pause; 32 | 33 | 34 | disp(' The specifications of the model can be seen typing just the'); 35 | disp(' name of the object that is constructed.'); 36 | disp(' >> model');model 37 | disp(' '); 38 | disp(' If one wants to see the value of a specific option,'); 39 | disp(' the ''.'' operator is to be used:'); 40 | disp(' '); 41 | disp(' >> model.preprocess'); model.preprocess 42 | disp(' '); 43 | disp('press enter to continue...'); 44 | pause; 45 | 46 | disp(' If one wants to CHANGE a value of a specific option of the'); 47 | disp(' model, the function ''changelssvm'' is to be used if consistent'); 48 | disp(' models are wanted:'); 49 | disp(' '); 50 | disp(' >> model = changelssvm(model,''gam'',1.2);');model = changelssvm(model,'gam',1.2); 51 | disp(' '); 52 | disp(' The options can be divided in 4 classes, the general LS-SVM options,'); 53 | disp(' the trainpoint administration,'); 54 | disp(' the preprocess options and the encoding options.'); 55 | disp(' The help of ''changelssvm'' gives a description of the different'); 56 | disp(' fields of the model (type ''help changelssvm''). '); 57 | disp(' The use of the model''s field ''status'' is important to see the full power'); 58 | disp(' of the model concept. The object ''model'' knows '); 59 | disp(' whether it needs to be retrained. 
Retraining is needed if a specification'); 60 | disp(' of the model is changed since the last training.'); 61 | disp(' '); 62 | disp(' The demo ... shows how to control the preprocessing and the coding'); 63 | disp(' using the appropriate model.'); 64 | disp(' '); 65 | disp(' It is adviced to check carefully the model''s options before'); 66 | disp(' starting the calculations.'); 67 | disp(' '); 68 | disp('press enter to continue...'); 69 | pause; 70 | 71 | disp(' If the model is clearly defined, the routine to train the model'); 72 | disp(' has to be called on this model. In the case of the LS-SVM, '); 73 | disp(' this training is done by:'); 74 | disp(' '); 75 | disp(' >> model = trainlssvm(model);');model = trainlssvm(model); 76 | disp(' '); 77 | disp(' Given the trained model, one can simulate some testpoints'); 78 | disp(' and make a plot of the model. (wait a few seconds...)'); 79 | disp(' '); 80 | disp(' >> Xt = 2.*randn(10,1);');Xt = 2.*randn(10,1); 81 | disp(' >> Yt = simlssvm(model,Xt)');Yt= simlssvm(model,Xt) 82 | disp(' >> plotlssvm(model);');plotlssvm(model); 83 | disp(' '); 84 | disp(' By understanding this step, one masters basicly how to '); 85 | disp(' use the object ''model''.'); 86 | disp(' '); 87 | disp(' As an extra, the underlying function is also given as a dotted line') 88 | disp(' on the plot'); 89 | disp('>> hold on; plot((-3:.2:3)'', sinc(-3:.2:3)'','':'');'); 90 | hold on; plot((-4:.2:4)', (-4:.2:4).^3','r:'); 91 | disp(' '); 92 | disp('press enter to continue...'); 93 | pause; 94 | 95 | 96 | disp(' The same model can be used to try different options:'); 97 | disp(' '); 98 | disp(' >> model = changelssvm(model,''gam'',3);'); model = changelssvm(model,'gam',3); 99 | disp(' >> model = trainlssvm(model);'); model = trainlssvm(model); 100 | disp(' >> plotlssvm(model);');plotlssvm(model); 101 | disp(' '); 102 | disp(' or with a polynomial kernel of degree 3'); 103 | disp(' '); 104 | disp(' >> model = changelssvm(model,''kernel_type'',''poly_kernel'');'); model = changelssvm(model,'kernel_type','poly_kernel'); 105 | disp(' >> model = changelssvm(model,''kernel_pars'',[1;3]);');model = changelssvm(model,'kernel_pars',[1;3]); 106 | disp(' >> model = trainlssvm(model);'); model = trainlssvm(model); 107 | disp(' >> plotlssvm(model);');plotlssvm(model); 108 | disp(' '); 109 | 110 | disp(' This concludes this demo'); 111 | 112 | -------------------------------------------------------------------------------- /demomulticlass.m: -------------------------------------------------------------------------------- 1 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 2 | 3 | %Multi-class example 4 | clear all 5 | clc 6 | disp('This is a simple demo, solving a simple multiclass classification problem with'); 7 | disp('LS-SVMlab using the object interface. The problem is solved using the One vs. 
One coding scheme.'); 8 | disp('A dataset is constructed in the right formatting.') 9 | disp(' '); 10 | disp('press key'); pause 11 | disp(' '); 12 | %% 13 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 14 | % generate multi-class data composed out of a mixture of 2 gaussians 15 | 16 | DIM=2;SIZE=50;SIZEte=5000;SIZE=floor(SIZE/2)*2;SIZEte=floor(SIZEte/2)*2; 17 | 18 | randn('state',0); 19 | X=[]; 20 | X=[X ; 1.6*randn(SIZE/2,DIM)+repmat([0 0],SIZE/2,1) ]; 21 | X=[X ; 0.9*randn(SIZE/2,DIM)+repmat([2 0],SIZE/2,1) ]; 22 | X=[X ; 0.8*randn(SIZE/2,DIM)+repmat([-1 1],SIZE/2,1)]; 23 | X=[X ; 0.9*randn(SIZE/2,DIM)+repmat([-1.3 3.5],SIZE/2,1)]; 24 | X=[X ; 1*randn(SIZE/2,DIM)+repmat([-2 1],SIZE/2,1)]; 25 | X=[X ; 0.9*randn(SIZE/2,DIM)+repmat([-3.5 0.2],SIZE/2,1)] ; 26 | %[X,m,v,s]=standardize(X); 27 | y=[];for i=1:3, y= [y ; i*ones(SIZE,1)]; end 28 | 29 | Xt=[]; 30 | Xt=[Xt ; 1.6*randn(SIZEte/2,DIM)+repmat([0 0],SIZEte/2,1) ]; 31 | Xt=[Xt ; 0.9*randn(SIZEte/2,DIM)+repmat([2 0],SIZEte/2,1) ]; 32 | Xt=[Xt ; 0.8*randn(SIZEte/2,DIM)+repmat([-1 1],SIZEte/2,1)]; 33 | Xt=[Xt ; 0.9*randn(SIZEte/2,DIM)+repmat([-1.3 3.5],SIZEte/2,1)]; 34 | Xt=[Xt ; 1*randn(SIZEte/2,DIM)+repmat([-2 1],SIZEte/2,1)]; 35 | Xt=[Xt ; 0.9*randn(SIZEte/2,DIM)+repmat([-3.5 0.2],SIZEte/2,1)] ; 36 | 37 | %Xt=standardize(Xt,m,v,s); 38 | yt=[];for i=1:3, yt= [yt ; i*ones(SIZEte,1)];end 39 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 40 | %% 41 | disp('Start initialization, tuning procedure and training'); 42 | disp(''); 43 | disp('>> model = initlssvm(X,y,''c'',[],[],''RBF_kernel'');') 44 | disp('>> model = tunelssvm(model,''simplex'',''crossvalidatelssvm'',{10,''misclass''},''code_OneVsOne'');') 45 | disp('>> model = trainlssvm(model);') 46 | disp('press key'); pause 47 | disp(' '); 48 | 49 | t1=cputime; 50 | model = initlssvm(X,y,'c',[],[],'RBF_kernel'); 51 | model = tunelssvm(model,'simplex','crossvalidatelssvm',{10,'misclass'},'code_OneVsOne'); 52 | model = trainlssvm(model); 53 | Y = simlssvm(model,Xt); 54 | 55 | t2=cputime; 56 | fprintf(1,'Tuning time %i \n',t2-t1); 57 | fprintf(1,'Accuracy: %2.2f\n',100*sum(Y==yt)/length(yt)); 58 | plotlssvm(model,[],150); 59 | 60 | 61 | -------------------------------------------------------------------------------- /denoise_kpca.m: -------------------------------------------------------------------------------- 1 | function [Xd,lam,U] = denoise_kpca(Xo,A1,A2,A3,A4,A5) 2 | % Reconstruct the data mapped on the first principal components 3 | % 4 | % >> Xd = denoise_kpca(X, kernel, kernel_par); 5 | % 6 | % Denoising can be done by moving the point in inputspace so that 7 | % its corresponding map to feature space is optimized. This means 8 | % that the data point in feature space is as close as possible with 9 | % its corresponding reconstructed points using the principal 10 | % components. 
If the principal components are to be calculated on 11 | % the same data X as the one one wants to denoise, use the command: 12 | % 13 | % >> Xd = denoise_kpca(X, kernel, kernel_par); 14 | % >> [Xd,lam,U] = denoise_kpca(X, kernel, kernel_par, [], type, nb); 15 | % 16 | % When one wants to denoise data 'Xt' other than the data used to obtain the principal components: 17 | % 18 | % >> Xd = denoise_kpca(X, kernel, kernel_par, Xt); 19 | % >> [Xd, lam, U] = denoise_kpca(X, kernel, kernel_par, Xt, type, nb); 20 | % 21 | % 22 | % Full syntax 23 | % 24 | % >> [Xd, lam, U] = denoise_kpca(X, kernel, kernel_par, Xt); 25 | % >> [Xd, lam, U] = denoise_kpca(X, kernel, kernel_par, Xt, type); 26 | % 27 | % Outputs 28 | % Xd : N x d (Nt x d) matrix with denoised data X (Xt) 29 | % lam(*) : nb x 1 vector with eigenvalues of principal components 30 | % U(*) : N x nb (Nt x d) matrix with principal eigenvectors 31 | % Inputs 32 | % X : N x d matrix with data points used for finding the principal components 33 | % kernel : Kernel type (e.g. 'RBF_kernel') 34 | % kernel_par : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 35 | % Xt(*) : Nt x d matrix with the points to denoise (if not specified, X is denoised instead) 36 | % type(*) : 'eig'(*), 'svd', 'eigs', 'eign' 37 | % nb(*) : Number of principal components used in approximation 38 | % 39 | % >> Xd = denoise_kpca(X, U, lam, kernel, kernel_par, Xt); 40 | % 41 | % Outputs 42 | % Xd : N x d (Nt x d) matrix with denoised data X (Xt) 43 | % Inputs 44 | % X : N x d matrix with data points used for finding the principal components 45 | % U : N x nb (Nt x d) matrix with principal eigenvectors 46 | % lam : nb x 1 vector with eigenvalues of principal components 47 | % kernel : Kernel type (e.g. 'RBF_kernel') 48 | % kernel_par : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 49 | % Xt(*) : Nt x d matrix with the points to denoise (if not specified, X is denoised instead) 50 | % 51 | % See also: 52 | % kpca, kernel_matrix, RBF_kernel 53 | 54 | 55 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 56 | 57 | if ~exist('fminunc'), 58 | error('This function needs the optimization function ''fminunc''.'); 59 | end 60 | 61 | 62 | if isstr(A1), 63 | kernel = A1; 64 | par = A2; 65 | eval('if isempty(Xt), Xt = A3; end','Xt = Xo;'); 66 | eval('etype = A4;','etype = ''svd'';'); 67 | eval('nb = A5;','nb = ''inf'';'); 68 | [~,U] = kpca(Xo,kernel,par); 69 | [lam,U] = kpca(Xo,kernel,par,[], etype, nb); 70 | else 71 | U = A1; 72 | lam = A2; 73 | kernel = A3; 74 | par = A4; 75 | eval('Xt = A5;','Xt = Xo;'); 76 | end 77 | 78 | warning off 79 | [nb,d] = size(Xt); 80 | for n=1:nb, 81 | x = Xt(n,:); 82 | %dist_phi(x,x,kernel,par,U,lam,Xo) 83 | Xd(n,:) = fminunc(@dist_phi,x,[],x,kernel,par,U,lam,Xo); 84 | end 85 | warning on 86 | 87 | 88 | function d = dist_phi(x,xor,kernel,par,U,lam,Xo) 89 | % the distance in feature space between x and the subspace spanned 90 | % by U,lam 91 | 92 | k = kernel_matrix(Xo,kernel,par,[x;xor]); 93 | betas = k(:,1)'*U; 94 | alphas = k(:,2)'*U; 95 | d = feval(kernel,x,x,par)-2*betas*alphas'; -------------------------------------------------------------------------------- /eign.m: -------------------------------------------------------------------------------- 1 | function [V,D,Ann] = eign(A1, A2,A3, args) 2 | % Find the principal eigenvalues and eigenvectors of a matrix with Nystr�m's low rank approximation method 3 | % 4 | % >> D = eign(A, nb) 5 | % >> [V, D] = eign(A, nb) 6 | % 7 | % 
In the case of using this method for low rank approximation and 8 | % decomposing the kernel matrix, one can call the function without 9 | % explicit construction of the matrix A. 10 | % 11 | % >> D = eign(X, kernel, kernel_par, nb) 12 | % >> [V, D] = eign(X, kernel, kernel_par, nb) 13 | % 14 | % 15 | % Full syntax 16 | % (We denote the size of positive definite matrix A with a*a.) 17 | % 18 | % 1. Given the full matrix: 19 | % 20 | % >> D = eign(A,nb) 21 | % >> [V,D] = eign(A,nb) 22 | % 23 | % 24 | % Outputs 25 | % V(*) : a x nb matrix with estimated principal eigenvectors of A 26 | % D : nb x 1 vector with principal estimated eigenvalues of A 27 | % Inputs 28 | % A : a*a positive definite symmetric matrix 29 | % nb(*) : Number of approximated principal eigenvalues/eigenvectors 30 | % 31 | % 32 | % 2. Given the function to calculate the matrix elements: 33 | % 34 | % >> D = eign(X, kernel, kernel_par, nb) 35 | % >> [V,D] = eign(X, kernel, kernel_par, nb) 36 | % 37 | % Outputs 38 | % V(*) : a x nb matrix with estimated principal eigenvectors of A 39 | % D : nb x 1 vector with estimated principal eigenvalues of A 40 | % Inputs 41 | % X : N x d matrix with the training data 42 | % kernel : Kernel type (e.g. 'RBF_kernel') 43 | % kernel_par : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 44 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 45 | % 46 | % See also: 47 | % eig, eigs, kpca, bay_lssvm 48 | 49 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 50 | 51 | % AFUN? 52 | if nargin~=2 53 | X = A1; 54 | kernel = A2; 55 | kernel_par = A3; 56 | N = size(X,1); 57 | 58 | %eval(['if args<1, error(''strict positive number of eigenvalues required;'');else n = args; end;'],... 59 | %'n=min(6,ceil(N*.75));'); 60 | if args<1 61 | error('Strict positive number of eigenvalues required'); 62 | else 63 | n = args; 64 | end; 65 | 66 | % random sampling 67 | s = randperm(N); sr=s(n+1:end); s=s(1:n); 68 | %s = ceil(1:(N-1)/(n-1):N); s = s(1:n); 69 | 70 | ANn = zeros(n,N); 71 | ANn = kernel_matrix(X,kernel,kernel_par,X(s,:)); 72 | Ann = ANn(s,:); 73 | 74 | % centering of matrix 75 | Zc = eye(n) - 1/n; 76 | %ZC = eye(N) - 1/N; 77 | Ann = Zc*Ann*Zc; 78 | %ANn = ZC*ANn*Zc; 79 | ANn = (Zc*(ANn*Zc)')'; 80 | else 81 | A = A1; 82 | N = size(A,1); 83 | 84 | %eval(['if args<1, error(''strict positive number of eigenvalues required;'');else n = args; end;'],... 
85 | %'n=min(6,ceil(N*.75));'); 86 | 87 | if A2<1 88 | error('Strict positive number of eigenvalues required'); 89 | else 90 | n = A2; 91 | end; 92 | 93 | % random sampling 94 | s = randperm(N); sr=s(n+1:end); s=s(1:n); 95 | %s = ceil(1:(N-1)/(n-1):N); s = s(1:n); 96 | 97 | 98 | ANn = A(:,s); 99 | Ann = A(s,s); 100 | end 101 | 102 | 103 | 104 | 105 | % 106 | % compute eigenvalues en vectors of low rank approximation 107 | % 108 | [Vn,Dn] = eig(Ann);Dn = diag(Dn); 109 | 110 | 111 | % 112 | % select only relevant eigenvalues and sort 113 | % (only largest eigenvectors are orthogonal) 114 | % 115 | [Dn,peff] = sort(Dn(find(Dn>1000*eps))); 116 | Dn = Dn(end:-1:1); peff = peff(end:-1:1); 117 | Vn = Vn(:,peff); 118 | 119 | % 120 | % Nystrom correction 121 | % 122 | D = (N/n).*Dn; 123 | 124 | 125 | % 126 | % eigenvectoren correctie 127 | % 128 | if nargout>1, 129 | %V = zeros(n,length(peff)); 130 | for i=1:length(peff), 131 | V(:,i) = (sqrt(n/N)/Dn(peff(i)))*ANn*Vn(:,peff(i)); 132 | end 133 | 134 | % 135 | % correction of found eigenvectors: 136 | % orthogonal and unit length 137 | % 138 | 139 | % svd 140 | %[V,D2,ff] = svd(V*diag(D.^.5)); V = V(:,1:length(peff)); 141 | 142 | % gram schmidt 143 | %[V,r] = gramschmidt2(V); 144 | %D = D.*r; 145 | 146 | %D = diag(D.^2); 147 | else 148 | V=D; 149 | end 150 | 151 | -------------------------------------------------------------------------------- /gcrossvalidate.m: -------------------------------------------------------------------------------- 1 | function cost = gcrossvalidate(model,estfct) 2 | 3 | % Estimate the model performance of a model with genralized crossvalidation 4 | % for regression with the LS-SVM 5 | % 6 | % >> cost = gcrossvalidate({Xtrain,Ytrain,type,gam,sig2}) 7 | % >> cost = gcrossvalidate( model) 8 | % 9 | % Instead of dividing the data into $L$ disjoint sets, one takes the 10 | % complete data and the effective degrees of freedom (effective number of parameters) 11 | % into account. The assumption is made that the input data are distributed 12 | % independent and identically over the input space. 13 | % 14 | % >> cost = gcrossvalidate(model) 15 | % 16 | % Some commonly used criteria are: 17 | % 18 | % >> cost = gcrossvalidate(model, 'misclass') 19 | % >> cost = gcrossvalidate(model, 'mse') 20 | % >> cost = gcrossvalidate(model, 'mae') 21 | % 22 | % Full syntax 23 | % 24 | % 1. Using LS-SVMlab with the functional interface: 25 | % 26 | % >> cost = gcrossvalidate({X,Y,type,gam,sig2,kernel,preprocess}, estfct) 27 | % 28 | % Outputs 29 | % cost : Cost estimation of the generalized cross-validation 30 | % 31 | % Inputs 32 | % X : Training input data used for defining the LS-SVM and the preprocessing 33 | % Y : Training output data used for defining the LS-SVM and the preprocessing 34 | % type : 'function estimation' ('f') or 'classifier' ('c') 35 | % gam : Regularization parameter 36 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 37 | % kernel(*) : Kernel type (by default 'RBF_kernel') 38 | % preprocess(*) : 'preprocess'(*) or 'original' 39 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 40 | % 41 | % 2. 
Using the object oriented interface: 42 | % 43 | % >> cost = gcrossvalidate(model, estfct) 44 | % 45 | % Outputs 46 | % cost : Cost estimation of the generalized cross-validation 47 | % 48 | % Inputs 49 | % model : Object oriented representation of the LS-SVM model 50 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 51 | % 52 | % 53 | % 54 | % See also: 55 | % leaveoneout, crossvalidate, trainlssvm, simlssvm 56 | 57 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 58 | 59 | 60 | % LS-SVMlab 61 | eval('model = initlssvm(model{:});',' '); 62 | eval('estfct;','estfct=''mse'';'); 63 | 64 | % 65 | % initialisation and defaults 66 | % 67 | nb_data = size(model.ytrain,1); 68 | 69 | % Y is raw data, non preprocessed 70 | py = model.ytrain; 71 | [~,Y] = postlssvm(model,[],py); 72 | 73 | % Calculate kernel matrix and trace smoother 74 | K = kernel_matrix(model.xtrain,model.kernel_type,model.kernel_pars); 75 | tr = trace(smoother(model,K)); 76 | 77 | % Solve the linear system 78 | S = ones(model.nb_data,1); 79 | sol = linsolve([0 S';S K+eye(model.nb_data)./model.gam],[0;py],struct('SYM',true)); 80 | 81 | % Simulate 82 | yh = K*sol(2:end) + ones(model.nb_data,1)*sol(1); 83 | [~,yh] = postlssvm(model,[],yh); 84 | 85 | % Generalized cross-validation 86 | if ~(model.type(1)=='c') 87 | cost = feval(estfct,yh-Y); 88 | else 89 | cost = feval(estfct,Y,sign(yh)); 90 | end 91 | cost = cost/((1-tr/size(model.ytrain,1))^2); 92 | 93 | 94 | function S = smoother(model,K) 95 | % Smoother Matrix of the LS-SVM 96 | % f = S*y 97 | Z = pinv(K+eye(model.nb_data)./model.gam); 98 | c = sum(sum(Z)); 99 | J = (ones(model.nb_data)./c); 100 | S = K*(Z-Z*J*Z) + J*Z; 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /gcrossvalidatelssvm.m: -------------------------------------------------------------------------------- 1 | function cost = gcrossvalidatelssvm(model,Y,omega,estfct) 2 | 3 | %%%%%%%%%%%%%%%%%%%%% 4 | % INTERNAL FUNCTION % 5 | %%%%%%%%%%%%%%%%%%%%% 6 | % Estimate the model performance of a model with genralized crossvalidation 7 | % for regression with the LS-SVM 8 | 9 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ 10 | % http://www.esat.kuleuven.be/sista/lssvmlab 11 | 12 | % LS-SVMlab 13 | eval('model = initlssvm(model{:});',' '); 14 | model.status = 'changed'; 15 | d = size(model.xtrain,2); 16 | 17 | eval('estfct;','estfct=''mse'';'); 18 | 19 | gams = model.gamcsa; try sig2s = model.kernel_parscsa; catch, sig2s = [];end 20 | 21 | py = Y; 22 | [~,Y] = postlssvm(model,[],Y); % Y is raw data, non preprocessed 23 | 24 | cost = zeros(length(gams),1); 25 | % check whether there are more than one gamma or sigma 26 | for j =1:numel(gams) 27 | if strcmp(model.kernel_type,'RBF_kernel') || strcmp(model.kernel_type,'RBF4_kernel') 28 | model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',sig2s(j)); 29 | elseif strcmp(model.kernel_type,'lin_kernel') 30 | model = changelssvm(model,'gam',gams(j)); 31 | else 32 | model = changelssvm(changelssvm(model,'gam',gams),'kernel_pars',[sig2s(1,j);sig2s(2,j)]); 33 | end 34 | 35 | % Calculate Kernel matrix and trace smoother 36 | K = kernel_matrix2(omega,model.kernel_type,model.kernel_pars,d); 37 | tr = trace(smoother(model,K)); 38 | 39 | % Solve the linear system 40 | S = ones(model.nb_data,1); 41 | sol = linsolve([0 S';S K+eye(model.nb_data)./model.gam],[0;py],struct('SYM',true)); 42 | 43 | % Simulate 44 | %ek = 
sol(2:end)./model.gam; 45 | yh = K*sol(2:end) + ones(model.nb_data,1)*sol(1); 46 | [~,yh] = postlssvm(model,[],yh); 47 | % Generalized cross-validation 48 | if ~(model.type(1)=='c') 49 | cost(j,1) = feval(estfct,yh-Y); 50 | else 51 | cost(j,1) = feval(estfct,Y,sign(yh)); 52 | end 53 | cost(j,1) = cost(j,1)/((1-tr/size(model.ytrain,1))^2); 54 | end 55 | 56 | function S = smoother(model,K) 57 | % Smoother Matrix of the LS-SVM 58 | % 59 | % f = K*alpha+1*b 60 | % f = S*y 61 | Z = pinv(K+eye(model.nb_data)./model.gam); 62 | c = sum(sum(Z)); 63 | J = ones(model.nb_data)./c; 64 | S = K*(Z-Z*J*Z) + J*Z; 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /initlssvm.m: -------------------------------------------------------------------------------- 1 | function model = initlssvm(X,Y,type, gam,sig2, kernel_type, preprocess) 2 | % Initiate the object oriented structure representing the LS-SVM model 3 | % 4 | % model = initlssvm(X,Y, type, gam, sig2) 5 | % model = initlssvm(X,Y, type, gam, sig2, kernel_type) 6 | % 7 | % Full syntax 8 | % 9 | % >> model = initlssvm(X, Y, type, gam, sig2, kernel, preprocess) 10 | % 11 | % Outputs 12 | % model : Object oriented representation of the LS-SVM model 13 | % Inputs 14 | % X : N x d matrix with the inputs of the training data 15 | % Y : N x 1 vector with the outputs of the training data 16 | % type : 'function estimation' ('f') or 'classifier' ('c') 17 | % kernel(*) : Kernel type (by default 'RBF_kernel') 18 | % preprocess(*) : 'preprocess'(*) or 'original' 19 | % 20 | % see also: 21 | % trainlssvm, simlssvm, changelssvm, codelssvm, prelssvm 22 | 23 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 24 | 25 | 26 | 27 | % check enough arguments? 28 | if nargin<5, 29 | error('Not enough arguments to initialize model..'); 30 | elseif ~isnumeric(sig2), 31 | error(['Kernel parameter ''sig2'' needs to be a (array of) reals' ... 32 | ' or the empty matrix..']); 33 | end 34 | 35 | % 36 | % CHECK TYPE 37 | % 38 | if type(1)~='f' 39 | if type(1)~='c' 40 | if type(1)~='t' 41 | if type(1)~='N' 42 | error('type has to be ''function (estimation)'', ''classification'', ''timeserie'' or ''NARX'''); 43 | end 44 | end 45 | end 46 | end 47 | model.type = type; 48 | 49 | % 50 | % check datapoints 51 | % 52 | model.x_dim = size(X,2); 53 | model.y_dim = size(Y,2); 54 | 55 | if and(type(1)~='t',and(size(X,1)~=size(Y,1),size(X,2)~=0)), error('number of datapoints not equal to number of targetpoints...'); end 56 | model.nb_data = size(X,1); 57 | %if size(X,1)> H = kentropy(X, U, lam) 7 | % 8 | % The eigenvalue decomposition can also be computed (or 9 | % approximated) implicitly: 10 | % 11 | % >> H = kentropy(X, kernel, sig2) 12 | % 13 | % 14 | % Full syntax 15 | % 16 | % >> H = kentropy(X, kernel, kernel_par) 17 | % >> H = kentropy(X, kernel, kernel_par, type) 18 | % >> H = kentropy(X, kernel, kernel_par, type, nb) 19 | % 20 | % Outputs 21 | % H : Quadratic Renyi entropy of the kernel matrix 22 | % Inputs 23 | % X : N x d matrix with the training data 24 | % kernel : Kernel type (e.g. 
'RBF_kernel') 25 | % kernel_par : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 26 | % type(*) : 'eig'(*), 'eigs', 'eign' 27 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 28 | % 29 | % 30 | % >> H = kentropy(X, U, lam) 31 | % 32 | % Outputs 33 | % H : Quadratic Renyi entropy of the kernel matrix 34 | % Inputs 35 | % X : N x d matrix with the training data 36 | % U : N x nb matrix with principal eigenvectors 37 | % lam : nb x 1 vector with eigenvalues of principal components 38 | % 39 | % See also: 40 | % kernel_matrix, RBF_kernel, demo_fixedsize 41 | 42 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 43 | 44 | n= size(X,1); 45 | 46 | if isstr(A1), % kernel_matrix 47 | 48 | kernel = A1; 49 | kernel_par = A2; 50 | eval('etype = A3;','etype =''eig'';'); 51 | if ~(strcmp(etype, 'eig') |strcmp(etype, 'eigs') |strcmp(etype,'eign')), 52 | error('type has to be ''eig'', ''eigs'' or ''eign''...'); 53 | end 54 | eval('nb = A4;',' '); 55 | 56 | if strcmp(etype,'eign'), 57 | eval('[U,lam] = eign(X,kernel,kernel_par,nb);','[U,lam] = eign(X,kernel,kernel_par);'); 58 | else 59 | omega = kernel_matrix(X, kernel, kernel_par); 60 | eval('[U,lam] = feval(etype,omega,nb);','[U,lam] = feval(etype,omega);'); 61 | if size(lam,1)==size(lam,2), lam = diag(lam); end 62 | %onen = ones(n,1)./n; en = -log(onen'*omega*onen); 63 | end 64 | 65 | 66 | else 67 | U = A1; 68 | lam = A2; 69 | end 70 | en = -log((sum(U,1)/n).^2 * lam); 71 | -------------------------------------------------------------------------------- /kernel_matrix.m: -------------------------------------------------------------------------------- 1 | function omega = kernel_matrix(Xtrain,kernel_type, kernel_pars,Xt) 2 | % Construct the positive (semi-) definite and symmetric kernel matrix 3 | % 4 | % >> Omega = kernel_matrix(X, kernel_fct, sig2) 5 | % 6 | % This matrix should be positive definite if the kernel function 7 | % satisfies the Mercer condition. Construct the kernel values for 8 | % all test data points in the rows of Xt, relative to the points of X. 
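% Illustrative sketch (not part of the original kentropy.m): a minimal,
% hedged example of how kentropy can score a candidate working set, in the
% spirit of support-vector selection for fixed-size LS-SVM; the data X and
% the bandwidth sig2 below are hypothetical.
X = randn(100, 2);                          % hypothetical training data
sig2 = 0.5;                                 % hypothetical RBF bandwidth
idx = randperm(100);
subset = X(idx(1:10), :);                   % random working set of 10 points
H = kentropy(subset, 'RBF_kernel', sig2);   % quadratic Renyi entropy score
% A larger H indicates a subset that covers the input space more evenly.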
9 | % 10 | % >> Omega_Xt = kernel_matrix(X, kernel_fct, sig2, Xt) 11 | % 12 | % 13 | % Full syntax 14 | % 15 | % >> Omega = kernel_matrix(X, kernel_fct, sig2) 16 | % >> Omega = kernel_matrix(X, kernel_fct, sig2, Xt) 17 | % 18 | % Outputs 19 | % Omega : N x N (N x Nt) kernel matrix 20 | % Inputs 21 | % X : N x d matrix with the inputs of the training data 22 | % kernel : Kernel type (by default 'RBF_kernel') 23 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 24 | % Xt(*) : Nt x d matrix with the inputs of the test data 25 | % 26 | % See also: 27 | % RBF_kernel, lin_kernel, kpca, trainlssvm, kentropy 28 | 29 | 30 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 31 | 32 | [nb_data,d] = size(Xtrain); 33 | 34 | 35 | if strcmp(kernel_type,'RBF_kernel'), 36 | if nargin<4, 37 | XXh = sum(Xtrain.^2,2)*ones(1,nb_data); 38 | omega = XXh+XXh'-2*(Xtrain*Xtrain'); 39 | omega = exp(-omega./(2*kernel_pars(1))); 40 | else 41 | XXh1 = sum(Xtrain.^2,2)*ones(1,size(Xt,1)); 42 | XXh2 = sum(Xt.^2,2)*ones(1,nb_data); 43 | omega = XXh1+XXh2' - 2*Xtrain*Xt'; 44 | omega = exp(-omega./(2*kernel_pars(1))); 45 | end 46 | 47 | elseif strcmp(kernel_type,'RBF4_kernel'), 48 | if nargin<4, 49 | XXh = sum(Xtrain.^2,2)*ones(1,nb_data); 50 | omega = XXh+XXh'-2*(Xtrain*Xtrain'); 51 | omega = 0.5*(3-omega./kernel_pars).*exp(-omega./(2*kernel_pars(1))); 52 | else 53 | XXh1 = sum(Xtrain.^2,2)*ones(1,size(Xt,1)); 54 | XXh2 = sum(Xt.^2,2)*ones(1,nb_data); 55 | omega = XXh1+XXh2' - 2*Xtrain*Xt'; 56 | omega = 0.5*(3-omega./kernel_pars).*exp(-omega./(2*kernel_pars(1))); 57 | end 58 | 59 | % elseif strcmp(kernel_type,'sinc_kernel'), 60 | % if nargin<4, 61 | % omega = sum(Xtrain,2)*ones(1,size(Xtrain,1)); 62 | % omega = omega - omega'; 63 | % omega = sinc(omega./kernel_pars(1)); 64 | % else 65 | % XXh1 = sum(Xtrain,2)*ones(1,size(Xt,1)); 66 | % XXh2 = sum(Xt,2)*ones(1,nb_data); 67 | % omega = XXh1-XXh2'; 68 | % omega = sinc(omega./kernel_pars(1)); 69 | % end 70 | 71 | elseif strcmp(kernel_type,'lin_kernel') 72 | if nargin<4, 73 | omega = Xtrain*Xtrain'; 74 | else 75 | omega = Xtrain*Xt'; 76 | end 77 | 78 | elseif strcmp(kernel_type,'poly_kernel') 79 | if nargin<4, 80 | omega = (Xtrain*Xtrain'+kernel_pars(1)).^kernel_pars(2); 81 | else 82 | omega = (Xtrain*Xt'+kernel_pars(1)).^kernel_pars(2); 83 | end 84 | 85 | % elseif strcmp(kernel_type,'wav_kernel') 86 | % if nargin<4, 87 | % XXh = sum(Xtrain.^2,2)*ones(1,nb_data); 88 | % omega = XXh+XXh'-2*(Xtrain*Xtrain'); 89 | % 90 | % XXh1 = sum(Xtrain,2)*ones(1,nb_data); 91 | % omega1 = XXh1-XXh1'; 92 | % omega = cos(kernel_pars(3)*omega1./kernel_pars(2)).*exp(-omega./kernel_pars(1)); 93 | % 94 | % else 95 | % XXh1 = sum(Xtrain.^2,2)*ones(1,size(Xt,1)); 96 | % XXh2 = sum(Xt.^2,2)*ones(1,nb_data); 97 | % omega = XXh1+XXh2' - 2*(Xtrain*Xt'); 98 | % 99 | % XXh11 = sum(Xtrain,2)*ones(1,size(Xt,1)); 100 | % XXh22 = sum(Xt,2)*ones(1,nb_data); 101 | % omega1 = XXh11-XXh22'; 102 | % 103 | % omega = cos(kernel_pars(3)*omega1./kernel_pars(2)).*exp(-omega./kernel_pars(1)); 104 | % end 105 | end -------------------------------------------------------------------------------- /kernel_matrix2.m: -------------------------------------------------------------------------------- 1 | function omega = kernel_matrix2(omega,kernel_type, kernel_pars,d) 2 | % INTERNAL USE ONLY 3 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 4 | 5 | if strcmp(kernel_type,'RBF_kernel') 6 | omega = 
exp(-omega./(2*kernel_pars)); 7 | 8 | elseif strcmp(kernel_type,'RBF4_kernel') 9 | omega = 0.5*(3-omega./kernel_pars).*exp(-omega./(2*kernel_pars)); 10 | 11 | % elseif strcmp(kernel_type,'sinc_kernel') 12 | % omega = sinc(omega./kernel_pars); 13 | 14 | % elseif strcmp(kernel_type,'wav_kernel') 15 | % omega = cos(kernel_pars(3)*omega{2}./kernel_pars(2)).*exp(-omega{1}./kernel_pars(1)); 16 | 17 | elseif strcmp(kernel_type,'lin_kernel') 18 | return 19 | 20 | elseif strcmp(kernel_type,'poly_kernel') 21 | omega = (omega + kernel_pars(1)).^kernel_pars(2); 22 | end 23 | 24 | -------------------------------------------------------------------------------- /kpca.m: -------------------------------------------------------------------------------- 1 | function [eigval, eigvec, scores, omega,recErrors,optOut] = kpca(Xtrain, kernel_type, kernel_pars ,Xt,etype,nb,rescaling) 2 | % Kernel Principal Component Analysis (KPCA) 3 | % 4 | % >> [eigval, eigvec] = kpca(X, kernel_fct, sig2) 5 | % >> [eigval, eigvec, scores] = kpca(X, kernel_fct, sig2, Xt) 6 | % 7 | % Compute the nb largest eigenvalues and the corresponding rescaled 8 | % eigenvectors corresponding with the principal components in the 9 | % feature space of the centered kernel matrix. To calculate the 10 | % eigenvalue decomposition of this N x N matrix, Matlab's 11 | % eig is called by default. The decomposition can also be 12 | % approximated by Matlab ('eigs') or by Nystrom method ('eign') 13 | % using nb components. In some cases one wants to disable 14 | % ('original') the rescaling of the principal components in feature 15 | % space to unit length. 16 | % 17 | % The scores of a test set Xt on the principal components is computed by the call: 18 | % 19 | % >> [eigval, eigvec, scores] = kpca(X, kernel_fct, sig2, Xt) 20 | % 21 | % Full syntax 22 | % 23 | % >> [eigval, eigvec, empty, omega] = kpca(X, kernel_fct, sig2) 24 | % >> [eigval, eigvec, empty, omega] = kpca(X, kernel_fct, sig2, [],etype, nb) 25 | % >> [eigval, eigvec, empty, omega] = kpca(X, kernel_fct, sig2, [],etype, nb, rescaling) 26 | % >> [eigval, eigvec, scores, omega] = kpca(X, kernel_fct, sig2, Xt) 27 | % >> [eigval, eigvec, scores, omega] = kpca(X, kernel_fct, sig2, Xt,etype, nb) 28 | % >> [eigval, eigvec, scores, omega] = kpca(X, kernel_fct, sig2, Xt,etype, nb, rescaling) 29 | % >> [eigval, eigvec, scores, omega, recErrors] = kpca(X, kernel_fct, sig2, Xt) 30 | % >> [eigval, eigvec, scores, omega, recErrors] = kpca(X, kernel_fct, sig2, Xt,etype, nb) 31 | % >> [eigval, eigvec, scores, omega, recErrors] = kpca(X, kernel_fct, sig2, Xt,etype, nb, rescaling) 32 | % >> [eigval, eigvec, scores, omega, recErrors, optOut] = kpca(X, kernel_fct, sig2, Xt) 33 | % >> [eigval, eigvec, scores, omega, recErrors, optOut] = kpca(X, kernel_fct, sig2, Xt,etype, nb) 34 | % >> [eigval, eigvec, scores, omega, recErrors, optOut] = kpca(X, kernel_fct, sig2, Xt,etype, nb, rescaling) 35 | % 36 | % Outputs 37 | % eigval : N (nb) x 1 vector with eigenvalues values 38 | % eigvec : N x N (N x nb) matrix with the principal directions 39 | % scores(*) : Nt x nb matrix with the scores of the test data (or []) 40 | % omega(*) : N x N centered kernel matrix 41 | % recErrors(*) : Nt x 1 vector with the reconstruction error of the test data 42 | % optOut(*) : Optional cell array containing the centered test kernel matrix in optOut{1} 43 | % and the squared 2-norm of the test points in the feature space in optOut{2} 44 | % Inputs 45 | % X : N x d matrix with the inputs of the training data 46 | % kernel : Kernel type 
(e.g. 'RBF_kernel') 47 | % sig2 : Kernel parameter(s) (for linear kernel, use []) 48 | % Xt(*) : Nt x d matrix with the inputs of the test data (or []) 49 | % etype(*) : 'svd', 'eig'(*),'eigs','eign' 50 | % nb(*) : Number of eigenvalues/eigenvectors used in the eigenvalue decomposition approximation 51 | % rescaling(*) : 'original size' ('o') or 'rescaled'(*) ('r') 52 | % 53 | % See also: 54 | % bay_lssvm, bay_optimize, eign 55 | 56 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 57 | 58 | 59 | % 60 | % defaults 61 | % 62 | nb_data = size(Xtrain,1); 63 | 64 | 65 | if ~exist('nb','var') 66 | nb=10; 67 | end; 68 | 69 | if ~exist('etype','var') 70 | etype='eig'; 71 | end; 72 | 73 | if ~exist('rescaling','var') 74 | rescaling='r'; 75 | end; 76 | 77 | 78 | 79 | 80 | %eval('n=min(n,nb_data);','n=min(10,nb_data);') 81 | % eval('centr;','centr=''rescaled'';'); 82 | % eval('etype;','etype=''eig'';'); 83 | % eval('Xt;','Xt=[];'); 84 | 85 | 86 | % 87 | % tests 88 | % 89 | if exist('Xt','var') && ~isempty(Xt) && size(Xt,2)~=size(Xtrain,2), 90 | error('Training points and test points need to have the same dimension'); 91 | end 92 | 93 | if ~(strcmpi(etype,'svd') || strcmpi(etype,'eig') || strcmpi(etype,'eigs') || strcmpi(etype,'eign')), 94 | error('Eigenvalue decomposition via ''svd'', ''eig'', ''eigs'' or ''eign''...'); 95 | end 96 | 97 | 98 | if (strcmpi(etype,'svd') || strcmpi(etype,'eig') || strcmpi(etype,'eigs')), 99 | 100 | omega = kernel_matrix(Xtrain,kernel_type, kernel_pars); 101 | 102 | % Centering 103 | Meanvec = mean(omega,2); 104 | MM = mean(Meanvec); 105 | omega=omega-Meanvec*ones(1,nb_data)-ones(nb_data,1)*Meanvec'+MM; 106 | 107 | % numerical stability issues 108 | %omega = (omega+omega')./2; 109 | 110 | if strcmpi(etype,'svd'), 111 | [eigvec, eigval] = svd(omega); 112 | elseif strcmpi(etype,'eig'), 113 | [eigvec, eigval] = eig(omega); 114 | elseif (strcmpi(etype,'eigs')), 115 | [eigvec, eigval] = eigs(omega,nb); 116 | end 117 | %eigval = diag(eigval)./(nb_data-1); 118 | eigval=diag(eigval); 119 | 120 | 121 | 122 | elseif strcmpi(etype,'eign'), 123 | if nargout>1, 124 | [eigvec,eigval] = eign(Xtrain,kernel_type,kernel_pars, nb); 125 | else 126 | eigval = eign(Xtrain,kernel_type,kernel_pars, nb); 127 | end 128 | omega = []; 129 | %eigval = (eigval)./(nb_data-1); 130 | Meanvec = []; 131 | MM = []; 132 | 133 | else 134 | error('Unknown type for eigenvalue approximation'); 135 | end 136 | 137 | 138 | %% Eigenvalue/vector sorting in descending order 139 | 140 | [eigval,evidx]=sort(eigval,'descend'); 141 | eigvec=eigvec(:,evidx); 142 | 143 | 144 | 145 | %% 146 | 147 | 148 | % 149 | % only keep relevant eigvals & eigvec 150 | % 151 | peff = find(eigval>1000*eps); 152 | %eigval = eigval(peff); 153 | neff = length(peff); 154 | %if nargout>1, eigvec = eigvec(:,peff); end 155 | 156 | % rescaling the eigenvectors 157 | if (rescaling(1) =='r' && nargout>1), 158 | %disp('rescaling the eigvec'); 159 | for i=1:neff, 160 | eigvec(:,i) = eigvec(:,i)./sqrt(eigval(i)); 161 | end 162 | end 163 | 164 | 165 | % 166 | % compute scores 167 | % 168 | if exist('Xt','var') && ~isempty(Xt), 169 | nt=size(Xt,1); 170 | omega_t = kernel_matrix(Xtrain,kernel_type, kernel_pars,Xt); 171 | MeanvecT=mean(omega_t,1); 172 | omega_t=omega_t-Meanvec*ones(1,nt) - ones(nb_data,1)*MeanvecT+MM; 173 | scores = omega_t'*eigvec; 174 | 175 | normProjXt=diag(omega_t'*(eigvec*eigvec')*omega_t); 176 | 177 | 178 | if strcmp(kernel_type,'RBF_kernel') 179 | ks = ones(1,nt); 180 | else 
181 | 182 | for i = 1:nt 183 | ks(i) = feval(kernel_type,Xt(i,:),Xt(i,:),kernel_pars); 184 | end; 185 | 186 | end; 187 | 188 | normPhiXt=ks'-MeanvecT'*2+MM; 189 | 190 | recErrors= normPhiXt-normProjXt; 191 | 192 | optOut={omega_t,normPhiXt}; 193 | else 194 | scores = []; 195 | end 196 | 197 | 198 | 199 | 200 | -------------------------------------------------------------------------------- /latentlssvm.m: -------------------------------------------------------------------------------- 1 | function [zt,model] = latentlssvm(varargin) 2 | % Calculate the latent variables of the LS-SVM classifier at the given test data 3 | % 4 | % >> Zt = latentlssvm({X,Y,'classifier',gam,sig2,kernel}, {alpha,b}, Xt) 5 | % >> Zt = latentlssvm({X,Y,'classifier',gam,sig2,kernel}, Xt) 6 | % >> [Zt, model] = latentlssvm(model, Xt) 7 | % 8 | % The latent variables of a binary classifier are the continuous 9 | % simulated values of the test data which are used to make the 10 | % final classifications. The classification of a testpoint depends 11 | % on whether the latent value exceeds the model's threshold (b). If 12 | % appropriate, the model is trained by the standard procedure (trainlssvm) first. 13 | % 14 | % As an application example: crossvalidation can be based on the latent variables: 15 | % 16 | % >> cost = crossvalidate(model, X, Y, 10, 'mse', 'mean', 'original', 'trainlssvm', 'latentlssvm') 17 | % 18 | % 19 | % Full syntax 20 | % 21 | % 1. Using the functional interface: 22 | % 23 | % >> Zt = latentlssvm({X,Y,type,gam,sig2,kernel,preprocess}, Xt) 24 | % 25 | % Outputs 26 | % Zt : Nt x m matrix with predicted latent simulated outputs 27 | % Inputs 28 | % X : N x d matrix with the inputs of the training data 29 | % Y : N x 1 vector with the outputs of the training data 30 | % type : 'classifier' ('c') 31 | % gam : Regularization parameter 32 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 33 | % kernel(*) : Kernel type (by default 'RBF_kernel') 34 | % preprocess(*) : 'preprocess'(*) or 'original' 35 | % Xt : Nt x d matrix with the inputs of the test data 36 | % 37 | % 38 | % 2. Using the object oriented interface: 39 | % 40 | % >> [Zt, model] = latentlssvm(model, Xt) 41 | % 42 | % Outputs 43 | % Zt : Nt x m matrix with continuous latent simulated outputs 44 | % model(*) : Trained object oriented representation of the LS-SVM model 45 | % Inputs 46 | % model : Object oriented representation of the LS-SVM model 47 | % Xt : Nt x d matrix with the inputs of the test data 48 | % 49 | % See also: 50 | % trainlssvm, simlssvm 51 | 52 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 53 | 54 | model = varargin{1}; 55 | if iscell(model), 56 | model = initlssvm(model{:}); 57 | end 58 | 59 | if model.type(1)~='c', 60 | error('Only usefull for classification tasks...'); 61 | end 62 | [~, zt, model] = simlssvm(varargin{:}); -------------------------------------------------------------------------------- /latticeseq_b2.m: -------------------------------------------------------------------------------- 1 | function x = latticeseq_b2(s, n, zvec) 2 | % function x = latticeseq_b2(s, n) 3 | % 4 | % Generate the points from a lattice sequence in base 2 in radical inverse 5 | % ordering. 
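% Illustrative sketch (not part of the original files): obtaining the latent
% variables of a binary LS-SVM classifier with latentlssvm; the toy data and
% the tuning values gam and sig2 below are hypothetical.
X = randn(50, 2);
Y = sign(X(:,1) + X(:,2));                  % hypothetical binary labels
gam = 10; sig2 = 0.4;                       % hypothetical tuning parameters
[alpha, b] = trainlssvm({X, Y, 'c', gam, sig2, 'RBF_kernel'});
Zt = latentlssvm({X, Y, 'c', gam, sig2, 'RBF_kernel'}, {alpha, b}, X);
% sign(Zt) gives the hard class labels; the magnitude of Zt can serve as a
% confidence measure, e.g. for ROC analysis with roc.m.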
6 | % 7 | % Note: the generator fist has to be initialized, e.g., by 8 | % latticeseq_b2 init0 9 | % Afterwards one can obtain n s-dimensional samples in an [s x n] array as 10 | % x = latticeseq_b2(s, n); 11 | % 12 | % The default lattice sequence in this file is the ``universal'' lattice 13 | % sequence optimized for an unanchored Sobolev space with order-2 weights from 14 | % the paper by Cools, Kuo and Nuyens in SIAM SISC (2006). 15 | % This sequence is ``universal'' in the sense that the weights do not really 16 | % matter, as such, there are no good or bad weights... 17 | % For this lattice sequence the maximum number of dimensions is 250 and the 18 | % maximum number of points is 2^20 = 1048576. The worst-case error was 19 | % optimized starting from 2^10 = 1024 points (this means it may perform badly 20 | % for less than 1024 samples). 21 | % 22 | % Inputs: 23 | % s the number of dimensions per vector 24 | % n the number of samples you want 25 | % Output: 26 | % x an array of sample vectors, size [s x n] 27 | % 28 | % Usage: 29 | % 30 | % 1. Initialize the generator with the default lattice sequence 31 | % 32 | % latticeseq_b2 init0 33 | % 34 | % or initialize the generator with a user supplied generating vector for n 35 | % points 36 | % 37 | % latticeseq_b2('init0', n, zvec) 38 | % 39 | % Valid options for intialization are: 40 | % init0 the lattice sequence as is, the first point is the zero vector 41 | % init1 the first point is changed into an all ones vector 42 | % initskip skip the first point 43 | % 44 | % 2. Generate the next n s-vectors of the sequence, returning an array of 45 | % dimensions s x n: 46 | % 47 | % P = latticeseq_b2(s, n) 48 | % 49 | % 3. Ask the state of the current generator with 50 | % 51 | % latticeseq_b2 state 52 | % 53 | % Example: 54 | % latticeseq_b2('init0'); % Initialize the generator. 55 | % x1 = latticeseq_b2(10, 1024); % This gives an array of 1024 10-dimensional samples. 56 | % x2 = latticeseq_b2(10, 1024); % This will give you the next 1024 10-dimensional samples. 57 | % latticeseq_b2('init0'); % This resets the state of the generator. 58 | % x3 = latticeseq_b2(10, 1024); % This will give the same samples as in x1. 59 | % 60 | % (C) 2005, 2010, Dirk Nuyens, Department of Computer Science, K.U.Leuven, Belgium 61 | 62 | persistent z smax k N b m initmode recipd maxbit 63 | 64 | if ischar(s) && strncmp(s, 'init', 4) 65 | k = uint32(0); 66 | maxbit = 32; 67 | recipd = pow2(-maxbit); 68 | if strcmp(s, 'init0') 69 | k = 0; 70 | initmode = 0; 71 | elseif strcmp(s, 'init1') 72 | k = 0; 73 | initmode = 1; 74 | elseif strcmp(s, 'initskip') 75 | initmode = -1; 76 | k = 1; 77 | else 78 | error('I only know about ''init0'', ''init1'' and ''initskip'', what are you talking about?'); 79 | end; 80 | if (nargin == 3) 81 | % z is given by the user 82 | z = zvec; 83 | N = n; 84 | else 85 | z = [ 1 182667 469891 498753 110745 446247 250185 118627 245333 283199 ... 86 | 408519 391023 246327 126539 399185 461527 300343 69681 516695 436179 ... 87 | 106383 238523 413283 70841 47719 300129 113029 123925 410745 211325 ... 88 | 17489 511893 40767 186077 519471 255369 101819 243573 66189 152143 ... 89 | 503455 113217 132603 463967 297717 157383 224015 502917 36237 94049 ... 90 | 170665 79397 123963 223451 323871 303633 98567 318855 494245 477137 ... 91 | 177975 64483 26695 88779 94497 239429 381007 110205 339157 73397 ... 92 | 407559 181791 442675 301397 32569 147737 189949 138655 350241 63371 ... 
93 | 511925 515861 434045 383435 249187 492723 479195 84589 99703 239831 ... 94 | 269423 182241 61063 130789 143095 471209 139019 172565 487045 304803 ... 95 | 45669 380427 19547 425593 337729 237863 428453 291699 238587 110653 ... 96 | 196113 465711 141583 224183 266671 169063 317617 68143 291637 263355 ... 97 | 427191 200211 365773 254701 368663 248047 209221 279201 323179 80217 ... 98 | 122791 316633 118515 14253 129509 410941 402601 511437 10469 366469 ... 99 | 463959 442841 54641 44167 19703 209585 69037 33317 433373 55879 ... 100 | 245295 10905 468881 128617 417919 45067 442243 359529 51109 290275 ... 101 | 168691 212061 217775 405485 313395 256763 152537 326437 332981 406755 ... 102 | 423147 412621 362019 279679 169189 107405 251851 5413 316095 247945 ... 103 | 422489 2555 282267 121027 369319 204587 445191 337315 322505 388411 ... 104 | 102961 506099 399801 254381 452545 309001 147013 507865 32283 320511 ... 105 | 264647 417965 227069 341461 466581 386241 494585 201479 151243 481337 ... 106 | 68195 75401 58359 448107 459499 9873 365117 350845 181873 7917 436695 ... 107 | 43899 348367 423927 437399 385089 21693 268793 49257 250211 125071 ... 108 | 341631 310163 94631 108795 21175 142847 383599 71105 65989 446433 ... 109 | 177457 107311 295679 442763 40729 322721 420175 430359 480757 ]'; 110 | N = pow2(20); 111 | end; 112 | smax = length(z); 113 | b = 2; 114 | m = ceil(log(N)/log(b)); 115 | return; 116 | elseif ischar(s) && strcmp(s, 'state') 117 | x.z = z; 118 | x.smax = smax; 119 | x.k = k; 120 | x.N = N; 121 | x.b = b; 122 | x.m = m; 123 | x.initmode = initmode; 124 | return; 125 | end; 126 | 127 | if ((k + n) > N) || (s > smax) 128 | error(sprintf('Can only generate %d lattice points in %d dimensions', N, smax)); 129 | end; 130 | 131 | x = zeros(s, n); 132 | 133 | if (k == 0) && (initmode == 0) 134 | x(:, 1) = 0; si = 2; k = k + 1; 135 | elseif (k == 0) && (initmode == 1) 136 | x(:, 1) = 1; si = 2; k = k + 1; 137 | else 138 | si = 1; 139 | end; 140 | 141 | for i=si:n 142 | rk = bitreverse32(k); 143 | x(:, i) = mod(double(rk) * recipd * z(1:s), 1); 144 | k = k + 1; 145 | end; -------------------------------------------------------------------------------- /leaveoneout.m: -------------------------------------------------------------------------------- 1 | function cost = leaveoneout(model, estfct,combinefct) 2 | 3 | % Estimate the performance of a trained model with leave-one-out crossvalidation 4 | % 5 | % CAUTION!! Use this function only to obtain the value of the leave-one-out score 6 | % function given the tuning parameters. Do not use this function together with 7 | % 'tunelssvm', but use 'leaveoneoutlssvm' instead. The latter is a faster 8 | % implementation which uses previously computed results. 9 | % 10 | % >> leaveoneout({X,Y,type,gam,sig2}) 11 | % >> leaveoneout(model) 12 | % 13 | % In each iteration, one leaves one point, and fits a model on the 14 | % other data points. The performance of the model is estimated 15 | % based on the point left out. This procedure is repeated for each 16 | % data point. Finally, all the different estimates of the 17 | % performance are combined (default by computing the mean). The 18 | % assumption is made that the input data is distributed independent 19 | % and identically over the input space. 20 | % 21 | % 22 | % Full syntax 23 | % 24 | % 1. 
Using the functional interface for the LS-SVMs: 25 | % 26 | % >> cost = leaveoneout({X,Y,type,gam,sig2,kernel,preprocess}) 27 | % >> cost = leaveoneout({X,Y,type,gam,sig2,kernel,preprocess}, estfct) 28 | % >> cost = leaveoneout({X,Y,type,gam,sig2,kernel,preprocess}, estfct, combinefct) 29 | % 30 | % Outputs 31 | % cost : Cost estimated by leave-one-out crossvalidation 32 | % 33 | % Inputs 34 | % X : Training input data used for defining the LS-SVM and the preprocessing 35 | % Y : Training output data used for defining the LS-SVM and the preprocessing 36 | % type : 'function estimation' ('f') or 'classifier' ('c') 37 | % gam : Regularization parameter 38 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 39 | % kernel(*) : Kernel type (by default 'RBF_kernel') 40 | % preprocess(*) : 'preprocess'(*) or 'original' 41 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 42 | % combinefct(*) : Function combining the estimated costs on the different folds (by default mean) 43 | % 44 | % 45 | % 2. Using the object oriented interface for the LS-SVMs: 46 | % 47 | % >> cost = leaveoneout(model) 48 | % >> cost = leaveoneout(model, estfct) 49 | % >> cost = leaveoneout(model, estfct, combinefct) 50 | % 51 | % Outputs 52 | % cost : Cost estimated by leave-one-out crossvalidation 53 | % 54 | % Inputs 55 | % model : Object oriented representation of the model 56 | % estfct(*) : Function estimating the cost based on the residuals (by default mse) 57 | % combinefct(*) : Function combining the estimated costs on the different folds (by default mean) 58 | % 59 | % 60 | % 61 | % See also: 62 | % crossvalidate, trainlssvm, simlssvm 63 | 64 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 65 | 66 | % LS-SVMlab 67 | eval('model = initlssvm(model{:});',' '); 68 | eval('estfct;','estfct=''mse'';'); 69 | eval('combinefct;','combinefct=''mean'';'); 70 | 71 | % 72 | %initialize: no incremental memory allocation 73 | % 74 | p = randperm(model.nb_data); 75 | px = model.xtrain(p,:); 76 | py = model.ytrain(p,:); 77 | [~,Y] = postlssvm(model,[],py); % Y is raw data, non preprocessed 78 | 79 | % kernel matrix computation 80 | K = kernel_matrix(px,model.kernel_type,model.kernel_pars); 81 | 82 | Ka = pinv([K+eye(model.nb_data)./model.gam ones(model.nb_data,1);ones(1,model.nb_data) 0]); 83 | sol = Ka*[py;0]; model.alpha = sol(1:end-1); model.b = sol(end); 84 | yh = py - model.alpha./diag(Ka(1:model.nb_data,1:model.nb_data)); 85 | 86 | [~,yh] = postlssvm(model,[],yh); 87 | if ~(model.type(1)=='c') 88 | cost = feval(estfct,yh-Y); 89 | else 90 | cost = feval(estfct,Y,sign(yh)); 91 | end 92 | 93 | 94 | -------------------------------------------------------------------------------- /leaveoneoutlssvm.m: -------------------------------------------------------------------------------- 1 | function cost = leaveoneoutlssvm(model,Y,omega, estfct) 2 | % Fast leave-one-out cross-validation for the LS-SVM based on one full matrix inversion 3 | % 4 | %%%%%%%%%%%%%%%%%%%%% 5 | % INTERNAL FUNCTION % 6 | %%%%%%%%%%%%%%%%%%%%% 7 | % Estimate the model performance of a model with fast LOO crossvalidation. 8 | % This implementation is based on one full matrix inverse. Implementation 9 | % based on "Z. Ying and K.C. Keong: Fast Leave-One-Out Evaluation and 10 | % Improvement on Inference for LS-SVM's, Proc. 
ICPR, 2004" 11 | 12 | % Copyright (c) 2010, KULeuven-ESAT-SCD, License & help @% http://www.esat.kuleuven.ac.be/sista/lssvmlab 13 | 14 | % 15 | % See also: 16 | % leaveoneout, crossvalidate, trainlssvm 17 | 18 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 19 | 20 | % LS-SVMlab 21 | eval('model = initlssvm(model{:});',' '); 22 | model.status = 'changed'; 23 | d = size(model.xtrain,2); 24 | 25 | eval('estfct;','estfct=''mse'';'); 26 | eval('combinefct;','combinefct=''mean'';'); 27 | 28 | gams = model.gamcsa; try sig2s = model.kernel_parscsa; catch, sig2s = [];end 29 | 30 | % 31 | %initialize: no incremental memory allocation 32 | % 33 | cost = zeros(1,length(gams)); 34 | py = Y; 35 | [~,Y] = postlssvm(model,[],Y); % Y is raw data, non preprocessed 36 | 37 | % check whether there are more than one gamma or sigma 38 | for g =1:numel(gams) 39 | if strcmp(model.kernel_type,'RBF_kernel') || strcmp(model.kernel_type,'RBF4_kernel') 40 | model = changelssvm(changelssvm(model,'gam',gams(g)),'kernel_pars',sig2s(g)); 41 | elseif strcmp(model.kernel_type,'lin_kernel') 42 | model = changelssvm(model,'gam',gams(g)); 43 | elseif strcmp(model.kernel_type,'poly_kernel') 44 | model = changelssvm(changelssvm(model,'gam',gams(g)),'kernel_pars',[sig2s(1,g);sig2s(2,g)]); 45 | else 46 | model = changelssvm(changelssvm(model,'gam',gams(g)),'kernel_pars',[sig2s(1,g);sig2s(2,g);sig2s(3,g)]); 47 | end 48 | 49 | % kernel matrix computation 50 | K = kernel_matrix2(omega,model.kernel_type,model.kernel_pars,d); 51 | 52 | Ka = pinv([K+eye(model.nb_data)./model.gam ones(model.nb_data,1);ones(1,model.nb_data) 0]); 53 | sol = Ka*[py;0]; model.alpha = sol(1:end-1); model.b = sol(end); 54 | yh = py - model.alpha./diag(Ka(1:model.nb_data,1:model.nb_data)); 55 | 56 | [~,yh] = postlssvm(model,[],yh); 57 | if ~(model.type(1)=='c') 58 | cost(g) = feval(estfct,yh-Y); 59 | else 60 | cost(g) = feval(estfct,Y,sign(yh)); 61 | end 62 | 63 | end 64 | -------------------------------------------------------------------------------- /lin_kernel.m: -------------------------------------------------------------------------------- 1 | function x = lin_kernel(a,b) 2 | % kernel function for implicit higher dimension mapping, based on 3 | % the standard inner-product 4 | % 5 | % x = lin_kernel(a,b) 6 | % 7 | % 'a' can only contain one datapoint in a row, 'b' can contain N 8 | % datapoints of the same dimension as 'a'. 9 | % 10 | % see also: 11 | % poly_kernel, RBF_kernel, MLP_kernel, trainlssvm, simlssvm 12 | 13 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 14 | 15 | 16 | x = zeros(size(b,1),1); 17 | for i=1:size(b,1), 18 | x(i,1) = a*b(i,:)'; 19 | end -------------------------------------------------------------------------------- /linesearch.m: -------------------------------------------------------------------------------- 1 | function [Xm,Xval,itr,fig] = linesearch(fun,startvalues,funargs,varargin) 2 | % Global optimization by exhaustive search over the parameter space 3 | % 4 | % >> Xopt = gridsearch(fun, startvalues) 5 | % 6 | % The most simple algorithm to determine the minimum of a cost 7 | % function with possibly multiple optima is to evaluate a grid over 8 | % the parameter space and to pick the minimum. This procedure 9 | % iteratively zooms to the candidate optimum. 10 | % The startvalues determine the limits of the grid over parameter 11 | % space. 
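% Annotation (not part of the original lin_kernel.m): the loop in lin_kernel
% above computes the inner products row by row; under the same calling
% convention (one row in 'a', N rows in 'b'), a vectorized one-liner returns
% the identical N x 1 result:
%   x = b * a';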
12 | % 13 | % 14 | % Full syntax 15 | % 16 | % >> [Xopt, Yopt, Evaluations, fig] = linesearch(fun, startvalues, funargs, option1,value1,...) 17 | % 18 | % Outputs 19 | % Xopt : Optimal parameter set 20 | % Yopt : Criterion evaluated at Xopt 21 | % Evaluations : Used number of function evaluations 22 | % fig : handle to used figure 23 | % Inputs 24 | % fun Function : implementing the cost criterion 25 | % startvalues : 2*d matrix with starting values of the optimization routine 26 | % funargs(*) : Cell with optional extra function arguments of fun 27 | % option (*) : The name of the option one wants to change 28 | % value (*) : The new value of the option one wants to change 29 | % 30 | % The different options and their meanings are: 31 | % 32 | % Nofigure : 'figure'(*) or 'nofigure' 33 | % MaxFunEvals : Maximum number of function evaluations (default: 20) 34 | % GridReduction : grid reduction parameter (e.g. '1.5': 35 | % small reduction; `10': heavy reduction; default '2') 36 | % TolFun : Minimal toleration of improvement on function value (default: 0.01) 37 | % TolX : Minimal toleration of improvement on X value (default: 0.01) 38 | % Grain : number of evaluations per iteration (default: 10) 39 | % 40 | % see also: 41 | % gridsearch, tunelssvm, crossvalidate, fminunc 42 | 43 | 44 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 45 | 46 | 47 | dim = prod(size(startvalues)); 48 | if dim~=2, 49 | error('optimization only possible for 1-dimensional problems...'); 50 | end 51 | 52 | 53 | % 54 | % defaults 55 | % 56 | nofigure='figure'; 57 | eval('funargs;','funargs={};'); 58 | maxFunEvals = 20; 59 | TolFun = .01; 60 | TolX = .01; 61 | grain = 10; 62 | zoomfactor=2; 63 | 64 | 65 | % 66 | % extra input arguments 67 | % 68 | for t=1:2:length(varargin), 69 | if t+1>length(varargin), 70 | warning('extra arguments should occur in pairs (''name'',value)'); 71 | else 72 | if strcmpi(varargin{t},'nofigure'), nofigure=varargin{t+1}; 73 | elseif strcmpi(varargin{t},'maxFunEvals'), maxFunEvals=varargin{t+1} 74 | elseif strcmpi(varargin{t},'zoomfactor'), zoomfactor=varargin{t+1} 75 | elseif strcmpi(varargin{t},'TolFun'), TolFun=varargin{t+1} 76 | elseif strcmpi(varargin{t},'TolX'), TolX=varargin{t+1} 77 | elseif strcmpi(varargin{t},'grain'), grain=varargin{t+1}; 78 | else warning(['option ' varargin{t} ' unknown']); 79 | end 80 | end 81 | end 82 | 83 | itr =maxFunEvals; 84 | 85 | 86 | % 87 | % initiate grid 88 | % 89 | graina = -.5:1/(grain-1):.5; 90 | grid = [min(startvalues) max(startvalues)]; 91 | center = mean(grid); 92 | 93 | if nofigure(1) =='f',fig = figure; hold on; end 94 | 95 | 96 | itr = 0; 97 | Xm_old = inf; 98 | Xval_old = inf; 99 | Xm = -inf; 100 | Xval = -inf; 101 | while itrTolX & norm(Xval-Xval_old)>TolFun, 102 | Xm_old = Xm; 103 | Xval_old = Xval; 104 | 105 | xtrma = [min(startvalues) max(startvalues)]; 106 | xline = xtrma(1):(xtrma(2)-xtrma(1))/(grain-1):xtrma(2); 107 | for i = 1:length(xline), 108 | cost(i) = feval(fun, xline(i), funargs{:}); 109 | itr = itr+1; 110 | if nofigure(1) =='f', 111 | plot(xline(i),cost(i),'dk');drawnow 112 | end 113 | end 114 | 115 | [sc, si] = sort(cost); 116 | Xm = xline(si(1)); 117 | Xval = sc(1); 118 | selected = si(1:ceil(length(si)/zoomfactor)); 119 | startvalues = [min(xline(selected)) max(xline(selected))]; 120 | 121 | end 122 | 123 | 124 | 125 | 126 | -------------------------------------------------------------------------------- /linf.m: 
-------------------------------------------------------------------------------- 1 | function [perf,which] = linf(e) 2 | % L infinity norm of the residuals 3 | % 4 | % >> perf = linf(E); 5 | % 6 | % see also: 7 | % mse, mae, medae, trimmedmse 8 | % 9 | 10 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 11 | 12 | [m1,w1] = max(abs(e)); 13 | [perf,w2] = max(m1); 14 | which = [w1(w2) w2]; -------------------------------------------------------------------------------- /lssvm.m: -------------------------------------------------------------------------------- 1 | function [yp,alpha,b,gam,sig2,model] = lssvm(x,y,type,varargin) 2 | % 3 | % one line LS-SVM calculation 4 | % 5 | % >> [yp,alpha,b,gam,sig2,model] = lssvm(x,y,type,varargin) 6 | % 7 | % x is the N x d input data (uni- or multivariate) and y, N x 1, is the response 8 | % variable 9 | % 10 | % syntax: 11 | % RBF-kernel is used with the standard simplex method 12 | % yp = lssvm(x,y,'f') 13 | % 14 | % lin/poly/RBF kernel is used with the standard simplex method 15 | % yp = lssvm(x,y,'f',kernel) 16 | % 17 | % output: 18 | % yp : N x 1 vector of predicted outputs 19 | % alpha : N x 1 vector of Lagrange multipliers of the LS-SVM 20 | % b : LS-SVM bias term 21 | % gam : tuned regularization constant 22 | % sig2 : squared tuned kernel bandwidth 23 | % model : object oriented interface of the LS-SVM 24 | % 25 | % See also: 26 | % trainlssvm, simlssvm, crossvalidate, leaveoneout, plotlssvm 27 | 28 | 29 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 30 | 31 | if isempty(varargin) 32 | kernel = 'RBF_kernel'; 33 | else 34 | kernel = varargin{1}; 35 | end 36 | 37 | if type(1)=='f' 38 | perffun = 'mse'; 39 | elseif type(1)=='c' 40 | perffun = 'misclass'; 41 | else 42 | error('Type not supported. Choose ''f'' or ''c''') 43 | end 44 | 45 | n = size(x,1); 46 | if n <= 300 47 | optfun = 'leaveoneoutlssvm'; 48 | optargs = {perffun}; 49 | else 50 | optfun = 'crossvalidatelssvm'; 51 | optargs = {10,perffun}; 52 | end 53 | 54 | model = initlssvm(x,y,type,[],[],kernel); 55 | model = tunelssvm(model,'simplex',optfun,optargs); 56 | model = trainlssvm(model); 57 | 58 | if size(x,2) <= 2 59 | plotlssvm(model); 60 | end 61 | 62 | % first output 63 | yp = simlssvm(model,x); 64 | 65 | % second output 66 | alpha = model.alpha; 67 | 68 | % third output 69 | b = model.b; 70 | 71 | % fourth and fifth output 72 | gam = model.gam; sig2 = model.kernel_pars; 73 | 74 | 75 | -------------------------------------------------------------------------------- /lssvmMATLAB.m: -------------------------------------------------------------------------------- 1 | function [model,H] = lssvmMATLAB(model) 2 | % Only for internal LS-SVMlab use; 3 | % 4 | % MATLAB implementation of the LS-SVM algorithm. This is slower 5 | % than the C-mex implementation, but it is more reliable and flexible; 6 | % 7 | % 8 | % This implementation is quite straightforward, based on MATLAB's 9 | % backslash matrix division (or PCG if available) and total kernel 10 | % matrix construction. It has some extensions towards advanced 11 | % techniques, especially applicable on small datasets (weighted 12 | % LS-SVM, gamma-per-datapoint) 13 | 14 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 15 | 16 | 17 | %fprintf('~'); 18 | % 19 | % is it weighted LS-SVM ?
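% Annotation (not part of the original lssvmMATLAB.m): the check below flags
% the model as weighted whenever more regularization constants than output
% dimensions are supplied; a weighted LS-SVM then expects one gamma per
% datapoint, e.g. set via changelssvm (the vector name below is hypothetical):
%   model = changelssvm(model, 'gam', gamma_per_point);   % N x 1 vector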
20 | % 21 | weighted = (length(model.gam)>model.y_dim); 22 | if and(weighted,length(model.gam)~=model.nb_data), 23 | warning('not enough gammas for weighted LS-SVM, simple LS-SVM applied'); 24 | weighted=0; 25 | end 26 | 27 | % computation of omega and H 28 | omega = kernel_matrix(model.xtrain(model.selector, 1:model.x_dim), ... 29 | model.kernel_type, model.kernel_pars); 30 | 31 | 32 | % initialize alpha and b 33 | model.b = zeros(1,model.y_dim); 34 | model.alpha = zeros(model.nb_data,model.y_dim); 35 | 36 | for i=1:model.y_dim, 37 | H = omega; 38 | model.selector=~isnan(model.ytrain(:,i)); 39 | nb_data=sum(model.selector); 40 | if size(model.gam,2)==model.nb_data, 41 | try invgam = model.gam(i,:).^-1; catch, invgam = model.gam(1,:).^-1;end 42 | for t=1:model.nb_data, H(t,t) = H(t,t)+invgam(t); end 43 | else 44 | try invgam = model.gam(i,1).^-1; catch, invgam = model.gam(1,1).^-1;end 45 | for t=1:model.nb_data, H(t,t) = H(t,t)+invgam; end 46 | end 47 | 48 | v = H(model.selector,model.selector)\model.ytrain(model.selector,i); 49 | %eval('v = pcg(H,model.ytrain(model.selector,i), 100*eps,model.nb_data);','v = H\model.ytrain(model.selector, i);'); 50 | nu = H(model.selector,model.selector)\ones(nb_data,1); 51 | %eval('nu = pcg(H,ones(model.nb_data,i), 100*eps,model.nb_data);','nu = H\ones(model.nb_data,i);'); 52 | s = ones(1,nb_data)*nu(:,1); 53 | model.b(i) = (nu(:,1)'*model.ytrain(model.selector,i))./s; 54 | model.alpha(model.selector,i) = v(:,1)-(nu(:,1)*model.b(i)); 55 | end 56 | return 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /lssvm_fitness.m: -------------------------------------------------------------------------------- 1 | function err = lssvm_fitness(x, p_train, t_train, p_test, t_test) 2 | M = numel(t_test); % number of test samples (RMSE denominator) 3 | gam = x(1); 4 | sig2 = x(2); 5 | 6 | %% Simulation and prediction 7 | type = 'function estimation'; 8 | [alpha,b] = trainlssvm({p_train,t_train,type,gam,sig2,'RBF_kernel'}); % train the model 9 | t_sim = simlssvm({p_train,t_train,type,gam,sig2,'RBF_kernel','preprocess'},{alpha,b},p_test); 10 | 11 | %% Root mean squared error 12 | err = sqrt(sum((t_sim - t_test).^2) ./ M); 13 | 14 | end 15 | -------------------------------------------------------------------------------- /mae.m: -------------------------------------------------------------------------------- 1 | function perf=mae(e) 2 | % 3 | % calculate the mean absolute error of the given errors 4 | % 5 | % 'perf = mae(E);' 6 | % 7 | % see also: 8 | % mse, linf 9 | % 10 | 11 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 12 | 13 | 14 | perf = sum(sum(abs(e))) / numel(e); -------------------------------------------------------------------------------- /main.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lxy0068/Data-classification-prediction-based-on-Grey-Wolf-Optimization-Algorithm-GWO_LSSVM-Adaboost/e3ce62e69527100f33ccf2f6018b8fb9b9c067fa/main.m -------------------------------------------------------------------------------- /medae.m: -------------------------------------------------------------------------------- 1 | function perf=medae(e) 2 | % calculate the median absolute error of the given errors 3 | % 4 | % >> perf = medae(E); 5 | % 6 | % see also: 7 | % mse, mae, linf, trimmedmse 8 | % 9 | 10 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 11 | 12 | perf = median(abs(reshape(e,numel(e),1))); 13 |
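% Illustrative sketch (not part of the original files): evaluating the GWO
% fitness of one candidate position [gam, sig2] with lssvm_fitness; the toy
% train/test matrices below are hypothetical and follow trainlssvm's
% rows-as-samples convention.
p_train = rand(80, 3); t_train = sum(p_train, 2) + 0.1*randn(80, 1);
p_test  = rand(20, 3); t_test  = sum(p_test, 2)  + 0.1*randn(20, 1);
rmse = lssvm_fitness([100, 2], p_train, t_train, p_test, t_test);
% GWO minimizes this value, so candidates with a lower test RMSE pull the
% alpha, beta and delta wolves towards better [gam, sig2] regions.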
-------------------------------------------------------------------------------- /misclass.m: -------------------------------------------------------------------------------- 1 | function [perc,n,which] = misclass(Y,Yest) 2 | % The rate of misclassifications. 3 | % 4 | % '[perc,n,which] = misclass(Y,Yest)' 5 | % 6 | % 'Y' contains the real class labels; 7 | % 'Yest' contains the estimated class labels; 8 | % 'perc' is the rate of misclassifications (between 0 and 1); 9 | % 'n' is the number of misclassifications; 10 | % 'which' contains the indices of the misclassified instances 11 | % (the first column gives the column index, the second the row index) 12 | % 13 | % 14 | % see also: 15 | % validate, mse, linf, medae, mae 16 | 17 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 18 | 19 | n = sum(sum(Y~=Yest)); 20 | perc = n/numel(Y); 21 | [I,J] = find(Y~=Yest); 22 | which = [J I]; -------------------------------------------------------------------------------- /mse.m: -------------------------------------------------------------------------------- 1 | function perf=mse(e) 2 | % 3 | % calculate the mean squared error of the given errors 4 | % 5 | % 'perf = mse(E);' 6 | % 7 | % see also: 8 | % mae, linf, trimmedmse 9 | % 10 | 11 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 12 | 13 | 14 | perf = sum(sum(e.^2)) / numel(e); -------------------------------------------------------------------------------- /poly_kernel.m: -------------------------------------------------------------------------------- 1 | function x = poly_kernel(a,b, d) 2 | % polynomial kernel function for implicit higher dimension mapping 3 | % 4 | % X = poly_kernel(a,b,[t,degree]) 5 | % 6 | % 'a' can only contain one datapoint in a row, 'b' can contain N 7 | % datapoints of the same dimension as 'a'. 8 | % 9 | % x = (a*b'+t^2).^degree; 10 | % 11 | % see also: 12 | % RBF_kernel, lin_kernel, MLP_kernel, trainlssvm, simlssvm 13 | % 14 | 15 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 16 | 17 | if length(d)>1, 18 | t=d(1); 19 | d=d(2); 20 | else 21 | d = d(1);t=1; 22 | end 23 | d = (abs(d)>=1)*abs(d)+(abs(d)<1); % force the degree to be >= 1 24 | 25 | x=(b*a'+ t^2).^d; 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /postlssvm.m: -------------------------------------------------------------------------------- 1 | function [model,Yt] = postlssvm(model,Xt,Yt) 2 | % Postprocessing of the LS-SVM 3 | % 4 | % These functions should only be called by trainlssvm or by 5 | % simlssvm. At first the preprocessing assigns a label to each in- 6 | % and output component (c for continuous, a for categorical or b 7 | % for binary variables).
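% Illustrative sketch (not part of the original files): a worked example of
% misclass on toy labels.
Y    = [1;  1; -1; -1];
Yest = [1; -1; -1; -1];
[perc, n, which] = misclass(Y, Yest);
% n = 1 misclassification, perc = n/numel(Y) = 0.25, and which points at the
% single mismatching entry.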
According to this label each dimension is rescaled: 8 | % 9 | % * continuous: zero mean and unit variance 10 | % * categorical: no preprocessing 11 | % * binary: labels -1 and +1 12 | % 13 | % Full syntax (only using the object oriented interface): 14 | % 15 | % >> model = postlssvm(model) 16 | % >> Xp = postlssvm(model, Xt) 17 | % >> [empty, Yp] = postlssvm(model, [], Yt) 18 | % >> [Xp, Yp] = postlssvm(model, Xt, Yt) 19 | % 20 | % Outputs 21 | % model : Postprocessed object oriented representation of the LS-SVM model 22 | % Xp : Nt x d matrix with the postprocessed inputs of the test data 23 | % Yp : Nt x d matrix with the postprocessed outputs of the test data 24 | % Inputs 25 | % model : Object oriented representation of the LS-SVM model 26 | % Xt : Nt x d matrix with the inputs of the test data to postprocess 27 | % Yt : Nt x d matrix with the outputs of the test data to postprocess 28 | % 29 | % 30 | % See also: 31 | % prelssvm, trainlssvm 32 | 33 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 34 | 35 | % 36 | % test whether postprocessing is needed, and if the model is properly coded before 37 | % decoding 38 | % 39 | if model.preprocess(1)~='p', 40 | if nargin>=2, 41 | % no postprocessing nor coding needed 42 | model=Xt; 43 | end 44 | return 45 | end 46 | 47 | 48 | % 49 | % postprocess the LS-SVM 50 | % 51 | if nargin==1, 52 | 53 | % 54 | % execute rescaling as defined 55 | % 56 | if (model.prestatus(1)=='o' & model.preprocess(1)=='p') | ... % 'preprocess' &'ok' 57 | (model.prestatus(1)=='c' & model.preprocess(1)=='o'), % 'original' &'changed' 58 | model=postmodel(model); 59 | model.preprocess = 'original'; 60 | end 61 | model.prestatus='ok'; 62 | 63 | 64 | 65 | % 66 | % rescaling of the inputs to be simulated 67 | % 68 | else 69 | eval('Yt;','Yt=[];'); 70 | [model,Yt] = postmodel(model,Xt,Yt); 71 | end 72 | 73 | 74 | 75 | 76 | 77 | function [model,Yt] = postmodel(model,Xt,Yt) 78 | % 79 | % ' [Xt,Yt] = postmodel(model,Xt,Yt)' 80 | % ' [model] = postmodel(model)' 81 | % 82 | 83 | if nargin==1, 84 | 85 | for i=1:model.x_dim, 86 | % CONTINUOUS VARIABLE: 87 | if model.pre_xscheme(i)=='c', 88 | model.xtrain(:,i) = post_zmuv(model.xtrain(:,i),model.pre_xmean(i),model.pre_xstd(i)); 89 | % CATEGORICAL VARIABLE: 90 | elseif model.pre_xscheme(i)=='a', 91 | model.xtrain(:,i) = post_cat(model.xtrain(:,i),model.pre_xmean(i),model.pre_xstd(i)); 92 | % BINARY VARIABLE: 93 | elseif model.pre_xscheme(i)=='b', 94 | model.xtrain(:,i) = post_bin(model.xtrain(:,i),model.pre_xmean(i),model.pre_xstd(i)); 95 | end 96 | end 97 | 98 | for i=1:model.y_dim, 99 | % CONTINUOUS VARIABLE: 100 | if model.pre_yscheme(i)=='c', 101 | model.ytrain(:,i) = post_zmuv(model.ytrain(:,i),model.pre_ymean(i),model.pre_ystd(i)); 102 | % CATEGORICAL VARIABLE: 103 | elseif model.pre_yscheme(i)=='a', 104 | model.ytrain(:,i) = post_cat(model.ytrain(:,i),model.pre_ymean(i),model.pre_ystd(i)); 105 | % BINARY VARIABLE: 106 | elseif model.pre_yscheme(i)=='b', 107 | model.ytrain(:,i) = post_bin(model.ytrain(:,i),model.pre_ymean(i),model.pre_ystd(i)); 108 | end 109 | end 110 | 111 | else 112 | 113 | if nargin>1, % test data Xt, 114 | if ~isempty(Xt), 115 | if size(Xt,2)~=model.x_dim, warning('dimensions of Xt not compatible with dimensions of support vectors...');end 116 | for i=1:model.x_dim, 117 | % CONTINUOUS VARIABLE: 118 | if model.pre_xscheme(i)=='c', 119 | Xt(:,i) = post_zmuv(Xt(:,i),model.pre_xmean(i),model.pre_xstd(i)); 120 | % CATEGORICAL VARIABLE: 121 | elseif model.pre_xscheme(i)=='a', 122 | Xt(:,i)
= post_cat(Xt(:,i),model.pre_xmean(i),model.pre_xstd(i)); 123 | % BINARY VARIABLE: 124 | elseif model.pre_xscheme(i)=='b', 125 | Xt(:,i) = post_bin(Xt(:,i),model.pre_xmean(i),model.pre_xstd(i)); 126 | end 127 | end 128 | end 129 | 130 | if nargin>2 & ~isempty(Yt), 131 | if size(Yt,2)~=model.y_dim, warning('dimensions of Yt not compatible with dimensions of support vectors...');end 132 | for i=1:model.y_dim, 133 | % CONTINUOUS VARIABLE: 134 | if model.pre_yscheme(i)=='c', 135 | Yt(:,i) = post_zmuv(Yt(:,i),model.pre_ymean(i), model.pre_ystd(i)); 136 | % CATEGORICAL VARIABLE: 137 | elseif model.pre_yscheme(i)=='a', 138 | Yt(:,i) = post_cat(Yt(:,i),model.pre_ymean(i),model.pre_ystd(i)); 139 | % BINARY VARIABLE: 140 | elseif model.pre_yscheme(i)=='b', 141 | Yt(:,i) = post_bin(Yt(:,i),model.pre_ymean(i),model.pre_ystd(i)); 142 | end 143 | end 144 | end 145 | model = Xt; 146 | end 147 | end 148 | 149 | 150 | 151 | function X = post_zmuv(X,mean,var) 152 | % 153 | % postprocessing a continuous signal; rescaling back from zero mean and unit 154 | % variance 155 | % 'c' 156 | % 157 | X = X.*var+mean; 158 | 159 | 160 | function X = post_cat(X,mean,range) 161 | % 162 | % postprocessing a categorical signal; no rescaling is applied 163 | % 'a' 164 | % 165 | X = X; 166 | 167 | 168 | function X = post_bin(X,min,max) 169 | % 170 | % postprocessing a binary signal, rescaling to the original labels; 171 | % 'b' 172 | % 173 | X = min.*(X<=0)+max.*(X>0); 174 | -------------------------------------------------------------------------------- /predict.m: -------------------------------------------------------------------------------- 1 | function prediction = predict(model,Xt,n, simfct, args) 2 | % Iterative prediction of a trained LS-SVM NARX model (in recurrent mode) 3 | % 4 | % >> Yp = predict({Xw,Yw,type,gam,sig2}, Xt, nb) 5 | % >> Yp = predict(model, Xt, nb) 6 | % 7 | % The model needs to be trained using Xw, Yw, which are the result of 8 | % windowize or windowizeNARX. The number of time lags for the model 9 | % is determined by the dimension of the input, or if not 10 | % appropriate, by the number of given starting values. 11 | % 12 | % By default, the model is evaluated on the past points using 13 | % simlssvm. However, if one wants to use this procedure for other 14 | % models, this default can be overwritten by your favorite simulation 15 | % function. This function (denoted by simfct) has to follow the syntax: 16 | % 17 | % >> simfct(model,inputs,arguments) 18 | % 19 | % thus: 20 | % 21 | % >> Yp = predict(model, Xt, nb, simfct) 22 | % >> Yp = predict(model, Xt, nb, simfct, arguments) 23 | % 24 | % 25 | % Full syntax 26 | % 27 | % 1. Using the functional interface for the LS-SVMs: 28 | % 29 | % >> Yp = predict({Xw,Yw,type,gam,sig2,kernel,preprocess}, Xt, nb) 30 | % 31 | % Outputs 32 | % Yp : nb x m matrix with the predictions 33 | % Inputs 34 | % Xw : N x d matrix with the inputs of the training data 35 | % Yw : N x w matrix with the outputs of the training data 36 | % type : 'function estimation' ('f') or 'classifier' ('c') 37 | % gam : Regularization parameter 38 | % sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel') 39 | % kernel(*) : Kernel type (by default 'RBF_kernel') 40 | % preprocess(*) : 'preprocess' or 'original' (by default) 41 | % Xt : nb x d matrix of the starting points for the prediction 42 | % nb(*) : Number of outputs to predict 43 | % 44 | % 45 | % 2.
Using the object oriented interface with LS-SVMs: 46 | % 47 | % >> Yp = predict(model, Xt, nb) 48 | % 49 | % Outputs 50 | % Yp : nb x m matrix with the predictions 51 | % Inputs 52 | % Xt : nb x d matrix of the starting points for the prediction 53 | % nb(*) : Number of outputs to predict 54 | % 55 | % 56 | % 3. Using another model: 57 | % 58 | % >> Yp = predict(model, Xt, nb, simfct, arguments) 59 | % 60 | % Outputs 61 | % Yp : nb x m matrix with the predictions 62 | % Inputs 63 | % Xt : nb x d matrix of the starting points for the prediction 64 | % nb : Number of outputs to predict 65 | % simfct : Function used to evaluate a test point 66 | % arguments(*) : Cell with the extra arguments passed to simfct 67 | % 68 | % See also: 69 | % windowize, trainlssvm, simlssvm. 70 | 71 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab 72 | 73 | if size(Xt,2)~=1 & size(Xt,1)~=1, 74 | error('prediction only implemented for the one-dimensional autonomous case'); 75 | end 76 | eval('model = initlssvm(model{:});',' '); 77 | eval('xdim = model.x_dim;','xdim = max(size(Xt)); '); 78 | eval('n; alle=0;','n=length(Xt)-xdim; alle=1;'); 79 | Xt = Xt(1:xdim); 80 | Xt = reshape(Xt,length(Xt),1); 81 | eval('simfct;','simfct=''simlssvm'';'); 82 | eval('model = trainlssvm(model);'); 83 | 84 | xdelays = length(Xt); 85 | prediction = zeros(xdelays+n,1); 86 | prediction(1:xdelays) = Xt; 87 | 88 | 89 | % closed loop 90 | eval('for i=1:n, prediction(xdelays+i) = feval(simfct,model, prediction(i-1+(1:xdelays))'',args); end',... 91 | 'for i=1:n, prediction(xdelays+i) = feval(simfct,model,prediction(i-1+(1:xdelays))''); end'); 92 | 93 | if ~alle, 94 | prediction = prediction(xdelays+1:end); 95 | end 96 | 97 | -------------------------------------------------------------------------------- /predlssvm.m: -------------------------------------------------------------------------------- 1 | function pi = predlssvm(model,Xt,alpha,conftype) 2 | 3 | % Construction of bias corrected 100(1-\alpha)% pointwise or 4 | % simultaneous prediction intervals 5 | % 6 | % >> pi = predlssvm({X,Y,type,gam,kernel_par,kernel,preprocess}, Xt, alpha,conftype) 7 | % >> pi = predlssvm(model, Xt, alpha, conftype) 8 | % 9 | % This function calculates bias corrected 100(1-\alpha)% pointwise or 10 | % simultaneous prediction intervals. The procedure support homoscedastic 11 | % data sets as well heteroscedastic data sets. The construction of the 12 | % prediction intervals are based on the central limit theorem for linear 13 | % smoothers combined with bias correction and variance estimation. 14 | % 15 | % 1. 
--------------------------------------------------------------------------------
/preimage_rbf.m:
--------------------------------------------------------------------------------
1 | function Ximg = preimage_rbf(Xtr,sig2,U,B,type,npcs,maxIts)
2 | %
3 | % function Ximg = preimage_rbf(Xtr,sig2,U,B,type,npcs,maxIts)
4 | % Reconstruction or denoising after kernel PCA with RBF kernels, i.e. to find the
5 | % approximate preimage (in the input space) of the corresponding feature space expansions
6 | %
7 | % Inputs
8 | %   Xtr : N by d matrix of the training data used to find the principal components.
9 | %   sig2 : parameter for the RBF kernel used, k(x,z)=exp(-norm(x-z)^2/sig2).
10 | %   U : the eigenvectors computed from the kernel PCA using RBF kernel with parameter sig2.
11 | %   B : for reconstruction, B is the compressed data,
12 | %       i.e. the projections of the data on to the first n PCs;
13 | %       for denoising, B is the Nt by d matrix of original noisy data;
14 | %       if not specified, Xtr is denoised instead.
15 | %   type : 'reconstruct' or 'denoise'
16 | %   npcs : number of PCs used for approximation
17 | %   maxIts : maximum iterations allowed to update the preimage, 1000 by default.
18 | %
19 | % Outputs
20 | %   Ximg : the reconstructed or denoised data in the input space
21 | %
22 | % Usage e.g.
23 | % >> [lam,U] = kpca(Xtr,'RBF_kernel',sig2);
24 | % >> [lam, perm] = sort(-lam); lam = -lam; U = U(:,perm);
25 | % >> projections = kernel_matrix(Xtr,'RBF_kernel',sig2,Xtest)'*U;
26 | % >> Xr = preimage_rbf(Xtr,sig2,U,projections(:,1:npcs),'r'); % Reconstruction
27 | % >> Xd = preimage_rbf(Xtr,sig2,U(:,1:npcs),Xnoisy,'d'); % Denoising
28 | % >> Xdtr = preimage_rbf(Xtr,sig2,U(:,1:npcs)); % Denoising on the training data
29 | %
30 | % see also:
31 | %   kpca, denoise_kpca, RBF_kernel
32 | %
33 | % Reference
34 | % Mika S., Schoelkopf B., Smola A., Muller K.-R., Scholz M., Ratsch G. (1999), ``Kernel
35 | % PCA and de-noising in feature spaces'', Advances in Neural Information Processing
36 | % Systems 11, 536-542, MIT Press.
37 | %
38 |
39 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
40 |
41 | MAXDX=1e-6; % Convergence criterion
42 | [~, dim]=size(Xtr);
43 | mX=mean(Xtr); sX=std(Xtr);
44 |
45 | if nargin<4, B = Xtr; end
46 | [Nt, dimB] = size(B);
47 |
48 | if nargin<5
49 |   if dimB==dim, type='denoise'; else type='reconstruct'; end
50 | else
51 |   if type(1)~='d'&type(1)~='r'| (type(1)=='d'&dimB~=dim),
52 |     warning('Invalid type specified, default value is used!')
53 |     if dimB==dim, type='denoise'; else type='reconstruct'; end
54 |   end
55 | end
56 |
57 | if nargin<6
58 |   if type(1)=='r'; npcs=dimB; else npcs=size(U,2); end
59 | else
60 |   if npcs>size(U,2) | (type(1)=='r' & npcs~=dimB),
61 |     warning('Invalid number of PCs, default value is used!'),
62 |     if type(1)=='r'; npcs=dimB; else npcs=size(U,2); end
63 |   end
64 | end
65 |
66 | if nargin<7, maxIts=1000; end
67 |
68 | U=U(:,1:npcs);
69 |
70 | for n=1:Nt
71 |   cont=1; t=0; ts=0;
72 |   if type(1)=='r'
73 |     % reconstruction
74 |     rs = U*B(n,:)';
75 |     x = zeros(1, dim); % set the initial value of approximate preimage for reconstruction
76 |     k = RBF_kernel(x,Xtr,sig2);
77 |   else
78 |     % denoise
79 |     x = B(n,:); % initial value of the approximate preimage for denoising is the noisy data
80 |     k = RBF_kernel(x,Xtr,sig2);
81 |     rs = U*(k'*U)';
82 |   end
83 |
84 |   % iteratively update the approximate preimage x
85 |   %
86 |   while cont,
87 |
88 |     d=rs'*k; % the reconstruction error is (const-2d)
89 |
90 |     if d==0;
91 |       % choose a different starting value
92 |       % [k,id]=min(k); x_new = Xtr(id,:);
93 |       randn('state',cputime+ts); x_new=(randn(1,dim)+mX).*sX;
94 |       fprintf('%5d> Starting value changed!(d=0) \n', ts);
95 |     else
96 |       % update approximate preimage with a linear combination of kpca training data
97 |       x_new = sum(rs.*k*ones(1,dim).*Xtr)/d;
98 |     end
99 |
100 |     dx = norm(x_new - x);
101 |     x = x_new;
102 |
103 |     t=t+1; ts=ts+1;
104 |
105 |     if dx<MAXDX
106 |       cont=0;
107 |       fprintf('%5d> Converged! \n', ts);
108 |     else
109 |       if ts>=maxIts;
110 |         cont=0;
111 |         fprintf('%5d> Maximum iteration reached!\n', ts);
112 |       elseif t>=500;
113 |         % choose a different starting value
114 |         % [k,id]=min(k); x = Xtr(id,:);
115 |         randn('state',cputime+ts); x=(randn(1,dim)+mX).*sX;
116 |         t=0;
117 |       end
118 |     end
119 |     k = RBF_kernel(x,Xtr,sig2);
120 |
121 |   end % while
122 |   Ximg(n,:)=x_new;
123 | end % for
124 |
--------------------------------------------------------------------------------
/progress.m:
--------------------------------------------------------------------------------
1 | function cont=progress(status,rate)
2 | %PROGRESS Text progress bar
3 | %   Similar to waitbar but without the figure display.
4 | %
5 | %   Start:
6 | %   C = PROGRESS('init',TITLE), where the default TITLE is 'please wait'
7 | %
8 | %   On progress:
9 | %   C = PROGRESS(C,RATE);
10 | %
11 | %   Examples:
12 | %      n=20;
13 | %      for i=1:n
14 | %        if i==1, c=progress('init');
15 | %        else c=progress(c,i/n); end
16 | %
17 | %        % computing something ...
18 | %        pause(.1)
19 | %      end
20 | %
21 | %      % inside a script you may use:
22 | %      c=progress('init','wait for ... whatever');
23 | %      for i=1:n
24 | %        c=progress(c,i/n);
25 | %        ...
26 | %      end
27 | %
28 |
29 | lmax=50;
30 | if isequal(status,'init')
31 |   if nargin > 1
32 |     title = rate;   % in 'init' mode the second argument holds the title string
33 |     fprintf(1,'\n %s\n',title);
34 |     str = repmat(' ',1,lmax-4);
35 |   else
36 |     str = ' please wait ';
37 |     fprintf(1,'\n');
38 |   end
39 |   fprintf(1,' |-%s-|\n',str);
40 |   fprintf(1,'%s',' ');
41 |   cont=0;
42 | else
43 |   cont = status;
44 |   n=ceil(rate*lmax);
45 |   dif = n - cont;
46 |   if dif > 0 & n <=lmax
47 |     N=dif;
48 |   else
49 |     N=0;
50 |   end
51 |   cont=cont+N;
52 |   str = repmat('*',1,N);
53 |   fprintf(1,'%s',str);
54 |   if rate==1
55 |     fprintf(1,'%s\n',' done');
56 |   end
57 | end
58 |
--------------------------------------------------------------------------------
/range.m:
--------------------------------------------------------------------------------
1 | function y = range(x)
2 | %RANGE Sample range.
3 |
4 | y = max(x) - min(x);
5 |
6 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
--------------------------------------------------------------------------------
/rcrossvalidate.m:
--------------------------------------------------------------------------------
1 | function [cost,costs] = rcrossvalidate(model, L, wfun, estfct,combinefct)
2 |
3 | % Estimate the performance of a model with robust l-fold crossvalidation
4 |
5 | % CAUTION!! Use this function only to obtain the value of the rcrossvalidation score
6 | % function given the tuning parameters. Do not use this function together with
7 | % 'tunelssvm', but use 'rcrossvalidatelssvm' instead. The latter is a faster
8 | % implementation which uses previously computed results.
9 | %
10 | %
11 | % >> cost = rcrossvalidate({Xtrain,Ytrain,type,gam,sig2})
12 | % >> cost = rcrossvalidate( model)
13 | %
14 | % Robustness in the l-fold crossvalidation score function is obtained by
15 | % iterative reweighting schemes.
16 | %
17 | % This routine is computationally intensive.
18 | %
19 | %
20 | % Some commonly used criteria are:
21 | %
22 | % >> cost = rcrossvalidate(model, 10, 'whuber', 'mae')
23 | % >> cost = rcrossvalidate(model, 10, 'whampel', 'mae')
24 | % >> cost = rcrossvalidate(model, 10, 'wlogistic', 'mae')
25 | % >> cost = rcrossvalidate(model, 10, 'wmyriad', 'mae')
26 | %
27 | % Full syntax
28 | %
29 | % 1. Using LS-SVMlab with the functional interface:
30 | %
31 | % >> [cost, costs] = rcrossvalidate({X,Y,type,gam,sig2,kernel,preprocess}, L, wfun, estfct, combinefct)
32 | %
33 | % Outputs
34 | %   cost : Cost estimation of the L-fold cross validation
35 | %   costs(*) : L x 1 vector with costs estimated on the L different folds
36 | % Inputs
37 | %   X : Training input data used for defining the LS-SVM and the preprocessing
38 | %   Y : Training output data used for defining the LS-SVM and the preprocessing
39 | %   type : 'function estimation' ('f') or 'classifier' ('c')
40 | %   gam : Regularization parameter
41 | %   sig2 : Kernel parameter (bandwidth in the case of the 'RBF_kernel')
42 | %   kernel(*) : Kernel type (by default 'RBF_kernel')
43 | %   preprocess(*) : 'preprocess'(*) or 'original'
44 | %   L(*) : Number of folds (by default 10)
45 | %   wfun(*) : weighting scheme (by default: whuber)
46 | %   estfct(*) : Function estimating the cost based on the residuals (by default mae)
47 | %   combinefct(*) : Function combining the estimated costs on the different folds (by default mean)
48 | %
49 | %
50 | % 2. Using the object oriented interface:
51 | %
52 | % >> [cost, costs] = rcrossvalidate(model, L, wfun, estfct, combinefct)
53 | %
54 | % Outputs
55 | %   cost : Cost estimation of the L-fold cross validation
56 | %   costs(*) : L x 1 vector with costs estimated on the L different folds
57 | % Inputs
58 | %   model : Object oriented representation of the LS-SVM model
59 | %   L(*) : Number of folds (by default 10)
60 | %   wfun(*) : weighting scheme (by default: whuber)
61 | %   estfct(*) : Function estimating the cost based on the residuals (by default mae)
62 | %   combinefct(*) : Function combining the estimated costs on the different folds (by default mean)
63 | %
64 | % See also:
65 | %   mae,whuber,wlogistic,whampel,wmyriad, crossvalidate, trainlssvm, robustlssvm
66 |
67 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
68 |
69 |
70 | % LS-SVMlab
71 | eval('model = initlssvm(model{:});',' ');
72 | eval('L;','L=min(ceil(sqrt(model.nb_data)),10);');
73 | eval('estfct;','estfct=''mae'';');
74 | eval('combinefct;','combinefct=''mean'';');
75 | eval('wfun;','wfun=''whuber'';');
76 | %
77 | % initialisation and defaults
78 | %
79 | nb_data = size(model.ytrain,1);
80 |
81 | if L==nb_data, p = 1:nb_data; else p = randperm(nb_data); end
82 | px = model.xtrain(p,:);
83 | py = model.ytrain(p,:);
84 |
85 | [~,Y] = postlssvm(model,[],py);
86 |
87 | %initialize: no incremental memory allocation
88 | costs = zeros(L,length(model.gam));
89 | block_size = floor(nb_data/L);
90 |
91 | S = ones(nb_data,1);
92 | Atot = kernel_matrix(px,model.kernel_type,model.kernel_pars)+eye(nb_data)./model.gam;
93 | %
94 | %
95 | % start loop over l validations
96 | %
97 | for l = 1:L,
98 |
99 |   % divide in data and validation set, the training data set is a copy
100 |   % of the permuted data, the validation set is just a logical index
101 |   if l==L,
102 |     train = 1:block_size*(l-1);
103 |     validation = block_size*(l-1)+1:nb_data;
104 |   else
105 |     train = [1:block_size*(l-1) block_size*l+1:nb_data];
106 |     validation = block_size*(l-1)+1:block_size*l;
107 |   end
108 |
109 |   A = [0 S(train)';S(train) Atot(train,train)];
110 |   b = [0;py(train)];
111 |
112 |   % Solve linear system
113 |   sol = linsolve(A,b,struct('SYM',true));
114 |
115 |   % Determine residuals ek
116 |   ek = sol(2:end)./model.gam;
117 |   g = model.gam;
118 |   %for i=2:size(A,1), A(i,i) = A(i,i) - 1/g; end
119 |   A = A-eye(size(train,2)+1)./g; A(1,1)=0;
120 |   Ah = A;
121 |   %
122 |   % robust estimation of the variance
123 |   %
124 |   for k = 1:20
125 |     vare = 1.483*median(abs((ek)-median(ek)));
126 |     alphaold = sol(2:end);
127 |     %
128 |     % robust re-estimation of the alpha's and the b
129 |     %
130 |     cases = reshape((ek./vare),1,size(ek,1));
131 |     W = g*weightingscheme(cases,wfun);
132 |
133 |     for t=1:size(train,2), A(t+1,t+1) = A(t+1,t+1)+1./W(t); end
134 |
135 |     sol = linsolve(A,b,struct('SYM',true));
136 |
137 |     ek = sol(2:end)./W';
138 |     A = Ah;
139 |     if norm(abs(alphaold-sol(2:end)),'fro')<=1e-4,
140 |       %fprintf('\n Converged after %.0f iteration(s)', k);
141 |       break % stop reweighting once the support values have converged
142 |     end
143 |     model.status = 'changed';
144 |   end
145 |
146 |   % regression
147 |   % Simulate system on validation data
148 |   yh = Atot(train,validation)'*sol(2:end) + ones(numel(validation),1)*sol(1);
149 |   [~,yh] = postlssvm(model,[],yh);
150 |   costs(l,1) = feval(estfct,yh - Y(validation,:));
151 | end
152 | cost = feval(combinefct, costs);
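For instance, scoring one candidate (gam, sig2) pair on contaminated data, a sketch with illustrative values; 'wlogistic' is used because it needs no extra cutoff parameter:

X = (1:100)'/10;
Y = sin(X) + 0.05*randn(100,1);
Y(1:5) = Y(1:5) + 3;                              % a few gross outliers
cost = rcrossvalidate({X,Y,'f',10,0.3},10,'wlogistic','mae');

Combining a reweighting scheme with the 'mae' estimator keeps the score from being dominated by the outlying points.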
--------------------------------------------------------------------------------
/rcrossvalidatelssvm.m:
--------------------------------------------------------------------------------
1 | function cost = rcrossvalidatelssvm(model,Y, L, omega, estfct,combinefct)
2 | %%%%%%%%%%%%%%%%%%%%%
3 | % INTERNAL FUNCTION %
4 | %%%%%%%%%%%%%%%%%%%%%
5 | % Estimate the model performance of a model with l-fold robust crossvalidation
6 |
7 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @
8 | % http://www.esat.kuleuven.be/sista/lssvmlab
9 |
10 | %
11 | % initialisation and defaults
12 | %
13 | %if size(X,1)~=size(Y,1), error('X and Y have different number of datapoints'); end
14 | [nb_data,y_dim] = size(Y);
15 | d = size(model.xtrain,2);
16 | % LS-SVMlab
17 | eval('model = initlssvm(model{:});',' ');
18 | model.status = 'changed';
19 |
20 | eval('L;','L=min(round(sqrt(size(model.xfull,1))),10);');
21 | eval('estfct;','estfct=''mse'';');
22 | eval('combinefct;','combinefct=''mean'';');
23 |
24 | py = Y;
25 | [~,Y] = postlssvm(model,[],Y);
26 |
27 | gams = model.gamcsa;
28 | eval('sig2s = model.kernel_parscsa;','sig2s=[];')
29 | eval('deltas = model.deltacsa;','deltas=[];')
30 | %
31 | %initialize: no incremental memory allocation
32 | %
33 | costs = zeros(L,length(gams));
34 | block_size = floor(nb_data/L);
35 |
36 | % check whether there are more than one gamma or sigma
37 | for j =1:numel(gams)
38 |   if strcmp(model.kernel_type,'RBF_kernel') || strcmp(model.kernel_type,'RBF4_kernel')
39 |     model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',sig2s(j));
40 |     eval('model.delta=deltas(j);','model.delta=[];')
41 |   elseif strcmp(model.kernel_type,'lin_kernel')
42 |     model = changelssvm(model,'gam',gams(j));
43 |     eval('model.delta=deltas(j);','model.delta=[];')
44 |   elseif strcmp(model.kernel_type,'poly_kernel')
45 |     model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',[sig2s(1,j);sig2s(2,j)]);
46 |     eval('model.delta=deltas(j);','model.delta=[];')
47 |   else
48 |     model = changelssvm(changelssvm(model,'gam',gams(j)),'kernel_pars',[sig2s(1,j);sig2s(2,j);sig2s(3,j)]);
49 |     eval('model.delta=deltas(j);','model.delta=[];')
50 |   end
51 |
52 |   S = ones(nb_data,1);
53 |   Atot = kernel_matrix2(omega,model.kernel_type,model.kernel_pars,d)+eye(nb_data)./model.gam;
54 |   %
55 |   %
56 |   % start loop over l validations
57 |   %
58 |   for l = 1:L,
59 |
60 |     % divide in data and validation set, the training data set is a copy
61 |     % of the permuted data, the validation set is just a logical index
62 |     if l==L,
63 |       train = 1:block_size*(l-1);
64 |       validation = block_size*(l-1)+1:nb_data;
65 |     else
66 |       train = [1:block_size*(l-1) block_size*l+1:nb_data];
67 |       validation = block_size*(l-1)+1:block_size*l;
68 |     end
69 |
70 |     A = [0 S(train)';S(train) Atot(train,train)];
71 |     b = [0;py(train)];
72 |
73 |     % Solve linear system
74 |     sol = linsolve(A,b,struct('SYM',true));
75 |
76 |     % Determine residuals ek
77 |     ek = sol(2:end)./model.gam;
78 |     g = model.gam;
79 |     %for i=2:size(A,1), A(i,i) = A(i,i) - 1/g; end
80 |     A = A-eye(size(train,2)+1)./g; A(1,1)=0;
81 |     Ah = A;
82 |     %
83 |     % robust estimation of the variance
84 |     %
85 |     for k = 1:30
86 |       vare = 1.483*median(abs((ek)-median(ek)));
87 |       alphaold = sol(2:end);
88 |       %
89 |       % robust re-estimation of the alpha's and the b
90 |       %
91 |       cases = reshape((ek./vare),1,size(ek,1));
92 |       W = g*weightingscheme(cases,model.weights,model.delta);
93 |
94 |       for t=1:size(train,2), A(t+1,t+1) = A(t+1,t+1)+1./W(t); end
95 |
96 |       sol = linsolve(A,b,struct('SYM',true));
97 |
98 |       ek = sol(2:end)./W';
99 |       A = Ah;
100 |       if norm(abs(alphaold-sol(2:end)),'fro')<=1e-3,
101 |         %fprintf('\n Converged after %.0f iteration(s)', k);
102 |         break % stop reweighting once the support values have converged
103 |       end
104 |       model.status = 'changed';
105 |     end
106 |
107 |     % regression
108 |     % Simulate system on validation data
109 |     yh = Atot(train,validation)'*sol(2:end) + ones(numel(validation),1)*sol(1);
110 |     [~,yh] = postlssvm(model,[],yh);
111 |     z = yh - Y(validation,:);
112 |     eval('costs(l,j) = feval(estfct,z);')
113 |   end
114 | end
115 |
116 | cost = feval(combinefct, costs);
--------------------------------------------------------------------------------
/ridgeregress.m:
--------------------------------------------------------------------------------
1 | function [w,b,Yt] = ridgeregress(X,Y,gam, Xt)
2 | % Linear ridge regression
3 | %
4 | % >> [w, b] = ridgeregress(X, Y, gam)
5 | % >> [w, b, Yt] = ridgeregress(X, Y, gam, Xt)
6 | %
7 | % Ordinary least squares with a regularization parameter (gam).
8 | %
9 | % Full syntax
10 | %
11 | % >> [w, b, Yt] = ridgeregress(X, Y, gam, Xt)
12 | %
13 | % Outputs
14 | %   w : d x 1 vector with the regression coefficients
15 | %   b : bias term
16 | %   Yt(*) : Nt x 1 vector with predicted outputs of test data
17 | % Inputs
18 | %   X : N x d matrix with the inputs of the training data
19 | %   Y : N x 1 vector with the outputs of the training data
20 | %   gam : Regularization parameter
21 | %   Xt(*) : Nt x d matrix with the inputs of the test data
22 | %
23 | % See also:
24 | %   bay_rr,bay_lssvm
25 |
26 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
27 |
28 |
29 | if size(X,1)~=size(Y,1),
30 |   error('X and Y need to have the same number of data points');
31 | end
32 | if size(Y,2)~=1,
33 |   error('Only handling one-dimensional output');
34 | end
35 | if nargin==4 & size(Xt,2)~=size(X,2),
36 |   error('Training input and test inputs need to have the same dimension');
37 | end
38 |
39 | [nD,nx] = size(X);
40 | if nx>nD, warning('dimension of the datapoints larger than number of datapoints...');end
41 |
42 |
43 | Xe = [X ones(nD,1)];
44 | %H = [ Xe'*Xe + gam^-1*[eye(nx) zeros(nx,1); zeros(1,nx+1)]];
45 | H = Xe'*Xe + inv(gam).*eye(nx+1);
46 |
47 | sol = pinv(H)*Xe'*Y;
48 | w = sol(1:end-1);
49 | b = sol(end);
50 |
51 |
52 | if nargin<4, return; end
53 | Yt = Xt*w+b;
54 |
55 |
56 |
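A quick sanity check of the regularized normal equations above (coefficients are illustrative):

X = randn(50,3);
Y = X*[1;-2;0.5] + 3 + 0.01*randn(50,1);
[w,b] = ridgeregress(X,Y,1e6);   % large gam = weak regularization: w ~ [1;-2;0.5], b ~ 3

Because the penalty enters as 1/gam, a larger gam means less shrinkage, consistent with the LS-SVM convention used elsewhere in the toolbox.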
--------------------------------------------------------------------------------
/robustlssvm.m:
--------------------------------------------------------------------------------
1 | function [model,b] = robustlssvm(model,ab,X,Y)
2 | % Robust training in the case of non-Gaussian noise or outliers
3 | % (only possible with the object oriented interface)
4 | %
5 | % >> model = robustlssvm(model)
6 | %
7 | % Robustness towards outliers can be achieved by reducing the
8 | % influence of support values corresponding to large errors.
9 | %
10 | %
11 | % Full syntax
12 | %
13 | % 1. Using the object oriented interface:
14 | %
15 | % >> model = robustlssvm(model)
16 | %
17 | % Outputs
18 | %   model : Robustly trained object oriented representation of the LS-SVM model
19 | % Inputs
20 | %   model : Object oriented representation of the LS-SVM model
21 | %
22 | % See also:
23 | %   trainlssvm, tunelssvm, crossvalidate
24 |
25 |
26 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
27 |
28 |
29 |
30 | if iscell(model),
31 |   func = 1;
32 |   model = initlssvm(model{:});
33 | else
34 |   func = 0;
35 | end
36 |
37 |
38 | if model.type(1)~='f',
39 |   error('Robustly weighted least squares only implemented for regression case...');
40 | end
41 |
42 |
43 |
44 | if nargin>1,
45 |   if iscell(ab) && ~isempty(ab),
46 |     model.alpha = ab{1};
47 |     model.b = ab{2};
48 |     model.status = 'trained';
49 |     if nargin>=4,
50 |       model = trainlssvm(model,X,Y);
51 |     end
52 |   else
53 |     model = trainlssvm(model,ab,X);
54 |   end
55 | else
56 |   model = trainlssvm(model);
57 | end
58 |
59 |
60 | % model errors
61 | ek = model.alpha./model.gam';
62 | g = model.gam;
63 | %
64 | % robust estimation of the variance
65 | %
66 | eval('delta=model.delta;','delta=[];')
67 | for j=1:500
68 |   vare = 1.483*median(abs((ek)-median(ek)));
69 |   alphaold = model.alpha;
70 |   %
71 |   % robust re-estimation of the alpha's and the b
72 |   %
73 |   cases = reshape((ek./vare),1,model.nb_data);
74 |   W = weightingscheme(cases,model.weights,delta);
75 |   W = g*W;
76 |
77 |   model = changelssvm(model,'gam',W);
78 |   % model = changelssvm(model,'implementation','MATLAB');
79 |   model = trainlssvm(model);
80 |   ek = model.alpha./model.gam';
81 |
82 |   if norm(abs(alphaold-model.alpha),'fro')<=1e-4,
83 |     fprintf('Converged after %.0f iteration(s)', j);
84 |     if func && nargout~=1,
85 |       b = model.b;
86 |       model = model.alpha;
87 |     end
88 |     return
89 |   end
90 |   model.status = 'changed';
91 | end
92 |
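The usual pipeline pairs robustlssvm with a robustly tuned model, as in the LS-SVMlab manual (data and weighting choice illustrative):

X = (-5:0.2:5)'; Y = sin(X) + 0.1*randn(size(X));
Y([10 30]) = 5;                                   % inject two outliers
model = initlssvm(X,Y,'f',[],[],'RBF_kernel');
model = tunelssvm(model,'simplex','rcrossvalidatelssvm',{10,'mae'},'whuber');
model = robustlssvm(model);
plotlssvm(model);

tunelssvm stores the chosen weighting scheme in model.weights, which is exactly what the reweighting loop above reads.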
--------------------------------------------------------------------------------
/simann.m:
--------------------------------------------------------------------------------
1 | function [xopt,fopt]=simann(func, x, LB, UB, sa_t, sa_rt, sa_nt, sa_ns,rseed)
2 |
3 | % Simulated Annealing programmed for minimization problems
4 | % INPUTS
5 | %   func, string variable containing name of function file to be optimized
6 | %   x, starting values
7 | %   LB, lower bound on optimization parameters
8 | %   UB, upper bound on optimization parameters
9 | %   sa_t, initial temperature
10 | %   sa_rt, temperature reduction factor, 0 < sa_rt < 1, try .85
11 | %   sa_nt, number of times through ns loop before temperature reduction (recommended value: 5)
12 | %   sa_ns, number of times through function before stepsize adjustment (recommended value: 20)
13 | %
14 | % OUTPUTS
15 | %   xopt, the optimal solution
16 | %   fopt, the corresponding optimal function value
17 | %
18 |
19 |
20 | LB=LB(:)';
21 | UB=UB(:)';
22 |
23 | rand('state',rseed); %sets seed for random number generator
24 | sa_neps=4; %number of times eps
25 | %tolerance is achieved before termination
26 | sa_eps=eps; %convergence criteria
27 | sa_maxeval=60;%12000000; %maximum number of function evaluations
28 |
29 | sa_nargs=length(LB); %number of parameters
30 | sa_nobds=0;
31 | sa_nacc=0; %number of acceptances
32 | sa_nevals=0; %number of evaluations
33 | sa_opteval=0; %optimum number of
34 | %function evaluations
35 |
36 | fstar=Inf*ones(sa_neps,1);
37 |
38 | %x=LB+(UB-LB).*rand(1, sa_nargs); %starting values for model parameters
39 | f=feval(func,x); %function evaluation with parameters x
40 | %disp('initial loss function value:');disp(f);
41 | sa_nevals=sa_nevals+1;
42 | xopt=x;
43 | fopt=f;
44 | xtot=x;
45 | fstar(1)=f;
46 |
47 | VM=(UB-LB);%/2; %maximum step size
48 |
49 | %LOOP
50 | while 1
51 |
52 |   nup=0; %number of uphill movements
53 |   nrej=0; %number of rejections
54 |   nnew=0;
55 |   ndown=0; %number of downhill movements
56 |   lnobds=0;
57 |   nacp=zeros(sa_nargs,1);
58 |   C = progress('init','Determine initial hyperparameters for simplex...');
59 |   for m=1:sa_nt
60 |     for j=1:sa_ns
61 |       for h=1:sa_nargs
62 |         if sa_nevals>=sa_maxeval
63 |           %disp('too many function evaluations')
64 |           return
65 |         end
66 |         C = progress(C,sa_nevals/sa_maxeval);
67 |         %workbar(sa_nevals/sa_maxeval,'Determine initial hyperparameters to build grid...','Progress')
68 |         % generate xp, trial value of x
69 |         xp=x;
70 |         xp(h)=x(h)+VM(h)*(2*rand(1,1)-1.0); %calculate new value for x (xp)
71 |         if (xp(h)<LB(h) | xp(h)>UB(h))
72 |           xp(h)=LB(h)+(UB(h)-LB(h))*rand(1,1);
73 |           lnobds=lnobds+1;
74 |           sa_nobds=sa_nobds+1;
75 |         end
76 |         % evaluate at xp and return as fp
77 |         %disp ('current parameter vector:');disp(xp);
78 |         fp=feval(func,xp); %function evaluation with parameters xp
79 |         %disp ('function value');disp(fp);
80 |         sa_nevals=sa_nevals+1;
81 |
82 |         % we minimize! accept if the function value decreases
83 |         if fp<=f
84 |           x=xp;
85 |           f=fp;
86 |           sa_nacc=sa_nacc+1;
87 |           nacp(h)=nacp(h)+1;
88 |           nup=nup+1;
89 |           % if smaller than any previous point, record as new optimum
90 |           if fp<fopt
91 |             xopt=xp;
92 |             fopt=fp;
93 |             nnew=nnew+1;
94 |           end
95 |         % otherwise accept or reject the uphill move via the Metropolis criterion
96 |         else
97 |           p=exp((f-fp)/sa_t);
98 |           pp=rand(1,1);
99 |           if pp<p
100 |             x=xp;
101 |             f=fp;
102 |             sa_nacc=sa_nacc+1;
103 |             nacp(h)=nacp(h)+1;
104 |             ndown=ndown+1;
105 |           else
106 |             nrej=nrej+1;
107 |           end
108 |         end
109 |       end
110 |     end
111 |
112 |     % adjust VM so that approximately half of all evaluations are accepted
113 |     c=2*ones(1,sa_nargs);
114 |     for i=1:sa_nargs
115 |       ratio=nacp(i)/sa_ns;
116 |       if ratio>0.6
117 |         VM(i)=VM(i) * (1+c(i)*(ratio-0.6)/0.4);
118 |       elseif ratio <0.4
119 |         VM(i)=VM(i)/(1+c(i)*((0.4-ratio)/0.4));
120 |       end
121 |       if VM(i)>(UB(i)-LB(i))
122 |         VM(i)=UB(i)-LB(i);
123 |       end
124 |     end
125 |
126 |     % provide statistics about current state of optimization
127 |
128 |     % disp('No. of evaluations');disp(sa_nevals);disp(' current temperature');disp(sa_t);
129 |     % disp('current optimum function value');disp(fopt);
130 |     % disp('No. of downhill steps');disp(nup); % note misnomer in variable declaration!
131 |     % disp('No. of accepted uphill steps');disp(ndown); % we minimize, thus downhill is always accepted!
132 |     % disp('No. of rejections');disp(nrej);
133 |     % disp('current parameter values');disp(xp);
134 |     % disp('current optimum vector');disp(xopt);
135 |     % disp('current step size');disp(VM);
136 |     %disp('Variables used:');whos;
137 |
138 |     for i=1:sa_nargs
139 |       nacp(i) = 0;
140 |     end
141 |   end
142 |
143 |
144 |   % check termination criteria
145 |   fstar(1)=f;
146 |   quit = ((fstar(1)-fopt) <= sa_eps);
147 |   if any(abs(fstar-f)>sa_eps)
148 |     quit=0;
149 |   end
150 |
151 |   if quit
152 |     disp(['simulated annealing achieved termination after ', num2str(sa_nevals),' evals']);
153 |     return
154 |   end
155 |
156 |   % reduce temperature
157 |   sa_t=sa_t*sa_rt;
158 |   fstar(2:4)=fstar(1:3);
159 |   % continue from current optimum
160 |   x=xopt;
161 |   f=fopt;
162 | end %while
163 |
164 |
165 |
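A self-contained call on a smooth test function (all settings illustrative; note the hard-coded sa_maxeval above caps the search at 60 evaluations):

f = @(x) (x(1)-1)^2 + (x(2)+2)^2;
[xopt,fopt] = simann(f,[0 0],[-5 -5],[5 5],10,0.85,5,20,12345);

Within the toolbox this routine is mainly used to generate starting values for the simplex search, as the hard-coded progress-bar title suggests, which is why the evaluation budget is kept so small.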
--------------------------------------------------------------------------------
/smootherlssvm.m:
--------------------------------------------------------------------------------
1 | function S = smootherlssvm(model,Xt)
2 |
3 | % Calculates the smoother matrix for LS-SVM.
4 | % Inputs:
5 | %   - model : object oriented interface of the LS-SVM model
6 | %   - Xt (*): test point(s) in which the smoother is evaluated
7 | %
8 | % Outputs:
9 | %   - S: smoother matrix
10 |
11 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
12 |
13 |
14 | if isempty(model.gam) && isempty(model.kernel_pars)
15 |   error('Please supply one or more learning parameters');
16 | end
17 |
18 | K = kernel_matrix(model.xtrain,model.kernel_type,model.kernel_pars);
19 | if nargin < 2
20 |   S = smoother(model,K);
21 | else
22 |   if model.preprocess(1)=='p'
23 |     % Preprocess the test data
24 |     Xt = prelssvm(model,Xt);
25 |   end
26 |   Kt = kernel_matrix(model.xtrain,model.kernel_type,model.kernel_pars,Xt);
27 |   S = smoother(model,K,Kt);
28 | end
29 |
30 |
31 | function S = smoother(model,K,varargin)
32 | Z = pinv(K+eye(model.nb_data)./model.gam);
33 | c = sum(sum(Z));
34 | J = ones(model.nb_data)./c;
35 | if isempty(varargin)
36 |   S = K*(Z-Z*J*Z) + J*Z;
37 | else
38 |   Kt = varargin{1}';
39 |   J1 = ones(size(Kt,1),size(Z,1))./c;
40 |   S = Kt*(Z-Z*J*Z) + J1*Z;
41 | end
42 |
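The defining property of the returned matrix is that it maps the raw training targets linearly onto the fitted values. A sketch, assuming X and Y in original (unpreprocessed) scale and fixed illustrative hyperparameters:

model = trainlssvm(initlssvm(X,Y,'f',10,0.5,'RBF_kernel','original'));
S  = smootherlssvm(model);      % N x N linear smoother
Yh = simlssvm(model,X);
norm(S*Y - Yh)                  % zero up to numerical error

cilssvm and predlssvm exploit this linearity: variances propagate through the fit as S*diag(sigma2)*S'.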
--------------------------------------------------------------------------------
/tbform.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lxy0068/Data-classification-prediction-based-on-Grey-Wolf-Optimization-Algorithm-GWO_LSSVM-Adaboost/e3ce62e69527100f33ccf2f6018b8fb9b9c067fa/tbform.m
--------------------------------------------------------------------------------
/trimmedmse.m:
--------------------------------------------------------------------------------
1 | function [cost,retained] = trimmedmse(R,beta,V)
2 | % Calculate trimmed mean of the squared value of the residuals.
3 | %
4 | % cost = trimmedmse(R);
5 | %
6 | % The factor at which one trims off the normed residuals is
7 | % optimized. However, one can pass a fixed value when one wants to
8 | % exclude this optimization, e.g.:
9 | %
10 | % cost = trimmedmse(R,0.15);
11 | %
12 | % One can overrule the default squared norm by passing another norm function:
13 | %
14 | % cost = trimmedmse(R,[],norm);
15 | %
16 | % As an additional output, the indices of the retained points can be
17 | % retrieved:
18 | %
19 | % [cost,retained] = trimmedmse(R);
20 | %
21 | % see also:
22 | %   mse, misclass
23 |
24 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
25 |
26 |
27 |
28 | % default trimming?
29 | eval('beta;','beta=[];');
30 | eval('R = feval(V,R);','R = R.^2;');
31 | [Rs,si] = sort(R);
32 | N = max(size(Rs));
33 |
34 | %figure; hist(Rs,50); pause
35 |
36 | if ~isempty(beta),
37 |   nb = N - floor(N*beta);
38 |   mu = mean(Rs(1:nb));
39 |   cost = mu;
40 | else
41 |   % optimize trimming factor
42 |   best_variance = inf;
43 |   t = 1;
44 |   %betas = 0:.01:.45;
45 |   %betas = [0 0.05 0.10 0.175 0.30 0.45];
46 |   betas = 0.05;
47 |   for beta = betas,
48 |
49 |     %beta = beta*2;
50 |     nb = N - floor(N*beta);
51 |     mu = mean(Rs(1:nb));
52 |     %variance = 1/((1-beta)^2) * (sum((Rs(1:nb)-mu).^2)/N+ (beta*(Rs(nb)-mu)^2));
53 |     variance = sum((Rs(1:nb)-mu).^2) + ...
54 |         (floor(N*beta)+1)*(Rs(nb)-mu)^2 - ...
55 |         1/N*(floor(N*beta)*(Rs(nb)-mu)^2);
56 |     variance = variance/(nb*nb-1);
57 |     %v(t,1) = variance; t=t+1;
58 |     if variance <= best_variance,
59 |       best_variance = variance;
60 |       cost = mu;
61 |       best_beta = beta;
62 |     end
63 |   end
64 | end
65 | %figure; plot(betas',[v sum(v,2)]);
66 | %figure; hist(Rs,50);
67 | % which are the retained data points
68 | retained = si(1:nb);
69 |
70 |
71 |
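For example, with a fixed trimming factor (values illustrative):

R = [0.1*randn(95,1); 5*ones(5,1)];        % 5 gross outliers among 100 residuals
[cost,retained] = trimmedmse(R,0.10);      % mean of the 90 smallest squared residuals

retained indexes the residuals that survived the trimming, so it can also be used to refit on the clean subset.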
--------------------------------------------------------------------------------
/weightingscheme.m:
--------------------------------------------------------------------------------
1 | function W = weightingscheme(cases,wfct,varargin)
2 |
3 | %
4 | % INTERNAL FUNCTION
5 | %
6 |
7 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
8 |
9 | switch wfct
10 |
11 |   case {'WHuber','whuber'}
12 |     b = varargin{1};
13 |     cases = abs(cases);
14 |     temp = 1;
15 |     W = temp.*(cases<b) + temp.*(cases>=b).*(b./cases);
16 |
17 |   case {'WLogistic','wlogistic'}
18 |     W = tanh(cases)./cases;
19 |
20 |   case {'WHampel','whampel'}
21 |     cases = abs(cases);
22 |     % defaults for c1 and c2
23 |     c1=2.5; c2=3;
24 |     %[c1,c2] = adaptweight(cases);
25 |     dc = c2-c1;
26 |     temp = 1;
27 |     W = temp.*(cases<=c1) + ...
28 |         temp.*(cases<=c2 & cases>c1).*((c2-cases)./dc) + ...
29 |         temp.*(cases>c2).*10e-8;
30 |
31 |   case {'WMyriad','wmyriad'}
32 |     %K = 0.5*iqr(cases);
33 |     K = varargin{1};
34 |     W = K^2./(K^2+cases.^2);
35 | end
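As a concrete check of the Huber branch: w(e) = 1 for |e| < b and b/|e| otherwise, so with the classical cutoff b = 1.345:

ek = [-4 -1 0 1 4];
W  = weightingscheme(ek,'whuber',1.345)    % -> [0.3363 1 1 1 0.3363]

The logistic scheme needs no cutoff argument, which makes it a convenient choice when calling rcrossvalidate directly.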
--------------------------------------------------------------------------------
/windowize.m:
--------------------------------------------------------------------------------
1 | function w = windowize(A,window_array)
2 | % Re-arrange the data points into a Hankel matrix for (N)AR time-series modeling
3 | %
4 | % >> w = windowize(A, window)
5 | %
6 | % Use the windowize function to make a nonlinear AR predictor with a
7 | % nonlinear regressor. The last elements of the resulting matrix
8 | % will contain the future values of the time-series, the others
9 | % will contain the past inputs. window is the relative index of
10 | % data points in matrix A, that are selected to make a window. Each
11 | % window is put in a row of matrix W. The matrix W contains as many
12 | % rows as there are different windows selected in A.
13 | %
14 | % Schematically, this becomes
15 | %
16 | % >> A = [a1 a2 a3;
17 | %         b1 b2 b3;
18 | %         c1 c2 c3;
19 | %         d1 d2 d3;
20 | %         e1 e2 e3;
21 | %         f1 f2 f3;
22 | %         g1 g2 g3];
23 | %
24 | % >> W = windowize(A, [1 2 3])
25 | %
26 | % W =
27 | %    a1 a2 a3 b1 b2 b3 c1 c2 c3
28 | %    b1 b2 b3 c1 c2 c3 d1 d2 d3
29 | %    c1 c2 c3 d1 d2 d3 e1 e2 e3
30 | %    d1 d2 d3 e1 e2 e3 f1 f2 f3
31 | %    e1 e2 e3 f1 f2 f3 g1 g2 g3
32 | %
33 | % The function windowizeNARX converts the time-series and its
34 | % exogenous variables into a block Hankel format useful for
35 | % training a nonlinear function approximation as a nonlinear ARX
36 | % model.
37 | %
38 | % Full syntax
39 | % (The length of window is denoted by w.)
40 | %
41 | % >> Xw = windowize(X, window)
42 | %
43 | % Outputs
44 | %   Xw : (N-w+1) x w matrix of the sequences of windows over X
45 | % Inputs
46 | %   X : N x 1 vector with data points
47 | %   w : w x 1 vector with the relative indices of one window
48 | %
49 | %
50 | % see also:
51 | %   windowizeNARX, predict, trainlssvm, simlssvm
52 |
53 |
54 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
55 |
56 |
57 |
58 | l = max(window_array);
59 | w = zeros(size(A,1)-l+1,length(window_array)*size(A,2));
60 | for i=1:size(A,1)-l+1,
61 |   for j = 1:length(window_array),
62 |     w(i,(j-1)*size(A,2)+1:j*size(A,2)) = A(i-1+window_array(j),:);
63 |   end
64 | end
65 |
--------------------------------------------------------------------------------
/windowizeNARX.m:
--------------------------------------------------------------------------------
1 | function [wX, wY, xdim, ydim, n] = windowizeNARX(X,Y,x_delays,y_delays, steps)
2 | % Re-arrange the data points into a block Hankel matrix for (N)ARX time-series modeling
3 | %
4 | % >> [Xw,Yw] = windowizeNARX(X,Y,xdelays, ydelays, steps)
5 | %
6 | % Rearrange the points of X and Y in a regressor matrix of the
7 | % past inputs and outputs (Xw) and the future outputs (Yw).
8 | %
9 | % Full syntax
10 | %
11 | % >> [Xw, Yw, xdim, ydim, n] = windowizeNARX(X, Y, xdelays, ydelays, steps)
12 | %
13 | % Outputs
14 | %   Xw : Matrix of the data used for input including the delays
15 | %   Yw : Matrix of the data used for output including the next steps
16 | %   xdim(*) : Number of dimensions in new input
17 | %   ydim(*) : Number of dimensions in new output
18 | %   n(*) : Number of new data points
19 | % Inputs
20 | %   X : N x m matrix with input data points
21 | %   Y : N x d matrix with output data points
22 | %   xdelays : Number of lags of X in new input
23 | %   ydelays : Number of lags of Y in new input
24 | %   steps(*): Number of future steps of Y in new output (by default 1)
25 | %
26 | % See also:
27 | %   windowize, predict, trainlssvm, simlssvm
28 |
29 | % Copyright (c) 2011, KULeuven-ESAT-SCD, License & help @ http://www.esat.kuleuven.be/sista/lssvmlab
30 |
31 |
32 | m=max(x_delays,y_delays);
33 |
34 | eval('steps;','steps = 1;');
35 | if steps == 0,
36 |   n = size(X,1)-m;
37 | else
38 |   n = size(X,1)-m -steps+1;
39 | end
40 |
41 |
42 | wX = zeros(n,size(X,2)*(x_delays+1)+size(Y,2)*(y_delays));
43 | wY = zeros(n,size(Y,2)*steps);
44 | xdim = size(wX,2);
45 | ydim = size(wY,2);
46 |
47 | hdx = (x_delays+1)*size(X,2);
48 |
49 |
50 | for t=1:n,
51 |   for i=1:x_delays+1,
52 |     wX(t,1+((i-1)*size(X,2):i*size(X,2)-1)) = X(t+m-x_delays+i-1,:);
53 |   end
54 |
55 |   for i=1:y_delays,
56 |     wX(t,hdx + (i-1)*size(Y,2) + (1:size(Y,2))) = Y(t+m-y_delays+i-1,:);
57 |   end
58 |
59 |   for i=1:steps,
60 |     wY(t,i:i+size(Y,2)-1) = Y(t+m+i-1,:);
61 |   end
62 |
63 | end
64 |
65 |
--------------------------------------------------------------------------------
/数据集.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lxy0068/Data-classification-prediction-based-on-Grey-Wolf-Optimization-Algorithm-GWO_LSSVM-Adaboost/e3ce62e69527100f33ccf2f6018b8fb9b9c067fa/数据集.xlsx
--------------------------------------------------------------------------------
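Finally, a NARX regressor built with windowizeNARX feeds directly into the standard training call (system and hyperparameters illustrative):

u = randn(200,1);
y = filter(1,[1 -0.7],u) + 0.01*randn(200,1);   % simple first-order system
[Xw,Yw] = windowizeNARX(u,y,2,2);               % 2 input lags + 2 output lags
model = trainlssvm({Xw,Yw,'f',10,0.5,'RBF_kernel'});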