├── CreateDataAppTest.m ├── CreateKernelListWithVariable.m ├── README.md ├── UnitTraceNormalization.m ├── VectorizeCofSVM.m ├── WeightK.m ├── build_efficientK.m ├── costsvmclass.m ├── costsvmclassls.m ├── costsvmoneagainstall.m ├── costsvmoneagainstone.m ├── costsvmreg.m ├── devectorize.c ├── devectorize.mexglx ├── devectorize_single.c ├── devectorize_single.mexglx ├── exmklclass.m ├── exmkllsdisk.m ├── exmklmulticlass.m ├── exmklmulticlassrealdata.m ├── exmklreg.m ├── exmklvariationC.m ├── gradsvmclass.m ├── gradsvmclassls.m ├── gradsvmoneagainstall.m ├── gradsvmoneagainstone.m ├── gradsvmreg.m ├── ionosphere.mat ├── mklbuildkernel.m ├── mklkernel.m ├── mklmulticlass.m ├── mklmulticlassupdate.m ├── mklsvm.m ├── mklsvmclassSILP.m ├── mklsvmls.m ├── mklsvmlsupdate.m ├── mklsvmregSILP.m ├── mklsvmupdate.m ├── monqp.m ├── normalizemeanstd.m ├── sumKbeta.m ├── sumKbetals.m ├── svmclass.m ├── svmclasslsformkl.m ├── svmkernel.m ├── svmval.m ├── vectorize.c ├── vectorize.mexglx ├── vectorize_single.c └── vectorize_single.mexglx /CreateDataAppTest.m: -------------------------------------------------------------------------------- 1 | function [xapp,yapp,xtest,ytest,indice]=CreateDataAppTest(x,y,nbtrain, classcode) 2 | 3 | % [xapp,yapp,xtest,ytest,indice]=CreateDataAppTest(x,y,nbtrain, classcode) 4 | % 5 | % if nbtrain =[nbapppos nbappneg ] % we have a specific number of positive 6 | % and negative examples. 7 | 8 | if nargin <4 9 | classcode(1)=1; 10 | classcode(2)=-1; 11 | end; 12 | 13 | if length(nbtrain)==1; 14 | xapp=[]; 15 | yapp=[]; 16 | xtest=[]; 17 | ytest=[]; 18 | indice=[]; 19 | indice.app=[]; 20 | indice.test=[]; 21 | nbclass=length(classcode); 22 | nbdata=length(y); 23 | %keyboard 24 | for i=1:nbclass; 25 | ind=find(y==classcode(i)); 26 | nbclasscode_i=length(ind); 27 | ratioclasscode_i=nbclasscode_i/nbdata; 28 | aux=randperm(nbclasscode_i); 29 | nbtrainclasscode_i=round(ratioclasscode_i*nbtrain); 30 | indapp=ind(aux(1:nbtrainclasscode_i)); 31 | indtest=ind(aux(nbtrainclasscode_i+1:end)); 32 | xapp=[xapp;x(indapp,:)]; 33 | yapp=[yapp;y(indapp,:)]; 34 | xtest=[xtest;x(indtest,:)]; 35 | ytest=[ytest;y(indtest,:)]; 36 | indice.app=[indice.app;indapp]; 37 | indice.test=[indice.test;indtest]; 38 | end; 39 | end; 40 | 41 | if length(nbtrain)==2; 42 | nbapppos=nbtrain(1); 43 | nbappneg=nbtrain(2); 44 | indpos=find(y==1); 45 | indneg=find(y==-1); 46 | nbpos=length(indpos); 47 | nbneg=length(indneg); 48 | auxpos=randperm(nbpos); 49 | auxneg=randperm(nbneg); 50 | indapp=[indpos(auxpos(1:nbapppos)) ;indneg(auxneg(1:nbappneg))]; 51 | indtest=[ indpos(auxpos(nbapppos+1:end)) ; indneg(auxneg(nbappneg+1:end))]; 52 | xapp=x(indapp,:); 53 | yapp=y(indapp); 54 | xtest=x(indtest,:); 55 | ytest=y(indtest,:); 56 | end; -------------------------------------------------------------------------------- /CreateKernelListWithVariable.m: -------------------------------------------------------------------------------- 1 | function [kernelcellaux,kerneloptioncellaux,variablecellaux]=CreateKernelListWithVariable(variablecell,dim,kernelcell,kerneloptioncell) 2 | 3 | 4 | j=1; 5 | for i=1:length(variablecell) 6 | switch variablecell{i} 7 | case 'all' 8 | kernelcellaux{j}=kernelcell{i}; 9 | kerneloptioncellaux{j}=kerneloptioncell{i}; 10 | variablecellaux{j}=1:dim; 11 | j=j+1; 12 | case 'single' 13 | for k=1:dim 14 | kernelcellaux{j}=kernelcell{i}; 15 | kerneloptioncellaux{j}=kerneloptioncell{i}; 16 | variablecellaux{j}=k; 17 | j=j+1; 18 | end; 19 | case 'random' 20 | kernelcellaux{j}=kernelcell{i}; 21 | kerneloptioncellaux{j}=kerneloptioncell{i}; 22 | indicerand=randperm(dim); 23 | nbvarrand=floor(rand*dim)+1; 24 | variablecellaux{j}=indicerand(1:nbvarrand); 25 | j=j+1; 26 | end; 27 | end; 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | SimpleMKL Toolbox 2 | ================= 3 | 4 | ## Description 5 | 6 | This package is a set of Matlab scripts that implements the SimpleMKL algorithm. The related papers are the ICML 2007 paper and the paper entitled SimpleMKL 7 | 8 | This code is provided for a sake of result reproducibility. Most of the scripts are documented with a online matlab help and some demo scripts are provided. 9 | 10 | The demo scripts are 11 | 12 | - exmklclass.m 13 | 14 | - exmklreg.m 15 | 16 | - exmklmulticlass.m 17 | 18 | - exmkllsdisk 19 | 20 | Within these demo scripts, one can just replace the 3-dimensional Gram Matrix given as examples with some ad-hoc matrices and get nice results! :) 21 | 22 | A more detailed documentation will (hopefully) be available as soon as possible. 23 | 24 | ## Features 25 | 26 | - MKL binary classification SVM 27 | - MKL SVM regression 28 | - One-against-all and One-against-One MKL SVM 29 | - Large-scale binary classification MKL SVM 30 | - Less-memory demanding kernel implementations 31 | 32 | This is the Matlab version 1.0 from [@arakoto](http://asi.insa-rouen.fr/enseignants/~arakoto/code/mklindex.html) 33 | -------------------------------------------------------------------------------- /UnitTraceNormalization.m: -------------------------------------------------------------------------------- 1 | function [Weigth,InfoKernel]=UnitTraceNormalization(x,kernelvec,kerneloptionvec,variablevec) 2 | 3 | chunksize=200; 4 | N=size(x,1); 5 | nbk=1; 6 | for i=1:length(kernelvec); 7 | % i 8 | for k=1:length(kerneloptionvec{i}) 9 | 10 | somme=0; 11 | 12 | chunks1=ceil(N/chunksize); 13 | 14 | for ch1=1:chunks1 15 | ind1=(1+(ch1-1)*chunksize) : min( N, ch1*chunksize); 16 | somme=somme+sum(diag(svmkernel(x(ind1,variablevec{i}),kernelvec{i},kerneloptionvec{i}(k)))); 17 | end; 18 | % for j=1:N 19 | % somme=somme+svmkernel(x(j,variablevec{i}),kernelvec{i},kerneloptionvec{i}(k)); 20 | % 21 | % end 22 | if somme~=0 23 | Weigth(nbk)=1/somme; 24 | InfoKernel(nbk).kernel=kernelvec{i}; 25 | InfoKernel(nbk).kerneloption=kerneloptionvec{i}(k); 26 | InfoKernel(nbk).variable=variablevec{i}; 27 | InfoKernel(nbk).Weigth=1/somme; 28 | nbk=nbk+1; 29 | % else 30 | % A 31 | end; 32 | end; 33 | end; -------------------------------------------------------------------------------- /VectorizeCofSVM.m: -------------------------------------------------------------------------------- 1 | function Cv=VectorizeCofSVM(yapp,Cplus,Cminus) 2 | % Cv=VectorizeCofSVM(yapp,Cplus,Cminus) 3 | % n=length(yapp); 4 | % if nargin <3 5 | % indpos=find(yapp==1); 6 | % indneg=find(yapp==-1); 7 | % Cv=zeros(size(yapp)); 8 | % Cv(indpos)=Cplus*length(indpos)/n; 9 | % Cv(indneg)=Cplus*length(indneg)/n; 10 | % else 11 | % Cv=zeros(size(yapp)); 12 | % Cv(indpos)=Cplus; 13 | % Cv(indneg)=Cminus; 14 | % end; 15 | 16 | n=length(yapp); 17 | if nargin <3 18 | indpos=find(yapp==1); 19 | indneg=find(yapp==-1); 20 | Cv=zeros(size(yapp)); 21 | Cv(indpos)=Cplus*length(indneg)/n; 22 | Cv(indneg)=Cplus*length(indpos)/n; 23 | else 24 | Cv=zeros(size(yapp)); 25 | Cv(indpos)=Cplus; 26 | Cv(indneg)=Cminus; 27 | end; 28 | -------------------------------------------------------------------------------- /WeightK.m: -------------------------------------------------------------------------------- 1 | function [K,weight]=WeightK(K,option) 2 | 3 | 4 | % USAGE 5 | % 6 | % [K,weight]=WeightK(K,option) 7 | % 8 | % Normalize kernel to unit trace and eventually reweight them according 9 | % to Bach's 2004 trick. K=K/(nbvp)^option.power if option.power > 0 10 | % 11 | % option.weigth : normalize the kernels with these weights 12 | % option.pow : nbvp^power 13 | % 14 | % 15 | % Outputs 16 | % 17 | % K : normalized kernels 18 | % weights : normalizing weights 19 | % 20 | % 21 | 22 | 23 | if nargin ==1 | ~isfield(option,'weight'); 24 | weight=[]; 25 | else 26 | weight=option.weight; 27 | end; 28 | if nargin ==1 | ~isfield(option,'power'); 29 | pow=0; 30 | else 31 | pow=option.power; 32 | end, 33 | 34 | [nbdata,ndata,nbkernel]=size(K); 35 | seuilvp=1/2/nbdata; 36 | 37 | if isempty(weight) 38 | for i=1:nbkernel 39 | if sum(diag(K(:,:,i)))>0 40 | weight(i)=1/sum(diag(K(:,:,i))); 41 | else 42 | weight(i)=0; 43 | end; 44 | K(:,:,i)=K(:,:,i)*weight(i); 45 | if pow>0 46 | [V,D]=eig(K(:,:,i));clear V; 47 | D=diag(D); 48 | nbvp=sum(D>seuilvp); 49 | weight(i)=weight(i)/(nbvp^pow); 50 | K(:,:,i)=K(:,:,i)/(nbvp^pow); 51 | end 52 | end; 53 | else 54 | 55 | for i=1:nbkernel 56 | K(:,:,i)=K(:,:,i)*weight(i); 57 | end; 58 | end; 59 | -------------------------------------------------------------------------------- /build_efficientK.m: -------------------------------------------------------------------------------- 1 | function Kse = build_efficient_K(Ks); 2 | % build efficient representation of the kernel matrices 3 | % efficient_type = 0 -> not efficient 4 | % 1 -> efficient 5 | %matlab7 = str2num(version('-release')) >=14; 6 | 7 | 8 | Kse.nbkernel = size(Ks,3); 9 | Kse.n = size(Ks,1); 10 | 11 | nbkernel = size(Ks,3); 12 | n = size(Ks,1); 13 | if isa(Ks,'single'); 14 | Kse.data = zeros(n*(n+1)/2,nbkernel,'single'); 15 | else 16 | Kse.data = zeros(n*(n+1)/2,nbkernel); 17 | end 18 | 19 | 20 | for j=1:nbkernel 21 | if isa(Ks,'single'); 22 | Kse.data(:,j) = vectorize_single(Ks(:,:,j)); 23 | else 24 | Kse.data(:,j) = vectorize(Ks(:,:,j)); 25 | end 26 | end 27 | 28 | 29 | -------------------------------------------------------------------------------- /costsvmclass.m: -------------------------------------------------------------------------------- 1 | function [cost,Alpsupaux,w0aux,posaux] = costsvmclass(K,StepSigma,DirSigma,Sigma,indsup,Alpsup,C,yapp,option); 2 | 3 | 4 | global nbcall 5 | nbcall=nbcall+1; 6 | 7 | nsup = length(indsup); 8 | [n]=length(yapp); 9 | 10 | Sigma = Sigma+ StepSigma * DirSigma; 11 | kerneloption.matrix=sumKbeta(K,Sigma); 12 | kernel='numerical'; 13 | span=1; 14 | lambdareg=option.lambdareg; 15 | verbosesvm=option.verbosesvm; 16 | alphainit=zeros(size(yapp)); 17 | alphainit(indsup)=yapp(indsup).*Alpsup; 18 | [xsup,Alpsupaux,w0aux,posaux,timeps,alpha,cost] = svmclass([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,alphainit); -------------------------------------------------------------------------------- /costsvmclassls.m: -------------------------------------------------------------------------------- 1 | function [cost,Alpsupaux,w0aux,posaux] = costsvmclassls(K,StepSigma,DirSigma,Sigma,indsup,Alpsup,C,yapp,option); 2 | 3 | % Usage 4 | % 5 | % [cost,Alpsupaux,w0aux,posaux] = costsvmclassls(K,StepSigma,DirSigma,Sigma,indsup,Alpsup,C,yapp,option); 6 | % 7 | % compute svm solution and cost for given value of Sigma and StepSigma 8 | % 9 | % option.sumbeta must be set to either 'storefullsum' or 'onthefly' 10 | % depending if you want to store the full gram matrix or compute in 11 | % on the fly 12 | % 13 | 14 | 15 | % AR 26/09/2007 16 | 17 | nsup = length(indsup); 18 | [n]=length(yapp); 19 | 20 | Sigma = Sigma+ StepSigma * DirSigma; 21 | 22 | verbosesvm=option.verbosesvm; 23 | lambdareg=option.lambdareg; 24 | span=1; 25 | alphainit=zeros(size(yapp)); 26 | alphainit(indsup)=yapp(indsup).*Alpsup; 27 | 28 | 29 | 30 | kernel='numerical'; 31 | switch option.sumbeta 32 | case 'storefullsum' 33 | kerneloption.matrix=sumKbetals(K,Sigma); 34 | [xsup,Alpsupaux,w0aux,posaux,timeps,alpha,cost] = svmclass([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,alphainit); 35 | 36 | case 'onthefly'; 37 | K.sigma=Sigma; 38 | kerneloption=K; 39 | qpsize=3000; 40 | chunksize=3000; 41 | 42 | [xsup,Alpsupaux,w0aux,posaux,alpha,status,cost] = svmclasslsformkl([],yapp,C,lambdareg,kernel,kerneloption,option.verbosesvm,span,qpsize,chunksize,alphainit); 43 | 44 | otherwise 45 | error('No kernels defined ...'); 46 | end; 47 | 48 | -------------------------------------------------------------------------------- /costsvmoneagainstall.m: -------------------------------------------------------------------------------- 1 | function [cost,Alpsupaux,w0aux,posaux,nbsv] = costsvmoneagainstall(K,StepSigma,DirSigma,Sigma,Alpsup,C,yapp,pos,nbsv,nbclass,option); 2 | 3 | 4 | 5 | 6 | %nsup = length(indsup); 7 | [n]=length(yapp); 8 | 9 | Sigma = Sigma+ StepSigma * DirSigma; 10 | kerneloption.matrix=sumKbeta(K,Sigma); 11 | kernel='numerical'; 12 | span=1; 13 | lambdareg=option.lambdareg; 14 | 15 | warmstart.nbsv=nbsv; 16 | warmstart.alpsup=Alpsup; 17 | warmstart.pos=pos; 18 | verbose=option.verbosesvm; 19 | [xsup,Alpsupaux,w0aux,nbsv,posaux,cost]=svmmulticlassoneagainstall([],yapp,nbclass,C,lambdareg,kernel,kerneloption,verbose,warmstart); 20 | 21 | 22 | -------------------------------------------------------------------------------- /costsvmoneagainstone.m: -------------------------------------------------------------------------------- 1 | function [cost,Alpsupaux,w0aux,posaux,nbsv] = costsvmoneagainstone(K,StepSigma,DirSigma,Sigma,Alpsup,C,yapp,pos,nbsv,nbclass,option); 2 | 3 | 4 | 5 | 6 | %nsup = length(indsup); 7 | [n]=length(yapp); 8 | 9 | Sigma = Sigma+ StepSigma * DirSigma; 10 | kerneloption.matrix=sumKbeta(K,Sigma); 11 | kernel='numerical'; 12 | span=1; 13 | lambdareg=option.lambdareg; 14 | 15 | warmstart.nbsv=nbsv; 16 | warmstart.alpsup=Alpsup; 17 | warmstart.pos=pos; 18 | verbose=option.verbosesvm; 19 | [xsup,Alpsupaux,w0aux,nbsv,aux,posaux,cost]=svmmulticlassoneagainstone([],yapp,nbclass,C,lambdareg,kernel,kerneloption,verbose); 20 | -------------------------------------------------------------------------------- /costsvmreg.m: -------------------------------------------------------------------------------- 1 | function [cost,Alpsupaux,b,posaux] = costsvmreg(K,StepSigma,DirSigma,Sigma,indsup,Alpsup,C,yapp,options); 2 | 3 | 4 | 5 | 6 | nsup = length(indsup); 7 | [n]=length(yapp); 8 | 9 | Sigma = Sigma+ StepSigma * DirSigma; 10 | kerneloption.matrix=sumKbeta(K,Sigma); 11 | kernel='numerical'; 12 | span=[]; 13 | lambdareg=options.lambdareg; 14 | verbose=options.verbosesvm; 15 | 16 | [xsup,ysup,aux,b,posaux,Alpsupaux,cost] = svmreg([],yapp,C,options.svmreg_epsilon,kernel,kerneloption,lambdareg,verbose,span,[],[],[],Alpsup); 17 | -------------------------------------------------------------------------------- /devectorize.c: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include 3 | 4 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 5 | { 6 | int k,k1,i,j,n,ni; 7 | double *K, *z; 8 | 9 | K = mxGetPr(prhs[0]); 10 | n = mxGetM(prhs[0]); 11 | n = floor( sqrt( 2 * n ) ); 12 | 13 | 14 | plhs[0]=mxCreateDoubleMatrix(n,n,0); 15 | z= mxGetPr(plhs[0]); 16 | k=0; 17 | k1=0; 18 | for (j=0;j<=n-1;j++) 19 | { 20 | ni=j; 21 | for (i=0;i<=j;i++) 22 | { 23 | z[i+k1]=K[k]; 24 | z[ni]=K[k]; 25 | k++; 26 | ni+=n; 27 | } 28 | k1 += n; 29 | } 30 | } 31 | 32 | 33 | -------------------------------------------------------------------------------- /devectorize.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/devectorize.mexglx -------------------------------------------------------------------------------- /devectorize_single.c: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include 3 | 4 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 5 | { 6 | int k,k1,i,j,n,ni; 7 | float *K, *z; 8 | 9 | K = mxGetPr(prhs[0]); 10 | n = mxGetM(prhs[0]); 11 | n = floor( sqrt( 2 * n ) ); 12 | 13 | 14 | plhs[0]=mxCreateNumericMatrix(n,n,mxSINGLE_CLASS,0); 15 | z= mxGetPr(plhs[0]); 16 | k=0; 17 | k1=0; 18 | for (j=0;j<=n-1;j++) 19 | { 20 | ni=j; 21 | for (i=0;i<=j;i++) 22 | { 23 | z[i+k1]=K[k]; 24 | z[ni]=K[k]; 25 | k++; 26 | ni+=n; 27 | } 28 | k1 += n; 29 | } 30 | } 31 | 32 | 33 | -------------------------------------------------------------------------------- /devectorize_single.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/devectorize_single.mexglx -------------------------------------------------------------------------------- /exmklclass.m: -------------------------------------------------------------------------------- 1 | % Example of how to use the mklsvm for classification 2 | % 3 | % 4 | 5 | clear all 6 | close all 7 | 8 | nbiter=1; 9 | ratio=0.5; 10 | data='ionosphere' 11 | C = [100]; 12 | verbose=1; 13 | 14 | options.algo='svmclass'; % Choice of algorithm in mklsvm can be either 15 | % 'svmclass' or 'svmreg' 16 | %------------------------------------------------------ 17 | % choosing the stopping criterion 18 | %------------------------------------------------------ 19 | options.stopvariation=0; % use variation of weights for stopping criterion 20 | options.stopKKT=0; % set to 1 if you use KKTcondition for stopping criterion 21 | options.stopdualitygap=1; % set to 1 for using duality gap for stopping criterion 22 | 23 | %------------------------------------------------------ 24 | % choosing the stopping criterion value 25 | %------------------------------------------------------ 26 | options.seuildiffsigma=1e-2; % stopping criterion for weight variation 27 | options.seuildiffconstraint=0.1; % stopping criterion for KKT 28 | options.seuildualitygap=0.01; % stopping criterion for duality gap 29 | 30 | %------------------------------------------------------ 31 | % Setting some numerical parameters 32 | %------------------------------------------------------ 33 | options.goldensearch_deltmax=1e-1; % initial precision of golden section search 34 | options.numericalprecision=1e-8; % numerical precision weights below this value 35 | % are set to zero 36 | options.lambdareg = 1e-8; % ridge added to kernel matrix 37 | 38 | %------------------------------------------------------ 39 | % some algorithms paramaters 40 | %------------------------------------------------------ 41 | options.firstbasevariable='first'; % tie breaking method for choosing the base 42 | % variable in the reduced gradient method 43 | options.nbitermax=500; % maximal number of iteration 44 | options.seuil=0; % forcing to zero weights lower than this 45 | options.seuilitermax=10; % value, for iterations lower than this one 46 | 47 | options.miniter=0; % minimal number of iterations 48 | options.verbosesvm=0; % verbosity of inner svm algorithm 49 | 50 | % 51 | % Note: set 1 would raise the `strrep` 52 | % error in vectorize.dll 53 | % and this error is not able to fix 54 | % because of the missing .h libraay files 55 | % Modify: MaxisKao @ Sep. 4 2014 56 | options.efficientkernel=0; % use efficient storage of kernels 57 | 58 | 59 | %------------------------------------------------------------------------ 60 | % Building the kernels parameters 61 | %------------------------------------------------------------------------ 62 | kernelt={'gaussian' 'gaussian' 'poly' 'poly' }; 63 | kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [0.5 1 2 5 7 10 12 15 17 20] [1 2 3] [1 2 3]}; 64 | variablevec={'all' 'single' 'all' 'single'}; 65 | 66 | 67 | classcode=[1 -1]; 68 | load([data ]); 69 | [nbdata,dim]=size(x); 70 | 71 | nbtrain=floor(nbdata*ratio); 72 | rand('state',0); 73 | 74 | for i=1: nbiter 75 | i 76 | [xapp,yapp,xtest,ytest,indice]=CreateDataAppTest(x, y, nbtrain,classcode); 77 | [xapp,xtest]=normalizemeanstd(xapp,xtest); 78 | [kernel,kerneloptionvec,variableveccell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 79 | [Weight,InfoKernel]=UnitTraceNormalization(xapp,kernel,kerneloptionvec,variableveccell); 80 | K=mklkernel(xapp,InfoKernel,Weight,options); 81 | 82 | 83 | 84 | %------------------------------------------------------------------ 85 | % 86 | % K is a 3-D matrix, where K(:,:,i)= i-th Gram matrix 87 | % 88 | %------------------------------------------------------------------ 89 | % or K can be a structure with uses a more efficient way of storing 90 | % the gram matrices 91 | % 92 | % K = build_efficientK(K); 93 | 94 | tic 95 | [beta,w,b,posw,story(i),obj(i)] = mklsvm(K,yapp,C,options,verbose); 96 | timelasso(i)=toc 97 | 98 | Kt=mklkernel(xtest,InfoKernel,Weight,options,xapp(posw,:),beta); 99 | ypred=Kt*w+b; 100 | 101 | bc(i)=mean(sign(ypred)==ytest) 102 | 103 | end;% 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /exmkllsdisk.m: -------------------------------------------------------------------------------- 1 | % 2 | % 3 | 4 | clear all 5 | close all 6 | 7 | 8 | nbiter=1; 9 | 10 | ratio=0.1; 11 | data='credit' 12 | 13 | C = [1000]; 14 | 15 | options.seuildiffsigma=1e-4; 16 | options.seuildiffconstraint=0.1; 17 | options.seuildualitygap=0.01; 18 | options.goldensearch_deltmax=1e-1; 19 | options.numericalprecision=1e-8; 20 | options.stopvariation=0; 21 | options.stopKKT=0; 22 | options.stopdualitygap=1; 23 | options.firstbasevariable='first'; 24 | options.nbitermax=500; 25 | options.seuil=0.00; 26 | options.seuilitermax=10; 27 | options.lambdareg = 1e-8; 28 | options.miniter=0; 29 | options.verbosesvm=0; 30 | options.sumbeta='storefullsum'; % 'storefullsum' or 'onthefly' 31 | options.storefly='store'; 32 | options.efficientkernel=0; 33 | 34 | verbose=1; 35 | 36 | load(['../data/' data '/' data ]); 37 | %x=single(x); 38 | classcode=[1 -1]; 39 | 40 | kernelt={'gaussian' 'gaussian' 'poly' 'poly' }; 41 | kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [0.5 1 2 5 7 10 12 15 17 20] [1 2 3] [1 2 3]}; 42 | variablevec={'all' 'single' 'all' 'single'}; 43 | 44 | 45 | % % spamdata 46 | % kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [1 2 3 4] [1 2 3 4]}; 47 | % variablevec={'all' 'all' 'single'} 48 | % 49 | % % coverbin 50 | % kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [1 2 3 4]}; 51 | % variablevec={'all' 'all'} 52 | 53 | [nbdata,dim]=size(x); 54 | nbtrain=floor(nbdata*ratio); 55 | rand('state',0);; 56 | 57 | for i=1: nbiter 58 | 59 | 60 | 61 | 62 | indice=randperm(nbdata); 63 | indapp=indice(1:nbtrain); 64 | indtest=indice(nbtrain+1:nbdata); 65 | xapp=x(indapp,:); 66 | xtest=x(indtest,:); 67 | yapp=y(indapp,:); 68 | ytest=y(indtest,:); 69 | 70 | [xapp,xtest]=normalizemeanstd(xapp,xtest); 71 | 72 | fprintf('Creating & Processing Kernels...'); 73 | %------------------------------------------------------ 74 | % create the list of kernels and their weights 75 | %------------------------------------------------------ 76 | 77 | [kernelvec,kerneloptionvec,optionK.variablecell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 78 | [Weight,InfoKernel]=UnitTraceNormalization(xapp,kernelvec,kerneloptionvec,optionK.variablecell); 79 | 80 | %%--------------------------------------------- 81 | %% Comments 82 | %%--------------------------------------------- 83 | %% for large scale mkl, 2 bottlenecks 84 | %% - two many examples : one has to use SVM with decomposition methods 85 | %% - two many kernels and examples: one has to store precomputed kernels 86 | %% or build them on the fly during the decomposition method 87 | %% 88 | %% if there are two many kernels but their sums can be stored in 89 | %% memory (say nb of examples is less than 3000), one can set 90 | %% option.sumbeta to 'storefullsum' so that the kernels are summed from 91 | %% file 'store' or computed on the fly 'fly' and stored in memory... no decomposition methods is used. 92 | %% 93 | %% 94 | %% In the other cases, decomposition methods has to be used. Kernels 95 | %% can be computed on the fly or loaded on the fly. For doing so 96 | %% option.sumbeta must be set to 'onthefly'. 97 | 98 | 99 | 100 | switch options.storefly 101 | 102 | case 'store' 103 | %------------------------------------ 104 | % save each row of a kernel 105 | %------------------------------------ 106 | tempdir='./temp/'; 107 | 108 | 109 | for k=1:length(Weight) 110 | 111 | Kr=svmkernel(xapp(:,InfoKernel(k).variable),InfoKernel(k).kernel,InfoKernel(k).kerneloption); 112 | 113 | Kr=Kr*Weight(k); 114 | if options.efficientkernel 115 | Kr=build_efficientK(Kr); 116 | end; 117 | save([tempdir 'K' int2str(k) '.mat'],'Kr'); 118 | 119 | end; 120 | 121 | 122 | K.size=length(Weight); 123 | K.tempdir=tempdir; 124 | K.nbdata=size(xapp,1); 125 | 126 | 127 | 128 | 129 | case 'fly' 130 | %------------------------------------------ 131 | % For doing on the fly kernel processing 132 | %---------------------------------------- 133 | K.size=length(Weight); 134 | K.x=xapp; 135 | K.info=InfoKernel; 136 | %%Example : Kin=sumKbetaread(K,sigma,1:size(xapp,1), 1:size(xapp,1)); 137 | 138 | end; 139 | 140 | fprintf('done \n'); 141 | Nr=10; 142 | sigmainit=[rand(1,Nr) zeros(1,K.size-Nr)]; 143 | options.sigmainit=sigmainit/sum(sigmainit); 144 | 145 | 146 | 147 | % Cv=VectorizeCofSVM(yapp,C); 148 | tic 149 | [beta,w,b,posw,story,obj]=mklsvmls(K,yapp,C,options,verbose); 150 | time(i)=toc; 151 | 152 | save (['resultat-' data '.mat'],'time') 153 | 154 | end; 155 | -------------------------------------------------------------------------------- /exmklmulticlass.m: -------------------------------------------------------------------------------- 1 | % 2 | % Example MKL MultiClass SVM Classifiction 3 | % 4 | 5 | 6 | 7 | close all 8 | clear all 9 | %------------------------------------------------------ 10 | % Creating data 11 | %------------------------------------------------------ 12 | n=20; 13 | sigma=1.2; 14 | nbclass=3; 15 | 16 | x1= sigma*randn(n,2)+ ones(n,1)*[-1.5 -1.5]; 17 | x2= sigma*randn(n,2)+ ones(n,1)*[0 2]; 18 | x3= sigma*randn(n,2)+ ones(n,1)*[2 -1.5]; 19 | xapp=[x1;x2;x3]; 20 | yapp=[1*ones(1,n) 2*ones(1,n) 3*ones(1,n)]'; 21 | 22 | [n1, n2]=size(xapp); 23 | [xtesta1,xtesta2]=meshgrid([-4:0.1:4],[-4:0.1:4]); 24 | [na,nb]=size(xtesta1); 25 | xtest1=reshape(xtesta1,1,na*nb); 26 | xtest2=reshape(xtesta2,1,na*nb); 27 | xtest=[xtest1;xtest2]'; 28 | 29 | 30 | %---------------------------------------------------------- 31 | % Learning and Learning Parameters 32 | % Parameters are similar to those used for mklsvm 33 | %----------------------------------------------------------- 34 | 35 | C = 100; 36 | lambda = 1e-7; 37 | verbose = 1; 38 | options.algo='oneagainstall'; 39 | options.seuildiffsigma=1e-4; 40 | options.seuildiffconstraint=0.1; 41 | options.seuildualitygap=1e-2; 42 | options.goldensearch_deltmax=1e-1; 43 | options.numericalprecision=1e-8; 44 | options.stopvariation=1; 45 | options.stopKKT=1; 46 | options.stopdualitygap=0; 47 | options.firstbasevariable='first'; 48 | options.nbitermax=500; 49 | options.seuil=0.; 50 | options.seuilitermax=10; 51 | options.lambdareg = 1e-6; 52 | options.miniter=0; 53 | options.verbosesvm=0; 54 | options.efficientkernel=1; 55 | %------------------------------------------------------------ 56 | 57 | kernelt={'gaussian' 'gaussian' 'poly' 'poly' }; 58 | kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [0.5 1 2 5 7 10 12 15 17 20] [1 2 3] [1 2 3]}; 59 | variablevec={'all' 'single' 'all' 'single'}; 60 | 61 | 62 | 63 | [nbdata,dim]=size(xapp); 64 | [kernel,kerneloptionvec,variableveccell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 65 | [Weight,InfoKernel]=UnitTraceNormalization(xapp,kernel,kerneloptionvec,variableveccell); 66 | K=mklkernel(xapp,InfoKernel,Weight,options); 67 | 68 | %---------------------Learning & Testing ---------------- 69 | 70 | 71 | [beta,w,w0,pos,nbsv,SigmaH,obj] = mklmulticlass(K,yapp,C,nbclass,options,verbose); 72 | xsup=xapp(pos,:); 73 | Kt=mklkernel(xtest,InfoKernel,Weight,options,xsup,beta); 74 | kernel='numerical'; 75 | kerneloption.matrix=Kt; 76 | switch options.algo 77 | case 'oneagainstall' 78 | [ypred,maxi] = svmmultival([],[],w,w0,nbsv,kernel,kerneloption); 79 | case 'oneagainstone' 80 | [ypred,vote]=svmmultivaloneagainstone([],[],w,w0,nbsv,kernel,kerneloption); 81 | end; 82 | 83 | 84 | %------------------------------------------------------------------ 85 | % Plotting the decision function 86 | %------------------------------------------------------------------- 87 | ypredmat=reshape(ypred,na,nb); 88 | contour(xtesta1,xtesta2,ypredmat,[1 2 3]);hold on 89 | style=['x+*']; 90 | color=['bgr']; 91 | hold on 92 | for i=0:nbclass-1 93 | h=plot(xapp(i*n+1:(i+1)*n,1),xapp(i*n+1:(i+1)*n,2),[style(i+1) color(i+1)]); 94 | set(h,'LineWidth',2); 95 | hold on 96 | end; 97 | 98 | if ~isempty(xsup) 99 | h=plot(xsup(:,1),xsup(:,2),'ok'); 100 | set(h,'LineWidth',2); 101 | axis( [ -4 4 -4 4]); 102 | legend('classe 1','classe 2','classe 3', 'Support Vector'); 103 | hold off 104 | end 105 | 106 | 107 | 108 | 109 | -------------------------------------------------------------------------------- /exmklmulticlassrealdata.m: -------------------------------------------------------------------------------- 1 | % 2 | % Example MKL MultiClass SVM Classifiction 3 | % 4 | 5 | 6 | 7 | close all 8 | clear all 9 | %-------------------------------------------------- 10 | % Creatong 11 | 12 | data='dna'; 13 | ratio=0.1; 14 | classcode= [ 1 2 3]; 15 | nbclass=3; 16 | %---------------------------------------------------------- 17 | % Learning and Learning Parameters 18 | C = 1000; 19 | lambda = 1e-7; 20 | verbose = 1; 21 | options.algo='oneagainstall'; 22 | options.seuildiffsigma=1e-4; 23 | options.seuildiffconstraint=0.1; 24 | options.seuildualitygap=1e-2; 25 | options.goldensearch_deltmax=1e-1; 26 | options.numericalprecision=1e-9; 27 | options.stopvariation=1; 28 | options.stopKKT=1; 29 | options.stopdualitygap=0; 30 | options.firstbasevariable='first'; 31 | options.nbitermax=500; 32 | options.seuil=0.000; 33 | options.seuilitermax=10; 34 | options.lambdareg = 1e-8; 35 | options.miniter=0; 36 | options.verbosesvm=0; 37 | options.efficientkernel=1; 38 | %------------------------------------------------------------ 39 | 40 | kernelt={'gaussian' 'gaussian' 'poly' 'poly' }; 41 | kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [1 2 3]}; 42 | variablevec={'all' 'all' }; 43 | 44 | load(['../data/' data '/' data '.mat']); 45 | nbtrain=round(ratio*size(y,1)); 46 | 47 | randn('seed',0); 48 | rand('seed',0); 49 | 50 | [nbdata,dim]=size(x); 51 | [xapp,yapp,xtest,ytest,indice]=CreateDataAppTest(x, y, nbtrain,classcode); 52 | [xapp,xtest,meanxapp,stdxapp] = normalizemeanstd(xapp,xtest); 53 | 54 | 55 | 56 | %-------------------- Creating kernels ------------------------------ 57 | [kernel,kerneloptionvec,variableveccell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 58 | [Weight,InfoKernel]=UnitTraceNormalization(xapp,kernel,kerneloptionvec,variableveccell); 59 | K=mklkernel(xapp,InfoKernel,Weight,options); 60 | 61 | %------------------------------------------------------------------ 62 | % 63 | % K is a 3-D matrix, where K(:,:,i)= i-th Gram matrix 64 | % 65 | %------------------------------------------------------------------ 66 | % or K can be a structure with uses a more efficient way of storing 67 | % the gram matrices 68 | % 69 | K = build_efficientK(K); 70 | 71 | %---------------------One Against All algorithms--------------------- 72 | 73 | [beta,w,w0,pos,nbsv,SigmaH,obj] = mklmulticlass(K,yapp,C,nbclass,options,verbose); 74 | xsup=xapp(pos,:); 75 | Kt=mklkernel(xtest,InfoKernel,Weight,options,xsup,beta); 76 | kernel='numerical'; 77 | kerneloption.matrix=Kt; 78 | [ypred,maxi] = svmmultival([],[],w,w0,nbsv,kernel,kerneloption); 79 | [Conf,metric]=ConfusionMatrix(ypred,ytest,classcode) 80 | 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /exmklreg.m: -------------------------------------------------------------------------------- 1 | % Example of how to use the mklreg 2 | % 3 | 4 | clear all 5 | close all 6 | clc 7 | %-------------------creating and plotting data---------------- 8 | n=100; 9 | bruit=0.3; 10 | freq=0.8; 11 | x=linspace(0,8,n)'; 12 | xtest=linspace(0,8,n)'; 13 | xapp=x; 14 | yapp=cos(exp(freq*x)) +randn(n,1)*bruit; 15 | ytest=cos(exp(freq*xtest)); 16 | 17 | 18 | %----------------------Learning Parameters ------------------- 19 | C = 100; 20 | verbose=1; 21 | options.algo='svmreg'; 22 | options.seuildiffsigma=1e-3; 23 | options.seuildiffconstraint=0.01; 24 | options.seuildualitygap=0.01; 25 | options.goldensearch_deltmax=1e-2; 26 | options.numericalprecision=1e-8; 27 | options.stopvariation=0; 28 | options.stopKKT=0; 29 | options.stopdualitygap=1; 30 | options.firstbasevariable='first'; 31 | options.nbitermax=500; 32 | options.seuil=0; 33 | options.seuilitermax=10; 34 | options.lambdareg = 1e-8; 35 | options.miniter=0; 36 | options.verbosesvm=0; 37 | options.svmreg_epsilon=0.01; 38 | options.efficientkernel=0; 39 | optionK.pow=0; 40 | 41 | kernelt={'gaussian' 'poly'}; 42 | kerneloptionvect={[0.01:0.05:0.2 0.5 1 2 5 7 10 12 15 17 20] [1 2 3]}; 43 | variablevec={'all' 'all'}; 44 | dim=size(xapp,2); 45 | 46 | [kernel,kerneloptionvec,optionK.variablecell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 47 | [K]=mklbuildkernel(xapp,kernel,kerneloptionvec,[],[],optionK); 48 | [K,optionK.weightK]=WeightK(K); 49 | 50 | 51 | 52 | [beta,w,b,posw,story,obj] = mklsvm(K,yapp,C,options,verbose); 53 | kerneloption.matrix=mklbuildkernel(xtest,kernel,kerneloptionvec,xapp(posw,:),beta,optionK); 54 | ypred=svmval([],[],w,b,'numerical',kerneloption); 55 | plot(xtest,ytest,'b',xapp,yapp,'r',xapp,yapp,'r+',xtest,ypred,'g') -------------------------------------------------------------------------------- /exmklvariationC.m: -------------------------------------------------------------------------------- 1 | 2 | clear all 3 | %close all 4 | 5 | 6 | data='pima' 7 | nbiter=1; 8 | ratio=0.7; 9 | N=100; 10 | Cvec = logspace(-2,3,N); 11 | 12 | 13 | 14 | options.algo='svmclass'; 15 | options.seuildiffsigma=1e-3; 16 | options.seuildiffconstraint=0.1; 17 | options.seuildualitygap=0.01; 18 | options.goldensearch_deltmax=1e-1; 19 | options.numericalprecision=1e-14; 20 | options.stopvariation=0; 21 | options.stopKKT=0; 22 | options.stopdualitygap=1; 23 | options.firstbasevariable='first'; 24 | options.nbitermax=500; 25 | options.seuil=0.0; 26 | options.seuilitermax=10; 27 | options.lambdareg = 1e-8; 28 | options.miniter=0; 29 | 30 | puiss=0; 31 | 32 | lambda = 1e-8; 33 | span=1; 34 | verbose=1; 35 | load(['../data/' data '/' data ]); 36 | classcode=[1 -1]; 37 | 38 | kernelt={'gaussian' 'gaussian' 'poly' 'poly' }; 39 | kerneloptionvect={[0.5 1 2 5 7 10 12 15 17 20] [0.5 1 2 5 7 10 12 15 17 20] [1 2 3] [1 2 3]}; 40 | variablevec={'all' 'single' 'all' 'single'}; 41 | filename=['resultatvariationC-' data '.mat']; 42 | [nbdata,dim]=size(x); 43 | nbtrain=floor(nbdata*ratio) 44 | rand('state',12); 45 | 46 | 47 | 48 | 49 | 50 | indice=randperm(nbdata); 51 | indapp=indice(1:nbtrain); 52 | indtest=indice(nbtrain+1:nbdata); 53 | xapp=x(indapp,:); 54 | xtest=x(indtest,:); 55 | yapp=y(indapp,:); 56 | ytest=y(indtest,:); 57 | 58 | [xapp,xtest]=normalizemeanstd(xapp,xtest); 59 | [nbdata,dim]=size(xapp); 60 | [kernel,kerneloptionvec,optionK.variablecell]=CreateKernelListWithVariable(variablevec,dim,kernelt,kerneloptionvect); 61 | [K]=mklbuildkernel(xapp,kernel,kerneloptionvec,[],[],optionK); 62 | option.power=0; 63 | [K,optionK.weightK]=WeightK(K,option); 64 | [Kt]=mklbuildkernel(xtest,kernel,kerneloptionvec,xapp,[],optionK); 65 | 66 | 67 | options2=options; 68 | optionssor=options; 69 | % first=1; 70 | % for j=1:length(Cvec); 71 | % C=Cvec(j) 72 | % if ~first 73 | % aux =optionssor.alphainit; 74 | % aux(find(aux)==Cvec(j-1))=Cvec(j); 75 | % optionssor.alphainit=aux; 76 | % optionssor.thetainit=theta; 77 | % end; 78 | % tic 79 | % [w,b,beta,posw,fval(j),storysoren]=mklsvmclassSILP(K,yapp,C,verbose,optionssor); 80 | % theta=fval(i,j); 81 | % time(i,j)=toc 82 | % verbose=1; 83 | % betavecsoren(j,:)=beta'; 84 | % optionssor.sigmainit=beta'; 85 | % alphainit=zeros(size(yapp)); 86 | % alphainit(posw)=w.*yapp(posw); 87 | % optionssor.alphainit=alphainit; 88 | % sumKt=sumKbeta(Kt,beta'.*optionK.weightK); % See mklbuildkernel 89 | % ypred=sumKt(:,posw)*w +b ; 90 | % bcsoren(i,j)=mean(sign(ypred)==ytest); 91 | % first=0; 92 | % end 93 | first=1; 94 | for j=length(Cvec):-1:1 95 | C=Cvec(j) 96 | if ~first 97 | aux =options.alphainit; 98 | aux(find(aux)==Cvec(j+1))=Cvec(j); 99 | options.alphainit=aux; 100 | end; 101 | 102 | tic 103 | [beta2,w2,b2,posw,story(j),obj(j)] = mklsvm(K,yapp,C,options,verbose); 104 | timelasso2(j)=toc 105 | alphainit=zeros(size(yapp)); 106 | alphainit(posw)=w2.*yapp(posw); 107 | 108 | betavec2(j,:)=beta2; 109 | %sumKt=sumKbeta(Kt,beta2.*optionK.weightK); % See mklbuildkernel 110 | % ypred=sumKt(:,posw)*w2 +b2 ; 111 | % bc(i,j)=mean(sign(ypred)==ytest); 112 | 113 | options.sigmainit=beta2; 114 | options.alphainit=alphainit; 115 | first=0; 116 | end; 117 | 118 | plot(Cvec,betavec2,'LineWidth',2); 119 | set(gca,'Xscale','log'); 120 | set(gcf,'color','white'); 121 | xlabel('C','Fonts',16) 122 | ylabel('d_k','Fonts',16) 123 | set(gca,'Fonts',16) 124 | 125 | 126 | -------------------------------------------------------------------------------- /gradsvmclass.m: -------------------------------------------------------------------------------- 1 | function [grad] = gradsvmclass(K,indsup,Alpsup,C,yapp,option); 2 | 3 | nsup = length(indsup); 4 | [n] = length(yapp); 5 | if ~isstruct(K) 6 | 7 | 8 | d=size(K,3); 9 | for k=1:d; 10 | 11 | % grad(k) = - 0.5*Alpsup'*Kaux(indsup,indsup)*(Alpsup) ; 12 | grad(k) = - 0.5*Alpsup'*K(indsup,indsup,k)*(Alpsup) ; 13 | end; 14 | else 15 | d=K.nbkernel; 16 | for k=1:d; 17 | if isa(K.data,'single') 18 | Kaux=devectorize_single(K.data(:,k)); 19 | else 20 | Kaux=devectorize(K.data(:,k)); 21 | end; 22 | grad(k) = - 0.5*Alpsup'*Kaux(indsup,indsup)*(Alpsup) ; 23 | 24 | end; 25 | 26 | end; 27 | 28 | -------------------------------------------------------------------------------- /gradsvmclassls.m: -------------------------------------------------------------------------------- 1 | function [grad] = gradsvmclassls(Kinfo,indsup,Alpsup,C,yapp,option); 2 | 3 | % Usage 4 | % 5 | % [grad] = gradsvmclassls(Kinfo,indsup,Alpsup,C,yapp,option); 6 | % 7 | % compute the gradient of all the weight variables. 8 | % 9 | % 10 | % if the structure Kinfo contains the field 11 | % 'x' and the struc 'info' then the kernel is computed 12 | % on the fly 13 | % 14 | % if the structure Kinfo has a field 'tempdir' 15 | % the kernel is laoded from files stored in './tempdir/' 16 | % 17 | % 18 | % see sumKbetals for the structure of K 19 | 20 | % A.R 26/09/2007 21 | nsup = length(indsup); 22 | [n] = length(yapp); 23 | nbkernel=Kinfo.size; 24 | 25 | chunksize=3000; 26 | for k=1:nbkernel; 27 | 28 | 29 | if ~isstruct(Kinfo) 30 | 31 | 32 | grad(k) = - 0.5*Alpsup'*K.matrix(indsup,indsup,k)*(Alpsup) ; 33 | elseif isfield(Kinfo,'x') & isfield(Kinfo,'info') 34 | %------------------------------------------ 35 | % On the fly 36 | %------------------------------------------ 37 | 38 | variabletouse=Kinfo.info(k).variable; 39 | kernel=Kinfo.info(k).kernel; 40 | kerneloption=Kinfo.info(k).kerneloption; 41 | 42 | if length(indsup)<=chunksize 43 | K=svmkernel(Kinfo.x(indsup,variabletouse),kernel,kerneloption)*Kinfo.info(k).Weigth; 44 | grad(k) = - 0.5*Alpsup'*K*(Alpsup) ; 45 | else 46 | % 47 | % if the nb of SV is too large, it may be useful to chunk 48 | Nbchunk=ceil(length(indsup)/chunksize); 49 | vectemp=zeros(length(indsup),1); 50 | for i=1:Nbchunk 51 | fprintf('.') ; 52 | ind1=(i-1)*chunksize+1:min( [i*chunksize length(indsup)]); 53 | for j=1:Nbchunk 54 | ind2=(j-1)*chunksize+1:min( [j*chunksize length(indsup)]); 55 | K=svmkernel(Kinfo.x(indsup(ind1),variabletouse),kernel,kerneloption,Kinfo.x(indsup(ind2),variabletouse))*Kinfo.info(k).Weigth; 56 | vectemp(ind1)=vectemp(ind1)+K*Alpsup(ind2); 57 | end; 58 | end; 59 | 60 | grad(k) = - 0.5*Alpsup'*vectemp; 61 | end; 62 | 63 | elseif isfield(Kinfo,'tempdir'); 64 | % LOAD from file 65 | 66 | 67 | file=['K' int2str(k)]; 68 | load([Kinfo.tempdir '/' file '.mat']); 69 | if isstruct(Kr) 70 | if isa(Kr.data,'single') 71 | Kr=devectorize_single(Kr.data); 72 | else 73 | Kr=devectorize(Kr.data); 74 | end; 75 | end; 76 | grad(k) = - 0.5*Alpsup'*Kr(indsup,indsup)*Alpsup ; 77 | 78 | 79 | 80 | end; 81 | 82 | end; 83 | -------------------------------------------------------------------------------- /gradsvmoneagainstall.m: -------------------------------------------------------------------------------- 1 | function [grad] = gradsvmoneagainstall(K,pos,Alpsup,yapp,nbsv,option); 2 | 3 | 4 | [n] = length(yapp); 5 | if ~isstruct(K) 6 | d=size(K,3); 7 | else 8 | d=size(K.data,2); % efficient formulation of kernel 9 | end; 10 | nbclass=length(nbsv); 11 | 12 | nbsv=[0 nbsv]; 13 | aux=cumsum(nbsv); 14 | for k=1:d; 15 | S=0; 16 | for i=1:nbclass 17 | waux=Alpsup(aux(i)+1:aux(i)+nbsv(i+1)); 18 | indsup=pos(aux(i)+1:aux(i)+nbsv(i+1)); 19 | if ~isstruct(K) 20 | S=S + (- 0.5* waux'* K(indsup,indsup,k)*waux) ; 21 | else 22 | Kaux=devectorize(K.data(:,k)); 23 | S=S + (- 0.5* waux'* Kaux(indsup,indsup)*waux) ; 24 | end; 25 | end; 26 | grad(k) = S; 27 | end; 28 | 29 | -------------------------------------------------------------------------------- /gradsvmoneagainstone.m: -------------------------------------------------------------------------------- 1 | function [grad] = gradsvmoneagainstone(K,pos,Alpsup,yapp,nbsv,option); 2 | 3 | 4 | [n] = length(yapp); 5 | if ~isstruct(K) 6 | d=size(K,3); 7 | else 8 | d=size(K.data,2); % efficient formulation of kernel 9 | end; 10 | nbclass=length(nbsv); 11 | 12 | nbsv=[0 nbsv]; 13 | aux=cumsum(nbsv); 14 | for k=1:d; 15 | S=0; 16 | for i=1:nbclass 17 | waux=Alpsup(aux(i)+1:aux(i)+nbsv(i+1)); 18 | indsup=pos(aux(i)+1:aux(i)+nbsv(i+1)); 19 | if ~isstruct(K) 20 | S=S + (- 0.5* waux'* K(indsup,indsup,k)*waux) ; 21 | else 22 | Kaux=devectorize(K.data(:,k)); 23 | S=S + (- 0.5* waux'* Kaux(indsup,indsup)*waux) ; 24 | end; 25 | end; 26 | grad(k) = S; 27 | end; 28 | 29 | -------------------------------------------------------------------------------- /gradsvmreg.m: -------------------------------------------------------------------------------- 1 | function [grad] = gradsvmreg(K,Alpsup,yapp,option); 2 | 3 | [n] = length(yapp); 4 | d=size(K,3); 5 | 6 | I = eye(n); 7 | Idif = [I -I]; 8 | 9 | 10 | if ~isstruct(K) 11 | for k=1:d; 12 | H = Idif'*K(:,:,k)*Idif; 13 | grad(k) = -0.5*Alpsup'*H*Alpsup ; 14 | end; 15 | 16 | 17 | else 18 | d=K.nbkernel; 19 | for k=1:d; 20 | if isa(K.data,'single') 21 | Kaux=devectorize_single(K.data(:,k)); 22 | else 23 | Kaux=devectorize(K.data(:,k)); 24 | end; 25 | H = Idif'*Kaux*Idif; 26 | grad(k) = - 0.5*Alpsup'*H*(Alpsup) ; 27 | 28 | end; 29 | 30 | end; -------------------------------------------------------------------------------- /ionosphere.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/ionosphere.mat -------------------------------------------------------------------------------- /mklbuildkernel.m: -------------------------------------------------------------------------------- 1 | function [K,aux]=mklbuildkernel(x,kernelcell,kerneloptioncell,xsup,beta,options); 2 | 3 | 4 | 5 | aux=[]; % compatibility 6 | if nargin < 4 7 | beta=1; 8 | xsup=x; 9 | xegalxsup=1; 10 | weightK=[]; 11 | options=[]; 12 | else 13 | if isempty(xsup) 14 | xsup=x; 15 | end; 16 | end; 17 | 18 | 19 | j=1; 20 | for k=1:length(kernelcell); 21 | kernel=kernelcell{k}; 22 | kerneloptionvec=kerneloptioncell{k}; 23 | nbkernel=length(kerneloptionvec); 24 | if exist('options') & isfield(options,'variablecell') 25 | variabletouse=options.variablecell{k}; 26 | else 27 | variabletouse=1:size(x,2); 28 | end; 29 | for i=1:nbkernel 30 | K(:,:,j)=svmkernel(x(:,variabletouse),kernel,kerneloptionvec(i),xsup(:,variabletouse)); 31 | j=j+1; 32 | end 33 | end; 34 | 35 | 36 | % Process Test Kernel Summation and Normalization + Normalization 37 | 38 | if length(beta)==size(K,3) 39 | if exist('options') & isfield(options,'weightK') 40 | weightK=options.weightK; 41 | else 42 | weightK=ones(size(K,3),1); 43 | end 44 | 45 | Kt=zeros(size(x,1),size(xsup,1)); 46 | nbkernel=size(K,3); 47 | for i=1:nbkernel 48 | Kt=Kt+beta(i)*K(:,:,i)*weightK(i); 49 | end 50 | K=Kt; 51 | end; 52 | 53 | -------------------------------------------------------------------------------- /mklkernel.m: -------------------------------------------------------------------------------- 1 | function K=mklkernel(xapp,InfoKernel,Weight,options,xsup,beta) 2 | 3 | if nargin <5 4 | xsup=xapp; 5 | beta=[]; 6 | 7 | 8 | for k=1:length(Weight) 9 | 10 | Kr=svmkernel(xapp(:,InfoKernel(k).variable),InfoKernel(k).kernel,InfoKernel(k).kerneloption, xsup(:,InfoKernel(k).variable)); 11 | 12 | Kr=Kr*Weight(k); 13 | % if options.efficientkernel 14 | % Kr=build_efficientK(Kr); 15 | % end; 16 | 17 | K(:,:,k)=Kr; 18 | 19 | 20 | end; 21 | else 22 | ind=find(beta); 23 | K=zeros(size(xapp,1),size(xsup,1)); 24 | for i=1:length(ind); 25 | k=ind(i); 26 | Kr=svmkernel(xapp(:,InfoKernel(k).variable),InfoKernel(k).kernel,InfoKernel(k).kerneloption, xsup(:,InfoKernel(k).variable)); 27 | Kr=Kr*Weight(k); 28 | K=K+ Kr*beta(k); 29 | end; 30 | 31 | end; -------------------------------------------------------------------------------- /mklmulticlass.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,nbsv,SigmaH,obj] = mklmulticlass(K,yapp,C,nbclass,option,verbose) 2 | 3 | % USAGE [Sigma,Alpsup,w0,pos,Time,SigmaH,obj] = mkladapt(K,yapp,C,option,verbose) 4 | % 5 | % Input 6 | % K : NxNxD matrix containing all the Gram Matrix 7 | % yapp : training labels 8 | % C : SVM hyperparameter 9 | % nbclass : nb of classes in the problem 10 | % verbose : verbosity of algorithm 11 | % option : mkl algorithm hyperparameter 12 | % 13 | % option.nbitermax : maximal number of iterations (default 1000) 14 | % option.algo : selecting algorithm svmclass (default) or svmreg 15 | % option.seuil : threshold for zeroing kernel coefficients 16 | % (default 1e-12) 17 | % option.sigmainit : initial kernel coefficient (default average) 18 | % option.alphainit : initial Lagrangian coefficient 19 | % 20 | 21 | 22 | [n] = length(yapp); 23 | if ~isempty(K) 24 | if size(K,3)>1 25 | nbkernel=size(K,3); 26 | if option.efficientkernel==1 27 | K = build_efficientK(K); 28 | end; 29 | elseif option.efficientkernel==1 & isstruct(K); 30 | nbkernel=K.nbkernel; 31 | end; 32 | else 33 | error('No kernels defined ...'); 34 | end; 35 | 36 | if ~isfield(option,'nbitermax'); 37 | nloopmax=1000; 38 | else 39 | nloopmax=option.nbitermax; 40 | end; 41 | if ~isfield(option,'algo'); 42 | option.algo='oneagainstall'; 43 | end; 44 | if ~isfield(option,'seuil'); 45 | seuil=1e-12; 46 | else 47 | seuil=option.seuil; 48 | end 49 | if ~isfield(option,'lambdareg'); 50 | lambdareg=1e-10; 51 | option.lambdareg=1e-10; 52 | else 53 | lambdareg=option.lambdareg; 54 | end 55 | 56 | if ~isfield(option,'verbosesvm'); 57 | verbosesvm=0; 58 | option.verbosesvm=0; 59 | else 60 | verbosesvm=option.verbosesvm; 61 | end 62 | 63 | if ~isfield(option,'sigmainit'); 64 | Sigma=ones(1,nbkernel)/nbkernel; 65 | else 66 | Sigma=option.sigmainit ; 67 | ind=find(Sigma==0); 68 | end; 69 | 70 | if isfield(option,'alphainit'); 71 | alphainit=option.alphainit; 72 | else 73 | alphainit=[]; 74 | end; 75 | %-------------------------------------------------------------------------------- 76 | % Options used in subroutines 77 | %-------------------------------------------------------------------------------- 78 | if ~isfield(option,'goldensearch_deltmax'); 79 | option.goldensearch_deltmax=1e-1; 80 | end 81 | if ~isfield(option,'goldensearchmax'); 82 | optiongoldensearchmax=1e-8; 83 | end; 84 | if ~isfield(option,'firstbasevariable'); 85 | option.firstbasevariable='first'; 86 | end; 87 | 88 | %------------------------------------------------------------------------------% 89 | % Initialize 90 | %------------------------------------------------------------------------------% 91 | 92 | kernel = 'numerical'; 93 | span = 1; 94 | nloop = 0; 95 | loop = 1; 96 | goldensearch_deltmaxinit= option.goldensearch_deltmax; 97 | 98 | 99 | 100 | % if option.efficientkernel==1 101 | % K = build_efficientK(K); 102 | % end; 103 | 104 | if nargout>=8, 105 | SigmaH = zeros(nloopmax,d); 106 | end; 107 | 108 | % Initializing SVM 109 | t = cputime ; 110 | SumSigma=sum(Sigma); 111 | if ~isempty(K) 112 | kerneloption.matrix=sumKbeta(K,Sigma); 113 | end; 114 | 115 | switch option.algo 116 | case 'oneagainstall' 117 | 118 | [xsup,Alpsup,w0,nbsv,pos,obj]=svmmulticlassoneagainstall([],yapp,nbclass,C,lambdareg,kernel,kerneloption,verbosesvm); 119 | [grad] = gradsvmoneagainstall(K,pos,Alpsup,yapp,nbsv,option); 120 | 121 | case 'oneagainstone' 122 | [xsup,Alpsup,w0,nbsv,aux,pos,obj]=svmmulticlassoneagainstone([],yapp,nbclass,C,lambdareg,kernel,kerneloption,verbosesvm); 123 | [grad] = gradsvmoneagainstone(K,pos,Alpsup,yapp,nbsv,option); 124 | 125 | end; 126 | 127 | Sigmaold = Sigma ; 128 | Alpsupold = Alpsup ; 129 | w0old = w0; 130 | posold = pos ; 131 | history.obj=[]; 132 | history.sigma=[]; 133 | history.KKTconstraint=[1]; 134 | history.dualitygap=[]; 135 | 136 | %------------------------------------------------------------------------------% 137 | % Update loop 138 | %------------------------------------------------------------------------------% 139 | if nloopmax==0 140 | SigmaH=[]; 141 | return 142 | end; 143 | while loop ; nloop = nloop+1; 144 | 145 | SigmaH(nloop,:) = Sigma; 146 | history.sigma= [history.sigma;Sigma]; 147 | history.obj=[history.obj obj]; 148 | 149 | %--------------------------------------------- 150 | % Update Sigma 151 | %--------------------------------------------- 152 | t = cputime ; 153 | [Sigma,Alpsup,w0,pos,nbsv,obj] = mklmulticlassupdate(K,Sigma,pos,Alpsup,w0,C,yapp,nbclass,nbsv,grad,obj,option) ; 154 | %----------------------------------------- 155 | % Thresholding 156 | %----------------------------------------- 157 | 158 | if seuil ~=0 & max(Sigma)>seuil & nloop < option.seuilitermax 159 | Sigma=(Sigma.*(Sigma>seuil))*SumSigma/sum(Sigma.*(Sigma>seuil)); 160 | end; 161 | 162 | %------------------------------- 163 | % Numerical cleaning 164 | %------------------------------- 165 | Sigma(find(abs(Sigma optiongoldensearchmax 171 | option.goldensearch_deltmax=option.goldensearch_deltmax/10; 172 | elseif option.goldensearch_deltmax~=goldensearch_deltmaxinit 173 | option.goldensearch_deltmax*10; 174 | end; 175 | %----------------------------------------------------------- 176 | % Enhance accuracy of line search if necessary 177 | %----------------------------------------------------------- 178 | if max(abs(Sigma-Sigmaold))==0 & option.goldensearch_deltmax > 1e-12 179 | option.goldensearch_deltmax=option.goldensearch_deltmax/10; 180 | elseif option.goldensearch_deltmax~=goldensearch_deltmaxinit 181 | option.goldensearch_deltmax*10; 182 | end; 183 | 184 | 185 | %---------------------------------------------------- 186 | % process approximate KKT conditions 187 | %---------------------------------------------------- 188 | switch option.algo 189 | case 'oneagainstall' 190 | [grad] = gradsvmoneagainstall(K,pos,Alpsup,yapp,nbsv,option); 191 | case 'oneagainstone'; 192 | [grad] = gradsvmoneagainstone(K,pos,Alpsup,yapp,nbsv,option); 193 | 194 | end; 195 | 196 | 197 | indpos=find(Sigma>option.numericalprecision); 198 | indzero=find(abs(Sigma<=option.numericalprecision)); 199 | 200 | 201 | KKTconstraint=abs ((min(grad(indpos))-max(grad(indpos)))/min(grad(indpos))) ; 202 | KKTconstraintZero= ( min(grad(indzero))> max(grad(indpos)) ); 203 | 204 | history.KKTconstraint=[history.KKTconstraint KKTconstraint]; 205 | 206 | %---------------------------------------------------- 207 | % process duality gap 208 | %---------------------------------------------------- 209 | normek=-2*grad ; % Alpsup'*K(pos,pos,i)*Alpsup; 210 | dualitygap=1; %(obj + 0.5* max(normek) - sum(abs(Alpsup)))/obj; 211 | history.dualitygap=[history.dualitygap dualitygap]; 212 | 213 | %------------------------------------------ 214 | % verbosity 215 | %------------------------------------------ 216 | if verbose 217 | if nloop == 1 || rem(nloop,10)==0 218 | fprintf('--------------------------------------\n'); 219 | fprintf('Iter | Obj. | DiffBetas | KKT C |\n'); 220 | fprintf('---------------------------------------\n'); 221 | end; 222 | fprintf('%d | %8.4f | %6.4f | %6.4f |\n',[nloop obj max(abs(Sigma-Sigmaold)) KKTconstraint]); 223 | end 224 | 225 | 226 | %---------------------------------------------------- 227 | % check variation of Sigma conditions 228 | %---------------------------------------------------- 229 | if option.stopvariation==1 & option.stopKKT== 0 & max(abs(Sigma - Sigmaold))=nloopmax , 284 | loop = 0; 285 | history.sigma= [history.sigma;Sigma]; 286 | history.obj=[history.obj obj]; 287 | status=2; 288 | fprintf(1,'maximum number of iterations reached\n') 289 | end; 290 | if nloop < option.miniter & loop==0 291 | loop=1; 292 | end; 293 | 294 | %----------------------------------------------------- 295 | % Updating Variables 296 | %---------------------------------------------------- 297 | 298 | Sigmaold = Sigma ; 299 | Alpsupold = Alpsup ; 300 | w0old = w0; 301 | posold = pos ; 302 | 303 | 304 | end; 305 | 306 | 307 | -------------------------------------------------------------------------------- /mklmulticlassupdate.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,nbsv,CostNew] = CoeffUpdatemulticlass(K,Sigma,pos,Alpsup,w0,C,yapp,nbclass,nbsv,GradNew,CostNew,option) 2 | 3 | 4 | %------------------------------------------------------------------------------% 5 | % Initialize 6 | %------------------------------------------------------------------------------% 7 | 8 | d = length(Sigma); 9 | gold = (sqrt(5)+1)/2 ; 10 | 11 | 12 | SigmaInit = Sigma ; 13 | SigmaNew = SigmaInit ; 14 | descold = zeros(1,d); 15 | 16 | %--------------------------------------------------------------- 17 | % Compute Current Cost and Gradient 18 | %%-------------------------------------------------------------- 19 | 20 | %[CostNew] = costsvmoneagainstall(K,0,descold,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 21 | %GradNew =gradsvmoneagainstall(K,pos,Alpsup,yapp,nbsv,option); 22 | NormGrad = GradNew*GradNew'; 23 | GradNew=GradNew/sqrt(NormGrad); 24 | CostOld=CostNew; 25 | 26 | %--------------------------------------------------------------- 27 | % Compute reduced Gradient and descent direction 28 | %%-------------------------------------------------------------- 29 | 30 | switch option.firstbasevariable 31 | case 'first' 32 | [val,coord] = max(SigmaNew) ; 33 | 34 | case 'random' 35 | [val,coord] = max(SigmaNew) ; 36 | coord=find(SigmaNew==val); 37 | indperm=randperm(length(coord)); 38 | coord=coord(indperm(1)); 39 | case 'fullrandom' 40 | indzero=find(SigmaNew~=0); 41 | if ~isempty(indzero) 42 | [mini,coord]=min(GradNew(indzero)); 43 | coord=indzero(coord); 44 | else 45 | [val,coord] = max(SigmaNew) ; 46 | end; 47 | 48 | end; 49 | GradNew = GradNew - GradNew(coord) ; 50 | desc = - GradNew.* ( (SigmaNew>0) | (GradNew<0) ) ; 51 | desc(coord) = - sum(desc); % NB: GradNew(coord) = 0 52 | 53 | 54 | 55 | %---------------------------------------------------- 56 | % Compute optimal stepsize 57 | %----------------------------------------------------- 58 | stepmin = 0; 59 | costmin = CostOld ; 60 | costmax = 0 ; 61 | %----------------------------------------------------- 62 | % maximum stepsize 63 | %----------------------------------------------------- 64 | ind = find(desc<0); 65 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 66 | deltmax = stepmax; 67 | if isempty(stepmax) | stepmax==0 68 | Sigma = SigmaNew ; 69 | return 70 | end, 71 | if stepmax > 0.1 72 | stepmax=0.1; 73 | end; 74 | 75 | %----------------------------------------------------- 76 | % Projected gradient 77 | %----------------------------------------------------- 78 | 79 | while costmaxoption.numericalprecision) | (desc>0) ) ; 94 | desc(coord) = - sum(desc([[1:coord-1] [coord+1:end]])); 95 | ind = find(desc<0); 96 | Alpsup=Alpsupaux; 97 | w0=w0aux; 98 | pos=posaux; 99 | nbsv=nbsvaux; 100 | if ~isempty(ind) 101 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 102 | deltmax = stepmax; 103 | costmax = 0; 104 | else 105 | stepmax = 0; 106 | deltmax = 0; 107 | end; 108 | 109 | end; 110 | 111 | 112 | end; 113 | 114 | 115 | Step = [stepmin stepmax]; 116 | Cost = [costmin costmax]; 117 | [val,coord] = min(Cost); 118 | % optimization of stepsize by golden search 119 | while (stepmax-stepmin)>option.goldensearch_deltmax*(abs(deltmax)); 120 | stepmedr = stepmin+(stepmax-stepmin)/gold; 121 | stepmedl = stepmin+(stepmedr-stepmin)/gold; 122 | 123 | switch option.algo 124 | case 'oneagainstall' 125 | [costmedr,Alpsupr,w0r,posr,nbsvr] = costsvmoneagainstall(K,stepmedr,desc,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 126 | case 'oneagainstone' 127 | [costmedr,Alpsupr,w0r,posr,nbsvr] = costsvmoneagainstone(K,stepmedr,desc,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 128 | end; 129 | switch option.algo 130 | case 'oneagainstall' 131 | [costmedl,Alpsupl,w0l,posl,nbsvl] = costsvmoneagainstall(K,stepmedl,desc,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 132 | case 'oneagainstone' 133 | [costmedl,Alpsupl,w0l,posl,nbsvl] = costsvmoneagainstone(K,stepmedl,desc,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 134 | end; 135 | 136 | Step = [stepmin stepmedl stepmedr stepmax]; 137 | Cost = [costmin costmedl costmedr costmax]; 138 | [val,coord] = min(Cost); 139 | switch coord 140 | case 1 141 | stepmax = stepmedl; 142 | costmax = costmedl; 143 | pos=posl; 144 | Alpsup=Alpsupl; 145 | w0=w0l; 146 | nbsv=nbsvl; 147 | case 2 148 | stepmax = stepmedr; 149 | costmax = costmedr; 150 | pos=posr; 151 | Alpsup=Alpsupr; 152 | w0=w0r; 153 | nbsv=nbsvr; 154 | case 3 155 | stepmin = stepmedl; 156 | costmin = costmedl; 157 | pos=posl; 158 | Alpsup=Alpsupl; 159 | w0=w0l; 160 | nbsv=nbsvl; 161 | case 4 162 | stepmin = stepmedr; 163 | costmin = costmedr; 164 | pos=posr; 165 | Alpsup=Alpsupr; 166 | w0=w0r; 167 | nbsv=nbsvr; 168 | end; 169 | end; 170 | CostNew = Cost(coord) ; 171 | step = Step(coord) ; 172 | % Sigma update 173 | if CostNew < CostOld ; 174 | SigmaNew = SigmaNew + step * desc; 175 | 176 | end; 177 | Sigma = SigmaNew ; 178 | %[CostNew,Alpsup,w0,pos,nbsv] = costsvmoneagainstall(K,0,desc,SigmaNew,Alpsup,C,yapp,pos,nbsv,nbclass,option) ; 179 | -------------------------------------------------------------------------------- /mklsvm.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,history,obj,status] = mklsvm(K,yapp,C,option,verbose) 2 | 3 | % USAGE [Sigma,Alpsup,w0,pos,history,obj,status] = mklsvm(K,yapp,C,option,verbose) 4 | % 5 | % Inputs 6 | % 7 | % K : NxNxD matrix containing all the Gram Matrix 8 | % C : SVM hyperparameter 9 | % option : mkl algorithm hyperparameter 10 | % 11 | % option.nbitermax : maximal number of iterations (default 1000) 12 | % option.algo : selecting algorithm svmclass (default) or svmreg 13 | % option.seuil : threshold for zeroing kernel coefficients 14 | % (default 1e-12) during the first 15 | % option.seuilitermax 16 | % option.seuilitermax : threshold weigth when the nb of iteration is 17 | % lower than this value 18 | % option.sigmainit : initial kernel coefficient (default average) 19 | % option.alphainit : initial Lagrangian coefficient 20 | % option.lambdareg : ridge regularization added to kernel's diagonal 21 | % for SVM (default 1e-10) 22 | % option.verbosesvm : verbosity of SVM (default 0) see svmclass or 23 | % svmreg 24 | % option.svmreg_epsilon : epsilon for SVM regression (mandatory for 25 | % svm regression) 26 | % option.numericalprecision : force to 0 weigth lower than this 27 | % value (default = eps) 28 | % 29 | % Outputs 30 | % 31 | % Sigma : the weigths 32 | % Alpsup : the weigthed lagrangian of the support vectors 33 | % w0 : the bias 34 | % pos : the indices of SV 35 | % history : history of the weigths 36 | % obj : objective value 37 | % status : output status (sucessful or max iter) 38 | 39 | [n] = length(yapp); 40 | if ~isempty(K) 41 | if size(K,3)>1 42 | nbkernel=size(K,3); 43 | if option.efficientkernel==1 44 | K = build_efficientK(K); 45 | end; 46 | elseif option.efficientkernel==1 & isstruct(K); 47 | nbkernel=K.nbkernel; 48 | end; 49 | else 50 | error('No kernels defined ...'); 51 | end; 52 | 53 | if ~isfield(option,'nbitermax'); 54 | nloopmax=1000; 55 | else 56 | nloopmax=option.nbitermax; 57 | end; 58 | if ~isfield(option,'algo'); 59 | option.algo='svmclass'; 60 | end; 61 | if ~isfield(option,'seuil'); 62 | seuil=1e-12; 63 | else 64 | seuil=option.seuil; 65 | end 66 | if ~isfield(option,'seuilitermax') 67 | option.seuilitermax=20; 68 | end; 69 | if ~isfield(option,'seuildiffsigma'); 70 | option.seuildiffsigma=1e-5; 71 | end 72 | if ~isfield(option,'seuildiffconstraint'); 73 | option.seuildiffconstraint=0.05; 74 | end 75 | 76 | if ~isfield(option,'lambdareg'); 77 | lambdareg=1e-10; 78 | option.lambdareg=1e-10; 79 | else 80 | lambdareg=option.lambdareg; 81 | end 82 | 83 | 84 | if ~isfield(option,'numericalprecision'); 85 | option.numericalprecision=0; 86 | end; 87 | 88 | if ~isfield(option,'verbosesvm'); 89 | verbosesvm=0; 90 | option.verbosesvm=0; 91 | else 92 | verbosesvm=option.verbosesvm; 93 | end 94 | 95 | if ~isfield(option,'sigmainit'); 96 | Sigma=ones(1,nbkernel)/nbkernel; 97 | else 98 | Sigma=option.sigmainit ; 99 | ind=find(Sigma==0); 100 | end; 101 | 102 | 103 | if isfield(option,'alphainit'); 104 | alphainit=option.alphainit; 105 | else 106 | alphainit=[]; 107 | end; 108 | 109 | 110 | 111 | 112 | %-------------------------------------------------------------------------------- 113 | % Options used in subroutines 114 | %-------------------------------------------------------------------------------- 115 | if ~isfield(option,'goldensearch_deltmax'); 116 | option.goldensearch_deltmax=1e-1; 117 | end 118 | if ~isfield(option,'goldensearchmax'); 119 | optiongoldensearchmax=1e-8; 120 | end; 121 | if ~isfield(option,'firstbasevariable'); 122 | option.firstbasevariable='first'; 123 | end; 124 | 125 | %------------------------------------------------------------------------------% 126 | % Initialize 127 | %------------------------------------------------------------------------------% 128 | kernel = 'numerical'; 129 | span = 1; 130 | nloop = 0; 131 | loop = 1; 132 | status=0; 133 | numericalaccuracy=1e-9; 134 | goldensearch_deltmaxinit= option.goldensearch_deltmax; 135 | 136 | 137 | 138 | 139 | %----------------------------------------- 140 | % Initializing SVM 141 | %------------------------------------------ 142 | SumSigma=sum(Sigma); 143 | if ~isempty(K) 144 | kerneloption.matrix=sumKbeta(K,Sigma); 145 | else 146 | error('No kernels defined ...'); 147 | end; 148 | switch option.algo 149 | case 'svmclass' 150 | [xsup,Alpsup,w0,pos,aux,aux,obj] = svmclass([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,alphainit); 151 | [grad] = gradsvmclass(K,pos,Alpsup,C,yapp,option); 152 | case 'svmreg' 153 | % for svmreg Alpsup is the vector of [alpha alpha*] 154 | if ~isfield(option,'svmreg_epsilon') 155 | error(' Epsilon tube is not defined ... see option.svmreg_epsilon ...'); 156 | end; 157 | [xsup,ysup,Alpsupaux,w0,pos,Alpsup,obj] = svmreg([],yapp,C,option.svmreg_epsilon,kernel,kerneloption,lambdareg,verbosesvm); 158 | grad = gradsvmreg(K,Alpsup,yapp,option) ; 159 | end; 160 | 161 | Sigmaold = Sigma ; 162 | Alpsupold = Alpsup ; 163 | w0old = w0; 164 | posold = pos ; 165 | history.obj=[]; 166 | history.sigma=[]; 167 | history.KKTconstraint=[1]; 168 | history.dualitygap=[1]; 169 | %------------------------------------------------------------------------------% 170 | % Update Main loop 171 | %------------------------------------------------------------------------------% 172 | 173 | while loop & nloopmax >0 ; 174 | 175 | nloop = nloop+1; 176 | history.sigma= [history.sigma;Sigma]; 177 | history.obj=[history.obj obj]; 178 | 179 | %----------------------------------------- 180 | % Update weigths Sigma 181 | %----------------------------------------- 182 | t = cputime ; 183 | [Sigma,Alpsup,w0,pos,obj] = mklsvmupdate(K,Sigma,pos,Alpsup,w0,C,yapp,grad,obj,option) ; 184 | %----------------------------------------- 185 | % Thresholding 186 | %----------------------------------------- 187 | 188 | if seuil ~=0 & max(Sigma)>seuil & nloop < option.seuilitermax 189 | Sigma=(Sigma.*(Sigma>seuil))*SumSigma/sum(Sigma.*(Sigma>seuil)); 190 | end; 191 | 192 | %------------------------------- 193 | % Numerical cleaning 194 | %------------------------------- 195 | Sigma(find(abs(Sigma optiongoldensearchmax 201 | option.goldensearch_deltmax=option.goldensearch_deltmax/10; 202 | elseif option.goldensearch_deltmax~=goldensearch_deltmaxinit 203 | option.goldensearch_deltmax*10; 204 | end; 205 | 206 | 207 | %---------------------------------------------------- 208 | % process approximate KKT conditions 209 | %---------------------------------------------------- 210 | switch option.algo 211 | case 'svmclass' 212 | [grad] = gradsvmclass(K,pos,Alpsup,C,yapp,option); 213 | case 'svmreg' 214 | grad = gradsvmreg(K,Alpsup,yapp,option) ; 215 | end; 216 | 217 | indpos=find(Sigma>option.numericalprecision); 218 | indzero=find(abs(Sigma<=option.numericalprecision)); 219 | 220 | KKTconstraint=abs ((min(grad(indpos))-max(grad(indpos)))/min(grad(indpos))) ; 221 | KKTconstraintZero= ( min(grad(indzero))> max(grad(indpos)) ); 222 | 223 | history.KKTconstraint=[history.KKTconstraint KKTconstraint]; 224 | 225 | %---------------------------------------------------- 226 | % process duality gap 227 | %---------------------------------------------------- 228 | 229 | normek=-grad ; % 0.5*Alpsup'*K(pos,pos,i)*Alpsup; 230 | 231 | switch option.algo 232 | case 'svmclass' 233 | dualitygap=(obj + max(normek) - sum(abs(Alpsup)))/obj; 234 | 235 | %sumK=sumKbeta(K,Sigma); if we suppose that duality gap of SVM 236 | %dualitygap= (max(normek)-0.5*Alpsup'*sumK(pos,pos)*Alpsup)/obj; 237 | case 'svmreg' 238 | 239 | dualitygap=(obj + max(normek) - Alpsup'*[-option.svmreg_epsilon+yapp ; -option.svmreg_epsilon-yapp])/obj; 240 | end; 241 | 242 | 243 | history.dualitygap=[history.dualitygap dualitygap]; 244 | 245 | %------------------------------------------ 246 | % verbosity 247 | %------------------------------------------ 248 | if verbose 249 | if nloop == 1 || rem(nloop,10)==0 250 | fprintf('--------------------------------------------------\n'); 251 | fprintf('Iter | Obj. | DiffBetas | DualGap | KKT C. |\n'); 252 | fprintf('--------------------------------------------------\n'); 253 | end; 254 | fprintf('%d | %8.4f | %6.4f | %6.4f | %6.4f\n',[nloop obj max(abs(Sigma-Sigmaold)) dualitygap KKTconstraint]); 255 | end 256 | 257 | 258 | %---------------------------------------------------- 259 | % check variation of Sigma conditions 260 | %---------------------------------------------------- 261 | if option.stopvariation==1 & option.stopKKT== 0 & max(abs(Sigma - Sigmaold))=nloopmax , 316 | loop = 0; 317 | history.sigma= [history.sigma;Sigma]; 318 | history.obj=[history.obj obj]; 319 | status=2; 320 | fprintf(1,'maximum number of iterations reached\n') 321 | end; 322 | if nloop < option.miniter & loop==0 323 | loop=1; 324 | end; 325 | 326 | %----------------------------------------------------- 327 | % Updating Variables 328 | %---------------------------------------------------- 329 | 330 | Sigmaold = Sigma ; 331 | Alpsupold = Alpsup ; 332 | w0old = w0; 333 | posold = pos ; 334 | 335 | end; 336 | 337 | %keyboard 338 | switch option.algo 339 | 340 | case 'svmreg' % transform the langragian to proper weights 341 | Alpsup= Alpsup(1:n)-Alpsup(n+1:2*n); 342 | %pos=1:n; 343 | pos=find(Alpsup); 344 | Alpsup=Alpsup(pos); 345 | end; 346 | -------------------------------------------------------------------------------- /mklsvmclassSILP.m: -------------------------------------------------------------------------------- 1 | function [w,bsvm,Sigma,posw,fval,history]=mlksvmclass(K,yapp,C,verbose,option); 2 | 3 | nbkernel=size(K,3); 4 | n=size(yapp,1); 5 | if ~isfield(option,'nbitermax'); 6 | nbitermax=1000; 7 | else 8 | nbitermax=option.nbitermax; 9 | end; 10 | if ~isfield(option,'seuildiffsigma'); 11 | option.seuildiffsigma=1e-5; 12 | end 13 | if ~isfield(option,'seuildiffconstraint'); 14 | option.seuildiffconstraint=0.05; 15 | end 16 | if ~isfield(option,'seuildualitygap'); 17 | option.seuildiffconstraint=0.01; 18 | end 19 | if ~isfield(option,'lambdareg'); 20 | lambdareg=1e-10; 21 | option.lambdareg=1e-10; 22 | else 23 | lambdareg=option.lambdareg; 24 | end 25 | 26 | if ~isfield(option,'verbosesvm'); 27 | verbosesvm=0; 28 | option.verbosesvm=0; 29 | else 30 | verbosesvm=option.verbosesvm; 31 | end 32 | 33 | if ~isfield(option,'sigmainit'); 34 | Sigma=ones(nbkernel,1)/nbkernel; 35 | else 36 | Sigma=option.sigmainit ; 37 | if size(Sigma,1)==1 38 | Sigma=Sigma'; 39 | end; 40 | end; 41 | 42 | 43 | if isfield(option,'alphainit'); 44 | alphainit=option.alphainit; 45 | else 46 | alphainit=[]; 47 | end; 48 | if option.efficientkernel==1 49 | K = build_efficientK(K); 50 | end; 51 | 52 | 53 | kernel='numerical'; 54 | span=1; 55 | verbosesvm=0; 56 | sumSigma=sum(Sigma); 57 | theta=-inf; 58 | 59 | 60 | 61 | 62 | % matrix and parameters 63 | % SVMClass Cost function evaluation 64 | % 65 | 66 | 67 | %--------------------------------------------------------------------- 68 | % Setting the linear prog parameters 69 | % nbvar = nbkernel+1; 70 | % 71 | % var = [theta Sigma1, Sigma_2, ..., SigmaK]; 72 | %--------------------------------------------------------------------- 73 | f=[-1;zeros(nbkernel,1)]; 74 | Aeq=[0 ones(1,nbkernel)]; % 1 seule egalit�; 75 | beq=sumSigma; 76 | LB=[-inf;zeros(nbkernel,1)]; 77 | UB=[inf*ones(nbkernel,1)]; 78 | A=[]; 79 | 80 | optimopt=optimset('MaxIter',10000,'Display','off', 'TolCon',1e-3,'TolFun',1e-5); 81 | 82 | 83 | 84 | 85 | 86 | nbverbose=1; 87 | 88 | iter=0; 89 | b=[]; 90 | Sigmaold=Sigma; 91 | Sigmaold(1)=Sigmaold(1)-1; 92 | loop=1; 93 | 94 | history.theta=[]; 95 | history.sigma=[]; 96 | history.KKTconstraint=[1]; 97 | history.dualitygap=[1]; 98 | history.sigma= [history.sigma;Sigma']; 99 | kerneloption.matrix=sumKbeta(K,Sigma); 100 | 101 | x=[]; 102 | exitflag=0; 103 | while loop 104 | 105 | 106 | kerneloption.matrix=sumKbeta(K,Sigma); 107 | if ~isempty(alphainit) & iter >0; 108 | alphainit=zeros(size(yapp)); 109 | alphainit(posw)=alphaw; 110 | end; 111 | [xsup,w,bsvm,posw,timeps,alphaw,obj]=svmclass([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,alphainit); 112 | for i=1:nbkernel 113 | 114 | if ~isstruct(K) 115 | Saux(i)=0.5*w'*K(posw,posw,i)*w;% - sum(alphaw); 116 | else 117 | 118 | Kaux=devectorize(K.data(:,i)); 119 | Saux(i)=0.5*w'*Kaux(posw,posw)*w;% - sum(alphaw); 120 | 121 | 122 | end; 123 | 124 | 125 | 126 | end; 127 | S=Saux-sum(alphaw); 128 | 129 | constraintviol=S*Sigma; 130 | 131 | 132 | sumfk2divdk= Saux*Sigma; 133 | primalobj=sumfk2divdk +C*sum(max( 1-yapp.*(kerneloption.matrix(:,posw)*w + bsvm),0)); 134 | dualobj= -max(Saux) + sum(alphaw); 135 | dualitygap=(primalobj-dualobj)/primalobj; 136 | 137 | %------------------------------------------------------ 138 | % verbosity 139 | %---------------------------------------------------- 140 | 141 | iter=iter+1; 142 | if verbose ~= 0 143 | 144 | if nbverbose == 1 145 | disp('------------------------------------------------'); 146 | disp('iter Theta ConstViol DeltaSigma'); 147 | disp('------------------------------------------------'); 148 | end 149 | if nbverbose == 20 150 | nbverbose=1; 151 | end 152 | 153 | if exitflag==0 154 | fprintf('%d | %8.4f | %8.4f | %6.4f |%6.4f \n',[iter theta constraintviol max(abs(Sigma-Sigmaold))], dualitygap); 155 | else 156 | fprintf('%d | %8.4f | %8.4f | %6.4f | lp cvg pb \n',[iter theta constraintviol max(abs(Sigma-Sigmaold))]); 157 | end; 158 | nbverbose = nbverbose+1; 159 | end 160 | %----------------------------------------------------- 161 | % Maximum constraint violation check 162 | %------------------------------------------------------ 163 | KKTconstraint=abs(S*Sigma/theta-1); 164 | history.KKTconstraint=[history.KKTconstraint KKTconstraint]; 165 | history.dualitygap=[history.dualitygap dualitygap]; 166 | 167 | 168 | %---------------------------------------------------- 169 | % check variation of Sigma conditions 170 | %---------------------------------------------------- 171 | if option.stopvariation==1 & option.stopKKT==0 & max(abs(Sigma - Sigmaold))=nbitermax , 204 | loop = 0; 205 | fprintf(1,'Maximal number of iterations reached \n'); 206 | end; 207 | 208 | %---------------------------------------------------- 209 | % Optimize the weigths Sigma using a LP 210 | %---------------------------------------------------- 211 | Sigmaold=Sigma; 212 | A=[A;1 -S]; 213 | aux=0; 214 | b=[b;aux]; 215 | % [x,fval,exitflag] =linprog(f,A,b,Aeq,beq,LB,UB,[theta;Sigma],optimopt); 216 | 217 | if exist('lp_solve')==2 218 | 219 | sens=[-ones(size(A,1),1); zeros(size(Aeq,1),1)]; 220 | [fval,x,lagrangia,exitflag]=lp_solve(-f,[A;Aeq],[b;beq],sens,LB,UB); 221 | 222 | elseif exist('linprog')==2 223 | [x,fval,exitflag] =linprog(f,A,b,Aeq,beq,LB,UB,[theta;Sigma],optimopt); 224 | exitflag=~(exitflag>0); 225 | else 226 | error('No available linear programming function...'); 227 | end; 228 | if ~isempty(x) 229 | theta=x(1); 230 | Sigma=x(2:end); 231 | xold=x; 232 | fvalold=fval; 233 | else 234 | theta=xold(1); 235 | Sigma=xold(2:end); 236 | fval=fvalold; 237 | loop=0; 238 | fprintf(1,'Premature convergence of the algorithm \n'); 239 | end; 240 | 241 | 242 | history.sigma= [history.sigma;Sigma']; 243 | history.theta=[history.theta theta]; 244 | 245 | 246 | 247 | 248 | 249 | 250 | end; 251 | 252 | 253 | 254 | -------------------------------------------------------------------------------- /mklsvmls.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,history,obj] = mklsvmls(K,yapp,C,option,verbose) 2 | 3 | % USAGE [Sigma,Alpsup,w0,pos,SigmaH,obj] = mklsvmls(K,yapp,C,option,verbose) 4 | % 5 | % Inputs 6 | % 7 | % K : NxNxD matrix containing all the Gram Matrix 8 | % C : SVM hyperparameter 9 | % option : mkl algorithm hyperparameter 10 | % 11 | % option.nbitermax : maximal number of iterations (default 1000) 12 | % option.algo : selecting algorithm svmclass (default) or svmreg 13 | % option.seuil : threshold for zeroing kernel coefficients 14 | % (default 1e-12) 15 | % option.sigmainit : initial kernel coefficient (default average) 16 | % option.alphainit : initial Lagrangian coefficient 17 | % option.lambdareg : ridge regularization added to kernel's diagonal 18 | % for SVM (default 1e-10) 19 | % option.verbosesvm : verbosity of SVM (default 0) see svmclass or 20 | % svmreg 21 | % option.svmreg_epsilon : epsilon for SVM regression (mandatory for 22 | % svm regression) 23 | % 24 | % Outputs 25 | % 26 | % Sigma : the weigths 27 | % Alpsup : the weigthed lagrangian of the support vectors 28 | % w0 : the bias 29 | % pos : the indices of SV 30 | % history : history of the weigths 31 | % obj : objective value 32 | 33 | [n] = length(yapp); 34 | %----------------------------------------- 35 | % specific to Large Scale 36 | %----------------------------------------- 37 | if ~isempty(K) 38 | nbkernel=K.size; 39 | else 40 | error('No kernels defined ...'); 41 | end; 42 | 43 | 44 | 45 | 46 | 47 | if ~isfield(option,'nbitermax'); 48 | nloopmax=1000; 49 | else 50 | nloopmax=option.nbitermax; 51 | end; 52 | if ~isfield(option,'algo'); 53 | option.algo='svmclass'; 54 | end; 55 | if ~isfield(option,'seuil'); 56 | seuil=1e-12; 57 | else 58 | seuil=option.seuil; 59 | end 60 | if ~isfield(option,'seuilitermax') 61 | option.seuilitermax=20; 62 | end; 63 | if ~isfield(option,'seuildiffsigma'); 64 | option.seuildiffsigma=1e-5; 65 | end 66 | if ~isfield(option,'seuildiffconstraint'); 67 | option.seuildiffconstraint=0.05; 68 | end 69 | 70 | if ~isfield(option,'lambdareg'); 71 | lambdareg=1e-10; 72 | option.lambdareg=1e-10; 73 | else 74 | lambdareg=option.lambdareg; 75 | end 76 | 77 | if ~isfield(option,'numericalcleaning'); 78 | option.numericalcleaning=eps; 79 | end 80 | if ~isfield(option,'numericalprecision'); 81 | option.numericalprecision=0; 82 | end; 83 | 84 | 85 | if ~isfield(option,'verbosesvm'); 86 | verbosesvm=0; 87 | option.verbosesvm=0; 88 | else 89 | verbosesvm=option.verbosesvm; 90 | end 91 | 92 | if ~isfield(option,'sigmainit'); 93 | Sigma=ones(1,nbkernel)/nbkernel; 94 | else 95 | Sigma=option.sigmainit ; 96 | ind=find(Sigma==0); 97 | end; 98 | 99 | 100 | if isfield(option,'alphainit'); 101 | alphainit=option.alphainit; 102 | else 103 | alphainit=[]; 104 | end; 105 | 106 | 107 | %-------------------------------------------------------------------------------- 108 | % Options used in subroutines 109 | %-------------------------------------------------------------------------------- 110 | if ~isfield(option,'goldensearch_deltmax'); 111 | option.goldensearch_deltmax=1e-1; 112 | end 113 | if ~isfield(option,'goldensearchmax'); 114 | optiongoldensearchmax=1e-8; 115 | end; 116 | if ~isfield(option,'firstbasevariable'); 117 | option.firstbasevariable='first'; 118 | end; 119 | 120 | %------------------------------------------------------------------------------% 121 | % Initialize 122 | %------------------------------------------------------------------------------% 123 | kernel = 'numerical'; 124 | span = 1; 125 | nloop = 0; 126 | loop = 1; 127 | numericalaccuracy=1e-9; 128 | goldensearch_deltmaxinit= option.goldensearch_deltmax; 129 | qpsize=3000; 130 | chunksize=3000; 131 | % Monitoring 132 | 133 | 134 | 135 | %--------------------------------------------- 136 | % Initializing SVM 137 | %--------------------------------------------- 138 | SumSigma=sum(Sigma); 139 | 140 | switch option.sumbeta 141 | case 'storefullsum' 142 | kerneloption.matrix=sumKbetals(K,Sigma); 143 | qpsize=length(kerneloption.matrix); 144 | chunksize=qpsize; 145 | [xsup,Alpsup,w0,pos,aux,aux,obj] = svmclass([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,alphainit); 146 | 147 | case 'onthefly'; 148 | K.sigma=Sigma; 149 | kerneloption=K; 150 | [xsup,Alpsup,w0,pos,aux,aux,obj] = svmclasslsformkl([],yapp,C,lambdareg,kernel,kerneloption,verbosesvm,span,qpsize,chunksize,alphainit); 151 | 152 | otherwise 153 | error('No kernels defined ...'); 154 | end; 155 | 156 | [grad] = gradsvmclassls(K,pos,Alpsup,C,yapp,option); 157 | %-------------------------------------------- 158 | % 159 | %--------------------------------------------- 160 | 161 | Sigmaold = Sigma ; 162 | Alpsupold = Alpsup ; 163 | w0old = w0; 164 | posold = pos ; 165 | history.obj=[]; 166 | history.sigma=[]; 167 | history.KKTconstraint=[1]; 168 | history.dualitygap=[]; 169 | %------------------------------------------------------------------------------% 170 | % Update Main loop 171 | %------------------------------------------------------------------------------% 172 | 173 | while loop ; nloop = nloop+1; 174 | 175 | history.sigma= [history.sigma;Sigma]; 176 | history.obj=[history.obj obj]; 177 | 178 | %----------------------------------------- 179 | % Update weigths Sigma 180 | %----------------------------------------- 181 | t = cputime ; 182 | [Sigma,Alpsup,w0,pos,obj] = mklsvmlsupdate(K,Sigma,pos,Alpsup,w0,C,yapp,grad,obj,option) ; 183 | if seuil ~=0 & max(Sigma)>seuil & nloop < option.seuilitermax 184 | Sigma=(Sigma.*(Sigma>seuil))*SumSigma/sum(Sigma.*(Sigma>seuil)); 185 | end; 186 | 187 | %------------------------------- 188 | % Numerical cleaning 189 | %------------------------------- 190 | Sigma(find(abs(Sigma optiongoldensearchmax 196 | option.goldensearch_deltmax=option.goldensearch_deltmax/10; 197 | elseif option.goldensearch_deltmax~=goldensearch_deltmaxinit 198 | option.goldensearch_deltmax*10; 199 | end; 200 | 201 | 202 | %---------------------------------------------------- 203 | % process approximate KKT conditions 204 | %---------------------------------------------------- 205 | [grad] = gradsvmclassls(K,pos,Alpsup,C,yapp,option); 206 | indpos=find(Sigma>0); 207 | indzero=find(Sigma==0); 208 | 209 | KKTconstraint=abs ((min(grad(indpos))-max(grad(indpos)))/min(grad(indpos))) ; 210 | KKTconstraintZero= ( min(grad(indzero))> max(grad(indpos)) ); 211 | 212 | history.KKTconstraint=[history.KKTconstraint KKTconstraint]; 213 | 214 | %---------------------------------------------------- 215 | % process duality gap 216 | %---------------------------------------------------- 217 | % for i=1:nbkernel 218 | % normek(i)=Alpsup'*K(pos,pos,i)*Alpsup; 219 | % end 220 | normek=-2*grad ; % Alpsup'*K(pos,pos,i)*Alpsup; 221 | dualitygap=(obj + 0.5* max(normek) - sum(abs(Alpsup)))/obj; 222 | history.dualitygap=[history.dualitygap dualitygap]; 223 | 224 | %------------------------------------------ 225 | % verbosity 226 | %------------------------------------------ 227 | if verbose 228 | if nloop == 1 || rem(nloop,10)==0 229 | fprintf('------------------------------\n'); 230 | fprintf('Iter | Obj. | DiffBetas |\n'); 231 | fprintf('------------------------------\n'); 232 | end; 233 | fprintf('%d | %8.4f | %6.4f | %6.4f |\n',[nloop obj max(abs(Sigma-Sigmaold)) dualitygap]); 234 | end 235 | 236 | 237 | %---------------------------------------------------- 238 | % check variation of Sigma conditions 239 | %---------------------------------------------------- 240 | if option.stopvariation==1 & option.stopKKT== 0 & max(abs(Sigma - Sigmaold))=nloopmax , 297 | loop = 0; 298 | history.sigma= [history.sigma;Sigma]; 299 | history.obj=[history.obj obj]; 300 | fprintf(1,'maximum number of iterations reached\n') 301 | end; 302 | if nloop < option.miniter & loop==0 303 | loop=1; 304 | end; 305 | 306 | %----------------------------------------------------- 307 | % Updating Variables 308 | %---------------------------------------------------- 309 | 310 | Sigmaold = Sigma ; 311 | Alpsupold = Alpsup ; 312 | w0old = w0; 313 | posold = pos ; 314 | 315 | end; 316 | 317 | -------------------------------------------------------------------------------- /mklsvmlsupdate.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,CostNew] = mklsvmpdate(K,Sigma,pos,Alpsup,w0,C,yapp,GradNew,CostNew,option) 2 | 3 | 4 | %------------------------------------------------------------------------------% 5 | % Initialize 6 | %------------------------------------------------------------------------------% 7 | 8 | d = length(Sigma); 9 | gold = (sqrt(5)+1)/2 ; 10 | 11 | 12 | SigmaInit = Sigma ; 13 | SigmaNew = SigmaInit ; 14 | descold = zeros(1,d); 15 | 16 | %--------------------------------------------------------------- 17 | % Compute Current Cost and Gradient 18 | %%-------------------------------------------------------------- 19 | % switch option.algo 20 | % case 'svmclass' 21 | % CostNew = costsvmclass(K,0,descold,SigmaNew,pos,Alpsup,C,yapp,option) ; 22 | % GradNew = gradsvmclass(K,pos,Alpsup,C,yapp,option) ; 23 | % case 'svmreg' 24 | % CostNew = costsvmreg(K,0,descold,SigmaNew,pos,Alpsup,C,yapp,option) ; 25 | % GradNew = gradsvmreg(K,Alpsup,yapp) ; 26 | % end; 27 | 28 | %[CostNew] = costsvmclassls(K,0,descold,SigmaNew,pos,Alpsup,C,yapp,option); 29 | %[GradNew] = gradsvmclassls(K,pos,Alpsup,C,yapp,option); 30 | 31 | NormGrad = GradNew*GradNew'; 32 | GradNew=GradNew/sqrt(NormGrad); 33 | CostOld=CostNew; 34 | %--------------------------------------------------------------- 35 | % Compute reduced Gradient and descent direction 36 | %%-------------------------------------------------------------- 37 | 38 | switch option.firstbasevariable 39 | case 'first' 40 | [val,coord] = max(SigmaNew) ; 41 | 42 | case 'random' 43 | [val,coord] = max(SigmaNew) ; 44 | coord=find(SigmaNew==val); 45 | indperm=randperm(length(coord)); 46 | coord=coord(indperm(1)); 47 | case 'fullrandom' 48 | indzero=find(SigmaNew~=0); 49 | if ~isempty(indzero) 50 | [mini,coord]=min(GradNew(indzero)); 51 | coord=indzero(coord); 52 | else 53 | [val,coord] = max(SigmaNew) ; 54 | end; 55 | 56 | end; 57 | GradNew = GradNew - GradNew(coord) ; 58 | desc = - GradNew.* ( (SigmaNew>0) | (GradNew<0) ) ; 59 | desc(coord) = - sum(desc); % NB: GradNew(coord) = 0 60 | 61 | 62 | 63 | 64 | %---------------------------------------------------- 65 | % Compute optimal stepsize 66 | %----------------------------------------------------- 67 | stepmin = 0; 68 | costmin = CostOld ; 69 | costmax = 0 ; 70 | %----------------------------------------------------- 71 | % maximum stepsize 72 | %----------------------------------------------------- 73 | ind = find(desc<0); 74 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 75 | deltmax = stepmax; 76 | if isempty(stepmax) | stepmax==0 77 | Sigma = SigmaNew ; 78 | return 79 | end, 80 | if stepmax > 0.1 81 | stepmax=0.1; 82 | end; 83 | 84 | 85 | %----------------------------------------------------- 86 | % Projected gradient 87 | %----------------------------------------------------- 88 | 89 | while costmaxoption.numericalprecision) | (desc>0) ) ; 101 | desc(coord) = - sum(desc([[1:coord-1] [coord+1:end]])); 102 | ind = find(desc<0); 103 | Alpsup=Alpsupaux; 104 | w0=w0aux; 105 | pos=posaux; 106 | if ~isempty(ind) 107 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 108 | deltmax = stepmax; 109 | costmax = 0; 110 | else 111 | stepmax = 0; 112 | deltmax = 0; 113 | end; 114 | 115 | end; 116 | end; 117 | 118 | 119 | %----------------------------------------------------- 120 | % Linesearch 121 | %----------------------------------------------------- 122 | 123 | Step = [stepmin stepmax]; 124 | Cost = [costmin costmax]; 125 | [val,coord] = min(Cost); 126 | % optimization of stepsize by golden search 127 | while (stepmax-stepmin)>option.goldensearch_deltmax*(abs(deltmax)); 128 | stepmedr = stepmin+(stepmax-stepmin)/gold; 129 | stepmedl = stepmin+(stepmedr-stepmin)/gold; 130 | 131 | [costmedr,Alpsupr,w0r,posr] = costsvmclassls(K,stepmedr,desc,SigmaNew,pos,Alpsup,C,yapp,option) ; 132 | [costmedl,Alpsupl,w01,posl] = costsvmclassls(K,stepmedl,desc,SigmaNew,posr,Alpsupr,C,yapp,option) ; 133 | 134 | Step = [stepmin stepmedl stepmedr stepmax]; 135 | Cost = [costmin costmedl costmedr costmax]; 136 | [val,coord] = min(Cost); 137 | switch coord 138 | case 1 139 | stepmax = stepmedl; 140 | costmax = costmedl; 141 | pos=posl; 142 | Alpsup=Alpsupl; 143 | w0=w01; 144 | case 2 145 | stepmax = stepmedr; 146 | costmax = costmedr; 147 | pos=posr; 148 | Alpsup=Alpsupr; 149 | w0=w0r; 150 | case 3 151 | stepmin = stepmedl; 152 | costmin = costmedl; 153 | pos=posl; 154 | Alpsup=Alpsupl; 155 | w0=w01; 156 | case 4 157 | stepmin = stepmedr; 158 | costmin = costmedr; 159 | pos=posr; 160 | Alpsup=Alpsupr; 161 | w0=w0r; 162 | end; 163 | end; 164 | 165 | 166 | %--------------------------------- 167 | % Final Updates 168 | %--------------------------------- 169 | 170 | CostNew = Cost(coord) ; 171 | step = Step(coord) ; 172 | % Sigma update 173 | if CostNew < CostOld ; 174 | SigmaNew = SigmaNew + step * desc; 175 | 176 | end; 177 | 178 | Sigma = SigmaNew ; 179 | -------------------------------------------------------------------------------- /mklsvmregSILP.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/mklsvmregSILP.m -------------------------------------------------------------------------------- /mklsvmupdate.m: -------------------------------------------------------------------------------- 1 | function [Sigma,Alpsup,w0,pos,CostNew] = mklsvmpdate(K,Sigma,pos,Alpsup,w0,C,yapp,GradNew,CostNew,option) 2 | 3 | 4 | %------------------------------------------------------------------------------% 5 | % Initialize 6 | %------------------------------------------------------------------------------% 7 | 8 | d = length(Sigma); 9 | gold = (sqrt(5)+1)/2 ; 10 | 11 | 12 | SigmaInit = Sigma ; 13 | SigmaNew = SigmaInit ; 14 | descold = zeros(1,d); 15 | 16 | %--------------------------------------------------------------- 17 | % Compute Current Cost and Gradient 18 | %%-------------------------------------------------------------- 19 | % switch option.algo 20 | % case 'svmclass' 21 | % % CostNew = costsvmclass(K,0,descold,SigmaNew,pos,Alpsup,C,yapp,option) ; 22 | % % GradNew = gradsvmclass(K,pos,Alpsup,C,yapp,option) ; 23 | % case 'svmreg' 24 | % % CostNew = costsvmreg(K,0,descold,SigmaNew,pos,Alpsup,C,yapp,option) ; 25 | % % GradNew = gradsvmreg(K,Alpsup,yapp) ; 26 | % end; 27 | 28 | NormGrad = GradNew*GradNew'; 29 | GradNew=GradNew/sqrt(NormGrad); 30 | CostOld=CostNew; 31 | %--------------------------------------------------------------- 32 | % Compute reduced Gradient and descent direction 33 | %%-------------------------------------------------------------- 34 | 35 | switch option.firstbasevariable 36 | case 'first' 37 | [val,coord] = max(SigmaNew) ; 38 | 39 | case 'random' 40 | [val,coord] = max(SigmaNew) ; 41 | coord=find(SigmaNew==val); 42 | indperm=randperm(length(coord)); 43 | coord=coord(indperm(1)); 44 | case 'fullrandom' 45 | indzero=find(SigmaNew~=0); 46 | if ~isempty(indzero) 47 | [mini,coord]=min(GradNew(indzero)); 48 | coord=indzero(coord); 49 | else 50 | [val,coord] = max(SigmaNew) ; 51 | end; 52 | 53 | end; 54 | GradNew = GradNew - GradNew(coord) ; 55 | desc = - GradNew.* ( (SigmaNew>0) | (GradNew<0) ) ; 56 | desc(coord) = - sum(desc); % NB: GradNew(coord) = 0 57 | 58 | 59 | 60 | 61 | %---------------------------------------------------- 62 | % Compute optimal stepsize 63 | %----------------------------------------------------- 64 | stepmin = 0; 65 | costmin = CostOld ; 66 | costmax = 0 ; 67 | %----------------------------------------------------- 68 | % maximum stepsize 69 | %----------------------------------------------------- 70 | ind = find(desc<0); 71 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 72 | deltmax = stepmax; 73 | if isempty(stepmax) | stepmax==0 74 | Sigma = SigmaNew ; 75 | return 76 | end, 77 | if stepmax > 0.1 78 | stepmax=0.1; 79 | end; 80 | 81 | %----------------------------------------------------- 82 | % Projected gradient 83 | %----------------------------------------------------- 84 | 85 | while costmax0) | (desc>0) ) ; 105 | desc = desc .* ( (SigmaNew>option.numericalprecision) | (desc>0) ) ; 106 | desc(coord) = - sum(desc([[1:coord-1] [coord+1:end]])); 107 | ind = find(desc<0); 108 | Alpsup=Alpsupaux; 109 | w0=w0aux; 110 | pos=posaux; 111 | if ~isempty(ind) 112 | stepmax = min(-(SigmaNew(ind))./desc(ind)); 113 | deltmax = stepmax; 114 | costmax = 0; 115 | else 116 | stepmax = 0; 117 | deltmax = 0; 118 | end; 119 | 120 | end; 121 | end; 122 | 123 | 124 | %----------------------------------------------------- 125 | % Linesearch 126 | %----------------------------------------------------- 127 | 128 | Step = [stepmin stepmax]; 129 | Cost = [costmin costmax]; 130 | [val,coord] = min(Cost); 131 | % optimization of stepsize by golden search 132 | while (stepmax-stepmin)>option.goldensearch_deltmax*(abs(deltmax)) & stepmax > eps; 133 | stepmedr = stepmin+(stepmax-stepmin)/gold; 134 | stepmedl = stepmin+(stepmedr-stepmin)/gold; 135 | switch option.algo 136 | case 'svmclass' 137 | [costmedr,Alpsupr,w0r,posr] = costsvmclass(K,stepmedr,desc,SigmaNew,pos,Alpsup,C,yapp,option) ; 138 | [costmedl,Alpsupl,w01,posl] = costsvmclass(K,stepmedl,desc,SigmaNew,posr,Alpsupr,C,yapp,option) ; 139 | case 'svmreg' 140 | [costmedr,Alpsupr,w0r,posr] = costsvmreg(K,stepmedr,desc,SigmaNew,pos,Alpsup,C,yapp,option) ; 141 | [costmedl,Alpsupl,w01,posl] = costsvmreg(K,stepmedl,desc,SigmaNew,posr,Alpsupr,C,yapp,option) ; 142 | 143 | end; 144 | Step = [stepmin stepmedl stepmedr stepmax]; 145 | Cost = [costmin costmedl costmedr costmax]; 146 | [val,coord] = min(Cost); 147 | switch coord 148 | case 1 149 | stepmax = stepmedl; 150 | costmax = costmedl; 151 | pos=posl; 152 | Alpsup=Alpsupl; 153 | w0=w01; 154 | case 2 155 | stepmax = stepmedr; 156 | costmax = costmedr; 157 | pos=posr; 158 | Alpsup=Alpsupr; 159 | w0=w0r; 160 | case 3 161 | stepmin = stepmedl; 162 | costmin = costmedl; 163 | pos=posl; 164 | Alpsup=Alpsupl; 165 | w0=w01; 166 | case 4 167 | stepmin = stepmedr; 168 | costmin = costmedr; 169 | pos=posr; 170 | Alpsup=Alpsupr; 171 | w0=w0r; 172 | end; 173 | end; 174 | 175 | 176 | %--------------------------------- 177 | % Final Updates 178 | %--------------------------------- 179 | 180 | CostNew = Cost(coord) ; 181 | step = Step(coord) ; 182 | % Sigma update 183 | if CostNew < CostOld ; 184 | SigmaNew = SigmaNew + step * desc; 185 | 186 | end; 187 | 188 | Sigma = SigmaNew ; 189 | -------------------------------------------------------------------------------- /monqp.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/monqp.m -------------------------------------------------------------------------------- /normalizemeanstd.m: -------------------------------------------------------------------------------- 1 | function [xapp,xtest,meanxapp,stdxapp] = normalizemeanstd(xapp,xtest,meanx,stdx) 2 | 3 | % USAGE 4 | % 5 | % [xapp,xtest,meanxapp,stdxapp] = normalizemeanstd(xapp,xtest) 6 | % 7 | % normalize inputs and output mean and standard deviation to 0 and 1 8 | % 9 | % 10 | tol=1e-5; 11 | 12 | 13 | 14 | nbsuppress=0; 15 | if nargin <3 16 | meanxapp=mean(xapp); 17 | stdxapp=std(xapp); 18 | else 19 | meanxapp=meanx; 20 | stdxapp=stdx; 21 | end; 22 | nbxapp=size(xapp,1); 23 | indzero=find(abs(stdxapp)1 & ~isempty(xtest) 35 | nbxtest=size(xtest,1); 36 | xtest= (xtest - ones(nbxtest,1)*meanxapp)./ (ones(nbxtest,1)*stdxapp ); 37 | else 38 | xtest=[]; 39 | end; -------------------------------------------------------------------------------- /sumKbeta.m: -------------------------------------------------------------------------------- 1 | function Kaux=sumKbeta(K,beta) 2 | 3 | % Usage 4 | % Kaux=sumKbeta(K,beta) 5 | % 6 | % K is usually a 3D matrix where K(:,:,i) is 7 | % the K_i gram matrix 8 | % 9 | % K can also be a (n*(n-1)/2) x nbkernel matrix build 10 | % by build_efficientkernel and is a struct 11 | 12 | if ~isstruct(K) 13 | ind=find(beta); 14 | nbkernel=size(K,3); 15 | Kaux=zeros(size(K(:,:,1))); 16 | N=length(ind); 17 | for j=1:N 18 | Kaux=Kaux+ beta(ind(j))*K(:,:,ind(j)); 19 | end 20 | else 21 | if size(beta,1)>1; 22 | beta=beta'; 23 | end; 24 | if isa(K.data,'single'); 25 | Kaux=devectorize_single(K.data*beta'); 26 | else 27 | Kaux=devectorize(K.data*beta'); 28 | end; 29 | end; -------------------------------------------------------------------------------- /sumKbetals.m: -------------------------------------------------------------------------------- 1 | function K=sumKbetals(Kinfo,sigma,ind1,ind2) 2 | 3 | 4 | % Build the full Gram matrix from the stored 5 | % matrix files 6 | % 7 | % LoadFromDisk 8 | % K.size=length(Weight); 9 | % K.tempdir=tempdir; 10 | % K.nbdata=size(xapp,1); 11 | % 12 | % OntheFly 13 | % K.size=length(Weight); 14 | % K.x=xapp; 15 | % K.info=InfoKernel; 16 | 17 | 18 | indsigma=find(sigma); 19 | n=length(indsigma); 20 | 21 | if ~isfield(Kinfo,'x') & ~isfield(Kinfo,'info'); 22 | %--------------------------- 23 | % load from disk 24 | %--------------------------- 25 | N1=Kinfo.nbdata; 26 | if nargin < 3 27 | ind1=1:N1; 28 | ind2=1:N1; 29 | K=zeros(N1,N1); 30 | else 31 | K=zeros(length(ind1),length(ind2)); 32 | end; 33 | 34 | for i=1:n 35 | 36 | file=['K' int2str(indsigma(i))]; 37 | load([Kinfo.tempdir file '.mat']); 38 | if isstruct(Kr) % you have used efficient kernel representation 39 | if isa(Kr.data,'single') 40 | Kr=devectorize_single(Kr.data); 41 | else 42 | Kr=devectorize(Kr.data); 43 | end; 44 | end; 45 | K=K+ sigma(indsigma(i))* Kr(ind1,ind2); 46 | 47 | 48 | 49 | 50 | end; 51 | 52 | else 53 | %-------------------------- 54 | % Compute on the fly 55 | %-------------------------- 56 | N1=size(Kinfo.x,1); 57 | 58 | if nargin < 3 59 | ind1=1:N1; 60 | ind2=1:N1; 61 | K=zeros(N1,N1); 62 | else 63 | K=zeros(length(ind1),length(ind2)); 64 | end; 65 | for k=1:n 66 | indk=indsigma(k); 67 | variabletouse=Kinfo.info(indk).variable; 68 | poids=sigma(indsigma(k))*Kinfo.info(indk).Weigth; 69 | kernel=Kinfo.info(indk).kernel; 70 | kerneloption=Kinfo.info(indk).kerneloption; 71 | K=K+poids*svmkernel(Kinfo.x(ind1,variabletouse),kernel,kerneloption,Kinfo.x(ind2,variabletouse)); 72 | 73 | 74 | end 75 | end; -------------------------------------------------------------------------------- /svmclass.m: -------------------------------------------------------------------------------- 1 | function [xsup,w,d,pos,timeps,alpha,obj]=svmclass(x,y,c,lambda,kernel,kerneloption,verbose,span, alphainit) 2 | % USAGE [xsup,w,b,pos,timeps,alpha,obj]=svmclass(x,y,c,lambda,kernel,kerneloption,verbose,span, alphainit) 3 | % 4 | % Support vector machine for CLASSIFICATION 5 | % This routine classify the training set with a support vector machine 6 | % using quadratic programming algorithm (active constraints method) 7 | % 8 | % INPUT 9 | % 10 | % Training set 11 | % x : input data 12 | % y : output data 13 | % parameters 14 | % c : Bound on the lagrangian multipliers 15 | % lambda : Conditioning parameter for QP method 16 | % kernel : kernel type. classical kernel are 17 | % 18 | % Name parameters 19 | % 'poly' polynomial degree 20 | % 'gaussian' gaussian standard deviation 21 | % 22 | % for more details see svmkernel 23 | % 24 | % kerneloption : parameters of kernel 25 | % 26 | % for more details see svmkernel 27 | % 28 | % verbose : display outputs (default value is 0: no display) 29 | % 30 | % Span : span matrix for semiparametric learning 31 | % This vector is sized Nbapp*Nbbasisfunction where 32 | % phi(i,j)= f_j(x(i)); 33 | % 34 | % 35 | % 36 | % OUTPUT 37 | % 38 | % xsup coordinates of the Support Vector 39 | % w weight 40 | % b bias 41 | % pos position of Support Vector 42 | % timeps time for processing the scalar product 43 | % alpha Lagragian multiplier 44 | % obj Value of Objective function 45 | % 46 | % 47 | % see also svmreg, svmkernel, svmval 48 | 49 | % 21/09/97 S. Canu 50 | % 04/06/00 A. Rakotomamonjy -inclusion of other kernel functions 51 | % 04/05/01 S. Canu -inclusion of multi-constraint optimization for frame-SVM 52 | % 53 | % scanu@insa-rouen.fr, alain.rakoto@insa-rouen.fr 54 | 55 | 56 | if nargin< 9 57 | alphainit=[]; 58 | end; 59 | 60 | if nargin < 8 | isempty(span) 61 | A = y; 62 | b = 0; 63 | else 64 | if span==1 65 | span=ones(size(y)); 66 | end; 67 | [na,m]=size(span); 68 | [n un] = size(y); 69 | if n ~= na 70 | error('span, x and y must have the same number of row') 71 | end 72 | A = (y*ones(1,m)).*span; 73 | b = zeros(m,1); 74 | end 75 | if nargin < 7 76 | verbose = 0; 77 | end 78 | 79 | if nargin < 6 80 | gamma = 1; 81 | end 82 | 83 | if nargin < 5 84 | kernel = 'gaussian'; 85 | end 86 | 87 | if nargin < 4 88 | lambda = 0.000000001; 89 | end 90 | 91 | if nargin < 3 92 | c = inf; 93 | end 94 | 95 | 96 | [n un] = size(y); 97 | 98 | if ~isempty(x) 99 | [nl nc] = size(x); 100 | if n ~= nl 101 | error('x and y must have the same number of row') 102 | end 103 | end; 104 | 105 | if min(y) ~= -1 106 | error(' y must coded: 1 for class one and -1 for class two') 107 | end 108 | 109 | if verbose ~= 0 disp('building the distance matrix'); end; 110 | 111 | ttt = cputime; 112 | 113 | ps = zeros(n,n); 114 | ps=svmkernel(x,kernel,kerneloption); 115 | 116 | 117 | %---------------------------------------------------------------------- 118 | % monqp(H,b,c) solves the quadratic programming problem: 119 | % 120 | % min 0.5*x'Hx - d'x subject to: A'x = b and 0 <= x <= c 121 | % x 122 | %---------------------------------------------------------------------- 123 | H =ps.*(y*y'); 124 | e = ones(size(y)); 125 | 126 | timeps = cputime - ttt; 127 | 128 | if verbose ~= 0 disp('in QP'); end; 129 | if isinf(c) 130 | [alpha , lambda , pos] = monqpCinfty(H,e,A,b,lambda,verbose,x,ps,alphainit); 131 | else 132 | [alpha , lambda , pos] = monqp(H,e,A,b,c,lambda,verbose,x,ps,alphainit); 133 | 134 | end 135 | if verbose ~= 0 disp('out QP'); end; 136 | 137 | alphaall=zeros(size(e)); 138 | alphaall(pos)=alpha; 139 | obj=-0.5*alphaall'*H*alphaall +e'*alphaall; 140 | 141 | if ~isempty(x) 142 | xsup = x(pos,:); 143 | else 144 | xsup=[]; 145 | end; 146 | 147 | ysup = y(pos); 148 | 149 | 150 | 151 | w = (alpha.*ysup); 152 | d = lambda; 153 | 154 | if verbose ~= 0 155 | disp('max(alpha)') 156 | fprintf(1,'%6.2f\n',max(alpha)) 157 | end 158 | -------------------------------------------------------------------------------- /svmclasslsformkl.m: -------------------------------------------------------------------------------- 1 | function [xsup,w,b,pos,alpha,status,cost]=svmclassLSformkl(x,y,c,lambda,kernel,kerneloption,verbose,span,qpsize,chunksize,alphainit) 2 | 3 | 4 | % 5 | % [xsup,w,b,pos,timeps,alpha,status,cost]]=svmclassLS(x,y,c,lambda,kernel,kerneloption,verbose,span,qpsize,chunksize) 6 | % 7 | % % 8 | % % large-scale classification svm 9 | % % 10 | 11 | 12 | %dbstop if warning 13 | if nargin<11 14 | alphainit=[]; 15 | end; 16 | if nargin < 10 17 | chunksize=100; 18 | end; 19 | if nargin<9 20 | qpsize=100; 21 | end; 22 | maxqpsize=qpsize; 23 | if nargin < 10 24 | % even number 25 | chunksize=qpsize; 26 | end; 27 | if isstruct(x) 28 | if length(x.indice)~=length(y) 29 | error('Length of x and y should be equal'); 30 | end; 31 | end 32 | 33 | n=size(y,1); 34 | kkttol=1e-3; 35 | difftol=1e-10; 36 | notchangedmax=5; 37 | status=1; 38 | 39 | 40 | if isempty(alphainit) 41 | alphaold=zeros(n,1); 42 | alpha=zeros(n,1); 43 | else 44 | alpha=alphainit; 45 | end; 46 | 47 | workingset=zeros(n,1); 48 | nws=zeros(n,1); 49 | 50 | class1=(y>=0); 51 | class0=(y<0); 52 | iteration=0; 53 | bias=0; 54 | 55 | notchanged=0; 56 | 57 | 58 | 59 | %keyboard 60 | 61 | while 1 62 | 63 | 64 | 65 | 66 | % 67 | % calcul des indices des SV et non SV 68 | % 69 | 70 | SVbound=(alpha>=c-difftol); 71 | 72 | SV=(abs(alpha)>=difftol); 73 | 74 | SVnonbound= (~SVbound & SV); 75 | 76 | 77 | % 78 | % Calcul de la sortie du SVM 79 | % 80 | 81 | if iteration==0 ; 82 | changedSV=find(SV); 83 | changedAlpha=alpha(changedSV); 84 | s=zeros(n,1); 85 | 86 | else 87 | changedSV=find( abs(alpha-alphaold)> difftol ); 88 | changedAlpha=alpha(changedSV)-alphaold(changedSV); 89 | end; 90 | 91 | if ~isempty(changedSV) 92 | 93 | chunks1=ceil(n/chunksize); 94 | chunks2=ceil(length(changedSV)/chunksize); 95 | 96 | for ch1=1:chunks1 97 | %fprintf('%d sur %d\n',ch1,chunks1) ; 98 | ind1=(1+(ch1-1)*chunksize) : min( n, ch1*chunksize); 99 | for ch2=1:chunks2 100 | ind2=(1+(ch2-1)*chunksize) : min(length(changedSV), ch2*chunksize); 101 | 102 | 103 | %------------------------------------------------------- 104 | % c'est ici qu'on calcule la matrice 105 | %------------------------------------------------------- 106 | 107 | if isfield(kerneloption,'matrix') 108 | kchunk=kerneloption.matrix(ind1,changedSV(ind2)); 109 | else 110 | kchunk=sumKbetals(kerneloption,kerneloption.sigma,ind1,changedSV(ind2)); 111 | 112 | end; 113 | 114 | 115 | 116 | %----------------------------------------------------------- 117 | %kchunk=svmkernel(x(ind1,:),kernel,kerneloption,x(changedSV(ind2),:)); 118 | %----------------------------------------------------------- 119 | coeff=changedAlpha(ind2).*y(changedSV(ind2)); 120 | 121 | s(ind1)=s(ind1)+ kchunk*coeff; 122 | end; 123 | end 124 | 125 | end; 126 | 127 | % 128 | % calcul du biais du SVM que sur l'ensemble du working set et 129 | % SVnonbound 130 | 131 | indworkingSVnonbound= find(SVnonbound& workingset); 132 | if ~isempty(indworkingSVnonbound) 133 | bias= mean( y(indworkingSVnonbound)-s(indworkingSVnonbound) ); 134 | end; 135 | 136 | 137 | 138 | % 139 | % KKT Conditions 140 | % 141 | 142 | kkt=(s+bias).*y - 1; 143 | kktviolation= (SVnonbound & ( abs(kkt)>kkttol) )|( ~SV & (kkt < -kkttol)) | ( SVbound & (kkt > kkttol)); 144 | 145 | if sum(kktviolation)==0 146 | break; % c'est fini tout 147 | end; 148 | 149 | 150 | 151 | % 152 | % Calcul du nouveau working set 153 | % 154 | 155 | if iteration==0 156 | searchdir=rand(n,1); 157 | set1=class1; 158 | set2=class0; 159 | 160 | else 161 | searchdir=s-y; 162 | set1 = (SV |class0) & (~SVbound |class1); 163 | set2= (SV |class1) & (~SVbound |class0); 164 | end; 165 | 166 | 167 | 168 | oldworkingset=workingset; 169 | workingset=zeros(n,1); 170 | n1=sum(set1); 171 | n2=sum(set2); 172 | if n1+n2 <= qpsize 173 | aux=find( set1 |set2); 174 | workingset(aux)=ones(length(aux),1); 175 | %workingset(find( set1 |set2))=ones(n1+n2,1); 176 | elseif n1 <=floor(qpsize)/2 177 | 178 | workingset(find(set1))=ones(n1,1); 179 | set2= set2 &~workingset; 180 | n2=sum(set2); 181 | [aux,ind]=sort(searchdir(set2)); 182 | from2=min(n2,qpsize-n1); 183 | aux=find(set2); 184 | workingset(aux(1:from2))=ones(from2,1); 185 | elseif n2 <=floor(qpsize)/2 186 | 187 | workingset(find(set2))=ones(n2,1); 188 | set1= set1 &~workingset; 189 | n1=sum(set1); 190 | [aux,ind]=sort(-searchdir(set1)); 191 | from1=min(n1,qpsize-n2); 192 | aux=find(set1); 193 | workingset(aux(1:from1))=ones(from1,1); 194 | else 195 | 196 | set1=find(set1); 197 | [aux,ind]=sort(-searchdir(set1)); 198 | from1=min(length(set1),qpsize/2); 199 | workingset(set1(ind(1:from1)))=ones(from1,1); 200 | set2=find(set2 & ~workingset); 201 | [aux,ind]=sort(searchdir(set2)); 202 | from2=min(length(set2),qpsize-sum(workingset)); 203 | workingset(set2(ind(1:from2)))=ones(from2,1); 204 | end; 205 | 206 | if all(workingset==oldworkingset) 207 | % fprintf('Not changed \n'); 208 | 209 | indpos=find(y==1); 210 | indneg=find(y==-1); 211 | RandIndpos=randperm(length(indpos)); 212 | RandIndneg=randperm(length(indneg)); 213 | nbpos=min(length(indpos),round(qpsize/2)); 214 | nbneg=min(length(indneg),round(qpsize/2)); 215 | ind=[indpos(RandIndpos(1:nbpos));indneg(RandIndneg(1:nbneg))]; 216 | workingset(ind)=ones(length(ind),1); 217 | 218 | 219 | 220 | end; 221 | indworkingset=find(workingset); 222 | workingsize=length(indworkingset); 223 | nws=~workingset; 224 | indnws= find(nws); 225 | 226 | 227 | % 228 | % Resolution du QP probleme sur le nouveau Working set 229 | % 230 | 231 | % le calcul de Qbn*alphan ne fait intervenir que les données aux alphan non nulles et les données de la working 232 | % set 233 | 234 | 235 | nwSV= (nws & SV); 236 | indnwSV=find(nwSV); 237 | Qbnalphan=0; 238 | if length(indnwSV)>0 239 | 240 | chunks=ceil(length(indnwSV)/chunksize); 241 | for ch=1:chunks 242 | ind=(1+(ch-1)*chunksize ): min( length(indnwSV), ch*chunksize); 243 | 244 | %------------------------------------------------------- 245 | % c'est ici qu'on calcule la matrice 246 | %------------------------------------------------------- 247 | 248 | % pschunk=kerneloption.matrix(indworkingset,indnwSV(ind)); 249 | 250 | 251 | if isfield(kerneloption,'matrix') 252 | pschunk=kerneloption.matrix(indworkingset,indnwSV(ind)); 253 | else 254 | 255 | 256 | pschunk=sumKbetals(kerneloption,kerneloption.sigma,indworkingset,indnwSV(ind)); 257 | 258 | end; 259 | 260 | 261 | 262 | %----------------------------------------------------------- 263 | % pschunk=svmkernel(x(indworkingset,:),kernel,kerneloption,x(indnwSV(ind),:)); 264 | %----------------------------------------------------------- 265 | 266 | 267 | 268 | Qbnalphan=Qbnalphan + y(indworkingset).*(pschunk*(alpha(indnwSV(ind)).*y(indnwSV(ind)))); 269 | end; 270 | e= - (Qbnalphan - ones(workingsize,1)); 271 | 272 | else 273 | e=ones(workingsize,1); 274 | end; 275 | 276 | %------------------------------------------------------- 277 | % c'est ici qu'on calcule la matrice 278 | %------------------------------------------------------- 279 | % psbb=kerneloption.matrix(indworkingset,indworkingset); 280 | 281 | if isfield(kerneloption,'matrix') 282 | psbb=kerneloption.matrix(indworkingset,indworkingset); 283 | else 284 | 285 | 286 | psbb=sumKbetals(kerneloption,kerneloption.sigma,indworkingset,indworkingset); 287 | 288 | end; 289 | 290 | 291 | yb=y(indworkingset); 292 | A=yb; 293 | if length(indnws)>0 294 | b=-alpha(indnws)'*y(indnws); 295 | else 296 | b=0; 297 | end; 298 | [alphab,lambdab,pos]=monqp(psbb.*(yb*yb'),e,A,b,c,lambda,0);%,psbb); 299 | 300 | alphaold=alpha; 301 | aux=zeros(workingsize,1); 302 | aux(pos)=alphab; 303 | alpha(indworkingset)=aux; 304 | iteration=iteration+1; 305 | 306 | 307 | if length(find( abs(alpha-alphaold)> difftol))==0 308 | notchanged=notchanged+1; 309 | if notchanged>notchangedmax 310 | fprintf('Optimization not successfull\n'); 311 | status=0; 312 | break; 313 | 314 | end; 315 | else 316 | notchanged=0; 317 | end; 318 | 319 | if verbose >0 320 | obj= 0.5*aux'*(psbb.*(yb*yb'))*aux- aux'*e; 321 | fprintf('i: %d number changedAlpha : %d Nb KKT Violation: %d Objective Val:%f\n',iteration,length(find( abs(alpha-alphaold)> difftol)),sum(kktviolation),obj); 322 | end; 323 | if sum(kktviolation) < maxqpsize 324 | qpsize=maxqpsize; 325 | chunksize=maxqpsize; 326 | end; 327 | end; 328 | 329 | % SVbound=(alpha>=c); 330 | % SV=(alpha ~=0); 331 | % SVnonbound= (~SVbound & SV); 332 | 333 | SVbound=(alpha>=c-difftol); 334 | SV=(abs(alpha)>=difftol); 335 | SVnonbound= (~SVbound & SV); 336 | 337 | pos=find(alpha ~=0); 338 | 339 | 340 | if ~isempty(x) 341 | if ~isfield(x,'datafile') 342 | xsup = x(pos,:); 343 | else 344 | xsup=x; 345 | xsup.indice=x.indice(pos); 346 | end; 347 | else 348 | xsup=[]; 349 | end; 350 | ysup = y(pos); 351 | w = (alpha(pos).*ysup); 352 | 353 | indworkingSVnonbound= find(SVnonbound& workingset); 354 | if ~isempty(indworkingSVnonbound) 355 | bias= mean( y(indworkingSVnonbound)-s(indworkingSVnonbound) ); 356 | end; 357 | b = bias; 358 | alpha=alpha(pos); 359 | 360 | % s= K*alpha(pos) 361 | 362 | cost= -0.5*w'*s(pos) + sum(alpha); 363 | 364 | -------------------------------------------------------------------------------- /svmkernel.m: -------------------------------------------------------------------------------- 1 | function [K,option]=svmkernel(x,kernel,kerneloption,xsup,framematrix,vector,dual); 2 | 3 | % Usage K=svkernel(x,kernel,kerneloption,xsup,frame,vector,dual); 4 | % 5 | % Returns the scalar product of the vectors x by using the 6 | % mapping defined by the kernel function or x and xsup 7 | % if the matrix xsup is defined 8 | % 9 | % Input 10 | % 11 | % x :input vectors 12 | % kernel : kernel function 13 | % Type Function Option 14 | % Polynomial 'poly' Degree (+1)^d 15 | % Homogeneous polynomial 'polyhomog' Degree ^d 16 | % Gaussian 'gaussian' Bandwidth 17 | % Heavy Tailed RBF 'htrbf' [a,b] %see Chappelle 1999 18 | % Mexican 1D Wavelet 'wavelet' 19 | % Frame kernel 'frame' 'sin','numerical'... 20 | % 21 | % kerneloption : scalar or vector containing the option for the kernel 22 | % 'gaussian' : scalar gamma is identical for all coordinates 23 | % otherwise is a vector of length equal to the number of 24 | % coordinate 25 | % 26 | % 27 | % 'poly' : kerneloption is a scalar given the degree of the polynomial 28 | % or is a vector which first element is the degree of the polynomial 29 | % and other elements gives the bandwidth of each dimension. 30 | % thus the vector is of size n+1 where n is the dimension of the problem. 31 | % 32 | % 33 | % xsup : support vector 34 | % 35 | % ----- 1D Frame Kernel -------------------------- 36 | % 37 | % framematrix frame elements for frame kernel 38 | % vector sampling position of frame elements 39 | % dual dual frame 40 | % frame,vector and dual are respectively the matrices and the vector where the frame 41 | % elements have been processed. these parameters are used only in case 42 | % 43 | % 44 | % see also svmreg,svmclass,svmval, kernelwavelet,kernelframe 45 | % 46 | 47 | % O4/O6/2000 A. Rakotomamonjy 48 | 49 | 50 | if nargin < 6 51 | vector=[]; 52 | dual=[]; 53 | end; 54 | if nargin <5 55 | frame=[]; 56 | end; 57 | 58 | if nargin<4 59 | xsup=x; 60 | end; 61 | if nargin<3 62 | kerneloption=1; 63 | end; 64 | if nargin<2 65 | kernel='gaussian'; 66 | end; 67 | if isempty(xsup) 68 | xsup=x; 69 | end; 70 | [n1 n2]=size(x); 71 | [n n3]=size(xsup); 72 | ps = zeros(n1,n); % produit scalaire 73 | switch lower(kernel) 74 | case 'poly' 75 | 76 | [nk,nk2]=size(kerneloption); 77 | if nk>nk2 78 | kerneloption=kerneloption'; 79 | nk2=nk; 80 | end; 81 | if nk2==1 82 | degree=kerneloption; 83 | var=ones(1,n2); 84 | 85 | elseif nk2 ==2 86 | degree=kerneloption(1); 87 | var=ones(1,n2)*kerneloption(2); 88 | 89 | elseif nk2== n2+1 90 | degree=kerneloption(1); 91 | var=kerneloption(2:n2+1); 92 | 93 | elseif nk2 ==n2+2 94 | degree=kerneloption(1); 95 | var=kerneloption(2:n2+1); 96 | end; 97 | 98 | if nk2==1 99 | aux=1; 100 | else 101 | aux=repmat(var,n,1); 102 | end; 103 | 104 | ps= x *(xsup.*aux.^2)'; 105 | 106 | if degree > 1 107 | K =(ps+1).^degree; 108 | else 109 | K=ps; 110 | end; 111 | case 'polyhomog' 112 | 113 | [nk,nk2]=size(kerneloption); 114 | if nk>nk2 115 | kerneloption=kerneloption'; 116 | nk2=nk; 117 | end; 118 | if nk2==1 119 | degree=kerneloption; 120 | var=ones(1,n2); 121 | else 122 | if nk2 ~=n2+1 123 | degree=kerneloption(1); 124 | var=ones(1,n2)*kerneloption(2); 125 | else 126 | degree=kerneloption(1); 127 | var=kerneloption(2:nk2); 128 | end; 129 | end; 130 | 131 | 132 | aux=repmat(var,n,1); 133 | ps= x *(xsup.*aux.^2)'; 134 | K =(ps).^degree; 135 | 136 | 137 | case 'gaussian' 138 | [nk,nk2]=size(kerneloption); 139 | if nk ~=nk2 140 | if nk>nk2 141 | kerneloption=kerneloption'; 142 | end; 143 | else 144 | kerneloption=ones(1,n2)*kerneloption; 145 | end; 146 | 147 | if length(kerneloption)~=n2 & length(kerneloption)~=n2+1 148 | error('Number of kerneloption is not compatible with data...'); 149 | end; 150 | 151 | 152 | metric = diag(1./kerneloption.^2); 153 | ps = x*metric*xsup'; 154 | [nps,pps]=size(ps); 155 | normx = sum(x.^2*metric,2); 156 | normxsup = sum(xsup.^2*metric,2); 157 | ps = -2*ps + repmat(normx,1,pps) + repmat(normxsup',nps,1) ; 158 | 159 | 160 | K = exp(-ps/2); 161 | 162 | case 'htrbf' % heavy tailed RBF %see Chappelle Paper% 163 | b=kerneloption(2); 164 | a=kerneloption(1); 165 | for i=1:n 166 | ps(:,i) = sum( abs((x.^a - ones(n1,1)*xsup(i,:).^a)).^b ,2); 167 | end; 168 | 169 | 170 | K = exp(-ps); 171 | 172 | case 'gaussianslow' % 173 | %b=kerneloption(2); 174 | %a=kerneloption(1); 175 | for i=1:n 176 | ps(:,i) = sum( abs((x - ones(n1,1)*xsup(i,:))).^2 ,2)./kerneloption.^2/2; 177 | end; 178 | 179 | 180 | K = exp(-ps); 181 | case 'multiquadric' 182 | metric = diag(1./kerneloption); 183 | ps = x*metric*xsup'; 184 | [nps,pps]=size(ps); 185 | normx = sum(x.^2*metric,2); 186 | normxsup = sum(xsup.^2*metric,2); 187 | ps = -2*ps + repmat(normx,1,pps) + repmat(normxsup',nps,1) ; 188 | K=sqrt(ps + 0.1); 189 | case 'wavelet' 190 | K=kernelwavelet(x,kerneloption,xsup); 191 | case 'frame' 192 | K=kernelframe(x,kerneloption,xsup,framematrix,vector,dual); 193 | case 'wavelet2d' 194 | K=wav2dkernelint(x,xsup,kerneloption); 195 | case 'radialwavelet2d' 196 | K=radialwavkernel(x,xsup); 197 | case 'tensorwavkernel' 198 | [K,option]=tensorwavkernel(x,xsup,kerneloption); 199 | 200 | case 'numerical' 201 | K=kerneloption.matrix; 202 | case 'polymetric' 203 | K=x*kerneloption.metric*xsup'; 204 | 205 | case 'jcb' 206 | K=x*xsup'; 207 | 208 | end; 209 | 210 | 211 | 212 | -------------------------------------------------------------------------------- /svmval.m: -------------------------------------------------------------------------------- 1 | function [y,y1,y2]=svmval(x,xsup,w,b,kernel,kerneloption,span,framematrix,vector,dual) 2 | 3 | % USAGE 4 | % [y,y1,y2]=svmval(x,xsup,w,b,kernel,kerneloption,span,framematrix,vector,dual) 5 | % 6 | % svmval computes the prediction of a support vector machine 7 | % using the kernel function and its parameter for classification 8 | % or regression 9 | % 10 | % INPUT 11 | % x : input data 12 | % xsup : support vector list 13 | % w : weight 14 | % kernel : string containing the type of kernel 15 | % kerneloption : setting parameter of kernel. 16 | % b : bias. this can be a column vector in case of semiparametric SVM 17 | % span : span matrix for semiparametric SVM 18 | % 19 | % ----- 1D Frame Kernel -------------------------- 20 | % 21 | % framematrix frame elements for frame kernel 22 | % vector sampling position of frame elements 23 | % dual dual frame 24 | % 25 | % OUTPUT 26 | % 27 | % y : the output ouf the network at point (vector or matrix) x 28 | % 29 | % y = w phi(x) - b*span(x) 30 | % y1= w phi(x) 31 | % y2= b*span(x) 32 | % 33 | % 34 | % See also svmclass,svmreg, svmkernel 35 | % 36 | % 37 | 38 | % 12/10/00 A. Rakotomamonjy Including SVM kernel 39 | 40 | 41 | % 42 | % Usual verifications 43 | % 44 | semiparam=0; 45 | if nargin<4 46 | error('Insufficients number of input arguments....'); 47 | end; 48 | if nargin < 5 49 | kernel='gaussian'; 50 | end; 51 | if nargin < 6 52 | kerneloption=1; 53 | end; 54 | if nargin <7 55 | span=[]; 56 | end; 57 | if ~isempty(span) 58 | semiparam=1; 59 | end; 60 | if ~strcmp(kernel,'frame') | nargin<8; 61 | framematrix=[]; 62 | vector=[]; 63 | end; 64 | if nargin <10 65 | dual=[]; 66 | end; 67 | 68 | % [nl nc] = size(x); 69 | % if~isstruct(xsup) 70 | % [nsup nd] = size(xsup); 71 | % if nc ~= nd 72 | % error('x and xsup must have the same number of column') 73 | % end 74 | % 75 | % 76 | % end; 77 | 78 | if~isstruct(xsup) 79 | [nsup nd] = size(xsup); 80 | else 81 | nsup=length(xsup.indice); 82 | nd= xsup.dimension; 83 | end; 84 | 85 | if~isstruct(x) 86 | [nl nc] = size(x); 87 | else 88 | nl=length(x.indice); 89 | nc= x.dimension; 90 | end; 91 | if nc ~= nd 92 | error('x and xsup must have the same number of column') 93 | end 94 | 95 | 96 | % 97 | % 98 | % 99 | %keyboard 100 | % these is a chunking procedure if number of sv is too large 101 | % or number of data to test is to large. 102 | if ~strcmp(kernel,'numerical')& ~isstruct(x) & ~isstruct(xsup) &(nl > 1000 | nsup > 1000) ; 103 | if ~isempty(w) 104 | chunksize=100; 105 | chunks1=ceil(nsup/chunksize); 106 | chunks2=ceil(nl/chunksize); 107 | y2=zeros(nl,1); 108 | for ch1=1:chunks1 109 | ind1=(1+(ch1-1)*chunksize) : min( nsup, ch1*chunksize); 110 | 111 | for ch2=1:chunks2 112 | ind2=(1+(ch2-1)*chunksize) : min(nl, ch2*chunksize); 113 | kchunk=svmkernel(x(ind2,:),kernel,kerneloption,xsup(ind1,:)); 114 | 115 | y2(ind2)=y2(ind2)+ kchunk*w(ind1) ; 116 | end; 117 | end 118 | if semiparam 119 | y1=span*b; 120 | y=y1+y2; 121 | else 122 | % keyboard 123 | y=y2+b; 124 | end; 125 | else 126 | y=[]; 127 | end; 128 | 129 | elseif isfield(xsup,'datafile') | isfield(x,'datafile'); % data is stored in file and not in memory 130 | 131 | if isstruct(xsup); 132 | nsup=length(xsup.indice); 133 | else 134 | nsup=size(xsup,1); 135 | end; 136 | if isstruct(x); 137 | nl=length(x.indice); 138 | else 139 | nl=size(x,1); 140 | end; 141 | 142 | chunksize=100; 143 | chunks1=ceil(nsup/chunksize); 144 | chunks2=ceil(nl/chunksize); 145 | y2=zeros(nl,1); 146 | for ch1=1:chunks1 147 | ind1=(1+(ch1-1)*chunksize) : min( nsup, ch1*chunksize); 148 | 149 | for ch2=1:chunks2 150 | ind2=(1+(ch2-1)*chunksize) : min(nl, ch2*chunksize); 151 | 152 | %----------------------------------------------------------- 153 | if ~isfield(x,'datafile') 154 | x1=x(ind2,:); 155 | else 156 | x1=fileaccess(x.datafile,x.indice(ind2),x.dimension); 157 | end; 158 | if ~isfield(xsup,'datafile') 159 | x2=xsup(ind1,:); 160 | else 161 | x2=fileaccess(xsup.datafile,xsup.indice(ind1),xsup.dimension); 162 | end; 163 | kchunk=svmkernel(x1,kernel,kerneloption,x2); 164 | %kchunk=svmkernel(x(ind2,:),kernel,kerneloption,xsup(ind1,:)); 165 | 166 | y2(ind2)=y2(ind2)+ kchunk*w(ind1) ; 167 | end; 168 | end 169 | if semiparam 170 | y1=span*b; 171 | y=y1+y2; 172 | else 173 | % keyboard 174 | y=y2+b; 175 | end; 176 | 177 | else 178 | ps=svmkernel(x,kernel,kerneloption,xsup,framematrix,vector,dual); 179 | 180 | 181 | if semiparam 182 | 183 | y1=span*b; 184 | if isempty(w) 185 | y=y1; 186 | y2=zeros(size(y1)); 187 | 188 | else 189 | y2=ps*w; 190 | y = y1+y2; 191 | end; 192 | else 193 | y=ps*w+b; 194 | end; 195 | end; 196 | -------------------------------------------------------------------------------- /vectorize.c: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include 3 | 4 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 5 | { 6 | int k,k1,i,j,n; 7 | double *K, *z; 8 | 9 | K = mxGetPr(prhs[0]); 10 | n = mxGetM(prhs[0]); 11 | 12 | plhs[0]=mxCreateDoubleMatrix(n*(n+1)/2,1,0); 13 | z= mxGetPr(plhs[0]); 14 | k=0; 15 | k1=0; 16 | for (j=0;j<=n-1;j++) 17 | { 18 | for (i=0;i<=j;i++) 19 | { 20 | z[k]=K[i+k1]; 21 | k++; 22 | } 23 | k1 += n; 24 | } 25 | } 26 | 27 | 28 | -------------------------------------------------------------------------------- /vectorize.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/vectorize.mexglx -------------------------------------------------------------------------------- /vectorize_single.c: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include 3 | 4 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 5 | { 6 | int k,k1,i,j,n; 7 | float *K, *z; 8 | 9 | K = mxGetPr(prhs[0]); 10 | n = mxGetM(prhs[0]); 11 | 12 | plhs[0]=mxCreateNumericMatrix(n*(n+1)/2,1,mxSINGLE_CLASS,0); 13 | z= mxGetPr(plhs[0]); 14 | k=0; 15 | k1=0; 16 | for (j=0;j<=n-1;j++) 17 | { 18 | for (i=0;i<=j;i++) 19 | { 20 | z[k]=K[i+k1]; 21 | k++; 22 | } 23 | k1 += n; 24 | } 25 | } 26 | 27 | 28 | -------------------------------------------------------------------------------- /vectorize_single.mexglx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxis1718/SimpleMKL/f44a55c15ad6ec43f5ec94a00c7ddcb9970bd566/vectorize_single.mexglx --------------------------------------------------------------------------------