├── README.md ├── dutradaboost ├── DuTrAdaBoostTrain.m ├── DuTrPredict.m ├── README └── toolbox │ └── libsvm-weights-3.20 │ ├── COPYRIGHT │ ├── FAQ.html │ ├── Makefile │ ├── Makefile.win │ ├── README │ ├── READMEweight │ ├── heart_scale │ ├── heart_scale.wgt │ ├── matlab │ ├── Makefile │ ├── README │ ├── READMEweight │ ├── libsvmread.c │ ├── libsvmread.mexmaci64 │ ├── libsvmwrite.c │ ├── libsvmwrite.mexmaci64 │ ├── make.m │ ├── svm_model_matlab.c │ ├── svm_model_matlab.h │ ├── svmpredict.c │ ├── svmpredict.mexmaci64 │ ├── svmtrain.c │ └── svmtrain.mexmaci64 │ ├── python │ ├── Makefile │ ├── README │ ├── README.weight │ ├── svm.py │ └── svmutil.py │ ├── svm-predict.c │ ├── svm-scale.c │ ├── svm-train.c │ ├── svm.cpp │ ├── svm.def │ ├── svm.h │ └── tools │ ├── README │ ├── checkdata.py │ ├── easy.py │ ├── grid.py │ └── subset.py ├── mmd ├── README ├── mmd.m └── rbf_dot.m ├── multiple-source-tradaboost ├── README ├── mstradaboost.m ├── mstrpredict.m └── toolbox │ └── libsvm-weights-3.20 │ ├── COPYRIGHT │ ├── FAQ.html │ ├── Makefile │ ├── Makefile.win │ ├── README │ ├── READMEweight │ ├── heart_scale │ ├── heart_scale.wgt │ ├── matlab │ ├── Makefile │ ├── README │ ├── READMEweight │ ├── libsvmread.c │ ├── libsvmread.mexmaci64 │ ├── libsvmwrite.c │ ├── libsvmwrite.mexmaci64 │ ├── make.m │ ├── svm_model_matlab.c │ ├── svm_model_matlab.h │ ├── svmpredict.c │ ├── svmpredict.mexmaci64 │ ├── svmtrain.c │ └── svmtrain.mexmaci64 │ ├── python │ ├── Makefile │ ├── README │ ├── README.weight │ ├── svm.py │ └── svmutil.py │ ├── svm-predict.c │ ├── svm-scale.c │ ├── svm-train.c │ ├── svm.cpp │ ├── svm.def │ ├── svm.h │ └── tools │ ├── README │ ├── checkdata.py │ ├── easy.py │ ├── grid.py │ └── subset.py ├── tca ├── README ├── rbf_dot.m ├── reducedvector.m ├── tca.m ├── test.m └── toolbox │ └── libsvm-3.20 │ ├── COPYRIGHT │ ├── FAQ.html │ ├── Makefile │ ├── Makefile.win │ ├── README │ ├── heart_scale │ ├── java │ ├── Makefile │ ├── libsvm.jar │ ├── libsvm │ │ ├── svm.java │ │ ├── svm.m4 │ │ ├── svm_model.java │ │ ├── svm_node.java │ │ ├── svm_parameter.java │ │ ├── svm_print_interface.java │ │ └── svm_problem.java │ ├── svm_predict.java │ ├── svm_scale.java │ ├── svm_toy.java │ ├── svm_train.java │ └── test_applet.html │ ├── matlab │ ├── Makefile │ ├── README │ ├── libsvmread.c │ ├── libsvmread.mexmaci64 │ ├── libsvmwrite.c │ ├── libsvmwrite.mexmaci64 │ ├── make.m │ ├── svm_model_matlab.c │ ├── svm_model_matlab.h │ ├── svmpredict.c │ ├── svmpredict.mexmaci64 │ ├── svmtrain.c │ └── svmtrain.mexmaci64 │ ├── python │ ├── Makefile │ ├── README │ ├── svm.py │ └── svmutil.py │ ├── svm-predict.c │ ├── svm-scale.c │ ├── svm-toy │ ├── gtk │ │ ├── Makefile │ │ ├── callbacks.cpp │ │ ├── callbacks.h │ │ ├── interface.c │ │ ├── interface.h │ │ ├── main.c │ │ └── svm-toy.glade │ ├── qt │ │ ├── Makefile │ │ └── svm-toy.cpp │ └── windows │ │ └── svm-toy.cpp │ ├── svm-train.c │ ├── svm.cpp │ ├── svm.def │ ├── svm.h │ ├── tools │ ├── README │ ├── checkdata.py │ ├── easy.py │ ├── grid.py │ └── subset.py │ └── windows │ ├── libsvm.dll │ ├── libsvmread.mexw64 │ ├── libsvmwrite.mexw64 │ ├── svm-predict.exe │ ├── svm-scale.exe │ ├── svm-toy.exe │ ├── svm-train.exe │ ├── svmpredict.mexw64 │ └── svmtrain.mexw64 └── tradaboost ├── README ├── TrAdaBoostTrain.m ├── TrPredict.m └── toolbox └── libsvm-weights-3.20 ├── COPYRIGHT ├── FAQ.html ├── Makefile ├── Makefile.win ├── README ├── READMEweight ├── heart_scale ├── heart_scale.wgt ├── matlab ├── Makefile ├── README ├── READMEweight ├── libsvmread.c ├── libsvmread.mexmaci64 ├── 
libsvmwrite.c ├── libsvmwrite.mexmaci64 ├── make.m ├── svm_model_matlab.c ├── svm_model_matlab.h ├── svmpredict.c ├── svmpredict.mexmaci64 ├── svmtrain.c └── svmtrain.mexmaci64 ├── python ├── Makefile ├── README ├── README.weight ├── svm.py └── svmutil.py ├── svm-predict.c ├── svm-scale.c ├── svm-train.c ├── svm.cpp ├── svm.def ├── svm.h └── tools ├── README ├── checkdata.py ├── easy.py ├── grid.py └── subset.py /README.md: -------------------------------------------------------------------------------- 1 | # transfer-learning 2 | This repository contains some useful functions in transfer learning. 3 | 4 | 1. tradaboost: Transfer Adaptive Boost. 5 | 6 | 2. mstradaboost: Multiple Sources Tradaboost. 7 | 8 | 3. dutradaboost: Tradaboost Using dynamic updates. 9 | 10 | 4. tca: Transfer Component Analysis 11 | 12 | 5. mmd: Maximum Mean Discrepancy. 13 | 14 | Many of the codes in this repository depends on libsvm and libsvm-weights. Please compile them before use. If you don't know how to compile, refer to the README in the toolbox. 15 | -------------------------------------------------------------------------------- /dutradaboost/DuTrAdaBoostTrain.m: -------------------------------------------------------------------------------- 1 | function [model, beta ] = DuTrAdaboostTrain(tdX,tdY,tsX,tsY) 2 | 3 | %% tdX: features of source domain 4 | %% tdY: labels of source domain 5 | %% tsX: features of target domain 6 | %% tsY: labels of target domain 7 | 8 | tX = [tdX ; tsX]; 9 | tY = [tdY ; tsY]; 10 | n = size(tdY,1); 11 | m = size(tsY,1); 12 | T = 20; %iteration number 13 | w = ones(m+n,1); 14 | model = cell(1,T); 15 | beta = zeros(1,T); 16 | bsrc = 1/(1+sqrt(2*log(n)/T)); 17 | for t = 1:T 18 | %p = w./(sum(abs(w))); 19 | model{t} = svmtrain(w,tY,tX,'-t 2'); % using linear kernel 20 | predict = svmpredict(tY,tX,model{t}); 21 | sW = sum(w(n+1:m+n)); 22 | et = sum(w(n+1:m+n).*(predict(n+1:m+n)~=tsY))/sW; 23 | if et >= 0.5 24 | et = 0.499; 25 | elseif et == 0 26 | et = 0.001; 27 | end 28 | beta(t) = et/(1-et); 29 | Ct = 2*(1-et); 30 | wUpdate = [(Ct*bsrc*ones(n,1)).^(predict(1:n)~=tdY) ; (beta(t)*ones(m,1)).^(-(predict(n+1:m+n)~=tsY)) ]; 31 | w = w.*wUpdate; 32 | end 33 | end 34 | 35 | -------------------------------------------------------------------------------- /dutradaboost/DuTrPredict.m: -------------------------------------------------------------------------------- 1 | function Ydash = DuTrPredict(X, svmmodels, beta) 2 | N = length(svmmodels); 3 | start = ceil(N/2); 4 | l = size(X,1); 5 | yOne = ones(l,1); 6 | yTwo = ones(l,1); 7 | Ydash = ones(l,1); 8 | for i = start:N 9 | predict = svmpredict(yOne,X,svmmodels{i}); 10 | yOne = yOne.*((beta(i)*ones(l,1)).^(-predict)); 11 | yTwo = yTwo.*((beta(i)*ones(l,1)).^(-0.5)); 12 | end 13 | Ydash(yOne < yTwo) = -1; 14 | end -------------------------------------------------------------------------------- /dutradaboost/README: -------------------------------------------------------------------------------- 1 | This is the code for ‘Adaptive Boosting for Transfer Learning using Dynamic Updates’ by Samir Al-Stouhi and Chandan K. Reddy. Usually this code performs a little bit better than tradaboost. 2 | 3 | 4 | DuTrAdaBoostTrain.m is for training 5 | DuTrPredict.m is for prediction. 6 | 7 | Please compile the toolbox and addpath to toolbox before using. 
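A minimal usage sketch (not from the paper; the variable names follow the comments in DuTrAdaBoostTrain.m, and testX is a hypothetical matrix of target test features). Run it after the addpath command below. Note that the file is named DuTrAdaBoostTrain.m while the function inside is declared DuTrAdaboostTrain; MATLAB dispatches on the file name.

[models, beta] = DuTrAdaBoostTrain(tdX, tdY, tsX, tsY);  % tdX/tdY: source features/labels; tsX/tsY: target features/labels
Ydash = DuTrPredict(testX, models, beta);                % predicted labels are +1 or -1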
8 | addpath('./toolbox/libsvm-weights-3.20/matlab'); -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/COPYRIGHT: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | 3. Neither name of copyright holders nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR 25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/Makefile: -------------------------------------------------------------------------------- 1 | CXX ?= g++ 2 | CFLAGS = -Wall -Wconversion -O3 -fPIC 3 | SHVER = 2 4 | OS = $(shell uname) 5 | 6 | all: svm-train svm-predict svm-scale 7 | 8 | lib: svm.o 9 | if [ "$(OS)" = "Darwin" ]; then \ 10 | SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)"; \ 11 | else \ 12 | SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \ 13 | fi; \ 14 | $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER) 15 | 16 | svm-predict: svm-predict.c svm.o 17 | $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm 18 | svm-train: svm-train.c svm.o 19 | $(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm 20 | svm-scale: svm-scale.c 21 | $(CXX) $(CFLAGS) svm-scale.c -o svm-scale 22 | svm.o: svm.cpp svm.h 23 | $(CXX) $(CFLAGS) -c svm.cpp 24 | clean: 25 | rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so.$(SHVER) 26 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/Makefile.win: -------------------------------------------------------------------------------- 1 | #You must ensure nmake.exe, cl.exe, link.exe are in system path. 2 | #VCVARS32.bat 3 | #Under dosbox prompt 4 | #nmake -f Makefile.win 5 | 6 | ########################################## 7 | CXX = cl.exe 8 | CFLAGS = /nologo /O2 /EHsc /I. 
/D _WIN32 /D _CRT_SECURE_NO_DEPRECATE 9 | TARGET = windows 10 | 11 | all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib 12 | 13 | $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj 14 | $(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe 15 | 16 | $(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj 17 | $(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe 18 | 19 | $(TARGET)\svm-scale.exe: svm.h svm-scale.c 20 | $(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe 21 | 22 | $(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp 23 | $(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe 24 | 25 | svm.obj: svm.cpp svm.h 26 | $(CXX) $(CFLAGS) -c svm.cpp 27 | 28 | lib: svm.cpp svm.h svm.def 29 | $(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def 30 | 31 | clean: 32 | -erase /Q *.obj $(TARGET)\. 33 | 34 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/READMEweight: -------------------------------------------------------------------------------- 1 | Usage: 2 | use '-W weight_file' to assign weights for each instance. 3 | Please make sure all weights are non-negative. 4 | 5 | Example: 6 | $ ./svm-train -W heart_scale.wgt heart_scale 7 | 8 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/heart_scale.wgt: -------------------------------------------------------------------------------- 1 | 20 2 | 10 3 | 5.5 4 | 1 5 | 1 6 | 1 7 | 1 8 | 1 9 | 1 10 | 1 11 | 1 12 | 1 13 | 1 14 | 1 15 | 1 16 | 1 17 | 1 18 | 1 19 | 1 20 | 1 21 | 1 22 | 1 23 | 1 24 | 1 25 | 1 26 | 1 27 | 1 28 | 1 29 | 1 30 | 1 31 | 1 32 | 1 33 | 1 34 | 1 35 | 1 36 | 1 37 | 1 38 | 1 39 | 1 40 | 1 41 | 1 42 | 1 43 | 1 44 | 1 45 | 1 46 | 1 47 | 1 48 | 1 49 | 1 50 | 1 51 | 1 52 | 1 53 | 1 54 | 1 55 | 1 56 | 1 57 | 1 58 | 1 59 | 1 60 | 1 61 | 1 62 | 1 63 | 1 64 | 1 65 | 1 66 | 1 67 | 1 68 | 1 69 | 1 70 | 1 71 | 1 72 | 1 73 | 1 74 | 1 75 | 1 76 | 1 77 | 1 78 | 1 79 | 1 80 | 1 81 | 1 82 | 1 83 | 1 84 | 1 85 | 1 86 | 1 87 | 1 88 | 1 89 | 1 90 | 1 91 | 1 92 | 1 93 | 1 94 | 1 95 | 1 96 | 1 97 | 1 98 | 1 99 | 1 100 | 1 101 | 1 102 | 1 103 | 1 104 | 1 105 | 1 106 | 1 107 | 1 108 | 1 109 | 1 110 | 1 111 | 1 112 | 1 113 | 1 114 | 1 115 | 1 116 | 1 117 | 1 118 | 1 119 | 1 120 | 1 121 | 1 122 | 1 123 | 1 124 | 1 125 | 1 126 | 1 127 | 1 128 | 1 129 | 1 130 | 1 131 | 1 132 | 1 133 | 1 134 | 1 135 | 1 136 | 1 137 | 1 138 | 1 139 | 1 140 | 1 141 | 1 142 | 1 143 | 1 144 | 1 145 | 1 146 | 1 147 | 1 148 | 1 149 | 1 150 | 1 151 | 1 152 | 1 153 | 1 154 | 1 155 | 1 156 | 1 157 | 1 158 | 1 159 | 1 160 | 1 161 | 1 162 | 1 163 | 1 164 | 1 165 | 1 166 | 1 167 | 1 168 | 1 169 | 1 170 | 1 171 | 1 172 | 1 173 | 1 174 | 1 175 | 1 176 | 1 177 | 1 178 | 1 179 | 1 180 | 1 181 | 1 182 | 1 183 | 1 184 | 1 185 | 1 186 | 1 187 | 1 188 | 1 189 | 1 190 | 1 191 | 1 192 | 1 193 | 1 194 | 1 195 | 1 196 | 1 197 | 1 198 | 1 199 | 1 200 | 1 201 | 1 202 | 1 203 | 1 204 | 1 205 | 1 206 | 1 207 | 1 208 | 1 209 | 1 210 | 1 211 | 1 212 | 1 213 | 1 214 | 1 215 | 1 216 | 1 217 | 1 218 | 1 219 | 1 220 | 1 221 | 1 222 | 1 223 | 1 224 | 1 225 | 1 226 | 1 227 | 1 228 | 1 229 | 1 230 | 1 231 | 1 232 | 1 233 | 1 234 | 1 235 | 1 236 | 1 237 | 1 238 | 1 239 | 1 240 | 1 241 | 1 242 | 1 243 | 1 244 | 1 245 | 1 246 | 1 247 | 1 248 | 1 249 | 1 250 | 1 251 | 1 252 | 1 253 | 1 254 | 1 255 | 
1 256 | 1 257 | 1 258 | 1 259 | 1 260 | 1 261 | 1 262 | 1 263 | 1 264 | 1 265 | 1 266 | 1 267 | 1 268 | 1 269 | 1 270 | 1 271 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile is used under Linux 2 | 3 | MATLABDIR ?= /usr/local/matlab 4 | # for Mac 5 | # MATLABDIR ?= /opt/local/matlab 6 | 7 | CXX ?= g++ 8 | #CXX = g++-4.1 9 | CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I.. 10 | 11 | MEX = $(MATLABDIR)/bin/mex 12 | MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)" 13 | # comment the following line if you use MATLAB on 32-bit computer 14 | MEX_OPTION += -largeArrayDims 15 | MEX_EXT = $(shell $(MATLABDIR)/bin/mexext) 16 | 17 | all: matlab 18 | 19 | matlab: binary 20 | 21 | octave: 22 | @echo "please type make under Octave" 23 | 24 | binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT) 25 | 26 | svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o 27 | $(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o 28 | 29 | svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o 30 | $(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o 31 | 32 | libsvmread.$(MEX_EXT): libsvmread.c 33 | $(MEX) $(MEX_OPTION) libsvmread.c 34 | 35 | libsvmwrite.$(MEX_EXT): libsvmwrite.c 36 | $(MEX) $(MEX_OPTION) libsvmwrite.c 37 | 38 | svm_model_matlab.o: svm_model_matlab.c ../svm.h 39 | $(CXX) $(CFLAGS) -c svm_model_matlab.c 40 | 41 | ../svm.o: ../svm.cpp ../svm.h 42 | make -C .. svm.o 43 | 44 | clean: 45 | rm -f *~ *.o *.mex* *.obj ../svm.o 46 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/READMEweight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a simple interface to LIBSVM with instance weight support 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for the detail. 10 | 11 | Usage 12 | ===== 13 | 14 | matlab> model = svmtrain(training_weight_vector, training_label_vector, training_instance_matrix, 'libsvm_options') 15 | 16 | -training_weight_vector: 17 | An m by 1 vector of training weights. (type must be double) 18 | -training_label_vector: 19 | An m by 1 vector of training labels. (type must be double) 20 | -training_instance_matrix: 21 | An m by n matrix of m training instances with n features. (type must be double) 22 | -libsvm_options: 23 | A string of training options in the same format as that of LIBSVM. 
24 | 25 | Examples 26 | ======== 27 | 28 | Train and test on the provided data heart_scale: 29 | 30 | matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale'); 31 | matlab> heart_scale_weight = load('../heart_scale.wgt'); 32 | matlab> model = svmtrain(heart_scale_weight, heart_scale_label, heart_scale_inst, '-c 1'); 33 | matlab> [predict_label, accuracy, dec_values] = svmpredict(heart_scale_label, heart_scale_inst, model); % test the training data 34 | 35 | Train and test without weights: 36 | 37 | matlab> model = svmtrain([], heart_scale_label, heart_scale_inst, '-c 1'); 38 | 39 | 40 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "mex.h" 8 | 9 | #ifdef MX_API_VER 10 | #if MX_API_VER < 0x07030000 11 | typedef int mwIndex; 12 | #endif 13 | #endif 14 | #ifndef max 15 | #define max(x,y) (((x)>(y))?(x):(y)) 16 | #endif 17 | #ifndef min 18 | #define min(x,y) (((x)<(y))?(x):(y)) 19 | #endif 20 | 21 | void exit_with_help() 22 | { 23 | mexPrintf( 24 | "Usage: [label_vector, instance_matrix] = libsvmread('filename');\n" 25 | ); 26 | } 27 | 28 | static void fake_answer(int nlhs, mxArray *plhs[]) 29 | { 30 | int i; 31 | for(i=0;i start from 0 86 | strtok(line," \t"); // label 87 | while (1) 88 | { 89 | idx = strtok(NULL,":"); // index:value 90 | val = strtok(NULL," \t"); 91 | if(val == NULL) 92 | break; 93 | 94 | errno = 0; 95 | index = (int) strtol(idx,&endptr,10); 96 | if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index) 97 | { 98 | mexPrintf("Wrong input format at line %d\n",l+1); 99 | fake_answer(nlhs, plhs); 100 | return; 101 | } 102 | else 103 | inst_max_index = index; 104 | 105 | min_index = min(min_index, index); 106 | elements++; 107 | } 108 | max_index = max(max_index, inst_max_index); 109 | l++; 110 | } 111 | rewind(fp); 112 | 113 | // y 114 | plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL); 115 | // x^T 116 | if (min_index <= 0) 117 | plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL); 118 | else 119 | plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL); 120 | 121 | labels = mxGetPr(plhs[0]); 122 | samples = mxGetPr(plhs[1]); 123 | ir = mxGetIr(plhs[1]); 124 | jc = mxGetJc(plhs[1]); 125 | 126 | k=0; 127 | for(i=0;i start from 0 158 | 159 | errno = 0; 160 | samples[k] = strtod(val,&endptr); 161 | if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 162 | { 163 | mexPrintf("Wrong input format at line %d\n",i+1); 164 | fake_answer(nlhs, plhs); 165 | return; 166 | } 167 | ++k; 168 | } 169 | } 170 | jc[l] = k; 171 | 172 | fclose(fp); 173 | free(line); 174 | 175 | { 176 | mxArray *rhs[1], *lhs[1]; 177 | rhs[0] = plhs[1]; 178 | if(mexCallMATLAB(1, lhs, 1, rhs, "transpose")) 179 | { 180 | mexPrintf("Error: cannot transpose problem\n"); 181 | fake_answer(nlhs, plhs); 182 | return; 183 | } 184 | plhs[1] = lhs[0]; 185 | } 186 | } 187 | 188 | void mexFunction( int nlhs, mxArray *plhs[], 189 | int nrhs, const mxArray *prhs[] ) 190 | { 191 | char filename[256]; 192 | 193 | if(nrhs != 1 || nlhs != 2) 194 | { 195 | exit_with_help(); 196 | fake_answer(nlhs, plhs); 197 | return; 198 | } 199 | 200 | mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1); 201 | 202 | if(filename == NULL) 203 | { 204 | mexPrintf("Error: filename is NULL\n"); 205 | return; 206 | } 207 | 
208 | read_problem(filename, nlhs, plhs); 209 | 210 | return; 211 | } 212 | 213 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64 -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "mex.h" 6 | 7 | #ifdef MX_API_VER 8 | #if MX_API_VER < 0x07030000 9 | typedef int mwIndex; 10 | #endif 11 | #endif 12 | 13 | void exit_with_help() 14 | { 15 | mexPrintf( 16 | "Usage: libsvmwrite('filename', label_vector, instance_matrix);\n" 17 | ); 18 | } 19 | 20 | static void fake_answer(int nlhs, mxArray *plhs[]) 21 | { 22 | int i; 23 | for(i=0;i 0) 89 | { 90 | exit_with_help(); 91 | fake_answer(nlhs, plhs); 92 | return; 93 | } 94 | 95 | // Transform the input Matrix to libsvm format 96 | if(nrhs == 3) 97 | { 98 | char filename[256]; 99 | if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2])) 100 | { 101 | mexPrintf("Error: label vector and instance matrix must be double\n"); 102 | return; 103 | } 104 | 105 | mxGetString(prhs[0], filename, mxGetN(prhs[0])+1); 106 | 107 | if(mxIsSparse(prhs[2])) 108 | libsvmwrite(filename, prhs[1], prhs[2]); 109 | else 110 | { 111 | mexPrintf("Instance_matrix must be sparse\n"); 112 | return; 113 | } 114 | } 115 | else 116 | { 117 | exit_with_help(); 118 | return; 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/dutradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64 -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/make.m: -------------------------------------------------------------------------------- 1 | % This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix 2 | 3 | try 4 | Type = ver; 5 | % This part is for OCTAVE 6 | if(strcmp(Type(1).Name, 'Octave') == 1) 7 | mex libsvmread.c 8 | mex libsvmwrite.c 9 | mex svmtrain.c ../svm.cpp svm_model_matlab.c 10 | mex svmpredict.c ../svm.cpp svm_model_matlab.c 11 | % This part is for MATLAB 12 | % Add -largeArrayDims on 64-bit machines of MATLAB 13 | else 14 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c 15 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c 16 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c 17 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c 18 | end 19 | catch 20 | fprintf('If make.m fails, please check README about detailed instructions.\n'); 21 | end 22 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/svm_model_matlab.h: -------------------------------------------------------------------------------- 1 | const char 
*model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model); 2 | struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message); 3 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/dutradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64 -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/dutradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64 -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/python/Makefile: -------------------------------------------------------------------------------- 1 | all = lib 2 | 3 | lib: 4 | make -C .. lib 5 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/python/README.weight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a Python interface to LIBSVM with instance weight support 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for detail. 10 | 11 | USAGE 12 | ===== 13 | 14 | The usage is bascally the same as the version without supporting 15 | instance weights. We only show differences below. 16 | 17 | - Function: svm_train 18 | 19 | There are three ways to call svm_train() 20 | 21 | >>> model = svm_train(W, y, x [, 'training_options']) 22 | >>> model = svm_train(prob [, 'training_options']) 23 | >>> model = svm_train(prob, param) 24 | 25 | W: a list/tuple of l training weights (type must be double). 26 | Use [] if no weights. 27 | 28 | y: a list/tuple of l training labels (type must be int/double). 29 | 30 | x: a list/tuple of l training instances. The feature vector of 31 | each training instance is an instance of list/tuple or dictionary. 32 | 33 | training_options: a string in the same form as that for LIBSVM command 34 | mode. 35 | 36 | prob: an svm_problem instance generated by calling 37 | svm_problem(W, y, x). 38 | 39 | param: an svm_parameter instance generated by calling 40 | svm_parameter('training_options') 41 | 42 | model: the returned svm_model instance. See svm.h for details of this 43 | structure. If '-v' is specified, cross validation is 44 | conducted and the returned model is just a scalar: cross-validation 45 | accuracy for classification and mean-squared error for regression. 46 | 47 | To train the same data many times with different 48 | parameters, the second and the third ways should be faster.. 
49 | 50 | Examples: 51 | 52 | >>> y, x = svm_read_problem('../heart_scale') 53 | >>> W = [1] * len(y) 54 | >>> W[0] = 10 55 | >>> prob = svm_problem(W, y, x) 56 | >>> param = svm_parameter('-s 3 -c 5 -h 0') 57 | >>> m = svm_train([], y, x, '-c 5') 58 | >>> m = svm_train(W, y, x) 59 | >>> m = svm_train(prob, '-t 2 -c 5') 60 | >>> m = svm_train(prob, param) 61 | >>> CV_ACC = svm_train(W, y, x, '-v 3') 62 | 63 | 64 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/svm-predict.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "svm.h" 7 | 8 | int print_null(const char *s,...) {return 0;} 9 | 10 | static int (*info)(const char *fmt,...) = &printf; 11 | 12 | struct svm_node *x; 13 | int max_nr_attr = 64; 14 | 15 | struct svm_model* model; 16 | int predict_probability=0; 17 | 18 | static char *line = NULL; 19 | static int max_line_len; 20 | 21 | static char* readline(FILE *input) 22 | { 23 | int len; 24 | 25 | if(fgets(line,max_line_len,input) == NULL) 26 | return NULL; 27 | 28 | while(strrchr(line,'\n') == NULL) 29 | { 30 | max_line_len *= 2; 31 | line = (char *) realloc(line,max_line_len); 32 | len = (int) strlen(line); 33 | if(fgets(line+len,max_line_len-len,input) == NULL) 34 | break; 35 | } 36 | return line; 37 | } 38 | 39 | void exit_input_error(int line_num) 40 | { 41 | fprintf(stderr,"Wrong input format at line %d\n", line_num); 42 | exit(1); 43 | } 44 | 45 | void predict(FILE *input, FILE *output) 46 | { 47 | int correct = 0; 48 | int total = 0; 49 | double error = 0; 50 | double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0; 51 | 52 | int svm_type=svm_get_svm_type(model); 53 | int nr_class=svm_get_nr_class(model); 54 | double *prob_estimates=NULL; 55 | int j; 56 | 57 | if(predict_probability) 58 | { 59 | if (svm_type==NU_SVR || svm_type==EPSILON_SVR) 60 | info("Prob. 
model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model)); 61 | else 62 | { 63 | int *labels=(int *) malloc(nr_class*sizeof(int)); 64 | svm_get_labels(model,labels); 65 | prob_estimates = (double *) malloc(nr_class*sizeof(double)); 66 | fprintf(output,"labels"); 67 | for(j=0;j start from 0 82 | 83 | label = strtok(line," \t\n"); 84 | if(label == NULL) // empty line 85 | exit_input_error(total+1); 86 | 87 | target_label = strtod(label,&endptr); 88 | if(endptr == label || *endptr != '\0') 89 | exit_input_error(total+1); 90 | 91 | while(1) 92 | { 93 | if(i>=max_nr_attr-1) // need one more for index = -1 94 | { 95 | max_nr_attr *= 2; 96 | x = (struct svm_node *) realloc(x,max_nr_attr*sizeof(struct svm_node)); 97 | } 98 | 99 | idx = strtok(NULL,":"); 100 | val = strtok(NULL," \t"); 101 | 102 | if(val == NULL) 103 | break; 104 | errno = 0; 105 | x[i].index = (int) strtol(idx,&endptr,10); 106 | if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index) 107 | exit_input_error(total+1); 108 | else 109 | inst_max_index = x[i].index; 110 | 111 | errno = 0; 112 | x[i].value = strtod(val,&endptr); 113 | if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 114 | exit_input_error(total+1); 115 | 116 | ++i; 117 | } 118 | x[i].index = -1; 119 | 120 | if (predict_probability && (svm_type==C_SVC || svm_type==NU_SVC)) 121 | { 122 | predict_label = svm_predict_probability(model,x,prob_estimates); 123 | fprintf(output,"%g",predict_label); 124 | for(j=0;j=argc-2) 195 | exit_with_help(); 196 | 197 | input = fopen(argv[i],"r"); 198 | if(input == NULL) 199 | { 200 | fprintf(stderr,"can't open input file %s\n",argv[i]); 201 | exit(1); 202 | } 203 | 204 | output = fopen(argv[i+2],"w"); 205 | if(output == NULL) 206 | { 207 | fprintf(stderr,"can't open output file %s\n",argv[i+2]); 208 | exit(1); 209 | } 210 | 211 | if((model=svm_load_model(argv[i+1]))==0) 212 | { 213 | fprintf(stderr,"can't open model file %s\n",argv[i+1]); 214 | exit(1); 215 | } 216 | 217 | x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node)); 218 | if(predict_probability) 219 | { 220 | if(svm_check_probability_model(model)==0) 221 | { 222 | fprintf(stderr,"Model does not support probabiliy estimates\n"); 223 | exit(1); 224 | } 225 | } 226 | else 227 | { 228 | if(svm_check_probability_model(model)!=0) 229 | info("Model supports probability estimates, but disabled in prediction.\n"); 230 | } 231 | 232 | predict(input,output); 233 | svm_free_and_destroy_model(&model); 234 | free(x); 235 | free(line); 236 | fclose(input); 237 | fclose(output); 238 | return 0; 239 | } 240 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/svm.def: -------------------------------------------------------------------------------- 1 | LIBRARY libsvm 2 | EXPORTS 3 | svm_train @1 4 | svm_cross_validation @2 5 | svm_save_model @3 6 | svm_load_model @4 7 | svm_get_svm_type @5 8 | svm_get_nr_class @6 9 | svm_get_labels @7 10 | svm_get_svr_probability @8 11 | svm_predict_values @9 12 | svm_predict @10 13 | svm_predict_probability @11 14 | svm_free_model_content @12 15 | svm_free_and_destroy_model @13 16 | svm_destroy_param @14 17 | svm_check_parameter @15 18 | svm_check_probability_model @16 19 | svm_set_print_string_function @17 20 | svm_get_sv_indices @18 21 | svm_get_nr_sv @19 22 | 
-------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/svm.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIBSVM_H 2 | #define _LIBSVM_H 3 | 4 | #define LIBSVM_VERSION 320 5 | 6 | #ifdef __cplusplus 7 | extern "C" { 8 | #endif 9 | 10 | extern int libsvm_version; 11 | 12 | struct svm_node 13 | { 14 | int index; 15 | double value; 16 | }; 17 | 18 | struct svm_problem 19 | { 20 | int l; 21 | double *y; 22 | struct svm_node **x; 23 | double *W; /* instance weight */ 24 | }; 25 | 26 | enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR }; /* svm_type */ 27 | enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */ 28 | 29 | struct svm_parameter 30 | { 31 | int svm_type; 32 | int kernel_type; 33 | int degree; /* for poly */ 34 | double gamma; /* for poly/rbf/sigmoid */ 35 | double coef0; /* for poly/sigmoid */ 36 | 37 | /* these are for training only */ 38 | double cache_size; /* in MB */ 39 | double eps; /* stopping criteria */ 40 | double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ 41 | int nr_weight; /* for C_SVC */ 42 | int *weight_label; /* for C_SVC */ 43 | double* weight; /* for C_SVC */ 44 | double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ 45 | double p; /* for EPSILON_SVR */ 46 | int shrinking; /* use the shrinking heuristics */ 47 | int probability; /* do probability estimates */ 48 | }; 49 | 50 | // 51 | // svm_model 52 | // 53 | struct svm_model 54 | { 55 | struct svm_parameter param; /* parameter */ 56 | int nr_class; /* number of classes, = 2 in regression/one class svm */ 57 | int l; /* total #SV */ 58 | struct svm_node **SV; /* SVs (SV[l]) */ 59 | double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */ 60 | double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */ 61 | double *probA; /* pariwise probability information */ 62 | double *probB; 63 | int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_traning_data] to indicate SVs in the training set */ 64 | 65 | /* for classification only */ 66 | 67 | int *label; /* label of each class (label[k]) */ 68 | int *nSV; /* number of SVs for each class (nSV[k]) */ 69 | /* nSV[0] + nSV[1] + ... 
+ nSV[k-1] = l */ 70 | /* XXX */ 71 | int free_sv; /* 1 if svm_model is created by svm_load_model*/ 72 | /* 0 if svm_model is created by svm_train */ 73 | }; 74 | 75 | struct svm_model *svm_train(const struct svm_problem *prob, const struct svm_parameter *param); 76 | void svm_cross_validation(const struct svm_problem *prob, const struct svm_parameter *param, int nr_fold, double *target); 77 | 78 | int svm_save_model(const char *model_file_name, const struct svm_model *model); 79 | struct svm_model *svm_load_model(const char *model_file_name); 80 | 81 | int svm_get_svm_type(const struct svm_model *model); 82 | int svm_get_nr_class(const struct svm_model *model); 83 | void svm_get_labels(const struct svm_model *model, int *label); 84 | void svm_get_sv_indices(const struct svm_model *model, int *sv_indices); 85 | int svm_get_nr_sv(const struct svm_model *model); 86 | double svm_get_svr_probability(const struct svm_model *model); 87 | 88 | double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values); 89 | double svm_predict(const struct svm_model *model, const struct svm_node *x); 90 | double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates); 91 | 92 | void svm_free_model_content(struct svm_model *model_ptr); 93 | void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr); 94 | void svm_destroy_param(struct svm_parameter *param); 95 | 96 | const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param); 97 | int svm_check_probability_model(const struct svm_model *model); 98 | 99 | void svm_set_print_string_function(void (*print_func)(const char *)); 100 | 101 | #ifdef __cplusplus 102 | } 103 | #endif 104 | 105 | #endif /* _LIBSVM_H */ 106 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/tools/checkdata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # 4 | # A format checker for LIBSVM 5 | # 6 | 7 | # 8 | # Copyright (c) 2007, Rong-En Fan 9 | # 10 | # All rights reserved. 11 | # 12 | # This program is distributed under the same license of the LIBSVM package. 
13 | # 14 | 15 | from sys import argv, exit 16 | import os.path 17 | 18 | def err(line_no, msg): 19 | print("line {0}: {1}".format(line_no, msg)) 20 | 21 | # works like float() but does not accept nan and inf 22 | def my_float(x): 23 | if x.lower().find("nan") != -1 or x.lower().find("inf") != -1: 24 | raise ValueError 25 | 26 | return float(x) 27 | 28 | def main(): 29 | if len(argv) != 2: 30 | print("Usage: {0} dataset".format(argv[0])) 31 | exit(1) 32 | 33 | dataset = argv[1] 34 | 35 | if not os.path.exists(dataset): 36 | print("dataset {0} not found".format(dataset)) 37 | exit(1) 38 | 39 | line_no = 1 40 | error_line_count = 0 41 | for line in open(dataset, 'r'): 42 | line_error = False 43 | 44 | # each line must end with a newline character 45 | if line[-1] != '\n': 46 | err(line_no, "missing a newline character in the end") 47 | line_error = True 48 | 49 | nodes = line.split() 50 | 51 | # check label 52 | try: 53 | label = nodes.pop(0) 54 | 55 | if label.find(',') != -1: 56 | # multi-label format 57 | try: 58 | for l in label.split(','): 59 | l = my_float(l) 60 | except: 61 | err(line_no, "label {0} is not a valid multi-label form".format(label)) 62 | line_error = True 63 | else: 64 | try: 65 | label = my_float(label) 66 | except: 67 | err(line_no, "label {0} is not a number".format(label)) 68 | line_error = True 69 | except: 70 | err(line_no, "missing label, perhaps an empty line?") 71 | line_error = True 72 | 73 | # check features 74 | prev_index = -1 75 | for i in range(len(nodes)): 76 | try: 77 | (index, value) = nodes[i].split(':') 78 | 79 | index = int(index) 80 | value = my_float(value) 81 | 82 | # precomputed kernel's index starts from 0 and LIBSVM 83 | # checks it. Hence, don't treat index 0 as an error. 84 | if index < 0: 85 | err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i])) 86 | line_error = True 87 | elif index <= prev_index: 88 | err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i])) 89 | line_error = True 90 | prev_index = index 91 | except: 92 | err(line_no, "feature '{0}' not an : pair, integer, real number ".format(nodes[i])) 93 | line_error = True 94 | 95 | line_no += 1 96 | 97 | if line_error: 98 | error_line_count += 1 99 | 100 | if error_line_count > 0: 101 | print("Found {0} lines with error.".format(error_line_count)) 102 | return 1 103 | else: 104 | print("No error.") 105 | return 0 106 | 107 | if __name__ == "__main__": 108 | exit(main()) 109 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/tools/easy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | from subprocess import * 6 | 7 | if len(sys.argv) <= 1: 8 | print('Usage: {0} training_file [testing_file]'.format(sys.argv[0])) 9 | raise SystemExit 10 | 11 | # svm, grid, and gnuplot executable files 12 | 13 | is_win32 = (sys.platform == 'win32') 14 | if not is_win32: 15 | svmscale_exe = "../svm-scale" 16 | svmtrain_exe = "../svm-train" 17 | svmpredict_exe = "../svm-predict" 18 | grid_py = "./grid.py" 19 | gnuplot_exe = "/usr/bin/gnuplot" 20 | else: 21 | # example for windows 22 | svmscale_exe = r"..\windows\svm-scale.exe" 23 | svmtrain_exe = r"..\windows\svm-train.exe" 24 | svmpredict_exe = r"..\windows\svm-predict.exe" 25 | gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe" 26 | grid_py = r".\grid.py" 27 | 28 | assert 
os.path.exists(svmscale_exe),"svm-scale executable not found" 29 | assert os.path.exists(svmtrain_exe),"svm-train executable not found" 30 | assert os.path.exists(svmpredict_exe),"svm-predict executable not found" 31 | assert os.path.exists(gnuplot_exe),"gnuplot executable not found" 32 | assert os.path.exists(grid_py),"grid.py not found" 33 | 34 | train_pathname = sys.argv[1] 35 | assert os.path.exists(train_pathname),"training file not found" 36 | file_name = os.path.split(train_pathname)[1] 37 | scaled_file = file_name + ".scale" 38 | model_file = file_name + ".model" 39 | range_file = file_name + ".range" 40 | 41 | if len(sys.argv) > 2: 42 | test_pathname = sys.argv[2] 43 | file_name = os.path.split(test_pathname)[1] 44 | assert os.path.exists(test_pathname),"testing file not found" 45 | scaled_test_file = file_name + ".scale" 46 | predict_test_file = file_name + ".predict" 47 | 48 | cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file) 49 | print('Scaling training data...') 50 | Popen(cmd, shell = True, stdout = PIPE).communicate() 51 | 52 | cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file) 53 | print('Cross validation...') 54 | f = Popen(cmd, shell = True, stdout = PIPE).stdout 55 | 56 | line = '' 57 | while True: 58 | last_line = line 59 | line = f.readline() 60 | if not line: break 61 | c,g,rate = map(float,last_line.split()) 62 | 63 | print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate)) 64 | 65 | cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file) 66 | print('Training...') 67 | Popen(cmd, shell = True, stdout = PIPE).communicate() 68 | 69 | print('Output model: {0}'.format(model_file)) 70 | if len(sys.argv) > 2: 71 | cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file) 72 | print('Scaling testing data...') 73 | Popen(cmd, shell = True, stdout = PIPE).communicate() 74 | 75 | cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file) 76 | print('Testing...') 77 | Popen(cmd, shell = True).communicate() 78 | 79 | print('Output prediction: {0}'.format(predict_test_file)) 80 | -------------------------------------------------------------------------------- /dutradaboost/toolbox/libsvm-weights-3.20/tools/subset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, sys, math, random 4 | from collections import defaultdict 5 | 6 | if sys.version_info[0] >= 3: 7 | xrange = range 8 | 9 | def exit_with_help(argv): 10 | print("""\ 11 | Usage: {0} [options] dataset subset_size [output1] [output2] 12 | 13 | This script randomly selects a subset of the dataset. 
14 | 15 | options: 16 | -s method : method of selection (default 0) 17 | 0 -- stratified selection (classification only) 18 | 1 -- random selection 19 | 20 | output1 : the subset (optional) 21 | output2 : rest of the data (optional) 22 | If output1 is omitted, the subset will be printed on the screen.""".format(argv[0])) 23 | exit(1) 24 | 25 | def process_options(argv): 26 | argc = len(argv) 27 | if argc < 3: 28 | exit_with_help(argv) 29 | 30 | # default method is stratified selection 31 | method = 0 32 | subset_file = sys.stdout 33 | rest_file = None 34 | 35 | i = 1 36 | while i < argc: 37 | if argv[i][0] != "-": 38 | break 39 | if argv[i] == "-s": 40 | i = i + 1 41 | method = int(argv[i]) 42 | if method not in [0,1]: 43 | print("Unknown selection method {0}".format(method)) 44 | exit_with_help(argv) 45 | i = i + 1 46 | 47 | dataset = argv[i] 48 | subset_size = int(argv[i+1]) 49 | if i+2 < argc: 50 | subset_file = open(argv[i+2],'w') 51 | if i+3 < argc: 52 | rest_file = open(argv[i+3],'w') 53 | 54 | return dataset, subset_size, method, subset_file, rest_file 55 | 56 | def random_selection(dataset, subset_size): 57 | l = sum(1 for line in open(dataset,'r')) 58 | return sorted(random.sample(xrange(l), subset_size)) 59 | 60 | def stratified_selection(dataset, subset_size): 61 | labels = [line.split(None,1)[0] for line in open(dataset)] 62 | label_linenums = defaultdict(list) 63 | for i, label in enumerate(labels): 64 | label_linenums[label] += [i] 65 | 66 | l = len(labels) 67 | remaining = subset_size 68 | ret = [] 69 | 70 | # classes with fewer data are sampled first; otherwise 71 | # some rare classes may not be selected 72 | for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])): 73 | linenums = label_linenums[label] 74 | label_size = len(linenums) 75 | # at least one instance per class 76 | s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l))))) 77 | if s == 0: 78 | sys.stderr.write('''\ 79 | Error: failed to have at least one instance per class 80 | 1. You may have regression data. 81 | 2. Your classification data is unbalanced or too small. 82 | Please use -s 1. 
83 | ''') 84 | sys.exit(-1) 85 | remaining -= s 86 | ret += [linenums[i] for i in random.sample(xrange(label_size), s)] 87 | return sorted(ret) 88 | 89 | def main(argv=sys.argv): 90 | dataset, subset_size, method, subset_file, rest_file = process_options(argv) 91 | #uncomment the following line to fix the random seed 92 | #random.seed(0) 93 | selected_lines = [] 94 | 95 | if method == 0: 96 | selected_lines = stratified_selection(dataset, subset_size) 97 | elif method == 1: 98 | selected_lines = random_selection(dataset, subset_size) 99 | 100 | #select instances based on selected_lines 101 | dataset = open(dataset,'r') 102 | prev_selected_linenum = -1 103 | for i in xrange(len(selected_lines)): 104 | for cnt in xrange(selected_lines[i]-prev_selected_linenum-1): 105 | line = dataset.readline() 106 | if rest_file: 107 | rest_file.write(line) 108 | subset_file.write(dataset.readline()) 109 | prev_selected_linenum = selected_lines[i] 110 | subset_file.close() 111 | 112 | if rest_file: 113 | for line in dataset: 114 | rest_file.write(line) 115 | rest_file.close() 116 | dataset.close() 117 | 118 | if __name__ == '__main__': 119 | main(sys.argv) 120 | 121 | -------------------------------------------------------------------------------- /mmd/README: -------------------------------------------------------------------------------- 1 | This code computes the empirical estimate of the distance between distributions P (source) and Q (target), as defined by Maximum Mean Discrepancy. 2 | -------------------------------------------------------------------------------- /mmd/mmd.m: -------------------------------------------------------------------------------- 1 | function [ dist ] = mmd( sourcefeatures,targetfeatures,sigma ) 2 | %MMD Summary of this function goes here 3 | % This code computes the empirical estimate of the distance between 4 | % distributions P (source) and Q (target), as defined by Maximum Mean 5 | % Discrepancy. 6 | % 7 | % Dist(Xs,Xt) = sqrt(tr(KL) 8 | % K = [Kss Kst 9 | % Kts Ktt] 10 | % L = 1/n1^2(i,j belong to Xs), = 1/n2^2(i,j belong to Xt), = -1/(n1n2) 11 | % (otherwise) 12 | 13 | %% compute K 14 | Kss = rbf_dot(sourcefeatures,sourcefeatures,sigma); 15 | Kst = rbf_dot(sourcefeatures,targetfeatures,sigma); 16 | Kts = rbf_dot(targetfeatures,sourcefeatures,sigma); 17 | Ktt = rbf_dot(targetfeatures,targetfeatures,sigma); 18 | K = [[Kss,Kst];[Kts,Ktt]]; 19 | 20 | %% compute L 21 | n1 = size(sourcefeatures,1); 22 | n2 = size(targetfeatures,1); 23 | L = zeros(n1+n2); 24 | L(1:n1,1:n1) = 1/(n1^2); 25 | L(n1+1:end,n1+1:end) = 1/(n2^2); 26 | L(n1+1:end,1:n1) = -1/(n1*n2); 27 | L(1:n1,n1+1:end) = -1/(n1*n2); 28 | 29 | %% compute dist 30 | dist = sqrt(trace(K*L)); 31 | 32 | end 33 | 34 | -------------------------------------------------------------------------------- /mmd/rbf_dot.m: -------------------------------------------------------------------------------- 1 | %Radial basis function inner product 2 | %Arthur Gretton 3 | 4 | %Pattern input format : [pattern1 ; pattern2 ; ...] 5 | %Output : Matrix of RBF values k(x1,x2) 6 | %Deg is kernel size 7 | 8 | 9 | function [H]=rbf_dot(patterns1,patterns2,deg) 10 | 11 | %Note : patterns are transposed for compatibility with C code. 
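% The vectorised code below evaluates the Gaussian (RBF) kernel matrix
%   H(i,j) = exp(-||patterns1(i,:) - patterns2(j,:)||^2 / (2*deg^2))
% using the expansion ||x-y||^2 = ||x||^2 + ||y||^2 - 2*x*y', which avoids
% the explicit double loop of the commented-out version further down.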
12 | 13 | size1=size(patterns1); 14 | size2=size(patterns2); 15 | 16 | 17 | G = sum((patterns1.*patterns1),2); 18 | H = sum((patterns2.*patterns2),2); 19 | 20 | Q = repmat(G,1,size2(1)); 21 | R = repmat(H',size1(1),1); 22 | 23 | H = Q + R - 2*patterns1*patterns2'; 24 | 25 | 26 | H=exp(-H/2/deg^2); 27 | % 28 | % function K = rbf_dot(X, Y,rbf_var) 29 | % 30 | % % Rows of X and Y are data points 31 | % 32 | % xnum = size(X,1); 33 | % 34 | % ynum = size(Y,1); 35 | % 36 | % %if (kernel == 1) % Apply Gaussian kernel 37 | % for i=1:xnum 38 | % % fprintf('i=%d\n',i); 39 | % for j=1:ynum 40 | % K(i,j) = exp(-norm(X(i,:)-Y(j,:))^2/rbf_var); 41 | % % K(i,j) = X(i,:)*Y(j,:)'; 42 | % end 43 | % end 44 | % 45 | % % % elseif(kernel==2) % Apply linear kernel 46 | % % K = X*Y'; 47 | % % elseif(kernel==2) %polynomial kernel 48 | % % K = 49 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/README: -------------------------------------------------------------------------------- 1 | The code for "Boosting for transfer learning with multiple sources" by Yi Yao and Gianfranco Doretto. 2 | 3 | mstradaboost.m is for training 4 | mstrpredict.m is for prediction 5 | 6 | Please compile the toolbox and addpath to toolbox before using. 7 | addpath('./toolbox/libsvm-weights-3.20/matlab'); 8 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/mstradaboost.m: -------------------------------------------------------------------------------- 1 | function [hyp,alpha] = mstradaboost(source,targettrnfeatures,targettrnlabels) 2 | %% source.trn.labels : labels of source domain 3 | %% source.trn.features : features of source domain 4 | 5 | M = 10; %%iteration number 6 | N = length(source); 7 | ns = 0; 8 | for i = 1:N 9 | ns = ns + length(source(i).trn.labels); 10 | ws(i).weight = ones(length(source(i).trn.labels),1); 11 | end 12 | m = length(targettrnlabels); 13 | as = log(1 + sqrt(2* log(ns/M)))/2; 14 | wt.weight = ones(length(targettrnlabels),1); 15 | sW = sum(wt.weight); 16 | hyp = {}; 17 | er = ones(M,1); 18 | 19 | %% begin of iteration 20 | for t = 1:M 21 | model = {}; 22 | et = ones(N,1); 23 | bestaccuracy = 0; 24 | for k = 1:N 25 | W = [ws(k).weight;wt.weight]; 26 | X = [source(k).trn.features;targettrnfeatures]; 27 | Y = [source(k).trn.labels;targettrnlabels]; 28 | % for l =1:length(Y) 29 | % if Y(l) == -1 30 | % W(l) = W(l)*1.3; 31 | % end 32 | % end 33 | model{k} = svmtrain(W,Y,X,'-t 2'); 34 | [predict,accuracy,prob] = svmpredict(Y,X,model{k}); 35 | n = length(source(k).trn.labels); 36 | et(k) = sum(wt.weight.*(predict(n+1:m+n)~=targettrnlabels)/sW); 37 | %% choose the model with least error rate and best accuracy 38 | if et(k) < er(t) 39 | er(t) = et(k); 40 | bestaccuracy = accuracy(1); 41 | hyp{t} = model{k}; 42 | elseif et(k)==er(t) 43 | if accuracy(1) > bestaccuracy 44 | bestaccuracy = accuracy(1); 45 | hyp{t} = model{k}; 46 | end 47 | end 48 | end 49 | % F = []; 50 | % js = 0; 51 | % for k=1:N 52 | % if et(k) == er(t) 53 | % js = js+1; 54 | % F(js) = k; 55 | % end 56 | % end 57 | % k = ceil(rand()*js); 58 | % if k==0 59 | % k = 1; 60 | % end 61 | % hyp{t} = model{F(k)}; 62 | if er(t) > 0.5 63 | er(t) = 0.499; 64 | end 65 | if er(t) == 0 66 | er(t) = 0.001; 67 | end 68 | alpha(t) = log((1-er(t))/er(t))/2; 69 | 70 | %% updating weights 71 | for k=1:N 72 | predict = svmpredict(source(k).trn.labels,source(k).trn.features,hyp{t}); 73 | n = length(ws(k).weight); 74 | for j=1:n 75 | ws(k).weight(j) = 
ws(k).weight(j)*exp(-as*abs(predict(j)-source(k).trn.labels(j))); 76 | end 77 | end 78 | predict = svmpredict(targettrnlabels,targettrnfeatures,hyp{t}); 79 | for j=1:m 80 | wt.weight(j) = wt.weight(j)*exp(alpha(t)*abs(predict(j)-targettrnlabels(j))); 81 | end 82 | end 83 | end 84 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/mstrpredict.m: -------------------------------------------------------------------------------- 1 | function label = mstrpredict(X, hyp, alpha) 2 | % X: features of target data 3 | 4 | M = length(hyp); 5 | l = size(X,1); 6 | label = zeros(l,1); 7 | for i=1:M 8 | label = label + (alpha(i)*svmpredict(label,X,hyp{i})); 9 | end 10 | label = sign(label); 11 | 12 | % M = length(hyp); 13 | % start = ceil(M/2); 14 | % l = size(X,1); 15 | % label = zeros(l,1); 16 | % for i=1:M 17 | % label = label + (alpha(i)*svmpredict(label,X,hyp{i})); 18 | % end 19 | % maxlabel = 0; 20 | % for i=1:length(label) 21 | % if abs(label(i))>maxlabel 22 | % maxlabel = abs(label(i)); 23 | % end 24 | % end 25 | % trainIndex = []; 26 | % testIndex = []; 27 | % for i=1:length(label) 28 | % if abs(label(i)) == maxlabel 29 | % label(i) = sign(label(i)); 30 | % trainIndex = [trainIndex,i]; 31 | % else 32 | % label(i) = 0; 33 | % testIndex = [testIndex,i]; 34 | % end 35 | % end 36 | % w = ones(length(trainIndex),1); 37 | % model = svmtrain(w,label(trainIndex),X(trainIndex,:),'-t 0'); 38 | % label2 = svmpredict(zeros(length(testIndex),1),X(testIndex,:),model); 39 | % label(testIndex) = sign(label2); 40 | 41 | end 42 | 43 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/COPYRIGHT: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | 3. Neither name of copyright holders nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR 25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/Makefile: -------------------------------------------------------------------------------- 1 | CXX ?= g++ 2 | CFLAGS = -Wall -Wconversion -O3 -fPIC 3 | SHVER = 2 4 | OS = $(shell uname) 5 | 6 | all: svm-train svm-predict svm-scale 7 | 8 | lib: svm.o 9 | if [ "$(OS)" = "Darwin" ]; then \ 10 | SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)"; \ 11 | else \ 12 | SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \ 13 | fi; \ 14 | $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER) 15 | 16 | svm-predict: svm-predict.c svm.o 17 | $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm 18 | svm-train: svm-train.c svm.o 19 | $(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm 20 | svm-scale: svm-scale.c 21 | $(CXX) $(CFLAGS) svm-scale.c -o svm-scale 22 | svm.o: svm.cpp svm.h 23 | $(CXX) $(CFLAGS) -c svm.cpp 24 | clean: 25 | rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so.$(SHVER) 26 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/Makefile.win: -------------------------------------------------------------------------------- 1 | #You must ensure nmake.exe, cl.exe, link.exe are in system path. 2 | #VCVARS32.bat 3 | #Under dosbox prompt 4 | #nmake -f Makefile.win 5 | 6 | ########################################## 7 | CXX = cl.exe 8 | CFLAGS = /nologo /O2 /EHsc /I. /D _WIN32 /D _CRT_SECURE_NO_DEPRECATE 9 | TARGET = windows 10 | 11 | all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib 12 | 13 | $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj 14 | $(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe 15 | 16 | $(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj 17 | $(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe 18 | 19 | $(TARGET)\svm-scale.exe: svm.h svm-scale.c 20 | $(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe 21 | 22 | $(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp 23 | $(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe 24 | 25 | svm.obj: svm.cpp svm.h 26 | $(CXX) $(CFLAGS) -c svm.cpp 27 | 28 | lib: svm.cpp svm.h svm.def 29 | $(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def 30 | 31 | clean: 32 | -erase /Q *.obj $(TARGET)\. 33 | 34 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/READMEweight: -------------------------------------------------------------------------------- 1 | Usage: 2 | use '-W weight_file' to assign weights for each instance. 3 | Please make sure all weights are non-negative. 
4 | 5 | Example: 6 | $ ./svm-train -W heart_scale.wgt heart_scale 7 | 8 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/heart_scale.wgt: -------------------------------------------------------------------------------- 1 | 20 2 | 10 3 | 5.5 4 | 1 5 | 1 6 | 1 7 | 1 8 | 1 9 | 1 10 | 1 11 | 1 12 | 1 13 | 1 14 | 1 15 | 1 16 | 1 17 | 1 18 | 1 19 | 1 20 | 1 21 | 1 22 | 1 23 | 1 24 | 1 25 | 1 26 | 1 27 | 1 28 | 1 29 | 1 30 | 1 31 | 1 32 | 1 33 | 1 34 | 1 35 | 1 36 | 1 37 | 1 38 | 1 39 | 1 40 | 1 41 | 1 42 | 1 43 | 1 44 | 1 45 | 1 46 | 1 47 | 1 48 | 1 49 | 1 50 | 1 51 | 1 52 | 1 53 | 1 54 | 1 55 | 1 56 | 1 57 | 1 58 | 1 59 | 1 60 | 1 61 | 1 62 | 1 63 | 1 64 | 1 65 | 1 66 | 1 67 | 1 68 | 1 69 | 1 70 | 1 71 | 1 72 | 1 73 | 1 74 | 1 75 | 1 76 | 1 77 | 1 78 | 1 79 | 1 80 | 1 81 | 1 82 | 1 83 | 1 84 | 1 85 | 1 86 | 1 87 | 1 88 | 1 89 | 1 90 | 1 91 | 1 92 | 1 93 | 1 94 | 1 95 | 1 96 | 1 97 | 1 98 | 1 99 | 1 100 | 1 101 | 1 102 | 1 103 | 1 104 | 1 105 | 1 106 | 1 107 | 1 108 | 1 109 | 1 110 | 1 111 | 1 112 | 1 113 | 1 114 | 1 115 | 1 116 | 1 117 | 1 118 | 1 119 | 1 120 | 1 121 | 1 122 | 1 123 | 1 124 | 1 125 | 1 126 | 1 127 | 1 128 | 1 129 | 1 130 | 1 131 | 1 132 | 1 133 | 1 134 | 1 135 | 1 136 | 1 137 | 1 138 | 1 139 | 1 140 | 1 141 | 1 142 | 1 143 | 1 144 | 1 145 | 1 146 | 1 147 | 1 148 | 1 149 | 1 150 | 1 151 | 1 152 | 1 153 | 1 154 | 1 155 | 1 156 | 1 157 | 1 158 | 1 159 | 1 160 | 1 161 | 1 162 | 1 163 | 1 164 | 1 165 | 1 166 | 1 167 | 1 168 | 1 169 | 1 170 | 1 171 | 1 172 | 1 173 | 1 174 | 1 175 | 1 176 | 1 177 | 1 178 | 1 179 | 1 180 | 1 181 | 1 182 | 1 183 | 1 184 | 1 185 | 1 186 | 1 187 | 1 188 | 1 189 | 1 190 | 1 191 | 1 192 | 1 193 | 1 194 | 1 195 | 1 196 | 1 197 | 1 198 | 1 199 | 1 200 | 1 201 | 1 202 | 1 203 | 1 204 | 1 205 | 1 206 | 1 207 | 1 208 | 1 209 | 1 210 | 1 211 | 1 212 | 1 213 | 1 214 | 1 215 | 1 216 | 1 217 | 1 218 | 1 219 | 1 220 | 1 221 | 1 222 | 1 223 | 1 224 | 1 225 | 1 226 | 1 227 | 1 228 | 1 229 | 1 230 | 1 231 | 1 232 | 1 233 | 1 234 | 1 235 | 1 236 | 1 237 | 1 238 | 1 239 | 1 240 | 1 241 | 1 242 | 1 243 | 1 244 | 1 245 | 1 246 | 1 247 | 1 248 | 1 249 | 1 250 | 1 251 | 1 252 | 1 253 | 1 254 | 1 255 | 1 256 | 1 257 | 1 258 | 1 259 | 1 260 | 1 261 | 1 262 | 1 263 | 1 264 | 1 265 | 1 266 | 1 267 | 1 268 | 1 269 | 1 270 | 1 271 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile is used under Linux 2 | 3 | MATLABDIR ?= /usr/local/matlab 4 | # for Mac 5 | # MATLABDIR ?= /opt/local/matlab 6 | 7 | CXX ?= g++ 8 | #CXX = g++-4.1 9 | CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I.. 
10 | 11 | MEX = $(MATLABDIR)/bin/mex 12 | MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)" 13 | # comment the following line if you use MATLAB on 32-bit computer 14 | MEX_OPTION += -largeArrayDims 15 | MEX_EXT = $(shell $(MATLABDIR)/bin/mexext) 16 | 17 | all: matlab 18 | 19 | matlab: binary 20 | 21 | octave: 22 | @echo "please type make under Octave" 23 | 24 | binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT) 25 | 26 | svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o 27 | $(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o 28 | 29 | svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o 30 | $(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o 31 | 32 | libsvmread.$(MEX_EXT): libsvmread.c 33 | $(MEX) $(MEX_OPTION) libsvmread.c 34 | 35 | libsvmwrite.$(MEX_EXT): libsvmwrite.c 36 | $(MEX) $(MEX_OPTION) libsvmwrite.c 37 | 38 | svm_model_matlab.o: svm_model_matlab.c ../svm.h 39 | $(CXX) $(CFLAGS) -c svm_model_matlab.c 40 | 41 | ../svm.o: ../svm.cpp ../svm.h 42 | make -C .. svm.o 43 | 44 | clean: 45 | rm -f *~ *.o *.mex* *.obj ../svm.o 46 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/READMEweight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a simple interface to LIBSVM with instance weight support 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for the detail. 10 | 11 | Usage 12 | ===== 13 | 14 | matlab> model = svmtrain(training_weight_vector, training_label_vector, training_instance_matrix, 'libsvm_options') 15 | 16 | -training_weight_vector: 17 | An m by 1 vector of training weights. (type must be double) 18 | -training_label_vector: 19 | An m by 1 vector of training labels. (type must be double) 20 | -training_instance_matrix: 21 | An m by n matrix of m training instances with n features. (type must be double) 22 | -libsvm_options: 23 | A string of training options in the same format as that of LIBSVM. 
24 | 25 | Examples 26 | ======== 27 | 28 | Train and test on the provided data heart_scale: 29 | 30 | matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale'); 31 | matlab> heart_scale_weight = load('../heart_scale.wgt'); 32 | matlab> model = svmtrain(heart_scale_weight, heart_scale_label, heart_scale_inst, '-c 1'); 33 | matlab> [predict_label, accuracy, dec_values] = svmpredict(heart_scale_label, heart_scale_inst, model); % test the training data 34 | 35 | Train and test without weights: 36 | 37 | matlab> model = svmtrain([], heart_scale_label, heart_scale_inst, '-c 1'); 38 | 39 | 40 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "mex.h" 8 | 9 | #ifdef MX_API_VER 10 | #if MX_API_VER < 0x07030000 11 | typedef int mwIndex; 12 | #endif 13 | #endif 14 | #ifndef max 15 | #define max(x,y) (((x)>(y))?(x):(y)) 16 | #endif 17 | #ifndef min 18 | #define min(x,y) (((x)<(y))?(x):(y)) 19 | #endif 20 | 21 | void exit_with_help() 22 | { 23 | mexPrintf( 24 | "Usage: [label_vector, instance_matrix] = libsvmread('filename');\n" 25 | ); 26 | } 27 | 28 | static void fake_answer(int nlhs, mxArray *plhs[]) 29 | { 30 | int i; 31 | for(i=0;i start from 0 86 | strtok(line," \t"); // label 87 | while (1) 88 | { 89 | idx = strtok(NULL,":"); // index:value 90 | val = strtok(NULL," \t"); 91 | if(val == NULL) 92 | break; 93 | 94 | errno = 0; 95 | index = (int) strtol(idx,&endptr,10); 96 | if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index) 97 | { 98 | mexPrintf("Wrong input format at line %d\n",l+1); 99 | fake_answer(nlhs, plhs); 100 | return; 101 | } 102 | else 103 | inst_max_index = index; 104 | 105 | min_index = min(min_index, index); 106 | elements++; 107 | } 108 | max_index = max(max_index, inst_max_index); 109 | l++; 110 | } 111 | rewind(fp); 112 | 113 | // y 114 | plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL); 115 | // x^T 116 | if (min_index <= 0) 117 | plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL); 118 | else 119 | plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL); 120 | 121 | labels = mxGetPr(plhs[0]); 122 | samples = mxGetPr(plhs[1]); 123 | ir = mxGetIr(plhs[1]); 124 | jc = mxGetJc(plhs[1]); 125 | 126 | k=0; 127 | for(i=0;i start from 0 158 | 159 | errno = 0; 160 | samples[k] = strtod(val,&endptr); 161 | if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 162 | { 163 | mexPrintf("Wrong input format at line %d\n",i+1); 164 | fake_answer(nlhs, plhs); 165 | return; 166 | } 167 | ++k; 168 | } 169 | } 170 | jc[l] = k; 171 | 172 | fclose(fp); 173 | free(line); 174 | 175 | { 176 | mxArray *rhs[1], *lhs[1]; 177 | rhs[0] = plhs[1]; 178 | if(mexCallMATLAB(1, lhs, 1, rhs, "transpose")) 179 | { 180 | mexPrintf("Error: cannot transpose problem\n"); 181 | fake_answer(nlhs, plhs); 182 | return; 183 | } 184 | plhs[1] = lhs[0]; 185 | } 186 | } 187 | 188 | void mexFunction( int nlhs, mxArray *plhs[], 189 | int nrhs, const mxArray *prhs[] ) 190 | { 191 | char filename[256]; 192 | 193 | if(nrhs != 1 || nlhs != 2) 194 | { 195 | exit_with_help(); 196 | fake_answer(nlhs, plhs); 197 | return; 198 | } 199 | 200 | mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1); 201 | 202 | if(filename == NULL) 203 | { 204 | mexPrintf("Error: filename is NULL\n"); 205 | return; 
206 | } 207 | 208 | read_problem(filename, nlhs, plhs); 209 | 210 | return; 211 | } 212 | 213 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64 -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "mex.h" 6 | 7 | #ifdef MX_API_VER 8 | #if MX_API_VER < 0x07030000 9 | typedef int mwIndex; 10 | #endif 11 | #endif 12 | 13 | void exit_with_help() 14 | { 15 | mexPrintf( 16 | "Usage: libsvmwrite('filename', label_vector, instance_matrix);\n" 17 | ); 18 | } 19 | 20 | static void fake_answer(int nlhs, mxArray *plhs[]) 21 | { 22 | int i; 23 | for(i=0;i 0) 89 | { 90 | exit_with_help(); 91 | fake_answer(nlhs, plhs); 92 | return; 93 | } 94 | 95 | // Transform the input Matrix to libsvm format 96 | if(nrhs == 3) 97 | { 98 | char filename[256]; 99 | if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2])) 100 | { 101 | mexPrintf("Error: label vector and instance matrix must be double\n"); 102 | return; 103 | } 104 | 105 | mxGetString(prhs[0], filename, mxGetN(prhs[0])+1); 106 | 107 | if(mxIsSparse(prhs[2])) 108 | libsvmwrite(filename, prhs[1], prhs[2]); 109 | else 110 | { 111 | mexPrintf("Instance_matrix must be sparse\n"); 112 | return; 113 | } 114 | } 115 | else 116 | { 117 | exit_with_help(); 118 | return; 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64 -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/make.m: -------------------------------------------------------------------------------- 1 | % This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix 2 | 3 | try 4 | Type = ver; 5 | % This part is for OCTAVE 6 | if(strcmp(Type(1).Name, 'Octave') == 1) 7 | mex libsvmread.c 8 | mex libsvmwrite.c 9 | mex svmtrain.c ../svm.cpp svm_model_matlab.c 10 | mex svmpredict.c ../svm.cpp svm_model_matlab.c 11 | % This part is for MATLAB 12 | % Add -largeArrayDims on 64-bit machines of MATLAB 13 | else 14 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c 15 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c 16 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c 17 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c 18 | end 19 | catch 20 | fprintf('If make.m fails, please check README about detailed instructions.\n'); 21 | end 22 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/svm_model_matlab.h: 
-------------------------------------------------------------------------------- 1 | const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model); 2 | struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message); 3 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64 -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/multiple-source-tradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64 -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/python/Makefile: -------------------------------------------------------------------------------- 1 | all = lib 2 | 3 | lib: 4 | make -C .. lib 5 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/python/README.weight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a Python interface to LIBSVM with instance weight support 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for detail. 10 | 11 | USAGE 12 | ===== 13 | 14 | The usage is bascally the same as the version without supporting 15 | instance weights. We only show differences below. 16 | 17 | - Function: svm_train 18 | 19 | There are three ways to call svm_train() 20 | 21 | >>> model = svm_train(W, y, x [, 'training_options']) 22 | >>> model = svm_train(prob [, 'training_options']) 23 | >>> model = svm_train(prob, param) 24 | 25 | W: a list/tuple of l training weights (type must be double). 26 | Use [] if no weights. 27 | 28 | y: a list/tuple of l training labels (type must be int/double). 29 | 30 | x: a list/tuple of l training instances. The feature vector of 31 | each training instance is an instance of list/tuple or dictionary. 32 | 33 | training_options: a string in the same form as that for LIBSVM command 34 | mode. 35 | 36 | prob: an svm_problem instance generated by calling 37 | svm_problem(W, y, x). 38 | 39 | param: an svm_parameter instance generated by calling 40 | svm_parameter('training_options') 41 | 42 | model: the returned svm_model instance. See svm.h for details of this 43 | structure. If '-v' is specified, cross validation is 44 | conducted and the returned model is just a scalar: cross-validation 45 | accuracy for classification and mean-squared error for regression. 46 | 47 | To train the same data many times with different 48 | parameters, the second and the third ways should be faster.. 
49 | 50 | Examples: 51 | 52 | >>> y, x = svm_read_problem('../heart_scale') 53 | >>> W = [1] * len(y) 54 | >>> W[0] = 10 55 | >>> prob = svm_problem(W, y, x) 56 | >>> param = svm_parameter('-s 3 -c 5 -h 0') 57 | >>> m = svm_train([], y, x, '-c 5') 58 | >>> m = svm_train(W, y, x) 59 | >>> m = svm_train(prob, '-t 2 -c 5') 60 | >>> m = svm_train(prob, param) 61 | >>> CV_ACC = svm_train(W, y, x, '-v 3') 62 | 63 | 64 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/svm-predict.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "svm.h" 7 | 8 | int print_null(const char *s,...) {return 0;} 9 | 10 | static int (*info)(const char *fmt,...) = &printf; 11 | 12 | struct svm_node *x; 13 | int max_nr_attr = 64; 14 | 15 | struct svm_model* model; 16 | int predict_probability=0; 17 | 18 | static char *line = NULL; 19 | static int max_line_len; 20 | 21 | static char* readline(FILE *input) 22 | { 23 | int len; 24 | 25 | if(fgets(line,max_line_len,input) == NULL) 26 | return NULL; 27 | 28 | while(strrchr(line,'\n') == NULL) 29 | { 30 | max_line_len *= 2; 31 | line = (char *) realloc(line,max_line_len); 32 | len = (int) strlen(line); 33 | if(fgets(line+len,max_line_len-len,input) == NULL) 34 | break; 35 | } 36 | return line; 37 | } 38 | 39 | void exit_input_error(int line_num) 40 | { 41 | fprintf(stderr,"Wrong input format at line %d\n", line_num); 42 | exit(1); 43 | } 44 | 45 | void predict(FILE *input, FILE *output) 46 | { 47 | int correct = 0; 48 | int total = 0; 49 | double error = 0; 50 | double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0; 51 | 52 | int svm_type=svm_get_svm_type(model); 53 | int nr_class=svm_get_nr_class(model); 54 | double *prob_estimates=NULL; 55 | int j; 56 | 57 | if(predict_probability) 58 | { 59 | if (svm_type==NU_SVR || svm_type==EPSILON_SVR) 60 | info("Prob. 
model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model)); 61 | else 62 | { 63 | int *labels=(int *) malloc(nr_class*sizeof(int)); 64 | svm_get_labels(model,labels); 65 | prob_estimates = (double *) malloc(nr_class*sizeof(double)); 66 | fprintf(output,"labels"); 67 | for(j=0;j start from 0 82 | 83 | label = strtok(line," \t\n"); 84 | if(label == NULL) // empty line 85 | exit_input_error(total+1); 86 | 87 | target_label = strtod(label,&endptr); 88 | if(endptr == label || *endptr != '\0') 89 | exit_input_error(total+1); 90 | 91 | while(1) 92 | { 93 | if(i>=max_nr_attr-1) // need one more for index = -1 94 | { 95 | max_nr_attr *= 2; 96 | x = (struct svm_node *) realloc(x,max_nr_attr*sizeof(struct svm_node)); 97 | } 98 | 99 | idx = strtok(NULL,":"); 100 | val = strtok(NULL," \t"); 101 | 102 | if(val == NULL) 103 | break; 104 | errno = 0; 105 | x[i].index = (int) strtol(idx,&endptr,10); 106 | if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index) 107 | exit_input_error(total+1); 108 | else 109 | inst_max_index = x[i].index; 110 | 111 | errno = 0; 112 | x[i].value = strtod(val,&endptr); 113 | if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 114 | exit_input_error(total+1); 115 | 116 | ++i; 117 | } 118 | x[i].index = -1; 119 | 120 | if (predict_probability && (svm_type==C_SVC || svm_type==NU_SVC)) 121 | { 122 | predict_label = svm_predict_probability(model,x,prob_estimates); 123 | fprintf(output,"%g",predict_label); 124 | for(j=0;j=argc-2) 195 | exit_with_help(); 196 | 197 | input = fopen(argv[i],"r"); 198 | if(input == NULL) 199 | { 200 | fprintf(stderr,"can't open input file %s\n",argv[i]); 201 | exit(1); 202 | } 203 | 204 | output = fopen(argv[i+2],"w"); 205 | if(output == NULL) 206 | { 207 | fprintf(stderr,"can't open output file %s\n",argv[i+2]); 208 | exit(1); 209 | } 210 | 211 | if((model=svm_load_model(argv[i+1]))==0) 212 | { 213 | fprintf(stderr,"can't open model file %s\n",argv[i+1]); 214 | exit(1); 215 | } 216 | 217 | x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node)); 218 | if(predict_probability) 219 | { 220 | if(svm_check_probability_model(model)==0) 221 | { 222 | fprintf(stderr,"Model does not support probabiliy estimates\n"); 223 | exit(1); 224 | } 225 | } 226 | else 227 | { 228 | if(svm_check_probability_model(model)!=0) 229 | info("Model supports probability estimates, but disabled in prediction.\n"); 230 | } 231 | 232 | predict(input,output); 233 | svm_free_and_destroy_model(&model); 234 | free(x); 235 | free(line); 236 | fclose(input); 237 | fclose(output); 238 | return 0; 239 | } 240 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/svm.def: -------------------------------------------------------------------------------- 1 | LIBRARY libsvm 2 | EXPORTS 3 | svm_train @1 4 | svm_cross_validation @2 5 | svm_save_model @3 6 | svm_load_model @4 7 | svm_get_svm_type @5 8 | svm_get_nr_class @6 9 | svm_get_labels @7 10 | svm_get_svr_probability @8 11 | svm_predict_values @9 12 | svm_predict @10 13 | svm_predict_probability @11 14 | svm_free_model_content @12 15 | svm_free_and_destroy_model @13 16 | svm_destroy_param @14 17 | svm_check_parameter @15 18 | svm_check_probability_model @16 19 | svm_set_print_string_function @17 20 | svm_get_sv_indices @18 21 | svm_get_nr_sv @19 22 | 
-------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/svm.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIBSVM_H 2 | #define _LIBSVM_H 3 | 4 | #define LIBSVM_VERSION 320 5 | 6 | #ifdef __cplusplus 7 | extern "C" { 8 | #endif 9 | 10 | extern int libsvm_version; 11 | 12 | struct svm_node 13 | { 14 | int index; 15 | double value; 16 | }; 17 | 18 | struct svm_problem 19 | { 20 | int l; 21 | double *y; 22 | struct svm_node **x; 23 | double *W; /* instance weight */ 24 | }; 25 | 26 | enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR }; /* svm_type */ 27 | enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */ 28 | 29 | struct svm_parameter 30 | { 31 | int svm_type; 32 | int kernel_type; 33 | int degree; /* for poly */ 34 | double gamma; /* for poly/rbf/sigmoid */ 35 | double coef0; /* for poly/sigmoid */ 36 | 37 | /* these are for training only */ 38 | double cache_size; /* in MB */ 39 | double eps; /* stopping criteria */ 40 | double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ 41 | int nr_weight; /* for C_SVC */ 42 | int *weight_label; /* for C_SVC */ 43 | double* weight; /* for C_SVC */ 44 | double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ 45 | double p; /* for EPSILON_SVR */ 46 | int shrinking; /* use the shrinking heuristics */ 47 | int probability; /* do probability estimates */ 48 | }; 49 | 50 | // 51 | // svm_model 52 | // 53 | struct svm_model 54 | { 55 | struct svm_parameter param; /* parameter */ 56 | int nr_class; /* number of classes, = 2 in regression/one class svm */ 57 | int l; /* total #SV */ 58 | struct svm_node **SV; /* SVs (SV[l]) */ 59 | double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */ 60 | double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */ 61 | double *probA; /* pariwise probability information */ 62 | double *probB; 63 | int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_traning_data] to indicate SVs in the training set */ 64 | 65 | /* for classification only */ 66 | 67 | int *label; /* label of each class (label[k]) */ 68 | int *nSV; /* number of SVs for each class (nSV[k]) */ 69 | /* nSV[0] + nSV[1] + ... 
+ nSV[k-1] = l */ 70 | /* XXX */ 71 | int free_sv; /* 1 if svm_model is created by svm_load_model*/ 72 | /* 0 if svm_model is created by svm_train */ 73 | }; 74 | 75 | struct svm_model *svm_train(const struct svm_problem *prob, const struct svm_parameter *param); 76 | void svm_cross_validation(const struct svm_problem *prob, const struct svm_parameter *param, int nr_fold, double *target); 77 | 78 | int svm_save_model(const char *model_file_name, const struct svm_model *model); 79 | struct svm_model *svm_load_model(const char *model_file_name); 80 | 81 | int svm_get_svm_type(const struct svm_model *model); 82 | int svm_get_nr_class(const struct svm_model *model); 83 | void svm_get_labels(const struct svm_model *model, int *label); 84 | void svm_get_sv_indices(const struct svm_model *model, int *sv_indices); 85 | int svm_get_nr_sv(const struct svm_model *model); 86 | double svm_get_svr_probability(const struct svm_model *model); 87 | 88 | double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values); 89 | double svm_predict(const struct svm_model *model, const struct svm_node *x); 90 | double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates); 91 | 92 | void svm_free_model_content(struct svm_model *model_ptr); 93 | void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr); 94 | void svm_destroy_param(struct svm_parameter *param); 95 | 96 | const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param); 97 | int svm_check_probability_model(const struct svm_model *model); 98 | 99 | void svm_set_print_string_function(void (*print_func)(const char *)); 100 | 101 | #ifdef __cplusplus 102 | } 103 | #endif 104 | 105 | #endif /* _LIBSVM_H */ 106 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/tools/checkdata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # 4 | # A format checker for LIBSVM 5 | # 6 | 7 | # 8 | # Copyright (c) 2007, Rong-En Fan 9 | # 10 | # All rights reserved. 11 | # 12 | # This program is distributed under the same license of the LIBSVM package. 
13 | # 14 | 15 | from sys import argv, exit 16 | import os.path 17 | 18 | def err(line_no, msg): 19 | print("line {0}: {1}".format(line_no, msg)) 20 | 21 | # works like float() but does not accept nan and inf 22 | def my_float(x): 23 | if x.lower().find("nan") != -1 or x.lower().find("inf") != -1: 24 | raise ValueError 25 | 26 | return float(x) 27 | 28 | def main(): 29 | if len(argv) != 2: 30 | print("Usage: {0} dataset".format(argv[0])) 31 | exit(1) 32 | 33 | dataset = argv[1] 34 | 35 | if not os.path.exists(dataset): 36 | print("dataset {0} not found".format(dataset)) 37 | exit(1) 38 | 39 | line_no = 1 40 | error_line_count = 0 41 | for line in open(dataset, 'r'): 42 | line_error = False 43 | 44 | # each line must end with a newline character 45 | if line[-1] != '\n': 46 | err(line_no, "missing a newline character in the end") 47 | line_error = True 48 | 49 | nodes = line.split() 50 | 51 | # check label 52 | try: 53 | label = nodes.pop(0) 54 | 55 | if label.find(',') != -1: 56 | # multi-label format 57 | try: 58 | for l in label.split(','): 59 | l = my_float(l) 60 | except: 61 | err(line_no, "label {0} is not a valid multi-label form".format(label)) 62 | line_error = True 63 | else: 64 | try: 65 | label = my_float(label) 66 | except: 67 | err(line_no, "label {0} is not a number".format(label)) 68 | line_error = True 69 | except: 70 | err(line_no, "missing label, perhaps an empty line?") 71 | line_error = True 72 | 73 | # check features 74 | prev_index = -1 75 | for i in range(len(nodes)): 76 | try: 77 | (index, value) = nodes[i].split(':') 78 | 79 | index = int(index) 80 | value = my_float(value) 81 | 82 | # precomputed kernel's index starts from 0 and LIBSVM 83 | # checks it. Hence, don't treat index 0 as an error. 84 | if index < 0: 85 | err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i])) 86 | line_error = True 87 | elif index <= prev_index: 88 | err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i])) 89 | line_error = True 90 | prev_index = index 91 | except: 92 | err(line_no, "feature '{0}' not an : pair, integer, real number ".format(nodes[i])) 93 | line_error = True 94 | 95 | line_no += 1 96 | 97 | if line_error: 98 | error_line_count += 1 99 | 100 | if error_line_count > 0: 101 | print("Found {0} lines with error.".format(error_line_count)) 102 | return 1 103 | else: 104 | print("No error.") 105 | return 0 106 | 107 | if __name__ == "__main__": 108 | exit(main()) 109 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/tools/easy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | from subprocess import * 6 | 7 | if len(sys.argv) <= 1: 8 | print('Usage: {0} training_file [testing_file]'.format(sys.argv[0])) 9 | raise SystemExit 10 | 11 | # svm, grid, and gnuplot executable files 12 | 13 | is_win32 = (sys.platform == 'win32') 14 | if not is_win32: 15 | svmscale_exe = "../svm-scale" 16 | svmtrain_exe = "../svm-train" 17 | svmpredict_exe = "../svm-predict" 18 | grid_py = "./grid.py" 19 | gnuplot_exe = "/usr/bin/gnuplot" 20 | else: 21 | # example for windows 22 | svmscale_exe = r"..\windows\svm-scale.exe" 23 | svmtrain_exe = r"..\windows\svm-train.exe" 24 | svmpredict_exe = r"..\windows\svm-predict.exe" 25 | gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe" 26 | grid_py = r".\grid.py" 27 | 28 | assert 
os.path.exists(svmscale_exe),"svm-scale executable not found" 29 | assert os.path.exists(svmtrain_exe),"svm-train executable not found" 30 | assert os.path.exists(svmpredict_exe),"svm-predict executable not found" 31 | assert os.path.exists(gnuplot_exe),"gnuplot executable not found" 32 | assert os.path.exists(grid_py),"grid.py not found" 33 | 34 | train_pathname = sys.argv[1] 35 | assert os.path.exists(train_pathname),"training file not found" 36 | file_name = os.path.split(train_pathname)[1] 37 | scaled_file = file_name + ".scale" 38 | model_file = file_name + ".model" 39 | range_file = file_name + ".range" 40 | 41 | if len(sys.argv) > 2: 42 | test_pathname = sys.argv[2] 43 | file_name = os.path.split(test_pathname)[1] 44 | assert os.path.exists(test_pathname),"testing file not found" 45 | scaled_test_file = file_name + ".scale" 46 | predict_test_file = file_name + ".predict" 47 | 48 | cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file) 49 | print('Scaling training data...') 50 | Popen(cmd, shell = True, stdout = PIPE).communicate() 51 | 52 | cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file) 53 | print('Cross validation...') 54 | f = Popen(cmd, shell = True, stdout = PIPE).stdout 55 | 56 | line = '' 57 | while True: 58 | last_line = line 59 | line = f.readline() 60 | if not line: break 61 | c,g,rate = map(float,last_line.split()) 62 | 63 | print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate)) 64 | 65 | cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file) 66 | print('Training...') 67 | Popen(cmd, shell = True, stdout = PIPE).communicate() 68 | 69 | print('Output model: {0}'.format(model_file)) 70 | if len(sys.argv) > 2: 71 | cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file) 72 | print('Scaling testing data...') 73 | Popen(cmd, shell = True, stdout = PIPE).communicate() 74 | 75 | cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file) 76 | print('Testing...') 77 | Popen(cmd, shell = True).communicate() 78 | 79 | print('Output prediction: {0}'.format(predict_test_file)) 80 | -------------------------------------------------------------------------------- /multiple-source-tradaboost/toolbox/libsvm-weights-3.20/tools/subset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, sys, math, random 4 | from collections import defaultdict 5 | 6 | if sys.version_info[0] >= 3: 7 | xrange = range 8 | 9 | def exit_with_help(argv): 10 | print("""\ 11 | Usage: {0} [options] dataset subset_size [output1] [output2] 12 | 13 | This script randomly selects a subset of the dataset. 
14 | 15 | options: 16 | -s method : method of selection (default 0) 17 | 0 -- stratified selection (classification only) 18 | 1 -- random selection 19 | 20 | output1 : the subset (optional) 21 | output2 : rest of the data (optional) 22 | If output1 is omitted, the subset will be printed on the screen.""".format(argv[0])) 23 | exit(1) 24 | 25 | def process_options(argv): 26 | argc = len(argv) 27 | if argc < 3: 28 | exit_with_help(argv) 29 | 30 | # default method is stratified selection 31 | method = 0 32 | subset_file = sys.stdout 33 | rest_file = None 34 | 35 | i = 1 36 | while i < argc: 37 | if argv[i][0] != "-": 38 | break 39 | if argv[i] == "-s": 40 | i = i + 1 41 | method = int(argv[i]) 42 | if method not in [0,1]: 43 | print("Unknown selection method {0}".format(method)) 44 | exit_with_help(argv) 45 | i = i + 1 46 | 47 | dataset = argv[i] 48 | subset_size = int(argv[i+1]) 49 | if i+2 < argc: 50 | subset_file = open(argv[i+2],'w') 51 | if i+3 < argc: 52 | rest_file = open(argv[i+3],'w') 53 | 54 | return dataset, subset_size, method, subset_file, rest_file 55 | 56 | def random_selection(dataset, subset_size): 57 | l = sum(1 for line in open(dataset,'r')) 58 | return sorted(random.sample(xrange(l), subset_size)) 59 | 60 | def stratified_selection(dataset, subset_size): 61 | labels = [line.split(None,1)[0] for line in open(dataset)] 62 | label_linenums = defaultdict(list) 63 | for i, label in enumerate(labels): 64 | label_linenums[label] += [i] 65 | 66 | l = len(labels) 67 | remaining = subset_size 68 | ret = [] 69 | 70 | # classes with fewer data are sampled first; otherwise 71 | # some rare classes may not be selected 72 | for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])): 73 | linenums = label_linenums[label] 74 | label_size = len(linenums) 75 | # at least one instance per class 76 | s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l))))) 77 | if s == 0: 78 | sys.stderr.write('''\ 79 | Error: failed to have at least one instance per class 80 | 1. You may have regression data. 81 | 2. Your classification data is unbalanced or too small. 82 | Please use -s 1. 
83 | ''') 84 | sys.exit(-1) 85 | remaining -= s 86 | ret += [linenums[i] for i in random.sample(xrange(label_size), s)] 87 | return sorted(ret) 88 | 89 | def main(argv=sys.argv): 90 | dataset, subset_size, method, subset_file, rest_file = process_options(argv) 91 | #uncomment the following line to fix the random seed 92 | #random.seed(0) 93 | selected_lines = [] 94 | 95 | if method == 0: 96 | selected_lines = stratified_selection(dataset, subset_size) 97 | elif method == 1: 98 | selected_lines = random_selection(dataset, subset_size) 99 | 100 | #select instances based on selected_lines 101 | dataset = open(dataset,'r') 102 | prev_selected_linenum = -1 103 | for i in xrange(len(selected_lines)): 104 | for cnt in xrange(selected_lines[i]-prev_selected_linenum-1): 105 | line = dataset.readline() 106 | if rest_file: 107 | rest_file.write(line) 108 | subset_file.write(dataset.readline()) 109 | prev_selected_linenum = selected_lines[i] 110 | subset_file.close() 111 | 112 | if rest_file: 113 | for line in dataset: 114 | rest_file.write(line) 115 | rest_file.close() 116 | dataset.close() 117 | 118 | if __name__ == '__main__': 119 | main(sys.argv) 120 | 121 | -------------------------------------------------------------------------------- /tca/README: -------------------------------------------------------------------------------- 1 | The code for "Domain Adaptation via Transfer Component Analysis" by Sinno Jialin Pan, Ivor W. Tsang, James T. Kwok and Qiang Yang 2 | 3 | The main function of tca is ‘tca.m’. 4 | You can refer to ‘test.m’ to see an example of how to use tca. 5 | 6 | Please compile the toolbox and addpath to toolbox before using. 7 | addpath('./toolbox/libsvm-3.20/matlab'); 8 | -------------------------------------------------------------------------------- /tca/rbf_dot.m: -------------------------------------------------------------------------------- 1 | %Radial basis function inner product 2 | %Arthur Gretton 3 | 4 | %Pattern input format : [pattern1 ; pattern2 ; ...] 5 | %Output : Matrix of RBF values k(x1,x2) 6 | %Deg is kernel size 7 | 8 | 9 | function [H]=rbf_dot(patterns1,patterns2,deg) 10 | 11 | %Note : patterns are transposed for compatibility with C code. 
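%Each entry is k(x1,x2) = exp(-||x1-x2||^2/(2*deg^2)): the squared
%distances are assembled below from the row norms and the cross Gram
%matrix, and the exponential is applied at the end.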
12 | 13 | size1=size(patterns1); 14 | size2=size(patterns2); 15 | 16 | 17 | G = sum((patterns1.*patterns1),2); 18 | H = sum((patterns2.*patterns2),2); 19 | 20 | Q = repmat(G,1,size2(1)); 21 | R = repmat(H',size1(1),1); 22 | 23 | H = Q + R - 2*patterns1*patterns2'; 24 | 25 | 26 | H=exp(-H/2/deg^2); 27 | % 28 | % function K = rbf_dot(X, Y,rbf_var) 29 | % 30 | % % Rows of X and Y are data points 31 | % 32 | % xnum = size(X,1); 33 | % 34 | % ynum = size(Y,1); 35 | % 36 | % %if (kernel == 1) % Apply Gaussian kernel 37 | % for i=1:xnum 38 | % % fprintf('i=%d\n',i); 39 | % for j=1:ynum 40 | % K(i,j) = exp(-norm(X(i,:)-Y(j,:))^2/rbf_var); 41 | % % K(i,j) = X(i,:)*Y(j,:)'; 42 | % end 43 | % end 44 | % 45 | % % % elseif(kernel==2) % Apply linear kernel 46 | % % K = X*Y'; 47 | % % elseif(kernel==2) %polynomial kernel 48 | % % K = 49 | -------------------------------------------------------------------------------- /tca/reducedvector.m: -------------------------------------------------------------------------------- 1 | function [features] = reducedvector(K,m) 2 | %REDUCEDVECTOR Summary of this function goes here 3 | % This function returns the data of reduced feature space 4 | 5 | %% getting the m-leading eigenvectors and eigenvalues of K 6 | [V,D] = eig(K); 7 | Dreal = real(diag(D)); 8 | [Dreal,indice] = sort(Dreal,'descend'); 9 | V = V(:,indice(1:m)); 10 | 11 | %% compute the phi(xi) using eigenvector and eigenvalues 12 | features = zeros(size(Dreal),m); 13 | for i=1:size(Dreal) 14 | for j=1:m 15 | features(i,j) = sqrt(Dreal(j)) * V(i,j); 16 | end 17 | end 18 | end 19 | 20 | -------------------------------------------------------------------------------- /tca/tca.m: -------------------------------------------------------------------------------- 1 | function [Kr] = tca( sourcefeatures,targetfeatures,m,sigma) 2 | %TCA Summary of this function goes here 3 | % This is the code for 'Domain Adaptation via Transfer Component Analysis' by Sinno Jialin Pan, Ivor W. Tsang, James T. Kwok and Qiang Yang 4 | % input: source features, target features, the number of vectors after 5 | % tca, and the parameter sigma for gaussian kernel. 
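% method: build the combined RBF kernel K over source and target samples,
% the domain-difference (MMD) coefficient matrix L and the centering
% matrix H, then take W as the m leading eigenvectors of
% (I + u*K*L*K)\(K*H*K); the returned kernel is Kr = K*W*W'*K
% (see the corresponding steps below).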
6 | % output: resultant kernel matrix 7 | %% error 8 | if m >= size(sourcefeatures,2) 9 | error 'm is larger than the total number of features'; 10 | end 11 | %% compute K 12 | Kss = rbf_dot(sourcefeatures,sourcefeatures,sigma); 13 | Kst = rbf_dot(sourcefeatures,targetfeatures,sigma); 14 | Kts = rbf_dot(targetfeatures,sourcefeatures,sigma); 15 | Ktt = rbf_dot(targetfeatures,targetfeatures,sigma); 16 | K = [[Kss,Kst];[Kts,Ktt]]; 17 | 18 | %% compute L 19 | n1 = size(sourcefeatures,1); 20 | n2 = size(targetfeatures,1); 21 | L = zeros(n1+n2); 22 | L(1:n1,1:n1) = 1/(n1^2); 23 | L(n1+1:end,n1+1:end) = 1/(n2^2); 24 | L(n1+1:end,1:n1) = -1/(n1*n2); 25 | L(1:n1,n1+1:end) = -1/(n1*n2); 26 | 27 | %% compute u and H 28 | u = 0.1; %trade-off parameter 29 | H = eye(n1+n2) - ones(n1+n2)/(n1+n2); %centering matrix 30 | 31 | %% calculate m-leading eigenvalues and corresponding eigenvectors 32 | I = eye(n1+n2); 33 | M = (I + u*K*L*K)\K*H*K; 34 | [V,D] = eig(M); 35 | Dreal = real(diag(D)); 36 | [Dreal,indice] = sort(Dreal,'descend'); 37 | W = V(:,indice(1:m)); 38 | 39 | %% calculate resultant kernel matrix 40 | Kr = K*W*W'*K; 41 | end 42 | 43 | -------------------------------------------------------------------------------- /tca/test.m: -------------------------------------------------------------------------------- 1 | clear all; 2 | addpath('./toolbox/libsvm-3.20/matlab'); 3 | load ../code/features; 4 | K = tca(source.trn.features,target.test.features,20,10); 5 | n = length(source.trn.labels); 6 | m = length(target.test.labels); 7 | %% if directly use the resultant kernel to do the training 8 | model = svmtrain(source.trn.labels,[(1:n)',K(1:n,1:n)],'-t 4'); 9 | label = svmpredict(target.test.labels,[(1:m)',K(n+1:end,1:n)],model); 10 | 11 | %% or you can use the data with reduced feature space to do the training 12 | features = reducedvector(K,20); 13 | sourcefeatures = features(1:n,:); 14 | targetfeatures = features(n+1:end,:); 15 | model = svmtrain(source.trn.labels,sourcefeatures,'-t 2'); 16 | label = svmpredict(target.test.labels,targetfeatures,model); -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/COPYRIGHT: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | 3. Neither name of copyright holders nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR 25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/Makefile: -------------------------------------------------------------------------------- 1 | CXX ?= g++ 2 | CFLAGS = -Wall -Wconversion -O3 -fPIC 3 | SHVER = 2 4 | OS = $(shell uname) 5 | 6 | all: svm-train svm-predict svm-scale 7 | 8 | lib: svm.o 9 | if [ "$(OS)" = "Darwin" ]; then \ 10 | SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)"; \ 11 | else \ 12 | SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \ 13 | fi; \ 14 | $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER) 15 | 16 | svm-predict: svm-predict.c svm.o 17 | $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm 18 | svm-train: svm-train.c svm.o 19 | $(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm 20 | svm-scale: svm-scale.c 21 | $(CXX) $(CFLAGS) svm-scale.c -o svm-scale 22 | svm.o: svm.cpp svm.h 23 | $(CXX) $(CFLAGS) -c svm.cpp 24 | clean: 25 | rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so.$(SHVER) 26 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/Makefile.win: -------------------------------------------------------------------------------- 1 | #You must ensure nmake.exe, cl.exe, link.exe are in system path. 2 | #VCVARS32.bat 3 | #Under dosbox prompt 4 | #nmake -f Makefile.win 5 | 6 | ########################################## 7 | CXX = cl.exe 8 | CFLAGS = /nologo /O2 /EHsc /I. /D _WIN32 /D _CRT_SECURE_NO_DEPRECATE 9 | TARGET = windows 10 | 11 | all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib 12 | 13 | $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj 14 | $(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe 15 | 16 | $(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj 17 | $(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe 18 | 19 | $(TARGET)\svm-scale.exe: svm.h svm-scale.c 20 | $(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe 21 | 22 | $(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp 23 | $(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe 24 | 25 | svm.obj: svm.cpp svm.h 26 | $(CXX) $(CFLAGS) -c svm.cpp 27 | 28 | lib: svm.cpp svm.h svm.def 29 | $(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def 30 | 31 | clean: 32 | -erase /Q *.obj $(TARGET)\. 
33 | 34 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/Makefile: -------------------------------------------------------------------------------- 1 | .SUFFIXES: .class .java 2 | FILES = libsvm/svm.class libsvm/svm_model.class libsvm/svm_node.class \ 3 | libsvm/svm_parameter.class libsvm/svm_problem.class \ 4 | libsvm/svm_print_interface.class \ 5 | svm_train.class svm_predict.class svm_toy.class svm_scale.class 6 | 7 | #JAVAC = jikes 8 | JAVAC_FLAGS = -target 1.5 -source 1.5 9 | JAVAC = javac 10 | # JAVAC_FLAGS = 11 | 12 | all: $(FILES) 13 | jar cvf libsvm.jar *.class libsvm/*.class 14 | 15 | .java.class: 16 | $(JAVAC) $(JAVAC_FLAGS) $< 17 | 18 | libsvm/svm.java: libsvm/svm.m4 19 | m4 libsvm/svm.m4 > libsvm/svm.java 20 | 21 | clean: 22 | rm -f libsvm/*.class *.class *.jar libsvm/*~ *~ libsvm/svm.java 23 | 24 | dist: clean all 25 | rm *.class libsvm/*.class 26 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/java/libsvm.jar -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm/svm_model.java: -------------------------------------------------------------------------------- 1 | // 2 | // svm_model 3 | // 4 | package libsvm; 5 | public class svm_model implements java.io.Serializable 6 | { 7 | public svm_parameter param; // parameter 8 | public int nr_class; // number of classes, = 2 in regression/one class svm 9 | public int l; // total #SV 10 | public svm_node[][] SV; // SVs (SV[l]) 11 | public double[][] sv_coef; // coefficients for SVs in decision functions (sv_coef[k-1][l]) 12 | public double[] rho; // constants in decision functions (rho[k*(k-1)/2]) 13 | public double[] probA; // pariwise probability information 14 | public double[] probB; 15 | public int[] sv_indices; // sv_indices[0,...,nSV-1] are values in [1,...,num_traning_data] to indicate SVs in the training set 16 | 17 | // for classification only 18 | 19 | public int[] label; // label of each class (label[k]) 20 | public int[] nSV; // number of SVs for each class (nSV[k]) 21 | // nSV[0] + nSV[1] + ... 
+ nSV[k-1] = l 22 | }; 23 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm/svm_node.java: -------------------------------------------------------------------------------- 1 | package libsvm; 2 | public class svm_node implements java.io.Serializable 3 | { 4 | public int index; 5 | public double value; 6 | } 7 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm/svm_parameter.java: -------------------------------------------------------------------------------- 1 | package libsvm; 2 | public class svm_parameter implements Cloneable,java.io.Serializable 3 | { 4 | /* svm_type */ 5 | public static final int C_SVC = 0; 6 | public static final int NU_SVC = 1; 7 | public static final int ONE_CLASS = 2; 8 | public static final int EPSILON_SVR = 3; 9 | public static final int NU_SVR = 4; 10 | 11 | /* kernel_type */ 12 | public static final int LINEAR = 0; 13 | public static final int POLY = 1; 14 | public static final int RBF = 2; 15 | public static final int SIGMOID = 3; 16 | public static final int PRECOMPUTED = 4; 17 | 18 | public int svm_type; 19 | public int kernel_type; 20 | public int degree; // for poly 21 | public double gamma; // for poly/rbf/sigmoid 22 | public double coef0; // for poly/sigmoid 23 | 24 | // these are for training only 25 | public double cache_size; // in MB 26 | public double eps; // stopping criteria 27 | public double C; // for C_SVC, EPSILON_SVR and NU_SVR 28 | public int nr_weight; // for C_SVC 29 | public int[] weight_label; // for C_SVC 30 | public double[] weight; // for C_SVC 31 | public double nu; // for NU_SVC, ONE_CLASS, and NU_SVR 32 | public double p; // for EPSILON_SVR 33 | public int shrinking; // use the shrinking heuristics 34 | public int probability; // do probability estimates 35 | 36 | public Object clone() 37 | { 38 | try 39 | { 40 | return super.clone(); 41 | } catch (CloneNotSupportedException e) 42 | { 43 | return null; 44 | } 45 | } 46 | 47 | } 48 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm/svm_print_interface.java: -------------------------------------------------------------------------------- 1 | package libsvm; 2 | public interface svm_print_interface 3 | { 4 | public void print(String s); 5 | } 6 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/libsvm/svm_problem.java: -------------------------------------------------------------------------------- 1 | package libsvm; 2 | public class svm_problem implements java.io.Serializable 3 | { 4 | public int l; 5 | public double[] y; 6 | public svm_node[][] x; 7 | } 8 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/svm_predict.java: -------------------------------------------------------------------------------- 1 | import libsvm.*; 2 | import java.io.*; 3 | import java.util.*; 4 | 5 | class svm_predict { 6 | private static svm_print_interface svm_print_null = new svm_print_interface() 7 | { 8 | public void print(String s) {} 9 | }; 10 | 11 | private static svm_print_interface svm_print_stdout = new svm_print_interface() 12 | { 13 | public void print(String s) 14 | { 15 | System.out.print(s); 16 | } 17 | }; 18 | 19 | private static svm_print_interface svm_print_string = svm_print_stdout; 20 | 21 | static void info(String s) 22 | { 23 | svm_print_string.print(s); 24 
| } 25 | 26 | private static double atof(String s) 27 | { 28 | return Double.valueOf(s).doubleValue(); 29 | } 30 | 31 | private static int atoi(String s) 32 | { 33 | return Integer.parseInt(s); 34 | } 35 | 36 | private static void predict(BufferedReader input, DataOutputStream output, svm_model model, int predict_probability) throws IOException 37 | { 38 | int correct = 0; 39 | int total = 0; 40 | double error = 0; 41 | double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0; 42 | 43 | int svm_type=svm.svm_get_svm_type(model); 44 | int nr_class=svm.svm_get_nr_class(model); 45 | double[] prob_estimates=null; 46 | 47 | if(predict_probability == 1) 48 | { 49 | if(svm_type == svm_parameter.EPSILON_SVR || 50 | svm_type == svm_parameter.NU_SVR) 51 | { 52 | svm_predict.info("Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma="+svm.svm_get_svr_probability(model)+"\n"); 53 | } 54 | else 55 | { 56 | int[] labels=new int[nr_class]; 57 | svm.svm_get_labels(model,labels); 58 | prob_estimates = new double[nr_class]; 59 | output.writeBytes("labels"); 60 | for(int j=0;j=argv.length-2) 155 | exit_with_help(); 156 | try 157 | { 158 | BufferedReader input = new BufferedReader(new FileReader(argv[i])); 159 | DataOutputStream output = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(argv[i+2]))); 160 | svm_model model = svm.svm_load_model(argv[i+1]); 161 | if (model == null) 162 | { 163 | System.err.print("can't open model file "+argv[i+1]+"\n"); 164 | System.exit(1); 165 | } 166 | if(predict_probability == 1) 167 | { 168 | if(svm.svm_check_probability_model(model)==0) 169 | { 170 | System.err.print("Model does not support probabiliy estimates\n"); 171 | System.exit(1); 172 | } 173 | } 174 | else 175 | { 176 | if(svm.svm_check_probability_model(model)!=0) 177 | { 178 | svm_predict.info("Model supports probability estimates, but disabled in prediction.\n"); 179 | } 180 | } 181 | predict(input,output,model,predict_probability); 182 | input.close(); 183 | output.close(); 184 | } 185 | catch(FileNotFoundException e) 186 | { 187 | exit_with_help(); 188 | } 189 | catch(ArrayIndexOutOfBoundsException e) 190 | { 191 | exit_with_help(); 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/java/test_applet.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile is used under Linux 2 | 3 | MATLABDIR ?= /usr/local/matlab 4 | # for Mac 5 | # MATLABDIR ?= /opt/local/matlab 6 | 7 | CXX ?= g++ 8 | #CXX = g++-4.1 9 | CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I.. 
10 | 11 | MEX = $(MATLABDIR)/bin/mex 12 | MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)" 13 | # comment the following line if you use MATLAB on 32-bit computer 14 | MEX_OPTION += -largeArrayDims 15 | MEX_EXT = $(shell $(MATLABDIR)/bin/mexext) 16 | 17 | all: matlab 18 | 19 | matlab: binary 20 | 21 | octave: 22 | @echo "please type make under Octave" 23 | 24 | binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT) 25 | 26 | svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o 27 | $(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o 28 | 29 | svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o 30 | $(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o 31 | 32 | libsvmread.$(MEX_EXT): libsvmread.c 33 | $(MEX) $(MEX_OPTION) libsvmread.c 34 | 35 | libsvmwrite.$(MEX_EXT): libsvmwrite.c 36 | $(MEX) $(MEX_OPTION) libsvmwrite.c 37 | 38 | svm_model_matlab.o: svm_model_matlab.c ../svm.h 39 | $(CXX) $(CFLAGS) -c svm_model_matlab.c 40 | 41 | ../svm.o: ../svm.cpp ../svm.h 42 | make -C .. svm.o 43 | 44 | clean: 45 | rm -f *~ *.o *.mex* *.obj ../svm.o 46 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/libsvmread.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "mex.h" 8 | 9 | #ifdef MX_API_VER 10 | #if MX_API_VER < 0x07030000 11 | typedef int mwIndex; 12 | #endif 13 | #endif 14 | #ifndef max 15 | #define max(x,y) (((x)>(y))?(x):(y)) 16 | #endif 17 | #ifndef min 18 | #define min(x,y) (((x)<(y))?(x):(y)) 19 | #endif 20 | 21 | void exit_with_help() 22 | { 23 | mexPrintf( 24 | "Usage: [label_vector, instance_matrix] = libsvmread('filename');\n" 25 | ); 26 | } 27 | 28 | static void fake_answer(int nlhs, mxArray *plhs[]) 29 | { 30 | int i; 31 | for(i=0;i start from 0 86 | strtok(line," \t"); // label 87 | while (1) 88 | { 89 | idx = strtok(NULL,":"); // index:value 90 | val = strtok(NULL," \t"); 91 | if(val == NULL) 92 | break; 93 | 94 | errno = 0; 95 | index = (int) strtol(idx,&endptr,10); 96 | if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index) 97 | { 98 | mexPrintf("Wrong input format at line %d\n",l+1); 99 | fake_answer(nlhs, plhs); 100 | return; 101 | } 102 | else 103 | inst_max_index = index; 104 | 105 | min_index = min(min_index, index); 106 | elements++; 107 | } 108 | max_index = max(max_index, inst_max_index); 109 | l++; 110 | } 111 | rewind(fp); 112 | 113 | // y 114 | plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL); 115 | // x^T 116 | if (min_index <= 0) 117 | plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL); 118 | else 119 | plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL); 120 | 121 | labels = mxGetPr(plhs[0]); 122 | samples = mxGetPr(plhs[1]); 123 | ir = mxGetIr(plhs[1]); 124 | jc = mxGetJc(plhs[1]); 125 | 126 | k=0; 127 | for(i=0;i start from 0 158 | 159 | errno = 0; 160 | samples[k] = strtod(val,&endptr); 161 | if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 162 | { 163 | mexPrintf("Wrong input format at line %d\n",i+1); 164 | fake_answer(nlhs, plhs); 165 | return; 166 | } 167 | ++k; 168 | } 169 | } 170 | jc[l] = k; 171 | 172 | fclose(fp); 173 | free(line); 174 | 175 | { 176 | mxArray *rhs[1], *lhs[1]; 177 | rhs[0] = plhs[1]; 178 | if(mexCallMATLAB(1, lhs, 1, rhs, "transpose")) 179 | { 180 | mexPrintf("Error: cannot 
transpose problem\n"); 181 | fake_answer(nlhs, plhs); 182 | return; 183 | } 184 | plhs[1] = lhs[0]; 185 | } 186 | } 187 | 188 | void mexFunction( int nlhs, mxArray *plhs[], 189 | int nrhs, const mxArray *prhs[] ) 190 | { 191 | char filename[256]; 192 | 193 | if(nrhs != 1 || nlhs != 2) 194 | { 195 | exit_with_help(); 196 | fake_answer(nlhs, plhs); 197 | return; 198 | } 199 | 200 | mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1); 201 | 202 | if(filename == NULL) 203 | { 204 | mexPrintf("Error: filename is NULL\n"); 205 | return; 206 | } 207 | 208 | read_problem(filename, nlhs, plhs); 209 | 210 | return; 211 | } 212 | 213 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/libsvmread.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/matlab/libsvmread.mexmaci64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/libsvmwrite.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "mex.h" 5 | 6 | #ifdef MX_API_VER 7 | #if MX_API_VER < 0x07030000 8 | typedef int mwIndex; 9 | #endif 10 | #endif 11 | 12 | void exit_with_help() 13 | { 14 | mexPrintf( 15 | "Usage: libsvmwrite('filename', label_vector, instance_matrix);\n" 16 | ); 17 | } 18 | 19 | static void fake_answer(int nlhs, mxArray *plhs[]) 20 | { 21 | int i; 22 | for(i=0;i 0) 88 | { 89 | exit_with_help(); 90 | fake_answer(nlhs, plhs); 91 | return; 92 | } 93 | 94 | // Transform the input Matrix to libsvm format 95 | if(nrhs == 3) 96 | { 97 | char filename[256]; 98 | if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2])) 99 | { 100 | mexPrintf("Error: label vector and instance matrix must be double\n"); 101 | return; 102 | } 103 | 104 | mxGetString(prhs[0], filename, mxGetN(prhs[0])+1); 105 | 106 | if(mxIsSparse(prhs[2])) 107 | libsvmwrite(filename, prhs[1], prhs[2]); 108 | else 109 | { 110 | mexPrintf("Instance_matrix must be sparse\n"); 111 | return; 112 | } 113 | } 114 | else 115 | { 116 | exit_with_help(); 117 | return; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/libsvmwrite.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/matlab/libsvmwrite.mexmaci64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/make.m: -------------------------------------------------------------------------------- 1 | % This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix 2 | 3 | try 4 | Type = ver; 5 | % This part is for OCTAVE 6 | if(strcmp(Type(1).Name, 'Octave') == 1) 7 | mex libsvmread.c 8 | mex libsvmwrite.c 9 | mex svmtrain.c ../svm.cpp svm_model_matlab.c 10 | mex svmpredict.c ../svm.cpp svm_model_matlab.c 11 | % This part is for MATLAB 12 | % Add -largeArrayDims on 64-bit machines of MATLAB 13 | else 14 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c 15 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c 16 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c 17 | mex CFLAGS="\$CFLAGS -std=c99" 
-largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c 18 | end 19 | catch 20 | fprintf('If make.m fails, please check README about detailed instructions.\n'); 21 | end 22 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/svm_model_matlab.h: -------------------------------------------------------------------------------- 1 | const char *model_to_matlab_structure(mxArray *plhs[], int num_of_feature, struct svm_model *model); 2 | struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message); 3 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/svmpredict.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/matlab/svmpredict.mexmaci64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/matlab/svmtrain.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/matlab/svmtrain.mexmaci64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/python/Makefile: -------------------------------------------------------------------------------- 1 | all = lib 2 | 3 | lib: 4 | make -C .. lib 5 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-predict.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "svm.h" 7 | 8 | int print_null(const char *s,...) {return 0;} 9 | 10 | static int (*info)(const char *fmt,...) = &printf; 11 | 12 | struct svm_node *x; 13 | int max_nr_attr = 64; 14 | 15 | struct svm_model* model; 16 | int predict_probability=0; 17 | 18 | static char *line = NULL; 19 | static int max_line_len; 20 | 21 | static char* readline(FILE *input) 22 | { 23 | int len; 24 | 25 | if(fgets(line,max_line_len,input) == NULL) 26 | return NULL; 27 | 28 | while(strrchr(line,'\n') == NULL) 29 | { 30 | max_line_len *= 2; 31 | line = (char *) realloc(line,max_line_len); 32 | len = (int) strlen(line); 33 | if(fgets(line+len,max_line_len-len,input) == NULL) 34 | break; 35 | } 36 | return line; 37 | } 38 | 39 | void exit_input_error(int line_num) 40 | { 41 | fprintf(stderr,"Wrong input format at line %d\n", line_num); 42 | exit(1); 43 | } 44 | 45 | void predict(FILE *input, FILE *output) 46 | { 47 | int correct = 0; 48 | int total = 0; 49 | double error = 0; 50 | double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0; 51 | 52 | int svm_type=svm_get_svm_type(model); 53 | int nr_class=svm_get_nr_class(model); 54 | double *prob_estimates=NULL; 55 | int j; 56 | 57 | if(predict_probability) 58 | { 59 | if (svm_type==NU_SVR || svm_type==EPSILON_SVR) 60 | info("Prob. 
model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model)); 61 | else 62 | { 63 | int *labels=(int *) malloc(nr_class*sizeof(int)); 64 | svm_get_labels(model,labels); 65 | prob_estimates = (double *) malloc(nr_class*sizeof(double)); 66 | fprintf(output,"labels"); 67 | for(j=0;j start from 0 82 | 83 | label = strtok(line," \t\n"); 84 | if(label == NULL) // empty line 85 | exit_input_error(total+1); 86 | 87 | target_label = strtod(label,&endptr); 88 | if(endptr == label || *endptr != '\0') 89 | exit_input_error(total+1); 90 | 91 | while(1) 92 | { 93 | if(i>=max_nr_attr-1) // need one more for index = -1 94 | { 95 | max_nr_attr *= 2; 96 | x = (struct svm_node *) realloc(x,max_nr_attr*sizeof(struct svm_node)); 97 | } 98 | 99 | idx = strtok(NULL,":"); 100 | val = strtok(NULL," \t"); 101 | 102 | if(val == NULL) 103 | break; 104 | errno = 0; 105 | x[i].index = (int) strtol(idx,&endptr,10); 106 | if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index) 107 | exit_input_error(total+1); 108 | else 109 | inst_max_index = x[i].index; 110 | 111 | errno = 0; 112 | x[i].value = strtod(val,&endptr); 113 | if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 114 | exit_input_error(total+1); 115 | 116 | ++i; 117 | } 118 | x[i].index = -1; 119 | 120 | if (predict_probability && (svm_type==C_SVC || svm_type==NU_SVC)) 121 | { 122 | predict_label = svm_predict_probability(model,x,prob_estimates); 123 | fprintf(output,"%g",predict_label); 124 | for(j=0;j=argc-2) 195 | exit_with_help(); 196 | 197 | input = fopen(argv[i],"r"); 198 | if(input == NULL) 199 | { 200 | fprintf(stderr,"can't open input file %s\n",argv[i]); 201 | exit(1); 202 | } 203 | 204 | output = fopen(argv[i+2],"w"); 205 | if(output == NULL) 206 | { 207 | fprintf(stderr,"can't open output file %s\n",argv[i+2]); 208 | exit(1); 209 | } 210 | 211 | if((model=svm_load_model(argv[i+1]))==0) 212 | { 213 | fprintf(stderr,"can't open model file %s\n",argv[i+1]); 214 | exit(1); 215 | } 216 | 217 | x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node)); 218 | if(predict_probability) 219 | { 220 | if(svm_check_probability_model(model)==0) 221 | { 222 | fprintf(stderr,"Model does not support probabiliy estimates\n"); 223 | exit(1); 224 | } 225 | } 226 | else 227 | { 228 | if(svm_check_probability_model(model)!=0) 229 | info("Model supports probability estimates, but disabled in prediction.\n"); 230 | } 231 | 232 | predict(input,output); 233 | svm_free_and_destroy_model(&model); 234 | free(x); 235 | free(line); 236 | fclose(input); 237 | fclose(output); 238 | return 0; 239 | } 240 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/Makefile: -------------------------------------------------------------------------------- 1 | CC? = gcc 2 | CXX? = g++ 3 | CFLAGS = -Wall -O3 -g `pkg-config --cflags gtk+-2.0` 4 | LIBS = `pkg-config --libs gtk+-2.0` 5 | 6 | svm-toy: main.o interface.o callbacks.o ../../svm.o 7 | $(CXX) $(CFLAGS) main.o interface.o callbacks.o ../../svm.o -o svm-toy $(LIBS) 8 | 9 | main.o: main.c 10 | $(CC) $(CFLAGS) -c main.c 11 | 12 | interface.o: interface.c interface.h 13 | $(CC) $(CFLAGS) -c interface.c 14 | 15 | callbacks.o: callbacks.cpp callbacks.h 16 | $(CXX) $(CFLAGS) -c callbacks.cpp 17 | 18 | ../../svm.o: ../../svm.cpp ../../svm.h 19 | make -C ../.. 
svm.o 20 | 21 | clean: 22 | rm -f *~ callbacks.o svm-toy main.o interface.o callbacks.o ../../svm.o 23 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/callbacks.h: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef __cplusplus 4 | extern "C" { 5 | #endif 6 | 7 | void 8 | on_window1_destroy (GtkObject *object, 9 | gpointer user_data); 10 | 11 | gboolean 12 | on_draw_main_button_press_event (GtkWidget *widget, 13 | GdkEventButton *event, 14 | gpointer user_data); 15 | 16 | gboolean 17 | on_draw_main_expose_event (GtkWidget *widget, 18 | GdkEventExpose *event, 19 | gpointer user_data); 20 | 21 | void 22 | on_button_change_clicked (GtkButton *button, 23 | gpointer user_data); 24 | 25 | void 26 | on_button_run_clicked (GtkButton *button, 27 | gpointer user_data); 28 | 29 | void 30 | on_button_clear_clicked (GtkButton *button, 31 | gpointer user_data); 32 | 33 | void 34 | on_button_save_clicked (GtkButton *button, 35 | gpointer user_data); 36 | 37 | void 38 | on_button_load_clicked (GtkButton *button, 39 | gpointer user_data); 40 | 41 | void 42 | on_fileselection_destroy (GtkObject *object, 43 | gpointer user_data); 44 | 45 | void 46 | on_filesel_ok_clicked (GtkButton *button, 47 | gpointer user_data); 48 | 49 | void 50 | on_filesel_cancel_clicked (GtkButton *button, 51 | gpointer user_data); 52 | #ifdef __cplusplus 53 | } 54 | #endif 55 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/interface.c: -------------------------------------------------------------------------------- 1 | /* 2 | * DO NOT EDIT THIS FILE - it is generated by Glade. 3 | */ 4 | 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #include 11 | #include 12 | 13 | #include "callbacks.h" 14 | #include "interface.h" 15 | 16 | GtkWidget* 17 | create_window (void) 18 | { 19 | GtkWidget *window; 20 | GtkWidget *vbox1; 21 | extern GtkWidget *draw_main; 22 | GtkWidget *hbox1; 23 | GtkWidget *button_change; 24 | GtkWidget *button_run; 25 | GtkWidget *button_clear; 26 | GtkWidget *button_save; 27 | GtkWidget *button_load; 28 | extern GtkWidget *entry_option; 29 | 30 | window = gtk_window_new (GTK_WINDOW_TOPLEVEL); 31 | gtk_object_set_data (GTK_OBJECT (window), "window", window); 32 | gtk_window_set_title (GTK_WINDOW (window), "SVM Toy"); 33 | 34 | vbox1 = gtk_vbox_new (FALSE, 0); 35 | gtk_widget_ref (vbox1); 36 | gtk_object_set_data_full (GTK_OBJECT (window), "vbox1", vbox1, 37 | (GtkDestroyNotify) gtk_widget_unref); 38 | gtk_widget_show (vbox1); 39 | gtk_container_add (GTK_CONTAINER (window), vbox1); 40 | 41 | draw_main = gtk_drawing_area_new (); 42 | gtk_widget_ref (draw_main); 43 | gtk_object_set_data_full (GTK_OBJECT (window), "draw_main", draw_main, 44 | (GtkDestroyNotify) gtk_widget_unref); 45 | gtk_widget_show (draw_main); 46 | gtk_box_pack_start (GTK_BOX (vbox1), draw_main, TRUE, TRUE, 0); 47 | gtk_widget_set_usize (draw_main, 500, 500); 48 | gtk_widget_set_events (draw_main, GDK_EXPOSURE_MASK | GDK_BUTTON_PRESS_MASK); 49 | 50 | hbox1 = gtk_hbox_new (FALSE, 0); 51 | gtk_widget_ref (hbox1); 52 | gtk_object_set_data_full (GTK_OBJECT (window), "hbox1", hbox1, 53 | (GtkDestroyNotify) gtk_widget_unref); 54 | gtk_widget_show (hbox1); 55 | gtk_box_pack_start (GTK_BOX (vbox1), hbox1, FALSE, FALSE, 0); 56 | 57 | button_change = gtk_button_new_with_label ("Change"); 58 | gtk_widget_ref (button_change); 59 | 
gtk_object_set_data_full (GTK_OBJECT (window), "button_change", button_change, 60 | (GtkDestroyNotify) gtk_widget_unref); 61 | gtk_widget_show (button_change); 62 | gtk_box_pack_start (GTK_BOX (hbox1), button_change, FALSE, FALSE, 0); 63 | 64 | button_run = gtk_button_new_with_label ("Run"); 65 | gtk_widget_ref (button_run); 66 | gtk_object_set_data_full (GTK_OBJECT (window), "button_run", button_run, 67 | (GtkDestroyNotify) gtk_widget_unref); 68 | gtk_widget_show (button_run); 69 | gtk_box_pack_start (GTK_BOX (hbox1), button_run, FALSE, FALSE, 0); 70 | 71 | button_clear = gtk_button_new_with_label ("Clear"); 72 | gtk_widget_ref (button_clear); 73 | gtk_object_set_data_full (GTK_OBJECT (window), "button_clear", button_clear, 74 | (GtkDestroyNotify) gtk_widget_unref); 75 | gtk_widget_show (button_clear); 76 | gtk_box_pack_start (GTK_BOX (hbox1), button_clear, FALSE, FALSE, 0); 77 | 78 | button_save = gtk_button_new_with_label ("Save"); 79 | gtk_widget_ref (button_save); 80 | gtk_object_set_data_full (GTK_OBJECT (window), "button_save", button_save, 81 | (GtkDestroyNotify) gtk_widget_unref); 82 | gtk_widget_show (button_save); 83 | gtk_box_pack_start (GTK_BOX (hbox1), button_save, FALSE, FALSE, 0); 84 | 85 | button_load = gtk_button_new_with_label ("Load"); 86 | gtk_widget_ref (button_load); 87 | gtk_object_set_data_full (GTK_OBJECT (window), "button_load", button_load, 88 | (GtkDestroyNotify) gtk_widget_unref); 89 | gtk_widget_show (button_load); 90 | gtk_box_pack_start (GTK_BOX (hbox1), button_load, FALSE, FALSE, 0); 91 | 92 | entry_option = gtk_entry_new (); 93 | gtk_widget_ref (entry_option); 94 | gtk_object_set_data_full (GTK_OBJECT (window), "entry_option", entry_option, 95 | (GtkDestroyNotify) gtk_widget_unref); 96 | gtk_widget_show (entry_option); 97 | gtk_box_pack_start (GTK_BOX (hbox1), entry_option, TRUE, TRUE, 0); 98 | 99 | gtk_signal_connect (GTK_OBJECT (window), "destroy", 100 | GTK_SIGNAL_FUNC (on_window1_destroy), 101 | NULL); 102 | gtk_signal_connect (GTK_OBJECT (draw_main), "button_press_event", 103 | GTK_SIGNAL_FUNC (on_draw_main_button_press_event), 104 | NULL); 105 | gtk_signal_connect (GTK_OBJECT (draw_main), "expose_event", 106 | GTK_SIGNAL_FUNC (on_draw_main_expose_event), 107 | NULL); 108 | gtk_signal_connect (GTK_OBJECT (button_change), "clicked", 109 | GTK_SIGNAL_FUNC (on_button_change_clicked), 110 | NULL); 111 | gtk_signal_connect (GTK_OBJECT (button_run), "clicked", 112 | GTK_SIGNAL_FUNC (on_button_run_clicked), 113 | NULL); 114 | gtk_signal_connect (GTK_OBJECT (button_clear), "clicked", 115 | GTK_SIGNAL_FUNC (on_button_clear_clicked), 116 | NULL); 117 | gtk_signal_connect (GTK_OBJECT (button_save), "clicked", 118 | GTK_SIGNAL_FUNC (on_button_save_clicked), 119 | NULL); 120 | gtk_signal_connect (GTK_OBJECT (button_load), "clicked", 121 | GTK_SIGNAL_FUNC (on_button_load_clicked), 122 | NULL); 123 | gtk_signal_connect (GTK_OBJECT (entry_option), "activate", 124 | GTK_SIGNAL_FUNC (on_button_run_clicked), 125 | NULL); 126 | 127 | return window; 128 | } 129 | 130 | GtkWidget* 131 | create_fileselection (void) 132 | { 133 | GtkWidget *fileselection; 134 | GtkWidget *filesel_ok; 135 | GtkWidget *filesel_cancel; 136 | 137 | fileselection = gtk_file_selection_new ("Select File"); 138 | gtk_object_set_data (GTK_OBJECT (fileselection), "fileselection", fileselection); 139 | gtk_container_set_border_width (GTK_CONTAINER (fileselection), 10); 140 | gtk_window_set_modal (GTK_WINDOW (fileselection), TRUE); 141 | 142 | filesel_ok = GTK_FILE_SELECTION (fileselection)->ok_button; 
143 | gtk_object_set_data (GTK_OBJECT (fileselection), "filesel_ok", filesel_ok); 144 | gtk_widget_show (filesel_ok); 145 | GTK_WIDGET_SET_FLAGS (filesel_ok, GTK_CAN_DEFAULT); 146 | 147 | filesel_cancel = GTK_FILE_SELECTION (fileselection)->cancel_button; 148 | gtk_object_set_data (GTK_OBJECT (fileselection), "filesel_cancel", filesel_cancel); 149 | gtk_widget_show (filesel_cancel); 150 | GTK_WIDGET_SET_FLAGS (filesel_cancel, GTK_CAN_DEFAULT); 151 | 152 | gtk_signal_connect (GTK_OBJECT (fileselection), "destroy", 153 | GTK_SIGNAL_FUNC (on_fileselection_destroy), 154 | NULL); 155 | gtk_signal_connect (GTK_OBJECT (filesel_ok), "clicked", 156 | GTK_SIGNAL_FUNC (on_filesel_ok_clicked), 157 | NULL); 158 | gtk_signal_connect (GTK_OBJECT (filesel_cancel), "clicked", 159 | GTK_SIGNAL_FUNC (on_filesel_cancel_clicked), 160 | NULL); 161 | 162 | return fileselection; 163 | } 164 | 165 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/interface.h: -------------------------------------------------------------------------------- 1 | /* 2 | * DO NOT EDIT THIS FILE - it is generated by Glade. 3 | */ 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | GtkWidget* create_window (void); 10 | GtkWidget* create_fileselection (void); 11 | 12 | #ifdef __cplusplus 13 | } 14 | #endif 15 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/main.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Initial main.c file generated by Glade. Edit as required. 3 | * Glade will not overwrite this file. 4 | */ 5 | 6 | #include 7 | #include "interface.h" 8 | void svm_toy_initialize(); 9 | 10 | int main (int argc, char *argv[]) 11 | { 12 | GtkWidget *window; 13 | 14 | gtk_set_locale (); 15 | gtk_init (&argc, &argv); 16 | 17 | window = create_window (); 18 | gtk_widget_show (window); 19 | 20 | svm_toy_initialize(); 21 | gtk_main (); 22 | return 0; 23 | } 24 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/gtk/svm-toy.glade: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | svm-toy 6 | svm-toy 7 | 8 | src 9 | pixmaps 10 | C 11 | False 12 | False 13 | False 14 | True 15 | True 16 | True 17 | False 18 | interface.c 19 | interface.h 20 | callbacks.c 21 | callbacks.h 22 | support.c 23 | support.h 24 | 25 | 26 | 27 | 28 | GtkWindow 29 | window 30 | 31 | destroy 32 | on_window1_destroy 33 | Sun, 16 Apr 2000 09:47:10 GMT 34 | 35 | SVM Toy 36 | GTK_WINDOW_TOPLEVEL 37 | GTK_WIN_POS_NONE 38 | False 39 | False 40 | True 41 | False 42 | 43 | 44 | GtkVBox 45 | vbox1 46 | False 47 | 0 48 | 49 | 50 | GtkDrawingArea 51 | draw_main 52 | 500 53 | 500 54 | GDK_EXPOSURE_MASK | GDK_BUTTON_PRESS_MASK 55 | 56 | button_press_event 57 | on_draw_main_button_press_event 58 | Sun, 16 Apr 2000 13:02:05 GMT 59 | 60 | 61 | expose_event 62 | on_draw_main_expose_event 63 | Sun, 16 Apr 2000 14:27:05 GMT 64 | 65 | 66 | 0 67 | True 68 | True 69 | 70 | 71 | 72 | 73 | GtkHBox 74 | hbox1 75 | False 76 | 0 77 | 78 | 0 79 | False 80 | False 81 | 82 | 83 | 84 | GtkButton 85 | button_change 86 | True 87 | 88 | clicked 89 | on_button_change_clicked 90 | Sun, 16 Apr 2000 09:40:18 GMT 91 | 92 | 93 | 94 | 0 95 | False 96 | False 97 | 98 | 99 | 100 | 101 | GtkButton 102 | button_run 103 | True 104 | 105 | clicked 106 | on_button_run_clicked 107 | Sun, 16 Apr 2000 09:40:37 
GMT 108 | 109 | 110 | 111 | 0 112 | False 113 | False 114 | 115 | 116 | 117 | 118 | GtkButton 119 | button_clear 120 | True 121 | 122 | clicked 123 | on_button_clear_clicked 124 | Sun, 16 Apr 2000 09:40:44 GMT 125 | 126 | 127 | 128 | 0 129 | False 130 | False 131 | 132 | 133 | 134 | 135 | GtkButton 136 | button_save 137 | True 138 | 139 | clicked 140 | on_button_save_clicked 141 | Fri, 16 Jun 2000 18:23:46 GMT 142 | 143 | 144 | 145 | 0 146 | False 147 | False 148 | 149 | 150 | 151 | 152 | GtkButton 153 | button_load 154 | True 155 | 156 | clicked 157 | on_button_load_clicked 158 | Fri, 16 Jun 2000 18:23:56 GMT 159 | 160 | 161 | 162 | 0 163 | False 164 | False 165 | 166 | 167 | 168 | 169 | GtkEntry 170 | entry_option 171 | True 172 | 173 | activate 174 | on_button_run_clicked 175 | Sun, 16 Apr 2000 09:42:46 GMT 176 | 177 | True 178 | True 179 | 0 180 | 181 | 182 | 0 183 | True 184 | True 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | GtkFileSelection 193 | fileselection 194 | 10 195 | 196 | destroy 197 | on_fileselection_destroy 198 | Fri, 16 Jun 2000 18:11:28 GMT 199 | 200 | Select File 201 | GTK_WINDOW_TOPLEVEL 202 | GTK_WIN_POS_NONE 203 | True 204 | False 205 | True 206 | False 207 | True 208 | 209 | 210 | GtkButton 211 | FileSel:ok_button 212 | filesel_ok 213 | True 214 | True 215 | 216 | clicked 217 | on_filesel_ok_clicked 218 | Fri, 16 Jun 2000 18:09:56 GMT 219 | 220 | 221 | 222 | 223 | 224 | GtkButton 225 | FileSel:cancel_button 226 | filesel_cancel 227 | True 228 | True 229 | 230 | clicked 231 | on_filesel_cancel_clicked 232 | Fri, 16 Jun 2000 18:09:46 GMT 233 | 234 | 235 | 236 | 237 | 238 | 239 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm-toy/qt/Makefile: -------------------------------------------------------------------------------- 1 | CXX? = g++ 2 | CFLAGS = -Wall -O3 -I$(INCLUDE) -I$(INCLUDE)/QtGui -lQtGui 3 | INCLUDE = /usr/include/qt4 4 | MOC = /usr/bin/moc-qt4 5 | 6 | svm-toy: svm-toy.cpp svm-toy.moc ../../svm.o 7 | $(CXX) $(CFLAGS) svm-toy.cpp ../../svm.o -o svm-toy 8 | 9 | svm-toy.moc: svm-toy.cpp 10 | $(MOC) svm-toy.cpp -o svm-toy.moc 11 | 12 | ../../svm.o: ../../svm.cpp ../../svm.h 13 | make -C ../.. 
svm.o 14 | 15 | clean: 16 | rm -f *~ svm-toy svm-toy.moc ../../svm.o 17 | 18 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm.def: -------------------------------------------------------------------------------- 1 | LIBRARY libsvm 2 | EXPORTS 3 | svm_train @1 4 | svm_cross_validation @2 5 | svm_save_model @3 6 | svm_load_model @4 7 | svm_get_svm_type @5 8 | svm_get_nr_class @6 9 | svm_get_labels @7 10 | svm_get_svr_probability @8 11 | svm_predict_values @9 12 | svm_predict @10 13 | svm_predict_probability @11 14 | svm_free_model_content @12 15 | svm_free_and_destroy_model @13 16 | svm_destroy_param @14 17 | svm_check_parameter @15 18 | svm_check_probability_model @16 19 | svm_set_print_string_function @17 20 | svm_get_sv_indices @18 21 | svm_get_nr_sv @19 22 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/svm.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIBSVM_H 2 | #define _LIBSVM_H 3 | 4 | #define LIBSVM_VERSION 320 5 | 6 | #ifdef __cplusplus 7 | extern "C" { 8 | #endif 9 | 10 | extern int libsvm_version; 11 | 12 | struct svm_node 13 | { 14 | int index; 15 | double value; 16 | }; 17 | 18 | struct svm_problem 19 | { 20 | int l; 21 | double *y; 22 | struct svm_node **x; 23 | }; 24 | 25 | enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR }; /* svm_type */ 26 | enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */ 27 | 28 | struct svm_parameter 29 | { 30 | int svm_type; 31 | int kernel_type; 32 | int degree; /* for poly */ 33 | double gamma; /* for poly/rbf/sigmoid */ 34 | double coef0; /* for poly/sigmoid */ 35 | 36 | /* these are for training only */ 37 | double cache_size; /* in MB */ 38 | double eps; /* stopping criteria */ 39 | double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ 40 | int nr_weight; /* for C_SVC */ 41 | int *weight_label; /* for C_SVC */ 42 | double* weight; /* for C_SVC */ 43 | double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ 44 | double p; /* for EPSILON_SVR */ 45 | int shrinking; /* use the shrinking heuristics */ 46 | int probability; /* do probability estimates */ 47 | }; 48 | 49 | // 50 | // svm_model 51 | // 52 | struct svm_model 53 | { 54 | struct svm_parameter param; /* parameter */ 55 | int nr_class; /* number of classes, = 2 in regression/one class svm */ 56 | int l; /* total #SV */ 57 | struct svm_node **SV; /* SVs (SV[l]) */ 58 | double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */ 59 | double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */ 60 | double *probA; /* pariwise probability information */ 61 | double *probB; 62 | int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_traning_data] to indicate SVs in the training set */ 63 | 64 | /* for classification only */ 65 | 66 | int *label; /* label of each class (label[k]) */ 67 | int *nSV; /* number of SVs for each class (nSV[k]) */ 68 | /* nSV[0] + nSV[1] + ... 
+ nSV[k-1] = l */ 69 | /* XXX */ 70 | int free_sv; /* 1 if svm_model is created by svm_load_model*/ 71 | /* 0 if svm_model is created by svm_train */ 72 | }; 73 | 74 | struct svm_model *svm_train(const struct svm_problem *prob, const struct svm_parameter *param); 75 | void svm_cross_validation(const struct svm_problem *prob, const struct svm_parameter *param, int nr_fold, double *target); 76 | 77 | int svm_save_model(const char *model_file_name, const struct svm_model *model); 78 | struct svm_model *svm_load_model(const char *model_file_name); 79 | 80 | int svm_get_svm_type(const struct svm_model *model); 81 | int svm_get_nr_class(const struct svm_model *model); 82 | void svm_get_labels(const struct svm_model *model, int *label); 83 | void svm_get_sv_indices(const struct svm_model *model, int *sv_indices); 84 | int svm_get_nr_sv(const struct svm_model *model); 85 | double svm_get_svr_probability(const struct svm_model *model); 86 | 87 | double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values); 88 | double svm_predict(const struct svm_model *model, const struct svm_node *x); 89 | double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates); 90 | 91 | void svm_free_model_content(struct svm_model *model_ptr); 92 | void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr); 93 | void svm_destroy_param(struct svm_parameter *param); 94 | 95 | const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param); 96 | int svm_check_probability_model(const struct svm_model *model); 97 | 98 | void svm_set_print_string_function(void (*print_func)(const char *)); 99 | 100 | #ifdef __cplusplus 101 | } 102 | #endif 103 | 104 | #endif /* _LIBSVM_H */ 105 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/tools/checkdata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # 4 | # A format checker for LIBSVM 5 | # 6 | 7 | # 8 | # Copyright (c) 2007, Rong-En Fan 9 | # 10 | # All rights reserved. 11 | # 12 | # This program is distributed under the same license of the LIBSVM package. 
13 | # 14 | 15 | from sys import argv, exit 16 | import os.path 17 | 18 | def err(line_no, msg): 19 | print("line {0}: {1}".format(line_no, msg)) 20 | 21 | # works like float() but does not accept nan and inf 22 | def my_float(x): 23 | if x.lower().find("nan") != -1 or x.lower().find("inf") != -1: 24 | raise ValueError 25 | 26 | return float(x) 27 | 28 | def main(): 29 | if len(argv) != 2: 30 | print("Usage: {0} dataset".format(argv[0])) 31 | exit(1) 32 | 33 | dataset = argv[1] 34 | 35 | if not os.path.exists(dataset): 36 | print("dataset {0} not found".format(dataset)) 37 | exit(1) 38 | 39 | line_no = 1 40 | error_line_count = 0 41 | for line in open(dataset, 'r'): 42 | line_error = False 43 | 44 | # each line must end with a newline character 45 | if line[-1] != '\n': 46 | err(line_no, "missing a newline character in the end") 47 | line_error = True 48 | 49 | nodes = line.split() 50 | 51 | # check label 52 | try: 53 | label = nodes.pop(0) 54 | 55 | if label.find(',') != -1: 56 | # multi-label format 57 | try: 58 | for l in label.split(','): 59 | l = my_float(l) 60 | except: 61 | err(line_no, "label {0} is not a valid multi-label form".format(label)) 62 | line_error = True 63 | else: 64 | try: 65 | label = my_float(label) 66 | except: 67 | err(line_no, "label {0} is not a number".format(label)) 68 | line_error = True 69 | except: 70 | err(line_no, "missing label, perhaps an empty line?") 71 | line_error = True 72 | 73 | # check features 74 | prev_index = -1 75 | for i in range(len(nodes)): 76 | try: 77 | (index, value) = nodes[i].split(':') 78 | 79 | index = int(index) 80 | value = my_float(value) 81 | 82 | # precomputed kernel's index starts from 0 and LIBSVM 83 | # checks it. Hence, don't treat index 0 as an error. 84 | if index < 0: 85 | err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i])) 86 | line_error = True 87 | elif index <= prev_index: 88 | err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i])) 89 | line_error = True 90 | prev_index = index 91 | except: 92 | err(line_no, "feature '{0}' not an : pair, integer, real number ".format(nodes[i])) 93 | line_error = True 94 | 95 | line_no += 1 96 | 97 | if line_error: 98 | error_line_count += 1 99 | 100 | if error_line_count > 0: 101 | print("Found {0} lines with error.".format(error_line_count)) 102 | return 1 103 | else: 104 | print("No error.") 105 | return 0 106 | 107 | if __name__ == "__main__": 108 | exit(main()) 109 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/tools/easy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | from subprocess import * 6 | 7 | if len(sys.argv) <= 1: 8 | print('Usage: {0} training_file [testing_file]'.format(sys.argv[0])) 9 | raise SystemExit 10 | 11 | # svm, grid, and gnuplot executable files 12 | 13 | is_win32 = (sys.platform == 'win32') 14 | if not is_win32: 15 | svmscale_exe = "../svm-scale" 16 | svmtrain_exe = "../svm-train" 17 | svmpredict_exe = "../svm-predict" 18 | grid_py = "./grid.py" 19 | gnuplot_exe = "/usr/bin/gnuplot" 20 | else: 21 | # example for windows 22 | svmscale_exe = r"..\windows\svm-scale.exe" 23 | svmtrain_exe = r"..\windows\svm-train.exe" 24 | svmpredict_exe = r"..\windows\svm-predict.exe" 25 | gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe" 26 | grid_py = r".\grid.py" 27 | 28 | assert 
os.path.exists(svmscale_exe),"svm-scale executable not found" 29 | assert os.path.exists(svmtrain_exe),"svm-train executable not found" 30 | assert os.path.exists(svmpredict_exe),"svm-predict executable not found" 31 | assert os.path.exists(gnuplot_exe),"gnuplot executable not found" 32 | assert os.path.exists(grid_py),"grid.py not found" 33 | 34 | train_pathname = sys.argv[1] 35 | assert os.path.exists(train_pathname),"training file not found" 36 | file_name = os.path.split(train_pathname)[1] 37 | scaled_file = file_name + ".scale" 38 | model_file = file_name + ".model" 39 | range_file = file_name + ".range" 40 | 41 | if len(sys.argv) > 2: 42 | test_pathname = sys.argv[2] 43 | file_name = os.path.split(test_pathname)[1] 44 | assert os.path.exists(test_pathname),"testing file not found" 45 | scaled_test_file = file_name + ".scale" 46 | predict_test_file = file_name + ".predict" 47 | 48 | cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file) 49 | print('Scaling training data...') 50 | Popen(cmd, shell = True, stdout = PIPE).communicate() 51 | 52 | cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file) 53 | print('Cross validation...') 54 | f = Popen(cmd, shell = True, stdout = PIPE).stdout 55 | 56 | line = '' 57 | while True: 58 | last_line = line 59 | line = f.readline() 60 | if not line: break 61 | c,g,rate = map(float,last_line.split()) 62 | 63 | print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate)) 64 | 65 | cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file) 66 | print('Training...') 67 | Popen(cmd, shell = True, stdout = PIPE).communicate() 68 | 69 | print('Output model: {0}'.format(model_file)) 70 | if len(sys.argv) > 2: 71 | cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file) 72 | print('Scaling testing data...') 73 | Popen(cmd, shell = True, stdout = PIPE).communicate() 74 | 75 | cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file) 76 | print('Testing...') 77 | Popen(cmd, shell = True).communicate() 78 | 79 | print('Output prediction: {0}'.format(predict_test_file)) 80 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/tools/subset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, sys, math, random 4 | from collections import defaultdict 5 | 6 | if sys.version_info[0] >= 3: 7 | xrange = range 8 | 9 | def exit_with_help(argv): 10 | print("""\ 11 | Usage: {0} [options] dataset subset_size [output1] [output2] 12 | 13 | This script randomly selects a subset of the dataset. 
14 | 15 | options: 16 | -s method : method of selection (default 0) 17 | 0 -- stratified selection (classification only) 18 | 1 -- random selection 19 | 20 | output1 : the subset (optional) 21 | output2 : rest of the data (optional) 22 | If output1 is omitted, the subset will be printed on the screen.""".format(argv[0])) 23 | exit(1) 24 | 25 | def process_options(argv): 26 | argc = len(argv) 27 | if argc < 3: 28 | exit_with_help(argv) 29 | 30 | # default method is stratified selection 31 | method = 0 32 | subset_file = sys.stdout 33 | rest_file = None 34 | 35 | i = 1 36 | while i < argc: 37 | if argv[i][0] != "-": 38 | break 39 | if argv[i] == "-s": 40 | i = i + 1 41 | method = int(argv[i]) 42 | if method not in [0,1]: 43 | print("Unknown selection method {0}".format(method)) 44 | exit_with_help(argv) 45 | i = i + 1 46 | 47 | dataset = argv[i] 48 | subset_size = int(argv[i+1]) 49 | if i+2 < argc: 50 | subset_file = open(argv[i+2],'w') 51 | if i+3 < argc: 52 | rest_file = open(argv[i+3],'w') 53 | 54 | return dataset, subset_size, method, subset_file, rest_file 55 | 56 | def random_selection(dataset, subset_size): 57 | l = sum(1 for line in open(dataset,'r')) 58 | return sorted(random.sample(xrange(l), subset_size)) 59 | 60 | def stratified_selection(dataset, subset_size): 61 | labels = [line.split(None,1)[0] for line in open(dataset)] 62 | label_linenums = defaultdict(list) 63 | for i, label in enumerate(labels): 64 | label_linenums[label] += [i] 65 | 66 | l = len(labels) 67 | remaining = subset_size 68 | ret = [] 69 | 70 | # classes with fewer data are sampled first; otherwise 71 | # some rare classes may not be selected 72 | for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])): 73 | linenums = label_linenums[label] 74 | label_size = len(linenums) 75 | # at least one instance per class 76 | s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l))))) 77 | if s == 0: 78 | sys.stderr.write('''\ 79 | Error: failed to have at least one instance per class 80 | 1. You may have regression data. 81 | 2. Your classification data is unbalanced or too small. 82 | Please use -s 1. 
83 | ''') 84 | sys.exit(-1) 85 | remaining -= s 86 | ret += [linenums[i] for i in random.sample(xrange(label_size), s)] 87 | return sorted(ret) 88 | 89 | def main(argv=sys.argv): 90 | dataset, subset_size, method, subset_file, rest_file = process_options(argv) 91 | #uncomment the following line to fix the random seed 92 | #random.seed(0) 93 | selected_lines = [] 94 | 95 | if method == 0: 96 | selected_lines = stratified_selection(dataset, subset_size) 97 | elif method == 1: 98 | selected_lines = random_selection(dataset, subset_size) 99 | 100 | #select instances based on selected_lines 101 | dataset = open(dataset,'r') 102 | prev_selected_linenum = -1 103 | for i in xrange(len(selected_lines)): 104 | for cnt in xrange(selected_lines[i]-prev_selected_linenum-1): 105 | line = dataset.readline() 106 | if rest_file: 107 | rest_file.write(line) 108 | subset_file.write(dataset.readline()) 109 | prev_selected_linenum = selected_lines[i] 110 | subset_file.close() 111 | 112 | if rest_file: 113 | for line in dataset: 114 | rest_file.write(line) 115 | rest_file.close() 116 | dataset.close() 117 | 118 | if __name__ == '__main__': 119 | main(sys.argv) 120 | 121 | -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/libsvm.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/libsvm.dll -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/libsvmread.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/libsvmread.mexw64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/libsvmwrite.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/libsvmwrite.mexw64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svm-predict.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svm-predict.exe -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svm-scale.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svm-scale.exe -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svm-toy.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svm-toy.exe -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svm-train.exe: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svm-train.exe -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svmpredict.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svmpredict.mexw64 -------------------------------------------------------------------------------- /tca/toolbox/libsvm-3.20/windows/svmtrain.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tca/toolbox/libsvm-3.20/windows/svmtrain.mexw64 -------------------------------------------------------------------------------- /tradaboost/README: -------------------------------------------------------------------------------- 1 | This is the code for "Boosting for Transfer Learning" by Wenyuan Dai, Qiang Yang, Gui-Rong Xue, and Yong Yu. 2 | 3 | TrAdaBoostTrain.m is for training. 4 | TrPredict.m is for prediction. 5 | 6 | Please compile the toolbox and add it to the MATLAB path before use: 7 | addpath('./toolbox/libsvm-weights-3.20/matlab'); 8 | -------------------------------------------------------------------------------- /tradaboost/TrAdaBoostTrain.m: -------------------------------------------------------------------------------- 1 | function [model, beta] = TrAdaBoostTrain(tdX,tdY,tsX,tsY) 2 | %%tdX: features from source domain 3 | %%tdY: labels from source domain 4 | %%tsX: features from target domain 5 | %%tsY: labels from target domain 6 | 7 | tX = [tdX ; tsX]; 8 | tY = [tdY ; tsY]; 9 | n = size(tdY,1); 10 | m = size(tsY,1); 11 | T = 20; %iteration number 12 | w = ones(m+n,1); 13 | model = cell(1,T); 14 | beta = zeros(1,T); 15 | for t = 1:T 16 | %p = w./(sum(abs(w))); 17 | model{t} = svmtrain(w,tY,tX,'-t 2'); % weighted SVM with an RBF kernel (-t 2) 18 | predict = svmpredict(tY,tX,model{t}); 19 | sW = sum(w(n+1:m+n)); 20 | et = sum(w(n+1:m+n).*(predict(n+1:m+n)~=tsY))/sW; % weighted error on the target domain 21 | if et >= 0.5 22 | et = 0.499; 23 | elseif et == 0 24 | et = 0.001; 25 | end 26 | bT = et/(1-et); 27 | beta(t) = bT; 28 | b = 1/(1+sqrt(2*log(n)/T)); % down-weighting factor for misclassified source instances 29 | wUpdate = [(b*ones(n,1)).^(predict(1:n)~=tdY) ; (bT*ones(m,1)).^(-(predict(n+1:m+n)~=tsY)) ]; 30 | w = w.*wUpdate; 31 | end 32 | end 33 | 34 | -------------------------------------------------------------------------------- /tradaboost/TrPredict.m: -------------------------------------------------------------------------------- 1 | function Ydash = TrPredict(X, svmmodels, beta) 2 | % X: features of test data 3 | % svmmodels: cell array of models returned by TrAdaBoostTrain 4 | % beta: per-iteration beta values returned by TrAdaBoostTrain 5 | % Ydash: predicted labels in {-1, 1} 6 | % Only the second half of the boosting iterations is used for prediction. 7 | 8 | N = length(svmmodels); 9 | start = ceil(N/2); 10 | l = size(X,1); 11 | yOne = ones(l,1); 12 | yTwo = ones(l,1); 13 | Ydash = ones(l,1); 14 | for i = start:N 15 | predict = svmpredict(yOne,X,svmmodels{i}); 16 | %predict = predict == 1; 17 | yOne = yOne.*((beta(i)*ones(l,1)).^(-predict)); 18 | yTwo = yTwo.*((beta(i)*ones(l,1)).^(-0.5)); 19 | end 20 | Ydash(yOne < yTwo) = -1; 21 | end 22 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/COPYRIGHT: -------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2000-2014 Chih-Chung Chang and Chih-Jen Lin 3 | All rights reserved.
4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | 3. Neither name of copyright holders nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR 25 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 26 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 27 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 28 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 29 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 30 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/Makefile: -------------------------------------------------------------------------------- 1 | CXX ?= g++ 2 | CFLAGS = -Wall -Wconversion -O3 -fPIC 3 | SHVER = 2 4 | OS = $(shell uname) 5 | 6 | all: svm-train svm-predict svm-scale 7 | 8 | lib: svm.o 9 | if [ "$(OS)" = "Darwin" ]; then \ 10 | SHARED_LIB_FLAG="-dynamiclib -Wl,-install_name,libsvm.so.$(SHVER)"; \ 11 | else \ 12 | SHARED_LIB_FLAG="-shared -Wl,-soname,libsvm.so.$(SHVER)"; \ 13 | fi; \ 14 | $(CXX) $${SHARED_LIB_FLAG} svm.o -o libsvm.so.$(SHVER) 15 | 16 | svm-predict: svm-predict.c svm.o 17 | $(CXX) $(CFLAGS) svm-predict.c svm.o -o svm-predict -lm 18 | svm-train: svm-train.c svm.o 19 | $(CXX) $(CFLAGS) svm-train.c svm.o -o svm-train -lm 20 | svm-scale: svm-scale.c 21 | $(CXX) $(CFLAGS) svm-scale.c -o svm-scale 22 | svm.o: svm.cpp svm.h 23 | $(CXX) $(CFLAGS) -c svm.cpp 24 | clean: 25 | rm -f *~ svm.o svm-train svm-predict svm-scale libsvm.so.$(SHVER) 26 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/Makefile.win: -------------------------------------------------------------------------------- 1 | #You must ensure nmake.exe, cl.exe, link.exe are in system path. 2 | #VCVARS32.bat 3 | #Under dosbox prompt 4 | #nmake -f Makefile.win 5 | 6 | ########################################## 7 | CXX = cl.exe 8 | CFLAGS = /nologo /O2 /EHsc /I. 
/D _WIN32 /D _CRT_SECURE_NO_DEPRECATE 9 | TARGET = windows 10 | 11 | all: $(TARGET)\svm-train.exe $(TARGET)\svm-predict.exe $(TARGET)\svm-scale.exe $(TARGET)\svm-toy.exe lib 12 | 13 | $(TARGET)\svm-predict.exe: svm.h svm-predict.c svm.obj 14 | $(CXX) $(CFLAGS) svm-predict.c svm.obj -Fe$(TARGET)\svm-predict.exe 15 | 16 | $(TARGET)\svm-train.exe: svm.h svm-train.c svm.obj 17 | $(CXX) $(CFLAGS) svm-train.c svm.obj -Fe$(TARGET)\svm-train.exe 18 | 19 | $(TARGET)\svm-scale.exe: svm.h svm-scale.c 20 | $(CXX) $(CFLAGS) svm-scale.c -Fe$(TARGET)\svm-scale.exe 21 | 22 | $(TARGET)\svm-toy.exe: svm.h svm.obj svm-toy\windows\svm-toy.cpp 23 | $(CXX) $(CFLAGS) svm-toy\windows\svm-toy.cpp svm.obj user32.lib gdi32.lib comdlg32.lib -Fe$(TARGET)\svm-toy.exe 24 | 25 | svm.obj: svm.cpp svm.h 26 | $(CXX) $(CFLAGS) -c svm.cpp 27 | 28 | lib: svm.cpp svm.h svm.def 29 | $(CXX) $(CFLAGS) -LD svm.cpp -Fe$(TARGET)\libsvm -link -DEF:svm.def 30 | 31 | clean: 32 | -erase /Q *.obj $(TARGET)\. 33 | 34 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/READMEweight: -------------------------------------------------------------------------------- 1 | Usage: 2 | use '-W weight_file' to assign weights for each instance. 3 | Please make sure all weights are non-negative. 4 | 5 | Example: 6 | $ ./svm-train -W heart_scale.wgt heart_scale 7 | 8 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/heart_scale.wgt: -------------------------------------------------------------------------------- 1 | 20 2 | 10 3 | 5.5 4 | 1 5 | 1 6 | 1 7 | 1 8 | 1 9 | 1 10 | 1 11 | 1 12 | 1 13 | 1 14 | 1 15 | 1 16 | 1 17 | 1 18 | 1 19 | 1 20 | 1 21 | 1 22 | 1 23 | 1 24 | 1 25 | 1 26 | 1 27 | 1 28 | 1 29 | 1 30 | 1 31 | 1 32 | 1 33 | 1 34 | 1 35 | 1 36 | 1 37 | 1 38 | 1 39 | 1 40 | 1 41 | 1 42 | 1 43 | 1 44 | 1 45 | 1 46 | 1 47 | 1 48 | 1 49 | 1 50 | 1 51 | 1 52 | 1 53 | 1 54 | 1 55 | 1 56 | 1 57 | 1 58 | 1 59 | 1 60 | 1 61 | 1 62 | 1 63 | 1 64 | 1 65 | 1 66 | 1 67 | 1 68 | 1 69 | 1 70 | 1 71 | 1 72 | 1 73 | 1 74 | 1 75 | 1 76 | 1 77 | 1 78 | 1 79 | 1 80 | 1 81 | 1 82 | 1 83 | 1 84 | 1 85 | 1 86 | 1 87 | 1 88 | 1 89 | 1 90 | 1 91 | 1 92 | 1 93 | 1 94 | 1 95 | 1 96 | 1 97 | 1 98 | 1 99 | 1 100 | 1 101 | 1 102 | 1 103 | 1 104 | 1 105 | 1 106 | 1 107 | 1 108 | 1 109 | 1 110 | 1 111 | 1 112 | 1 113 | 1 114 | 1 115 | 1 116 | 1 117 | 1 118 | 1 119 | 1 120 | 1 121 | 1 122 | 1 123 | 1 124 | 1 125 | 1 126 | 1 127 | 1 128 | 1 129 | 1 130 | 1 131 | 1 132 | 1 133 | 1 134 | 1 135 | 1 136 | 1 137 | 1 138 | 1 139 | 1 140 | 1 141 | 1 142 | 1 143 | 1 144 | 1 145 | 1 146 | 1 147 | 1 148 | 1 149 | 1 150 | 1 151 | 1 152 | 1 153 | 1 154 | 1 155 | 1 156 | 1 157 | 1 158 | 1 159 | 1 160 | 1 161 | 1 162 | 1 163 | 1 164 | 1 165 | 1 166 | 1 167 | 1 168 | 1 169 | 1 170 | 1 171 | 1 172 | 1 173 | 1 174 | 1 175 | 1 176 | 1 177 | 1 178 | 1 179 | 1 180 | 1 181 | 1 182 | 1 183 | 1 184 | 1 185 | 1 186 | 1 187 | 1 188 | 1 189 | 1 190 | 1 191 | 1 192 | 1 193 | 1 194 | 1 195 | 1 196 | 1 197 | 1 198 | 1 199 | 1 200 | 1 201 | 1 202 | 1 203 | 1 204 | 1 205 | 1 206 | 1 207 | 1 208 | 1 209 | 1 210 | 1 211 | 1 212 | 1 213 | 1 214 | 1 215 | 1 216 | 1 217 | 1 218 | 1 219 | 1 220 | 1 221 | 1 222 | 1 223 | 1 224 | 1 225 | 1 226 | 1 227 | 1 228 | 1 229 | 1 230 | 1 231 | 1 232 | 1 233 | 1 234 | 1 235 | 1 236 | 1 237 | 1 238 | 1 239 | 1 240 | 1 241 | 1 242 | 1 243 | 1 244 | 1 245 | 1 246 | 1 247 | 1 248 | 1 249 | 1 250 | 1 251 | 1 252 | 1 253 | 1 254 | 1 255 | 1 
256 | 1 257 | 1 258 | 1 259 | 1 260 | 1 261 | 1 262 | 1 263 | 1 264 | 1 265 | 1 266 | 1 267 | 1 268 | 1 269 | 1 270 | 1 271 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile is used under Linux 2 | 3 | MATLABDIR ?= /usr/local/matlab 4 | # for Mac 5 | # MATLABDIR ?= /opt/local/matlab 6 | 7 | CXX ?= g++ 8 | #CXX = g++-4.1 9 | CFLAGS = -Wall -Wconversion -O3 -fPIC -I$(MATLABDIR)/extern/include -I.. 10 | 11 | MEX = $(MATLABDIR)/bin/mex 12 | MEX_OPTION = CC="$(CXX)" CXX="$(CXX)" CFLAGS="$(CFLAGS)" CXXFLAGS="$(CFLAGS)" 13 | # comment the following line if you use MATLAB on 32-bit computer 14 | MEX_OPTION += -largeArrayDims 15 | MEX_EXT = $(shell $(MATLABDIR)/bin/mexext) 16 | 17 | all: matlab 18 | 19 | matlab: binary 20 | 21 | octave: 22 | @echo "please type make under Octave" 23 | 24 | binary: svmpredict.$(MEX_EXT) svmtrain.$(MEX_EXT) libsvmread.$(MEX_EXT) libsvmwrite.$(MEX_EXT) 25 | 26 | svmpredict.$(MEX_EXT): svmpredict.c ../svm.h ../svm.o svm_model_matlab.o 27 | $(MEX) $(MEX_OPTION) svmpredict.c ../svm.o svm_model_matlab.o 28 | 29 | svmtrain.$(MEX_EXT): svmtrain.c ../svm.h ../svm.o svm_model_matlab.o 30 | $(MEX) $(MEX_OPTION) svmtrain.c ../svm.o svm_model_matlab.o 31 | 32 | libsvmread.$(MEX_EXT): libsvmread.c 33 | $(MEX) $(MEX_OPTION) libsvmread.c 34 | 35 | libsvmwrite.$(MEX_EXT): libsvmwrite.c 36 | $(MEX) $(MEX_OPTION) libsvmwrite.c 37 | 38 | svm_model_matlab.o: svm_model_matlab.c ../svm.h 39 | $(CXX) $(CFLAGS) -c svm_model_matlab.c 40 | 41 | ../svm.o: ../svm.cpp ../svm.h 42 | make -C .. svm.o 43 | 44 | clean: 45 | rm -f *~ *.o *.mex* *.obj ../svm.o 46 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/READMEweight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a simple interface to LIBSVM with instance weight support 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for the detail. 10 | 11 | Usage 12 | ===== 13 | 14 | matlab> model = svmtrain(training_weight_vector, training_label_vector, training_instance_matrix, 'libsvm_options') 15 | 16 | -training_weight_vector: 17 | An m by 1 vector of training weights. (type must be double) 18 | -training_label_vector: 19 | An m by 1 vector of training labels. (type must be double) 20 | -training_instance_matrix: 21 | An m by n matrix of m training instances with n features. (type must be double) 22 | -libsvm_options: 23 | A string of training options in the same format as that of LIBSVM. 
24 | 25 | Examples 26 | ======== 27 | 28 | Train and test on the provided data heart_scale: 29 | 30 | matlab> [heart_scale_label, heart_scale_inst] = libsvmread('../heart_scale'); 31 | matlab> heart_scale_weight = load('../heart_scale.wgt'); 32 | matlab> model = svmtrain(heart_scale_weight, heart_scale_label, heart_scale_inst, '-c 1'); 33 | matlab> [predict_label, accuracy, dec_values] = svmpredict(heart_scale_label, heart_scale_inst, model); % test the training data 34 | 35 | Train and test without weights: 36 | 37 | matlab> model = svmtrain([], heart_scale_label, heart_scale_inst, '-c 1'); 38 | 39 | 40 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "mex.h" 8 | 9 | #ifdef MX_API_VER 10 | #if MX_API_VER < 0x07030000 11 | typedef int mwIndex; 12 | #endif 13 | #endif 14 | #ifndef max 15 | #define max(x,y) (((x)>(y))?(x):(y)) 16 | #endif 17 | #ifndef min 18 | #define min(x,y) (((x)<(y))?(x):(y)) 19 | #endif 20 | 21 | void exit_with_help() 22 | { 23 | mexPrintf( 24 | "Usage: [label_vector, instance_matrix] = libsvmread('filename');\n" 25 | ); 26 | } 27 | 28 | static void fake_answer(int nlhs, mxArray *plhs[]) 29 | { 30 | int i; 31 | for(i=0;i start from 0 86 | strtok(line," \t"); // label 87 | while (1) 88 | { 89 | idx = strtok(NULL,":"); // index:value 90 | val = strtok(NULL," \t"); 91 | if(val == NULL) 92 | break; 93 | 94 | errno = 0; 95 | index = (int) strtol(idx,&endptr,10); 96 | if(endptr == idx || errno != 0 || *endptr != '\0' || index <= inst_max_index) 97 | { 98 | mexPrintf("Wrong input format at line %d\n",l+1); 99 | fake_answer(nlhs, plhs); 100 | return; 101 | } 102 | else 103 | inst_max_index = index; 104 | 105 | min_index = min(min_index, index); 106 | elements++; 107 | } 108 | max_index = max(max_index, inst_max_index); 109 | l++; 110 | } 111 | rewind(fp); 112 | 113 | // y 114 | plhs[0] = mxCreateDoubleMatrix(l, 1, mxREAL); 115 | // x^T 116 | if (min_index <= 0) 117 | plhs[1] = mxCreateSparse(max_index-min_index+1, l, elements, mxREAL); 118 | else 119 | plhs[1] = mxCreateSparse(max_index, l, elements, mxREAL); 120 | 121 | labels = mxGetPr(plhs[0]); 122 | samples = mxGetPr(plhs[1]); 123 | ir = mxGetIr(plhs[1]); 124 | jc = mxGetJc(plhs[1]); 125 | 126 | k=0; 127 | for(i=0;i start from 0 158 | 159 | errno = 0; 160 | samples[k] = strtod(val,&endptr); 161 | if (endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 162 | { 163 | mexPrintf("Wrong input format at line %d\n",i+1); 164 | fake_answer(nlhs, plhs); 165 | return; 166 | } 167 | ++k; 168 | } 169 | } 170 | jc[l] = k; 171 | 172 | fclose(fp); 173 | free(line); 174 | 175 | { 176 | mxArray *rhs[1], *lhs[1]; 177 | rhs[0] = plhs[1]; 178 | if(mexCallMATLAB(1, lhs, 1, rhs, "transpose")) 179 | { 180 | mexPrintf("Error: cannot transpose problem\n"); 181 | fake_answer(nlhs, plhs); 182 | return; 183 | } 184 | plhs[1] = lhs[0]; 185 | } 186 | } 187 | 188 | void mexFunction( int nlhs, mxArray *plhs[], 189 | int nrhs, const mxArray *prhs[] ) 190 | { 191 | char filename[256]; 192 | 193 | if(nrhs != 1 || nlhs != 2) 194 | { 195 | exit_with_help(); 196 | fake_answer(nlhs, plhs); 197 | return; 198 | } 199 | 200 | mxGetString(prhs[0], filename, mxGetN(prhs[0]) + 1); 201 | 202 | if(filename == NULL) 203 | { 204 | mexPrintf("Error: filename is NULL\n"); 205 | return; 206 | } 207 | 
208 | read_problem(filename, nlhs, plhs); 209 | 210 | return; 211 | } 212 | 213 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmread.mexmaci64 -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "mex.h" 6 | 7 | #ifdef MX_API_VER 8 | #if MX_API_VER < 0x07030000 9 | typedef int mwIndex; 10 | #endif 11 | #endif 12 | 13 | void exit_with_help() 14 | { 15 | mexPrintf( 16 | "Usage: libsvmwrite('filename', label_vector, instance_matrix);\n" 17 | ); 18 | } 19 | 20 | static void fake_answer(int nlhs, mxArray *plhs[]) 21 | { 22 | int i; 23 | for(i=0;i 0) 89 | { 90 | exit_with_help(); 91 | fake_answer(nlhs, plhs); 92 | return; 93 | } 94 | 95 | // Transform the input Matrix to libsvm format 96 | if(nrhs == 3) 97 | { 98 | char filename[256]; 99 | if(!mxIsDouble(prhs[1]) || !mxIsDouble(prhs[2])) 100 | { 101 | mexPrintf("Error: label vector and instance matrix must be double\n"); 102 | return; 103 | } 104 | 105 | mxGetString(prhs[0], filename, mxGetN(prhs[0])+1); 106 | 107 | if(mxIsSparse(prhs[2])) 108 | libsvmwrite(filename, prhs[1], prhs[2]); 109 | else 110 | { 111 | mexPrintf("Instance_matrix must be sparse\n"); 112 | return; 113 | } 114 | } 115 | else 116 | { 117 | exit_with_help(); 118 | return; 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tradaboost/toolbox/libsvm-weights-3.20/matlab/libsvmwrite.mexmaci64 -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/make.m: -------------------------------------------------------------------------------- 1 | % This make.m is for MATLAB and OCTAVE under Windows, Mac, and Unix 2 | 3 | try 4 | Type = ver; 5 | % This part is for OCTAVE 6 | if(strcmp(Type(1).Name, 'Octave') == 1) 7 | mex libsvmread.c 8 | mex libsvmwrite.c 9 | mex svmtrain.c ../svm.cpp svm_model_matlab.c 10 | mex svmpredict.c ../svm.cpp svm_model_matlab.c 11 | % This part is for MATLAB 12 | % Add -largeArrayDims on 64-bit machines of MATLAB 13 | else 14 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmread.c 15 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims libsvmwrite.c 16 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmtrain.c ../svm.cpp svm_model_matlab.c 17 | mex CFLAGS="\$CFLAGS -std=c99" -largeArrayDims svmpredict.c ../svm.cpp svm_model_matlab.c 18 | end 19 | catch 20 | fprintf('If make.m fails, please check README about detailed instructions.\n'); 21 | end 22 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/svm_model_matlab.h: -------------------------------------------------------------------------------- 1 | const char *model_to_matlab_structure(mxArray 
*plhs[], int num_of_feature, struct svm_model *model); 2 | struct svm_model *matlab_matrix_to_model(const mxArray *matlab_struct, const char **error_message); 3 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tradaboost/toolbox/libsvm-weights-3.20/matlab/svmpredict.mexmaci64 -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinZhineng/transfer-learning/98ab225b4e8202786023e78bbc71111f9bc1a3a8/tradaboost/toolbox/libsvm-weights-3.20/matlab/svmtrain.mexmaci64 -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/python/Makefile: -------------------------------------------------------------------------------- 1 | all = lib 2 | 3 | lib: 4 | make -C .. lib 5 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/python/README.weight: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This tool provides a Python interface to LIBSVM with instance weight support. 5 | 6 | Installation 7 | ============ 8 | 9 | Please check README for details. 10 | 11 | USAGE 12 | ===== 13 | 14 | The usage is basically the same as the version without support for 15 | instance weights. We only show differences below. 16 | 17 | - Function: svm_train 18 | 19 | There are three ways to call svm_train(): 20 | 21 | >>> model = svm_train(W, y, x [, 'training_options']) 22 | >>> model = svm_train(prob [, 'training_options']) 23 | >>> model = svm_train(prob, param) 24 | 25 | W: a list/tuple of l training weights (type must be double). 26 | Use [] if no weights. 27 | 28 | y: a list/tuple of l training labels (type must be int/double). 29 | 30 | x: a list/tuple of l training instances. The feature vector of 31 | each training instance is an instance of list/tuple or dictionary. 32 | 33 | training_options: a string in the same form as that for LIBSVM command 34 | line mode. 35 | 36 | prob: an svm_problem instance generated by calling 37 | svm_problem(W, y, x). 38 | 39 | param: an svm_parameter instance generated by calling 40 | svm_parameter('training_options'). 41 | 42 | model: the returned svm_model instance. See svm.h for details of this 43 | structure. If '-v' is specified, cross validation is 44 | conducted and the returned model is just a scalar: cross-validation 45 | accuracy for classification and mean-squared error for regression. 46 | 47 | To train the same data many times with different 48 | parameters, the second and third ways should be faster.
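- Function: svm_predict

Prediction should not be affected by instance weights (the weights only enter training), so the stock svm_predict from svmutil.py can be used unchanged on a model returned by any of the svm_train calls above. A minimal sketch, where the model m and the test data ty, tx (read here with svm_read_problem) are assumed names, not part of the original examples:

>>> ty, tx = svm_read_problem('../heart_scale')
>>> p_label, p_acc, p_val = svm_predict(ty, tx, m)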
49 | 50 | Examples: 51 | 52 | >>> y, x = svm_read_problem('../heart_scale') 53 | >>> W = [1] * len(y) 54 | >>> W[0] = 10 55 | >>> prob = svm_problem(W, y, x) 56 | >>> param = svm_parameter('-s 3 -c 5 -h 0') 57 | >>> m = svm_train([], y, x, '-c 5') 58 | >>> m = svm_train(W, y, x) 59 | >>> m = svm_train(prob, '-t 2 -c 5') 60 | >>> m = svm_train(prob, param) 61 | >>> CV_ACC = svm_train(W, y, x, '-v 3') 62 | 63 | 64 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/svm-predict.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "svm.h" 7 | 8 | int print_null(const char *s,...) {return 0;} 9 | 10 | static int (*info)(const char *fmt,...) = &printf; 11 | 12 | struct svm_node *x; 13 | int max_nr_attr = 64; 14 | 15 | struct svm_model* model; 16 | int predict_probability=0; 17 | 18 | static char *line = NULL; 19 | static int max_line_len; 20 | 21 | static char* readline(FILE *input) 22 | { 23 | int len; 24 | 25 | if(fgets(line,max_line_len,input) == NULL) 26 | return NULL; 27 | 28 | while(strrchr(line,'\n') == NULL) 29 | { 30 | max_line_len *= 2; 31 | line = (char *) realloc(line,max_line_len); 32 | len = (int) strlen(line); 33 | if(fgets(line+len,max_line_len-len,input) == NULL) 34 | break; 35 | } 36 | return line; 37 | } 38 | 39 | void exit_input_error(int line_num) 40 | { 41 | fprintf(stderr,"Wrong input format at line %d\n", line_num); 42 | exit(1); 43 | } 44 | 45 | void predict(FILE *input, FILE *output) 46 | { 47 | int correct = 0; 48 | int total = 0; 49 | double error = 0; 50 | double sump = 0, sumt = 0, sumpp = 0, sumtt = 0, sumpt = 0; 51 | 52 | int svm_type=svm_get_svm_type(model); 53 | int nr_class=svm_get_nr_class(model); 54 | double *prob_estimates=NULL; 55 | int j; 56 | 57 | if(predict_probability) 58 | { 59 | if (svm_type==NU_SVR || svm_type==EPSILON_SVR) 60 | info("Prob. 
model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g\n",svm_get_svr_probability(model)); 61 | else 62 | { 63 | int *labels=(int *) malloc(nr_class*sizeof(int)); 64 | svm_get_labels(model,labels); 65 | prob_estimates = (double *) malloc(nr_class*sizeof(double)); 66 | fprintf(output,"labels"); 67 | for(j=0;j start from 0 82 | 83 | label = strtok(line," \t\n"); 84 | if(label == NULL) // empty line 85 | exit_input_error(total+1); 86 | 87 | target_label = strtod(label,&endptr); 88 | if(endptr == label || *endptr != '\0') 89 | exit_input_error(total+1); 90 | 91 | while(1) 92 | { 93 | if(i>=max_nr_attr-1) // need one more for index = -1 94 | { 95 | max_nr_attr *= 2; 96 | x = (struct svm_node *) realloc(x,max_nr_attr*sizeof(struct svm_node)); 97 | } 98 | 99 | idx = strtok(NULL,":"); 100 | val = strtok(NULL," \t"); 101 | 102 | if(val == NULL) 103 | break; 104 | errno = 0; 105 | x[i].index = (int) strtol(idx,&endptr,10); 106 | if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index) 107 | exit_input_error(total+1); 108 | else 109 | inst_max_index = x[i].index; 110 | 111 | errno = 0; 112 | x[i].value = strtod(val,&endptr); 113 | if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr))) 114 | exit_input_error(total+1); 115 | 116 | ++i; 117 | } 118 | x[i].index = -1; 119 | 120 | if (predict_probability && (svm_type==C_SVC || svm_type==NU_SVC)) 121 | { 122 | predict_label = svm_predict_probability(model,x,prob_estimates); 123 | fprintf(output,"%g",predict_label); 124 | for(j=0;j=argc-2) 195 | exit_with_help(); 196 | 197 | input = fopen(argv[i],"r"); 198 | if(input == NULL) 199 | { 200 | fprintf(stderr,"can't open input file %s\n",argv[i]); 201 | exit(1); 202 | } 203 | 204 | output = fopen(argv[i+2],"w"); 205 | if(output == NULL) 206 | { 207 | fprintf(stderr,"can't open output file %s\n",argv[i+2]); 208 | exit(1); 209 | } 210 | 211 | if((model=svm_load_model(argv[i+1]))==0) 212 | { 213 | fprintf(stderr,"can't open model file %s\n",argv[i+1]); 214 | exit(1); 215 | } 216 | 217 | x = (struct svm_node *) malloc(max_nr_attr*sizeof(struct svm_node)); 218 | if(predict_probability) 219 | { 220 | if(svm_check_probability_model(model)==0) 221 | { 222 | fprintf(stderr,"Model does not support probabiliy estimates\n"); 223 | exit(1); 224 | } 225 | } 226 | else 227 | { 228 | if(svm_check_probability_model(model)!=0) 229 | info("Model supports probability estimates, but disabled in prediction.\n"); 230 | } 231 | 232 | predict(input,output); 233 | svm_free_and_destroy_model(&model); 234 | free(x); 235 | free(line); 236 | fclose(input); 237 | fclose(output); 238 | return 0; 239 | } 240 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/svm.def: -------------------------------------------------------------------------------- 1 | LIBRARY libsvm 2 | EXPORTS 3 | svm_train @1 4 | svm_cross_validation @2 5 | svm_save_model @3 6 | svm_load_model @4 7 | svm_get_svm_type @5 8 | svm_get_nr_class @6 9 | svm_get_labels @7 10 | svm_get_svr_probability @8 11 | svm_predict_values @9 12 | svm_predict @10 13 | svm_predict_probability @11 14 | svm_free_model_content @12 15 | svm_free_and_destroy_model @13 16 | svm_destroy_param @14 17 | svm_check_parameter @15 18 | svm_check_probability_model @16 19 | svm_set_print_string_function @17 20 | svm_get_sv_indices @18 21 | svm_get_nr_sv @19 22 | 
-------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/svm.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIBSVM_H 2 | #define _LIBSVM_H 3 | 4 | #define LIBSVM_VERSION 320 5 | 6 | #ifdef __cplusplus 7 | extern "C" { 8 | #endif 9 | 10 | extern int libsvm_version; 11 | 12 | struct svm_node 13 | { 14 | int index; 15 | double value; 16 | }; 17 | 18 | struct svm_problem 19 | { 20 | int l; 21 | double *y; 22 | struct svm_node **x; 23 | double *W; /* instance weight */ 24 | }; 25 | 26 | enum { C_SVC, NU_SVC, ONE_CLASS, EPSILON_SVR, NU_SVR }; /* svm_type */ 27 | enum { LINEAR, POLY, RBF, SIGMOID, PRECOMPUTED }; /* kernel_type */ 28 | 29 | struct svm_parameter 30 | { 31 | int svm_type; 32 | int kernel_type; 33 | int degree; /* for poly */ 34 | double gamma; /* for poly/rbf/sigmoid */ 35 | double coef0; /* for poly/sigmoid */ 36 | 37 | /* these are for training only */ 38 | double cache_size; /* in MB */ 39 | double eps; /* stopping criteria */ 40 | double C; /* for C_SVC, EPSILON_SVR and NU_SVR */ 41 | int nr_weight; /* for C_SVC */ 42 | int *weight_label; /* for C_SVC */ 43 | double* weight; /* for C_SVC */ 44 | double nu; /* for NU_SVC, ONE_CLASS, and NU_SVR */ 45 | double p; /* for EPSILON_SVR */ 46 | int shrinking; /* use the shrinking heuristics */ 47 | int probability; /* do probability estimates */ 48 | }; 49 | 50 | // 51 | // svm_model 52 | // 53 | struct svm_model 54 | { 55 | struct svm_parameter param; /* parameter */ 56 | int nr_class; /* number of classes, = 2 in regression/one class svm */ 57 | int l; /* total #SV */ 58 | struct svm_node **SV; /* SVs (SV[l]) */ 59 | double **sv_coef; /* coefficients for SVs in decision functions (sv_coef[k-1][l]) */ 60 | double *rho; /* constants in decision functions (rho[k*(k-1)/2]) */ 61 | double *probA; /* pariwise probability information */ 62 | double *probB; 63 | int *sv_indices; /* sv_indices[0,...,nSV-1] are values in [1,...,num_traning_data] to indicate SVs in the training set */ 64 | 65 | /* for classification only */ 66 | 67 | int *label; /* label of each class (label[k]) */ 68 | int *nSV; /* number of SVs for each class (nSV[k]) */ 69 | /* nSV[0] + nSV[1] + ... 
+ nSV[k-1] = l */ 70 | /* XXX */ 71 | int free_sv; /* 1 if svm_model is created by svm_load_model*/ 72 | /* 0 if svm_model is created by svm_train */ 73 | }; 74 | 75 | struct svm_model *svm_train(const struct svm_problem *prob, const struct svm_parameter *param); 76 | void svm_cross_validation(const struct svm_problem *prob, const struct svm_parameter *param, int nr_fold, double *target); 77 | 78 | int svm_save_model(const char *model_file_name, const struct svm_model *model); 79 | struct svm_model *svm_load_model(const char *model_file_name); 80 | 81 | int svm_get_svm_type(const struct svm_model *model); 82 | int svm_get_nr_class(const struct svm_model *model); 83 | void svm_get_labels(const struct svm_model *model, int *label); 84 | void svm_get_sv_indices(const struct svm_model *model, int *sv_indices); 85 | int svm_get_nr_sv(const struct svm_model *model); 86 | double svm_get_svr_probability(const struct svm_model *model); 87 | 88 | double svm_predict_values(const struct svm_model *model, const struct svm_node *x, double* dec_values); 89 | double svm_predict(const struct svm_model *model, const struct svm_node *x); 90 | double svm_predict_probability(const struct svm_model *model, const struct svm_node *x, double* prob_estimates); 91 | 92 | void svm_free_model_content(struct svm_model *model_ptr); 93 | void svm_free_and_destroy_model(struct svm_model **model_ptr_ptr); 94 | void svm_destroy_param(struct svm_parameter *param); 95 | 96 | const char *svm_check_parameter(const struct svm_problem *prob, const struct svm_parameter *param); 97 | int svm_check_probability_model(const struct svm_model *model); 98 | 99 | void svm_set_print_string_function(void (*print_func)(const char *)); 100 | 101 | #ifdef __cplusplus 102 | } 103 | #endif 104 | 105 | #endif /* _LIBSVM_H */ 106 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/tools/checkdata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # 4 | # A format checker for LIBSVM 5 | # 6 | 7 | # 8 | # Copyright (c) 2007, Rong-En Fan 9 | # 10 | # All rights reserved. 11 | # 12 | # This program is distributed under the same license of the LIBSVM package. 
13 | # 14 | 15 | from sys import argv, exit 16 | import os.path 17 | 18 | def err(line_no, msg): 19 | print("line {0}: {1}".format(line_no, msg)) 20 | 21 | # works like float() but does not accept nan and inf 22 | def my_float(x): 23 | if x.lower().find("nan") != -1 or x.lower().find("inf") != -1: 24 | raise ValueError 25 | 26 | return float(x) 27 | 28 | def main(): 29 | if len(argv) != 2: 30 | print("Usage: {0} dataset".format(argv[0])) 31 | exit(1) 32 | 33 | dataset = argv[1] 34 | 35 | if not os.path.exists(dataset): 36 | print("dataset {0} not found".format(dataset)) 37 | exit(1) 38 | 39 | line_no = 1 40 | error_line_count = 0 41 | for line in open(dataset, 'r'): 42 | line_error = False 43 | 44 | # each line must end with a newline character 45 | if line[-1] != '\n': 46 | err(line_no, "missing a newline character in the end") 47 | line_error = True 48 | 49 | nodes = line.split() 50 | 51 | # check label 52 | try: 53 | label = nodes.pop(0) 54 | 55 | if label.find(',') != -1: 56 | # multi-label format 57 | try: 58 | for l in label.split(','): 59 | l = my_float(l) 60 | except: 61 | err(line_no, "label {0} is not a valid multi-label form".format(label)) 62 | line_error = True 63 | else: 64 | try: 65 | label = my_float(label) 66 | except: 67 | err(line_no, "label {0} is not a number".format(label)) 68 | line_error = True 69 | except: 70 | err(line_no, "missing label, perhaps an empty line?") 71 | line_error = True 72 | 73 | # check features 74 | prev_index = -1 75 | for i in range(len(nodes)): 76 | try: 77 | (index, value) = nodes[i].split(':') 78 | 79 | index = int(index) 80 | value = my_float(value) 81 | 82 | # precomputed kernel's index starts from 0 and LIBSVM 83 | # checks it. Hence, don't treat index 0 as an error. 84 | if index < 0: 85 | err(line_no, "feature index must be positive; wrong feature {0}".format(nodes[i])) 86 | line_error = True 87 | elif index <= prev_index: 88 | err(line_no, "feature indices must be in an ascending order, previous/current features {0} {1}".format(nodes[i-1], nodes[i])) 89 | line_error = True 90 | prev_index = index 91 | except: 92 | err(line_no, "feature '{0}' not an : pair, integer, real number ".format(nodes[i])) 93 | line_error = True 94 | 95 | line_no += 1 96 | 97 | if line_error: 98 | error_line_count += 1 99 | 100 | if error_line_count > 0: 101 | print("Found {0} lines with error.".format(error_line_count)) 102 | return 1 103 | else: 104 | print("No error.") 105 | return 0 106 | 107 | if __name__ == "__main__": 108 | exit(main()) 109 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/tools/easy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | from subprocess import * 6 | 7 | if len(sys.argv) <= 1: 8 | print('Usage: {0} training_file [testing_file]'.format(sys.argv[0])) 9 | raise SystemExit 10 | 11 | # svm, grid, and gnuplot executable files 12 | 13 | is_win32 = (sys.platform == 'win32') 14 | if not is_win32: 15 | svmscale_exe = "../svm-scale" 16 | svmtrain_exe = "../svm-train" 17 | svmpredict_exe = "../svm-predict" 18 | grid_py = "./grid.py" 19 | gnuplot_exe = "/usr/bin/gnuplot" 20 | else: 21 | # example for windows 22 | svmscale_exe = r"..\windows\svm-scale.exe" 23 | svmtrain_exe = r"..\windows\svm-train.exe" 24 | svmpredict_exe = r"..\windows\svm-predict.exe" 25 | gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe" 26 | grid_py = r".\grid.py" 27 | 28 | assert 
os.path.exists(svmscale_exe),"svm-scale executable not found" 29 | assert os.path.exists(svmtrain_exe),"svm-train executable not found" 30 | assert os.path.exists(svmpredict_exe),"svm-predict executable not found" 31 | assert os.path.exists(gnuplot_exe),"gnuplot executable not found" 32 | assert os.path.exists(grid_py),"grid.py not found" 33 | 34 | train_pathname = sys.argv[1] 35 | assert os.path.exists(train_pathname),"training file not found" 36 | file_name = os.path.split(train_pathname)[1] 37 | scaled_file = file_name + ".scale" 38 | model_file = file_name + ".model" 39 | range_file = file_name + ".range" 40 | 41 | if len(sys.argv) > 2: 42 | test_pathname = sys.argv[2] 43 | file_name = os.path.split(test_pathname)[1] 44 | assert os.path.exists(test_pathname),"testing file not found" 45 | scaled_test_file = file_name + ".scale" 46 | predict_test_file = file_name + ".predict" 47 | 48 | cmd = '{0} -s "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, train_pathname, scaled_file) 49 | print('Scaling training data...') 50 | Popen(cmd, shell = True, stdout = PIPE).communicate() 51 | 52 | cmd = '{0} -svmtrain "{1}" -gnuplot "{2}" "{3}"'.format(grid_py, svmtrain_exe, gnuplot_exe, scaled_file) 53 | print('Cross validation...') 54 | f = Popen(cmd, shell = True, stdout = PIPE).stdout 55 | 56 | line = '' 57 | while True: 58 | last_line = line 59 | line = f.readline() 60 | if not line: break 61 | c,g,rate = map(float,last_line.split()) 62 | 63 | print('Best c={0}, g={1} CV rate={2}'.format(c,g,rate)) 64 | 65 | cmd = '{0} -c {1} -g {2} "{3}" "{4}"'.format(svmtrain_exe,c,g,scaled_file,model_file) 66 | print('Training...') 67 | Popen(cmd, shell = True, stdout = PIPE).communicate() 68 | 69 | print('Output model: {0}'.format(model_file)) 70 | if len(sys.argv) > 2: 71 | cmd = '{0} -r "{1}" "{2}" > "{3}"'.format(svmscale_exe, range_file, test_pathname, scaled_test_file) 72 | print('Scaling testing data...') 73 | Popen(cmd, shell = True, stdout = PIPE).communicate() 74 | 75 | cmd = '{0} "{1}" "{2}" "{3}"'.format(svmpredict_exe, scaled_test_file, model_file, predict_test_file) 76 | print('Testing...') 77 | Popen(cmd, shell = True).communicate() 78 | 79 | print('Output prediction: {0}'.format(predict_test_file)) 80 | -------------------------------------------------------------------------------- /tradaboost/toolbox/libsvm-weights-3.20/tools/subset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os, sys, math, random 4 | from collections import defaultdict 5 | 6 | if sys.version_info[0] >= 3: 7 | xrange = range 8 | 9 | def exit_with_help(argv): 10 | print("""\ 11 | Usage: {0} [options] dataset subset_size [output1] [output2] 12 | 13 | This script randomly selects a subset of the dataset. 
14 | 15 | options: 16 | -s method : method of selection (default 0) 17 | 0 -- stratified selection (classification only) 18 | 1 -- random selection 19 | 20 | output1 : the subset (optional) 21 | output2 : rest of the data (optional) 22 | If output1 is omitted, the subset will be printed on the screen.""".format(argv[0])) 23 | exit(1) 24 | 25 | def process_options(argv): 26 | argc = len(argv) 27 | if argc < 3: 28 | exit_with_help(argv) 29 | 30 | # default method is stratified selection 31 | method = 0 32 | subset_file = sys.stdout 33 | rest_file = None 34 | 35 | i = 1 36 | while i < argc: 37 | if argv[i][0] != "-": 38 | break 39 | if argv[i] == "-s": 40 | i = i + 1 41 | method = int(argv[i]) 42 | if method not in [0,1]: 43 | print("Unknown selection method {0}".format(method)) 44 | exit_with_help(argv) 45 | i = i + 1 46 | 47 | dataset = argv[i] 48 | subset_size = int(argv[i+1]) 49 | if i+2 < argc: 50 | subset_file = open(argv[i+2],'w') 51 | if i+3 < argc: 52 | rest_file = open(argv[i+3],'w') 53 | 54 | return dataset, subset_size, method, subset_file, rest_file 55 | 56 | def random_selection(dataset, subset_size): 57 | l = sum(1 for line in open(dataset,'r')) 58 | return sorted(random.sample(xrange(l), subset_size)) 59 | 60 | def stratified_selection(dataset, subset_size): 61 | labels = [line.split(None,1)[0] for line in open(dataset)] 62 | label_linenums = defaultdict(list) 63 | for i, label in enumerate(labels): 64 | label_linenums[label] += [i] 65 | 66 | l = len(labels) 67 | remaining = subset_size 68 | ret = [] 69 | 70 | # classes with fewer data are sampled first; otherwise 71 | # some rare classes may not be selected 72 | for label in sorted(label_linenums, key=lambda x: len(label_linenums[x])): 73 | linenums = label_linenums[label] 74 | label_size = len(linenums) 75 | # at least one instance per class 76 | s = int(min(remaining, max(1, math.ceil(label_size*(float(subset_size)/l))))) 77 | if s == 0: 78 | sys.stderr.write('''\ 79 | Error: failed to have at least one instance per class 80 | 1. You may have regression data. 81 | 2. Your classification data is unbalanced or too small. 82 | Please use -s 1. 83 | ''') 84 | sys.exit(-1) 85 | remaining -= s 86 | ret += [linenums[i] for i in random.sample(xrange(label_size), s)] 87 | return sorted(ret) 88 | 89 | def main(argv=sys.argv): 90 | dataset, subset_size, method, subset_file, rest_file = process_options(argv) 91 | #uncomment the following line to fix the random seed 92 | #random.seed(0) 93 | selected_lines = [] 94 | 95 | if method == 0: 96 | selected_lines = stratified_selection(dataset, subset_size) 97 | elif method == 1: 98 | selected_lines = random_selection(dataset, subset_size) 99 | 100 | #select instances based on selected_lines 101 | dataset = open(dataset,'r') 102 | prev_selected_linenum = -1 103 | for i in xrange(len(selected_lines)): 104 | for cnt in xrange(selected_lines[i]-prev_selected_linenum-1): 105 | line = dataset.readline() 106 | if rest_file: 107 | rest_file.write(line) 108 | subset_file.write(dataset.readline()) 109 | prev_selected_linenum = selected_lines[i] 110 | subset_file.close() 111 | 112 | if rest_file: 113 | for line in dataset: 114 | rest_file.write(line) 115 | rest_file.close() 116 | dataset.close() 117 | 118 | if __name__ == '__main__': 119 | main(sys.argv) 120 | 121 | --------------------------------------------------------------------------------