├── Src ├── RecalWin.m ├── kyheader.cpp ├── CmShow.h ├── LibLinear │ ├── blas │ │ ├── Makefile │ │ ├── blas.h │ │ ├── dscal.c │ │ ├── daxpy.c │ │ ├── ddot.c │ │ ├── dnrm2.c │ │ └── blasp.h │ ├── tron.h │ ├── linear.h │ ├── LibLinear.vcxproj │ ├── tron.cpp │ ├── train.c │ └── README.1.93.txt ├── yml.m ├── xml2yaml.m ├── CMakeLists.txt ├── FilterTIG.h ├── main.cpp ├── ImgContrastBB.h ├── ReadMe.txt ├── DataSetVOC.h ├── CmTimer.h ├── CmFile.h ├── CmShow.cpp ├── ValStructVec.h ├── FilterTIG.cpp ├── kyheader.h ├── CmFile.cpp ├── Objectness.h ├── DataSetVOC.cpp ├── CMakeLists.txt.user └── Objectness.cpp ├── BING.png ├── bing_vs_convnet.jpg ├── .gitignore ├── LICENSE └── README.md /Src/RecalWin.m: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /BING.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/torrvision/Objectness/HEAD/BING.png -------------------------------------------------------------------------------- /bing_vs_convnet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/torrvision/Objectness/HEAD/bing_vs_convnet.jpg -------------------------------------------------------------------------------- /Src/kyheader.cpp: -------------------------------------------------------------------------------- 1 | // kyheader.cpp : source file that includes just the standard includes 2 | // Objectness.pch will be the pre-compiled header 3 | // stdafx.obj will contain the pre-compiled type information 4 | 5 | #include "kyheader.h" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | *.obj 6 | 7 | # Compiled Dynamic libraries 8 | *.so 9 | *.dylib 
10 | *.dll 11 | 12 | # Compiled Static libraries 13 | *.lai 14 | *.la 15 | *.a 16 | *.lib 17 | 18 | # Executables 19 | *.exe 20 | *.out 21 | *.app 22 | -------------------------------------------------------------------------------- /Src/CmShow.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | class CmShow 3 | { 4 | public: 5 | static Mat HistBins(CMat& color3f, CMat& val, CStr& title, bool descendShow = false, CMat &with = Mat()); 6 | static void showTinyMat(CStr &title, CMat &m); 7 | static inline void SaveShow(CMat& img, CStr& title); 8 | }; 9 | 10 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/Makefile: -------------------------------------------------------------------------------- 1 | AR = ar rcv 2 | RANLIB = ranlib 3 | 4 | HEADERS = blas.h blasp.h 5 | FILES = dnrm2.o daxpy.o ddot.o dscal.o 6 | 7 | CFLAGS = $(OPTFLAGS) 8 | FFLAGS = $(OPTFLAGS) 9 | 10 | blas: $(FILES) $(HEADERS) 11 | $(AR) blas.a $(FILES) 12 | $(RANLIB) blas.a 13 | 14 | clean: 15 | - rm -f *.o 16 | - rm -f *.a 17 | - rm -f *~ 18 | 19 | .c.o: 20 | $(CC) $(CFLAGS) -c $*.c 21 | 22 | 23 | -------------------------------------------------------------------------------- /Src/yml.m: -------------------------------------------------------------------------------- 1 | wkDir = 'D:/WkDir/DetectionProposals/VOC2007/Annotations/'; 2 | xml2yaml(wkDir); 3 | wkDir = 'D:\WkDir\DetectionProposals\ImageNet\ILSVRC2012_bbox_val_v3\val\'; 4 | xml2yaml(wkDir); 5 | 6 | ImgNetDir = 'D:\WkDir\DetectionProposals\ImageNet\ILSVRC2012_bbox_train_v2\'; 7 | d = dir(ImgNetDir); 8 | isub = [d(:).isdir]; %# returns logical vector 9 | nameFolds = {d(isub).name}'; 10 | nameFolds(ismember(nameFolds,{'.','..'})) = []; 11 | for i=1:length(nameFolds) 12 | wkDir = [ImgNetDir nameFolds{i} '\']; 13 | fprintf('%d/%d: %s\n', i, length(nameFolds), wkDir); 14 | xml2yaml(wkDir); 15 | end 16 | 17 | 18 | 
-------------------------------------------------------------------------------- /Src/LibLinear/blas/blas.h: -------------------------------------------------------------------------------- 1 | /* blas.h -- C header file for BLAS Ver 1.0 */ 2 | /* Jesse Bennett March 23, 2000 */ 3 | 4 | /** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed." 5 | 6 | - From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */ 7 | 8 | #ifndef BLAS_INCLUDE 9 | #define BLAS_INCLUDE 10 | 11 | /* Data types specific to BLAS implementation */ 12 | typedef struct { float r, i; } fcomplex; 13 | typedef struct { double r, i; } dcomplex; 14 | typedef int blasbool; 15 | 16 | #include "blasp.h" /* Prototypes for all BLAS functions */ 17 | 18 | #define FALSE 0 19 | #define TRUE 1 20 | 21 | /* Macro functions */ 22 | #define MIN(a,b) ((a) <= (b) ? (a) : (b)) 23 | #define MAX(a,b) ((a) >= (b) ? (a) : (b)) 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /Src/LibLinear/tron.h: -------------------------------------------------------------------------------- 1 | #ifndef _TRON_H 2 | #define _TRON_H 3 | 4 | #pragma warning(disable:4996) 5 | 6 | class function 7 | { 8 | public: 9 | virtual double fun(double *w) = 0 ; 10 | virtual void grad(double *w, double *g) = 0 ; 11 | virtual void Hv(double *s, double *Hs) = 0 ; 12 | 13 | virtual int get_nr_variable(void) = 0 ; 14 | virtual ~function(void){} 15 | }; 16 | 17 | class TRON 18 | { 19 | public: 20 | TRON(const function *fun_obj, double eps = 0.1, int max_iter = 1000); 21 | ~TRON(); 22 | 23 | void tron(double *w); 24 | void set_print_string(void (*i_print) (const char *buf)); 25 | 26 | private: 27 | int trcg(double delta, double *g, double *s, double *r); 28 | double norm_inf(int n, double *x); 29 | 30 | double eps; 31 | int max_iter; 32 | function *fun_obj; 33 | void info(const char *fmt,...); 34 | void (*tron_print_string)(const char *buf); 35 | }; 36 | #endif 37 | 
-------------------------------------------------------------------------------- /Src/xml2yaml.m: -------------------------------------------------------------------------------- 1 | %% Convert the file type of opencv xml annotations to yaml thus it can be read by opencv 2 | % This functions relies on http://code.google.com/p/yamlmatlab/ 3 | % The results needs to be further refined to deal with indentation problem 4 | 5 | function xml2yaml(wkDir) 6 | fNs = dir([wkDir '*.xml']); 7 | fNum = length(fNs); 8 | for i = 1:fNum 9 | [~, nameNE, ~] = fileparts(fNs(i).name); 10 | %fprintf('%d/%d: %s\n', i, fNum, [wkDir nameNE]); 11 | fPathN = [wkDir nameNE '.yaml']; 12 | x=VOCreadxml([wkDir nameNE '.xml']); 13 | if isfield(x.annotation, 'owner') 14 | x.annotation = rmfield(x.annotation, 'owner'); 15 | end 16 | names = fieldnames(x.annotation.object); 17 | if (strcmp(names{end}, 'bndbox')) 18 | x.annotation.object = orderfields(x.annotation.object, names([end, 1:end-1])); 19 | end 20 | WriteYaml(fPathN, x); 21 | end 22 | end -------------------------------------------------------------------------------- /Src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(BING_linux) 2 | cmake_minimum_required(VERSION 2.8) 3 | 4 | find_package(OpenMP REQUIRED) 5 | 6 | # compile LibLinear 7 | include_directories("LibLinear") 8 | file(GLOB SOURCES "LibLinear/*.cpp" "LibLinear/blas/*.c") 9 | add_library(LibLinear STATIC ${SOURCES}) 10 | 11 | #OPENCV 12 | #include_directories(/usr/local/include) 13 | #link_directories(/usr/local/lib) 14 | ##if this does not work, then try to uncomment the things below. 
15 | find_package( OpenCV REQUIRED ) 16 | if(OpenCV_FOUND) 17 | include_directories( ${OpenCV_INCLUDE_DIRS} ) 18 | endif( OpenCV_FOUND ) 19 | list( APPEND CMAKE_CXX_FLAGS "-std=c++0x ${CMAKE_CXX_FLAGS} -fopenmp -ftree-vectorize") 20 | #list( APPEND CMAKE_CXX_FLAGS "-std=c++0x ${CMAKE_CXX_FLAGS} -fopenmp -ftest-coverage -fprofile-arcs") 21 | 22 | # compile BING 23 | file(GLOB SOURCES "*.cpp") 24 | add_library(BING STATIC ${SOURCES}) 25 | 26 | add_executable(${PROJECT_NAME} main.cpp) 27 | 28 | set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall") 29 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall") 30 | target_link_libraries(${PROJECT_NAME} opencv_core opencv_imgproc opencv_highgui opencv_imgcodecs ${EXTERNAL_LIBS} BING LibLinear) 31 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/dscal.c: -------------------------------------------------------------------------------- 1 | #include "blas.h" 2 | 3 | int dscal_(int *n, double *sa, double *sx, int *incx) 4 | { 5 | long int i, m, nincx, nn, iincx; 6 | double ssa; 7 | 8 | /* scales a vector by a constant. 9 | uses unrolled loops for increment equal to 1. 10 | jack dongarra, linpack, 3/11/78. 11 | modified 3/93 to return if incx .le. 0. 
12 | modified 12/3/93, array(1) declarations changed to array(*) */ 13 | 14 | /* Dereference inputs */ 15 | nn = *n; 16 | iincx = *incx; 17 | ssa = *sa; 18 | 19 | if (nn > 0 && iincx > 0) 20 | { 21 | if (iincx == 1) /* code for increment equal to 1 */ 22 | { 23 | m = nn-4; 24 | for (i = 0; i < m; i += 5) 25 | { 26 | sx[i] = ssa * sx[i]; 27 | sx[i+1] = ssa * sx[i+1]; 28 | sx[i+2] = ssa * sx[i+2]; 29 | sx[i+3] = ssa * sx[i+3]; 30 | sx[i+4] = ssa * sx[i+4]; 31 | } 32 | for ( ; i < nn; ++i) /* clean-up loop */ 33 | sx[i] = ssa * sx[i]; 34 | } 35 | else /* code for increment not equal to 1 */ 36 | { 37 | nincx = nn * iincx; 38 | for (i = 0; i < nincx; i += iincx) 39 | sx[i] = ssa * sx[i]; 40 | } 41 | } 42 | 43 | return 0; 44 | } /* dscal_ */ 45 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/daxpy.c: -------------------------------------------------------------------------------- 1 | #include "blas.h" 2 | 3 | int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, 4 | int *incy) 5 | { 6 | long int i, m, ix, iy, nn, iincx, iincy; 7 | register double ssa; 8 | 9 | /* constant times a vector plus a vector. 10 | uses unrolled loop for increments equal to one. 11 | jack dongarra, linpack, 3/11/78. 12 | modified 12/3/93, array(1) declarations changed to array(*) */ 13 | 14 | /* Dereference inputs */ 15 | nn = *n; 16 | ssa = *sa; 17 | iincx = *incx; 18 | iincy = *incy; 19 | 20 | if( nn > 0 && ssa != 0.0 ) 21 | { 22 | if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */ 23 | { 24 | m = nn-3; 25 | for (i = 0; i < m; i += 4) 26 | { 27 | sy[i] += ssa * sx[i]; 28 | sy[i+1] += ssa * sx[i+1]; 29 | sy[i+2] += ssa * sx[i+2]; 30 | sy[i+3] += ssa * sx[i+3]; 31 | } 32 | for ( ; i < nn; ++i) /* clean-up loop */ 33 | sy[i] += ssa * sx[i]; 34 | } 35 | else /* code for unequal increments or equal increments not equal to 1 */ 36 | { 37 | ix = iincx >= 0 ? 0 : (1 - nn) * iincx; 38 | iy = iincy >= 0 ? 
0 : (1 - nn) * iincy; 39 | for (i = 0; i < nn; i++) 40 | { 41 | sy[iy] += ssa * sx[ix]; 42 | ix += iincx; 43 | iy += iincy; 44 | } 45 | } 46 | } 47 | 48 | return 0; 49 | } /* daxpy_ */ 50 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/ddot.c: -------------------------------------------------------------------------------- 1 | #include "blas.h" 2 | 3 | double ddot_(int *n, double *sx, int *incx, double *sy, int *incy) 4 | { 5 | long int i, m, nn, iincx, iincy; 6 | double stemp; 7 | long int ix, iy; 8 | 9 | /* forms the dot product of two vectors. 10 | uses unrolled loops for increments equal to one. 11 | jack dongarra, linpack, 3/11/78. 12 | modified 12/3/93, array(1) declarations changed to array(*) */ 13 | 14 | /* Dereference inputs */ 15 | nn = *n; 16 | iincx = *incx; 17 | iincy = *incy; 18 | 19 | stemp = 0.0; 20 | if (nn > 0) 21 | { 22 | if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */ 23 | { 24 | m = nn-4; 25 | for (i = 0; i < m; i += 5) 26 | stemp += sx[i] * sy[i] + sx[i+1] * sy[i+1] + sx[i+2] * sy[i+2] + 27 | sx[i+3] * sy[i+3] + sx[i+4] * sy[i+4]; 28 | 29 | for ( ; i < nn; i++) /* clean-up loop */ 30 | stemp += sx[i] * sy[i]; 31 | } 32 | else /* code for unequal increments or equal increments not equal to 1 */ 33 | { 34 | ix = 0; 35 | iy = 0; 36 | if (iincx < 0) 37 | ix = (1 - nn) * iincx; 38 | if (iincy < 0) 39 | iy = (1 - nn) * iincy; 40 | for (i = 0; i < nn; i++) 41 | { 42 | stemp += sx[ix] * sy[iy]; 43 | ix += iincx; 44 | iy += iincy; 45 | } 46 | } 47 | } 48 | 49 | return stemp; 50 | } /* ddot_ */ 51 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/dnrm2.c: -------------------------------------------------------------------------------- 1 | #include /* Needed for fabs() and sqrt() */ 2 | #include "blas.h" 3 | 4 | double dnrm2_(int *n, double *x, int *incx) 5 | { 6 | long int ix, nn, iincx; 7 | double norm, scale, absxi, ssq, 
temp; 8 | 9 | /* DNRM2 returns the euclidean norm of a vector via the function 10 | name, so that 11 | 12 | DNRM2 := sqrt( x'*x ) 13 | 14 | -- This version written on 25-October-1982. 15 | Modified on 14-October-1993 to inline the call to SLASSQ. 16 | Sven Hammarling, Nag Ltd. */ 17 | 18 | /* Dereference inputs */ 19 | nn = *n; 20 | iincx = *incx; 21 | 22 | if( nn > 0 && iincx > 0 ) 23 | { 24 | if (nn == 1) 25 | { 26 | norm = fabs(x[0]); 27 | } 28 | else 29 | { 30 | scale = 0.0; 31 | ssq = 1.0; 32 | 33 | /* The following loop is equivalent to this call to the LAPACK 34 | auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */ 35 | 36 | for (ix=(nn-1)*iincx; ix>=0; ix-=iincx) 37 | { 38 | if (x[ix] != 0.0) 39 | { 40 | absxi = fabs(x[ix]); 41 | if (scale < absxi) 42 | { 43 | temp = scale / absxi; 44 | ssq = ssq * (temp * temp) + 1.0; 45 | scale = absxi; 46 | } 47 | else 48 | { 49 | temp = absxi / scale; 50 | ssq += temp * temp; 51 | } 52 | } 53 | } 54 | norm = scale * sqrt(ssq); 55 | } 56 | } 57 | else 58 | norm = 0.0; 59 | 60 | return norm; 61 | 62 | } /* dnrm2_ */ 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Ming-Ming Cheng & Shuai Zheng 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 
13 | 14 | * Neither the name of the {organization} nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | -------------------------------------------------------------------------------- /Src/FilterTIG.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | class FilterTIG 4 | { 5 | public: 6 | void update(CMat &w); 7 | 8 | // For a W by H gradient magnitude map, find a W-7 by H-7 CV_32F matching score map 9 | Mat matchTemplate(const Mat &mag1u); 10 | 11 | inline float dot(const INT64 tig1, const INT64 tig2, const INT64 tig4, const INT64 tig8); 12 | 13 | public: 14 | void reconstruct(Mat &w); // For illustration purpose 15 | 16 | private: 17 | static const int NUM_COMP = 2; // Number of components 18 | static const int D = 64; // Dimension of TIG 19 | INT64 _bTIGs[NUM_COMP]; // Binary TIG features 20 | float _coeffs1[NUM_COMP]; // Coefficients of binary TIG features 21 | 22 | // For efficiently deals with different bits in CV_8U gradient map 23 | float _coeffs2[NUM_COMP], _coeffs4[NUM_COMP], _coeffs8[NUM_COMP]; 24 | }; 25 | 26 | 27 | inline float FilterTIG::dot(const INT64 tig1, const INT64 tig2, const INT64 tig4, const INT64 tig8) 28 | { 29 | INT64 bcT1 = __builtin_popcountll(tig1); 30 | INT64 bcT2 = __builtin_popcountll(tig2); 31 | INT64 bcT4 = __builtin_popcountll(tig4); 32 | INT64 bcT8 = __builtin_popcountll(tig8); 33 | 34 | INT64 bc01 = (__builtin_popcountll(_bTIGs[0] & tig1) << 1) - bcT1; 35 | INT64 bc02 = ((__builtin_popcountll(_bTIGs[0] & tig2) << 1) - bcT2) << 1; 36 | INT64 bc04 = ((__builtin_popcountll(_bTIGs[0] & tig4) << 1) - bcT4) << 2; 37 | INT64 bc08 = ((__builtin_popcountll(_bTIGs[0] & tig8) << 1) - bcT8) << 3; 38 | 39 | INT64 bc11 = (__builtin_popcountll(_bTIGs[1] & tig1) << 1) - bcT1; 40 | INT64 bc12 = ((__builtin_popcountll(_bTIGs[1] & tig2) << 1) - bcT2) << 1; 41 | INT64 bc14 = ((__builtin_popcountll(_bTIGs[1] & tig4) << 1) - bcT4) << 2; 42 | INT64 bc18 = ((__builtin_popcountll(_bTIGs[1] & tig8) << 1) - bcT8) << 3; 43 | 44 | return _coeffs1[0] * (bc01 + bc02 + bc04 + bc08) + _coeffs1[1] * 
(bc11 + bc12 + bc14 + bc18); 45 | } 46 | -------------------------------------------------------------------------------- /Src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | #include "Objectness.h" 3 | #include "ValStructVec.h" 4 | #include "CmShow.h" 5 | 6 | 7 | void RunObjectness(CStr &resName, double base, int W, int NSS, int numPerSz); 8 | 9 | void illutrateLoG() 10 | { 11 | for (float delta = 0.5f; delta < 1.1f; delta+=0.1f){ 12 | Mat f = Objectness::aFilter(delta, 8); 13 | normalize(f, f, 0, 1, NORM_MINMAX); 14 | CmShow::showTinyMat(format("D=%g", delta), f); 15 | } 16 | waitKey(0); 17 | } 18 | 19 | 20 | 21 | int main(int argc, char* argv[]) 22 | { 23 | //CStr wkDir = "D:/WkDir/DetectionProposals/VOC2007/Local/"; 24 | //illutrateLoG(); 25 | //RunObjectness("WinRecall.m", 2, 8, 2, 130); 26 | //RunObjectness("WinRecall.m", 2, 8, 2, 130); 27 | RunObjectness("WinRecall.m", 2, 8, 2, 130); 28 | //RunObjectness("WinRecall.m", 2, 8, 2, 130); 29 | //RunObjectness("WinRecall.m", 2, 8, 2, 130); 30 | 31 | return 0; 32 | } 33 | 34 | void RunObjectness(CStr &resName, double base, int W, int NSS, int numPerSz) 35 | { 36 | srand((unsigned int)time(NULL)); 37 | DataSetVOC voc2007("/home/bittnt/BING/BING_beta1/VOC/VOC2007/"); 38 | voc2007.loadAnnotations(); 39 | //voc2007.loadDataGenericOverCls(); 40 | 41 | printf("Dataset:`%s' with %d training and %d testing\n", _S(voc2007.wkDir), voc2007.trainNum, voc2007.testNum); 42 | printf("%s Base = %g, W = %d, NSS = %d, perSz = %d\n", _S(resName), base, W, NSS, numPerSz); 43 | 44 | Objectness objNess(voc2007, base, W, NSS); 45 | 46 | vector> boxesTests; 47 | //objNess.getObjBndBoxesForTests(boxesTests, 250); 48 | objNess.getObjBndBoxesForTestsFast(boxesTests, numPerSz); 49 | //objNess.getRandomBoxes(boxesTests); 50 | 51 | //objNess.evaluatePerClassRecall(boxesTests, resName, 1000); 52 | //objNess.illuTestReults(boxesTests); 53 | //objNess.evaluatePAMI12(); 54 | 
//objNess.evaluateIJCV13(); 55 | } 56 | -------------------------------------------------------------------------------- /Src/ImgContrastBB.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | struct ImgContrastBB 4 | { 5 | ImgContrastBB(CStr &imgPath); 6 | ImgContrastBB(CMat &img3u); 7 | 8 | inline float contrastVal(Vec4i ¢er); 9 | inline int regSum(Vec4i &box, Vec3i &sumColor); // Return region size and sum color 10 | 11 | private: 12 | Mat iImg; 13 | int _w, _h; 14 | inline void assertBBox(Vec4i ¢er, CStr &name); 15 | }; 16 | 17 | ImgContrastBB::ImgContrastBB(CStr &imgPath) 18 | { 19 | Mat img3u = imread(imgPath); 20 | integral(img3u, iImg, CV_32SC3); 21 | _w = img3u.cols; 22 | _h = img3u.rows; 23 | } 24 | 25 | ImgContrastBB::ImgContrastBB(CMat &img3u) 26 | { 27 | integral(img3u, iImg, CV_32SC3); 28 | _w = img3u.cols; 29 | _h = img3u.rows; 30 | } 31 | 32 | int ImgContrastBB::regSum(Vec4i &box, Vec3i &sumColor) 33 | { 34 | int x1 = box[0] - 1, y1 = box[1] - 1, x2 = box[2] - 1, y2 = box[3] - 1; 35 | sumColor = iImg.at(y2, x2) + iImg.at(y1, x1) - iImg.at(y1, x2) - iImg.at(y2, x1); 36 | return (x2 - x1)*(y2 - y1); 37 | } 38 | 39 | 40 | float ImgContrastBB::contrastVal(Vec4i ¢er) 41 | { 42 | int wd = (center[2] - center[0])/2, hd = (center[3] - center[1])/2; 43 | Vec4i surround(max(center[0] - wd, 1), max(center[1] - hd, 1), min(center[2] + wd, _w), min(center[3] + hd, _h)); 44 | Vec3i cColr, sColr; 45 | 46 | assertBBox(center, "Center"); 47 | assertBBox(center, "Surround"); 48 | int cSz = regSum(center, cColr); 49 | int sSz = regSum(surround, sColr); 50 | 51 | sColr -= cColr; 52 | sSz -= cSz; 53 | sColr /= sSz; 54 | cColr /= cSz; 55 | return sqrtf((float)(sqr(sColr[0] - cColr[0]) + sqr(sColr[1] - cColr[1]) + sqr(sColr[2] - cColr[2])))/100.0f; 56 | } 57 | 58 | void ImgContrastBB::assertBBox(Vec4i ¢er, CStr &name) 59 | { 60 | if (center[0] < 1 || center[1] < 1 || center[2] > _w || center[3] > _h) 61 | printf("%s: 
(%d, %d, %d, %d), (%d, %d)\n", _S(name), center[0], center[1], center[2], center[3], _w, _h); 62 | } -------------------------------------------------------------------------------- /Src/ReadMe.txt: -------------------------------------------------------------------------------- 1 | Objectness 2 | ========== 3 | BING Objectness proposal estimator linux/mac/windows version implementation, 4 | runs at 1000 FPS. This code is under BSD License. 5 | Objectness Proposal estimator运行代码,已经在linux/mac/windows下测试成功, 6 | 执行速度超过1000帧每秒。 7 | 8 | 9 | This is the 1000 FPS BING objectness linux version library for efficient 10 | objectness proposal estimator following the CVPR 2014 paper BING, please 11 | consider to cite and refer to this paper. 12 | 13 | @inproceedings{BingObj2014, 14 | title={{BING}: Binarized Normed Gradients for Objectness Estimation at 300fps}, 15 | author={Ming-Ming Cheng and Ziming Zhang and Wen-Yan Lin and Philip H. S. Torr}, 16 | booktitle={IEEE CVPR}, 17 | year={2014}, 18 | } 19 | 20 | The original author Ming-Ming Cheng has already released the source code for 21 | windows 64-bit platform. In this library, we intend to release the code for the 22 | linux/mac/windows users. You can maintain the code with Qt Creator IDE. 23 | 24 | Please find the original windows code / FAQ / Paper from this link: 25 | http://mmcheng.net/bing/ 26 | 27 | In order to make the code running as the original version in windows, you need 28 | to download the images/annotations PASCAL VOC 2007 data from the website. 29 | (http://pascallin.ecs.soton.ac.uk/challenges/VOC/voc2007/#testdata) 30 | 31 | We have tested the code, it produces the same accuracy results as the original windows 32 | version, while it runs at 1111 FPS(frame per second) at Ubuntu 12.04 with a Dell T7600 33 | workstation computer, which has two Intel Xeon E5-2687W (3.1GHz, 1600MHz) and 64 GB 34 | 1600MHz DDR3 Memory. 35 | 36 | FAQ 37 | 1. 
To run the code, you have to install OpenCV in the your ubuntu linux system. 38 | We specify the dependencies on opencv at 39 | " 40 | include_directories(/usr/local/include) 41 | link_directories(/usr/local/lib) 42 | " 43 | 2. You can use/debug/change the code with Qt Creator IDE on ubuntu/mac. 44 | 45 | 46 | Author: Ming-Ming Cheng(程明明) removethisifyouarehuman-cmm.thu@gmail.com 47 | Linux Author: Shuai Zheng (Kyle,郑帅) removethisifyouarehuman-szhengcvpr@gmail.com 48 | Please find more information from http://kylezheng.org/objectness/ 49 | Date: 19, February 50 | -------------------------------------------------------------------------------- /Src/LibLinear/linear.h: -------------------------------------------------------------------------------- 1 | #ifndef _LIBLINEAR_H 2 | #define _LIBLINEAR_H 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | struct feature_node 9 | { 10 | int index; 11 | double value; 12 | }; 13 | 14 | struct problem 15 | { 16 | int l, n; 17 | double *y; 18 | struct feature_node **x; 19 | double bias; /* < 0 if no bias term */ 20 | }; 21 | 22 | enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR = 11, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL }; /* solver_type */ 23 | 24 | struct parameter 25 | { 26 | int solver_type; 27 | 28 | /* these are for training only */ 29 | double eps; /* stopping criteria */ 30 | double C; 31 | int nr_weight; 32 | int *weight_label; 33 | double* weight; 34 | double p; 35 | }; 36 | 37 | struct model 38 | { 39 | struct parameter param; 40 | int nr_class; /* number of classes */ 41 | int nr_feature; 42 | double *w; 43 | int *label; /* label of each class */ 44 | double bias; 45 | }; 46 | 47 | struct model* train(const struct problem *prob, const struct parameter *param); 48 | void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, double *target); 49 | 50 | double predict_values(const struct 
model *model_, const struct feature_node *x, double* dec_values); 51 | double predict(const struct model *model_, const struct feature_node *x); 52 | double predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates); 53 | 54 | int save_model(const char *model_file_name, const struct model *model_); 55 | struct model *load_model(const char *model_file_name); 56 | 57 | int get_nr_feature(const struct model *model_); 58 | int get_nr_class(const struct model *model_); 59 | void get_labels(const struct model *model_, int* label); 60 | 61 | void free_model_content(struct model *model_ptr); 62 | void free_and_destroy_model(struct model **model_ptr_ptr); 63 | void destroy_param(struct parameter *param); 64 | 65 | const char *check_parameter(const struct problem *prob, const struct parameter *param); 66 | int check_probability_model(const struct model *model); 67 | void set_print_string_function(void (*print_func) (const char*)); 68 | 69 | #ifdef __cplusplus 70 | } 71 | #endif 72 | 73 | #endif /* _LIBLINEAR_H */ 74 | 75 | -------------------------------------------------------------------------------- /Src/DataSetVOC.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | struct DataSetVOC 4 | { 5 | DataSetVOC(CStr &wkDir); 6 | ~DataSetVOC(void); 7 | 8 | // Organization structure data for the dataset 9 | string wkDir; // Root working directory, all other directories are relative to this one 10 | string resDir, localDir; // Directory for saving results and local data 11 | string imgPathW, annoPathW; // Image and annotation path 12 | 13 | // Information for training and testing 14 | int trainNum, testNum; 15 | vecS trainSet, testSet; // File names (NE) for training and testing images 16 | vecS classNames; // Object class names 17 | vector> gtTrainBoxes, gtTestBoxes; // Ground truth bounding boxes for training and testing images 18 | vector gtTrainClsIdx, gtTestClsIdx; // Object 
class indexes 19 | 20 | 21 | // Load annotations 22 | void loadAnnotations(); 23 | 24 | static bool cvt2OpenCVYml(CStr &annoDir); // Needs to call yml.m in this solution before running this function. 25 | 26 | static inline double interUnio(const Vec4i &box1, const Vec4i &box2); 27 | 28 | // Get training and testing for demonstrating the generative of the objectness over classes 29 | void getTrainTest(); 30 | 31 | public: // Used for testing the ability of generic over classes 32 | void loadDataGenericOverCls(); 33 | 34 | private: 35 | void loadBox(const FileNode &fn, vector &boxes, vecI &clsIdx); 36 | bool loadBBoxes(CStr &nameNE, vector &boxes, vecI &clsIdx); 37 | static void getXmlStrVOC(CStr &fName, string &buf); 38 | static inline string keepXmlChar(CStr &str); 39 | static bool cvt2OpenCVYml(CStr &yamlName, CStr &ymlName); // Needs to call yml.m in this solution before running this function. 40 | }; 41 | 42 | string DataSetVOC::keepXmlChar(CStr &_str) 43 | { 44 | string str = _str; 45 | int sz = (int)str.size(), count = 0; 46 | for (int i = 0; i < sz; i++){ 47 | char c = str[i]; 48 | if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == ' ' || c == '.') 49 | str[count++] = str[i]; 50 | } 51 | str.resize(count); 52 | return str; 53 | } 54 | 55 | double DataSetVOC::interUnio(const Vec4i &bb, const Vec4i &bbgt) 56 | { 57 | int bi[4]; 58 | bi[0] = max(bb[0], bbgt[0]); 59 | bi[1] = max(bb[1], bbgt[1]); 60 | bi[2] = min(bb[2], bbgt[2]); 61 | bi[3] = min(bb[3], bbgt[3]); 62 | 63 | double iw = bi[2] - bi[0] + 1; 64 | double ih = bi[3] - bi[1] + 1; 65 | double ov = 0; 66 | if (iw>0 && ih>0){ 67 | double ua = (bb[2]-bb[0]+1)*(bb[3]-bb[1]+1)+(bbgt[2]-bbgt[0]+1)*(bbgt[3]-bbgt[1]+1)-iw*ih; 68 | ov = iw*ih/ua; 69 | } 70 | return ov; 71 | } 72 | -------------------------------------------------------------------------------- /Src/CmTimer.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 
#include 3 | class CmTimer 4 | { 5 | public: 6 | CmTimer(CStr t):title(t) { is_started = false; gettimeofday(&start_clock,NULL); gettimeofday(&end_clock,NULL); n_starts = 0; } 7 | 8 | ~CmTimer(){ if (is_started) printf("CmTimer '%s' is started and is being destroyed.\n", title.c_str()); } 9 | 10 | inline void Start(); 11 | inline void Stop(); 12 | inline void Reset(); 13 | 14 | inline bool Report(); 15 | inline bool StopAndReport() { Stop(); return Report(); } 16 | inline float TimeInSeconds(); 17 | 18 | private: 19 | CStr title; 20 | 21 | bool is_started; 22 | struct timeval start_clock, end_clock; 23 | //clock_t start_clock; 24 | //clock_t cumulative_clock; 25 | unsigned int n_starts; 26 | }; 27 | 28 | /************************************************************************/ 29 | /* Implementations */ 30 | /************************************************************************/ 31 | 32 | void CmTimer::Start() 33 | { 34 | if (is_started){ 35 | printf("CmTimer '%s' is already started. Nothing done.\n", title.c_str()); 36 | return; 37 | } 38 | 39 | is_started = true; 40 | n_starts++; 41 | //start_clock = clock(); 42 | gettimeofday(&start_clock,NULL); 43 | } 44 | 45 | void CmTimer::Stop() 46 | { 47 | if (!is_started){ 48 | printf("CmTimer '%s' is started. 
Nothing done\n", title.c_str()); 49 | return; 50 | } 51 | gettimeofday(&end_clock,NULL); 52 | //cumulative_clock += clock() - start_clock; 53 | is_started = false; 54 | } 55 | 56 | void CmTimer::Reset() 57 | { 58 | if (is_started) { 59 | printf("CmTimer '%s'is started during reset request.\n Only reset cumulative time.\n"); 60 | return; 61 | } 62 | gettimeofday(&start_clock,NULL); 63 | gettimeofday(&end_clock,NULL); 64 | //cumulative_clock = 0; 65 | } 66 | 67 | bool CmTimer::Report() 68 | { 69 | if (is_started){ 70 | printf("CmTimer '%s' is started.\n Cannot provide a time report.", title.c_str()); 71 | return false; 72 | } 73 | 74 | float timeUsed = TimeInSeconds(); 75 | printf("[%s] CumuTime: %gs, #run: %d, AvgTime: %gs\n", title.c_str(), timeUsed, n_starts, timeUsed/n_starts); 76 | return true; 77 | } 78 | 79 | float CmTimer::TimeInSeconds() 80 | { 81 | if (is_started){ 82 | printf("CmTimer '%s' is started. Nothing done\n", title.c_str()); 83 | return 0; 84 | } 85 | return double((end_clock.tv_sec - start_clock.tv_sec) * 1000000u + 86 | end_clock.tv_usec - start_clock.tv_usec) / 1.e6; 87 | //return float(cumulative_clock) / CLOCKS_PER_SEC; 88 | } 89 | 90 | -------------------------------------------------------------------------------- /Src/CmFile.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #ifdef _WIN32 3 | #include 4 | #else 5 | #include 6 | #include 7 | #include 8 | #include 9 | #endif 10 | struct CmFile 11 | { 12 | static inline string GetFolder(CStr& path); 13 | static inline string GetName(CStr& path); 14 | static inline string GetNameNE(CStr& path); 15 | static inline string GetPathNE(CStr& path); 16 | 17 | // Get file names from a wildcard. 
Eg: GetNames("D:\\*.jpg", imgNames); 18 | static int GetNames(CStr &nameW, vecS &names, string &_dir); 19 | static int GetNames(CStr &nameW, vecS &names); 20 | static int GetNames(CStr& rootFolder, CStr &fileW, vecS &names); 21 | static int GetNamesNE(CStr& nameWC, vecS &names, string &dir, string &ext); 22 | static int GetNamesNE(CStr& nameWC, vecS &names); 23 | static int GetNamesNE(CStr& rootFolder, CStr &fileW, vecS &names); 24 | static inline string GetExtention(CStr name); 25 | 26 | static int GetSubFolders(CStr& folder, vecS& subFolders); 27 | 28 | static inline string GetWkDir(); 29 | 30 | static bool MkDir(CStr& path); 31 | static vecS loadStrList(CStr &fName); 32 | static bool writeStrList(CStr &fName, const vecS &strs); 33 | }; 34 | 35 | /************************************************************************/ 36 | /* Implementation of inline functions */ 37 | /************************************************************************/ 38 | string CmFile::GetFolder(CStr& path) 39 | { 40 | return path.substr(0, path.find_last_of("\\/")+1); 41 | } 42 | 43 | string CmFile::GetName(CStr& path) 44 | { 45 | int start = path.find_last_of("\\/")+1; 46 | int end = path.find_last_not_of(' ')+1; 47 | return path.substr(start, end - start); 48 | } 49 | 50 | string CmFile::GetNameNE(CStr& path) 51 | { 52 | int start = path.find_last_of("\\/")+1; 53 | int end = path.find_last_of('.'); 54 | if (end >= 0) 55 | return path.substr(start, end - start); 56 | else 57 | return path.substr(start, path.find_last_not_of(' ')+1 - start); 58 | } 59 | 60 | string CmFile::GetPathNE(CStr& path) 61 | { 62 | int end = path.find_last_of('.'); 63 | if (end >= 0) 64 | return path.substr(0, end); 65 | else 66 | return path.substr(0, path.find_last_not_of(' ') + 1); 67 | } 68 | 69 | string CmFile::GetExtention(CStr name) 70 | { 71 | return name.substr(name.find_last_of('.')); 72 | } 73 | /************************************************************************/ 74 | /* Implementations */ 75 
| /************************************************************************/ 76 | -------------------------------------------------------------------------------- /Src/CmShow.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | #include "CmShow.h" 3 | 4 | 5 | 6 | typedef pair CostiIdx; 7 | Mat CmShow::HistBins(CMat& color3f, CMat& val, CStr& title, bool descendShow, CMat &with) 8 | { 9 | // Prepare data 10 | int H = 300, spaceH = 6, barH = 10, n = color3f.cols; 11 | CV_Assert(color3f.size() == val.size() && color3f.rows == 1); 12 | Mat binVal1i, binColor3b, width1i; 13 | if (with.size() == val.size()) 14 | with.convertTo(width1i, CV_32S, 400/sum(with).val[0]); // Default shown width 15 | else 16 | width1i = Mat(1, n, CV_32S, Scalar(10)); // Default bin width = 10 17 | int W = cvRound(sum(width1i).val[0]); 18 | color3f.convertTo(binColor3b, CV_8UC3, 255); 19 | double maxVal, minVal; 20 | minMaxLoc(val, &minVal, &maxVal); 21 | printf("%g\n", H/max(maxVal, -minVal)); 22 | val.convertTo(binVal1i, CV_32S, 20000); 23 | Size szShow(W, H + spaceH + barH); 24 | szShow.height += minVal < 0 && !descendShow ? H + spaceH : 0; 25 | Mat showImg3b(szShow, CV_8UC3, Scalar(255, 255, 255)); 26 | int* binH = (int*)(binVal1i.data); 27 | Vec3b* binColor = (Vec3b*)(binColor3b.data); 28 | int* binW = (int*)(width1i.data); 29 | vector costIdx(n); 30 | if (descendShow){ 31 | for (int i = 0; i < n; i++) 32 | costIdx[i] = make_pair(binH[i], i); 33 | sort(costIdx.begin(), costIdx.end(), std::greater()); 34 | } 35 | 36 | // Show image 37 | for (int i = 0, x = 0; i < n; i++){ 38 | int idx = descendShow ? costIdx[i].second : i; 39 | int h = descendShow ? abs(binH[idx]) : binH[idx]; 40 | Scalar color(binColor[idx]); 41 | Rect reg(x, H + spaceH, binW[idx], barH); 42 | showImg3b(reg) = color; // Draw bar 43 | rectangle(showImg3b, reg, Scalar(0)); 44 | 45 | reg.height = abs(h); 46 | reg.y = h >= 0 ? 
H - h : H + 2 * spaceH + barH; 47 | showImg3b(reg) = color; 48 | rectangle(showImg3b, reg, Scalar(0)); 49 | 50 | x += binW[idx]; 51 | } 52 | imshow(title, showImg3b); 53 | return showImg3b; 54 | } 55 | 56 | void CmShow::showTinyMat(CStr &title, CMat &m) 57 | { 58 | int scale = 50, sz = m.rows * m.cols; 59 | while (sz > 200){ 60 | scale /= 2; 61 | sz /= 4; 62 | } 63 | 64 | Mat img; 65 | resize(m, img, Size(), scale, scale, CV_INTER_NN); 66 | if (img.channels() == 3) 67 | cvtColor(img, img, CV_RGB2BGR); 68 | SaveShow(img, title); 69 | } 70 | 71 | void CmShow::SaveShow(CMat& img, CStr& title) 72 | { 73 | if (title.size() == 0) 74 | return; 75 | 76 | int mDepth = CV_MAT_DEPTH(img.type()); 77 | double scale = (mDepth == CV_32F || mDepth == CV_64F ? 255 : 1); 78 | if (title.size() > 4 && title[title.size() - 4] == '.') 79 | imwrite(title, img*scale); 80 | else if (title.size()) 81 | imshow(title, img); 82 | } 83 | -------------------------------------------------------------------------------- /Src/ValStructVec.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | /************************************************************************/ 4 | /* A value struct vector that supports efficient sorting */ 5 | /************************************************************************/ 6 | 7 | template 8 | struct ValStructVec 9 | { 10 | ValStructVec(){clear();} 11 | inline int size() const {return sz;} 12 | inline void clear() {sz = 0; structVals.clear(); valIdxes.clear();} 13 | inline void reserve(int resSz){clear(); structVals.reserve(resSz); valIdxes.reserve(resSz); } 14 | inline void pushBack(const VT& val, const ST& structVal) {valIdxes.push_back(make_pair(val, sz)); structVals.push_back(structVal); sz++;} 15 | 16 | inline const VT& operator ()(int i) const {return valIdxes[i].first;} // Should be called after sort 17 | inline const ST& operator [](int i) const {return structVals[valIdxes[i].second];} // Should be 
called after sort 18 | inline VT& operator ()(int i) {return valIdxes[i].first;} // Should be called after sort 19 | inline ST& operator [](int i) {return structVals[valIdxes[i].second];} // Should be called after sort 20 | 21 | void sort(bool descendOrder = true); 22 | const vector &getSortedStructVal(); 23 | void append(const ValStructVec &newVals, int startV = 0); 24 | 25 | vector structVals; // struct values 26 | 27 | private: 28 | int sz; // size of the value struct vector 29 | vector> valIdxes; // Indexes after sort 30 | bool smaller() {return true;}; 31 | vector sortedStructVals; 32 | }; 33 | 34 | template 35 | void ValStructVec::append(const ValStructVec &newVals, int startV) 36 | { 37 | int sz = newVals.size(); 38 | for (int i = 0; i < sz; i++) 39 | pushBack((float)((i+300)*startV)/*newVals(i)*/, newVals[i]); 40 | } 41 | 42 | template 43 | void ValStructVec::sort(bool descendOrder /* = true */) 44 | { 45 | if (descendOrder) 46 | std::sort(valIdxes.begin(), valIdxes.end(), std::greater>()); 47 | else 48 | std::sort(valIdxes.begin(), valIdxes.end(), std::less>()); 49 | } 50 | 51 | template 52 | const vector& ValStructVec::getSortedStructVal() 53 | { 54 | sortedStructVals.resize(sz); 55 | for (int i = 0; i < sz; i++) 56 | sortedStructVals[i] = structVals[valIdxes[i].second]; 57 | return sortedStructVals; 58 | } 59 | 60 | /* 61 | void valStructVecDemo() 62 | { 63 | ValStructVec sVals; 64 | sVals.pushBack(3, "String 3"); 65 | sVals.pushBack(5, "String 5"); 66 | sVals.pushBack(4, "String 4"); 67 | sVals.pushBack(1, "String 1"); 68 | sVals.sort(false); 69 | for (int i = 0; i < sVals.size(); i++) 70 | printf("%d, %s\n", sVals(i), _S(sVals[i])); 71 | } 72 | */ -------------------------------------------------------------------------------- /Src/FilterTIG.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | #include "FilterTIG.h" 3 | #include "CmShow.h" 4 | 5 | 6 | void FilterTIG::update(CMat &w1f){ 7 | 
CV_Assert(w1f.cols * w1f.rows == D && w1f.type() == CV_32F && w1f.isContinuous()); 8 | float b[D], residuals[D]; 9 | memcpy(residuals, w1f.data, sizeof(float)*D); 10 | for (int i = 0; i < NUM_COMP; i++){ 11 | float avg = 0; 12 | for (int j = 0; j < D; j++){ 13 | b[j] = residuals[j] >= 0.0f ? 1.0f : -1.0f; 14 | avg += residuals[j] * b[j]; 15 | } 16 | avg /= D; 17 | _coeffs1[i] = avg, _coeffs2[i] = avg*2, _coeffs4[i] = avg*4, _coeffs8[i] = avg*8; 18 | for (int j = 0; j < D; j++) 19 | residuals[j] -= avg*b[j]; 20 | UINT64 tig = 0; 21 | for (int j = 0; j < D; j++) 22 | tig = (tig << 1) | (b[j] > 0 ? 1 : 0); 23 | _bTIGs[i] = tig; 24 | } 25 | } 26 | 27 | void FilterTIG::reconstruct(Mat &w1f){ 28 | w1f = Mat::zeros(8, 8, CV_32F); 29 | float *weight = (float*)w1f.data; 30 | for (int i = 0; i < NUM_COMP; i++){ 31 | UINT64 tig = _bTIGs[i]; 32 | for (int j = 0; j < D; j++) 33 | weight[j] += _coeffs1[i] * (((tig >> (63-j)) & 1) ? 1 : -1); 34 | } 35 | } 36 | 37 | // For a W by H gradient magnitude map, find a W-7 by H-7 CV_32F matching score map 38 | // Please refer to my paper for definition of the variables used in this function 39 | Mat FilterTIG::matchTemplate(const Mat &mag1u){ 40 | const int H = mag1u.rows, W = mag1u.cols; 41 | const Size sz(W+1, H+1); // Expand original size to avoid dealing with boundary conditions 42 | Mat_ Tig1 = Mat_::zeros(sz), Tig2 = Mat_::zeros(sz); 43 | Mat_ Tig4 = Mat_::zeros(sz), Tig8 = Mat_::zeros(sz); 44 | Mat_ Row1 = Mat_::zeros(sz), Row2 = Mat_::zeros(sz); 45 | Mat_ Row4 = Mat_::zeros(sz), Row8 = Mat_::zeros(sz); 46 | Mat_ scores(sz); 47 | for(int y = 1; y <= H; y++){ 48 | const byte* G = mag1u.ptr(y-1); 49 | INT64* T1 = Tig1.ptr(y); // Binary TIG of current row 50 | INT64* T2 = Tig2.ptr(y); 51 | INT64* T4 = Tig4.ptr(y); 52 | INT64* T8 = Tig8.ptr(y); 53 | INT64* Tu1 = Tig1.ptr(y-1); // Binary TIG of upper row 54 | INT64* Tu2 = Tig2.ptr(y-1); 55 | INT64* Tu4 = Tig4.ptr(y-1); 56 | INT64* Tu8 = Tig8.ptr(y-1); 57 | byte* R1 = Row1.ptr(y); 58 | 
byte* R2 = Row2.ptr(y); 59 | byte* R4 = Row4.ptr(y); 60 | byte* R8 = Row8.ptr(y); 61 | float *s = scores.ptr(y); 62 | for (int x = 1; x <= W; x++) { 63 | byte g = G[x-1]; 64 | R1[x] = (R1[x-1] << 1) | ((g >> 4) & 1); 65 | R2[x] = (R2[x-1] << 1) | ((g >> 5) & 1); 66 | R4[x] = (R4[x-1] << 1) | ((g >> 6) & 1); 67 | R8[x] = (R8[x-1] << 1) | ((g >> 7) & 1); 68 | T1[x] = (Tu1[x] << 8) | R1[x]; 69 | T2[x] = (Tu2[x] << 8) | R2[x]; 70 | T4[x] = (Tu4[x] << 8) | R4[x]; 71 | T8[x] = (Tu8[x] << 8) | R8[x]; 72 | s[x] = dot(T1[x], T2[x], T4[x], T8[x]); 73 | } 74 | } 75 | Mat matchCost1f; 76 | scores(Rect(8, 8, W-7, H-7)).copyTo(matchCost1f); 77 | return matchCost1f; 78 | } 79 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Objectness Proposal Generator with BING 2 | ========== 3 | [![License (3-Clause BSD)](https://img.shields.io/badge/license-BSD%203--Clause-brightgreen.svg?style=flat-square)](https://github.com/bittnt/Objectness/blob/master/LICENSE) 4 | 5 | 6 | BING Objectness proposal estimator Linux Ubuntu 14.04/Mac OSX Yosemite/iOS 8.1 version implementation, 7 | runs at 1000 FPS at a Dell 7600 workstation with Linux Ubuntu 14.04. 8 | ![sample](BING.png) 9 | 10 | ![sample](bing_vs_convnet.jpg) 11 | 12 | 13 | ## INSTALL 14 | To make this program running fast, you need to enable release mode: 15 | > cmake -DCMAKE_BUILD_TYPE=Release ../ 16 | 17 | ## DEMO 18 | If you run this in Ubuntu 14.04 or other similar Linux system, feel free to 19 | change the path in main.cpp 20 | > DataSetVOC voc2007("YOUR_PATH_TO_THE_VOC2007DATA"); 21 | 22 | Notice that WinRecall.m is generated by this code, you do not need them. If you 23 | want to make this code working with other datasets, one simple solution is to 24 | make the folders similar to the VOC 2007 one. 
25 | 26 | ## Introduction 27 | This is the 1000 FPS BING objectness linux version library for an efficient 28 | objectness proposal estimator. We would appreciate it if you could cite and refer to 29 | the papers below. 30 | ``` 31 | @inproceedings{BingObj2014, 32 | title={{BING}: Binarized Normed Gradients for Objectness Estimation at 300fps}, 33 | author={Ming-Ming Cheng and Ziming Zhang and Wen-Yan Lin and Philip H. S. Torr}, 34 | booktitle={IEEE CVPR}, 35 | year={2014}, 36 | } 37 | ``` 38 | ``` 39 | @inproceedings{depthobjectproposals_GCPR2015, 40 | author = {Shuai Zheng and Victor Adrian Prisacariu and Melinos Averkiou and Ming-Ming Cheng and Niloy J. Mitra and Jamie Shotton and Philip H. S. Torr and Carsten Rother}, 41 | title = {Object Proposal Estimation in Depth Images using Compact 3D Shape Manifolds}, 42 | booktitle = {German Conference on Pattern Recognition (GCPR)}, 43 | year = {2015} 44 | } 45 | ``` 46 | The original author Ming-Ming Cheng has already released the source code for the 47 | windows 64-bit platform. In this library, we intend to release the code for 48 | linux/mac/iOS users. You can maintain the code with the Qt Creator IDE. 49 | 50 | Please find the original windows code / FAQ / Paper from this link: 51 | http://mmcheng.net/bing/ 52 | 53 | ## FAQ 54 | In order to make the code run as the original version does on windows, you need 55 | to download the images/annotations PASCAL VOC 2007 data from the website. 56 | (http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata) 57 | 58 | We have tested the code; it produces the same accuracy results as the original windows 59 | version, while it runs at 1111 FPS (frames per second) on Ubuntu 12.04 with a Dell T7600 60 | workstation computer, which has two Intel Xeon E5-2687W (3.1GHz, 1600MHz) and 64 GB 61 | 1600MHz DDR3 Memory.
62 | 63 | ## Author Info 64 | Author: Ming-Ming Cheng removethisifyouarehuman-cmm.thu@gmail.com 65 | Linux Author: Shuai Zheng (Kyle) removethisifyouarehuman-szhengcvpr@gmail.com 66 | Please find more information from http://kylezheng.org/objectproposal/ 67 | Date: 19, February 68 | 69 | -------------------------------------------------------------------------------- /Src/kyheader.h: -------------------------------------------------------------------------------- 1 | // stdafx.h : include file for standard system include files, 2 | // or project specific include files that are used frequently, but 3 | // are changed infrequently 4 | // 5 | #pragma once 6 | #pragma warning(disable: 4996) 7 | #pragma warning(disable: 4995) 8 | #pragma warning(disable: 4805) 9 | #pragma warning(disable: 4267) 10 | 11 | #define _CRTDBG_MAP_ALLOC 12 | #include 13 | //#include 14 | 15 | //#include 16 | #include 17 | 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | //#include 31 | //#include 32 | #include 33 | #include 34 | using namespace std; 35 | 36 | // TODO: reference additional headers your program requires here 37 | #include "LibLinear/linear.h" 38 | #include 39 | 40 | #define CV_VERSION_ID CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION) 41 | #ifdef _DEBUG 42 | #define cvLIB(name) "opencv_" name CV_VERSION_ID "d" 43 | #else 44 | #define cvLIB(name) "opencv_" name CV_VERSION_ID 45 | #endif 46 | 47 | #pragma comment( lib, cvLIB("core")) 48 | #pragma comment( lib, cvLIB("imgproc")) 49 | #pragma comment( lib, cvLIB("highgui")) 50 | using namespace cv; 51 | #ifdef WIN32 52 | /* windows stuff */ 53 | #else 54 | typedef unsigned long DWORD; 55 | typedef unsigned short WORD; 56 | typedef unsigned int UNINT32; 57 | typedef bool BOOL; 58 | typedef void *HANDLE; 59 | typedef unsigned char byte; 60 | #endif 61 | typedef vector vecI; 62 | 
typedef const string CStr; 63 | typedef const Mat CMat; 64 | typedef vector vecS; 65 | typedef vector vecM; 66 | typedef vector vecF; 67 | typedef vector vecD; 68 | 69 | enum{CV_FLIP_BOTH = -1, CV_FLIP_VERTICAL = 0, CV_FLIP_HORIZONTAL = 1}; 70 | #define _S(str) ((str).c_str()) 71 | #define CHK_IND(p) ((p).x >= 0 && (p).x < _w && (p).y >= 0 && (p).y < _h) 72 | #define CV_Assert_(expr, args) \ 73 | {\ 74 | if(!(expr)) {\ 75 | string msg = cv::format args; \ 76 | printf("%s in %s:%d\n", msg.c_str(), __FILE__, __LINE__); \ 77 | cv::error(cv::Exception(CV_StsAssert, msg, __FUNCTION__, __FILE__, __LINE__) ); }\ 78 | } 79 | 80 | // Return -1 if not in the list 81 | template 82 | static inline int findFromList(const T &word, const vector &strList) {size_t idx = find(strList.begin(), strList.end(), word) - strList.begin(); return idx < strList.size() ? idx : -1;} 83 | template inline T sqr(T x) { return x * x; } // out of range risk for T = byte, ... 84 | template inline T vecSqrDist(const Vec &v1, const Vec &v2) {T s = 0; for (int i=0; i inline T vecDist(const Vec &v1, const Vec &v2) { return sqrt(vecSqrDist(v1, v2)); } // out of range risk for T = byte, ... 
86 | 87 | inline Rect Vec4i2Rect(Vec4i &v){return Rect(Point(v[0] - 1, v[1] - 1), Point(v[2], v[3])); } 88 | #ifdef __WIN32 89 | #define INT64 long long 90 | #else 91 | #define INT64 long 92 | typedef unsigned long UINT64; 93 | #endif 94 | 95 | #define __POPCNT__ 96 | #include 97 | #include 98 | #ifdef __WIN32 99 | # include 100 | # define POPCNT(x) __popcnt(x) 101 | # define POPCNT64(x) __popcnt64(x) 102 | #endif 103 | #ifndef __WIN32 104 | # define POPCNT(x) __builtin_popcount(x) 105 | # define POPCNT64(x) __builtin_popcountll(x) 106 | #endif 107 | 108 | #include "CmFile.h" 109 | #include "CmTimer.h" 110 | 111 | -------------------------------------------------------------------------------- /Src/LibLinear/LibLinear.vcxproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | x64 7 | 8 | 9 | Release 10 | x64 11 | 12 | 13 | 14 | {86266F16-8B7E-4666-A12F-96E351579ADA} 15 | Win32Proj 16 | LibLinear 17 | 18 | 19 | 20 | StaticLibrary 21 | true 22 | MultiByte 23 | v110 24 | 25 | 26 | StaticLibrary 27 | false 28 | true 29 | MultiByte 30 | v110 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | Level3 48 | Disabled 49 | WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) 50 | 51 | 52 | Console 53 | true 54 | 55 | 56 | 57 | 58 | Level3 59 | 60 | 61 | MaxSpeed 62 | true 63 | true 64 | WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) 65 | 66 | 67 | Console 68 | true 69 | true 70 | true 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /Src/CmFile.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | 3 | 4 | #ifdef _WIN32 5 | #include 6 | #include 7 | #include 8 | #include 9 | #else 10 | #include 11 | #include 12 | #include 13 | #include 14 | #endif 15 | 16 | 17 | // Get image names 
from a wildcard. Eg: GetNames("D:\\*.jpg", imgNames); 18 | int CmFile::GetNames(CStr &_nameW, vecS &_names) 19 | { 20 | string _dir = GetFolder(_nameW); 21 | _names.clear(); 22 | 23 | DIR *dir; 24 | struct dirent *ent; 25 | if((dir = opendir(_dir.c_str()))!=NULL){ 26 | //print all the files and directories within directory 27 | while((ent = readdir(dir))!=NULL){ 28 | if(ent->d_name[0] == '.') 29 | continue; 30 | if(ent->d_type ==4) 31 | continue; 32 | _names.push_back(ent->d_name); 33 | } 34 | closedir(dir); 35 | } else { 36 | perror(""); 37 | return EXIT_FAILURE; 38 | } 39 | return (int)_names.size(); 40 | } 41 | int CmFile::GetNames(CStr &_nameW, vecS &_names, string &_dir) 42 | { 43 | _dir = GetFolder(_nameW); 44 | _names.clear(); 45 | 46 | DIR *dir; 47 | struct dirent *ent; 48 | if((dir = opendir(_dir.c_str()))!=NULL){ 49 | //print all the files and directories within directory 50 | while((ent = readdir(dir))!=NULL){ 51 | if(ent->d_name[0] == '.') 52 | continue; 53 | if(ent->d_type ==4) 54 | continue; 55 | _names.push_back(ent->d_name); 56 | } 57 | closedir(dir); 58 | } else { 59 | perror(""); 60 | return EXIT_FAILURE; 61 | } 62 | return (int)_names.size(); 63 | } 64 | int CmFile::GetSubFolders(CStr &folder, vecS &subFolders) 65 | { 66 | subFolders.clear(); 67 | string nameWC = GetFolder(folder);//folder + "/*"; 68 | 69 | DIR *dir; 70 | struct dirent *ent; 71 | if((dir = opendir(nameWC.c_str()))!=NULL){ 72 | while((ent = readdir(dir))!=NULL){ 73 | if(ent->d_name[0] == '.') 74 | continue; 75 | if(ent->d_type == 4){ 76 | subFolders.push_back(ent->d_name); 77 | } 78 | } 79 | closedir(dir); 80 | } else { 81 | perror(""); 82 | return EXIT_FAILURE; 83 | } 84 | return (int)subFolders.size(); 85 | } 86 | int CmFile::GetNames(CStr& rootFolder, CStr &fileW, vecS &names) 87 | { 88 | GetNames(rootFolder + fileW, names); 89 | vecS subFolders, tmpNames; 90 | int subNum = CmFile::GetSubFolders(rootFolder, subFolders);// 91 | for (int i = 0; i < subNum; i++){ 92 | 
subFolders[i] += "/"; 93 | int subNum = GetNames(rootFolder + subFolders[i], fileW, tmpNames); 94 | for (int j = 0; j < subNum; j++) 95 | names.push_back(subFolders[i] + tmpNames[j]); 96 | } 97 | return (int)names.size(); 98 | } 99 | int CmFile::GetNamesNE(CStr& nameWC, vecS &names) 100 | { 101 | string dir = string(); 102 | string ext = string(); 103 | int fNum = GetNames(nameWC, names, dir); 104 | ext = GetExtention(nameWC); 105 | for (int i = 0; i < fNum; i++) 106 | names[i] = GetNameNE(names[i]); 107 | return fNum; 108 | } 109 | int CmFile::GetNamesNE(CStr& nameWC, vecS &names, string &dir, string &ext) 110 | { 111 | int fNum = GetNames(nameWC, names, dir); 112 | ext = GetExtention(nameWC); 113 | for (int i = 0; i < fNum; i++) 114 | names[i] = GetNameNE(names[i]); 115 | return fNum; 116 | } 117 | int CmFile::GetNamesNE(CStr& rootFolder, CStr &fileW, vecS &names) 118 | { 119 | int fNum = GetNames(rootFolder, fileW, names); 120 | int extS = GetExtention(fileW).size(); 121 | for (int i = 0; i < fNum; i++) 122 | names[i].resize(names[i].size() - extS); 123 | return fNum; 124 | } 125 | bool CmFile::MkDir(CStr &_path) 126 | { 127 | if(_path.size() == 0) 128 | return false; 129 | static char buffer[1024]; 130 | strcpy(buffer, _S(_path)); 131 | #ifdef _WIN32 132 | for (int i = 0; buffer[i] != 0; i ++) { 133 | if (buffer[i] == '\\' || buffer[i] == '/') { 134 | buffer[i] = '\0'; 135 | CreateDirectoryA(buffer, 0); 136 | buffer[i] = '/'; 137 | } 138 | } 139 | return CreateDirectoryA(_S(_path), 0); 140 | #else 141 | for (int i = 0; buffer[i] != 0; i ++) { 142 | if (buffer[i] == '\\' || buffer[i] == '/') { 143 | buffer[i] = '\0'; 144 | mkdir(buffer, 0755); 145 | buffer[i] = '/'; 146 | } 147 | } 148 | return mkdir(_S(_path), 0755); 149 | #endif 150 | } 151 | vecS CmFile::loadStrList(CStr &fName) 152 | { 153 | ifstream fIn(fName); 154 | string line; 155 | vecS strs; 156 | while(getline(fIn, line) && line.size()){ 157 | unsigned sz = line.size(); 158 | line.resize(sz - 1); 
//Please use script to convert the VOC format data into the OpenCV format data 159 | //line.resize(sz); 160 | strs.push_back(line); 161 | } 162 | return strs; 163 | } 164 | bool CmFile::writeStrList(CStr &fName, const vecS &strs) 165 | { 166 | FILE *f = fopen(_S(fName), "w"); 167 | if (f == NULL) 168 | return false; 169 | for (size_t i = 0; i < strs.size(); i++) 170 | fprintf(f, "%s\n", _S(strs[i])); 171 | fclose(f); 172 | return true; 173 | } 174 | -------------------------------------------------------------------------------- /Src/Objectness.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "DataSetVOC.h" 3 | #include "ValStructVec.h" 4 | #include "FilterTIG.h" 5 | class Objectness 6 | { 7 | public: 8 | // base for window size quantization, feature window size (W, W), and non-maximal suppress size NSS 9 | Objectness(DataSetVOC &voc, double base = 2, int W = 8, int NSS = 2); 10 | ~Objectness(void); 11 | 12 | // Load trained model. 13 | int loadTrainedModel(string modelName = ""); // Return -1, 0, or 1 if partial, none, or all loaded 14 | 15 | // Get potential bounding boxes, each of which is represented by a Vec4i for (minX, minY, maxX, maxY). 16 | // The trained model should be prepared before calling this function: loadTrainedModel() or trainStageI() + trainStageII(). 
17 | // Use numDet to control the final number of proposed bounding boxes, and number of per size (scale and aspect ratio) 18 | void getObjBndBoxes(CMat &img3u, ValStructVec &valBoxes, int numDetPerSize = 120); 19 | 20 | // Training and testing on the dataset 21 | void trainObjectness(int numDetPerSize = 100); 22 | void getObjBndBoxesForTests(vector> &boxesTests, int numDetPerSize = 100); 23 | void getObjBndBoxesForTestsFast(vector> &boxesTests, int numDetPerSize = 100); 24 | void getRandomBoxes(vector> &boxesTests, int numD = 10000); 25 | void evaluatePAMI12(CStr &saveName = "PlotMAMI12.m"); 26 | void evaluateIJCV13(CStr &saveName = "IJCV13.m"); 27 | void evaluatePerClassRecall(vector> &boxesTests, CStr &saveName = "Plot.m", const int numDet = 1000); 28 | void evaluatePerImgRecall(const vector> &boxesTests, CStr &saveName, const int numDet = 1000); 29 | void illuTestReults(const vector> &boxesTests); 30 | void setColorSpace(int clr = MAXBGR); 31 | 32 | // Training SVM with feature vector X and label Y. 33 | // Each row of X is a feature vector, with corresponding label in Y. 
34 | // Return a CV_32F weight Mat 35 | static Mat trainSVM(CMat &X1f, const vecI &Y, int sT, double C, double bias = -1, double eps = 0.01); 36 | 37 | // pX1f, nX1f are positive and negative training samples, each is a row vector 38 | static Mat trainSVM(const vector &pX1f, const vector &nX1f, int sT, double C, double bias = -1, double eps = 0.01, int maxTrainNum = 100000); 39 | 40 | // Write matrix to binary file 41 | static bool matWrite(CStr& filename, CMat& M); 42 | 43 | // Read matrix from binary file 44 | static bool matRead( const string& filename, Mat& M); 45 | 46 | enum {MAXBGR, HSV, G}; 47 | 48 | static void meanStdDev(CMat &data1f, Mat &mean1f, Mat &stdDev1f); 49 | 50 | void illustrate(); 51 | 52 | inline static float LoG(float x, float y, float delta) {float d = -(x*x+y*y)/(2*delta*delta); return -1.0f/((float)(CV_PI)*pow(delta, 4)) * (1+d)*exp(d);} // Laplacian of Gaussian 53 | static Mat aFilter(float delta, int sz); 54 | 55 | private: // Parameters 56 | const double _base, _logBase; // base for window size quantization 57 | const int _W; // As described in the paper: #Size, Size(_W, _H) of feature window. 58 | const int _NSS; // Size for non-maximal suppress 59 | const int _maxT, _minT, _numT; // The minimal and maximal dimensions of the template 60 | 61 | int _Clr; // 62 | static const char* _clrName[3]; 63 | 64 | DataSetVOC &_voc; // The dataset for training, testing 65 | string _modelName, _trainDirSI, _bbResDir; 66 | 67 | vecI _svmSzIdxs; // Indexes of active size. It's equal to _svmFilters.size() and _svmReW1f.rows 68 | Mat _svmFilter; // Filters learned at stage I, each is a _H by _W CV_32F matrix 69 | FilterTIG _tigF; // TIG filter 70 | Mat _svmReW1f; // Re-weight parameters learned at stage II. 
71 | 72 | private: // Help functions 73 | 74 | bool filtersLoaded() {int n = _svmSzIdxs.size(); return n > 0 && _svmReW1f.size() == Size(2, n) && _svmFilter.size() == Size(_W, _W);} 75 | 76 | int gtBndBoxSampling(const Vec4i &bbgt, vector &samples, vecI &bbR); 77 | 78 | Mat getFeature(CMat &img3u, const Vec4i &bb); // Return region feature 79 | 80 | inline double maxIntUnion(const Vec4i &bb, const vector &bbgts) {double maxV = 0; for(size_t i = 0; i < bbgts.size(); i++) maxV = max(maxV, DataSetVOC::interUnio(bb, bbgts[i])); return maxV; } 81 | 82 | // Convert VOC bounding box type to OpenCV Rect 83 | inline Rect pnt2Rect(const Vec4i &bb){int x = bb[0] - 1, y = bb[1] - 1; return Rect(x, y, bb[2] - x, bb[3] - y);} 84 | 85 | // Template length at quantized scale t 86 | inline int tLen(int t){return cvRound(pow(_base, t));} 87 | 88 | // Sub to quantization index 89 | inline int sz2idx(int w, int h) {w -= _minT; h -= _minT; CV_Assert(w >= 0 && h >= 0 && w < _numT && h < _numT); return h * _numT + w + 1; } 90 | inline string strVec4i(const Vec4i &v) const {return format("%d, %d, %d, %d", v[0], v[1], v[2], v[3]);} 91 | 92 | void generateTrianData(); 93 | void trainStageI(); 94 | void trainStateII(int numPerSz = 100); 95 | void predictBBoxSI(CMat &mag3u, ValStructVec &valBoxes, vecI &sz, int NUM_WIN_PSZ = 100, bool fast = true); 96 | void predictBBoxSII(ValStructVec &valBoxes, const vecI &sz); 97 | 98 | // Calculate the image gradient: center option as in VLFeat 99 | void gradientMag(CMat &imgBGR3u, Mat &mag1u); 100 | 101 | static void gradientRGB(CMat &bgr3u, Mat &mag1u); 102 | static void gradientGray(CMat &bgr3u, Mat &mag1u); 103 | static void gradientHSV(CMat &bgr3u, Mat &mag1u); 104 | static void gradientXY(CMat &x1i, CMat &y1i, Mat &mag1u); 105 | 106 | static inline int bgrMaxDist(const Vec3b &u, const Vec3b &v) {int b = abs(u[0]-v[0]), g = abs(u[1]-v[1]), r = abs(u[2]-v[2]); b = max(b,g); return max(b,r);} 107 | static inline int vecDist3b(const Vec3b &u, const 
Vec3b &v) {return abs(u[0]-v[0]) + abs(u[1]-v[1]) + abs(u[2]-v[2]);} 108 | 109 | //Non-maximal suppress 110 | static void nonMaxSup(CMat &matchCost1f, ValStructVec &matchCost, int NSS = 1, int maxPoint = 50, bool fast = true); 111 | 112 | static void PrintVector(FILE *f, const vecD &v, CStr &name); 113 | 114 | vecD getVector(CMat &t1f); 115 | }; 116 | 117 | -------------------------------------------------------------------------------- /Src/LibLinear/tron.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "tron.h" 6 | 7 | #ifndef min 8 | template static inline T min(T x,T y) { return (x static inline T max(T x,T y) { return (x>y)?x:y; } 13 | #endif 14 | 15 | #ifdef __cplusplus 16 | extern "C" { 17 | #endif 18 | 19 | extern double dnrm2_(int *, double *, int *); 20 | extern double ddot_(int *, double *, int *, double *, int *); 21 | extern int daxpy_(int *, double *, double *, int *, double *, int *); 22 | extern int dscal_(int *, double *, double *, int *); 23 | 24 | #ifdef __cplusplus 25 | } 26 | #endif 27 | 28 | static void default_print(const char *buf) 29 | { 30 | fputs(buf,stdout); 31 | fflush(stdout); 32 | } 33 | 34 | void TRON::info(const char *fmt,...) 35 | { 36 | char buf[BUFSIZ]; 37 | va_list ap; 38 | va_start(ap,fmt); 39 | vsprintf(buf,fmt,ap); 40 | va_end(ap); 41 | (*tron_print_string)(buf); 42 | } 43 | 44 | TRON::TRON(const function *fun_obj, double eps, int max_iter) 45 | { 46 | this->fun_obj=const_cast(fun_obj); 47 | this->eps=eps; 48 | this->max_iter=max_iter; 49 | tron_print_string = default_print; 50 | } 51 | 52 | TRON::~TRON() 53 | { 54 | } 55 | 56 | void TRON::tron(double *w) 57 | { 58 | // Parameters for updating the iterates. 59 | double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75; 60 | 61 | // Parameters for updating the trust region size delta. 
62 | double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4; 63 | 64 | int n = fun_obj->get_nr_variable(); 65 | int i, cg_iter; 66 | double delta, snorm, one=1.0; 67 | double alpha, f, fnew, prered, actred, gs; 68 | int search = 1, iter = 1, inc = 1; 69 | double *s = new double[n]; 70 | double *r = new double[n]; 71 | double *w_new = new double[n]; 72 | double *g = new double[n]; 73 | 74 | for (i=0; ifun(w); 78 | fun_obj->grad(w, g); 79 | delta = dnrm2_(&n, g, &inc); 80 | double gnorm1 = delta; 81 | double gnorm = gnorm1; 82 | 83 | if (gnorm <= eps*gnorm1) 84 | search = 0; 85 | 86 | iter = 1; 87 | 88 | while (iter <= max_iter && search) 89 | { 90 | cg_iter = trcg(delta, g, s, r); 91 | 92 | memcpy(w_new, w, sizeof(double)*n); 93 | daxpy_(&n, &one, s, &inc, w_new, &inc); 94 | 95 | gs = ddot_(&n, g, &inc, s, &inc); 96 | prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc)); 97 | fnew = fun_obj->fun(w_new); 98 | 99 | // Compute the actual reduction. 100 | actred = f - fnew; 101 | 102 | // On the first iteration, adjust the initial step bound. 103 | snorm = dnrm2_(&n, s, &inc); 104 | if (iter == 1) 105 | delta = min(delta, snorm); 106 | 107 | // Compute prediction alpha*snorm of the step. 108 | if (fnew - f - gs <= 0) 109 | alpha = sigma3; 110 | else 111 | alpha = max(sigma1, -0.5*(gs/(fnew - f - gs))); 112 | 113 | // Update the trust region bound according to the ratio of actual to predicted reduction. 
114 | if (actred < eta0*prered) 115 | delta = min(max(alpha, sigma1)*snorm, sigma2*delta); 116 | else if (actred < eta1*prered) 117 | delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta)); 118 | else if (actred < eta2*prered) 119 | delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta)); 120 | else 121 | delta = max(delta, min(alpha*snorm, sigma3*delta)); 122 | 123 | info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter); 124 | 125 | if (actred > eta0*prered) 126 | { 127 | iter++; 128 | memcpy(w, w_new, sizeof(double)*n); 129 | f = fnew; 130 | fun_obj->grad(w, g); 131 | 132 | gnorm = dnrm2_(&n, g, &inc); 133 | if (gnorm <= eps*gnorm1) 134 | break; 135 | } 136 | if (f < -1.0e+32) 137 | { 138 | info("WARNING: f < -1.0e+32\n"); 139 | break; 140 | } 141 | if (fabs(actred) <= 0 && prered <= 0) 142 | { 143 | info("WARNING: actred and prered <= 0\n"); 144 | break; 145 | } 146 | if (fabs(actred) <= 1.0e-12*fabs(f) && 147 | fabs(prered) <= 1.0e-12*fabs(f)) 148 | { 149 | info("WARNING: actred and prered too small\n"); 150 | break; 151 | } 152 | } 153 | 154 | delete[] g; 155 | delete[] r; 156 | delete[] w_new; 157 | delete[] s; 158 | } 159 | 160 | int TRON::trcg(double delta, double *g, double *s, double *r) 161 | { 162 | int i, inc = 1; 163 | int n = fun_obj->get_nr_variable(); 164 | double one = 1; 165 | double *d = new double[n]; 166 | double *Hd = new double[n]; 167 | double rTr, rnewTrnew, alpha, beta, cgtol; 168 | 169 | for (i=0; iHv(d, Hd); 185 | 186 | alpha = rTr/ddot_(&n, d, &inc, Hd, &inc); 187 | daxpy_(&n, &alpha, d, &inc, s, &inc); 188 | if (dnrm2_(&n, s, &inc) > delta) 189 | { 190 | info("cg reaches trust region boundary\n"); 191 | alpha = -alpha; 192 | daxpy_(&n, &alpha, d, &inc, s, &inc); 193 | 194 | double std = ddot_(&n, s, &inc, d, &inc); 195 | double sts = ddot_(&n, s, &inc, s, &inc); 196 | double dtd = ddot_(&n, d, &inc, d, &inc); 197 | double dsq = delta*delta; 198 | double rad = 
sqrt(std*std + dtd*(dsq-sts)); 199 | if (std >= 0) 200 | alpha = (dsq - sts)/(std + rad); 201 | else 202 | alpha = (rad - std)/dtd; 203 | daxpy_(&n, &alpha, d, &inc, s, &inc); 204 | alpha = -alpha; 205 | daxpy_(&n, &alpha, Hd, &inc, r, &inc); 206 | break; 207 | } 208 | alpha = -alpha; 209 | daxpy_(&n, &alpha, Hd, &inc, r, &inc); 210 | rnewTrnew = ddot_(&n, r, &inc, r, &inc); 211 | beta = rnewTrnew/rTr; 212 | dscal_(&n, &beta, d, &inc); 213 | daxpy_(&n, &one, r, &inc, d, &inc); 214 | rTr = rnewTrnew; 215 | } 216 | 217 | delete[] d; 218 | delete[] Hd; 219 | 220 | return(cg_iter); 221 | } 222 | 223 | double TRON::norm_inf(int n, double *x) 224 | { 225 | double dmax = fabs(x[0]); 226 | for (int i=1; i= dmax) 228 | dmax = fabs(x[i]); 229 | return(dmax); 230 | } 231 | 232 | void TRON::set_print_string(void (*print_string) (const char *buf)) 233 | { 234 | tron_print_string = print_string; 235 | } 236 | -------------------------------------------------------------------------------- /Src/DataSetVOC.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | #include "DataSetVOC.h" 3 | 4 | 5 | DataSetVOC::DataSetVOC(CStr &_wkDir) 6 | { 7 | wkDir = _wkDir; 8 | resDir = wkDir + "Results/"; 9 | localDir = wkDir + "Local/"; 10 | imgPathW = wkDir + "JPEGImages/%s.jpg"; 11 | annoPathW = wkDir + "Annotations/%s.yml"; 12 | CmFile::MkDir(resDir); 13 | CmFile::MkDir(localDir); 14 | 15 | trainSet = CmFile::loadStrList(wkDir + "ImageSets/Main/train.txt"); 16 | testSet = CmFile::loadStrList(wkDir + "ImageSets/Main/test.txt"); 17 | classNames = CmFile::loadStrList(wkDir + "ImageSets/Main/class.txt"); 18 | 19 | // testSet.insert(testSet.end(), trainSet.begin(), trainSet.end()); 20 | // testSet.resize(min(1000, (int)testSet.size())); 21 | 22 | trainNum = trainSet.size(); 23 | testNum = testSet.size(); 24 | } 25 | 26 | 27 | Vec4i getMaskRange(CMat &mask1u, int ext = 0) 28 | { 29 | int maxX = INT_MIN, maxY = INT_MIN, minX = INT_MAX, 
minY = INT_MAX, rows = mask1u.rows, cols = mask1u.cols; 30 | for (int r = 0; r < rows; r++) { 31 | const byte* data = mask1u.ptr(r); 32 | for (int c = 0; c < cols; c++) 33 | if (data[c] > 10) { 34 | maxX = max(maxX, c); 35 | minX = min(minX, c); 36 | maxY = max(maxY, r); 37 | minY = min(minY, r); 38 | } 39 | } 40 | 41 | maxX = maxX + ext + 1 < cols ? maxX + ext + 1 : cols; 42 | maxY = maxY + ext + 1 < rows ? maxY + ext + 1 : rows; 43 | minX = minX - ext > 0 ? minX - ext : 0; 44 | minY = minY - ext > 0 ? minY - ext : 0; 45 | 46 | return Vec4i(minX + 1, minY + 1, maxX, maxY); // Rect(minX, minY, maxX - minX, maxY - minY); 47 | } 48 | 49 | 50 | DataSetVOC::~DataSetVOC(void) 51 | { 52 | } 53 | 54 | void DataSetVOC::loadAnnotations() 55 | { 56 | gtTrainBoxes.resize(trainNum); 57 | gtTrainClsIdx.resize(trainNum); 58 | for (int i = 0; i < trainNum; i++) 59 | if (!loadBBoxes(trainSet[i], gtTrainBoxes[i], gtTrainClsIdx[i])) 60 | return; 61 | 62 | gtTestBoxes.resize(testNum); 63 | gtTestClsIdx.resize(testNum); 64 | for (int i = 0; i < testNum; i++) 65 | if(!loadBBoxes(testSet[i], gtTestBoxes[i], gtTestClsIdx[i])) 66 | return; 67 | printf("Load annotations finished\n"); 68 | } 69 | 70 | void DataSetVOC::loadDataGenericOverCls() 71 | { 72 | vecS allSet = trainSet; 73 | allSet.insert(allSet.end(), testSet.begin(), testSet.end()); 74 | int imgN = (int)allSet.size(); 75 | trainSet.clear(), testSet.clear(); 76 | trainSet.reserve(imgN), testSet.reserve(imgN); 77 | vector> gtBoxes(imgN); 78 | vector gtClsIdx(imgN); 79 | for (int i = 0; i < imgN; i++){ 80 | if (!loadBBoxes(allSet[i], gtBoxes[i], gtClsIdx[i])) 81 | return; 82 | vector trainBoxes, testBoxes; 83 | vecI trainIdx, testIdx; 84 | for (size_t j = 0; j < gtBoxes[i].size(); j++) 85 | if (gtClsIdx[i][j] < 6){ 86 | trainBoxes.push_back(gtBoxes[i][j]); 87 | trainIdx.push_back(gtClsIdx[i][j]); 88 | } 89 | else{ 90 | testBoxes.push_back(gtBoxes[i][j]); 91 | testIdx.push_back(gtClsIdx[i][j]); 92 | } 93 | if (trainBoxes.size()){ 94 | 
trainSet.push_back(allSet[i]); 95 | gtTrainBoxes.push_back(trainBoxes); 96 | gtTrainClsIdx.push_back(trainIdx); 97 | } 98 | else{ 99 | testSet.push_back(allSet[i]); 100 | gtTestBoxes.push_back(testBoxes); 101 | gtTestClsIdx.push_back(testIdx); 102 | } 103 | } 104 | trainNum = trainSet.size(); 105 | testNum = testSet.size(); 106 | printf("Load annotations (generic over classes) finished\n"); 107 | } 108 | 109 | void DataSetVOC::loadBox(const FileNode &fn, vector &boxes, vecI &clsIdx){ 110 | string isDifficult; 111 | fn["difficult"]>>isDifficult; 112 | if (isDifficult == "1") 113 | return; 114 | 115 | string strXmin, strYmin, strXmax, strYmax; 116 | fn["bndbox"]["xmin"] >> strXmin; 117 | fn["bndbox"]["ymin"] >> strYmin; 118 | fn["bndbox"]["xmax"] >> strXmax; 119 | fn["bndbox"]["ymax"] >> strYmax; 120 | boxes.push_back(Vec4i(atoi(_S(strXmin)), atoi(_S(strYmin)), atoi(_S(strXmax)), atoi(_S(strYmax)))); 121 | 122 | string clsName; 123 | fn["name"]>>clsName; 124 | clsIdx.push_back(findFromList(clsName, classNames)); 125 | CV_Assert_(clsIdx[clsIdx.size() - 1] >= 0, ("Invalidate class name\n")); 126 | } 127 | 128 | bool DataSetVOC::loadBBoxes(CStr &nameNE, vector &boxes, vecI &clsIdx) 129 | { 130 | string fName = format(_S(annoPathW), _S(nameNE)); 131 | FileStorage fs(fName, FileStorage::READ); 132 | FileNode fn = fs["annotation"]["object"]; 133 | boxes.clear(); 134 | clsIdx.clear(); 135 | if (fn.isSeq()){ 136 | for (FileNodeIterator it = fn.begin(), it_end = fn.end(); it != it_end; it++){ 137 | loadBox(*it, boxes, clsIdx); 138 | } 139 | } 140 | else 141 | loadBox(fn, boxes, clsIdx); 142 | return true; 143 | } 144 | 145 | // Needs to call yml.m in this solution before running this function. 
146 | bool DataSetVOC::cvt2OpenCVYml(CStr &annoDir) 147 | { 148 | vecS namesNE; 149 | int imgNum = CmFile::GetNamesNE(annoDir + "*.yaml", namesNE); 150 | printf("Converting annotations to OpenCV yml format:\n"); 151 | for (int i = 0; i < imgNum; i++){ 152 | printf("%d/%d %s.yaml\r", i, imgNum, _S(namesNE[i])); 153 | string fPath = annoDir + namesNE[i]; 154 | cvt2OpenCVYml(fPath + ".yaml", fPath + ".yml"); 155 | } 156 | return true; 157 | } 158 | 159 | // Needs to call yml.m in this solution before running this function. 160 | bool DataSetVOC::cvt2OpenCVYml(CStr &yamlName, CStr &ymlName) 161 | { 162 | ifstream f(yamlName); 163 | FILE *fO = fopen(_S(ymlName), "w"); 164 | if (!f.is_open() && fO == NULL) 165 | return false; 166 | fprintf(fO, "%s\n", "%YAML:1.0\n"); 167 | string line; 168 | 169 | int addIdent = 0; 170 | while(getline(f, line)){ 171 | if (line.substr(0, 12) == " filename: ") 172 | line = " filename: \"" + line.substr(12) + "\""; 173 | int tmp = line.find_first_of('-'); 174 | if (tmp != string::npos){ 175 | bool allSpace = true; 176 | for (int k = 0; k < tmp; k++) 177 | if (line[k] != ' ') 178 | allSpace = false; 179 | if (allSpace) 180 | addIdent = tmp; 181 | } 182 | for (int k = 0; k < addIdent; k++) 183 | fprintf(fO, " "); 184 | fprintf(fO, "%s\n", _S(line)); 185 | } 186 | fclose(fO); 187 | 188 | FileStorage fs(ymlName, FileStorage::READ); 189 | string tmp; 190 | fs["annotation"]["folder"]>>tmp; 191 | return true; 192 | } 193 | 194 | 195 | // Get training and testing for demonstrating the generative of the objectness over classes 196 | void DataSetVOC::getTrainTest() 197 | { 198 | const int TRAIN_CLS_NUM = 6; 199 | string trainCls[TRAIN_CLS_NUM] = {"bird", "car", "cat", "cow", "dog", "sheep"}; 200 | 201 | } 202 | 203 | void DataSetVOC::getXmlStrVOC(CStr &fName, string &buf) 204 | { 205 | ifstream fin(fName); 206 | string strLine; 207 | buf.clear(); 208 | buf.reserve(100000); 209 | buf += "\n\n"; 210 | while (getline(fin, strLine) && strLine.size()) { 
211 | int startP = strLine.find_first_of(">") + 1; 212 | int endP = strLine.find_last_of("<"); 213 | if (endP > startP){ 214 | string val = keepXmlChar(strLine.substr(startP, endP - startP)); 215 | if (val.size() < endP - startP) 216 | strLine = strLine.substr(0, startP) + val + strLine.substr(endP); 217 | } 218 | buf += strLine + "\n"; 219 | } 220 | buf += "\n"; 221 | //FileStorage fs(buf, FileStorage::READ + FileStorage::MEMORY); 222 | ofstream fout("D:/t.xml"); 223 | fout<< buf; 224 | } 225 | -------------------------------------------------------------------------------- /Src/LibLinear/train.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include "linear.h" 8 | #define Malloc(type,n) (type *)malloc((n)*sizeof(type)) 9 | #define INF HUGE_VAL 10 | 11 | #pragma warning(disable:4996) 12 | 13 | void print_null(const char *s) {} 14 | 15 | void exit_with_help() 16 | { 17 | printf( 18 | "Usage: train [options] training_set_file [model_file]\n" 19 | "options:\n" 20 | "-s type : set type of solver (default 1)\n" 21 | " for multi-class classification\n" 22 | " 0 -- L2-regularized logistic regression (primal)\n" 23 | " 1 -- L2-regularized L2-loss support vector classification (dual)\n" 24 | " 2 -- L2-regularized L2-loss support vector classification (primal)\n" 25 | " 3 -- L2-regularized L1-loss support vector classification (dual)\n" 26 | " 4 -- support vector classification by Crammer and Singer\n" 27 | " 5 -- L1-regularized L2-loss support vector classification\n" 28 | " 6 -- L1-regularized logistic regression\n" 29 | " 7 -- L2-regularized logistic regression (dual)\n" 30 | " for regression\n" 31 | " 11 -- L2-regularized L2-loss support vector regression (primal)\n" 32 | " 12 -- L2-regularized L2-loss support vector regression (dual)\n" 33 | " 13 -- L2-regularized L1-loss support vector regression (dual)\n" 34 | "-c cost : set the parameter C (default 
1)\n" 35 | "-p epsilon : set the epsilon in loss function of SVR (default 0.1)\n" 36 | "-e epsilon : set tolerance of termination criterion\n" 37 | " -s 0 and 2\n" 38 | " |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n" 39 | " where f is the primal function and pos/neg are # of\n" 40 | " positive/negative data (default 0.01)\n" 41 | " -s 11\n" 42 | " |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)\n" 43 | " -s 1, 3, 4, and 7\n" 44 | " Dual maximal violation <= eps; similar to libsvm (default 0.1)\n" 45 | " -s 5 and 6\n" 46 | " |f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n" 47 | " where f is the primal function (default 0.01)\n" 48 | " -s 12 and 13\n" 49 | " |f'(alpha)|_1 <= eps |f'(alpha0)|,\n" 50 | " where f is the dual function (default 0.1)\n" 51 | "-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n" 52 | "-wi weight: weights adjust the parameter C of different classes (see README for details)\n" 53 | "-v n: n-fold cross validation mode\n" 54 | "-q : quiet mode (no outputs)\n" 55 | ); 56 | exit(1); 57 | } 58 | 59 | void exit_input_error(int line_num) 60 | { 61 | fprintf(stderr,"Wrong input format at line %d\n", line_num); 62 | exit(1); 63 | } 64 | 65 | static char *line = NULL; 66 | static int max_line_len; 67 | 68 | static char* readline(FILE *input) 69 | { 70 | int len; 71 | 72 | if(fgets(line,max_line_len,input) == NULL) 73 | return NULL; 74 | 75 | while(strrchr(line,'\n') == NULL) 76 | { 77 | max_line_len *= 2; 78 | line = (char *) realloc(line,max_line_len); 79 | len = (int) strlen(line); 80 | if(fgets(line+len,max_line_len-len,input) == NULL) 81 | break; 82 | } 83 | return line; 84 | } 85 | 86 | void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name); 87 | void read_problem(const char *filename); 88 | void do_cross_validation(); 89 | 90 | struct feature_node *x_space; 91 | struct parameter param; 92 | struct problem prob; 93 | struct model* model_; 94 | int flag_cross_validation; 
95 | int nr_fold; 96 | double bias; 97 | 98 | int main(int argc, char **argv) 99 | { 100 | char input_file_name[1024]; 101 | char model_file_name[1024]; 102 | const char *error_msg; 103 | 104 | parse_command_line(argc, argv, input_file_name, model_file_name); 105 | read_problem(input_file_name); 106 | error_msg = check_parameter(&prob,¶m); 107 | 108 | if(error_msg) 109 | { 110 | fprintf(stderr,"ERROR: %s\n",error_msg); 111 | exit(1); 112 | } 113 | 114 | if(flag_cross_validation) 115 | { 116 | do_cross_validation(); 117 | } 118 | else 119 | { 120 | model_=train(&prob, ¶m); 121 | if(save_model(model_file_name, model_)) 122 | { 123 | fprintf(stderr,"can't save model to file %s\n",model_file_name); 124 | exit(1); 125 | } 126 | free_and_destroy_model(&model_); 127 | } 128 | destroy_param(¶m); 129 | free(prob.y); 130 | free(prob.x); 131 | free(x_space); 132 | free(line); 133 | 134 | return 0; 135 | } 136 | 137 | void do_cross_validation() 138 | { 139 | int i; 140 | int total_correct = 0; 141 | double total_error = 0; 142 | double sumv = 0, sumy = 0, sumvv = 0, sumyy = 0, sumvy = 0; 143 | double *target = Malloc(double, prob.l); 144 | 145 | cross_validation(&prob,¶m,nr_fold,target); 146 | if(param.solver_type == L2R_L2LOSS_SVR || 147 | param.solver_type == L2R_L1LOSS_SVR_DUAL || 148 | param.solver_type == L2R_L2LOSS_SVR_DUAL) 149 | { 150 | for(i=0;i=argc) 199 | exit_with_help(); 200 | switch(argv[i-1][1]) 201 | { 202 | case 's': 203 | param.solver_type = atoi(argv[i]); 204 | break; 205 | 206 | case 'c': 207 | param.C = atof(argv[i]); 208 | break; 209 | 210 | case 'p': 211 | param.p = atof(argv[i]); 212 | break; 213 | 214 | case 'e': 215 | param.eps = atof(argv[i]); 216 | break; 217 | 218 | case 'B': 219 | bias = atof(argv[i]); 220 | break; 221 | 222 | case 'w': 223 | ++param.nr_weight; 224 | param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight); 225 | param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight); 226 | 
param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]); 227 | param.weight[param.nr_weight-1] = atof(argv[i]); 228 | break; 229 | 230 | case 'v': 231 | flag_cross_validation = 1; 232 | nr_fold = atoi(argv[i]); 233 | if(nr_fold < 2) 234 | { 235 | fprintf(stderr,"n-fold cross validation: n must >= 2\n"); 236 | exit_with_help(); 237 | } 238 | break; 239 | 240 | case 'q': 241 | print_func = &print_null; 242 | i--; 243 | break; 244 | 245 | default: 246 | fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]); 247 | exit_with_help(); 248 | break; 249 | } 250 | } 251 | 252 | set_print_string_function(print_func); 253 | 254 | // determine filenames 255 | if(i>=argc) 256 | exit_with_help(); 257 | 258 | strcpy(input_file_name, argv[i]); 259 | 260 | if(i max_index) 382 | max_index = inst_max_index; 383 | 384 | if(prob.bias >= 0) 385 | x_space[j++].value = prob.bias; 386 | 387 | x_space[j++].index = -1; 388 | } 389 | 390 | if(prob.bias >= 0) 391 | { 392 | prob.n=max_index+1; 393 | for(i=1;iindex = prob.n; 395 | x_space[j-2].index = prob.n; 396 | } 397 | else 398 | prob.n=max_index; 399 | 400 | fclose(fp); 401 | } 402 | -------------------------------------------------------------------------------- /Src/CMakeLists.txt.user: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | ProjectExplorer.Project.ActiveTarget 7 | 0 8 | 9 | 10 | ProjectExplorer.Project.EditorSettings 11 | 12 | true 13 | false 14 | true 15 | 16 | Cpp 17 | 18 | CppGlobal 19 | 20 | 21 | 22 | QmlJS 23 | 24 | QmlJSGlobal 25 | 26 | 27 | 2 28 | System 29 | false 30 | 4 31 | false 32 | true 33 | 1 34 | true 35 | 0 36 | true 37 | 0 38 | 8 39 | true 40 | 1 41 | true 42 | true 43 | true 44 | false 45 | 46 | 47 | 48 | ProjectExplorer.Project.PluginSettings 49 | 50 | 51 | 52 | ProjectExplorer.Project.Target.0 53 | 54 | Desktop 55 | 56 | CMakeProjectManager.DefaultCMakeTarget 57 | 0 58 | 0 59 | 0 60 | 61 | /home/bittnt/BING/BING_beta3/build 62 | 
ProjectExplorer.ToolChain.Gcc:{966d53c9-fe6c-4bac-8571-0e5a33bebf05} 63 | ProjectExplorer.ToolChain.Gcc:{966d53c9-fe6c-4bac-8571-0e5a33bebf05} 64 | 65 | 66 | 67 | 68 | false 69 | true 70 | Make 71 | 72 | CMakeProjectManager.MakeStep 73 | 74 | 1 75 | Build 76 | 77 | ProjectExplorer.BuildSteps.Build 78 | 79 | 80 | 81 | clean 82 | 83 | true 84 | true 85 | Make 86 | 87 | CMakeProjectManager.MakeStep 88 | 89 | 1 90 | Clean 91 | 92 | ProjectExplorer.BuildSteps.Clean 93 | 94 | 2 95 | false 96 | 97 | all 98 | 99 | CMakeProjectManager.CMakeBuildConfiguration 100 | 101 | 1 102 | 103 | 104 | 0 105 | Deploy 106 | 107 | ProjectExplorer.BuildSteps.Deploy 108 | 109 | 1 110 | No deployment 111 | 112 | ProjectExplorer.DefaultDeployConfiguration 113 | 114 | 1 115 | 116 | true 117 | 118 | false 119 | false 120 | false 121 | false 122 | true 123 | 0.01 124 | 10 125 | true 126 | 25 127 | 128 | true 129 | /usr/bin/valgrind 130 | 131 | 0 132 | 1 133 | 2 134 | 3 135 | 4 136 | 5 137 | 6 138 | 7 139 | 8 140 | 9 141 | 10 142 | 11 143 | 12 144 | 13 145 | 14 146 | 147 | 2 148 | BING_linux 149 | 150 | false 151 | 152 | 153 | BING_linux 154 | 155 | CMakeProjectManager.CMakeRunConfiguration. 
156 | 3768 157 | true 158 | false 159 | true 160 | 161 | 1 162 | 163 | 164 | 165 | ProjectExplorer.Project.TargetCount 166 | 1 167 | 168 | 169 | ProjectExplorer.Project.Updater.EnvironmentId 170 | {1c480e06-4a57-40ca-8868-29ff19bb1281} 171 | 172 | 173 | ProjectExplorer.Project.Updater.FileVersion 174 | 11 175 | 176 | 177 | -------------------------------------------------------------------------------- /Src/LibLinear/blas/blasp.h: -------------------------------------------------------------------------------- 1 | /* blasp.h -- C prototypes for BLAS Ver 1.0 */ 2 | /* Jesse Bennett March 23, 2000 */ 3 | 4 | /* Functions listed in alphabetical order */ 5 | 6 | #ifdef F2C_COMPAT 7 | 8 | void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, 9 | fcomplex *cy, int *incy); 10 | 11 | void cdotu_(fcomplex *dotval, int *n, fcomplex *cx, int *incx, 12 | fcomplex *cy, int *incy); 13 | 14 | double sasum_(int *n, float *sx, int *incx); 15 | 16 | double scasum_(int *n, fcomplex *cx, int *incx); 17 | 18 | double scnrm2_(int *n, fcomplex *x, int *incx); 19 | 20 | double sdot_(int *n, float *sx, int *incx, float *sy, int *incy); 21 | 22 | double snrm2_(int *n, float *x, int *incx); 23 | 24 | void zdotc_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, 25 | dcomplex *cy, int *incy); 26 | 27 | void zdotu_(dcomplex *dotval, int *n, dcomplex *cx, int *incx, 28 | dcomplex *cy, int *incy); 29 | 30 | #else 31 | 32 | fcomplex cdotc_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); 33 | 34 | fcomplex cdotu_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); 35 | 36 | float sasum_(int *n, float *sx, int *incx); 37 | 38 | float scasum_(int *n, fcomplex *cx, int *incx); 39 | 40 | float scnrm2_(int *n, fcomplex *x, int *incx); 41 | 42 | float sdot_(int *n, float *sx, int *incx, float *sy, int *incy); 43 | 44 | float snrm2_(int *n, float *x, int *incx); 45 | 46 | dcomplex zdotc_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); 47 | 48 | dcomplex zdotu_(int 
*n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); 49 | 50 | #endif 51 | 52 | /* Remaining functions listed in alphabetical order */ 53 | 54 | int caxpy_(int *n, fcomplex *ca, fcomplex *cx, int *incx, fcomplex *cy, 55 | int *incy); 56 | 57 | int ccopy_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); 58 | 59 | int cgbmv_(char *trans, int *m, int *n, int *kl, int *ku, 60 | fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx, 61 | fcomplex *beta, fcomplex *y, int *incy); 62 | 63 | int cgemm_(char *transa, char *transb, int *m, int *n, int *k, 64 | fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb, 65 | fcomplex *beta, fcomplex *c, int *ldc); 66 | 67 | int cgemv_(char *trans, int *m, int *n, fcomplex *alpha, fcomplex *a, 68 | int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, 69 | int *incy); 70 | 71 | int cgerc_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, 72 | fcomplex *y, int *incy, fcomplex *a, int *lda); 73 | 74 | int cgeru_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx, 75 | fcomplex *y, int *incy, fcomplex *a, int *lda); 76 | 77 | int chbmv_(char *uplo, int *n, int *k, fcomplex *alpha, fcomplex *a, 78 | int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, 79 | int *incy); 80 | 81 | int chemm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, 82 | fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, 83 | fcomplex *c, int *ldc); 84 | 85 | int chemv_(char *uplo, int *n, fcomplex *alpha, fcomplex *a, int *lda, 86 | fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy); 87 | 88 | int cher_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, 89 | fcomplex *a, int *lda); 90 | 91 | int cher2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, 92 | fcomplex *y, int *incy, fcomplex *a, int *lda); 93 | 94 | int cher2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, 95 | fcomplex *a, int *lda, fcomplex *b, int *ldb, float *beta, 96 | 
fcomplex *c, int *ldc); 97 | 98 | int cherk_(char *uplo, char *trans, int *n, int *k, float *alpha, 99 | fcomplex *a, int *lda, float *beta, fcomplex *c, int *ldc); 100 | 101 | int chpmv_(char *uplo, int *n, fcomplex *alpha, fcomplex *ap, fcomplex *x, 102 | int *incx, fcomplex *beta, fcomplex *y, int *incy); 103 | 104 | int chpr_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx, 105 | fcomplex *ap); 106 | 107 | int chpr2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx, 108 | fcomplex *y, int *incy, fcomplex *ap); 109 | 110 | int crotg_(fcomplex *ca, fcomplex *cb, float *c, fcomplex *s); 111 | 112 | int cscal_(int *n, fcomplex *ca, fcomplex *cx, int *incx); 113 | 114 | int csscal_(int *n, float *sa, fcomplex *cx, int *incx); 115 | 116 | int cswap_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy); 117 | 118 | int csymm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha, 119 | fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, 120 | fcomplex *c, int *ldc); 121 | 122 | int csyr2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, 123 | fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta, 124 | fcomplex *c, int *ldc); 125 | 126 | int csyrk_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha, 127 | fcomplex *a, int *lda, fcomplex *beta, fcomplex *c, int *ldc); 128 | 129 | int ctbmv_(char *uplo, char *trans, char *diag, int *n, int *k, 130 | fcomplex *a, int *lda, fcomplex *x, int *incx); 131 | 132 | int ctbsv_(char *uplo, char *trans, char *diag, int *n, int *k, 133 | fcomplex *a, int *lda, fcomplex *x, int *incx); 134 | 135 | int ctpmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, 136 | fcomplex *x, int *incx); 137 | 138 | int ctpsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap, 139 | fcomplex *x, int *incx); 140 | 141 | int ctrmm_(char *side, char *uplo, char *transa, char *diag, int *m, 142 | int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, 143 | int *ldb); 144 
| 145 | int ctrmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, 146 | int *lda, fcomplex *x, int *incx); 147 | 148 | int ctrsm_(char *side, char *uplo, char *transa, char *diag, int *m, 149 | int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, 150 | int *ldb); 151 | 152 | int ctrsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a, 153 | int *lda, fcomplex *x, int *incx); 154 | 155 | int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy, 156 | int *incy); 157 | 158 | int dcopy_(int *n, double *sx, int *incx, double *sy, int *incy); 159 | 160 | int dgbmv_(char *trans, int *m, int *n, int *kl, int *ku, 161 | double *alpha, double *a, int *lda, double *x, int *incx, 162 | double *beta, double *y, int *incy); 163 | 164 | int dgemm_(char *transa, char *transb, int *m, int *n, int *k, 165 | double *alpha, double *a, int *lda, double *b, int *ldb, 166 | double *beta, double *c, int *ldc); 167 | 168 | int dgemv_(char *trans, int *m, int *n, double *alpha, double *a, 169 | int *lda, double *x, int *incx, double *beta, double *y, 170 | int *incy); 171 | 172 | int dger_(int *m, int *n, double *alpha, double *x, int *incx, 173 | double *y, int *incy, double *a, int *lda); 174 | 175 | int drot_(int *n, double *sx, int *incx, double *sy, int *incy, 176 | double *c, double *s); 177 | 178 | int drotg_(double *sa, double *sb, double *c, double *s); 179 | 180 | int dsbmv_(char *uplo, int *n, int *k, double *alpha, double *a, 181 | int *lda, double *x, int *incx, double *beta, double *y, 182 | int *incy); 183 | 184 | int dscal_(int *n, double *sa, double *sx, int *incx); 185 | 186 | int dspmv_(char *uplo, int *n, double *alpha, double *ap, double *x, 187 | int *incx, double *beta, double *y, int *incy); 188 | 189 | int dspr_(char *uplo, int *n, double *alpha, double *x, int *incx, 190 | double *ap); 191 | 192 | int dspr2_(char *uplo, int *n, double *alpha, double *x, int *incx, 193 | double *y, int *incy, double *ap); 194 | 195 | int 
dswap_(int *n, double *sx, int *incx, double *sy, int *incy); 196 | 197 | int dsymm_(char *side, char *uplo, int *m, int *n, double *alpha, 198 | double *a, int *lda, double *b, int *ldb, double *beta, 199 | double *c, int *ldc); 200 | 201 | int dsymv_(char *uplo, int *n, double *alpha, double *a, int *lda, 202 | double *x, int *incx, double *beta, double *y, int *incy); 203 | 204 | int dsyr_(char *uplo, int *n, double *alpha, double *x, int *incx, 205 | double *a, int *lda); 206 | 207 | int dsyr2_(char *uplo, int *n, double *alpha, double *x, int *incx, 208 | double *y, int *incy, double *a, int *lda); 209 | 210 | int dsyr2k_(char *uplo, char *trans, int *n, int *k, double *alpha, 211 | double *a, int *lda, double *b, int *ldb, double *beta, 212 | double *c, int *ldc); 213 | 214 | int dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha, 215 | double *a, int *lda, double *beta, double *c, int *ldc); 216 | 217 | int dtbmv_(char *uplo, char *trans, char *diag, int *n, int *k, 218 | double *a, int *lda, double *x, int *incx); 219 | 220 | int dtbsv_(char *uplo, char *trans, char *diag, int *n, int *k, 221 | double *a, int *lda, double *x, int *incx); 222 | 223 | int dtpmv_(char *uplo, char *trans, char *diag, int *n, double *ap, 224 | double *x, int *incx); 225 | 226 | int dtpsv_(char *uplo, char *trans, char *diag, int *n, double *ap, 227 | double *x, int *incx); 228 | 229 | int dtrmm_(char *side, char *uplo, char *transa, char *diag, int *m, 230 | int *n, double *alpha, double *a, int *lda, double *b, 231 | int *ldb); 232 | 233 | int dtrmv_(char *uplo, char *trans, char *diag, int *n, double *a, 234 | int *lda, double *x, int *incx); 235 | 236 | int dtrsm_(char *side, char *uplo, char *transa, char *diag, int *m, 237 | int *n, double *alpha, double *a, int *lda, double *b, 238 | int *ldb); 239 | 240 | int dtrsv_(char *uplo, char *trans, char *diag, int *n, double *a, 241 | int *lda, double *x, int *incx); 242 | 243 | 244 | int saxpy_(int *n, float *sa, 
float *sx, int *incx, float *sy, int *incy); 245 | 246 | int scopy_(int *n, float *sx, int *incx, float *sy, int *incy); 247 | 248 | int sgbmv_(char *trans, int *m, int *n, int *kl, int *ku, 249 | float *alpha, float *a, int *lda, float *x, int *incx, 250 | float *beta, float *y, int *incy); 251 | 252 | int sgemm_(char *transa, char *transb, int *m, int *n, int *k, 253 | float *alpha, float *a, int *lda, float *b, int *ldb, 254 | float *beta, float *c, int *ldc); 255 | 256 | int sgemv_(char *trans, int *m, int *n, float *alpha, float *a, 257 | int *lda, float *x, int *incx, float *beta, float *y, 258 | int *incy); 259 | 260 | int sger_(int *m, int *n, float *alpha, float *x, int *incx, 261 | float *y, int *incy, float *a, int *lda); 262 | 263 | int srot_(int *n, float *sx, int *incx, float *sy, int *incy, 264 | float *c, float *s); 265 | 266 | int srotg_(float *sa, float *sb, float *c, float *s); 267 | 268 | int ssbmv_(char *uplo, int *n, int *k, float *alpha, float *a, 269 | int *lda, float *x, int *incx, float *beta, float *y, 270 | int *incy); 271 | 272 | int sscal_(int *n, float *sa, float *sx, int *incx); 273 | 274 | int sspmv_(char *uplo, int *n, float *alpha, float *ap, float *x, 275 | int *incx, float *beta, float *y, int *incy); 276 | 277 | int sspr_(char *uplo, int *n, float *alpha, float *x, int *incx, 278 | float *ap); 279 | 280 | int sspr2_(char *uplo, int *n, float *alpha, float *x, int *incx, 281 | float *y, int *incy, float *ap); 282 | 283 | int sswap_(int *n, float *sx, int *incx, float *sy, int *incy); 284 | 285 | int ssymm_(char *side, char *uplo, int *m, int *n, float *alpha, 286 | float *a, int *lda, float *b, int *ldb, float *beta, 287 | float *c, int *ldc); 288 | 289 | int ssymv_(char *uplo, int *n, float *alpha, float *a, int *lda, 290 | float *x, int *incx, float *beta, float *y, int *incy); 291 | 292 | int ssyr_(char *uplo, int *n, float *alpha, float *x, int *incx, 293 | float *a, int *lda); 294 | 295 | int ssyr2_(char *uplo, int *n, 
float *alpha, float *x, int *incx, 296 | float *y, int *incy, float *a, int *lda); 297 | 298 | int ssyr2k_(char *uplo, char *trans, int *n, int *k, float *alpha, 299 | float *a, int *lda, float *b, int *ldb, float *beta, 300 | float *c, int *ldc); 301 | 302 | int ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha, 303 | float *a, int *lda, float *beta, float *c, int *ldc); 304 | 305 | int stbmv_(char *uplo, char *trans, char *diag, int *n, int *k, 306 | float *a, int *lda, float *x, int *incx); 307 | 308 | int stbsv_(char *uplo, char *trans, char *diag, int *n, int *k, 309 | float *a, int *lda, float *x, int *incx); 310 | 311 | int stpmv_(char *uplo, char *trans, char *diag, int *n, float *ap, 312 | float *x, int *incx); 313 | 314 | int stpsv_(char *uplo, char *trans, char *diag, int *n, float *ap, 315 | float *x, int *incx); 316 | 317 | int strmm_(char *side, char *uplo, char *transa, char *diag, int *m, 318 | int *n, float *alpha, float *a, int *lda, float *b, 319 | int *ldb); 320 | 321 | int strmv_(char *uplo, char *trans, char *diag, int *n, float *a, 322 | int *lda, float *x, int *incx); 323 | 324 | int strsm_(char *side, char *uplo, char *transa, char *diag, int *m, 325 | int *n, float *alpha, float *a, int *lda, float *b, 326 | int *ldb); 327 | 328 | int strsv_(char *uplo, char *trans, char *diag, int *n, float *a, 329 | int *lda, float *x, int *incx); 330 | 331 | int zaxpy_(int *n, dcomplex *ca, dcomplex *cx, int *incx, dcomplex *cy, 332 | int *incy); 333 | 334 | int zcopy_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); 335 | 336 | int zdscal_(int *n, double *sa, dcomplex *cx, int *incx); 337 | 338 | int zgbmv_(char *trans, int *m, int *n, int *kl, int *ku, 339 | dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx, 340 | dcomplex *beta, dcomplex *y, int *incy); 341 | 342 | int zgemm_(char *transa, char *transb, int *m, int *n, int *k, 343 | dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb, 344 | dcomplex *beta, 
dcomplex *c, int *ldc); 345 | 346 | int zgemv_(char *trans, int *m, int *n, dcomplex *alpha, dcomplex *a, 347 | int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, 348 | int *incy); 349 | 350 | int zgerc_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, 351 | dcomplex *y, int *incy, dcomplex *a, int *lda); 352 | 353 | int zgeru_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx, 354 | dcomplex *y, int *incy, dcomplex *a, int *lda); 355 | 356 | int zhbmv_(char *uplo, int *n, int *k, dcomplex *alpha, dcomplex *a, 357 | int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, 358 | int *incy); 359 | 360 | int zhemm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha, 361 | dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, 362 | dcomplex *c, int *ldc); 363 | 364 | int zhemv_(char *uplo, int *n, dcomplex *alpha, dcomplex *a, int *lda, 365 | dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy); 366 | 367 | int zher_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, 368 | dcomplex *a, int *lda); 369 | 370 | int zher2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, 371 | dcomplex *y, int *incy, dcomplex *a, int *lda); 372 | 373 | int zher2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, 374 | dcomplex *a, int *lda, dcomplex *b, int *ldb, double *beta, 375 | dcomplex *c, int *ldc); 376 | 377 | int zherk_(char *uplo, char *trans, int *n, int *k, double *alpha, 378 | dcomplex *a, int *lda, double *beta, dcomplex *c, int *ldc); 379 | 380 | int zhpmv_(char *uplo, int *n, dcomplex *alpha, dcomplex *ap, dcomplex *x, 381 | int *incx, dcomplex *beta, dcomplex *y, int *incy); 382 | 383 | int zhpr_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx, 384 | dcomplex *ap); 385 | 386 | int zhpr2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx, 387 | dcomplex *y, int *incy, dcomplex *ap); 388 | 389 | int zrotg_(dcomplex *ca, dcomplex *cb, double *c, dcomplex *s); 390 | 391 | 
int zscal_(int *n, dcomplex *ca, dcomplex *cx, int *incx); 392 | 393 | int zswap_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy); 394 | 395 | int zsymm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha, 396 | dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, 397 | dcomplex *c, int *ldc); 398 | 399 | int zsyr2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, 400 | dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta, 401 | dcomplex *c, int *ldc); 402 | 403 | int zsyrk_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha, 404 | dcomplex *a, int *lda, dcomplex *beta, dcomplex *c, int *ldc); 405 | 406 | int ztbmv_(char *uplo, char *trans, char *diag, int *n, int *k, 407 | dcomplex *a, int *lda, dcomplex *x, int *incx); 408 | 409 | int ztbsv_(char *uplo, char *trans, char *diag, int *n, int *k, 410 | dcomplex *a, int *lda, dcomplex *x, int *incx); 411 | 412 | int ztpmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, 413 | dcomplex *x, int *incx); 414 | 415 | int ztpsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap, 416 | dcomplex *x, int *incx); 417 | 418 | int ztrmm_(char *side, char *uplo, char *transa, char *diag, int *m, 419 | int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, 420 | int *ldb); 421 | 422 | int ztrmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, 423 | int *lda, dcomplex *x, int *incx); 424 | 425 | int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m, 426 | int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, 427 | int *ldb); 428 | 429 | int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a, 430 | int *lda, dcomplex *x, int *incx); 431 | -------------------------------------------------------------------------------- /Src/LibLinear/README.1.93.txt: -------------------------------------------------------------------------------- 1 | LIBLINEAR is a simple package for solving large-scale regularized linear 2 | 
classification and regression. It currently supports 3 | - L2-regularized logistic regression/L2-loss support vector classification/L1-loss support vector classification 4 | - L1-regularized L2-loss support vector classification/L1-regularized logistic regression 5 | - L2-regularized L2-loss support vector regression/L1-loss support vector regression. 6 | This document explains the usage of LIBLINEAR. 7 | 8 | To get started, please read the ``Quick Start'' section first. 9 | For developers, please check the ``Library Usage'' section to learn 10 | how to integrate LIBLINEAR in your software. 11 | 12 | Table of Contents 13 | ================= 14 | 15 | - When to use LIBLINEAR but not LIBSVM 16 | - Quick Start 17 | - Installation 18 | - `train' Usage 19 | - `predict' Usage 20 | - Examples 21 | - Library Usage 22 | - Building Windows Binaries 23 | - Additional Information 24 | - MATLAB/OCTAVE interface 25 | - PYTHON interface 26 | 27 | When to use LIBLINEAR but not LIBSVM 28 | ==================================== 29 | 30 | There are some large data for which with/without nonlinear mappings 31 | gives similar performances. Without using kernels, one can 32 | efficiently train a much larger set via linear classification/regression. 33 | These data usually have a large number of features. Document classification 34 | is an example. 35 | 36 | Warning: While generally liblinear is very fast, its default solver 37 | may be slow under certain situations (e.g., data not scaled or C is 38 | large). See Appendix B of our SVM guide about how to handle such 39 | cases. 40 | http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf 41 | 42 | Warning: If you are a beginner and your data sets are not large, you 43 | should consider LIBSVM first. 44 | 45 | LIBSVM page: 46 | http://www.csie.ntu.edu.tw/~cjlin/libsvm 47 | 48 | 49 | Quick Start 50 | =========== 51 | 52 | See the section ``Installation'' for installing LIBLINEAR. 
53 | 54 | After installation, there are programs `train' and `predict' for 55 | training and testing, respectively. 56 | 57 | About the data format, please check the README file of LIBSVM. Note 58 | that feature index must start from 1 (but not 0). 59 | 60 | A sample classification data included in this package is `heart_scale'. 61 | 62 | Type `train heart_scale', and the program will read the training 63 | data and output the model file `heart_scale.model'. If you have a test 64 | set called heart_scale.t, then type `predict heart_scale.t 65 | heart_scale.model output' to see the prediction accuracy. The `output' 66 | file contains the predicted class labels. 67 | 68 | For more information about `train' and `predict', see the sections 69 | `train' Usage and `predict' Usage. 70 | 71 | To obtain good performances, sometimes one needs to scale the 72 | data. Please check the program `svm-scale' of LIBSVM. For large and 73 | sparse data, use `-l 0' to keep the sparsity. 74 | 75 | Installation 76 | ============ 77 | 78 | On Unix systems, type `make' to build the `train' and `predict' 79 | programs. Run them without arguments to show the usages. 80 | 81 | On other systems, consult `Makefile' to build them (e.g., see 82 | 'Building Windows binaries' in this file) or use the pre-built 83 | binaries (Windows binaries are in the directory `windows'). 84 | 85 | This software uses some level-1 BLAS subroutines. The needed functions are 86 | included in this package. 
If a BLAS library is available on your 87 | machine, you may use it by modifying the Makefile: Unmark the following line 88 | 89 | #LIBS ?= -lblas 90 | 91 | and mark 92 | 93 | LIBS ?= blas/blas.a 94 | 95 | `train' Usage 96 | ============= 97 | 98 | Usage: train [options] training_set_file [model_file] 99 | options: 100 | -s type : set type of solver (default 1) 101 | for multi-class classification 102 | 0 -- L2-regularized logistic regression (primal) 103 | 1 -- L2-regularized L2-loss support vector classification (dual) 104 | 2 -- L2-regularized L2-loss support vector classification (primal) 105 | 3 -- L2-regularized L1-loss support vector classification (dual) 106 | 4 -- support vector classification by Crammer and Singer 107 | 5 -- L1-regularized L2-loss support vector classification 108 | 6 -- L1-regularized logistic regression 109 | 7 -- L2-regularized logistic regression (dual) 110 | for regression 111 | 11 -- L2-regularized L2-loss support vector regression (primal) 112 | 12 -- L2-regularized L2-loss support vector regression (dual) 113 | 13 -- L2-regularized L1-loss support vector regression (dual) 114 | -c cost : set the parameter C (default 1) 115 | -p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1) 116 | -e epsilon : set tolerance of termination criterion 117 | -s 0 and 2 118 | |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2, 119 | where f is the primal function and pos/neg are # of 120 | positive/negative data (default 0.01) 121 | -s 11 122 | |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001) 123 | -s 1, 3, 4 and 7 124 | Dual maximal violation <= eps; similar to libsvm (default 0.1) 125 | -s 5 and 6 126 | |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf, 127 | where f is the primal function (default 0.01) 128 | -s 12 and 13 129 | |f'(alpha)|_1 <= eps |f'(alpha0)|, 130 | where f is the dual function (default 0.1) 131 | -B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1) 132 | -wi weight: weights
adjust the parameter C of different classes (see README for details) 133 | -v n: n-fold cross validation mode 134 | -q : quiet mode (no outputs) 135 | 136 | Option -v randomly splits the data into n parts and calculates cross 137 | validation accuracy on them. 138 | 139 | Formulations: 140 | 141 | For L2-regularized logistic regression (-s 0), we solve 142 | 143 | min_w w^Tw/2 + C \sum log(1 + exp(-y_i w^Tx_i)) 144 | 145 | For L2-regularized L2-loss SVC dual (-s 1), we solve 146 | 147 | min_alpha 0.5(alpha^T (Q + I/2/C) alpha) - e^T alpha 148 | s.t. 0 <= alpha_i, 149 | 150 | For L2-regularized L2-loss SVC (-s 2), we solve 151 | 152 | min_w w^Tw/2 + C \sum max(0, 1- y_i w^Tx_i)^2 153 | 154 | For L2-regularized L1-loss SVC dual (-s 3), we solve 155 | 156 | min_alpha 0.5(alpha^T Q alpha) - e^T alpha 157 | s.t. 0 <= alpha_i <= C, 158 | 159 | For L1-regularized L2-loss SVC (-s 5), we solve 160 | 161 | min_w \sum |w_j| + C \sum max(0, 1- y_i w^Tx_i)^2 162 | 163 | For L1-regularized logistic regression (-s 6), we solve 164 | 165 | min_w \sum |w_j| + C \sum log(1 + exp(-y_i w^Tx_i)) 166 | 167 | For L2-regularized logistic regression (-s 7), we solve 168 | 169 | min_alpha 0.5(alpha^T Q alpha) + \sum alpha_i*log(alpha_i) + \sum (C-alpha_i)*log(C-alpha_i) - a constant 170 | s.t. 0 <= alpha_i <= C, 171 | 172 | where 173 | 174 | Q is a matrix with Q_ij = y_i y_j x_i^T x_j. 175 | 176 | For L2-regularized L2-loss SVR (-s 11), we solve 177 | 178 | min_w w^Tw/2 + C \sum max(0, |y_i-w^Tx_i|-epsilon)^2 179 | 180 | For L2-regularized L2-loss SVR dual (-s 12), we solve 181 | 182 | min_beta 0.5(beta^T (Q + lambda I/2/C) beta) - y^T beta + \sum |beta_i| 183 | 184 | For L2-regularized L1-loss SVR dual (-s 13), we solve 185 | 186 | min_beta 0.5(beta^T Q beta) - y^T beta + \sum |beta_i| 187 | s.t. -C <= beta_i <= C, 188 | 189 | where 190 | 191 | Q is a matrix with Q_ij = x_i^T x_j. 192 | 193 | If bias >= 0, w becomes [w; w_{n+1}] and x becomes [x; bias]. 
194 | 195 | The primal-dual relationship implies that -s 1 and -s 2 give the same 196 | model, -s 0 and -s 7 give the same, and -s 11 and -s 12 give the same. 197 | 198 | We implement 1-vs-the rest multi-class strategy for classification. 199 | In training i vs. non_i, their C parameters are (weight from -wi)*C 200 | and C, respectively. If there are only two classes, we train only one 201 | model. Thus weight1*C vs. weight2*C is used. See examples below. 202 | 203 | We also implement multi-class SVM by Crammer and Singer (-s 4): 204 | 205 | min_{w_m, \xi_i} 0.5 \sum_m ||w_m||^2 + C \sum_i \xi_i 206 | s.t. w^T_{y_i} x_i - w^T_m x_i >= \e^m_i - \xi_i \forall m,i 207 | 208 | where e^m_i = 0 if y_i = m, 209 | e^m_i = 1 if y_i != m, 210 | 211 | Here we solve the dual problem: 212 | 213 | min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i 214 | s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i 215 | 216 | where w_m(\alpha) = \sum_i \alpha^m_i x_i, 217 | and C^m_i = C if m = y_i, 218 | C^m_i = 0 if m != y_i. 219 | 220 | `predict' Usage 221 | =============== 222 | 223 | Usage: predict [options] test_file model_file output_file 224 | options: 225 | -b probability_estimates: whether to output probability estimates, 0 or 1 (default 0); currently for logistic regression only 226 | -q : quiet mode (no outputs) 227 | 228 | Note that -b is only needed in the prediction phase. This is different 229 | from the setting of LIBSVM. 230 | 231 | Examples 232 | ======== 233 | 234 | > train data_file 235 | 236 | Train linear SVM with L2-loss function. 237 | 238 | > train -s 0 data_file 239 | 240 | Train a logistic regression model. 241 | 242 | > train -v 5 -e 0.001 data_file 243 | 244 | Do five-fold cross-validation using L2-loss svm. 245 | Use a smaller stopping tolerance 0.001 than the default 246 | 0.1 if you want more accurate solutions. 
247 | 248 | > train -c 10 -w1 2 -w2 5 -w3 2 four_class_data_file 249 | 250 | Train four classifiers: 251 | positive negative Cp Cn 252 | class 1 class 2,3,4. 20 10 253 | class 2 class 1,3,4. 50 10 254 | class 3 class 1,2,4. 20 10 255 | class 4 class 1,2,3. 10 10 256 | 257 | > train -c 10 -w3 1 -w2 5 two_class_data_file 258 | 259 | If there are only two classes, we train ONE model. 260 | The C values for the two classes are 10 and 50. 261 | 262 | > predict -b 1 test_file data_file.model output_file 263 | 264 | Output probability estimates (for logistic regression only). 265 | 266 | Library Usage 267 | ============= 268 | 269 | - Function: model* train(const struct problem *prob, 270 | const struct parameter *param); 271 | 272 | This function constructs and returns a linear classification 273 | or regression model according to the given training data and 274 | parameters. 275 | 276 | struct problem describes the problem: 277 | 278 | struct problem 279 | { 280 | int l, n; 281 | int *y; 282 | struct feature_node **x; 283 | double bias; 284 | }; 285 | 286 | where `l' is the number of training data. If bias >= 0, we assume 287 | that one additional feature is added to the end of each data 288 | instance. `n' is the number of feature (including the bias feature 289 | if bias >= 0). `y' is an array containing the target values. (integers 290 | in classification, real numbers in regression) And `x' is an array 291 | of pointers, each of which points to a sparse representation (array 292 | of feature_node) of one training vector. 293 | 294 | For example, if we have the following training data: 295 | 296 | LABEL ATTR1 ATTR2 ATTR3 ATTR4 ATTR5 297 | ----- ----- ----- ----- ----- ----- 298 | 1 0 0.1 0.2 0 0 299 | 2 0 0.1 0.3 -1.2 0 300 | 1 0.4 0 0 0 0 301 | 2 0 0.1 0 1.4 0.5 302 | 3 -0.1 -0.2 0.1 1.1 0.1 303 | 304 | and bias = 1, then the components of problem are: 305 | 306 | l = 5 307 | n = 6 308 | 309 | y -> 1 2 1 2 3 310 | 311 | x -> [ ] -> (2,0.1) (3,0.2) (6,1) (-1,?) 
312 | [ ] -> (2,0.1) (3,0.3) (4,-1.2) (6,1) (-1,?) 313 | [ ] -> (1,0.4) (6,1) (-1,?) 314 | [ ] -> (2,0.1) (4,1.4) (5,0.5) (6,1) (-1,?) 315 | [ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (6,1) (-1,?) 316 | 317 | struct parameter describes the parameters of a linear classification 318 | or regression model: 319 | 320 | struct parameter 321 | { 322 | int solver_type; 323 | 324 | /* these are for training only */ 325 | double eps; /* stopping criteria */ 326 | double C; 327 | int nr_weight; 328 | int *weight_label; 329 | double* weight; 330 | double p; 331 | }; 332 | 333 | solver_type can be one of L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL, L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL, L2R_L1LOSS_SVR_DUAL. 334 | for classification 335 | L2R_LR L2-regularized logistic regression (primal) 336 | L2R_L2LOSS_SVC_DUAL L2-regularized L2-loss support vector classification (dual) 337 | L2R_L2LOSS_SVC L2-regularized L2-loss support vector classification (primal) 338 | L2R_L1LOSS_SVC_DUAL L2-regularized L1-loss support vector classification (dual) 339 | MCSVM_CS support vector classification by Crammer and Singer 340 | L1R_L2LOSS_SVC L1-regularized L2-loss support vector classification 341 | L1R_LR L1-regularized logistic regression 342 | L2R_LR_DUAL L2-regularized logistic regression (dual) 343 | for regression 344 | L2R_L2LOSS_SVR L2-regularized L2-loss support vector regression (primal) 345 | L2R_L2LOSS_SVR_DUAL L2-regularized L2-loss support vector regression (dual) 346 | L2R_L1LOSS_SVR_DUAL L2-regularized L1-loss support vector regression (dual) 347 | 348 | C is the cost of constraints violation. 349 | p is the sensitiveness of loss of support vector regression. 350 | eps is the stopping criterion. 351 | 352 | nr_weight, weight_label, and weight are used to change the penalty 353 | for some classes (If the weight for a class is not changed, it is 354 | set to 1). 
This is useful for training classifier using unbalanced 355 | input data or with asymmetric misclassification cost. 356 | 357 | nr_weight is the number of elements in the array weight_label and 358 | weight. Each weight[i] corresponds to weight_label[i], meaning that 359 | the penalty of class weight_label[i] is scaled by a factor of weight[i]. 360 | 361 | If you do not want to change penalty for any of the classes, 362 | just set nr_weight to 0. 363 | 364 | *NOTE* To avoid wrong parameters, check_parameter() should be 365 | called before train(). 366 | 367 | struct model stores the model obtained from the training procedure: 368 | 369 | struct model 370 | { 371 | struct parameter param; 372 | int nr_class; /* number of classes */ 373 | int nr_feature; 374 | double *w; 375 | int *label; /* label of each class */ 376 | double bias; 377 | }; 378 | 379 | param describes the parameters used to obtain the model. 380 | 381 | nr_class and nr_feature are the number of classes and features, 382 | respectively. nr_class = 2 for regression. 383 | 384 | The nr_feature*nr_class array w gives feature weights. We use one 385 | against the rest for multi-class classification, so each feature 386 | index corresponds to nr_class weight values. Weights are 387 | organized in the following way 388 | 389 | +------------------+------------------+------------+ 390 | | nr_class weights | nr_class weights | ... 391 | | for 1st feature | for 2nd feature | 392 | +------------------+------------------+------------+ 393 | 394 | If bias >= 0, x becomes [x; bias]. The number of features is 395 | increased by one, so w is a (nr_feature+1)*nr_class array. The 396 | value of bias is stored in the variable bias. 397 | 398 | The array label stores class labels. 399 | 400 | - Function: void cross_validation(const problem *prob, const parameter *param, int nr_fold, double *target); 401 | 402 | This function conducts cross validation. Data are separated to 403 | nr_fold folds. 
Under given parameters, sequentially each fold is 404 | validated using the model from training the remaining. Predicted 405 | labels in the validation process are stored in the array called 406 | target. 407 | 408 | The format of prob is same as that for train(). 409 | 410 | - Function: double predict(const model *model_, const feature_node *x); 411 | 412 | For a classification model, the predicted class for x is returned. 413 | For a regression model, the function value of x calculated using 414 | the model is returned. 415 | 416 | - Function: double predict_values(const struct model *model_, 417 | const struct feature_node *x, double* dec_values); 418 | 419 | This function gives nr_w decision values in the array dec_values. 420 | nr_w=1 if regression is applied or the number of classes is two. An exception is 421 | multi-class svm by Crammer and Singer (-s 4), where nr_w = 2 if there are two classes. For all other situations, nr_w is the 422 | number of classes. 423 | 424 | We implement one-vs-the rest multi-class strategy (-s 0,1,2,3,5,6,7) 425 | and multi-class svm by Crammer and Singer (-s 4) for multi-class SVM. 426 | The class with the highest decision value is returned. 427 | 428 | - Function: double predict_probability(const struct model *model_, 429 | const struct feature_node *x, double* prob_estimates); 430 | 431 | This function gives nr_class probability estimates in the array 432 | prob_estimates. nr_class can be obtained from the function 433 | get_nr_class. The class with the highest probability is 434 | returned. Currently, we support only the probability outputs of 435 | logistic regression. 436 | 437 | - Function: int get_nr_feature(const model *model_); 438 | 439 | The function gives the number of attributes of the model. 440 | 441 | - Function: int get_nr_class(const model *model_); 442 | 443 | The function gives the number of classes of the model. 444 | For a regression model, 2 is returned. 
445 | 446 | - Function: void get_labels(const model *model_, int* label); 447 | 448 | This function outputs the name of labels into an array called label. 449 | For a regression model, label is unchanged. 450 | 451 | - Function: const char *check_parameter(const struct problem *prob, 452 | const struct parameter *param); 453 | 454 | This function checks whether the parameters are within the feasible 455 | range of the problem. This function should be called before calling 456 | train() and cross_validation(). It returns NULL if the 457 | parameters are feasible, otherwise an error message is returned. 458 | 459 | - Function: int save_model(const char *model_file_name, 460 | const struct model *model_); 461 | 462 | This function saves a model to a file; returns 0 on success, or -1 463 | if an error occurs. 464 | 465 | - Function: struct model *load_model(const char *model_file_name); 466 | 467 | This function returns a pointer to the model read from the file, 468 | or a null pointer if the model could not be loaded. 469 | 470 | - Function: void free_model_content(struct model *model_ptr); 471 | 472 | This function frees the memory used by the entries in a model structure. 473 | 474 | - Function: void free_and_destroy_model(struct model **model_ptr_ptr); 475 | 476 | This function frees the memory used by a model and destroys the model 477 | structure. 478 | 479 | - Function: void destroy_param(struct parameter *param); 480 | 481 | This function frees the memory used by a parameter set. 482 | 483 | - Function: void set_print_string_function(void (*print_func)(const char *)); 484 | 485 | Users can specify their output format by a function. Use 486 | set_print_string_function(NULL); 487 | for default printing to stdout. 488 | 489 | Building Windows Binaries 490 | ========================= 491 | 492 | Windows binaries are in the directory `windows'. To build them via 493 | Visual C++, use the following steps: 494 | 495 | 1. 
Open a dos command box and change to liblinear directory. If 496 | environment variables of VC++ have not been set, type 497 | 498 | "C:\Program Files\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat" 499 | 500 | You may have to modify the above command according which version of 501 | VC++ or where it is installed. 502 | 503 | 2. Type 504 | 505 | nmake -f Makefile.win clean all 506 | 507 | 508 | MATLAB/OCTAVE Interface 509 | ======================= 510 | 511 | Please check the file README in the directory `matlab'. 512 | 513 | PYTHON Interface 514 | ================ 515 | 516 | Please check the file README in the directory `python'. 517 | 518 | Additional Information 519 | ====================== 520 | 521 | If you find LIBLINEAR helpful, please cite it as 522 | 523 | R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin. 524 | LIBLINEAR: A Library for Large Linear Classification, Journal of 525 | Machine Learning Research 9(2008), 1871-1874. Software available at 526 | http://www.csie.ntu.edu.tw/~cjlin/liblinear 527 | 528 | For any questions and comments, please send your email to 529 | cjlin@csie.ntu.edu.tw 530 | 531 | 532 | -------------------------------------------------------------------------------- /Src/Objectness.cpp: -------------------------------------------------------------------------------- 1 | #include "kyheader.h" 2 | #include "Objectness.h" 3 | #include "CmShow.h" 4 | 5 | #define Malloc(type,n) (type *)malloc((n)*sizeof(type)) 6 | void print_null(const char *s) {} 7 | const char* Objectness::_clrName[3] = {"MAXBGR", "HSV", "I"}; 8 | const int CN = 21; // Color Number 9 | const char* COLORs[CN] = {"'k'", "'b'", "'g'", "'r'", "'c'", "'m'", "'y'", 10 | "':k'", "':b'", "':g'", "':r'", "':c'", "':m'", "':y'", 11 | "'--k'", "'--b'", "'--g'", "'--r'", "'--c'", "'--m'", "'--y'" 12 | }; 13 | 14 | 15 | // base for window size quantization, R orientation channels, and feature window size (_W, _W) 16 | Objectness::Objectness(DataSetVOC &voc, double 
base, int W, int NSS) 17 | : _voc(voc) 18 | , _base(base) 19 | , _W(W) 20 | , _NSS(NSS) 21 | , _logBase(log(_base)) 22 | , _minT(cvCeil(log(10.)/_logBase)) 23 | , _maxT(cvCeil(log(500.)/_logBase)) 24 | , _numT(_maxT - _minT + 1) 25 | , _Clr(MAXBGR) 26 | { 27 | setColorSpace(_Clr); 28 | } 29 | 30 | Objectness::~Objectness(void) 31 | { 32 | } 33 | 34 | void Objectness::setColorSpace(int clr) 35 | { 36 | _Clr = clr; 37 | _modelName = _voc.resDir + format("ObjNessB%gW%d%s", _base, _W, _clrName[_Clr]); 38 | _trainDirSI = _voc.localDir + format("TrainS1B%gW%d%s/", _base, _W, _clrName[_Clr]); 39 | _bbResDir = _voc.resDir + format("BBoxesB%gW%d%s/", _base, _W, _clrName[_Clr]); 40 | } 41 | 42 | int Objectness::loadTrainedModel(string modelName) // Return -1, 0, or 1 if partial, none, or all loaded 43 | { 44 | if (modelName.size() == 0) 45 | modelName = _modelName; 46 | CStr s1 = modelName + ".wS1", s2 = modelName + ".wS2", sI = modelName + ".idx"; 47 | Mat filters1f, reW1f, idx1i, show3u; 48 | if (!matRead(s1, filters1f) || !matRead(sI, idx1i)){ 49 | printf("Can't load model: %s or %s\n", _S(s1), _S(sI)); 50 | return 0; 51 | } 52 | 53 | //filters1f = aFilter(0.8f, 8); 54 | //normalize(filters1f, filters1f, p, 1, NORM_MINMAX); 55 | 56 | normalize(filters1f, show3u, 1, 255, NORM_MINMAX, CV_8U); 57 | CmShow::showTinyMat(_voc.resDir + "Filter.png", show3u); 58 | _tigF.update(filters1f); 59 | _tigF.reconstruct(filters1f); 60 | 61 | _svmSzIdxs = idx1i; 62 | CV_Assert(_svmSzIdxs.size() > 1 && filters1f.size() == Size(_W, _W) && filters1f.type() == CV_32F); 63 | _svmFilter = filters1f; 64 | 65 | if (!matRead(s2, _svmReW1f) || _svmReW1f.size() != Size(2, _svmSzIdxs.size())){ 66 | _svmReW1f = Mat(); 67 | return -1; 68 | } 69 | return 1; 70 | } 71 | 72 | void Objectness::predictBBoxSI(CMat &img3u, ValStructVec &valBoxes, vecI &sz, int NUM_WIN_PSZ, bool fast) 73 | { 74 | const int numSz = _svmSzIdxs.size(); 75 | const int imgW = img3u.cols, imgH = img3u.rows; 76 | 
valBoxes.reserve(10000); 77 | sz.clear(); sz.reserve(10000); 78 | for (int ir = numSz - 1; ir >= 0; ir--){ 79 | int r = _svmSzIdxs[ir]; 80 | int height = cvRound(pow(_base, r/_numT + _minT)), width = cvRound(pow(_base, r%_numT + _minT)); 81 | if (height > imgH * _base || width > imgW * _base) 82 | continue; 83 | 84 | height = min(height, imgH), width = min(width, imgW); 85 | Mat im3u, matchCost1f, mag1u; 86 | resize(img3u, im3u, Size(cvRound(_W*imgW*1.0/width), cvRound(_W*imgH*1.0/height))); 87 | gradientMag(im3u, mag1u); 88 | 89 | //imwrite(_voc.localDir + format("%d.png", r), mag1u); 90 | //Mat mag1f; 91 | //mag1u.convertTo(mag1f, CV_32F); 92 | //matchTemplate(mag1f, _svmFilter, matchCost1f, CV_TM_CCORR); 93 | 94 | matchCost1f = _tigF.matchTemplate(mag1u); 95 | 96 | ValStructVec matchCost; 97 | nonMaxSup(matchCost1f, matchCost, _NSS, NUM_WIN_PSZ, fast); 98 | 99 | // Find true locations and match values 100 | double ratioX = width/_W, ratioY = height/_W; 101 | int iMax = min(matchCost.size(), NUM_WIN_PSZ); 102 | for (int i = 0; i < iMax; i++){ 103 | float mVal = matchCost(i); 104 | Point pnt = matchCost[i]; 105 | Vec4i box(cvRound(pnt.x * ratioX), cvRound(pnt.y*ratioY)); 106 | box[2] = cvRound(min(box[0] + width, imgW)); 107 | box[3] = cvRound(min(box[1] + height, imgH)); 108 | box[0] ++; 109 | box[1] ++; 110 | valBoxes.pushBack(mVal, box); 111 | sz.push_back(ir); 112 | } 113 | } 114 | //exit(0); 115 | } 116 | 117 | void Objectness::predictBBoxSII(ValStructVec &valBoxes, const vecI &sz) 118 | { 119 | int numI = valBoxes.size(); 120 | for (int i = 0; i < numI; i++){ 121 | const float* svmIIw = _svmReW1f.ptr(sz[i]); 122 | valBoxes(i) = valBoxes(i) * svmIIw[0] + svmIIw[1]; 123 | } 124 | valBoxes.sort(); 125 | } 126 | 127 | // Get potential bounding boxes, each of which is represented by a Vec4i for (minX, minY, maxX, maxY). 128 | // The trained model should be prepared before calling this function: loadTrainedModel() or trainStageI() + trainStageII(). 
129 | // Use numDet to control the final number of proposed bounding boxes, and number of per size (scale and aspect ratio) 130 | void Objectness::getObjBndBoxes(CMat &img3u, ValStructVec &valBoxes, int numDetPerSize) 131 | { 132 | CV_Assert_(filtersLoaded() , ("SVM filters should be initialized before getting object proposals\n")); 133 | vecI sz; 134 | predictBBoxSI(img3u, valBoxes, sz, numDetPerSize, false); 135 | predictBBoxSII(valBoxes, sz); 136 | return; 137 | } 138 | 139 | void Objectness::nonMaxSup(CMat &matchCost1f, ValStructVec &matchCost, int NSS, int maxPoint, bool fast) 140 | { 141 | const int _h = matchCost1f.rows, _w = matchCost1f.cols; 142 | Mat isMax1u = Mat::ones(_h, _w, CV_8U), costSmooth1f; 143 | ValStructVec valPnt; 144 | matchCost.reserve(_h * _w); 145 | valPnt.reserve(_h * _w); 146 | if (fast){ 147 | blur(matchCost1f, costSmooth1f, Size(3, 3)); 148 | for (int r = 0; r < _h; r++){ 149 | const float* d = matchCost1f.ptr(r); 150 | const float* ds = costSmooth1f.ptr(r); 151 | for (int c = 0; c < _w; c++) 152 | if (d[c] >= ds[c]) 153 | valPnt.pushBack(d[c], Point(c, r)); 154 | } 155 | } 156 | else{ 157 | for (int r = 0; r < _h; r++){ 158 | const float* d = matchCost1f.ptr(r); 159 | for (int c = 0; c < _w; c++) 160 | valPnt.pushBack(d[c], Point(c, r)); 161 | } 162 | } 163 | 164 | valPnt.sort(); 165 | for (int i = 0; i < valPnt.size(); i++){ 166 | Point &pnt = valPnt[i]; 167 | if (isMax1u.at(pnt)){ 168 | matchCost.pushBack(valPnt(i), pnt); 169 | for (int dy = -NSS; dy <= NSS; dy++) for (int dx = -NSS; dx <= NSS; dx++){ 170 | Point neighbor = pnt + Point(dx, dy); 171 | if (!CHK_IND(neighbor)) 172 | continue; 173 | isMax1u.at(neighbor) = false; 174 | } 175 | } 176 | if (matchCost.size() >= maxPoint) 177 | return; 178 | } 179 | } 180 | 181 | void Objectness::gradientMag(CMat &imgBGR3u, Mat &mag1u) 182 | { 183 | switch (_Clr){ 184 | case MAXBGR: 185 | gradientRGB(imgBGR3u, mag1u); break; 186 | case G: 187 | gradientGray(imgBGR3u, mag1u); break; 188 | 
case HSV: 189 | gradientHSV(imgBGR3u, mag1u); break; 190 | default: 191 | printf("Error: not recognized color space\n"); 192 | } 193 | } 194 | 195 | void Objectness::gradientRGB(CMat &bgr3u, Mat &mag1u) 196 | { 197 | const int H = bgr3u.rows, W = bgr3u.cols; 198 | Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S); 199 | 200 | // Left/right most column Ix 201 | for (int y = 0; y < H; y++){ 202 | Ix.at(y, 0) = bgrMaxDist(bgr3u.at(y, 1), bgr3u.at(y, 0))*2; 203 | Ix.at(y, W-1) = bgrMaxDist(bgr3u.at(y, W-1), bgr3u.at(y, W-2))*2; 204 | } 205 | 206 | // Top/bottom most column Iy 207 | for (int x = 0; x < W; x++) { 208 | Iy.at(0, x) = bgrMaxDist(bgr3u.at(1, x), bgr3u.at(0, x))*2; 209 | Iy.at(H-1, x) = bgrMaxDist(bgr3u.at(H-1, x), bgr3u.at(H-2, x))*2; 210 | } 211 | 212 | // Find the gradient for inner regions 213 | for (int y = 0; y < H; y++){ 214 | const Vec3b *dataP = bgr3u.ptr(y); 215 | for (int x = 2; x < W; x++) 216 | Ix.at(y, x-1) = bgrMaxDist(dataP[x-2], dataP[x]); // bgr3u.at(y, x+1), bgr3u.at(y, x-1)); 217 | } 218 | for (int y = 1; y < H-1; y++){ 219 | const Vec3b *tP = bgr3u.ptr(y-1); 220 | const Vec3b *bP = bgr3u.ptr(y+1); 221 | for (int x = 0; x < W; x++) 222 | Iy.at(y, x) = bgrMaxDist(tP[x], bP[x]); 223 | } 224 | gradientXY(Ix, Iy, mag1u); 225 | } 226 | 227 | void Objectness::gradientGray(CMat &bgr3u, Mat &mag1u) 228 | { 229 | Mat g1u; 230 | cvtColor(bgr3u, g1u, CV_BGR2GRAY); 231 | const int H = g1u.rows, W = g1u.cols; 232 | Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S); 233 | 234 | // Left/right most column Ix 235 | for (int y = 0; y < H; y++){ 236 | Ix.at(y, 0) = abs(g1u.at(y, 1) - g1u.at(y, 0)) * 2; 237 | Ix.at(y, W-1) = abs(g1u.at(y, W-1) - g1u.at(y, W-2)) * 2; 238 | } 239 | 240 | // Top/bottom most column Iy 241 | for (int x = 0; x < W; x++) { 242 | Iy.at(0, x) = abs(g1u.at(1, x) - g1u.at(0, x)) * 2; 243 | Iy.at(H-1, x) = abs(g1u.at(H-1, x) - g1u.at(H-2, x)) * 2; 244 | } 245 | 246 | // Find the gradient for inner regions 247 | for (int y = 0; y < H; y++) 248 | for (int x = 
1; x < W-1; x++) 249 | Ix.at(y, x) = abs(g1u.at(y, x+1) - g1u.at(y, x-1)); 250 | for (int y = 1; y < H-1; y++) 251 | for (int x = 0; x < W; x++) 252 | Iy.at(y, x) = abs(g1u.at(y+1, x) - g1u.at(y-1, x)); 253 | 254 | gradientXY(Ix, Iy, mag1u); 255 | } 256 | 257 | 258 | void Objectness::gradientHSV(CMat &bgr3u, Mat &mag1u) 259 | { 260 | Mat hsv3u; 261 | cvtColor(bgr3u, hsv3u, CV_BGR2HSV); 262 | const int H = hsv3u.rows, W = hsv3u.cols; 263 | Mat Ix(H, W, CV_32S), Iy(H, W, CV_32S); 264 | 265 | // Left/right most column Ix 266 | for (int y = 0; y < H; y++){ 267 | Ix.at(y, 0) = vecDist3b(hsv3u.at(y, 1), hsv3u.at(y, 0)); 268 | Ix.at(y, W-1) = vecDist3b(hsv3u.at(y, W-1), hsv3u.at(y, W-2)); 269 | } 270 | 271 | // Top/bottom most column Iy 272 | for (int x = 0; x < W; x++) { 273 | Iy.at(0, x) = vecDist3b(hsv3u.at(1, x), hsv3u.at(0, x)); 274 | Iy.at(H-1, x) = vecDist3b(hsv3u.at(H-1, x), hsv3u.at(H-2, x)); 275 | } 276 | 277 | // Find the gradient for inner regions 278 | for (int y = 0; y < H; y++) 279 | for (int x = 1; x < W-1; x++) 280 | Ix.at(y, x) = vecDist3b(hsv3u.at(y, x+1), hsv3u.at(y, x-1))/2; 281 | for (int y = 1; y < H-1; y++) 282 | for (int x = 0; x < W; x++) 283 | Iy.at(y, x) = vecDist3b(hsv3u.at(y+1, x), hsv3u.at(y-1, x))/2; 284 | 285 | gradientXY(Ix, Iy, mag1u); 286 | } 287 | 288 | void Objectness::gradientXY(CMat &x1i, CMat &y1i, Mat &mag1u) 289 | { 290 | const int H = x1i.rows, W = x1i.cols; 291 | mag1u.create(H, W, CV_8U); 292 | for (int r = 0; r < H; r++){ 293 | const int *x = x1i.ptr(r), *y = y1i.ptr(r); 294 | byte* m = mag1u.ptr(r); 295 | for (int c = 0; c < W; c++) 296 | m[c] = min(x[c] + y[c], 255); //((int)sqrt(sqr(x[c]) + sqr(y[c])), 255); 297 | } 298 | } 299 | 300 | void Objectness::trainObjectness(int numDetPerSize) 301 | { 302 | CmTimer tm1("Train1"), tm2("Train 2"); 303 | 304 | //* Learning stage I 305 | generateTrianData(); 306 | tm1.Start(); 307 | trainStageI(); 308 | tm1.Stop(); 309 | printf("Learning stage I takes %g seconds... 
\n", tm1.TimeInSeconds()); //*/ 310 | 311 | //* Learning stage II 312 | tm2.Start(); 313 | trainStateII(numDetPerSize); 314 | tm2.Stop(); 315 | printf("Learning stage II takes %g seconds... \n", tm2.TimeInSeconds()); //*/ 316 | return; 317 | } 318 | 319 | void Objectness::generateTrianData() 320 | { 321 | const int NUM_TRAIN = _voc.trainNum; 322 | const int FILTER_SZ = _W*_W; 323 | vector> xTrainP(NUM_TRAIN), xTrainN(NUM_TRAIN); 324 | vector szTrainP(NUM_TRAIN); // Corresponding size index. 325 | const int NUM_NEG_BOX = 100; // Number of negative windows sampled from each image 326 | 327 | #pragma omp parallel for 328 | for (int i = 0; i < NUM_TRAIN; i++) { 329 | const int NUM_GT_BOX = (int)_voc.gtTrainBoxes[i].size(); 330 | vector &xP = xTrainP[i], &xN = xTrainN[i]; 331 | vecI &szP = szTrainP[i]; 332 | xP.reserve(NUM_GT_BOX*4), szP.reserve(NUM_GT_BOX*4), xN.reserve(NUM_NEG_BOX); 333 | Mat im3u = imread(format(_S(_voc.imgPathW), _S(_voc.trainSet[i]))); 334 | 335 | // Get positive training data 336 | for (int k = 0; k < NUM_GT_BOX; k++){ 337 | const Vec4i& bbgt = _voc.gtTrainBoxes[i][k]; 338 | vector bbs; // bounding boxes; 339 | vecI bbR; // Bounding box ratios 340 | int nS = gtBndBoxSampling(bbgt, bbs, bbR); 341 | for (int j = 0; j < nS; j++){ 342 | bbs[j][2] = min(bbs[j][2], im3u.cols); 343 | bbs[j][3] = min(bbs[j][3], im3u.rows); 344 | Mat mag1f = getFeature(im3u, bbs[j]), magF1f; 345 | flip(mag1f, magF1f, CV_FLIP_HORIZONTAL); 346 | xP.push_back(mag1f); 347 | xP.push_back(magF1f); 348 | szP.push_back(bbR[j]); 349 | szP.push_back(bbR[j]); 350 | } 351 | } 352 | // Get negative training data 353 | for (int k = 0; k < NUM_NEG_BOX; k++){ 354 | int x1 = rand() % im3u.cols + 1, x2 = rand() % im3u.cols + 1; 355 | int y1 = rand() % im3u.rows + 1, y2 = rand() % im3u.rows + 1; 356 | Vec4i bb(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)); 357 | if (maxIntUnion(bb, _voc.gtTrainBoxes[i]) < 0.5) 358 | xN.push_back(getFeature(im3u, bb)); 359 | } 360 | } 361 | 362 | const 
int NUM_R = _numT * _numT + 1; 363 | vecI szCount(NUM_R); // Object counts of each size (combination of scale and aspect ratio) 364 | int numP = 0, numN = 0, iP = 0, iN = 0; 365 | for (int i = 0; i < NUM_TRAIN; i++){ 366 | numP += xTrainP[i].size(); 367 | numN += xTrainN[i].size(); 368 | const vecI &rP = szTrainP[i]; 369 | for (size_t j = 0; j < rP.size(); j++) 370 | szCount[rP[j]]++; 371 | } 372 | vecI szActive; // Indexes of active size 373 | for (int r = 1; r < NUM_R; r++){ 374 | if (szCount[r] > 50) // If only 50- positive samples at this size, ignore it. 375 | szActive.push_back(r-1); 376 | } 377 | matWrite(_modelName + ".idx", Mat(szActive)); 378 | 379 | Mat xP1f(numP, FILTER_SZ, CV_32F), xN1f(numN, FILTER_SZ, CV_32F); 380 | for (int i = 0; i < NUM_TRAIN; i++) { 381 | vector &xP = xTrainP[i], &xN = xTrainN[i]; 382 | for (size_t j = 0; j < xP.size(); j++) 383 | memcpy(xP1f.ptr(iP++), xP[j].data, FILTER_SZ*sizeof(float)); 384 | for (size_t j = 0; j < xN.size(); j++) 385 | memcpy(xN1f.ptr(iN++), xN[j].data, FILTER_SZ*sizeof(float)); 386 | } 387 | CV_Assert(numP == iP && numN == iN); 388 | matWrite(_modelName + ".xP", xP1f); 389 | matWrite(_modelName + ".xN", xN1f); 390 | } 391 | 392 | Mat Objectness::getFeature(CMat &img3u, const Vec4i &bb) 393 | { 394 | int x = bb[0] - 1, y = bb[1] - 1; 395 | Rect reg(x, y, bb[2] - x, bb[3] - y); 396 | Mat subImg3u, mag1f, mag1u; 397 | resize(img3u(reg), subImg3u, Size(_W, _W)); 398 | gradientMag(subImg3u, mag1u); 399 | mag1u.convertTo(mag1f, CV_32F); 400 | return mag1f; 401 | } 402 | 403 | int Objectness::gtBndBoxSampling(const Vec4i &bbgt, vector &samples, vecI &bbR) 404 | { 405 | double wVal = bbgt[2] - bbgt[0] + 1, hVal = (bbgt[3] - bbgt[1]) + 1; 406 | wVal = log(wVal)/_logBase, hVal = log(hVal)/_logBase; 407 | int wMin = max((int)(wVal - 0.5), _minT), wMax = min((int)(wVal + 1.5), _maxT); 408 | int hMin = max((int)(hVal - 0.5), _minT), hMax = min((int)(hVal + 1.5), _maxT); 409 | for (int h = hMin; h <= hMax; h++) for (int 
w = wMin; w <= wMax; w++){ 410 | int wT = tLen(w) - 1, hT = tLen(h) - 1; 411 | Vec4i bb(bbgt[0], bbgt[1], bbgt[0] + wT, bbgt[1] + hT); 412 | if (DataSetVOC::interUnio(bb, bbgt) >= 0.5){ 413 | samples.push_back(bb); 414 | bbR.push_back(sz2idx(w, h)); 415 | //if (bbgt[3] > hT){ 416 | // bb = Vec4i(bbgt[0], bbgt[3] - hT, bbgt[0] + wT, bbgt[3]); 417 | // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5); 418 | // samples.push_back(bb); 419 | // bbR.push_back(sz2idx(w, h)); 420 | //} 421 | //if (bbgt[2] > wT){ 422 | // bb = Vec4i(bbgt[2] - wT, bbgt[1], bbgt[2], bbgt[1] + hT); 423 | // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5); 424 | // samples.push_back(bb); 425 | // bbR.push_back(sz2idx(w, h)); 426 | //} 427 | //if (bbgt[2] > wT && bbgt[3] > hT){ 428 | // bb = Vec4i(bbgt[2] - wT, bbgt[3] - hT, bbgt[2], bbgt[3]); 429 | // CV_Assert(DataSetVOC::interUnio(bb, bbgt) >= 0.5); 430 | // samples.push_back(bb); 431 | // bbR.push_back(sz2idx(w, h)); 432 | //} 433 | } 434 | } 435 | return samples.size(); 436 | } 437 | 438 | void Objectness::trainStateII(int numPerSz) 439 | { 440 | loadTrainedModel(); 441 | const int NUM_TRAIN = _voc.trainNum; 442 | vector SZ(NUM_TRAIN), Y(NUM_TRAIN); 443 | vector VAL(NUM_TRAIN); 444 | 445 | #pragma omp parallel for 446 | for (int i = 0; i < _voc.trainNum; i++) { 447 | const vector &bbgts = _voc.gtTrainBoxes[i]; 448 | ValStructVec valBoxes; 449 | vecI &sz = SZ[i], &y = Y[i]; 450 | vecF &val = VAL[i]; 451 | CStr imgPath = format(_S(_voc.imgPathW), _S(_voc.trainSet[i])); 452 | predictBBoxSI(imread(imgPath), valBoxes, sz, numPerSz, false); 453 | const int num = valBoxes.size(); 454 | CV_Assert(sz.size() == num); 455 | y.resize(num), val.resize(num); 456 | for (int j = 0; j < num; j++){ 457 | Vec4i bb = valBoxes[j]; 458 | val[j] = valBoxes(j); 459 | y[j] = maxIntUnion(bb, bbgts) >= 0.5 ? 
1 : -1; 460 | } 461 | } 462 | 463 | const int NUM_SZ = _svmSzIdxs.size(); 464 | const int maxTrainNum = 100000; 465 | vector rXP(NUM_SZ), rXN(NUM_SZ); 466 | for (int r = 0; r < NUM_SZ; r++){ 467 | rXP[r].reserve(maxTrainNum); 468 | rXN[r].reserve(1000000); 469 | } 470 | for (int i = 0; i < NUM_TRAIN; i++){ 471 | const vecI &sz = SZ[i], &y = Y[i]; 472 | vecF &val = VAL[i]; 473 | int num = sz.size(); 474 | for (int j = 0; j < num; j++){ 475 | int r = sz[j]; 476 | CV_Assert(r >= 0 && r < NUM_SZ); 477 | if (y[j] == 1) 478 | rXP[r].push_back(Mat(1, 1, CV_32F, &val[j])); 479 | else 480 | rXN[r].push_back(Mat(1, 1, CV_32F, &val[j])); 481 | } 482 | } 483 | 484 | Mat wMat(NUM_SZ, 2, CV_32F); 485 | for (int i = 0; i < NUM_SZ; i++){ 486 | const vecM &xP = rXP[i], &xN = rXN[i]; 487 | if (xP.size() < 10 || xN.size() < 10) 488 | printf("Warning %s:%d not enough training sample for r[%d] = %d. P = %d, N = %d\n", __FILE__, __LINE__, i, _svmSzIdxs[i], xP.size(), xN.size()); 489 | for (size_t k = 0; k < xP.size(); k++) 490 | CV_Assert(xP[k].size() == Size(1, 1) && xP[k].type() == CV_32F); 491 | 492 | Mat wr = trainSVM(xP, xN, L1R_L2LOSS_SVC, 100, 1); 493 | CV_Assert(wr.size() == Size(2, 1)); 494 | wr.copyTo(wMat.row(i)); 495 | } 496 | matWrite(_modelName + ".wS2", wMat); 497 | _svmReW1f = wMat; 498 | } 499 | 500 | void Objectness::meanStdDev(CMat &data1f, Mat &mean1f, Mat &stdDev1f) 501 | { 502 | const int DIM = data1f.cols, NUM = data1f.rows; 503 | mean1f = Mat::zeros(1, DIM, CV_32F), stdDev1f = Mat::zeros(1, DIM, CV_32F); 504 | for (int i = 0; i < NUM; i++) 505 | mean1f += data1f.row(i); 506 | mean1f /= NUM; 507 | for (int i = 0; i < NUM; i++){ 508 | Mat tmp; 509 | pow(data1f.row(i) - mean1f, 2, tmp); 510 | stdDev1f += tmp; 511 | } 512 | pow(stdDev1f/NUM, 0.5, stdDev1f); 513 | } 514 | 515 | vecD Objectness::getVector(const Mat &_t1f) 516 | { 517 | Mat t1f; 518 | _t1f.convertTo(t1f, CV_64F); 519 | return (vecD)(t1f.reshape(1, 1)); 520 | } 521 | 522 | void Objectness::illustrate() 
523 | { 524 | Mat xP1f, xN1f; 525 | CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f)); 526 | CV_Assert(xP1f.cols == xN1f.cols && xP1f.cols == _W*_W && xP1f.type() == CV_32F && xN1f.type() == CV_32F); 527 | Mat meanP, meanN, stdDevP, stdDevN; 528 | meanStdDev(xP1f, meanP, stdDevP); 529 | meanStdDev(xN1f, meanN, stdDevN); 530 | Mat meanV(_W, _W*2, CV_32F), stdDev(_W, _W*2, CV_32F); 531 | meanP.reshape(1, _W).copyTo(meanV.colRange(0, _W)); 532 | meanN.reshape(1, _W).copyTo(meanV.colRange(_W, _W*2)); 533 | stdDevP.reshape(1, _W).copyTo(stdDev.colRange(0, _W)); 534 | stdDevN.reshape(1, _W).copyTo(stdDev.colRange(_W, _W*2)); 535 | normalize(meanV, meanV, 0, 255, NORM_MINMAX, CV_8U); 536 | CmShow::showTinyMat(_voc.resDir + "PosNeg.png", meanV); 537 | 538 | FILE* f = fopen(_S(_voc.resDir + "PosNeg.m"), "w"); 539 | CV_Assert(f != NULL); 540 | fprintf(f, "figure(1);\n\n"); 541 | PrintVector(f, getVector(meanP), "MeanP"); 542 | PrintVector(f, getVector(meanN), "MeanN"); 543 | PrintVector(f, getVector(stdDevP), "StdDevP"); 544 | PrintVector(f, getVector(stdDevN), "StdDevN"); 545 | PrintVector(f, getVector(_svmFilter), "Filter"); 546 | fprintf(f, "hold on;\nerrorbar(MeanP, StdDevP, 'r');\nerrorbar(MeanN, StdDevN, 'g');\nhold off;"); 547 | fclose(f); 548 | } 549 | 550 | void Objectness::trainStageI() 551 | { 552 | vecM pX, nX; 553 | pX.reserve(200000), nX.reserve(200000); 554 | Mat xP1f, xN1f; 555 | CV_Assert(matRead(_modelName + ".xP", xP1f) && matRead(_modelName + ".xN", xN1f)); 556 | for (int r = 0; r < xP1f.rows; r++) 557 | pX.push_back(xP1f.row(r)); 558 | for (int r = 0; r < xN1f.rows; r++) 559 | nX.push_back(xN1f.row(r)); 560 | Mat crntW = trainSVM(pX, nX, L1R_L2LOSS_SVC, 10, 1); 561 | crntW = crntW.colRange(0, crntW.cols - 1).reshape(1, _W); 562 | CV_Assert(crntW.size() == Size(_W, _W)); 563 | matWrite(_modelName + ".wS1", crntW); 564 | } 565 | 566 | // Training SVM with feature vector X and label Y. 
567 | // Each row of X is a feature vector, with corresponding label in Y. 568 | // Return a CV_32F weight Mat 569 | Mat Objectness::trainSVM(CMat &X1f, const vecI &Y, int sT, double C, double bias, double eps) 570 | { 571 | // Set SVM parameters 572 | parameter param; { 573 | param.solver_type = sT; // L2R_L2LOSS_SVC_DUAL; 574 | param.C = C; 575 | param.eps = eps; // see setting below 576 | param.p = 0.1; 577 | param.nr_weight = 0; 578 | param.weight_label = NULL; 579 | param.weight = NULL; 580 | set_print_string_function(print_null); 581 | CV_Assert(X1f.rows == Y.size() && X1f.type() == CV_32F); 582 | } 583 | 584 | // Initialize a problem 585 | feature_node *x_space = NULL; 586 | problem prob;{ 587 | prob.l = X1f.rows; 588 | prob.bias = bias; 589 | prob.y = Malloc(double, prob.l); 590 | prob.x = Malloc(feature_node*, prob.l); 591 | const int DIM_FEA = X1f.cols; 592 | prob.n = DIM_FEA + (bias >= 0 ? 1 : 0); 593 | x_space = Malloc(feature_node, (prob.n + 1) * prob.l); 594 | int j = 0; 595 | for (int i = 0; i < prob.l; i++){ 596 | prob.y[i] = Y[i]; 597 | prob.x[i] = &x_space[j]; 598 | const float* xData = X1f.ptr(i); 599 | for (int k = 0; k < DIM_FEA; k++){ 600 | x_space[j].index = k + 1; 601 | x_space[j++].value = xData[k]; 602 | } 603 | if (bias >= 0){ 604 | x_space[j].index = prob.n; 605 | x_space[j++].value = bias; 606 | } 607 | x_space[j++].index = -1; 608 | } 609 | CV_Assert(j == (prob.n + 1) * prob.l); 610 | } 611 | 612 | // Training SVM for current problem 613 | const char* error_msg = check_parameter(&prob, ¶m); 614 | if(error_msg){ 615 | fprintf(stderr,"ERROR: %s\n",error_msg); 616 | exit(1); 617 | } 618 | model *svmModel = train(&prob, ¶m); 619 | Mat wMat(1, prob.n, CV_64F, svmModel->w); 620 | wMat.convertTo(wMat, CV_32F); 621 | free_and_destroy_model(&svmModel); 622 | destroy_param(¶m); 623 | free(prob.y); 624 | free(prob.x); 625 | free(x_space); 626 | return wMat; 627 | } 628 | 629 | // pX1f, nX1f are positive and negative training samples, each is a 
row vector 630 | Mat Objectness::trainSVM(const vector &pX1f, const vector &nX1f, int sT, double C, double bias, double eps, int maxTrainNum) 631 | { 632 | vecI ind(nX1f.size()); 633 | for (size_t i = 0; i < ind.size(); i++) 634 | ind[i] = i; 635 | int numP = pX1f.size(), feaDim = pX1f[0].cols; 636 | int totalSample = numP + nX1f.size(); 637 | if (totalSample > maxTrainNum) 638 | random_shuffle(ind.begin(), ind.end()); 639 | totalSample = min(totalSample, maxTrainNum); 640 | Mat X1f(totalSample, feaDim, CV_32F); 641 | vecI Y(totalSample); 642 | for(int i = 0; i < numP; i++){ 643 | pX1f[i].copyTo(X1f.row(i)); 644 | Y[i] = 1; 645 | } 646 | for (int i = numP; i < totalSample; i++){ 647 | nX1f[ind[i - numP]].copyTo(X1f.row(i)); 648 | Y[i] = -1; 649 | } 650 | return trainSVM(X1f, Y, sT, C, bias, eps); 651 | } 652 | 653 | // Get potential bounding boxes for all test images 654 | void Objectness::getObjBndBoxesForTests(vector> &_boxesTests, int numDetPerSize) 655 | { 656 | const int TestNum = _voc.testSet.size(); 657 | vecM imgs3u(TestNum); 658 | vector> boxesTests; 659 | boxesTests.resize(TestNum); 660 | 661 | #pragma omp parallel for 662 | for (int i = 0; i < TestNum; i++){ 663 | imgs3u[i] = imread(format(_S(_voc.imgPathW), _S(_voc.testSet[i]))); 664 | boxesTests[i].reserve(10000); 665 | } 666 | 667 | int scales[3] = {1, 3, 5}; 668 | for (int clr = MAXBGR; clr <= G; clr++){ 669 | setColorSpace(clr); 670 | trainObjectness(numDetPerSize); 671 | loadTrainedModel(); 672 | CmTimer tm("Predict"); 673 | tm.Start(); 674 | 675 | #pragma omp parallel for 676 | for (int i = 0; i < TestNum; i++){ 677 | ValStructVec boxes; 678 | getObjBndBoxes(imgs3u[i], boxes, numDetPerSize); 679 | boxesTests[i].append(boxes, scales[clr]); 680 | } 681 | 682 | tm.Stop(); 683 | printf("Average time for predicting an image (%s) is %gs\n", _clrName[_Clr], tm.TimeInSeconds()/TestNum); 684 | } 685 | 686 | _boxesTests.resize(TestNum); 687 | CmFile::MkDir(_bbResDir); 688 | #pragma omp parallel for 689 | 
for (int i = 0; i < TestNum; i++){ 690 | CStr fName = _bbResDir + _voc.testSet[i]; 691 | ValStructVec &boxes = boxesTests[i]; 692 | FILE *f = fopen(_S(fName + ".txt"), "w"); 693 | fprintf(f, "%d\n", boxes.size()); 694 | for (size_t k = 0; k < boxes.size(); k++) 695 | fprintf(f, "%g, %s\n", boxes(k), _S(strVec4i(boxes[k]))); 696 | fclose(f); 697 | 698 | _boxesTests[i].resize(boxesTests[i].size()); 699 | for (int j = 0; j < boxesTests[i].size(); j++) 700 | _boxesTests[i][j] = boxesTests[i][j]; 701 | } 702 | 703 | evaluatePerImgRecall(_boxesTests, "PerImgAllNS.m", 5000); 704 | 705 | #pragma omp parallel for 706 | for (int i = 0; i < TestNum; i++){ 707 | boxesTests[i].sort(false); 708 | for (int j = 0; j < boxesTests[i].size(); j++) 709 | _boxesTests[i][j] = boxesTests[i][j]; 710 | } 711 | evaluatePerImgRecall(_boxesTests, "PerImgAllS.m", 5000); 712 | } 713 | 714 | 715 | // Get potential bounding boxes for all test images 716 | void Objectness::getObjBndBoxesForTestsFast(vector> &_boxesTests, int numDetPerSize) 717 | { 718 | //setColorSpace(HSV); 719 | trainObjectness(numDetPerSize); 720 | loadTrainedModel(); 721 | illustrate(); 722 | 723 | 724 | const int TestNum = _voc.testSet.size(); 725 | vecM imgs3u(TestNum); 726 | vector> boxesTests; 727 | boxesTests.resize(TestNum); 728 | 729 | #pragma omp parallel for 730 | for (int i = 0; i < TestNum; i++){ 731 | imgs3u[i] = imread(format(_S(_voc.imgPathW), _S(_voc.testSet[i]))); 732 | boxesTests[i].reserve(10000); 733 | } 734 | 735 | printf("Start predicting\n"); 736 | CmTimer tm("Predict"); 737 | tm.Start(); 738 | 739 | #pragma omp parallel for 740 | for (int i = 0; i < TestNum; i++) 741 | getObjBndBoxes(imgs3u[i], boxesTests[i], numDetPerSize); 742 | 743 | tm.Stop(); 744 | printf("Average time for predicting an image (%s) is %gs\n", _clrName[_Clr], tm.TimeInSeconds()/TestNum); 745 | 746 | _boxesTests.resize(TestNum); 747 | CmFile::MkDir(_bbResDir); 748 | 749 | #pragma omp parallel for 750 | for (int i = 0; i < TestNum; 
i++){ 751 | CStr fName = _bbResDir + _voc.testSet[i]; 752 | ValStructVec &boxes = boxesTests[i]; 753 | FILE *f = fopen(_S(fName + ".txt"), "w"); 754 | fprintf(f, "%d\n", boxes.size()); 755 | for (size_t k = 0; k < boxes.size(); k++) 756 | fprintf(f, "%g, %s\n", boxes(k), _S(strVec4i(boxes[k]))); 757 | fclose(f); 758 | 759 | _boxesTests[i].resize(boxesTests[i].size()); 760 | for (int j = 0; j < boxesTests[i].size(); j++) 761 | _boxesTests[i][j] = boxesTests[i][j]; 762 | } 763 | 764 | evaluatePerImgRecall(_boxesTests, "PerImgAll.m", 5000); 765 | } 766 | 767 | 768 | void Objectness::getRandomBoxes(vector> &boxesTests, int num) 769 | { 770 | const int TestNum = _voc.testSet.size(); 771 | boxesTests.resize(TestNum); 772 | #pragma omp parallel for 773 | for (int i = 0; i < TestNum; i++){ 774 | Mat imgs3u = imread(format(_S(_voc.imgPathW), _S(_voc.testSet[i]))); 775 | int H = imgs3u.cols, W = imgs3u.rows; 776 | boxesTests[i].reserve(num); 777 | for (int k = 0; k < num; k++){ 778 | int x1 = rand()%W + 1, x2 = rand()%W + 1; 779 | int y1 = rand()%H + 1, y2 = rand()%H + 1; 780 | boxesTests[i].push_back(Vec4i(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))); 781 | } 782 | } 783 | evaluatePerImgRecall(boxesTests, "PerImgAll.m", num); 784 | } 785 | 786 | void Objectness::evaluatePerImgRecall(const vector> &boxesTests, CStr &saveName, const int NUM_WIN) 787 | { 788 | vecD recalls(NUM_WIN); 789 | vecD avgScore(NUM_WIN); 790 | const int TEST_NUM = _voc.testSet.size(); 791 | for (int i = 0; i < TEST_NUM; i++){ 792 | const vector &boxesGT = _voc.gtTestBoxes[i]; 793 | const vector &boxes = boxesTests[i]; 794 | const int gtNumCrnt = boxesGT.size(); 795 | vecI detected(gtNumCrnt); 796 | vecD score(gtNumCrnt); 797 | double sumDetected = 0, abo = 0; 798 | for (int j = 0; j < NUM_WIN; j++){ 799 | if (j >= (int)boxes.size()){ 800 | recalls[j] += sumDetected/gtNumCrnt; 801 | avgScore[j] += abo/gtNumCrnt; 802 | continue; 803 | } 804 | 805 | for (int k = 0; k < gtNumCrnt; k++) { 806 | 
double s = DataSetVOC::interUnio(boxes[j], boxesGT[k]); 807 | score[k] = max(score[k], s); 808 | detected[k] = score[k] >= 0.5 ? 1 : 0; 809 | } 810 | sumDetected = 0, abo = 0; 811 | for (int k = 0; k < gtNumCrnt; k++) 812 | sumDetected += detected[k], abo += score[k]; 813 | recalls[j] += sumDetected/gtNumCrnt; 814 | avgScore[j] += abo/gtNumCrnt; 815 | } 816 | } 817 | 818 | for (int i = 0; i < NUM_WIN; i++){ 819 | recalls[i] /= TEST_NUM; 820 | avgScore[i] /= TEST_NUM; 821 | } 822 | 823 | int idx[8] = {1, 10, 100, 1000, 2000, 3000, 4000, 5000}; 824 | for (int i = 0; i < 8; i++){ 825 | if (idx[i] > NUM_WIN) 826 | continue; 827 | printf("%d:%.3g,%.3g\t", idx[i], recalls[idx[i] - 1], avgScore[idx[i] - 1]); 828 | } 829 | printf("\n"); 830 | 831 | FILE* f = fopen(_S(_voc.resDir + saveName), "w"); 832 | CV_Assert(f != NULL); 833 | fprintf(f, "figure(1);\n\n"); 834 | PrintVector(f, recalls, "DR"); 835 | PrintVector(f, avgScore, "MABO"); 836 | fprintf(f, "semilogx(1:%d, DR(1:%d));\nhold on;\nsemilogx(1:%d, DR(1:%d));\naxis([1, 5000, 0, 1]);\nhold off;\n", NUM_WIN, NUM_WIN, NUM_WIN, NUM_WIN); 837 | fclose(f); 838 | } 839 | 840 | void Objectness::illuTestReults(const vector> &boxesTests) 841 | { 842 | CStr resDir = _voc.localDir + "ResIlu/"; 843 | CmFile::MkDir(resDir); 844 | const int TEST_NUM = _voc.testSet.size(); 845 | for (int i = 0; i < TEST_NUM; i++){ 846 | const vector &boxesGT = _voc.gtTestBoxes[i]; 847 | const vector &boxes = boxesTests[i]; 848 | const int gtNumCrnt = boxesGT.size(); 849 | CStr imgPath = format(_S(_voc.imgPathW), _S(_voc.testSet[i])); 850 | CStr resNameNE = CmFile::GetNameNE(imgPath); 851 | Mat img = imread(imgPath); 852 | Mat bboxMatchImg = Mat::zeros(img.size(), CV_32F); 853 | 854 | vecD score(gtNumCrnt); 855 | vector bboxMatch(gtNumCrnt); 856 | for (int j = 0; j < boxes.size(); j++){ 857 | const Vec4i &bb = boxes[j]; 858 | for (int k = 0; k < gtNumCrnt; k++) { 859 | double mVal = DataSetVOC::interUnio(boxes[j], boxesGT[k]); 860 | if (mVal < 
score[k]) 861 | continue; 862 | score[k] = mVal; 863 | bboxMatch[k] = boxes[j]; 864 | } 865 | } 866 | 867 | for (int k = 0; k < gtNumCrnt; k++){ 868 | const Vec4i &bb = bboxMatch[k]; 869 | rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(0), 3); 870 | rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(255, 255, 255), 2); 871 | rectangle(img, Point(bb[0], bb[1]), Point(bb[2], bb[3]), Scalar(0, 0, 255), 1); 872 | } 873 | 874 | imwrite(resDir + resNameNE + "_Match.jpg", img); 875 | } 876 | } 877 | 878 | void Objectness::evaluatePerClassRecall(vector> &boxesTests, CStr &saveName, const int WIN_NUM) 879 | { 880 | const int TEST_NUM = _voc.testSet.size(), CLS_NUM = _voc.classNames.size(); 881 | if (boxesTests.size() != TEST_NUM){ 882 | boxesTests.resize(TEST_NUM); 883 | for (int i = 0; i < TEST_NUM; i++){ 884 | Mat boxes; 885 | matRead(_voc.localDir + _voc.testSet[i] + ".dat", boxes); 886 | Vec4i* d = (Vec4i*)boxes.data; 887 | boxesTests[i].resize(boxes.rows, WIN_NUM); 888 | memcpy(&boxesTests[i][0], boxes.data, sizeof(Vec4i)*boxes.rows); 889 | } 890 | } 891 | 892 | for (int i = 0; i < TEST_NUM; i++) 893 | if ((int)boxesTests[i].size() < WIN_NUM){ 894 | printf("%s.dat: %d, %d\n", _S(_voc.testSet[i]), boxesTests[i].size(), WIN_NUM); 895 | boxesTests[i].resize(WIN_NUM); 896 | } 897 | 898 | 899 | // #class by #win matrix for saving correct detection number and gt number 900 | Mat crNum1i = Mat::zeros(CLS_NUM, WIN_NUM, CV_32S); 901 | vecD gtNums(CLS_NUM); { 902 | for (int i = 0; i < TEST_NUM; i++){ 903 | const vector &boxes = boxesTests[i]; 904 | const vector &boxesGT = _voc.gtTestBoxes[i]; 905 | const vecI &clsGT = _voc.gtTestClsIdx[i]; 906 | CV_Assert((int)boxes.size() >= WIN_NUM); 907 | const int gtNumCrnt = boxesGT.size(); 908 | for (int j = 0; j < gtNumCrnt; j++){ 909 | gtNums[clsGT[j]]++; 910 | double maxIntUni = 0; 911 | int* crNum = crNum1i.ptr(clsGT[j]); 912 | for (int k = 0; k < WIN_NUM; k++) { 913 | double val = 
DataSetVOC::interUnio(boxes[k], boxesGT[j]); 914 | maxIntUni = max(maxIntUni, val); 915 | crNum[k] += maxIntUni >= 0.5 ? 1 : 0; 916 | } 917 | } 918 | } 919 | } 920 | 921 | FILE* f = fopen(_S(_voc.resDir + saveName), "w"); { 922 | CV_Assert(f != NULL); 923 | fprintf(f, "figure(1);\nhold on;\n\n\n"); 924 | vecD val(WIN_NUM), recallObjs(WIN_NUM), recallClss(WIN_NUM); 925 | for (int i = 0; i < WIN_NUM; i++) 926 | val[i] = i; 927 | PrintVector(f, gtNums, "GtNum"); 928 | PrintVector(f, val, "WinNum"); 929 | fprintf(f, "\n"); 930 | string leglendStr("legend("); 931 | double sumObjs = 0; 932 | for (int c = 0; c < CLS_NUM; c++){ 933 | sumObjs += gtNums[c]; 934 | memset(&val[0], 0, sizeof(double)*WIN_NUM); 935 | int* crNum = crNum1i.ptr(c); 936 | for (int i = 0; i < WIN_NUM; i++){ 937 | val[i] = crNum[i]/(gtNums[c] + 1e-200); 938 | recallClss[i] += val[i]; 939 | recallObjs[i] += crNum[i]; 940 | } 941 | CStr className = _voc.classNames[c]; 942 | PrintVector(f, val, className); 943 | fprintf(f, "plot(WinNum, %s, %s, 'linewidth', 2);\n", _S(className), COLORs[c % CN]); 944 | leglendStr += format("'%s', ", _S(className)); 945 | } 946 | for (int i = 0; i < WIN_NUM; i++){ 947 | recallClss[i] /= CLS_NUM; 948 | recallObjs[i] /= sumObjs; 949 | } 950 | PrintVector(f, recallClss, "class"); 951 | fprintf(f, "plot(WinNum, %s, %s, 'linewidth', 2);\n", "class", COLORs[CLS_NUM % CN]); 952 | leglendStr += format("'%s', ", "class"); 953 | PrintVector(f, recallObjs, "objects"); 954 | fprintf(f, "plot(WinNum, %s, %s, 'linewidth', 2);\n", "objects", COLORs[(CLS_NUM+1) % CN]); 955 | leglendStr += format("'%s', ", "objects"); 956 | leglendStr.resize(leglendStr.size() - 2); 957 | leglendStr += ");"; 958 | fprintf(f, "%s\nhold off;\nxlabel('#WIN');\nylabel('Recall');\ngrid on;\naxis([0 %d 0 1]);\n", _S(leglendStr), WIN_NUM); 959 | fprintf(f, "[class([1,10,100,1000]);objects([1,10,100,1000])]\ntitle('%s')\n", _S(saveName)); 960 | fclose(f); 961 | printf("%-70s\r", ""); 962 | } 963 | 
evaluatePerImgRecall(boxesTests, CmFile::GetNameNE(saveName) + "_PerI.m", WIN_NUM); 964 | } 965 | 966 | void Objectness::PrintVector(FILE *f, const vecD &v, CStr &name) 967 | { 968 | fprintf(f, "%s = [", name.c_str()); 969 | for (size_t i = 0; i < v.size(); i++) 970 | fprintf(f, "%g ", v[i]); 971 | fprintf(f, "];\n"); 972 | } 973 | 974 | // Write matrix to binary file 975 | bool Objectness::matWrite(CStr& filename, CMat& _M){ 976 | Mat M; 977 | _M.copyTo(M); 978 | FILE* file = fopen(_S(filename), "wb"); 979 | if (file == NULL || M.empty()) 980 | return false; 981 | fwrite("CmMat", sizeof(char), 5, file); 982 | int headData[3] = {M.cols, M.rows, M.type()}; 983 | fwrite(headData, sizeof(int), 3, file); 984 | fwrite(M.data, sizeof(char), M.step * M.rows, file); 985 | fclose(file); 986 | return true; 987 | } 988 | 989 | // Read matrix from binary file 990 | bool Objectness::matRead(const string& filename, Mat& _M){ 991 | FILE* f = fopen(_S(filename), "rb"); 992 | if (f == NULL) 993 | return false; 994 | char buf[8]; 995 | int pre = fread(buf,sizeof(char), 5, f); 996 | if (strncmp(buf, "CmMat", 5) != 0) { 997 | printf("Invalidate CvMat data file %s\n", _S(filename)); 998 | return false; 999 | } 1000 | int headData[3]; // Width, height, type 1001 | fread(headData, sizeof(int), 3, f); 1002 | Mat M(headData[1], headData[0], headData[2]); 1003 | fread(M.data, sizeof(char), M.step * M.rows, f); 1004 | fclose(f); 1005 | M.copyTo(_M); 1006 | return true; 1007 | } 1008 | 1009 | void Objectness::evaluatePAMI12(CStr &saveName) 1010 | { 1011 | const int TEST_NUM = _voc.testSet.size(); 1012 | vector> boxesTests(TEST_NUM); 1013 | CStr dir = _voc.wkDir + "PAMI12/"; 1014 | const int numDet = 1853; 1015 | for (int i = 0; i < TEST_NUM; i++){ 1016 | FILE *f = fopen(_S(dir + _voc.testSet[i] + ".txt"), "r"); 1017 | double score; 1018 | boxesTests[i].resize(numDet); 1019 | for (int j = 0; j < numDet; j++){ 1020 | Vec4i &v = boxesTests[i][j]; 1021 | fscanf(f, "%d %d %d %d %g\n", &v[0], 
&v[1], &v[2], &v[3], &score); 1022 | } 1023 | fclose(f); 1024 | } 1025 | printf("Load data finished\r"); 1026 | //evaluatePerImgRecall(boxesTests, saveName, numDet); 1027 | evaluatePerClassRecall(boxesTests, saveName, numDet); 1028 | } 1029 | 1030 | void Objectness::evaluateIJCV13(CStr &saveName) 1031 | { 1032 | const int TEST_NUM = _voc.testSet.size(); 1033 | vector> boxesTests(TEST_NUM); 1034 | CStr dir = _voc.wkDir + "IJCV13/"; 1035 | const int numDet = 10000; 1036 | for (int i = 0; i < TEST_NUM; i++){ 1037 | FILE *f = fopen(_S(dir + _voc.testSet[i] + ".txt"), "r"); 1038 | boxesTests[i].resize(numDet); 1039 | for (int j = 0; j < numDet; j++){ 1040 | Vec4i &v = boxesTests[i][j]; 1041 | fscanf(f, "%d, %d, %d, %d\n", &v[1], &v[0], &v[3], &v[2]); 1042 | } 1043 | fclose(f); 1044 | } 1045 | printf("Load data finished\r"); 1046 | evaluatePerImgRecall(boxesTests, saveName, numDet); 1047 | //evaluate(boxesTests, saveName, numDet); 1048 | } 1049 | float distG(float d, float delta) {return exp(-d*d/(2*delta*delta));} 1050 | 1051 | Mat Objectness::aFilter(float delta, int sz) 1052 | { 1053 | float dis = float(sz-1)/2.f; 1054 | Mat mat(sz, sz, CV_32F); 1055 | for (int r = 0; r < sz; r++) 1056 | for (int c = 0; c < sz; c++) 1057 | mat.at(r, c) = distG(sqrt(sqr(r-dis)+sqr(c-dis)) - dis, delta); 1058 | return mat; 1059 | } 1060 | 1061 | // 1062 | //// Calculate the image gradient: center option as in VLFeat 1063 | //void Objectness::gradientMag3(CMat &img3f, Mat &mag1f) 1064 | //{ 1065 | // const int H = img3f.rows, W = img3f.cols; 1066 | // Mat Ix(H, W, CV_32F), Iy(H, W, CV_32F); 1067 | // 1068 | // // Left/right most column Ix 1069 | // for (int y = 0; y < H; y++){ 1070 | // Ix.at(y, 0) = vecDist(img3f.at(y, 1), img3f.at(y, 0)); 1071 | // Ix.at(y, W-1) = vecDist(img3f.at(y, W-1), img3f.at(y, W-2)); 1072 | // } 1073 | // 1074 | // // Top/bottom most column Iy 1075 | // for (int x = 0; x < W; x++) { 1076 | // Iy.at(0, x) = vecDist(img3f.at(1, x), img3f.at(0, x)); 1077 | // 
Iy.at(H-1, x) = vecDist(img3f.at(H-1, x), img3f.at(H-2, x)); 1078 | // } 1079 | // 1080 | // // Find the gradient for inner regions 1081 | // for (int y = 0; y < H; y++) 1082 | // for (int x = 1; x < W-1; x++) 1083 | // Ix.at(y, x) = 0.5f * vecDist(img3f.at(y, x+1), img3f.at(y, x-1)); 1084 | // for (int y = 1; y < H-1; y++) 1085 | // for (int x = 0; x < W; x++) 1086 | // Iy.at(y, x) = 0.5f * vecDist(img3f.at(y+1, x), img3f.at(y-1, x)); 1087 | // mag1f = abs(Ix) + abs(Iy); 1088 | //} 1089 | // 1090 | //// 1091 | //// Calculate the image gradient: center option as in VLFeat 1092 | //void Objectness::gradientMag1(CMat &img1f, Mat &mag1f) 1093 | //{ 1094 | // CV_Assert(img1f.type() == CV_32F); 1095 | // const int H = img1f.rows, W = img1f.cols; 1096 | // Mat Ix(H, W, CV_32F), Iy(H, W, CV_32F); 1097 | // 1098 | // // Left/right most column Ix 1099 | // for (int y = 0; y < H; y++){ 1100 | // Ix.at(y, 0) = img1f.at(y, 1) - img1f.at(y, 0); 1101 | // Ix.at(y, W-1) = img1f.at(y, W-1) - img1f.at(y, W-2); 1102 | // } 1103 | // 1104 | // // Top/bottom most column Iy 1105 | // for (int x = 0; x < W; x++) { 1106 | // Iy.at(0, x) = img1f.at(1, x) - img1f.at(0, x); 1107 | // Iy.at(H-1, x) = img1f.at(H-1, x) - img1f.at(H-2, x); 1108 | // } 1109 | // 1110 | // // Find the gradient for inner regions 1111 | // for (int y = 0; y < H; y++) 1112 | // for (int x = 1; x < W-1; x++) 1113 | // Ix.at(y, x) = 0.5f * (img1f.at(y, x+1) - img1f.at(y, x-1)); 1114 | // for (int y = 1; y < H-1; y++) 1115 | // for (int x = 0; x < W; x++) 1116 | // Iy.at(y, x) = 0.5f * (img1f.at(y+1, x) - img1f.at(y-1, x)); 1117 | // mag1f = abs(Ix) + abs(Iy); 1118 | //} 1119 | --------------------------------------------------------------------------------