├── box.png ├── targetver.h ├── box_in_scene.png ├── KAZE ├── kaze_ipoint.h ├── kaze_utils.h ├── kaze_nldiffusion_functions.h ├── kaze_features.h ├── kaze_config.cpp ├── kaze_ipoint.cpp ├── kaze_utils.cpp ├── kaze_config.h ├── kaze.h ├── kaze_features.cpp ├── kaze_nldiffusion_functions.cpp └── kaze.cpp ├── KazeOpenCV - Sample Project - VS2010.zip ├── .gitignore ├── predep.h ├── README.md └── KazeOpenCV.cpp /box.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuhuazou/kaze_opencv/HEAD/box.png -------------------------------------------------------------------------------- /targetver.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuhuazou/kaze_opencv/HEAD/targetver.h -------------------------------------------------------------------------------- /box_in_scene.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuhuazou/kaze_opencv/HEAD/box_in_scene.png -------------------------------------------------------------------------------- /KAZE/kaze_ipoint.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuhuazou/kaze_opencv/HEAD/KAZE/kaze_ipoint.h -------------------------------------------------------------------------------- /KazeOpenCV - Sample Project - VS2010.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuhuazou/kaze_opencv/HEAD/KazeOpenCV - Sample Project - VS2010.zip -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | 6 | # Compiled Dynamic libraries 7 | *.so 8 | *.dylib 9 | 10 | # Compiled Static libraries 11 | *.lai 12 | *.la 13 | *.a 
14 | -------------------------------------------------------------------------------- /predep.h: -------------------------------------------------------------------------------- 1 | #ifndef _PREDEP_H_ 2 | #define _PREDEP_H_ 3 | 4 | #pragma once 5 | 6 | #include "targetver.h" 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include "opencv2/core/version.hpp" 13 | 14 | #define CV_VERSION_ID CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION) 15 | 16 | #ifdef _DEBUG 17 | #define cvLIB(name) "opencv_" name CV_VERSION_ID "d" 18 | #else 19 | #define cvLIB(name) "opencv_" name CV_VERSION_ID 20 | #endif 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | kaze_opencv 2 | =========== 3 | 4 | wrap KAZE features implementation to cv::Feature2D API without rebuilding OpenCV (minimum version 2.4) 5 | 6 | Files: 7 | 8 | | .gitignore 9 | | README.md 10 | | box.png // Test image 1 11 | | box_in_scene.png // Test image 2 12 | | KazeOpenCV.cpp // Sample code of using KAZE to match images 13 | | predep.h // Head file for recognizing OpenCV version 14 | | targetver.h // Windows system level dependences (auto-generated by Visual Studio) 15 | | 16 | |--KAZE 17 | | kaze_features.cpp // Class that warps KAZE to cv::Feature2D 18 | | kaze_features.h 19 | | kaze.cpp // Implementation of KAZE 20 | | kaze.h 21 | | kaze_config.cpp // Configuration variables and options 22 | | kaze_config.h 23 | | kaze_ipoint.cpp // Class that defines a point of interest 24 | | kaze_ipoint.h 25 | | kaze_nldiffusion_functions.cpp // Functions for non-linear diffusion applications 26 | | kaze_nldiffusion_functions.h 27 | | kaze_utils.cpp // Some useful functions 28 | | kaze_utils.h 29 | -------------------------------------------------------------------------------- /KAZE/kaze_utils.h: 
-------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file utils.h 4 | * @brief Some useful functions 5 | * @date Dec 29, 2011 6 | * @author Pablo F. Alcantarilla 7 | */ 8 | 9 | #ifndef _UTILS_H_ 10 | #define _UTILS_H_ 11 | 12 | //****************************************************************************** 13 | //****************************************************************************** 14 | 15 | // OPENCV Includes 16 | #include "opencv2/core/core.hpp" 17 | 18 | // System Includes 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | 29 | // Other Includes 30 | #include "kaze_ipoint.h" 31 | 32 | //************************************************************************************* 33 | //************************************************************************************* 34 | 35 | // Declaration of Functions 36 | void Compute_min_32F(const cv::Mat &src, float &value); 37 | void Compute_max_32F(const cv::Mat &src, float &value); 38 | void Convert_Scale(cv::Mat &src); 39 | void Copy_and_Convert_Scale(const cv::Mat &src, cv::Mat &dst); 40 | void Draw_Ipoints(cv::Mat &img, const std::vector &keypoints); 41 | int fRound(float flt); 42 | 43 | //************************************************************************************* 44 | //************************************************************************************* 45 | 46 | #endif 47 | -------------------------------------------------------------------------------- /KAZE/kaze_nldiffusion_functions.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file nldiffusion_functions.h 4 | * @brief Functions for non-linear diffusion applications: 5 | * 2D Gaussian Derivatives 6 | * Perona and Malik conductivity equations 7 | * Perona and Malik evolution 8 | * @date Dec 27, 2011 9 | * @author Pablo F. 
Alcantarilla 10 | */ 11 | 12 | #ifndef _NLDIFFUSION_FUNCTIONS_H_ 13 | #define _NLDIFFUSION_FUNCTIONS_H_ 14 | 15 | //****************************************************************************** 16 | //****************************************************************************** 17 | 18 | // Includes 19 | #include "kaze_config.h" 20 | 21 | //************************************************************************************* 22 | //************************************************************************************* 23 | 24 | // For the scale invariance of the differential operators 25 | const bool use_natural_coordinates = false; 26 | 27 | //************************************************************************************* 28 | //************************************************************************************* 29 | 30 | // Declaration of functions 31 | void Gaussian_2D_Convolution(const cv::Mat &src, cv::Mat &dst, unsigned int ksize_x, unsigned int ksize_y, float sigma); 32 | void Image_Derivatives_SD(const cv::Mat &src, cv::Mat &dst, unsigned int xorder, unsigned int yorder); 33 | void Image_Derivatives_Scharr(const cv::Mat &src, cv::Mat &dst, unsigned int xorder, unsigned int yorder); 34 | void Compute_Gaussian_2D_Derivatives(const cv::Mat &src, cv::Mat &smooth, cv::Mat &Lx, cv::Mat &Ly, 35 | cv::Mat &Lxy, cv::Mat &Lxx, cv::Mat &Lyy, 36 | unsigned int ksize_x, unsigned int ksize_y, float sigma); 37 | void PM_G1(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k); 38 | void PM_G2(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k); 39 | void Weickert_Diffusivity(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k); 40 | float Compute_K_Percentile(const cv::Mat &img, float perc, float gscale, unsigned int nbins, unsigned int ksize_x, unsigned int ksize_y); 41 | void Compute_Scharr_Derivatives(const cv::Mat &src, cv::Mat &dst, int xorder, int yorder, int scale); 42 | void NLD_Step_Scalar(cv::Mat &Ld2, const 
cv::Mat &Ld1, const cv::Mat &c, float stepsize); 43 | bool Check_Maximum_Neighbourhood(cv::Mat &img, int dsize, float value, int row, int col, bool same_img ); 44 | bool Check_Minimum_Neighbourhood(cv::Mat &img, int dsize, float value, int row, int col, bool same_img ); 45 | 46 | //************************************************************************************* 47 | //************************************************************************************* 48 | 49 | 50 | #endif 51 | -------------------------------------------------------------------------------- /KAZE/kaze_features.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file kaze_features.h 4 | * @brief Class that defines cv::KAZE 5 | * @author Ievgen Khvedchenia 6 | * @update: 2013-03-28 by Yuhua Zou 7 | */ 8 | 9 | #ifndef _KAZE_FEATURES_H_ 10 | #define _KAZE_FEATURES_H_ 11 | 12 | #ifdef HAVE_CVCONFIG_H 13 | #include "cvconfig.h" 14 | #endif 15 | 16 | #include "opencv2/core/version.hpp" 17 | 18 | #if ((CV_MAJOR_VERSION>=2) && (CV_MINOR_VERSION>=4)) 19 | 20 | #include "opencv2/features2d/features2d.hpp" 21 | #include "opencv2/imgproc/imgproc.hpp" 22 | #include "opencv2/imgproc/imgproc_c.h" 23 | #include "opencv2/core/internal.hpp" 24 | 25 | #else 26 | 27 | #include "Minimum_version_2.4.0_please_upgrade_your_OpenCV" 28 | 29 | #endif 30 | 31 | #include 32 | 33 | #ifdef HAVE_TEGRA_OPTIMIZATION 34 | #include "opencv2/features2d/features2d_tegra.hpp" 35 | #endif 36 | 37 | #include "kaze_config.h" 38 | 39 | /*! 40 | KAZE features implementation. 
41 | http://www.robesafe.com/personal/pablo.alcantarilla/papers/Alcantarilla12eccv.pdf 42 | */ 43 | namespace cv 44 | { 45 | class CV_EXPORTS_W KAZE : public Feature2D 46 | { 47 | public: 48 | 49 | CV_WRAP explicit KAZE( int nfeatures = 0, int noctaves = 4, int nlevels = 4, float detectorThreshold = 0.001, 50 | int diffusivityType = 1, int descriptorMode = 1, bool extendDescriptor = false, bool uprightOrient = false, bool verbosity = false ); 51 | 52 | KAZE(toptions &_options); 53 | 54 | // returns the descriptor size in bytes 55 | int descriptorSize() const; 56 | 57 | // returns the descriptor type 58 | int descriptorType() const; 59 | 60 | // Compute the KAZE features and descriptors on an image 61 | void operator()( InputArray image, InputArray mask, vector& keypoints, 62 | OutputArray descriptors, bool useProvidedKeypoints=false ) const; 63 | 64 | // Compute the KAZE features with mask 65 | void operator()(InputArray image, InputArray mask, vector& keypoints) const; 66 | 67 | // Compute the KAZE features and descriptors on an image without mask 68 | void operator()(InputArray image, vector& keypoints, OutputArray descriptors) const; 69 | 70 | AlgorithmInfo* info() const; 71 | 72 | protected: 73 | 74 | void detectImpl( const Mat& image, vector& keypoints, const Mat& mask=Mat() ) const; 75 | 76 | // !! 
NOT recommend to use because KAZE descriptors ONLY work with KAZE features 77 | void computeImpl( const Mat& image, vector& keypoints, Mat& descriptors ) const; 78 | 79 | CV_PROP_RW int nfeatures; 80 | 81 | private: 82 | toptions options; 83 | }; 84 | 85 | typedef KAZE KazeFeatureDetector; 86 | //typedef KAZE KazeDescriptorExtractor; // NOT available because KAZE descriptors ONLY work with KAZE features 87 | } 88 | 89 | #endif 90 | -------------------------------------------------------------------------------- /KAZE/kaze_config.cpp: -------------------------------------------------------------------------------- 1 | 2 | //============================================================================= 3 | // 4 | // Ipoint.cpp 5 | // Author: Pablo F. Alcantarilla 6 | // Institution: University d'Auvergne 7 | // Address: Clermont Ferrand, France 8 | // Date: 21/01/2012 9 | // Email: pablofdezalc@gmail.com 10 | // 11 | // KAZE Features Copyright 2012, Pablo F. Alcantarilla 12 | // All Rights Reserved 13 | // See LICENSE for the license information 14 | //============================================================================= 15 | 16 | /** 17 | * @file Ipoint.cpp 18 | * @brief Class that defines a point of interest 19 | * @date Jan 21, 2012 20 | * @author Pablo F. 
Alcantarilla 21 | */ 22 | 23 | #include "kaze_config.h" 24 | 25 | //******************************************************************************* 26 | //******************************************************************************* 27 | 28 | /** 29 | * @brief Ipoint default constructor 30 | */ 31 | toptions::toptions(void) 32 | { 33 | soffset = DEFAULT_SCALE_OFFSET; 34 | omax = DEFAULT_OCTAVE_MAX; 35 | nsublevels = DEFAULT_NSUBLEVELS; 36 | dthreshold = DEFAULT_DETECTOR_THRESHOLD; 37 | dthreshold2 = DEFAULT_DETECTOR_THRESHOLD; 38 | diffusivity = DEFAULT_DIFFUSIVITY_TYPE; 39 | descriptor = DEFAULT_DESCRIPTOR_MODE; 40 | sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; 41 | upright = DEFAULT_UPRIGHT; 42 | extended = DEFAULT_EXTENDED; 43 | save_scale_space = DEFAULT_SAVE_SCALE_SPACE; 44 | save_keypoints = DEFAULT_SAVE_KEYPOINTS; 45 | verbosity = DEFAULT_VERBOSITY; 46 | show_results = DEFAULT_SHOW_RESULTS; 47 | nfeatures = 0; 48 | } 49 | 50 | //******************************************************************************* 51 | //******************************************************************************* 52 | 53 | /** 54 | * @brief Ipoint constructor specifying the value of an item 55 | */ 56 | toptions::toptions(std::string name, float val) 57 | { 58 | soffset = DEFAULT_SCALE_OFFSET; 59 | omax = DEFAULT_OCTAVE_MAX; 60 | nsublevels = DEFAULT_NSUBLEVELS; 61 | dthreshold = DEFAULT_DETECTOR_THRESHOLD; 62 | dthreshold2 = DEFAULT_DETECTOR_THRESHOLD; 63 | diffusivity = DEFAULT_DIFFUSIVITY_TYPE; 64 | descriptor = DEFAULT_DESCRIPTOR_MODE; 65 | sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES; 66 | upright = DEFAULT_UPRIGHT; 67 | extended = DEFAULT_EXTENDED; 68 | save_scale_space = DEFAULT_SAVE_SCALE_SPACE; 69 | save_keypoints = DEFAULT_SAVE_KEYPOINTS; 70 | verbosity = DEFAULT_VERBOSITY; 71 | show_results = DEFAULT_SHOW_RESULTS; 72 | nfeatures = 0; 73 | 74 | if (name == "soffset") { 75 | soffset = val; 76 | } else if (name == "omax") { 77 | omax = static_cast(val); 78 
| } else if (name == "nsublevels") { 79 | nsublevels = static_cast(val); 80 | } else if (name == "dthreshold") { 81 | dthreshold = val; 82 | } else if (name == "diffusivity") { 83 | diffusivity = static_cast(val); 84 | } else if (name == "descriptor"){ 85 | descriptor = static_cast(val); 86 | } else if (name == "sderivatives") { 87 | sderivatives = val; 88 | } else if (name == "upright") { 89 | upright = static_cast(val); 90 | } else if (name == "extended") { 91 | extended = static_cast(val); 92 | } else if (name == "nfeatures") { 93 | nfeatures = static_cast(val); 94 | } 95 | 96 | } 97 | 98 | //******************************************************************************* 99 | //******************************************************************************* 100 | -------------------------------------------------------------------------------- /KAZE/kaze_ipoint.cpp: -------------------------------------------------------------------------------- 1 | //============================================================================= 2 | // 3 | // Ipoint.cpp 4 | // Author: Pablo F. Alcantarilla 5 | // Institution: University d'Auvergne 6 | // Address: Clermont Ferrand, France 7 | // Date: 21/01/2012 8 | // Email: pablofdezalc@gmail.com 9 | // 10 | // KAZE Features Copyright 2012, Pablo F. Alcantarilla 11 | // All Rights Reserved 12 | // See LICENSE for the license information 13 | //============================================================================= 14 | 15 | /** 16 | * @file Ipoint.cpp 17 | * @brief Class that defines a point of interest 18 | * @date Jan 21, 2012 19 | * @author Pablo F. 
Alcantarilla 20 | * @update: 2013-03-28 by Yuhua Zou 21 | */ 22 | 23 | #include "kaze_ipoint.h" 24 | 25 | //******************************************************************************* 26 | //******************************************************************************* 27 | 28 | /** 29 | * @brief Ipoint default constructor 30 | */ 31 | Ipoint::Ipoint(void) 32 | { 33 | xf = yf = 0.0; 34 | x = y = 0; 35 | scale = 0.0; 36 | dresponse = 0.0; 37 | tevolution = 0.0; 38 | octave = 0.0; 39 | sublevel = 0.0; 40 | descriptor_size = 0; 41 | descriptor_mode = 0; 42 | laplacian = 0; 43 | level = 0; 44 | } 45 | 46 | //******************************************************************************* 47 | //******************************************************************************* 48 | 49 | /******************Updated by Yuhua Zou begin************************************/ 50 | 51 | /*** 52 | * Filters for KAZE Ipoint 53 | */ 54 | class MaskPredicate 55 | { 56 | public: 57 | MaskPredicate( const cv::Mat& _mask ) : mask(_mask) {} 58 | bool operator() (const Ipoint& key_pt) const 59 | { 60 | return mask.at( (int)(key_pt.yf + 0.5f), (int)(key_pt.xf + 0.5f) ) == 0; 61 | } 62 | 63 | private: 64 | const cv::Mat mask; 65 | MaskPredicate& operator = (const MaskPredicate&); 66 | }; 67 | 68 | void filterByPixelsMask( std::vector& keypoints, const cv::Mat& mask ) 69 | { 70 | if( mask.empty() ) 71 | return; 72 | 73 | keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end()); 74 | } 75 | 76 | class ResponsePredicate 77 | { 78 | public: 79 | ResponsePredicate() {} 80 | bool operator() (const Ipoint& key_pt) const 81 | { 82 | return key_pt.dresponse == 0; 83 | } 84 | 85 | private: 86 | ResponsePredicate& operator=(const ResponsePredicate&); 87 | }; 88 | 89 | void filterUnvalidKeypoints( std::vector& keypoints ) 90 | { 91 | if( keypoints.empty() ) 92 | return; 93 | 94 | keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), 
ResponsePredicate()), keypoints.end()); 95 | } 96 | 97 | void filterDuplicated( std::vector& keypoints ) 98 | { 99 | int i, j, n = (int)keypoints.size(); 100 | int esigma = 0, level = 0; 101 | float dist = 0.0; 102 | 103 | for (i = 0; i < n; i++) 104 | { 105 | if (keypoints[i].dresponse == 0) 106 | continue; 107 | 108 | level = keypoints[i].level; 109 | esigma = keypoints[i].sigma_size; 110 | esigma *= esigma; 111 | 112 | for (j = 0; j < n; j++) 113 | { 114 | if ( (j != i) && (keypoints[j].dresponse == 0) && 115 | ( keypoints[j].level == level || keypoints[j].level == level+1 || keypoints[j].level == level-1 )) 116 | { 117 | dist = pow(keypoints[j].xf-keypoints[i].xf,2)+pow(keypoints[j].yf-keypoints[i].yf,2); 118 | if( dist < esigma ) 119 | { 120 | if( keypoints[j].dresponse > keypoints[i].dresponse ) 121 | keypoints[i].dresponse = 0; 122 | else 123 | keypoints[j].dresponse = 0; 124 | 125 | break; 126 | } 127 | } 128 | } 129 | } 130 | 131 | filterUnvalidKeypoints(keypoints); 132 | } 133 | 134 | void filterRetainBest(std::vector& keypoints, int n_points) 135 | { 136 | //this is only necessary if the keypoints size is greater than the number of desired points. 137 | if( n_points > 0 && keypoints.size() > (size_t)n_points ) 138 | { 139 | std::sort(keypoints.begin(), keypoints.end(), std::greater()); 140 | keypoints.resize(n_points); 141 | } 142 | } 143 | 144 | /******************Updated by Yuhua Zou end************************************/ 145 | -------------------------------------------------------------------------------- /KAZE/kaze_utils.cpp: -------------------------------------------------------------------------------- 1 | 2 | //============================================================================= 3 | // 4 | // utils.cpp 5 | // Author: Pablo F. Alcantarilla 6 | // Institution: University d'Auvergne 7 | // Address: Clermont Ferrand, France 8 | // Date: 29/12/2011 9 | // Email: pablofdezalc@gmail.com 10 | // 11 | // KAZE Features Copyright 2012, Pablo F. 
Alcantarilla 12 | // All Rights Reserved 13 | // See LICENSE for the license information 14 | //============================================================================= 15 | 16 | /** 17 | * @file utils.cpp 18 | * @brief Some useful functions 19 | * @date Dec 29, 2011 20 | * @author Pablo F. Alcantarilla 21 | */ 22 | 23 | #include "kaze_utils.h" 24 | 25 | // Namespaces 26 | using namespace std; 27 | 28 | //************************************************************************************* 29 | //************************************************************************************* 30 | 31 | /** 32 | * @brief This function computes the minimum value of a float image 33 | * @param src Input image 34 | * @param value Minimum value 35 | */ 36 | void Compute_min_32F(const cv::Mat &src, float &value) 37 | { 38 | float aux = 1000.0; 39 | 40 | for( int i = 0; i < src.rows; i++ ) 41 | { 42 | for( int j = 0; j < src.cols; j++ ) 43 | { 44 | if( src.at(i,j) < aux ) 45 | { 46 | aux = src.at(i,j); 47 | } 48 | } 49 | } 50 | 51 | value = aux; 52 | } 53 | 54 | //************************************************************************************* 55 | //************************************************************************************* 56 | 57 | /** 58 | * @brief This function computes the maximum value of a float image 59 | * @param src Input image 60 | * @param value Maximum value 61 | */ 62 | void Compute_max_32F(const cv::Mat &src, float &value) 63 | { 64 | float aux = 0.0; 65 | 66 | for( int i = 0; i < src.rows; i++ ) 67 | { 68 | for( int j = 0; j < src.cols; j++ ) 69 | { 70 | if( src.at(i,j) > aux ) 71 | { 72 | aux = src.at(i,j); 73 | } 74 | } 75 | } 76 | 77 | value = aux; 78 | } 79 | 80 | //************************************************************************************* 81 | //************************************************************************************* 82 | 83 | /** 84 | * @brief This function converts the scale of the input image prior to visualization 
85 | * @param src Input/Output image 86 | * @param value Maximum value 87 | */ 88 | void Convert_Scale(cv::Mat &src) 89 | { 90 | float min_val = 0, max_val = 0; 91 | 92 | Compute_min_32F(src,min_val); 93 | 94 | src = src - min_val; 95 | 96 | Compute_max_32F(src,max_val); 97 | src = src / max_val; 98 | } 99 | 100 | //************************************************************************************* 101 | //************************************************************************************* 102 | 103 | /** 104 | * @brief This function copies the input image and converts the scale of the copied 105 | * image prior visualization 106 | * @param src Input image 107 | * @param dst Output image 108 | */ 109 | void Copy_and_Convert_Scale(const cv::Mat &src, cv::Mat dst) 110 | { 111 | float min_val = 0, max_val = 0; 112 | 113 | src.copyTo(dst); 114 | Compute_min_32F(dst,min_val); 115 | 116 | dst = dst - min_val; 117 | 118 | Compute_max_32F(dst,max_val); 119 | dst = dst / max_val; 120 | 121 | } 122 | 123 | //************************************************************************************* 124 | //************************************************************************************* 125 | 126 | /** 127 | * @brief This function draws a vector of Ipoints 128 | * @param img Input/Output Image 129 | * @param dst Vector of keypoints 130 | */ 131 | void Draw_Ipoints(cv::Mat &img, const std::vector &keypoints) 132 | { 133 | int x = 0, y = 0; 134 | float s = 0.0; 135 | 136 | for( unsigned int i = 0; i < keypoints.size(); i++ ) 137 | { 138 | x = keypoints[i].x; 139 | y = keypoints[i].y; 140 | s = keypoints[i].scale*2.0; 141 | 142 | // Draw a circle centered on the interest point 143 | cv::circle(img,cv::Point(x,y),s,cv::Scalar(255,0,0),1); 144 | cv::circle(img,cv::Point(x,y),1.0,cv::Scalar(0,255,0),-1); 145 | } 146 | } 147 | 148 | //************************************************************************************* 149 | 
//************************************************************************************* 150 | 151 | /** 152 | * @brief This funtion rounds float to nearest integer 153 | * @param flt Input float 154 | * @return dst Nearest integer 155 | */ 156 | int fRound(float flt) 157 | { 158 | return (int)(flt+0.5); 159 | } 160 | -------------------------------------------------------------------------------- /KAZE/kaze_config.h: -------------------------------------------------------------------------------- 1 | /** 2 | * @file config.h 3 | * @brief Configuration file 4 | * @date Dec 27, 2011 5 | * @author Pablo F. Alcantarilla 6 | * @update 2013-03-28 by Yuhua Zou 7 | */ 8 | 9 | #ifndef _CONFIG_H_ 10 | #define _CONFIG_H_ 11 | 12 | //****************************************************************************** 13 | //****************************************************************************** 14 | 15 | // OPENCV Includes 16 | #include "opencv2/core/core.hpp" 17 | #include "opencv2/imgproc/imgproc.hpp" 18 | #include "opencv2/core/utility.hpp" 19 | 20 | // System Includes 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include // Enalbel OpenMP 29 | 30 | //************************************************************************************* 31 | //************************************************************************************* 32 | 33 | // Some defines 34 | #define NMAX_CHAR 400 35 | #define HAVE_BOOST_THREADING 0 // 1: you have installed and included Boost library, 0: otherwise 36 | 37 | // Options structure 38 | struct toptions 39 | { 40 | float soffset; // Base scale offset (sigma units), Default: 1.60 41 | int omax; // Maximum octave evolution of the image, Default: 4. 
If set to 0, omax = log(min(img.rows,img.cols)) / log(2) - 2 42 | int nsublevels; // Number of sublevels per scale level, Default: 4 43 | int img_width; // Image width 44 | int img_height; // Image height 45 | int diffusivity; // Diffusivity function type, 0 -> PM G1, 1 -> PM G2 (default), 2 -> Weickert 46 | float sderivatives; // Sigma smoothing derivatives, used for Gaussian smoothing 47 | float dthreshold; // Detector response threshold to accept point, Default: 0.001 48 | float dthreshold2; // Minimum Detector response threshold to accept point, Default: 0.00001 49 | bool upright; // Use upright descriptors, not invariant to rotation, Default: false 50 | bool extended; // Use extended descriptor, dimension 128, Default: false 51 | int descriptor; // Descriptor Mode, 0->SURF, 1->M-SURF (default), 2->G-SURF 52 | bool save_scale_space; // Default: false 53 | bool save_keypoints; // Default: false 54 | bool verbosity; // Default: false 55 | bool show_results; // Default: false 56 | int nfeatures; // Demanded number of keypoints, Default: 0 57 | 58 | toptions(); 59 | toptions(std::string name, float val); 60 | }; 61 | 62 | typedef struct 63 | { 64 | cv::Mat Lx, Ly; // 一阶微分图像(First order spatial derivatives) 65 | cv::Mat Lxx, Lxy, Lyy; // 二阶微分图像(Second order spatial derivatives) 66 | cv::Mat Lflow; // 传导图像(Diffusivity image) 67 | cv::Mat Lt; // 进化图像(Evolution image) 68 | cv::Mat Lsmooth; // 平滑图像(Smoothed image) 69 | cv::Mat Lstep; // 进化步长更新矩阵(Evolution step update)(!!实际未被使用!!) 70 | cv::Mat Ldet; // 检测响应矩阵(Detector response) 71 | float etime; // 进化时间(Evolution time) 72 | float esigma; // 进化尺度(Evolution sigma. For linear diffusion t = sigma^2 / 2) 73 | float octave; // 图像组(Image octave) 74 | float sublevel; // 图像层级(Image sublevel in each octave) 75 | int sigma_size; // 图像尺度参数的整数值,用于计算检测响应(Integer esigma. 
For computing the feature detector responses) 76 | }tevolution; 77 | 78 | // Some default options 79 | const float DEFAULT_SCALE_OFFSET = 1.60; // Base scale offset (sigma units) 80 | const float DEFAULT_OCTAVE_MAX = 4.0; // Maximum octave evolution of the image 2^sigma (coarsest scale sigma units) 81 | const int DEFAULT_NSUBLEVELS = 4; // Default number of sublevels per scale level 82 | const float DEFAULT_DETECTOR_THRESHOLD = 0.001; // Detector response threshold to accept point 83 | const float DEFAULT_MIN_DETECTOR_THRESHOLD = 0.00001; // Minimum Detector response threshold to accept point 84 | const int DEFAULT_DESCRIPTOR_MODE = 1; // Descriptor Mode 0->SURF, 1->M-SURF 85 | const bool DEFAULT_UPRIGHT = false; // Upright descriptors, not invariant to rotation 86 | const bool DEFAULT_EXTENDED = false; // Extended descriptor, dimension 128 87 | const bool DEFAULT_SAVE_SCALE_SPACE = false; // For saving the scale space images 88 | const bool DEFAULT_VERBOSITY = false; // Verbosity level (0->no verbosity) 89 | const bool DEFAULT_SHOW_RESULTS = true; // For showing the output image with the detected features plus some ratios 90 | const bool DEFAULT_SAVE_KEYPOINTS = false; // For saving the list of keypoints 91 | 92 | // Some important configuration variables 93 | const float DEFAULT_SIGMA_SMOOTHING_DERIVATIVES = 1.0; 94 | const float DEFAULT_KCONTRAST = .01; 95 | const float KCONTRAST_PERCENTILE = 0.7; 96 | const int KCONTRAST_NBINS = 300; 97 | const bool COMPUTE_KCONTRAST = true; 98 | const bool SUBPIXEL_REFINEMENT = true; 99 | const int DEFAULT_DIFFUSIVITY_TYPE = 1; // 0 -> PM G1, 1 -> PM G2, 2 -> Weickert 100 | const bool USE_CLIPPING_NORMALIZATION = false; 101 | const float CLIPPING_NORMALIZATION_RATIO = 1.6; 102 | const int CLIPPING_NORMALIZATION_NITER = 5; 103 | const float PI = 3.14159; 104 | const float M2_PI = 6.2832; 105 | 106 | //************************************************************************************* 107 | 
//************************************************************************************* 108 | 109 | #endif 110 | 111 | 112 | 113 | 114 | -------------------------------------------------------------------------------- /KAZE/kaze.h: -------------------------------------------------------------------------------- 1 | 2 | /** 3 | * @file KAZE.h 4 | * @brief Main program for detecting and computing descriptors in a nonlinear 5 | * scale space 6 | * @date Jan 21, 2012 7 | * @author Pablo F. Alcantarilla 8 | */ 9 | 10 | #ifndef _KAZE_H_ 11 | #define _KAZE_H_ 12 | 13 | //************************************************************************************* 14 | //************************************************************************************* 15 | 16 | // Includes 17 | #include "kaze_config.h" 18 | #include "kaze_nldiffusion_functions.h" 19 | #include "kaze_utils.h" 20 | #include "kaze_ipoint.h" 21 | 22 | // KAZE Class Declaration 23 | class KAZE 24 | { 25 | 26 | private: 27 | 28 | // Parameters of the Nonlinear diffusion class 29 | float soffset; // Base scale offset 30 | float sderivatives; // Standard deviation of the Gaussian for the nonlinear diff. derivatives 31 | int omax; // Maximum octave level, Default: 4. 
If set to 0, omax = log(min(img.rows,img.cols)) / log(2) - 2 32 | int nsublevels; // Number of sublevels per octave level 33 | int img_width; // Width of the original image 34 | int img_height; // Height of the original image 35 | bool save_scale_space; // For saving scale space images 36 | bool verbosity; // Verbosity level 37 | std::vector evolution; // Vector of nonlinear diffusion evolution 38 | float kcontrast; // The contrast parameter for the scalar nonlinear diffusion 39 | float dthreshold; // Feature detector threshold response 40 | int diffusivity; // Diffusivity type, 0->PM G1, 1->PM G2, 2-> Weickert 41 | int descriptor_mode; // Descriptor mode 42 | bool use_upright; // Set to true in case we want to use the upright version of the descriptors 43 | bool use_extended; // Set to true in case we want to use the extended version of the descriptors 44 | 45 | // Vector of keypoint vectors for finding extrema in multiple threads 46 | std::vector > kpts_par; 47 | 48 | // Computation times variables in ms 49 | double tkcontrast; // Kcontrast factor computation 50 | double tnlscale; // Nonlinear Scale space generation 51 | double tdetector; // Feature detector 52 | double tmderivatives; // Multiscale derivatives computation 53 | double tdresponse; // Detector response computation 54 | double tdescriptor; // Feature descriptor 55 | double tsubpixel; // Subpixel refinement 56 | 57 | // Some auxiliary variables used in the AOS step 58 | cv::Mat Ltx, Lty, px, py, ax, ay, bx, by, qr, qc; 59 | 60 | public: 61 | 62 | // Constructor 63 | explicit KAZE(toptions &options); 64 | //KAZE(); 65 | 66 | void Allocate_Memory_Evolution(void); 67 | int Create_Nonlinear_Scale_Space(const cv::Mat &img); 68 | void Compute_KContrast(const cv::Mat &img, const float &kper); 69 | void Compute_Multiscale_Derivatives(void); 70 | 71 | // Feature Detection Methods 72 | void Compute_Detector_Response(void); 73 | void Feature_Detection(std::vector &kpts); 74 | void 
Determinant_Hessian_Parallel(std::vector &kpts); 75 | void Find_Extremum_Threading(int level); 76 | void Do_Subpixel_Refinement(std::vector &kpts); 77 | void Feature_Suppression_Distance(std::vector &kpts, float mdist); 78 | 79 | // AOS Methods 80 | void AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize); 81 | void AOS_Step_Scalar_Parallel(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize); 82 | void AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize); 83 | void AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize); 84 | void Thomas(cv::Mat a, cv::Mat b, cv::Mat Ld, cv::Mat x); 85 | 86 | // Methods for saving the scale space set of images and detector responses 87 | void Save_Nonlinear_Scale_Space(void); 88 | void Save_Detector_Responses(void); 89 | void Save_Flow_Responses(void); 90 | 91 | // Feature Description methods 92 | void Feature_Description(std::vector &kpts); 93 | void Compute_Main_Orientation_SURF(Ipoint &kpt); 94 | 95 | // Descriptor Mode -> 0 SURF 64 96 | void Get_SURF_Upright_Descriptor_64(Ipoint &kpt); 97 | void Get_SURF_Descriptor_64(Ipoint &kpt); 98 | 99 | // Descriptor Mode -> 0 SURF 128 100 | void Get_SURF_Upright_Descriptor_128(Ipoint &kpt); 101 | void Get_SURF_Descriptor_128(Ipoint &kpt); 102 | 103 | // Descriptor Mode -> 1 M-SURF 64 104 | void Get_MSURF_Upright_Descriptor_64(Ipoint &kpt); 105 | void Get_MSURF_Descriptor_64(Ipoint &kpt); 106 | 107 | // Descriptor Mode -> 1 M-SURF 128 108 | void Get_MSURF_Upright_Descriptor_128(Ipoint &kpt); 109 | void Get_MSURF_Descriptor_128(Ipoint &kpt); 110 | 111 | // Descriptor Mode -> 2 G-SURF 64 112 | void Get_GSURF_Upright_Descriptor_64(Ipoint &kpt); 113 | void Get_GSURF_Descriptor_64(Ipoint &kpt); 114 | 115 | // Descriptor Mode -> 2 G-SURF 128 116 | void Get_GSURF_Upright_Descriptor_128(Ipoint &kpt); 117 | void Get_GSURF_Descriptor_128(Ipoint &kpt); 118 | }; 119 | 120 | // Inline functions 121 | 
float Get_Angle(float X, float Y); 122 | float gaussian(float x, float y, float sig); 123 | void Check_Descriptor_Limits(int &x, int &y, int width, int height ); 124 | void Clipping_Descriptor(Ipoint &keypoint, int niter, float ratio); 125 | 126 | //************************************************************************************* 127 | //************************************************************************************* 128 | 129 | #endif 130 | -------------------------------------------------------------------------------- /KAZE/kaze_features.cpp: -------------------------------------------------------------------------------- 1 | /********************************************************************* 2 | * Software License Agreement (BSD License) 3 | * 4 | * Copyright (c) 2009, Willow Garage, Inc. 5 | * All rights reserved. 6 | * 7 | * Redistribution and use in source and binary forms, with or without 8 | * modification, are permitted provided that the following conditions 9 | * are met: 10 | * 11 | * * Redistributions of source code must retain the above copyright 12 | * notice, this list of conditions and the following disclaimer. 13 | * * Redistributions in binary form must reproduce the above 14 | * copyright notice, this list of conditions and the following 15 | * disclaimer in the documentation and/or other materials provided 16 | * with the distribution. 17 | * * Neither the name of the Willow Garage nor the names of its 18 | * contributors may be used to endorse or promote products derived 19 | * from this software without specific prior written permission. 20 | * 21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE 25 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 29 | * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 31 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | * POSSIBILITY OF SUCH DAMAGE. 33 | *********************************************************************/ 34 | 35 | /** Authors: Ievgen Khvedchenia */ 36 | /** Update: 2013-03-28 by Yuhua Zou*/ 37 | 38 | #include 39 | #include "kaze_features.h" 40 | #include "kaze.h" 41 | 42 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 43 | 44 | #define DEGREE_TO_RADIAN(x) ((x) * CV_PI / 180.0) 45 | #define RADIAN_TO_DEGREE(x) ((x) * 180.0 / CV_PI) 46 | 47 | namespace cv 48 | { 49 | /*** 50 | * Convertions between cv::Keypoint and KAZE::Ipoint 51 | */ 52 | static inline void convertPoint(const cv::KeyPoint& kp, Ipoint& aux) 53 | { 54 | aux.xf = kp.pt.x; 55 | aux.yf = kp.pt.y; 56 | aux.x = fRound(aux.xf); 57 | aux.y = fRound(aux.yf); 58 | 59 | //cout << "SURF size: " << kpts_surf1_[i].size*.5 << endl; 60 | aux.octave = kp.octave; 61 | 62 | // Get the radius for visualization 63 | aux.scale = kp.size*.2; // Updated by Yuhua Zou 64 | aux.angle = DEGREE_TO_RADIAN(kp.angle); 65 | 66 | //aux.descriptor_size = 64; 67 | } 68 | 69 | static inline void convertPoint(const Ipoint& src, cv::KeyPoint& kp) 70 | { 71 | kp.pt.x = src.xf; 72 | kp.pt.y = src.yf; 73 | 74 | kp.angle = RADIAN_TO_DEGREE(src.angle); 75 | kp.response = src.dresponse; 76 | 77 | kp.octave = src.octave; 78 | kp.size = src.scale*5.0; // Updated by Yuhua Zou 79 | } 80 | 81 | /*** 82 | * Implementation of cv::KAZE 83 | */ 
84 | KAZE::KAZE( int nfeatures /* = 1000 */, int noctaves /* = 2 */, 85 | int nlevels /* = 4 */, float detectorThreshold /* = 0.001 */, 86 | int diffusivityType /* = 1 */, int descriptorMode /* = 1 */, 87 | bool extendDescriptor /* = false */, bool uprightOrient /* = false */, 88 | bool verbosity /* = false */ ) 89 | { 90 | options.nfeatures = nfeatures; 91 | options.omax = noctaves; 92 | options.nsublevels = nlevels; 93 | options.dthreshold = detectorThreshold; 94 | options.diffusivity = diffusivityType; 95 | options.descriptor = descriptorMode; 96 | options.extended = extendDescriptor; 97 | options.upright = uprightOrient; 98 | options.verbosity = verbosity; 99 | } 100 | 101 | KAZE::KAZE(toptions &_options) 102 | { 103 | options = _options; 104 | } 105 | 106 | int KAZE::descriptorSize() const 107 | { 108 | return options.extended ? 128 : 64; 109 | } 110 | 111 | int KAZE::descriptorType() const 112 | { 113 | return CV_32F; 114 | } 115 | 116 | void KAZE::operator()(InputArray _image, InputArray _mask, vector& _keypoints, 117 | OutputArray _descriptors, bool useProvidedKeypoints) const 118 | { 119 | 120 | bool do_keypoints = !useProvidedKeypoints; 121 | bool do_descriptors = _descriptors.needed(); 122 | 123 | if( (!do_keypoints && !do_descriptors) || _image.empty() ) 124 | return; 125 | 126 | cv::Mat img1_8, img1_32; 127 | 128 | // Convert to gray scale iamge and float image 129 | if (_image.getMat().channels() == 3) 130 | cv::cvtColor(_image, img1_8, CV_RGB2GRAY); 131 | else 132 | _image.getMat().copyTo(img1_8); 133 | 134 | img1_8.convertTo(img1_32, CV_32F, 1.0/255.0,0); 135 | 136 | // Construct KAZE 137 | toptions opt = options; 138 | opt.img_width = img1_32.cols; 139 | opt.img_height = img1_32.rows; 140 | 141 | ::KAZE kazeEvolution(opt); 142 | 143 | // Create nonlinear scale space 144 | kazeEvolution.Create_Nonlinear_Scale_Space(img1_32); 145 | 146 | // Feature detection 147 | std::vector kazePoints; 148 | 149 | if (do_keypoints) 150 | { 151 | 
kazeEvolution.Feature_Detection(kazePoints); 152 | filterDuplicated(kazePoints); 153 | 154 | if (!_mask.empty()) 155 | { 156 | filterByPixelsMask(kazePoints, _mask.getMat()); 157 | } 158 | 159 | if (opt.nfeatures > 0) 160 | { 161 | filterRetainBest(kazePoints, opt.nfeatures); 162 | } 163 | 164 | } 165 | else 166 | { 167 | kazePoints.resize(_keypoints.size()); 168 | 169 | #pragma omp parallel for 170 | for (size_t i = 0; i < kazePoints.size(); i++) 171 | { 172 | convertPoint(_keypoints[i], kazePoints[i]); 173 | } 174 | } 175 | 176 | // Descriptor caculation 177 | if (do_descriptors) 178 | { 179 | kazeEvolution.Feature_Description(kazePoints); 180 | 181 | cv::Mat& descriptors = _descriptors.getMatRef(); 182 | descriptors.create(kazePoints.size(), descriptorSize(), descriptorType()); 183 | 184 | for (size_t i = 0; i < kazePoints.size(); i++) 185 | { 186 | std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(), (float*)descriptors.row(i).data); 187 | } 188 | } 189 | 190 | // Transfer from KAZE::Ipoint to cv::KeyPoint 191 | if (do_keypoints) 192 | { 193 | _keypoints.resize(kazePoints.size()); 194 | 195 | #pragma omp parallel for 196 | for (size_t i = 0; i < kazePoints.size(); i++) 197 | { 198 | convertPoint(kazePoints[i], _keypoints[i]); 199 | } 200 | } 201 | 202 | } 203 | 204 | void KAZE::operator()(InputArray image, InputArray mask, vector& keypoints ) const 205 | { 206 | (*this)(image, mask, keypoints, noArray(), false); 207 | } 208 | 209 | void KAZE::operator()(InputArray image, vector& keypoints, OutputArray descriptors) const 210 | { 211 | (*this)(image, noArray(), keypoints, descriptors, false); 212 | } 213 | 214 | void KAZE::detectImpl( const Mat& image, vector& keypoints, const Mat& mask) const 215 | { 216 | (*this)(image, mask, keypoints, noArray(), false); 217 | } 218 | 219 | void KAZE::computeImpl( const Mat& image, vector& keypoints, Mat& descriptors) const 220 | { 221 | (*this)(image, Mat(), keypoints, descriptors, false); 222 | } 223 | 
224 | CV_INIT_ALGORITHM(KAZE, "Feature2D.KAZE", 225 | obj.info()->addParam(obj, "nfeatures", obj.options.nfeatures); 226 | obj.info()->addParam(obj, "noctaves", obj.options.omax); 227 | obj.info()->addParam(obj, "nlevels", obj.options.nsublevels); 228 | obj.info()->addParam(obj, "detectorThreshold", obj.options.dthreshold); 229 | obj.info()->addParam(obj, "diffusivityType", obj.options.diffusivity); 230 | obj.info()->addParam(obj, "descriptorMode", obj.options.descriptor); 231 | obj.info()->addParam(obj, "extendDescriptor", obj.options.extended); 232 | obj.info()->addParam(obj, "uprightOrient", obj.options.upright); 233 | obj.info()->addParam(obj, "verbosity", obj.options.verbosity)); 234 | } 235 | -------------------------------------------------------------------------------- /KazeOpenCV.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * @file KazeOpenCV.cpp 3 | * @brief Sample code showing how to match images using KAZE features 4 | * @date March 28, 2013 5 | * @author Yuhua Zou (yuhuazou@gmail.com) 6 | */ 7 | 8 | #include "predep.h" 9 | 10 | #include "opencv2/imgproc/imgproc.hpp" 11 | #include "opencv2/highgui/highgui.hpp" 12 | #include "opencv2/calib3d/calib3d.hpp" 13 | 14 | // !! 
Please enable /openmp in your project configurations (in /C++/Language) in Visual Studio 15 | // If you have installed and included Boost in your project, 16 | // please set 'HAVE_BOOST_THREADING' to 1 in ./KAZE/kaze_config.h to enable Boost-based multi-threading 17 | #include "KAZE/kaze_features.h" 18 | 19 | #pragma comment( lib, cvLIB("core") ) 20 | #pragma comment( lib, cvLIB("imgproc") ) 21 | #pragma comment( lib, cvLIB("highgui") ) 22 | #pragma comment( lib, cvLIB("flann") ) 23 | #pragma comment( lib, cvLIB("features2d") ) 24 | #pragma comment( lib, cvLIB("calib3d") ) 25 | 26 | // Define 'USE_SIFT' to use SIFT keypoints instead of KAZE for comparation 27 | #define USE_SIFT0 28 | 29 | #ifdef USE_SIFT 30 | #include "opencv2/nonfree/features2d.hpp" 31 | #pragma comment( lib, cvLIB("nonfree") ) 32 | #endif 33 | 34 | 35 | using namespace cv; 36 | using namespace std; 37 | 38 | // @brief Show text in the upper left corner of the image 39 | void showText(cv::Mat& img, string text) 40 | { 41 | int fontFace = cv::FONT_HERSHEY_SIMPLEX; 42 | double fontScale = 1.5; 43 | int fontThickness = 3; 44 | 45 | // get text size 46 | int textBaseline=0; 47 | cv::Size textSize = cv::getTextSize(text, fontFace, 48 | fontScale, fontThickness, &textBaseline); 49 | textBaseline += fontThickness; 50 | 51 | // put the text at upper right corner 52 | //cv::Point textOrg((img.cols - textSize.width - 10), textSize.height + 10); 53 | cv::Point textOrg(10, textSize.height + 10); // upper left corner 54 | 55 | // draw the box 56 | rectangle(img, textOrg + cv::Point(0, textBaseline), 57 | textOrg + cv::Point(textSize.width, -textSize.height-10), 58 | cv::Scalar(50,50,50), -1); 59 | 60 | // then put the text itself 61 | putText(img, text, textOrg, fontFace, fontScale, 62 | cv::Scalar(0,0,255), fontThickness, 8); 63 | } 64 | 65 | // @brief Find homography and inliers 66 | bool findHomography( const vector& source, const vector& result, const vector& input, vector& inliers, cv::Mat& homography) 67 
| { 68 | if (input.size() < 4) 69 | return false; 70 | 71 | const int pointsCount = input.size(); 72 | const float reprojectionThreshold = 3; 73 | 74 | //Prepare src and dst points 75 | std::vector srcPoints, dstPoints; 76 | for (int i = 0; i < pointsCount; i++) 77 | { 78 | srcPoints.push_back(source[input[i].queryIdx].pt); 79 | dstPoints.push_back(result[input[i].trainIdx].pt); 80 | } 81 | 82 | // Find homography using RANSAC algorithm 83 | std::vector status; 84 | homography = cv::findHomography(srcPoints, dstPoints, CV_FM_RANSAC, reprojectionThreshold, status); 85 | 86 | // Warp dstPoints to srcPoints domain using inverted homography transformation 87 | std::vector srcReprojected; 88 | cv::perspectiveTransform(dstPoints, srcReprojected, homography.inv()); 89 | 90 | // Pass only matches with low reprojection error (less than reprojectionThreshold value in pixels) 91 | inliers.clear(); 92 | for (int i = 0; i < pointsCount; i++) 93 | { 94 | cv::Point2f actual = srcPoints[i]; 95 | cv::Point2f expect = srcReprojected[i]; 96 | cv::Point2f v = actual - expect; 97 | float distanceSquared = v.dot(v); 98 | 99 | if (/*status[i] && */distanceSquared <= reprojectionThreshold * reprojectionThreshold) 100 | { 101 | inliers.push_back(input[i]); 102 | } 103 | } 104 | 105 | return inliers.size() >= 4; 106 | } 107 | 108 | // @brief Use BFMatcher to match descriptors 109 | void bfMatch( Mat& descriptors_1, Mat& descriptors_2, vector& good_matches, bool filterMatches = true ) 110 | { 111 | //-- Matching descriptor vectors using Brute-Force matcher 112 | cout << "--> Use BFMatcher..." 
<< endl; 113 | BFMatcher matcher(cv::NORM_L2, true); 114 | vector< DMatch > matches; 115 | matcher.match( descriptors_1, descriptors_2, matches ); 116 | 117 | if (!filterMatches) 118 | { 119 | good_matches = matches; 120 | } 121 | else 122 | { 123 | double max_dist = 0, min_dist = 100, thresh = 0; 124 | 125 | //-- Quick calculation of max and min distances between keypoints 126 | for( int i = 0; i < matches.size(); i++ ) 127 | { 128 | double dist = matches[i].distance; 129 | if( dist < min_dist ) min_dist = dist; 130 | if( dist > max_dist ) max_dist = dist; 131 | } 132 | //thresh = MAX(2*min_dist, min_dist + 0.5*(max_dist - min_dist)); 133 | thresh = 2*min_dist; 134 | 135 | //-- Find initial good matches (i.e. whose distance is less than 2*min_dist ) 136 | for( int i = 0; i < matches.size(); i++ ) 137 | { 138 | if( matches[i].distance < thresh ) 139 | { 140 | good_matches.push_back( matches[i]); 141 | } 142 | } 143 | } 144 | } 145 | 146 | // @brief Use FlannBasedMatcher to match descriptors 147 | void flannMatch( Mat& descriptors_1, Mat& descriptors_2, vector& good_matches, bool filterMatches = true ) 148 | { 149 | cout << "--> Use FlannBasedMatcher..." << endl; 150 | FlannBasedMatcher matcher; 151 | vector< DMatch > matches; 152 | matcher.match( descriptors_1, descriptors_2, matches ); 153 | 154 | if (!filterMatches) 155 | { 156 | good_matches = matches; 157 | } 158 | else 159 | { 160 | double max_dist = 0, min_dist = 100, thresh = 0; 161 | 162 | //-- Quick calculation of max and min distances between keypoints 163 | for( int i = 0; i < matches.size(); i++ ) 164 | { 165 | double dist = matches[i].distance; 166 | if( dist < min_dist ) min_dist = dist; 167 | if( dist > max_dist ) max_dist = dist; 168 | } 169 | //thresh = MAX(2*min_dist, min_dist + 0.5*(max_dist - min_dist)); 170 | thresh = 2*min_dist; 171 | 172 | //-- Find initial good matches (i.e. 
whose distance is less than 2*min_dist ) 173 | for( int i = 0; i < matches.size(); i++ ) 174 | { 175 | if( matches[i].distance < thresh ) 176 | { 177 | good_matches.push_back( matches[i]); 178 | } 179 | } 180 | } 181 | } 182 | 183 | // @brief Use FlannBasedMatcher with knnMatch to match descriptors 184 | void knnMatch( Mat& descriptors_1, Mat& descriptors_2, vector& good_matches ) 185 | { 186 | cout << "--> Use knnMatch..." << endl; 187 | vector > knMatches; 188 | FlannBasedMatcher matcher; 189 | int k = 2; 190 | float maxRatio = 0.75; 191 | 192 | matcher.knnMatch(descriptors_1, descriptors_2, knMatches, k); 193 | 194 | good_matches.clear(); 195 | 196 | for (size_t i=0; i< knMatches.size(); i++) 197 | { 198 | const cv::DMatch& best = knMatches[i][0]; 199 | const cv::DMatch& good = knMatches[i][1]; 200 | 201 | //if (best.distance <= good.distance) continue; 202 | 203 | float ratio = (best.distance / good.distance); 204 | if (ratio <= maxRatio) 205 | { 206 | good_matches.push_back(best); 207 | } 208 | } 209 | } 210 | 211 | //////////////////////////////////////////////////////////////////////////////////// 212 | // @brief Main function 213 | int main(int argc, char** argv) 214 | { 215 | if (argc < 3) 216 | return 0; 217 | 218 | //////////////////////////////////////////////////////////////////////////////////// 219 | //-- Load object image 220 | Mat img_1 = imread(argv[1]); if (img_1.empty()) return -1; 221 | 222 | std::vector keypoints_1, keypoints_2; 223 | Mat descriptors_1, descriptors_2; 224 | bool doDrawKeypoint = true; 225 | float beta = 1; 226 | int nMatches = argc - 2; 227 | Mat imgMatches; 228 | int roiHeight = (int)(img_1.rows*beta); 229 | 230 | 231 | //-- Construct feature engine for object image 232 | #ifdef USE_SIFT 233 | cv::SiftFeatureDetector detector_1; 234 | cv::SiftDescriptorExtractor extractor_1; 235 | #else 236 | toptions opt; 237 | opt.omax = 2; 238 | //opt.nfeatures = 1000; 239 | //opt.verbosity = true; 240 | KAZE detector_1(opt), 
detector_2(opt); 241 | #endif 242 | 243 | double tkaze = 0.0; 244 | int64 t1 = cv::getTickCount(), t2 = 0; 245 | 246 | //-- Detect keypoints and calculate descriptors 247 | #ifdef USE_SIFT 248 | detector_1.detect(img_1, keypoints_1); 249 | extractor_1.compute(img_1,keypoints_1,descriptors_1); 250 | #else 251 | detector_1(img_1, keypoints_1, descriptors_1); 252 | #endif 253 | 254 | t2 = cv::getTickCount(); 255 | tkaze = 1000.0 * (t2 - t1) / cv::getTickFrequency(); 256 | 257 | cout << "\n-- Detection time (ms): " << tkaze << endl; 258 | printf("-- Keypoint number of img_1 : %d \n", keypoints_1.size() ); 259 | 260 | //return 0; 261 | 262 | //-- Draw Keypoints 263 | showText(img_1, "Image #1"); 264 | if (doDrawKeypoint) 265 | { 266 | drawKeypoints(img_1, keypoints_1, img_1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS); 267 | } 268 | 269 | //////////////////////////////////////////////////////////////////////////////////// 270 | for (int k = 2; k < argc; k++) 271 | { 272 | Mat img_2 = imread(argv[k]); if (img_2.empty()) continue; 273 | 274 | //////////////////////////////////////////////////////////////////////////////////// 275 | t1 = cv::getTickCount(); 276 | 277 | //-- Detect keypoints and calculate descriptors 278 | #ifdef USE_SIFT 279 | detector_2.detect(img_2, keypoints_2); 280 | extractor_2.compute(img_2,keypoints_2,descriptors_2); 281 | #else 282 | detector_2(img_2, keypoints_2, descriptors_2); 283 | #endif 284 | 285 | t2 = cv::getTickCount(); 286 | tkaze = 1000.0 * (t2 - t1) / cv::getTickFrequency(); 287 | 288 | cout << "\n-- Detection time (ms): " << tkaze << endl; 289 | printf("-- Keypoint number of img_2 : %d \n", keypoints_2.size() ); 290 | 291 | if (keypoints_1.size() < 4 || keypoints_2.size() < 4) 292 | continue; 293 | 294 | //////////////////////////////////////////////////////////////////////////////////// 295 | //-- Matching Keypoints 296 | cout << "-- Computing homography (RANSAC)..." 
<< endl; 297 | vector matches, inliers; 298 | Mat H; 299 | bool filterMatches = true; 300 | 301 | bfMatch(descriptors_1, descriptors_2, matches, filterMatches); 302 | if (!::findHomography(keypoints_1, keypoints_2, matches, inliers, H)) 303 | { 304 | matches.clear(); 305 | flannMatch(descriptors_1, descriptors_2, matches, filterMatches); 306 | if (!::findHomography(keypoints_1, keypoints_2, matches, inliers, H)) 307 | { 308 | matches.clear(); 309 | knnMatch(descriptors_1, descriptors_2, matches); 310 | if (!::findHomography(keypoints_1, keypoints_2, matches, inliers, H)) 311 | { 312 | inliers.clear(); 313 | H = Mat(); 314 | } 315 | } 316 | } 317 | 318 | //////////////////////////////////////////////////////////////////////////////////// 319 | //-- Draw Keypoints 320 | char tiImg[20]; 321 | sprintf_s(tiImg, "Image #%d", k); 322 | showText(img_2, tiImg); 323 | if (doDrawKeypoint) 324 | { 325 | drawKeypoints(img_2, keypoints_2, img_2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS); 326 | } 327 | 328 | //-- Draw inliers 329 | Mat imgMatch; 330 | drawMatches( img_1, keypoints_1, img_2, keypoints_2, 331 | inliers, imgMatch, Scalar::all(-1), Scalar::all(-1), 332 | vector(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 333 | 334 | printf("-- Number of Matches : %d \n", matches.size() ); 335 | printf("-- Number of Inliers : %d \n", inliers.size() ); 336 | printf("-- Match rate : %f \n", inliers.size() / (float)matches.size() ); 337 | 338 | //-- Localize the object 339 | //-- Get the corners from the image_1 ( the object to be "detected" ) 340 | vector obj_corners; 341 | obj_corners.push_back( Point2f(0,0) ); 342 | obj_corners.push_back( Point2f(img_1.cols,0) ); 343 | obj_corners.push_back( Point2f(img_1.cols,img_1.rows) ); 344 | obj_corners.push_back( Point2f(0,img_1.rows) ); 345 | 346 | if (!H.empty()) 347 | { 348 | vector scene_corners; 349 | perspectiveTransform(obj_corners, scene_corners, H); 350 | 351 | //-- Draw lines between the corners (the mapped object in 
// NOTE(review): lines 353-383 of KazeOpenCV.cpp below were destroyed by the
// HTML extraction (everything between a '<' and the next '>' was swallowed,
// e.g. the for-loop conditions and the imgMatches composition). Recover this
// span from the upstream repository before use; it is left untouched here
// because a confident reconstruction is not possible from this view.
the scene - image_2 ) 352 | int npts = scene_corners.size(); 353 | for (int i=0; i imgMatches.cols) 384 | { 385 | flip(imgMatches.t(), imgMatches, 0); 386 | } 387 | 388 | // Show detected matches 389 | namedWindow("Matches",CV_WINDOW_NORMAL); 390 | imshow( "Matches", imgMatches ); 391 | 392 | waitKey(0); 393 | destroyAllWindows(); 394 | 395 | return 0; 396 | } 397 | -------------------------------------------------------------------------------- /KAZE/kaze_nldiffusion_functions.cpp: -------------------------------------------------------------------------------- 1 | 2 | //============================================================================= 3 | // 4 | // nldiffusion_functions.cpp 5 | // Author: Pablo F. Alcantarilla 6 | // Institution: University d'Auvergne 7 | // Address: Clermont Ferrand, France 8 | // Date: 27/12/2011 9 | // Email: pablofdezalc@gmail.com 10 | // 11 | // KAZE Features Copyright 2012, Pablo F. Alcantarilla 12 | // All Rights Reserved 13 | // See LICENSE for the license information 14 | //============================================================================= 15 | 16 | /** 17 | * @file nldiffusion_functions.cpp 18 | * @brief Functions for non-linear diffusion applications: 19 | * 2D Gaussian Derivatives 20 | * Perona and Malik conductivity equations 21 | * Perona and Malik evolution 22 | * @date Dec 27, 2011 23 | * @author Pablo F.
Alcantarilla 24 | * @update 2013-03-28 by Yuhua Zou 25 | */ 26 | 27 | #include "kaze_nldiffusion_functions.h" 28 | 29 | // Namespaces 30 | using namespace std; 31 | 32 | //************************************************************************************* 33 | //************************************************************************************* 34 | 35 | /** 36 | * @brief This function smoothes an image with a Gaussian kernel 37 | * @param src Input image 38 | * @param dst Output image 39 | * @param ksize_x Kernel size in X-direction (horizontal) 40 | * @param ksize_y Kernel size in Y-direction (vertical) 41 | * @param sigma Kernel standard deviation 42 | */ 43 | void Gaussian_2D_Convolution(const cv::Mat &src, cv::Mat &dst, unsigned int ksize_x, 44 | unsigned int ksize_y, float sigma) 45 | { 46 | // Compute an appropriate kernel size according to the specified sigma 47 | if( sigma > ksize_x || sigma > ksize_y || ksize_x == 0 || ksize_y == 0 ) 48 | { 49 | ksize_x = ceil(2.0*(1.0 + (sigma-0.8)/(0.3))); 50 | ksize_y = ksize_x; 51 | } 52 | 53 | // The kernel size must be and odd number 54 | if( (ksize_x % 2) == 0 ) 55 | { 56 | ksize_x += 1; 57 | } 58 | 59 | if( (ksize_y % 2) == 0 ) 60 | { 61 | ksize_y += 1; 62 | } 63 | 64 | // Perform the Gaussian Smoothing with border replication 65 | cv::GaussianBlur(src,dst,cv::Size(ksize_x,ksize_y),sigma,sigma,cv::BORDER_REPLICATE); 66 | 67 | } 68 | 69 | //************************************************************************************* 70 | //************************************************************************************* 71 | 72 | /** 73 | * @brief This function computes image derivatives with symmetric differences 74 | * @param src Input image 75 | * @param dst Output image 76 | * @param xorder Derivative order in X-direction (horizontal) 77 | * @param yorder Derivative order in Y-direction (vertical) 78 | */ 79 | void Image_Derivatives_SD(const cv::Mat &src, cv::Mat &dst, unsigned int xorder, unsigned int yorder) 
80 | { 81 | unsigned int norder_x = xorder; 82 | unsigned int norder_y = yorder; 83 | int left = 0, right = 0, up = 0, down = 0; 84 | 85 | // Initialize the destination image 86 | dst = cv::Mat::zeros(dst.rows,dst.cols,CV_32F); 87 | 88 | // Create an auxiliary image 89 | cv::Mat aux(dst.rows,dst.cols,CV_32F); 90 | src.copyTo(aux); 91 | 92 | // Firstly compute derivatives in the x-axis (horizontal) 93 | while( norder_x != 0 ) 94 | { 95 | for( int i = 0; i < aux.rows; i++ ) 96 | { 97 | for( int j = 0; j < aux.cols; j++ ) 98 | { 99 | left = j-1; 100 | right = j+1; 101 | 102 | // Check the horizontal bounds 103 | if( left < 0 ) 104 | { 105 | left = 0; 106 | } 107 | 108 | if( right >= aux.cols) 109 | { 110 | right = aux.cols-1; 111 | } 112 | 113 | *(dst.ptr(i)+j) = 0.5*((*(aux.ptr(i)+right))-(*(aux.ptr(i)+left))); 114 | } 115 | } 116 | 117 | norder_x--; 118 | 119 | if( norder_x != 0 ) 120 | { 121 | dst.copyTo(aux); 122 | } 123 | } 124 | 125 | // Compute derivatives in the y-axis (vertical) 126 | while( norder_y != 0 ) 127 | { 128 | for( int i = 0; i < aux.cols; i++ ) 129 | { 130 | for( int j = 0; j < aux.rows; j++ ) 131 | { 132 | up = j-1; 133 | down = j+1; 134 | 135 | // Check the vertical bounds 136 | if( up < 0 ) 137 | { 138 | up = 0; 139 | } 140 | 141 | if( down >= aux.rows) 142 | { 143 | down = aux.rows-1; 144 | } 145 | 146 | *(dst.ptr(j)+i) = 0.5*((*(aux.ptr(down)+i))-(*(aux.ptr(up)+i))); 147 | } 148 | } 149 | 150 | norder_y--; 151 | 152 | if( norder_y != 0 ) 153 | { 154 | dst.copyTo(aux); 155 | } 156 | } 157 | } 158 | 159 | //************************************************************************************* 160 | //************************************************************************************* 161 | 162 | /** 163 | * @brief This function computes image derivatives with Scharr kernel 164 | * @param src Input image 165 | * @param dst Output image 166 | * @param xorder Derivative order in X-direction (horizontal) 167 | * @param yorder Derivative order in 
Y-direction (vertical) 168 | * @note Scharr operator approximates better rotation invariance than 169 | * other stencils such as Sobel. See Weickert and Scharr, 170 | * A Scheme for Coherence-Enhancing Diffusion Filtering with Optimized Rotation Invariance, 171 | * Journal of Visual Communication and Image Representation 2002 172 | */ 173 | void Image_Derivatives_Scharr(const cv::Mat &src, cv::Mat &dst, unsigned int xorder, unsigned int yorder) 174 | { 175 | // Compute Scharr filter 176 | cv::Scharr(src,dst,CV_32F,xorder,yorder,1,0,cv::BORDER_DEFAULT); 177 | } 178 | 179 | //************************************************************************************* 180 | //************************************************************************************* 181 | 182 | /** 183 | * @brief This function computes Gaussian image derivatives up to second order 184 | * @param src Input image 185 | * @param smooth Smoothed version of the input image 186 | * @param Lx First order image derivative in X-direction (horizontal) 187 | * @param Ly First order image derivative in Y-direction (vertical) 188 | * @param Lxy Second order cross image derivative 189 | * @param Lxx Second order image derivative in X-direction (horizontal) 190 | * @param Lyy Second order image derivative in Y-direction (vertical) 191 | * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel 192 | * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel 193 | * @param sigma Standard deviation of the Gaussian kernel 194 | */ 195 | void Compute_Gaussian_2D_Derivatives(const cv::Mat &src, cv::Mat &smooth,cv::Mat &Lx, cv::Mat &Ly, 196 | cv::Mat &Lxy, cv::Mat &Lxx, cv::Mat &Lyy, 197 | unsigned int ksize_x, unsigned int ksize_y, float sigma ) 198 | { 199 | // Firstly, convolve the original image with a Gaussian kernel 200 | Gaussian_2D_Convolution(src,smooth,ksize_x,ksize_y,sigma); 201 | 202 | Image_Derivatives_Scharr(src,Lx,1,0); 203 | 
Image_Derivatives_Scharr(src,Ly,0,1); 204 | Image_Derivatives_Scharr(Lx,Lxx,1,0); 205 | Image_Derivatives_Scharr(Ly,Lyy,0,1); 206 | Image_Derivatives_Scharr(Lx,Lxy,0,1); 207 | 208 | // In case we use natural coordinates 209 | if( use_natural_coordinates == true ) 210 | { 211 | Lx = Lx*sigma; 212 | Ly = Ly*sigma; 213 | Lxx = Lxx*sigma*sigma; 214 | Lyy = Lyy*sigma*sigma; 215 | Lxy = Lxy*sigma*sigma; 216 | } 217 | } 218 | 219 | //************************************************************************************* 220 | //************************************************************************************* 221 | 222 | /** 223 | * @brief This function computes the Perona and Malik conductivity coefficient g1 224 | * g1 = exp(-|dL|^2/k^2) 225 | * @param src Input image 226 | * @param dst Output image 227 | * @param Lx First order image derivative in X-direction (horizontal) 228 | * @param Ly First order image derivative in Y-direction (vertical) 229 | * @param k Contrast factor parameter 230 | */ 231 | void PM_G1(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k ) 232 | { 233 | //cv::exp(-(Lx.mul(Lx) + Ly.mul(Ly))/(k*k),dst); 234 | int N = Lx.rows * Lx.cols; 235 | float lx = 0.0, ly = 0.0, k2 = k*k; 236 | 237 | for (int i = 0; i < N; i++) 238 | { 239 | lx = *(Lx.ptr(0)+i); 240 | ly = *(Ly.ptr(0)+i); 241 | lx *= lx; 242 | ly *= ly; 243 | *(dst.ptr(0)+i) = std::exp( -(lx + ly)/k2 ); 244 | } 245 | 246 | } 247 | 248 | //************************************************************************************* 249 | //************************************************************************************* 250 | 251 | /** 252 | * @brief This function computes the Perona and Malik conductivity coefficient g2 253 | * g2 = 1 / (1 + dL^2 / k^2) 254 | * @param src Input image 255 | * @param dst Output image 256 | * @param Lx First order image derivative in X-direction (horizontal) 257 | * @param Ly First order image derivative in Y-direction (vertical) 258 | * @param k 
Contrast factor parameter 259 | */ 260 | void PM_G2(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k ) 261 | { 262 | //dst = 1./(1. + (Lx.mul(Lx) + Ly.mul(Ly))/(k*k)); 263 | int N = Lx.rows * Lx.cols; 264 | float lx = 0.0, ly = 0.0, k2 = k*k; 265 | 266 | for (int i = 0; i < N; i++) 267 | { 268 | lx = *(Lx.ptr(0)+i); 269 | ly = *(Ly.ptr(0)+i); 270 | lx *= lx; 271 | ly *= ly; 272 | *(dst.ptr(0)+i) = 1.0 / (1.0 + (lx + ly)/k2); 273 | } 274 | 275 | } 276 | 277 | //************************************************************************************* 278 | //************************************************************************************* 279 | 280 | /** 281 | * @brief This function computes Weickert conductivity coefficient g3 282 | * @param src Input image 283 | * @param dst Output image 284 | * @param Lx First order image derivative in X-direction (horizontal) 285 | * @param Ly First order image derivative in Y-direction (vertical) 286 | * @param k Contrast factor parameter 287 | * @note For more information check the following paper: J. 
Weickert 288 | * Applications of nonlinear diffusion in image processing and computer vision, 289 | * Proceedings of Algorithmy 2000 290 | */ 291 | void Weickert_Diffusivity(const cv::Mat &src, cv::Mat &dst, cv::Mat &Lx, cv::Mat &Ly, float k ) 292 | { 293 | //cv::Mat modg; 294 | //cv::pow((Lx.mul(Lx) + Ly.mul(Ly))/(k*k),4,modg); 295 | //cv::exp(-3.315/modg, dst); 296 | //dst = 1.0 - dst; 297 | 298 | int N = Lx.rows * Lx.cols; 299 | float lx2 = 0.0, ly2 = 0.0, modg = 0.0; 300 | const float k2 = k*k; 301 | 302 | for (int i = 0; i < N; i++) 303 | { 304 | lx2 = *(Lx.ptr(0)+i); 305 | ly2 = *(Ly.ptr(0)+i); 306 | lx2 *= lx2; 307 | ly2 *= ly2; 308 | modg = std::pow( (lx2 + ly2)/k2, 4 ); 309 | *(dst.ptr(0)+i) = 1.0 - std::exp( -3.315/modg ); 310 | } 311 | 312 | } 313 | 314 | //************************************************************************************* 315 | //************************************************************************************* 316 | 317 | /** 318 | * @brief This function computes a good empirical value for the k contrast factor 319 | * given an input image, the percentile (0-1), the gradient scale and the number of 320 | * bins in the histogram 321 | * @param img Input image 322 | * @param perc Percentile of the image gradient histogram (0-1) 323 | * @param gscale Scale for computing the image gradient histogram 324 | * @param nbins Number of histogram bins 325 | * @param ksize_x Kernel size in X-direction (horizontal) for the Gaussian smoothing kernel 326 | * @param ksize_y Kernel size in Y-direction (vertical) for the Gaussian smoothing kernel 327 | * @return k contrast factor 328 | * @note vectors are used to compute the histogram and thus improves efficiency (by Yuhua Zou) 329 | */ 330 | float Compute_K_Percentile(const cv::Mat &img, float perc, float gscale, unsigned int nbins, unsigned int ksize_x, unsigned int ksize_y) 331 | { 332 | float kperc = 0.0, modg = 0.0, lx = 0.0, ly = 0.0; 333 | unsigned int nbin = 0, nelements = 0, nthreshold = 
0, k = 0; 334 | float hmax = 0.0; // maximum gradient 335 | int npoints = 0.0; // number of points of which gradient greater than zero 336 | 337 | // Create the array for the histogram 338 | std::vector hist(nbins,0); 339 | std::vector Mo; 340 | 341 | // Create the matrices 342 | cv::Mat gaussian = cv::Mat::zeros(img.rows,img.cols,CV_32F); 343 | cv::Mat Lx = cv::Mat::zeros(img.rows,img.cols,CV_32F); 344 | cv::Mat Ly = cv::Mat::zeros(img.rows,img.cols,CV_32F); 345 | 346 | // Perform the Gaussian convolution 347 | Gaussian_2D_Convolution(img,gaussian,ksize_x,ksize_y,gscale); 348 | 349 | // Compute the Gaussian derivatives Lx and Ly 350 | Image_Derivatives_Scharr(gaussian,Lx,1,0); 351 | Image_Derivatives_Scharr(gaussian,Ly,0,1); 352 | 353 | // Get the maximum 354 | cv::Mat Lx1 = Lx.rowRange(1,Lx.rows-1).colRange(1,Lx.cols-1); 355 | cv::Mat Ly1 = Ly.rowRange(1,Ly.rows-1).colRange(1,Ly.cols-1); 356 | int N = Lx1.rows*Lx1.cols; 357 | 358 | for( int j = 0; j < N; j++ ) 359 | { 360 | lx = *(Lx.ptr(0)+j); 361 | ly = *(Ly.ptr(0)+j); 362 | if (!lx && !ly) 363 | continue; 364 | 365 | modg = sqrt(lx*lx + ly*ly); 366 | 367 | Mo.push_back(modg); 368 | } 369 | 370 | hmax = *std::max_element(Mo.begin(), Mo.end()); 371 | 372 | // Compute the histogram 373 | float hmax1 = 1.00001*hmax; 374 | npoints = Mo.size(); 375 | 376 | for (int i = 0; i < npoints; i++) 377 | { 378 | nbin = floor(nbins*(Mo[i]/hmax1)); 379 | 380 | hist[nbin]++; 381 | } 382 | 383 | // Now find the perc of the histogram percentile 384 | nthreshold = (unsigned int)(npoints*perc); 385 | 386 | // find the bin (k) in which accumulated points are greater than 70% (perc) of total valid points (npoints) 387 | for( k = 0; nelements < nthreshold && k < nbins; k++) 388 | { 389 | nelements = nelements + hist[k]; 390 | } 391 | 392 | if( nelements < nthreshold ) 393 | { 394 | kperc = 0.03; 395 | } 396 | else 397 | { 398 | kperc = hmax*(k/(float)nbins); 399 | } 400 | 401 | return kperc; 402 | } 403 | 404 | 
//*************************************************************************************
//*************************************************************************************

/**
 * @brief This function computes Scharr image derivatives
 * @param src Input image
 * @param dst Output image
 * @param xorder Derivative order in X-direction (horizontal)
 * @param yorder Derivative order in Y-direction (vertical)
 * @param scale Scale factor or derivative size
 * @note the if block for border check has been replaced by two index mapping vectors
 * to improve efficiency (by Yuhua Zou)
 */

void Compute_Scharr_Derivatives(const cv::Mat &src, cv::Mat &dst, int xorder, int yorder, int scale )
{
    // Row/column coordinates of the six taps of the scaled 3x3 Scharr stencil
    int a_i = 0, b_i = 0, c_i = 0, d_i = 0, e_i = 0, f_i = 0;
    int a_j = 0, b_j = 0, c_j = 0, d_j = 0, e_j = 0, f_j = 0;
    float sum_pos = 0.0, sum_neg = 0.0, w = 0.0, norm = 0.0;

    // Values for the Scharr kernel: center weight 10/3, corner weight 1,
    // normalised so that the weights of one side sum to 1/(2*scale)
    w = 10.0/3.0;
    norm = 1.0/(2.0*scale*(w+2.0));

    // Build reflect-border index maps: imap/jmap translate coordinates of a
    // virtually padded image (pad = scale on each side) back to valid source
    // rows/columns, mirroring at the borders. This removes per-pixel bounds
    // checks from the inner loops below.
    int rows = src.rows, cols = src.cols;
    vector<int> imap(rows+2*scale,0), jmap(cols+2*scale,0);
    for ( int i = 0; i < scale; i++ )
        imap[i] = scale-i;                      // mirror above the top edge
    for ( int i = scale; i < rows+scale; i++ )
        imap[i] = i-scale;                      // identity for the interior
    for ( int i = rows+scale; i < imap.size(); i++ )
        imap[i] = rows-(i-rows-scale)-1;        // mirror below the bottom edge
    for ( int i = 0; i < scale; i++ )
        jmap[i] = scale-i;
    for ( int i = scale; i < cols+scale; i++ )
        jmap[i] = i-scale;
    for ( int i = cols+scale; i < jmap.size(); i++ )
        jmap[i] = cols-(i-cols-scale)-1;

    // Horizontal derivative
    // Lx = (1/(2*scale))*(L(i,j+scale)-L(i,j-scale))
    if( xorder == 1 && yorder == 0 )
    {
        for( int i = 0; i < rows; i++ )
        {
            for( int j = 0; j < cols; j++ )
            {
                // a,c,e: left column of the stencil; b,d,f: right column.
                // Indices are taken in padded coordinates, hence the
                // j .. j+2*scale span around the current pixel.
                sum_pos = sum_neg = 0.0;
                a_i = imap[i];             a_j = jmap[j];
                b_i = imap[i];             b_j = jmap[j+scale+scale];
                c_i = imap[i+scale];       c_j = jmap[j];
                d_i = imap[i+scale];       d_j = jmap[j+scale+scale];
                e_i = imap[i+scale+scale]; e_j = jmap[j];
                f_i = imap[i+scale+scale]; f_j = jmap[j+scale+scale];

                sum_pos += w*(*(src.ptr<float>(d_i)+d_j));
                sum_pos += (*(src.ptr<float>(b_i)+b_j));
                sum_pos += (*(src.ptr<float>(f_i)+f_j));

                sum_neg += w*(*(src.ptr<float>(c_i)+c_j));
                sum_neg += (*(src.ptr<float>(a_i)+a_j));
                sum_neg += (*(src.ptr<float>(e_i)+e_j));

                *(dst.ptr<float>(i)+j) = norm*(sum_pos - sum_neg);
            }
        }
    }
    // Vertical derivative
    // Ly = (1/(2*scale))*(L(i+scale,j)-L(i-scale,j))
    else if( xorder == 0 && yorder == 1 )
    {
        for( int j = 0; j < cols; j++ )
        {
            for( int i = 0; i < rows; i++ )
            {
                // a,c,e: top row of the stencil; b,d,f: bottom row
                sum_pos = sum_neg = 0.0;
                a_i = imap[i];             a_j = jmap[j];
                b_i = imap[i+scale+scale]; b_j = jmap[j];
                c_i = imap[i];             c_j = jmap[j+scale];
                d_i = imap[i+scale+scale]; d_j = jmap[j+scale];
                e_i = imap[i];             e_j = jmap[j+scale+scale];
                f_i = imap[i+scale+scale]; f_j = jmap[j+scale+scale];

                sum_pos += w*(*(src.ptr<float>(d_i)+d_j));
                sum_pos += (*(src.ptr<float>(b_i)+b_j));
                sum_pos += (*(src.ptr<float>(f_i)+f_j));

                sum_neg += w*(*(src.ptr<float>(c_i)+c_j));
                sum_neg += (*(src.ptr<float>(a_i)+a_j));
                sum_neg += (*(src.ptr<float>(e_i)+e_j));

                *(dst.ptr<float>(i)+j) = norm*(sum_pos - sum_neg);
            }
        }
    }
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This function performs a scalar non-linear diffusion step
 * @param Ld2 Output image in the evolution
 * @param c Conductivity image
 * @param Ld1 Previous image in the evolution
 * @param stepsize The step size in time units
 * @note Forward Euler Scheme 3x3 stencil
 * The function c is a scalar value that depends on the gradient norm
 * dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy
 */
void NLD_Step_Scalar(cv::Mat &Ld2, const cv::Mat &Ld1, const cv::Mat &c, float stepsize)
{
    // Auxiliary variables
    float xpos = 0.0, xneg = 0.0, ypos = 0.0, yneg = 0.0;
    int ipos = 0, ineg = 0, jpos = 0, jneg = 0;

    for( int i = 0; i < Ld2.rows; i++ )
    {
        for( int j = 0; j < Ld2.cols; j++ )
        {
            // Clamp the 4-neighbour coordinates at the image borders
            // (replicate-border boundary condition)
            ineg = i-1;
            ipos = i+1;
            jneg = j-1;
            jpos = j+1;

            if( ineg < 0 ) ineg = 0;
            if( ipos >= Ld2.rows ) ipos = Ld2.rows-1;
            if( jneg < 0 ) jneg = 0;
            if( jpos >= Ld2.cols ) jpos = Ld2.cols-1;

            // Flux towards each neighbour: (c_p + c_q)*(L_q - L_p)
            xpos = ((*(c.ptr<float>(i)+j))+(*(c.ptr<float>(i)+jpos)))*((*(Ld1.ptr<float>(i)+jpos))-(*(Ld1.ptr<float>(i)+j)));
            xneg = ((*(c.ptr<float>(i)+jneg))+(*(c.ptr<float>(i)+j)))*((*(Ld1.ptr<float>(i)+j))-(*(Ld1.ptr<float>(i)+jneg)));

            ypos = ((*(c.ptr<float>(i)+j))+(*(c.ptr<float>(ipos)+j)))*((*(Ld1.ptr<float>(ipos)+j))-(*(Ld1.ptr<float>(i)+j)));
            yneg = ((*(c.ptr<float>(ineg)+j))+(*(c.ptr<float>(i)+j)))*((*(Ld1.ptr<float>(i)+j))-(*(Ld1.ptr<float>(ineg)+j)));

            // Explicit Euler update; the 0.5 averages the two conductivities
            *(Ld2.ptr<float>(i)+j) = *(Ld1.ptr<float>(i)+j) + 0.5*stepsize*(xpos-xneg+ypos-yneg);
        }
    }
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This function checks if a given pixel is a maximum in a local neighbourhood
 * @param img Input image where we will perform the maximum search
 * @param dsize Half size of the neighbourhood
 * @param value Response value at (x,y) position
 * @param row Image row coordinate
 * @param col Image column coordinate
 * @param same_img Flag to indicate if the image value at (x,y) is in the input image
 * @return 1->is maximum, 0->otherwise
 */
bool Check_Maximum_Neighbourhood(cv::Mat &img, int dsize, float value, int row, int col, bool same_img )
{
    bool response = true;

    for( int i = row-dsize; i <= row+dsize; i++ )
    {
        for( int j = col-dsize; j <= col+dsize; j++ )
        {
            // Only compare against pixels inside the image
            if( i >= 0 && i < img.rows && j >= 0 && j < img.cols )
            {
                if( same_img == true )
                {
                    // The centre pixel itself must not veto the maximum
                    if( i != row || j != col )
                    {
                        if( (*(img.ptr<float>(i)+j)) > value )
                        {
                            response = false;
                            return response;
                        }
                    }
                }
                else
                {
                    if( (*(img.ptr<float>(i)+j)) > value )
                    {
                        response = false;
                        return response;
                    }
                }
            }
        }
    }

    return response;
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This function checks if a given pixel is a minimum in a local neighbourhood
 * @param img Input image where we will perform the minimum search
 * @param dsize Half size of the neighbourhood
 * @param value Response value at (x,y) position
 * @param row Image row coordinate
 * @param col Image column coordinate
 * @param same_img Flag to indicate if the image value at (x,y) is in the input image
 * @return 1->is a minimum, 0->otherwise
 */
bool Check_Minimum_Neighbourhood(cv::Mat &img, int dsize, float value, int row, int col, bool same_img )
{
    bool response = true;

    for( int i = row-dsize; i <= row+dsize; i++ )
    {
        for( int j = col-dsize; j <= col+dsize; j++ )
        {
            // Only compare against pixels inside the image
            if( i >= 0 && i < img.rows && j >= 0 && j < img.cols )
            {
                if( same_img == true )
                {
                    // The centre pixel itself must not veto the minimum.
                    // NOTE(review): the comparison uses <= (not <), so a tie
                    // with any neighbour rejects the point -- confirm intended.
                    if( i != row || j != col )
                    {
                        if( (*(img.ptr<float>(i)+j)) <= value )
                        {
                            response = false;
                            return response;
                        }
                    }
                }
                else
                {
                    if( (*(img.ptr<float>(i)+j)) <= value )
                    {
                        response = false;
                        return response;
                    }
                }
            }
        }
    }

    return response;
}
-------------------------------------------------------------------------------- /KAZE/kaze.cpp: --------------------------------------------------------------------------------

//=============================================================================
//
// KAZE.cpp
// Author: Pablo F. Alcantarilla
// Institution: University d'Auvergne
// Address: Clermont Ferrand, France
// Date: 21/01/2012
// Email: pablofdezalc@gmail.com
//
// KAZE Features Copyright 2012, Pablo F. Alcantarilla
// All Rights Reserved
// See LICENSE for the license information
//=============================================================================

/**
 * @file KAZE.cpp
 * @brief Main class for detecting and describing features in a nonlinear
 * scale space
 * @date Jan 21, 2012
 * @author Pablo F. Alcantarilla
 * @update 2013-03-28 by Yuhua Zou
 *             Code optimization has been implemented via using
 *             OpenMP and Boost Thread for multi-threading,
 *             changing the way to access matrix elements, etc.
 */

#include "kaze.h"
#include "kaze_config.h"
#include <iostream>   // NOTE(review): header names were lost in extraction -- confirm against upstream
#include <cmath>      // NOTE(review): header names were lost in extraction -- confirm against upstream

#if HAVE_BOOST_THREADING
#include <boost/thread/thread.hpp>   // NOTE(review): boost header names lost in extraction -- confirm
#include <boost/thread/mutex.hpp>
#include <boost/bind.hpp>
#endif

// Namespaces
using namespace std;

/**
 * @brief KAZE default constructor
 * @note The constructor does not allocate memory for the nonlinear scale space
 */
//KAZE::KAZE(void)
//{
//    soffset = DEFAULT_SCALE_OFFSET;
//    sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES;
//    omax = DEFAULT_OCTAVE_MAX;
//    nsublevels = DEFAULT_NSUBLEVELS;
//    save_scale_space = DEFAULT_SAVE_SCALE_SPACE;
//    verbosity = DEFAULT_VERBOSITY;
//    kcontrast = DEFAULT_KCONTRAST;
//    descriptor_mode = DEFAULT_DESCRIPTOR_MODE;
//    use_upright = DEFAULT_UPRIGHT;
//    use_extended = DEFAULT_EXTENDED;
//    diffusivity = DEFAULT_DIFFUSIVITY_TYPE;
//    tkcontrast = 0.0;
//    tnlscale = 0.0;
//    tdetector = 0.0;
//    tmderivatives = 0.0;
//    tdescriptor = 0.0;
//    tsubpixel = 0.0;
//    img_width = 0;
//    img_height = 0;
//}

//*******************************************************************************
//*******************************************************************************

/**
 * @brief KAZE constructor with input options
 * @param options KAZE configuration options
 * @note The constructor allocates memory for the nonlinear scale space
 */
KAZE::KAZE(toptions &options)
{
    // Copy the user-supplied configuration
    img_width = options.img_width;
    img_height = options.img_height;
    soffset = options.soffset;
    sderivatives = options.sderivatives;
    omax = options.omax;
    nsublevels = options.nsublevels;
    save_scale_space = options.save_scale_space;
    verbosity = options.verbosity;
    dthreshold = options.dthreshold;
    diffusivity = options.diffusivity;
    descriptor_mode = options.descriptor;
    use_upright = options.upright;
    use_extended = options.extended;

    // Reset the contrast factor and the timing counters
    kcontrast = DEFAULT_KCONTRAST;
    tkcontrast = 0.0;
    tnlscale = 0.0;
    tdetector = 0.0;
    tmderivatives = 0.0;
    tdresponse = 0.0;
    tdescriptor = 0.0;

    // If omax is not positive, derive the number of octaves from the image
    // size (log2 of the smaller dimension, minus 2). This overwrites the
    // value copied above.
    omax = options.omax > 0 ? options.omax : cvRound(std::log( (double)std::min( options.img_height, options.img_width ) ) / std::log(2.) - 2);
    //dthreshold = DEFAULT_DETECTOR_THRESHOLD + floorf( img_width/256.0f ) * 0.0015;

    // Now allocate memory for the evolution
    Allocate_Memory_Evolution();
}

//*******************************************************************************
//*******************************************************************************

/**
 * @brief This method allocates the memory for the nonlinear diffusion evolution
 */
void KAZE::Allocate_Memory_Evolution(void)
{
    // Allocate the dimension of the matrices for the evolution:
    // omax octaves x nsublevels sublevels per octave
    for( int i = 0; i <= omax-1; i++ )
    {
        for( int j = 0; j <= nsublevels-1; j++ )
        {
            tevolution aux;
            aux.Lx = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Ly = cv::Mat::zeros(img_height,img_width,CV_32F);

            aux.Lxx = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lxy = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lyy = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lflow = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lt = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lsmooth = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lstep = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Ldet = cv::Mat::zeros(img_height,img_width,CV_32F);

            // esigma grows as soffset * 2^(octave + sublevel/nsublevels);
            // etime is the diffusion time equivalent to that sigma (t = sigma^2/2)
            aux.esigma = soffset*pow((float)2.0,(float)(j)/(float)(nsublevels) + i);
            aux.etime = 0.5*(aux.esigma*aux.esigma);
            aux.sigma_size = fRound(aux.esigma);

            aux.octave = i;
            aux.sublevel = j;
            evolution.push_back(aux);
        }
    }

    // Allocate
memory for the auxiliary variables that are used in the AOS scheme
    // NOTE(review): Ltx is allocated transposed (width x height), apparently
    // for the column pass of the AOS solver -- confirm against the AOS code.
    Ltx = cv::Mat::zeros(img_width,img_height,CV_32F);
    Lty = cv::Mat::zeros(img_height,img_width,CV_32F);
    px = cv::Mat::zeros(img_height,img_width,CV_32F);
    py = cv::Mat::zeros(img_height,img_width,CV_32F);
    ax = cv::Mat::zeros(img_height,img_width,CV_32F);
    ay = cv::Mat::zeros(img_height,img_width,CV_32F);
    bx = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    by = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    qr = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    qc = cv::Mat::zeros(img_height,img_width-1,CV_32F);

}

//*******************************************************************************
//*******************************************************************************

/**
 * @brief This method creates the nonlinear scale space for a given image
 * @param img Input image for which the nonlinear scale space needs to be created
 * @return 0 if the nonlinear scale space was created successfully. -1 otherwise
 */
int KAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img)
{
    if( verbosity == true )
    {
        std::cout << "\n> Creating nonlinear scale space." << std::endl;
    }

    double t2 = 0.0, t1 = 0.0;

    // The evolution pyramid must have been allocated by the constructor
    if( evolution.size() == 0 )
    {
        std::cout << "---> Error generating the nonlinear scale space!!" << std::endl;
        std::cout << "---> Firstly you need to call KAZE::Allocate_Memory_Evolution()" << std::endl;
        return -1;
    }

    int64 start_t1 = cv::getTickCount();

    // Copy the original image to the first level of the evolution
    if( verbosity == true )
    {
        std::cout << "-> Perform the Gaussian smoothing." << std::endl;
    }

    img.copyTo(evolution[0].Lt);
    Gaussian_2D_Convolution(evolution[0].Lt,evolution[0].Lt,0,0,soffset);
    Gaussian_2D_Convolution(evolution[0].Lt,evolution[0].Lsmooth,0,0,sderivatives);

    // Firstly compute the kcontrast factor
    Compute_KContrast(evolution[0].Lt,KCONTRAST_PERCENTILE);

    t2 = cv::getTickCount();
    tkcontrast = 1000.0 * (t2 - start_t1) / cv::getTickFrequency();

    if( verbosity == true )
    {
        std::cout << "-> Computed K-contrast factor. Execution time (ms): " << tkcontrast << std::endl;
        std::cout << "-> Now computing the nonlinear scale space!!" << std::endl;
    }

    // Now generate the rest of evolution levels
    for( unsigned int i = 1; i < evolution.size(); i++ )
    {
        // Smooth the previous level before computing its derivatives
        Gaussian_2D_Convolution(evolution[i-1].Lt,evolution[i].Lsmooth,0,0,sderivatives);

        // Compute the Gaussian derivatives Lx and Ly
        Image_Derivatives_Scharr(evolution[i].Lsmooth,evolution[i].Lx,1,0);
        Image_Derivatives_Scharr(evolution[i].Lsmooth,evolution[i].Ly,0,1);

        // Compute the conductivity equation
        // (0: Perona-Malik g1, 1: Perona-Malik g2, 2: Weickert)
        if( diffusivity == 0 )
        {
            PM_G1(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }
        else if( diffusivity == 1 )
        {
            PM_G2(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }
        else if( diffusivity == 2 )
        {
            Weickert_Diffusivity(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }

        // Perform the evolution step with AOS, advancing by the time
        // difference between consecutive levels
#if HAVE_BOOST_THREADING
        AOS_Step_Scalar_Parallel(evolution[i].Lt,evolution[i-1].Lt,evolution[i].Lflow,evolution[i].etime-evolution[i-1].etime);
#else
        AOS_Step_Scalar(evolution[i].Lt,evolution[i-1].Lt,evolution[i].Lflow,evolution[i].etime-evolution[i-1].etime);
#endif

        if( verbosity == true )
        {
            std::cout << "--> Computed image evolution step " << i << " Evolution time: " << evolution[i].etime <<
                " Sigma: " << evolution[i].esigma << std::endl;
        }
    }


    t2 = cv::getTickCount();
    tnlscale = 1000.0*(t2-start_t1) / cv::getTickFrequency();

    if( verbosity == true )
    {
        std::cout << "> Computed the nonlinear scale space. Execution time (ms): " << tnlscale << std::endl;
    }

    return 0;
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method computes the k contrast factor
 * @param img Input image
 * @param kpercentile Percentile of the gradient histogram
 */
void KAZE::Compute_KContrast(const cv::Mat &img, const float &kpercentile)
{
    if( verbosity == true )
    {
        std::cout << "-> Computing Kcontrast factor." << std::endl;
    }

    // When COMPUTE_KCONTRAST is disabled the default kcontrast set in the
    // constructor is kept
    if( COMPUTE_KCONTRAST == true )
    {
        kcontrast = Compute_K_Percentile(img,kpercentile,sderivatives,KCONTRAST_NBINS,0,0);
    }

    if( verbosity == true )
    {
        std::cout << "--> kcontrast = " << kcontrast << std::endl;
    }
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method computes the multiscale derivatives for the nonlinear scale space
 */
void KAZE::Compute_Multiscale_Derivatives(void)
{
    int64 t1 = cv::getTickCount();

    int N = img_width * img_height;

    // Scale levels are independent of each other -> process them in parallel
    #pragma omp parallel for
    for( int i = 0; i < evolution.size(); i++ )
    {
        if( verbosity == true )
        {
            std::cout << "--> Multiscale derivatives. 
Sigma ("<< i <<"): " << evolution[i].sigma_size << ". Thread: " << omp_get_thread_num() << std::endl;
        }

        // Compute multiscale derivatives for the detector
        Compute_Scharr_Derivatives(evolution[i].Lsmooth,evolution[i].Lx,1,0,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lsmooth,evolution[i].Ly,0,1,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lx,evolution[i].Lxx,1,0,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Ly,evolution[i].Lyy,0,1,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lx,evolution[i].Lxy,0,1,evolution[i].sigma_size);

        // Scale-normalise the derivatives: first order by sigma,
        // second order by sigma^2
        int esigma = evolution[i].sigma_size, esigma2 = esigma*esigma;
        for ( int j = 0; j < N; j++ )
        {
            *( evolution[i].Lx.ptr<float>(0)+j ) *= esigma;
            *( evolution[i].Ly.ptr<float>(0)+j ) *= esigma;
            *( evolution[i].Lxx.ptr<float>(0)+j ) *= esigma2;
            *( evolution[i].Lxy.ptr<float>(0)+j ) *= esigma2;
            *( evolution[i].Lyy.ptr<float>(0)+j ) *= esigma2;
        }
    }

    int64 t2 = cv::getTickCount();
    tmderivatives = 1000.0 * (t2-t1) / cv::getTickFrequency();

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method computes the feature detector response for the nonlinear scale space
 * @note We use the Hessian determinant as feature detector
 */
void KAZE::Compute_Detector_Response(void)
{
    float lxx = 0.0, lxy = 0.0, lyy = 0.0;
    float *ptr;

    int64 t1 = cv::getTickCount(), t2 = 0;

    // Firstly compute the multiscale derivatives
    Compute_Multiscale_Derivatives();

    t2 = cv::getTickCount();
    tdresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed multiscale derivatives. Execution time (ms): " << tdresponse << std::endl;
    }
    t1 = cv::getTickCount();

    // Ldet = Lxx*Lyy - Lxy^2 (determinant of the scale-normalised Hessian)
    int N = img_width * img_height;
    for( int i = 0; i < evolution.size(); i++ )
    {
        for( int jx = 0; jx < N; jx++ )
        {
            // Get values of lxx,lxy,and lyy
            ptr = evolution[i].Lxx.ptr<float>(0);
            lxx = ptr[jx];

            ptr = evolution[i].Lxy.ptr<float>(0);
            lxy = ptr[jx];

            ptr = evolution[i].Lyy.ptr<float>(0);
            lyy = ptr[jx];

            // Compute ldet
            ptr = evolution[i].Ldet.ptr<float>(0);
            ptr[jx] = (lxx*lyy-lxy*lxy);
        }
    }

    t2 = cv::getTickCount();
    tdresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed Hessian determinant. Execution time (ms): " << tdresponse << std::endl;
    }
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method selects interesting keypoints through the nonlinear scale space
 */
void KAZE::Feature_Detection(std::vector<Ipoint> &kpts)
{
    if( verbosity == true )
    {
        std::cout << "\n> Detecting features. " << std::endl;
    }
    int64 t1 = cv::getTickCount(), t2 = 0;

    // Firstly compute the detector response for each pixel and scale level
    Compute_Detector_Response();

    t2 = cv::getTickCount();
    double tresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed detector response. Execution time (ms):" << tresponse << std::endl;
    }
    int64 t13 = cv::getTickCount();

    // Find scale space extrema
    Determinant_Hessian_Parallel(kpts);

    t2 = cv::getTickCount();
    double thessian = 1000.0 * (t2-t13) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed Hessian determinant. Execution time (ms):" << thessian << std::endl;
    }

    // Perform some subpixel refinement
    if( SUBPIXEL_REFINEMENT == true )
    {
        Do_Subpixel_Refinement(kpts);
    }

    t2 = cv::getTickCount();
    tdetector = 1000.0*(t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "> Feature detection done. Execution time (ms): " << tdetector << std::endl;
    }

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method performs the detection of keypoints by using the normalized
 * score of the Hessian determinant through the nonlinear scale space
 * @note We compute features for each of the nonlinear scale space level in a different processing thread
 */
void KAZE::Determinant_Hessian_Parallel(std::vector<Ipoint> &kpts)
{
    // NOTE(review): several of the locals below are unused in this parallel
    // version; they look like leftovers from a sequential implementation.
    unsigned int level = 0;
    float dist = 0.0, smax = 3.0;
    int npoints = 0, id_repeated = 0;
    int left_x = 0, right_x = 0, up_y = 0, down_y = 0;
    bool is_extremum = false, is_repeated = false, is_out = false;
    int64 t1 = cv::getTickCount(), t2 = 0;

    // Delete the memory of the vector of keypoints vectors
    // In case we use the same kaze object for multiple images
    // (one slot per searched level: the first and last levels are skipped)
    vector<vector<Ipoint> >(evolution.size()-2, vector<Ipoint>()).swap(kpts_par);

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Init kpts_par time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << std::endl;
    }
    t1 = cv::getTickCount();

    // Find extremum at each scale level
#if HAVE_BOOST_THREADING
    // Create multi-thread
    boost::thread_group mthreads;

    for( unsigned int i = 1; i < evolution.size()-1; i++ )
    {
        // Create the thread for finding extremum at i scale level
        mthreads.create_thread(boost::bind(&KAZE::Find_Extremum_Threading,this,i));
    }

    // Wait for the threads
    mthreads.join_all();
#else
    #pragma omp parallel for
    for( int n = 1; n < evolution.size()-1; n++ )
    {
        Find_Extremum_Threading(n);
    }
#endif

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Find extremum time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << std::endl;
    }
    t1 = cv::getTickCount();

    // Now fill the vector of keypoints
    // Duplicate keypoints will be filtered out after
    // the whole Feature Detection procedure is finished
    for( int i = 0; i < kpts_par.size(); i++ )
    {
        for( int j = 0; j < kpts_par[i].size(); j++ )
        {
            kpts.push_back(kpts_par[i][j]);
        }
    }
    npoints = kpts.size();

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Fill the vector of keypoints time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << ". kpts size: " << kpts.size() << std::endl;
    }

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method is called by the thread which is responsible of finding extrema
 * at a given nonlinear scale level
 * @param level Index in the nonlinear scale space evolution
 */
void KAZE::Find_Extremum_Threading(int level)
{
    float value = 0.0, smax = 3.0;
    bool is_extremum = false;

    // Skip a border proportional to the maximum sampling scale so that the
    // descriptor window always stays inside the image
    int border = fRound(smax * evolution[level].esigma) + 1;
    int ix = border, jx = border;
    while (ix < img_height-border)
    {
        jx = border;
        while (jx < img_width-border)
        {
            is_extremum = false;
            value = *(evolution[level].Ldet.ptr<float>(ix)+jx);

            // Filter the points with the detector threshold
            if( value > dthreshold && value >= DEFAULT_MIN_DETECTOR_THRESHOLD )
            {
                // Cheap pre-test against the left neighbour before the full
                // 3x3x3 neighbourhood checks
                if( value >= *(evolution[level].Ldet.ptr<float>(ix)+jx-1) )
                {
                    // First check on the same scale
                    if( Check_Maximum_Neighbourhood(evolution[level].Ldet,1,value,ix,jx,1))
                    {
                        // Now check on the lower scale
                        if( Check_Maximum_Neighbourhood(evolution[level-1].Ldet,1,value,ix,jx,0) )
                        {
                            // Now check on the upper scale
                            if( Check_Maximum_Neighbourhood(evolution[level+1].Ldet,1,value,ix,jx,0) )
                            {
                                is_extremum = true;
                            }
                        }
                    }
                }
            }

            // Add the point of interest!!
            if( is_extremum == true )
            {
                Ipoint point;
                point.xf = jx; point.yf = ix;
                point.x = jx; point.y = ix;
                point.dresponse = fabs(value);
                point.scale = evolution[level].esigma;
                point.sigma_size = evolution[level].sigma_size;
                point.tevolution = evolution[level].etime;
                point.octave = evolution[level].octave;
                point.sublevel = evolution[level].sublevel;
                point.level = level;
                point.descriptor_mode = descriptor_mode;
                point.angle = 0.0;

                // Set the sign of the laplacian (trace of the Hessian)
                if( (*(evolution[level].Lxx.ptr<float>(ix)+jx) + *(evolution[level].Lyy.ptr<float>(ix)+jx)) > 0 )
                {
                    point.laplacian = 0;
                }
                else
                {
                    point.laplacian = 1;
                }

                // kpts_par slot 0 corresponds to scale level 1
                // (levels 0 and evolution.size()-1 are never searched)
                kpts_par[level-1].push_back(point);
            }
            jx++;
        }
        ix++;
    }
}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method performs subpixel refinement of the detected keypoints
 */
void KAZE::Do_Subpixel_Refinement(std::vector<Ipoint> &keypts)
{

    float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0;
    float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0;
    int x = 0, y = 0, step = 1;
    cv::Mat A = cv::Mat::zeros(3,3,CV_32F);
    cv::Mat b = cv::Mat::zeros(3,1,CV_32F);
    cv::Mat dst = cv::Mat::zeros(3,1,CV_32F);


    int64 t1 = cv::getTickCount();

    // Fit a 3D quadratic to the determinant response around each keypoint
    // (x, y, scale) and move the keypoint to the fitted extremum.
    // Note: level+1/level-1 accesses are safe because keypoints are only
    // detected on levels 1 .. evolution.size()-2.
    for( unsigned int i = 0; i < keypts.size(); i++ )
    {
        x = keypts[i].x;
        y = keypts[i].y;

        // Compute the gradient (central differences in x, y and scale)
        Dx = (1.0/(2.0*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x+step)
            -*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x-step));
        Dy = (1.0/(2.0*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y+step)+x)
            -*(evolution[keypts[i].level].Ldet.ptr<float>(y-step)+x));
        Ds = 0.5*(*(evolution[keypts[i].level+1].Ldet.ptr<float>(y)+x)
            -*(evolution[keypts[i].level-1].Ldet.ptr<float>(y)+x));

        // Compute the Hessian (second central differences)
        Dxx = (1.0/(step*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x+step)
            + *(evolution[keypts[i].level].Ldet.ptr<float>(y)+x-step)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x)));

        Dyy = (1.0/(step*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y+step)+x)
            + *(evolution[keypts[i].level].Ldet.ptr<float>(y-step)+x)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x)));

        Dss = *(evolution[keypts[i].level+1].Ldet.ptr<float>(y)+x)
            + *(evolution[keypts[i].level-1].Ldet.ptr<float>(y)+x)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr<float>(y)+x));

        // Mixed second derivatives
        Dxy = (1.0/(4.0*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y+step)+x+step)
            +(*(evolution[keypts[i].level].Ldet.ptr<float>(y-step)+x-step)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level].Ldet.ptr<float>(y-step)+x+step)
            +(*(evolution[keypts[i].level].Ldet.ptr<float>(y+step)+x-step)));

        Dxs = (1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr<float>(y)+x+step)
            +(*(evolution[keypts[i].level-1].Ldet.ptr<float>(y)+x-step)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr<float>(y)+x-step)
            +(*(evolution[keypts[i].level-1].Ldet.ptr<float>(y)+x+step)));

        Dys = (1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr<float>(y+step)+x)
            +(*(evolution[keypts[i].level-1].Ldet.ptr<float>(y-step)+x)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr<float>(y-step)+x)
            +(*(evolution[keypts[i].level-1].Ldet.ptr<float>(y+step)+x)));

        // Solve the linear system H * d = -grad for the subpixel offset d
        *(A.ptr<float>(0)) = Dxx;
        *(A.ptr<float>(1)+1) = Dyy;
        *(A.ptr<float>(2)+2) = Dss;

        *(A.ptr<float>(0)+1) = *(A.ptr<float>(1)) = Dxy;
        *(A.ptr<float>(0)+2) = *(A.ptr<float>(2)) = Dxs;
        *(A.ptr<float>(1)+2) = *(A.ptr<float>(2)+1) = Dys;

        *(b.ptr<float>(0)) = -Dx;
        *(b.ptr<float>(1)) = -Dy;
        *(b.ptr<float>(2)) = -Ds;

        cv::solve(A,b,dst,cv::DECOMP_LU);

        // Accept the refinement only if the offset stays within one pixel /
        // one sublevel of the detected position
        if( fabs(*(dst.ptr<float>(0))) <= 1.0
            && fabs(*(dst.ptr<float>(1))) <= 1.0
            && fabs(*(dst.ptr<float>(2))) <= 1.0 )
        {
            keypts[i].xf += *(dst.ptr<float>(0));
            keypts[i].yf += *(dst.ptr<float>(1));
            keypts[i].x = fRound(keypts[i].xf);
            keypts[i].y = fRound(keypts[i].yf);

            // Interpolate the scale from the refined sublevel position
            dsc = keypts[i].octave + (keypts[i].sublevel+*(dst.ptr<float>(2)))/((float)(DEFAULT_NSUBLEVELS));
            keypts[i].scale = soffset*pow((float)2.0,dsc);
        }
        // Delete the point since its not stable
        else
        {
            keypts[i].dresponse = 0; // Keypoints with zero response will be filtered out
        }
    }
    int64 t2 = cv::getTickCount();
    tsubpixel = 1000.0*(t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Subpixel refinement done. Execution time (ms): " << tsubpixel << std::endl;
    }

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method performs feature suppression based on 2D distance
 * @param kpts Vector of keypoints
 * @param mdist Maximum distance in pixels
 */
void KAZE::Feature_Suppression_Distance(std::vector<Ipoint> &kpts, float mdist)
{
    std::vector<Ipoint> aux;
    std::vector<int> to_delete;
    float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0;
    bool found = false;

    // Phase 1: for every pair closer than mdist, keep the keypoint with the
    // stronger detector response and mark the weaker one for deletion
    for( unsigned int i = 0; i < kpts.size(); i++ )
    {
        x1 = kpts[i].xf;
        y1 = kpts[i].yf;

        for( unsigned int j = i+1; j < kpts.size(); j++ )
        {
            x2 = kpts[j].xf;
            y2 = kpts[j].yf;

            dist = sqrt(pow(x1-x2,2)+pow(y1-y2,2));

            if( dist < mdist )
            {
                if( fabs(kpts[i].dresponse) >= fabs(kpts[j].dresponse) )
                {
                    to_delete.push_back(j);
                }
                else
                {
                    to_delete.push_back(i);
                    break;
                }
            }
        }
    }

for( unsigned int i = 0; i < kpts.size(); i++ ) 727 | { 728 | found = false; 729 | 730 | for( unsigned int j = 0; j < to_delete.size(); j++ ) 731 | { 732 | if( i == to_delete[j] ) 733 | { 734 | found = true; 735 | break; 736 | } 737 | } 738 | 739 | if( found == false ) 740 | { 741 | aux.push_back(kpts[i]); 742 | } 743 | } 744 | 745 | kpts.clear(); 746 | kpts = aux; 747 | aux.clear(); 748 | } 749 | 750 | //************************************************************************************* 751 | //************************************************************************************* 752 | 753 | /** 754 | * @brief This method computes the set of descriptors through the nonlinear scale space 755 | * @param kpts Vector of keypoints 756 | */ 757 | void KAZE::Feature_Description(std::vector &kpts) 758 | { 759 | if( verbosity == true ) 760 | { 761 | std::cout << "\n> Computing feature descriptors. " << std::endl; 762 | } 763 | 764 | int64 t1 = cv::getTickCount(); 765 | 766 | // It is not necessary to compute the orientation 767 | if( use_upright == true ) 768 | { 769 | // Compute the descriptor 770 | if( use_extended == false ) 771 | { 772 | if( descriptor_mode == 0 ) 773 | { 774 | #pragma omp parallel for 775 | for( int i = 0; i < kpts.size(); i++ ) 776 | { 777 | kpts[i].angle = 0.0; 778 | Get_SURF_Upright_Descriptor_64(kpts[i]); 779 | } 780 | } 781 | else if( descriptor_mode == 1 ) 782 | { 783 | #pragma omp parallel for 784 | for( int i = 0; i < kpts.size(); i++ ) 785 | { 786 | kpts[i].angle = 0.0; 787 | Get_MSURF_Upright_Descriptor_64(kpts[i]); 788 | } 789 | } 790 | else if( descriptor_mode == 2 ) 791 | { 792 | #pragma omp parallel for 793 | for( int i = 0; i < kpts.size(); i++ ) 794 | { 795 | kpts[i].angle = 0.0; 796 | Get_GSURF_Upright_Descriptor_64(kpts[i]); 797 | } 798 | } 799 | } 800 | else 801 | { 802 | if( descriptor_mode == 0 ) 803 | { 804 | #pragma omp parallel for 805 | for( int i = 0; i < kpts.size(); i++ ) 806 | { 807 | kpts[i].angle = 0.0; 808 | 
Get_SURF_Upright_Descriptor_128(kpts[i]); 809 | } 810 | } 811 | else if( descriptor_mode == 1 ) 812 | { 813 | #pragma omp parallel for 814 | for( int i = 0; i < kpts.size(); i++ ) 815 | { 816 | kpts[i].angle = 0.0; 817 | Get_MSURF_Upright_Descriptor_128(kpts[i]); 818 | } 819 | } 820 | else if( descriptor_mode == 2 ) 821 | { 822 | #pragma omp parallel for 823 | for( int i = 0; i < kpts.size(); i++ ) 824 | { 825 | kpts[i].angle = 0.0; 826 | Get_GSURF_Upright_Descriptor_128(kpts[i]); 827 | } 828 | } 829 | } 830 | } 831 | else 832 | { 833 | // Compute the descriptor 834 | if( use_extended == false ) 835 | { 836 | if( descriptor_mode == 0 ) 837 | { 838 | #pragma omp parallel for 839 | for( int i = 0; i < kpts.size(); i++ ) 840 | { 841 | Compute_Main_Orientation_SURF(kpts[i]); 842 | Get_SURF_Descriptor_64(kpts[i]); 843 | } 844 | } 845 | else if( descriptor_mode == 1 ) 846 | { 847 | #pragma omp parallel for 848 | for( int i = 0; i < kpts.size(); i++ ) 849 | { 850 | Compute_Main_Orientation_SURF(kpts[i]); 851 | Get_MSURF_Descriptor_64(kpts[i]); 852 | } 853 | } 854 | else if( descriptor_mode == 2 ) 855 | { 856 | #pragma omp parallel for 857 | for( int i = 0; i < kpts.size(); i++ ) 858 | { 859 | Compute_Main_Orientation_SURF(kpts[i]); 860 | Get_GSURF_Descriptor_64(kpts[i]); 861 | } 862 | } 863 | } 864 | else 865 | { 866 | if( descriptor_mode == 0 ) 867 | { 868 | #pragma omp parallel for 869 | for( int i = 0; i < kpts.size(); i++ ) 870 | { 871 | Compute_Main_Orientation_SURF(kpts[i]); 872 | Get_SURF_Descriptor_128(kpts[i]); 873 | } 874 | } 875 | else if( descriptor_mode == 1 ) 876 | { 877 | #pragma omp parallel for 878 | for( int i = 0; i < kpts.size(); i++ ) 879 | { 880 | Compute_Main_Orientation_SURF(kpts[i]); 881 | Get_MSURF_Descriptor_128(kpts[i]); 882 | } 883 | } 884 | else if( descriptor_mode == 2 ) 885 | { 886 | #pragma omp parallel for 887 | for( int i = 0; i < kpts.size(); i++ ) 888 | { 889 | Compute_Main_Orientation_SURF(kpts[i]); 890 | 
Get_GSURF_Descriptor_128(kpts[i]); 891 | } 892 | } 893 | } 894 | } 895 | 896 | int64 t2 = cv::getTickCount(); 897 | tdescriptor = 1000.0*(t2-t1) / cv::getTickFrequency(); 898 | if( verbosity == true ) 899 | { 900 | std::cout << "> Computed feature descriptors. Execution time (ms): " << tdescriptor << std::endl; 901 | } 902 | 903 | } 904 | 905 | //************************************************************************************* 906 | //************************************************************************************* 907 | 908 | /** 909 | * @brief This method computes the main orientation for a given keypoint 910 | * @param kpt Input keypoint 911 | * @note The orientation is computed using a similar approach as described in the 912 | * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006 913 | */ 914 | void KAZE::Compute_Main_Orientation_SURF(Ipoint &kpt) 915 | { 916 | int ix = 0, iy = 0, idx = 0, s = 0; 917 | unsigned int level = kpt.level; 918 | float xf = 0.0, yf = 0.0, gweight = 0.0; 919 | std::vector resX(109), resY(109), Ang(109); // 109 is the maximum grids of size 1 in a circle of radius 6 920 | 921 | // Variables for computing the dominant direction 922 | float sumX = 0.0, sumY = 0.0, bestX = 0.0, bestY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0; 923 | 924 | // Get the information from the keypoint 925 | xf = kpt.xf; 926 | yf = kpt.yf; 927 | s = kpt.scale; 928 | 929 | // Calculate derivatives responses for points within radius of 6*scale 930 | for(int i = -6; i <= 6; ++i) 931 | { 932 | for(int j = -6; j <= 6; ++j) 933 | { 934 | if(i*i + j*j < 36) // the grid is in the circle 935 | { 936 | iy = fRound(yf + j*s); 937 | ix = fRound(xf + i*s); 938 | 939 | if( iy >= 0 && iy < img_height && ix >= 0 && ix < img_width ) 940 | { 941 | gweight = gaussian(iy-yf,ix-xf,3.5*s); 942 | resX[idx] = gweight*(*(evolution[level].Lx.ptr(iy)+ix)); 943 | resY[idx] = gweight*(*(evolution[level].Ly.ptr(iy)+ix)); 944 | Ang[idx] = 
Get_Angle(resX[idx],resY[idx]); 945 | } 946 | else 947 | { 948 | resX[idx] = 0.0; 949 | resY[idx] = 0.0; 950 | Ang[idx] = 0.0; 951 | } 952 | 953 | ++idx; 954 | } 955 | } 956 | } 957 | 958 | // Loop slides pi/3 window around feature point 959 | for( ang1 = 0; ang1 < M2_PI; ang1+=0.15f) 960 | { 961 | ang2 =(ang1+PI/3.0f > M2_PI ? ang1-5.0f*PI/3.0f : ang1+PI/3.0f); 962 | sumX = sumY = 0.f; 963 | 964 | for( unsigned int k = 0; k < Ang.size(); ++k) 965 | { 966 | // Get angle from the x-axis of the sample point 967 | const float & ang = Ang[k]; 968 | 969 | // Determine whether the point is within the window 970 | if( ang1 < ang2 && ang1 < ang && ang < ang2) 971 | { 972 | sumX+=resX[k]; 973 | sumY+=resY[k]; 974 | } 975 | else if (ang2 < ang1 && 976 | ((ang > 0 && ang < ang2) || (ang > ang1 && ang < M2_PI) )) 977 | { 978 | sumX+=resX[k]; 979 | sumY+=resY[k]; 980 | } 981 | } 982 | 983 | // if the vector produced from this window is longer than all 984 | // previous vectors then this forms the new dominant direction 985 | float sumxy = sumX*sumX + sumY*sumY; 986 | if( sumxy > max ) 987 | { 988 | // store largest orientation 989 | max = sumxy; 990 | bestX = sumX, bestY = sumY; 991 | } 992 | } 993 | 994 | kpt.angle = Get_Angle(bestX, bestY); 995 | 996 | } 997 | 998 | //************************************************************************************* 999 | //************************************************************************************* 1000 | 1001 | /** 1002 | * @brief This method computes the upright descriptor (no rotation invariant) 1003 | * of the provided keypoint 1004 | * @param kpt Input keypoint 1005 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional 1006 | * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., 1007 | * Speeded Up Robust Features, ECCV, 2006 1008 | */ 1009 | void KAZE::Get_SURF_Upright_Descriptor_64(Ipoint &kpt) 1010 | { 1011 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; 1012 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; 1013 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1014 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1015 | int dsize = 0, level = 0; 1016 | 1017 | // Set the descriptor size and the sample and pattern sizes 1018 | dsize = kpt.descriptor_size = 64; 1019 | sample_step = 5; 1020 | pattern_size = 10; 1021 | 1022 | // Get the information from the keypoint 1023 | yf = kpt.yf; 1024 | xf = kpt.xf; 1025 | scale = kpt.scale; 1026 | level = kpt.level; 1027 | 1028 | // Allocate the memory for the vector 1029 | kpt.descriptor = vector(kpt.descriptor_size); 1030 | 1031 | // Calculate descriptor for this interest point 1032 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1033 | { 1034 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1035 | { 1036 | dx=dy=mdx=mdy=0.0; 1037 | 1038 | for(float k = i; k < i + sample_step; k+=0.5) 1039 | { 1040 | for(float l = j; l < j + sample_step; l+=0.5) 1041 | { 1042 | sample_y = k*scale + yf; 1043 | sample_x = l*scale + xf; 1044 | 1045 | y1 = (int)(sample_y-.5); 1046 | x1 = (int)(sample_x-.5); 1047 | 1048 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1049 | 1050 | y2 = (int)(sample_y+.5); 1051 | x2 = (int)(sample_x+.5); 1052 | 1053 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1054 | 1055 | fx = sample_x-x1; 1056 | fy = sample_y-y1; 1057 | 1058 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1059 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1060 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1061 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1062 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + 
(1.0-fx)*fy*res3 + fx*fy*res4; 1063 | 1064 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1065 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1066 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1067 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1068 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1069 | 1070 | // Sum the derivatives to the cumulative descriptor 1071 | dx += rx; 1072 | dy += ry; 1073 | mdx += fabs(rx); 1074 | mdy += fabs(ry); 1075 | } 1076 | } 1077 | 1078 | // Add the values to the descriptor vector 1079 | kpt.descriptor[dcount++] = dx; 1080 | kpt.descriptor[dcount++] = dy; 1081 | kpt.descriptor[dcount++] = mdx; 1082 | kpt.descriptor[dcount++] = mdy; 1083 | 1084 | // Store the current length^2 of the vector 1085 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; 1086 | } 1087 | } 1088 | 1089 | // convert to unit vector 1090 | len = sqrt(len); 1091 | 1092 | for(int i = 0; i < dsize; i++) 1093 | { 1094 | kpt.descriptor[i] /= len; 1095 | } 1096 | 1097 | if( USE_CLIPPING_NORMALIZATION == true ) 1098 | { 1099 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1100 | } 1101 | } 1102 | 1103 | //************************************************************************************* 1104 | //************************************************************************************* 1105 | 1106 | /** 1107 | * @brief This method computes the descriptor of the provided keypoint given the 1108 | * main orientation 1109 | * @param kpt Input keypoint 1110 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional 1111 | * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., 1112 | * Speeded Up Robust Features, ECCV, 2006 1113 | */ 1114 | void KAZE::Get_SURF_Descriptor_64(Ipoint &kpt) 1115 | { 1116 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; 1117 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; 1118 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 1119 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1120 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1121 | int dsize = 0, level = 0; 1122 | 1123 | // Set the descriptor size and the sample and pattern sizes 1124 | dsize = kpt.descriptor_size = 64; 1125 | sample_step = 5; 1126 | pattern_size = 10; 1127 | 1128 | // Get the information from the keypoint 1129 | yf = kpt.yf; 1130 | xf = kpt.xf; 1131 | scale = kpt.scale; 1132 | angle = kpt.angle; 1133 | level = kpt.level; 1134 | co = cos(angle); 1135 | si = sin(angle); 1136 | 1137 | // Allocate the memory for the vector 1138 | kpt.descriptor = vector(kpt.descriptor_size); 1139 | 1140 | // Calculate descriptor for this interest point 1141 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1142 | { 1143 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1144 | { 1145 | dx=dy=mdx=mdy=0.0; 1146 | 1147 | for(float k = i; k < i + sample_step; k+=0.5) 1148 | { 1149 | for(float l = j; l < j + sample_step; l+=0.5) 1150 | { 1151 | // Get the coordinates of the sample point on the rotated axis 1152 | sample_y = yf + (l*scale*co + k*scale*si); 1153 | sample_x = xf + (-l*scale*si + k*scale*co); 1154 | 1155 | y1 = (int)(sample_y-.5); 1156 | x1 = (int)(sample_x-.5); 1157 | 1158 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1159 | 1160 | y2 = (int)(sample_y+.5); 1161 | x2 = (int)(sample_x+.5); 1162 | 1163 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1164 | 1165 | fx = sample_x-x1; 1166 | fy = sample_y-y1; 1167 | 1168 | 1169 | res1 = 
*(evolution[level].Lx.ptr(y1)+x1); 1170 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1171 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1172 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1173 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1174 | 1175 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1176 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1177 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1178 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1179 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1180 | 1181 | // Get the x and y derivatives on the rotated axis 1182 | rry = rx*co + ry*si; 1183 | rrx = -rx*si + ry*co; 1184 | 1185 | // Sum the derivatives to the cumulative descriptor 1186 | dx += rrx; 1187 | dy += rry; 1188 | mdx += fabs(rrx); 1189 | mdy += fabs(rry); 1190 | } 1191 | } 1192 | 1193 | // Add the values to the descriptor vector 1194 | kpt.descriptor[dcount++] = dx; 1195 | kpt.descriptor[dcount++] = dy; 1196 | kpt.descriptor[dcount++] = mdx; 1197 | kpt.descriptor[dcount++] = mdy; 1198 | 1199 | // Store the current length^2 of the vector 1200 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; 1201 | } 1202 | } 1203 | 1204 | // convert to unit vector 1205 | len = sqrt(len); 1206 | 1207 | for(int i = 0; i < dsize; i++) 1208 | { 1209 | kpt.descriptor[i] /= len; 1210 | } 1211 | 1212 | if( USE_CLIPPING_NORMALIZATION == true ) 1213 | { 1214 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1215 | } 1216 | 1217 | } 1218 | 1219 | //************************************************************************************* 1220 | //************************************************************************************* 1221 | 1222 | /** 1223 | * @brief This method computes the upright descriptor (not rotation invariant) of 1224 | * the provided keypoint 1225 | * @param kpt Input keypoint 1226 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. 
The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void KAZE::Get_MSURF_Upright_Descriptor_64(Ipoint &kpt)
{
    float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0;
    int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
    int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int dsize = 0, level = 0;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5, cy = 0.5;

    // Set the descriptor size and the sample and pattern sizes
    dsize = kpt.descriptor_size = 64;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.yf;
    xf = kpt.xf;
    level = kpt.level;
    scale = kpt.scale;

    // Allocate the memory for the vector
    kpt.descriptor = std::vector<float>(kpt.descriptor_size);

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    while(i < pattern_size)
    {
        j = -8;
        i = i-4;

        cx += 1.0;
        cy = -0.5;

        while(j < pattern_size)
        {
            dx=dy=mdx=mdy=0.0;
            cy += 1.0;
            j = j-4;

            // Center of the current (overlapping) 9x9 subregion
            ky = i + sample_step;
            kx = j + sample_step;

            ys = yf + (ky*scale);
            xs = xf + (kx*scale);

            for(int k = i; k < i+9; k++)
            {
                for (int l = j; l < j+9; l++)
                {
                    sample_y = k*scale + yf;
                    sample_x = l*scale + xf;

                    //Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale);

                    y1 = (int)(sample_y-.5);
                    x1 = (int)(sample_x-.5);

                    Check_Descriptor_Limits(x1,y1,img_width,img_height);

                    y2 = (int)(sample_y+.5);
                    x2 = (int)(sample_x+.5);

                    Check_Descriptor_Limits(x2,y2,img_width,img_height);

                    fx = sample_x-x1;
                    fy = sample_y-y1;

                    // Bilinear interpolation of the first-order derivatives
                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;

                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;

                    rx = gauss_s1*rx;
                    ry = gauss_s1*ry;

                    // Sum the derivatives to the cumulative descriptor
                    dx += rx;
                    dy += ry;
                    mdx += fabs(rx);
                    mdy += fabs(ry);
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f);

            kpt.descriptor[dcount++] = dx*gauss_s2;
            kpt.descriptor[dcount++] = dy*gauss_s2;
            kpt.descriptor[dcount++] = mdx*gauss_s2;
            kpt.descriptor[dcount++] = mdy*gauss_s2;

            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    // convert to unit vector
    len = sqrt(len);

    for(int i = 0; i < dsize; i++)
    {
        kpt.descriptor[i] /= len;
    }

    if( USE_CLIPPING_NORMALIZATION == true )
    {
        Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
    }
}

//*************************************************************************************
1358 | //************************************************************************************* 1359 | 1360 | /** 1361 | * @brief This method computes the descriptor of the provided keypoint given the 1362 | * main orientation of the keypoint 1363 | * @param kpt Input keypoint 1364 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired 1365 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, 1366 | * ECCV 2008 1367 | */ 1368 | void KAZE::Get_MSURF_Descriptor_64(Ipoint &kpt) 1369 | { 1370 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; 1371 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; 1372 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 1373 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1374 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; 1375 | int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; 1376 | int dsize = 0, level = 0; 1377 | 1378 | // Subregion centers for the 4x4 gaussian weighting 1379 | float cx = -0.5, cy = 0.5; 1380 | 1381 | // Set the descriptor size and the sample and pattern sizes 1382 | dsize = kpt.descriptor_size = 64; 1383 | sample_step = 5; 1384 | pattern_size = 12; 1385 | 1386 | // Get the information from the keypoint 1387 | yf = kpt.yf; 1388 | xf = kpt.xf; 1389 | scale = kpt.scale; 1390 | angle = kpt.angle; 1391 | level = kpt.level; 1392 | co = cos(angle); 1393 | si = sin(angle); 1394 | 1395 | // Allocate the memory for the vector 1396 | kpt.descriptor = vector(kpt.descriptor_size); 1397 | 1398 | i = -8; 1399 | 1400 | // Calculate descriptor for this interest point 1401 | // Area of size 24 s x 24 s 1402 | while(i < pattern_size) 1403 | { 1404 | j = -8; 1405 | i = i-4; 1406 | 1407 | cx += 1.0; 1408 | cy = -0.5; 1409 | 1410 | while(j < pattern_size) 1411 | { 1412 | 
dx=dy=mdx=mdy=0.0; 1413 | cy += 1.0; 1414 | j = j - 4; 1415 | 1416 | ky = i + sample_step; 1417 | kx = j + sample_step; 1418 | 1419 | xs = xf + (-kx*scale*si + ky*scale*co); 1420 | ys = yf + (kx*scale*co + ky*scale*si); 1421 | 1422 | for (int k = i; k < i + 9; ++k) 1423 | { 1424 | for (int l = j; l < j + 9; ++l) 1425 | { 1426 | // Get coords of sample point on the rotated axis 1427 | sample_y = yf + (l*scale*co + k*scale*si); 1428 | sample_x = xf + (-l*scale*si + k*scale*co); 1429 | 1430 | // Get the gaussian weighted x and y responses 1431 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); 1432 | 1433 | y1 = fRound(sample_y-.5); 1434 | x1 = fRound(sample_x-.5); 1435 | 1436 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1437 | 1438 | y2 = fRound(sample_y+.5); 1439 | x2 = fRound(sample_x+.5); 1440 | 1441 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1442 | 1443 | fx = sample_x-x1; 1444 | fy = sample_y-y1; 1445 | 1446 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1447 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1448 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1449 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1450 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1451 | 1452 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1453 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1454 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1455 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1456 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1457 | 1458 | // Get the x and y derivatives on the rotated axis 1459 | rry = gauss_s1*(rx*co + ry*si); 1460 | rrx = gauss_s1*(-rx*si + ry*co); 1461 | 1462 | // Sum the derivatives to the cumulative descriptor 1463 | dx += rrx; 1464 | dy += rry; 1465 | mdx += fabs(rrx); 1466 | mdy += fabs(rry); 1467 | } 1468 | } 1469 | 1470 | // Add the values to the descriptor vector 1471 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); 1472 | kpt.descriptor[dcount++] = dx*gauss_s2; 1473 | 
kpt.descriptor[dcount++] = dy*gauss_s2; 1474 | kpt.descriptor[dcount++] = mdx*gauss_s2; 1475 | kpt.descriptor[dcount++] = mdy*gauss_s2; 1476 | 1477 | len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2; 1478 | 1479 | j += 9; 1480 | } 1481 | 1482 | i += 9; 1483 | } 1484 | 1485 | // convert to unit vector 1486 | len = sqrt(len); 1487 | 1488 | for(int i = 0; i < dsize; i++) 1489 | { 1490 | kpt.descriptor[i] /= len; 1491 | } 1492 | 1493 | if( USE_CLIPPING_NORMALIZATION == true ) 1494 | { 1495 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1496 | } 1497 | } 1498 | 1499 | //************************************************************************************* 1500 | //************************************************************************************* 1501 | 1502 | /** 1503 | * @brief This method computes the upright G-SURF descriptor of the provided keypoint 1504 | * given the main orientation 1505 | * @param kpt Input keypoint 1506 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional 1507 | * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and 1508 | * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 1509 | */ 1510 | void KAZE::Get_GSURF_Upright_Descriptor_64(Ipoint &kpt) 1511 | { 1512 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; 1513 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; 1514 | float sample_x = 0.0, sample_y = 0.0; 1515 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1516 | float lvv = 0.0, lww = 0.0, modg = 0.0; 1517 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1518 | int dsize = 0, level = 0; 1519 | 1520 | // Set the descriptor size and the sample and pattern sizes 1521 | dsize = kpt.descriptor_size = 64; 1522 | sample_step = 5; 1523 | pattern_size = 10; 1524 | 1525 | // Get the information from the keypoint 1526 | yf = kpt.yf; 1527 | xf = kpt.xf; 1528 | scale = kpt.scale; 1529 | level = kpt.level; 1530 | 1531 | // Allocate the memory for the vector 1532 | kpt.descriptor = vector(kpt.descriptor_size); 1533 | 1534 | // Calculate descriptor for this interest point 1535 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1536 | { 1537 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1538 | { 1539 | dx=dy=mdx=mdy=0.0; 1540 | 1541 | for(float k = i; k < i + sample_step; k+=0.5) 1542 | { 1543 | for(float l = j; l < j + sample_step; l+=0.5) 1544 | { 1545 | // Get the coordinates of the sample point on the rotated axis 1546 | sample_y = yf + l*scale; 1547 | sample_x = xf + k*scale; 1548 | 1549 | y1 = (int)(sample_y-.5); 1550 | x1 = (int)(sample_x-.5); 1551 | 1552 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1553 | 1554 | y2 = (int)(sample_y+.5); 1555 | x2 = (int)(sample_x+.5); 1556 | 1557 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1558 | 1559 | fx = sample_x-x1; 1560 | fy = sample_y-y1; 1561 | 1562 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1563 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1564 | res3 = 
*(evolution[level].Lx.ptr(y2)+x1); 1565 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1566 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1567 | 1568 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1569 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1570 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1571 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1572 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1573 | 1574 | modg = pow(rx,2) + pow(ry,2); 1575 | 1576 | if( modg != 0.0 ) 1577 | { 1578 | res1 = *(evolution[level].Lxx.ptr(y1)+x1); 1579 | res2 = *(evolution[level].Lxx.ptr(y1)+x2); 1580 | res3 = *(evolution[level].Lxx.ptr(y2)+x1); 1581 | res4 = *(evolution[level].Lxx.ptr(y2)+x2); 1582 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1583 | 1584 | res1 = *(evolution[level].Lxy.ptr(y1)+x1); 1585 | res2 = *(evolution[level].Lxy.ptr(y1)+x2); 1586 | res3 = *(evolution[level].Lxy.ptr(y2)+x1); 1587 | res4 = *(evolution[level].Lxy.ptr(y2)+x2); 1588 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1589 | 1590 | res1 = *(evolution[level].Lyy.ptr(y1)+x1); 1591 | res2 = *(evolution[level].Lyy.ptr(y1)+x2); 1592 | res3 = *(evolution[level].Lyy.ptr(y2)+x1); 1593 | res4 = *(evolution[level].Lyy.ptr(y2)+x2); 1594 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1595 | 1596 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) 1597 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); 1598 | 1599 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) 1600 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); 1601 | } 1602 | else 1603 | { 1604 | lww = 0.0; 1605 | lvv = 0.0; 1606 | } 1607 | 1608 | // Sum the derivatives to the cumulative descriptor 1609 | dx += lww; 1610 | dy += lvv; 1611 | mdx += fabs(lww); 1612 | mdy += fabs(lvv); 1613 | } 1614 | } 1615 | 1616 | // Add the values to 
the descriptor vector 1617 | kpt.descriptor[dcount++] = dx; 1618 | kpt.descriptor[dcount++] = dy; 1619 | kpt.descriptor[dcount++] = mdx; 1620 | kpt.descriptor[dcount++] = mdy; 1621 | 1622 | // Store the current length^2 of the vector 1623 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; 1624 | } 1625 | } 1626 | 1627 | // convert to unit vector 1628 | len = sqrt(len); 1629 | 1630 | for(int i = 0; i < dsize; i++) 1631 | { 1632 | kpt.descriptor[i] /= len; 1633 | } 1634 | 1635 | if( USE_CLIPPING_NORMALIZATION == true ) 1636 | { 1637 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1638 | } 1639 | 1640 | } 1641 | 1642 | //************************************************************************************* 1643 | //************************************************************************************* 1644 | 1645 | /** 1646 | * @brief This method computes the G-SURF descriptor of the provided keypoint given the 1647 | * main orientation 1648 | * @param kpt Input keypoint 1649 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional 1650 | * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and 1651 | * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 1652 | */ 1653 | void KAZE::Get_GSURF_Descriptor_64(Ipoint &kpt) 1654 | { 1655 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0; 1656 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0; 1657 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 1658 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1659 | float lvv = 0.0, lww = 0.0, modg = 0.0; 1660 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1661 | int dsize = 0, level = 0; 1662 | 1663 | // Set the descriptor size and the sample and pattern sizes 1664 | dsize = kpt.descriptor_size = 64; 1665 | sample_step = 5; 1666 | pattern_size = 10; 1667 | 1668 | // Get the information from the keypoint 1669 | yf = kpt.yf; 1670 | xf = kpt.xf; 1671 | scale = kpt.scale; 1672 | angle = kpt.angle; 1673 | level = kpt.level; 1674 | co = cos(angle); 1675 | si = sin(angle); 1676 | 1677 | // Allocate the memory for the vector 1678 | kpt.descriptor = vector(kpt.descriptor_size); 1679 | 1680 | // Calculate descriptor for this interest point 1681 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1682 | { 1683 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1684 | { 1685 | dx=dy=mdx=mdy=0.0; 1686 | 1687 | for(float k = i; k < i + sample_step; k+=0.5) 1688 | { 1689 | for(float l = j; l < j + sample_step; l+=0.5) 1690 | { 1691 | // Get the coordinates of the sample point on the rotated axis 1692 | sample_y = yf + (l*scale*co + k*scale*si); 1693 | sample_x = xf + (-l*scale*si + k*scale*co); 1694 | 1695 | y1 = (int)(sample_y-.5); 1696 | x1 = (int)(sample_x-.5); 1697 | 1698 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1699 | 1700 | y2 = (int)(sample_y+.5); 1701 | x2 = (int)(sample_x+.5); 1702 | 1703 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1704 | 1705 | fx = sample_x-x1; 1706 | fy = 
sample_y-y1; 1707 | 1708 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1709 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1710 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1711 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1712 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1713 | 1714 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1715 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1716 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1717 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1718 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1719 | 1720 | modg = pow(rx,2) + pow(ry,2); 1721 | 1722 | if( modg != 0.0 ) 1723 | { 1724 | res1 = *(evolution[level].Lxx.ptr(y1)+x1); 1725 | res2 = *(evolution[level].Lxx.ptr(y1)+x2); 1726 | res3 = *(evolution[level].Lxx.ptr(y2)+x1); 1727 | res4 = *(evolution[level].Lxx.ptr(y2)+x2); 1728 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1729 | 1730 | res1 = *(evolution[level].Lxy.ptr(y1)+x1); 1731 | res2 = *(evolution[level].Lxy.ptr(y1)+x2); 1732 | res3 = *(evolution[level].Lxy.ptr(y2)+x1); 1733 | res4 = *(evolution[level].Lxy.ptr(y2)+x2); 1734 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1735 | 1736 | res1 = *(evolution[level].Lyy.ptr(y1)+x1); 1737 | res2 = *(evolution[level].Lyy.ptr(y1)+x2); 1738 | res3 = *(evolution[level].Lyy.ptr(y2)+x1); 1739 | res4 = *(evolution[level].Lyy.ptr(y2)+x2); 1740 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1741 | 1742 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) 1743 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); 1744 | 1745 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) 1746 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); 1747 | } 1748 | else 1749 | { 1750 | lww = 0.0; 1751 | lvv = 0.0; 1752 | } 1753 | 1754 | // Sum the derivatives to the cumulative descriptor 1755 | 
dx += lww; 1756 | dy += lvv; 1757 | mdx += fabs(lww); 1758 | mdy += fabs(lvv); 1759 | } 1760 | } 1761 | 1762 | // Add the values to the descriptor vector 1763 | kpt.descriptor[dcount++] = dx; 1764 | kpt.descriptor[dcount++] = dy; 1765 | kpt.descriptor[dcount++] = mdx; 1766 | kpt.descriptor[dcount++] = mdy; 1767 | 1768 | // Store the current length^2 of the vector 1769 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy; 1770 | } 1771 | } 1772 | 1773 | // convert to unit vector 1774 | len = sqrt(len); 1775 | 1776 | for(int i = 0; i < dsize; i++) 1777 | { 1778 | kpt.descriptor[i] /= len; 1779 | } 1780 | 1781 | if( USE_CLIPPING_NORMALIZATION == true ) 1782 | { 1783 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1784 | } 1785 | 1786 | } 1787 | 1788 | //************************************************************************************* 1789 | //************************************************************************************* 1790 | 1791 | /** 1792 | * @brief This method computes the upright extended descriptor (no rotation invariant) 1793 | * of the provided keypoint 1794 | * @param kpt Input keypoint 1795 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional 1796 | * Gaussian weighting is performed. 
The descriptor is inspired from Bay et al., 1797 | * Speeded Up Robust Features, ECCV, 2006 1798 | */ 1799 | void KAZE::Get_SURF_Upright_Descriptor_128(Ipoint &kpt) 1800 | { 1801 | float scale = 0.0; 1802 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; 1803 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1804 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 1805 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; 1806 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1807 | int dsize = 0, level = 0; 1808 | 1809 | // Set the descriptor size and the sample and pattern sizes 1810 | dsize = kpt.descriptor_size = 128; 1811 | sample_step = 5; 1812 | pattern_size = 10; 1813 | 1814 | // Get the information from the keypoint 1815 | yf = kpt.yf; 1816 | xf = kpt.xf; 1817 | scale = kpt.scale; 1818 | level = kpt.level; 1819 | 1820 | // Allocate the memory for the vector 1821 | kpt.descriptor = vector(kpt.descriptor_size); 1822 | 1823 | // Calculate descriptor for this interest point 1824 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1825 | { 1826 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1827 | { 1828 | dxp=dxn=mdxp=mdxn=0.0; 1829 | dyp=dyn=mdyp=mdyn=0.0; 1830 | 1831 | for(float k = i; k < i + sample_step; k+=0.5) 1832 | { 1833 | for(float l = j; l < j + sample_step; l+=0.5) 1834 | { 1835 | sample_y = k*scale + yf; 1836 | sample_x = l*scale + xf; 1837 | 1838 | y1 = (int)(sample_y-.5); 1839 | x1 = (int)(sample_x-.5); 1840 | 1841 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1842 | 1843 | y2 = (int)(sample_y+.5); 1844 | x2 = (int)(sample_x+.5); 1845 | 1846 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1847 | 1848 | fx = sample_x-x1; 1849 | fy = sample_y-y1; 1850 | 1851 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1852 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1853 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1854 
| res4 = *(evolution[level].Lx.ptr(y2)+x2); 1855 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1856 | 1857 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1858 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1859 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1860 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1861 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1862 | 1863 | // Sum the derivatives to the cumulative descriptor 1864 | if( ry >= 0.0 ) 1865 | { 1866 | dxp += rx; 1867 | mdxp += fabs(rx); 1868 | } 1869 | else 1870 | { 1871 | dxn += rx; 1872 | mdxn += fabs(rx); 1873 | } 1874 | 1875 | if( rx >= 0.0 ) 1876 | { 1877 | dyp += ry; 1878 | mdyp += fabs(ry); 1879 | } 1880 | else 1881 | { 1882 | dyn += ry; 1883 | mdyn += fabs(ry); 1884 | } 1885 | } 1886 | } 1887 | 1888 | // Add the values to the descriptor vector 1889 | kpt.descriptor[dcount++] = dxp; 1890 | kpt.descriptor[dcount++] = dxn; 1891 | kpt.descriptor[dcount++] = mdxp; 1892 | kpt.descriptor[dcount++] = mdxn; 1893 | kpt.descriptor[dcount++] = dyp; 1894 | kpt.descriptor[dcount++] = dyn; 1895 | kpt.descriptor[dcount++] = mdyp; 1896 | kpt.descriptor[dcount++] = mdyn; 1897 | 1898 | // Store the current length^2 of the vector 1899 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 1900 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; 1901 | } 1902 | } 1903 | 1904 | // convert to unit vector 1905 | len = sqrt(len); 1906 | 1907 | for(int i = 0; i < dsize; i++) 1908 | { 1909 | kpt.descriptor[i] /= len; 1910 | } 1911 | 1912 | if( USE_CLIPPING_NORMALIZATION == true ) 1913 | { 1914 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 1915 | } 1916 | } 1917 | 1918 | //************************************************************************************* 1919 | //************************************************************************************* 1920 | 1921 | /** 1922 | * @brief This method computes the extended descriptor of the 
provided keypoint given the 1923 | * main orientation 1924 | * @param kpt Input keypoint 1925 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional 1926 | * Gaussian weighting is performed. The descriptor is inspired from Bay et al., 1927 | * Speeded Up Robust Features, ECCV, 2006 1928 | */ 1929 | void KAZE::Get_SURF_Descriptor_128(Ipoint &kpt) 1930 | { 1931 | float scale = 0.0; 1932 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0; 1933 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 1934 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 1935 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 1936 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; 1937 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 1938 | int dsize = 0, level = 0; 1939 | 1940 | // Set the descriptor size and the sample and pattern sizes 1941 | dsize = kpt.descriptor_size = 128; 1942 | sample_step = 5; 1943 | pattern_size = 10; 1944 | 1945 | // Get the information from the keypoint 1946 | yf = kpt.yf; 1947 | xf = kpt.xf; 1948 | scale = kpt.scale; 1949 | angle = kpt.angle; 1950 | level = kpt.level; 1951 | co = cos(angle); 1952 | si = sin(angle); 1953 | 1954 | // Allocate the memory for the vector 1955 | kpt.descriptor = vector(kpt.descriptor_size); 1956 | 1957 | // Calculate descriptor for this interest point 1958 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 1959 | { 1960 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 1961 | { 1962 | dxp=dxn=mdxp=mdxn=0.0; 1963 | dyp=dyn=mdyp=mdyn=0.0; 1964 | 1965 | for(float k = i; k < i + sample_step; k+=0.5) 1966 | { 1967 | for(float l = j; l < j + sample_step; l+=0.5) 1968 | { 1969 | // Get the coordinates of the sample point on the rotated axis 1970 | sample_y = yf + (l*scale*co + k*scale*si); 1971 | sample_x = xf + (-l*scale*si + k*scale*co); 1972 | 1973 | y1 = 
(int)(sample_y-.5); 1974 | x1 = (int)(sample_x-.5); 1975 | 1976 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 1977 | 1978 | y2 = (int)(sample_y+.5); 1979 | x2 = (int)(sample_x+.5); 1980 | 1981 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 1982 | 1983 | fx = sample_x-x1; 1984 | fy = sample_y-y1; 1985 | 1986 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 1987 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 1988 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 1989 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 1990 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1991 | 1992 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 1993 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 1994 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 1995 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 1996 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 1997 | 1998 | // Get the x and y derivatives on the rotated axis 1999 | rry = rx*co + ry*si; 2000 | rrx = -rx*si + ry*co; 2001 | 2002 | // Sum the derivatives to the cumulative descriptor 2003 | if( rry >= 0.0 ) 2004 | { 2005 | dxp += rrx; 2006 | mdxp += fabs(rrx); 2007 | } 2008 | else 2009 | { 2010 | dxn += rrx; 2011 | mdxn += fabs(rrx); 2012 | } 2013 | 2014 | if( rrx >= 0.0 ) 2015 | { 2016 | dyp += rry; 2017 | mdyp += fabs(rry); 2018 | } 2019 | else 2020 | { 2021 | dyn += rry; 2022 | mdyn += fabs(rry); 2023 | } 2024 | } 2025 | } 2026 | 2027 | // Add the values to the descriptor vector 2028 | kpt.descriptor[dcount++] = dxp; 2029 | kpt.descriptor[dcount++] = dxn; 2030 | kpt.descriptor[dcount++] = mdxp; 2031 | kpt.descriptor[dcount++] = mdxn; 2032 | kpt.descriptor[dcount++] = dyp; 2033 | kpt.descriptor[dcount++] = dyn; 2034 | kpt.descriptor[dcount++] = mdyp; 2035 | kpt.descriptor[dcount++] = mdyn; 2036 | 2037 | // Store the current length^2 of the vector 2038 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 2039 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; 2040 | } 2041 | } 2042 | 2043 
| // convert to unit vector 2044 | len = sqrt(len); 2045 | 2046 | for(int i = 0; i < dsize; i++) 2047 | { 2048 | kpt.descriptor[i] /= len; 2049 | } 2050 | 2051 | if( USE_CLIPPING_NORMALIZATION == true ) 2052 | { 2053 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 2054 | } 2055 | } 2056 | 2057 | //************************************************************************************* 2058 | //************************************************************************************* 2059 | 2060 | /** 2061 | * @brief This method computes the extended upright descriptor (not rotation invariant) of 2062 | * the provided keypoint 2063 | * @param kpt Input keypoint 2064 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired 2065 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, 2066 | * ECCV 2008 2067 | */ 2068 | void KAZE::Get_MSURF_Upright_Descriptor_128(Ipoint &kpt) 2069 | { 2070 | float scale = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; 2071 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; 2072 | float sample_x = 0.0, sample_y = 0.0; 2073 | int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0; 2074 | int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0; 2075 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 2076 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 2077 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; 2078 | int dsize = 0, level = 0; 2079 | 2080 | // Subregion centers for the 4x4 gaussian weighting 2081 | float cx = -0.5, cy = 0.5; 2082 | 2083 | // Set the descriptor size and the sample and pattern sizes 2084 | dsize = kpt.descriptor_size = 128; 2085 | sample_step = 5; 2086 | pattern_size = 12; 2087 | 2088 | // Get the information from the keypoint 2089 | yf = kpt.yf; 2090 | xf = kpt.xf; 2091 | level = kpt.level; 2092 | scale = kpt.scale; 2093 | 2094 | // Allocate 
the memory for the vector 2095 | kpt.descriptor = vector(kpt.descriptor_size); 2096 | 2097 | i = -8; 2098 | 2099 | // Calculate descriptor for this interest point 2100 | // Area of size 24 s x 24 s 2101 | while(i < pattern_size) 2102 | { 2103 | j = -8; 2104 | i = i-4; 2105 | 2106 | cx += 1.0; 2107 | cy = -0.5; 2108 | 2109 | while(j < pattern_size) 2110 | { 2111 | dxp=dxn=mdxp=mdxn=0.0; 2112 | dyp=dyn=mdyp=mdyn=0.0; 2113 | 2114 | cy += 1.0; 2115 | j = j-4; 2116 | 2117 | ky = i + sample_step; 2118 | kx = j + sample_step; 2119 | 2120 | ys = yf + (ky*scale); 2121 | xs = xf + (kx*scale); 2122 | 2123 | for(int k = i; k < i+9; k++) 2124 | { 2125 | for (int l = j; l < j+9; l++) 2126 | { 2127 | sample_y = k*scale + yf; 2128 | sample_x = l*scale + xf; 2129 | 2130 | //Get the gaussian weighted x and y responses 2131 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale); 2132 | 2133 | y1 = (int)(sample_y-.5); 2134 | x1 = (int)(sample_x-.5); 2135 | 2136 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 2137 | 2138 | y2 = (int)(sample_y+.5); 2139 | x2 = (int)(sample_x+.5); 2140 | 2141 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 2142 | 2143 | fx = sample_x-x1; 2144 | fy = sample_y-y1; 2145 | 2146 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 2147 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 2148 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 2149 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 2150 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2151 | 2152 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 2153 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 2154 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 2155 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 2156 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2157 | 2158 | rx = gauss_s1*rx; 2159 | ry = gauss_s1*ry; 2160 | 2161 | // Sum the derivatives to the cumulative descriptor 2162 | if( ry >= 0.0 ) 2163 | { 2164 | dxp += rx; 2165 | mdxp += fabs(rx); 2166 | } 
2167 | else 2168 | { 2169 | dxn += rx; 2170 | mdxn += fabs(rx); 2171 | } 2172 | 2173 | if( rx >= 0.0 ) 2174 | { 2175 | dyp += ry; 2176 | mdyp += fabs(ry); 2177 | } 2178 | else 2179 | { 2180 | dyn += ry; 2181 | mdyn += fabs(ry); 2182 | } 2183 | } 2184 | } 2185 | 2186 | // Add the values to the descriptor vector 2187 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f); 2188 | 2189 | kpt.descriptor[dcount++] = dxp*gauss_s2; 2190 | kpt.descriptor[dcount++] = dxn*gauss_s2; 2191 | kpt.descriptor[dcount++] = mdxp*gauss_s2; 2192 | kpt.descriptor[dcount++] = mdxn*gauss_s2; 2193 | kpt.descriptor[dcount++] = dyp*gauss_s2; 2194 | kpt.descriptor[dcount++] = dyn*gauss_s2; 2195 | kpt.descriptor[dcount++] = mdyp*gauss_s2; 2196 | kpt.descriptor[dcount++] = mdyn*gauss_s2; 2197 | 2198 | // Store the current length^2 of the vector 2199 | len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 2200 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; 2201 | 2202 | j += 9; 2203 | } 2204 | 2205 | i += 9; 2206 | } 2207 | 2208 | // convert to unit vector 2209 | len = sqrt(len); 2210 | 2211 | for(int i = 0; i < dsize; i++) 2212 | { 2213 | kpt.descriptor[i] /= len; 2214 | } 2215 | 2216 | if( USE_CLIPPING_NORMALIZATION == true ) 2217 | { 2218 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 2219 | } 2220 | } 2221 | 2222 | //************************************************************************************* 2223 | //************************************************************************************* 2224 | 2225 | /** 2226 | * @brief This method computes the extended G-SURF descriptor of the provided keypoint 2227 | * given the main orientation of the keypoint 2228 | * @param kpt Input keypoint 2229 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. 
The descriptor is inspired 2230 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching, 2231 | * ECCV 2008 2232 | */ 2233 | void KAZE::Get_MSURF_Descriptor_128(Ipoint &kpt) 2234 | { 2235 | float scale = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0; 2236 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0; 2237 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 2238 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 2239 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 2240 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; 2241 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0; 2242 | int kx = 0, ky = 0, i = 0, j = 0, dcount = 0; 2243 | int dsize = 0, level = 0; 2244 | 2245 | // Subregion centers for the 4x4 gaussian weighting 2246 | float cx = -0.5, cy = 0.5; 2247 | 2248 | // Set the descriptor size and the sample and pattern sizes 2249 | dsize = kpt.descriptor_size = 128; 2250 | sample_step = 5; 2251 | pattern_size = 12; 2252 | 2253 | // Get the information from the keypoint 2254 | yf = kpt.yf; 2255 | xf = kpt.xf; 2256 | scale = kpt.scale; 2257 | angle = kpt.angle; 2258 | level = kpt.level; 2259 | co = cos(angle); 2260 | si = sin(angle); 2261 | 2262 | // Allocate the memory for the vector 2263 | kpt.descriptor = vector(kpt.descriptor_size); 2264 | 2265 | i = -8; 2266 | 2267 | // Calculate descriptor for this interest point 2268 | // Area of size 24 s x 24 s 2269 | while(i < pattern_size) 2270 | { 2271 | j = -8; 2272 | i = i-4; 2273 | 2274 | cx += 1.0; 2275 | cy = -0.5; 2276 | 2277 | while(j < pattern_size) 2278 | { 2279 | dxp=dxn=mdxp=mdxn=0.0; 2280 | dyp=dyn=mdyp=mdyn=0.0; 2281 | 2282 | cy += 1.0f; 2283 | j = j - 4; 2284 | 2285 | ky = i + sample_step; 2286 | kx = j + sample_step; 2287 | 2288 | xs = xf + (-kx*scale*si + ky*scale*co); 2289 | ys = yf + (kx*scale*co + ky*scale*si); 2290 | 2291 | for (int 
k = i; k < i + 9; ++k) 2292 | { 2293 | for (int l = j; l < j + 9; ++l) 2294 | { 2295 | // Get coords of sample point on the rotated axis 2296 | sample_y = yf + (l*scale*co + k*scale*si); 2297 | sample_x = xf + (-l*scale*si + k*scale*co); 2298 | 2299 | // Get the gaussian weighted x and y responses 2300 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale); 2301 | 2302 | y1 = fRound(sample_y-.5); 2303 | x1 = fRound(sample_x-.5); 2304 | 2305 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 2306 | 2307 | y2 = fRound(sample_y+.5); 2308 | x2 = fRound(sample_x+.5); 2309 | 2310 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 2311 | 2312 | fx = sample_x-x1; 2313 | fy = sample_y-y1; 2314 | 2315 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 2316 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 2317 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 2318 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 2319 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2320 | 2321 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 2322 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 2323 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 2324 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 2325 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2326 | 2327 | // Get the x and y derivatives on the rotated axis 2328 | rry = gauss_s1*(rx*co + ry*si); 2329 | rrx = gauss_s1*(-rx*si + ry*co); 2330 | 2331 | // Sum the derivatives to the cumulative descriptor 2332 | // Sum the derivatives to the cumulative descriptor 2333 | if( rry >= 0.0 ) 2334 | { 2335 | dxp += rrx; 2336 | mdxp += fabs(rrx); 2337 | } 2338 | else 2339 | { 2340 | dxn += rrx; 2341 | mdxn += fabs(rrx); 2342 | } 2343 | 2344 | if( rrx >= 0.0 ) 2345 | { 2346 | dyp += rry; 2347 | mdyp += fabs(rry); 2348 | } 2349 | else 2350 | { 2351 | dyn += rry; 2352 | mdyn += fabs(rry); 2353 | } 2354 | } 2355 | } 2356 | 2357 | // Add the values to the descriptor vector 2358 | gauss_s2 = 
gaussian(cx-2.0f,cy-2.0f,1.5f); 2359 | 2360 | kpt.descriptor[dcount++] = dxp*gauss_s2; 2361 | kpt.descriptor[dcount++] = dxn*gauss_s2; 2362 | kpt.descriptor[dcount++] = mdxp*gauss_s2; 2363 | kpt.descriptor[dcount++] = mdxn*gauss_s2; 2364 | kpt.descriptor[dcount++] = dyp*gauss_s2; 2365 | kpt.descriptor[dcount++] = dyn*gauss_s2; 2366 | kpt.descriptor[dcount++] = mdyp*gauss_s2; 2367 | kpt.descriptor[dcount++] = mdyn*gauss_s2; 2368 | 2369 | // Store the current length^2 of the vector 2370 | len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 2371 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2; 2372 | 2373 | j += 9; 2374 | } 2375 | 2376 | i += 9; 2377 | } 2378 | 2379 | // convert to unit vector 2380 | len = sqrt(len); 2381 | 2382 | for(int i = 0; i < dsize; i++) 2383 | { 2384 | kpt.descriptor[i] /= len; 2385 | } 2386 | 2387 | if( USE_CLIPPING_NORMALIZATION == true ) 2388 | { 2389 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 2390 | } 2391 | 2392 | } 2393 | 2394 | //************************************************************************************* 2395 | //************************************************************************************* 2396 | 2397 | /** 2398 | * @brief This method computes the G-SURF upright extended descriptor 2399 | * (no rotation invariant) of the provided keypoint 2400 | * @param kpt Input keypoint 2401 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional 2402 | * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and 2403 | * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 2404 | */ 2405 | void KAZE::Get_GSURF_Upright_Descriptor_128(Ipoint &kpt) 2406 | { 2407 | float scale = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0; 2408 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0; 2409 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 2410 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 2411 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0; 2412 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 2413 | int dsize = 0, level = 0; 2414 | 2415 | // Set the descriptor size and the sample and pattern sizes 2416 | dsize = kpt.descriptor_size = 128; 2417 | sample_step = 5; 2418 | pattern_size = 10; 2419 | 2420 | // Get the information from the keypoint 2421 | yf = kpt.yf; 2422 | xf = kpt.xf; 2423 | scale = kpt.scale; 2424 | level = kpt.level; 2425 | 2426 | // Allocate the memory for the vector 2427 | kpt.descriptor = vector(kpt.descriptor_size); 2428 | 2429 | // Calculate descriptor for this interest point 2430 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 2431 | { 2432 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 2433 | { 2434 | dxp=dxn=mdxp=mdxn=0.0; 2435 | dyp=dyn=mdyp=mdyn=0.0; 2436 | 2437 | for(float k = i; k < i + sample_step; k+=0.5) 2438 | { 2439 | for(float l = j; l < j + sample_step; l+=0.5) 2440 | { 2441 | sample_y = k*scale + yf; 2442 | sample_x = l*scale + xf; 2443 | 2444 | y1 = (int)(sample_y-.5); 2445 | x1 = (int)(sample_x-.5); 2446 | 2447 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 2448 | 2449 | y2 = (int)(sample_y+.5); 2450 | x2 = (int)(sample_x+.5); 2451 | 2452 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 2453 | 2454 | fx = sample_x-x1; 2455 | fy = sample_y-y1; 2456 | 2457 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 2458 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 2459 | 
res3 = *(evolution[level].Lx.ptr(y2)+x1); 2460 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 2461 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2462 | 2463 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 2464 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 2465 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 2466 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 2467 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2468 | 2469 | modg = pow(rx,2) + pow(ry,2); 2470 | 2471 | if( modg != 0.0 ) 2472 | { 2473 | res1 = *(evolution[level].Lxx.ptr(y1)+x1); 2474 | res2 = *(evolution[level].Lxx.ptr(y1)+x2); 2475 | res3 = *(evolution[level].Lxx.ptr(y2)+x1); 2476 | res4 = *(evolution[level].Lxx.ptr(y2)+x2); 2477 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2478 | 2479 | res1 = *(evolution[level].Lxy.ptr(y1)+x1); 2480 | res2 = *(evolution[level].Lxy.ptr(y1)+x2); 2481 | res3 = *(evolution[level].Lxy.ptr(y2)+x1); 2482 | res4 = *(evolution[level].Lxy.ptr(y2)+x2); 2483 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2484 | 2485 | res1 = *(evolution[level].Lyy.ptr(y1)+x1); 2486 | res2 = *(evolution[level].Lyy.ptr(y1)+x2); 2487 | res3 = *(evolution[level].Lyy.ptr(y2)+x1); 2488 | res4 = *(evolution[level].Lyy.ptr(y2)+x2); 2489 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2490 | 2491 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) 2492 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); 2493 | 2494 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) 2495 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); 2496 | } 2497 | else 2498 | { 2499 | lww = 0.0; 2500 | lvv = 0.0; 2501 | } 2502 | 2503 | // Sum the derivatives to the cumulative descriptor 2504 | if( lww >= 0.0 ) 2505 | { 2506 | dxp += lvv; 2507 | mdxp += fabs(lvv); 2508 | } 2509 | else 2510 | { 2511 | dxn += lvv; 2512 
| mdxn += fabs(lvv); 2513 | } 2514 | 2515 | if( lvv >= 0.0 ) 2516 | { 2517 | dyp += lww; 2518 | mdyp += fabs(lww); 2519 | } 2520 | else 2521 | { 2522 | dyn += lww; 2523 | mdyn += fabs(lww); 2524 | } 2525 | } 2526 | } 2527 | 2528 | // Add the values to the descriptor vector 2529 | kpt.descriptor[dcount++] = dxp; 2530 | kpt.descriptor[dcount++] = dxn; 2531 | kpt.descriptor[dcount++] = mdxp; 2532 | kpt.descriptor[dcount++] = mdxn; 2533 | kpt.descriptor[dcount++] = dyp; 2534 | kpt.descriptor[dcount++] = dyn; 2535 | kpt.descriptor[dcount++] = mdyp; 2536 | kpt.descriptor[dcount++] = mdyn; 2537 | 2538 | // Store the current length^2 of the vector 2539 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 2540 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; 2541 | } 2542 | } 2543 | 2544 | // convert to unit vector 2545 | len = sqrt(len); 2546 | 2547 | for(int i = 0; i < dsize; i++) 2548 | { 2549 | kpt.descriptor[i] /= len; 2550 | } 2551 | 2552 | if( USE_CLIPPING_NORMALIZATION == true ) 2553 | { 2554 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 2555 | } 2556 | } 2557 | 2558 | //************************************************************************************* 2559 | //************************************************************************************* 2560 | 2561 | /** 2562 | * @brief This method computes the extended descriptor of the provided keypoint given the 2563 | * main orientation 2564 | * @param kpt Input keypoint 2565 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional 2566 | * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and 2567 | * Andrew J. 
Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013 2568 | */ 2569 | void KAZE::Get_GSURF_Descriptor_128(Ipoint &kpt) 2570 | { 2571 | float scale = 0.0, len = 0.0, xf = 0.0, yf = 0.0; 2572 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0; 2573 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0; 2574 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0; 2575 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0; 2576 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0; 2577 | float lvv = 0.0, lww = 0.0, modg = 0.0; 2578 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0; 2579 | int dsize = 0, level = 0; 2580 | 2581 | // Set the descriptor size and the sample and pattern sizes 2582 | dsize = kpt.descriptor_size = 128; 2583 | sample_step = 5; 2584 | pattern_size = 10; 2585 | 2586 | // Get the information from the keypoint 2587 | yf = kpt.yf; 2588 | xf = kpt.xf; 2589 | scale = kpt.scale; 2590 | angle = kpt.angle; 2591 | level = kpt.level; 2592 | co = cos(angle); 2593 | si = sin(angle); 2594 | 2595 | // Allocate the memory for the vector 2596 | kpt.descriptor = vector(kpt.descriptor_size); 2597 | 2598 | // Calculate descriptor for this interest point 2599 | for(int i = -pattern_size; i < pattern_size; i+=sample_step) 2600 | { 2601 | for(int j = -pattern_size; j < pattern_size; j+=sample_step) 2602 | { 2603 | dxp=dxn=mdxp=mdxn=0.0; 2604 | dyp=dyn=mdyp=mdyn=0.0; 2605 | 2606 | for(float k = i; k < i + sample_step; k+=0.5) 2607 | { 2608 | for(float l = j; l < j + sample_step; l+=0.5) 2609 | { 2610 | // Get the coordinates of the sample point on the rotated axis 2611 | sample_y = yf + (l*scale*co + k*scale*si); 2612 | sample_x = xf + (-l*scale*si + k*scale*co); 2613 | 2614 | y1 = (int)(sample_y-.5); 2615 | x1 = (int)(sample_x-.5); 2616 | 2617 | Check_Descriptor_Limits(x1,y1,img_width,img_height); 2618 | 2619 | y2 = (int)(sample_y+.5); 2620 | x2 = (int)(sample_x+.5); 
2621 | 2622 | Check_Descriptor_Limits(x2,y2,img_width,img_height); 2623 | 2624 | fx = sample_x-x1; 2625 | fy = sample_y-y1; 2626 | 2627 | res1 = *(evolution[level].Lx.ptr(y1)+x1); 2628 | res2 = *(evolution[level].Lx.ptr(y1)+x2); 2629 | res3 = *(evolution[level].Lx.ptr(y2)+x1); 2630 | res4 = *(evolution[level].Lx.ptr(y2)+x2); 2631 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2632 | 2633 | res1 = *(evolution[level].Ly.ptr(y1)+x1); 2634 | res2 = *(evolution[level].Ly.ptr(y1)+x2); 2635 | res3 = *(evolution[level].Ly.ptr(y2)+x1); 2636 | res4 = *(evolution[level].Ly.ptr(y2)+x2); 2637 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2638 | 2639 | modg = pow(rx,2) + pow(ry,2); 2640 | 2641 | if( modg != 0.0 ) 2642 | { 2643 | res1 = *(evolution[level].Lxx.ptr(y1)+x1); 2644 | res2 = *(evolution[level].Lxx.ptr(y1)+x2); 2645 | res3 = *(evolution[level].Lxx.ptr(y2)+x1); 2646 | res4 = *(evolution[level].Lxx.ptr(y2)+x2); 2647 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2648 | 2649 | res1 = *(evolution[level].Lxy.ptr(y1)+x1); 2650 | res2 = *(evolution[level].Lxy.ptr(y1)+x2); 2651 | res3 = *(evolution[level].Lxy.ptr(y2)+x1); 2652 | res4 = *(evolution[level].Lxy.ptr(y2)+x2); 2653 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2654 | 2655 | res1 = *(evolution[level].Lyy.ptr(y1)+x1); 2656 | res2 = *(evolution[level].Lyy.ptr(y1)+x2); 2657 | res3 = *(evolution[level].Lyy.ptr(y2)+x1); 2658 | res4 = *(evolution[level].Lyy.ptr(y2)+x2); 2659 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4; 2660 | 2661 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2) 2662 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg); 2663 | 2664 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2) 2665 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg); 2666 | } 2667 | else 2668 | { 2669 | 
lww = 0.0; 2670 | lvv = 0.0; 2671 | } 2672 | 2673 | // Sum the derivatives to the cumulative descriptor 2674 | if( lww >= 0.0 ) 2675 | { 2676 | dxp += lvv; 2677 | mdxp += fabs(lvv); 2678 | } 2679 | else 2680 | { 2681 | dxn += lvv; 2682 | mdxn += fabs(lvv); 2683 | } 2684 | 2685 | if( lvv >= 0.0 ) 2686 | { 2687 | dyp += lww; 2688 | mdyp += fabs(lww); 2689 | } 2690 | else 2691 | { 2692 | dyn += lww; 2693 | mdyn += fabs(lww); 2694 | } 2695 | } 2696 | } 2697 | 2698 | // Add the values to the descriptor vector 2699 | kpt.descriptor[dcount++] = dxp; 2700 | kpt.descriptor[dcount++] = dxn; 2701 | kpt.descriptor[dcount++] = mdxp; 2702 | kpt.descriptor[dcount++] = mdxn; 2703 | kpt.descriptor[dcount++] = dyp; 2704 | kpt.descriptor[dcount++] = dyn; 2705 | kpt.descriptor[dcount++] = mdyp; 2706 | kpt.descriptor[dcount++] = mdyn; 2707 | 2708 | // Store the current length^2 of the vector 2709 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn + 2710 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn; 2711 | } 2712 | } 2713 | 2714 | // convert to unit vector 2715 | len = sqrt(len); 2716 | 2717 | for(int i = 0; i < dsize; i++) 2718 | { 2719 | kpt.descriptor[i] /= len; 2720 | } 2721 | 2722 | if( USE_CLIPPING_NORMALIZATION == true ) 2723 | { 2724 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO); 2725 | } 2726 | } 2727 | 2728 | //************************************************************************************* 2729 | //************************************************************************************* 2730 | 2731 | /** 2732 | * @brief This method performs a scalar non-linear diffusion step using AOS schemes 2733 | * @param Ld Image at a given evolution step 2734 | * @param Ldprev Image at a previous evolution step 2735 | * @param c Conductivity image 2736 | * @param stepsize Stepsize for the nonlinear diffusion evolution 2737 | * @note If c is constant, the diffusion will be linear 2738 | * If c is a matrix of the same size as Ld, the diffusion will be 
nonlinear 2739 | * The stepsize can be arbitrarilly large 2740 | */ 2741 | void KAZE::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize) 2742 | { 2743 | //int64 t1 = cv::getTickCount(); cout << "Begin AOS schemes at " << t1 << endl; 2744 | AOS_Rows(Ldprev,c,stepsize); 2745 | AOS_Columns(Ldprev,c,stepsize); 2746 | //int64 t2 = cv::getTickCount(); cout << "Finish AOS schemes. Exec-time: " << 1000.0*(t2-t1)/cv::getTickFrequency() << endl; 2747 | 2748 | Ld = 0.5*(Lty + Ltx.t()); 2749 | } 2750 | 2751 | //************************************************************************************* 2752 | //************************************************************************************* 2753 | 2754 | /** 2755 | * @brief This method performs a scalar non-linear diffusion step using AOS schemes 2756 | * Diffusion in each dimension is computed independently in a different thread 2757 | * @param Ld Image at a given evolution step 2758 | * @param Ldprev Image at a previous evolution step 2759 | * @param c Conductivity image 2760 | * @param stepsize Stepsize for the nonlinear diffusion evolution 2761 | * @note If c is constant, the diffusion will be linear 2762 | * If c is a matrix of the same size as Ld, the diffusion will be nonlinear 2763 | * The stepsize can be arbitrarilly large 2764 | */ 2765 | #if HAVE_BOOST_THREADING 2766 | void KAZE::AOS_Step_Scalar_Parallel(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize) 2767 | { 2768 | //int64 t1 = cv::getTickCount(); cout << "Begin AOS schemes at " << t1 << endl; 2769 | boost::thread *AOSth1 = new boost::thread(&KAZE::AOS_Rows,this,Ldprev,c,stepsize); 2770 | boost::thread *AOSth2 = new boost::thread(&KAZE::AOS_Columns,this,Ldprev,c,stepsize); 2771 | 2772 | AOSth1->join(); 2773 | AOSth2->join(); 2774 | //int64 t2 = cv::getTickCount(); cout << "Finish AOS schemes. 
Exec-time: " << 1000.0*(t2-t1)/cv::getTickFrequency() << endl; 2775 | 2776 | Ld = 0.5*(Lty + Ltx.t()); 2777 | 2778 | delete AOSth1; 2779 | delete AOSth2; 2780 | } 2781 | #endif 2782 | 2783 | //************************************************************************************* 2784 | //************************************************************************************* 2785 | 2786 | /** 2787 | * @brief This method performs performs 1D-AOS for the image rows 2788 | * @param Ldprev Image at a previous evolution step 2789 | * @param c Conductivity image 2790 | * @param stepsize Stepsize for the nonlinear diffusion evolution 2791 | */ 2792 | void KAZE::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize) 2793 | { 2794 | //int64 t1 = cv::getTickCount(); cout << "Begin AOS_Rows at " << t1 << endl; 2795 | // Operate on rows 2796 | int qcols = qr.cols, qrows = qr.rows; 2797 | if (qr.isContinuous() && c.isContinuous()) 2798 | { 2799 | qcols *= qrows; 2800 | qrows = 1; 2801 | } 2802 | for( int i = 0; i < qrows; i++ ) 2803 | { 2804 | for( int j = 0; j < qcols; j++ ) 2805 | { 2806 | *(qr.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i+1)+j); 2807 | } 2808 | } 2809 | 2810 | for( int j = 0; j < py.cols; j++ ) 2811 | { 2812 | *(py.ptr(0)+j) = *(qr.ptr(0)+j); 2813 | *(py.ptr(py.rows-1)+j) = *(qr.ptr(qr.rows-1)+j); 2814 | } 2815 | 2816 | qcols = qr.cols, qrows = qr.rows; 2817 | if (qr.isContinuous() && py.isContinuous()) 2818 | { 2819 | qcols *= qrows-1; 2820 | qrows = 1; 2821 | } 2822 | for( int i = 0; i < qrows; i++ ) 2823 | { 2824 | for( int j = 0; j < qcols; j++ ) 2825 | { 2826 | *(py.ptr(i+1)+j) = *(qr.ptr(i)+j) + *(qr.ptr(i+1)+j); 2827 | } 2828 | } 2829 | 2830 | // a = 1 + t.*p; (p is -1*p) 2831 | // b = -t.*q; 2832 | ay = 1.0 + stepsize*py; // p is -1*p 2833 | by = -stepsize*qr; 2834 | 2835 | // Call to Thomas algorithm now 2836 | Thomas(ay,by,Ldprev,Lty); 2837 | //int64 t2 = cv::getTickCount(); cout << "Finish AOS_Rows. 
    // Exec-time: " << 1000.0*(t2-t1)/cv::getTickFrequency() << endl;

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method performs 1D-AOS for the image columns
 * @param Ldprev Image at a previous evolution step
 * @param c Conductivity image
 * @param stepsize Stepsize for the nonlinear diffusion evolution
 * @note Mirrors AOS_Rows for the horizontal direction: builds (ax, bx) from
 * the transposed p/q matrices and stores the Thomas solution in the member
 * Ltx (still transposed — the caller transposes it back).
 */
void KAZE::AOS_Columns(const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize)
{
    //int64 t1 = cv::getTickCount(); cout << "Begin AOS_Columns at " << t1 << endl;
    // Operate on columns
    // NOTE(review): cv::Mat::ptr() without a template argument returns uchar*;
    // the upstream KAZE code uses ptr<float>() on these CV_32F matrices —
    // confirm the <float> template arguments were not lost in this copy.
    // Off-diagonal terms: q(j) = c(j) + c(j+1) (horizontal neighbours).
    int qcols = qc.cols, qrows = qc.rows;
    if (qc.isContinuous() && c.isContinuous())
    {
        qcols *= qrows;
        qrows = 1;
    }
    for( int i = 0; i < qrows; i++ )
    {
        for( int j = 0; j < qcols; j++ )
        {
            *(qc.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i)+j+1);
        }
    }

    // Boundary columns of p receive a single neighbour contribution.
    for( int i = 0; i < px.rows; i++ )
    {
        *(px.ptr(i)) = *(qc.ptr(i));
        *(px.ptr(i)+px.cols-1) = *(qc.ptr(i)+qc.cols-1);
    }

    // Interior columns: p(j) = q(j-1) + q(j).
    for( int j = 1; j < px.cols-1; j++ )
    {
        for( int i = 0; i < px.rows; i++ )
        {
            *(px.ptr(i)+j) = *(qc.ptr(i)+j-1) + *(qc.ptr(i)+j);
        }
    }

    // a = 1 + t.*p';
    ax = 1.0 + stepsize*px.t();

    // b = -t.*q';
    bx = -stepsize*qc.t();

    // Call Thomas algorithm again
    // But take care since we need to transpose the solution!!
    Thomas(ax,bx,Ldprev.t(),Ltx);
    //int64 t2 = cv::getTickCount(); cout << "Finish AOS_Columns. Exec-time: " << 1000.0*(t2-t1)/cv::getTickFrequency() << endl;

}

//*************************************************************************************
//*************************************************************************************

/**
 * @brief This method does the Thomas algorithm for solving a tridiagonal linear system
 * @note The matrix A must be strictly diagonally dominant for a stable solution
 * @param a Main diagonal of A (one independent system per matrix column)
 * @param b Off-diagonal of A (the system is symmetric, so b serves as both
 * the upper and the lower off-diagonal)
 * @param Ld Right-hand side d
 * @param x Solution output. The cv::Mat parameters are passed by value, but
 * a cv::Mat header copy shares the underlying data, so writes through x
 * update the caller's matrix.
 */
void KAZE::Thomas(cv::Mat a, cv::Mat b, cv::Mat Ld, cv::Mat x)
{
    // Auxiliary variables
    // NOTE(review): as in AOS_Rows/AOS_Columns, ptr() without <float> returns
    // uchar* — confirm against the upstream source.
    int n = a.rows;
    cv::Mat m = cv::Mat::zeros(a.rows,a.cols,CV_32F);
    cv::Mat l = cv::Mat::zeros(b.rows,b.cols,CV_32F);
    cv::Mat y = cv::Mat::zeros(Ld.rows,Ld.cols,CV_32F);

    /** A*x = d;                                               */
    /** / a1 b1  0  0 0  ...    0 \  / x1 \ = / d1 \           */
    /** | c1 a2 b2  0 0  ...    0 |  | x2 | = | d2 |           */
    /** |  0 c2 a3 b3 0  ...    0 |  | x3 | = | d3 |           */
    /** |  :  :  :  : 0  ...    0 |  |  : | = |  : |           */
    /** |  :  :  :  : 0  cn-1  an |  | xn | = | dn |           */

    /** 1. LU decomposition
     / L = / 1                 \     U = / m1 r1            \
     /     | l1 1              |         |    m2 r2         |
     /     |    l2 1           |         |       m3 r3      |
     /     |     : : :         |         |        :  :  :   |
     /     \           ln-1 1  /         \               mn /    */

    // First row of U is the first row of A; first row of y is the first RHS row.
    for( int j = 0; j < m.cols; j++ )
    {
        *(m.ptr(0)+j) = *(a.ptr(0)+j);
    }

    for( int j = 0; j < y.cols; j++ )
    {
        *(y.ptr(0)+j) = *(Ld.ptr(0)+j);
    }

    // 2. Forward substitution L*y = d for y
    // (each column j is an independent tridiagonal system solved in lockstep)
    for( int k = 1; k < n; k++ )
    {
        for( int j=0; j < l.cols; j++ )
        {
            *(l.ptr(k-1)+j) = *(b.ptr(k-1)+j) / *(m.ptr(k-1)+j);
        }

        for( int j=0; j < m.cols; j++ )
        {
            *(m.ptr(k)+j) = *(a.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(b.ptr(k-1)+j));
        }

        for( int j=0; j < y.cols; j++ )
        {
            *(y.ptr(k)+j) = *(Ld.ptr(k)+j) - *(l.ptr(k-1)+j)*(*(y.ptr(k-1)+j));
        }
    }

    // 3.
Backward substitution U*x = y 2954 | for( int j=0; j < y.cols; j++ ) 2955 | { 2956 | *(x.ptr(n-1)+j) = (*(y.ptr(n-1)+j))/(*(m.ptr(n-1)+j)); 2957 | } 2958 | 2959 | for( int i = n-2; i >= 0; i-- ) 2960 | { 2961 | for( int j = 0; j < x.cols; j++ ) 2962 | { 2963 | *(x.ptr(i)+j) = (*(y.ptr(i)+j) - (*(b.ptr(i)+j))*(*(x.ptr(i+1)+j)))/(*(m.ptr(i)+j)); 2964 | } 2965 | } 2966 | } 2967 | 2968 | //************************************************************************************* 2969 | //************************************************************************************* 2970 | 2971 | /** 2972 | * @brief This method saves the nonlinear scale space into jpg images 2973 | */ 2974 | /* 2975 | void KAZE::Save_Nonlinear_Scale_Space(void) 2976 | { 2977 | cv::Mat img_aux; 2978 | char cad[NMAX_CHAR]; 2979 | 2980 | for( unsigned int i = 0; i < evolution.size(); i++ ) 2981 | { 2982 | Convert_Scale(evolution[i].Lt); 2983 | evolution[i].Lt.convertTo(img_aux,CV_8U,255.0,0); 2984 | sprintf(cad,"../../output/images\nl_evolution_%02d.jpg",i); 2985 | cv::imwrite(cad,img_aux); 2986 | } 2987 | } 2988 | */ 2989 | //************************************************************************************* 2990 | //************************************************************************************* 2991 | 2992 | /** 2993 | * @brief This method saves the feature detector responses of the nonlinear scale space 2994 | * into jpg images 2995 | */ 2996 | /* 2997 | void KAZE::Save_Detector_Responses(void) 2998 | { 2999 | cv::Mat img_aux; 3000 | char cad[NMAX_CHAR]; 3001 | 3002 | for( unsigned int i = 0; i < evolution.size(); i++ ) 3003 | { 3004 | Convert_Scale(evolution[i].Ldet); 3005 | evolution[i].Ldet.convertTo(img_aux,CV_8U,255.0,0); 3006 | sprintf(cad,"../../output/images\nl_detector_%02d.jpg",i); 3007 | imwrite(cad,img_aux); 3008 | } 3009 | }*/ 3010 | //************************************************************************************* 3011 | 
//************************************************************************************* 3012 | 3013 | /** 3014 | * @brief This method saves the flow diffusivity responsesof the nonlinear scale space 3015 | * into jpg images 3016 | */ 3017 | /* 3018 | void KAZE::Save_Flow_Responses(void) 3019 | { 3020 | cv::Mat img_aux; 3021 | char cad[NMAX_CHAR]; 3022 | 3023 | for( unsigned int i = 0; i < evolution.size(); i++ ) 3024 | { 3025 | Convert_Scale(evolution[i].Lflow); 3026 | evolution[i].Lflow.convertTo(img_aux,CV_8U,255.0,0); 3027 | sprintf(cad,"../../output/images/flow/flow_%02d.jpg",i); 3028 | imwrite(cad,img_aux); 3029 | } 3030 | } 3031 | */ 3032 | //************************************************************************************* 3033 | //************************************************************************************* 3034 | 3035 | /** 3036 | * @brief This function computes the angle from the vector given by (X Y). From 0 to 2*Pi 3037 | */ 3038 | inline float Get_Angle(float X, float Y) 3039 | { 3040 | 3041 | if( X >= 0 && Y >= 0 ) 3042 | { 3043 | return atan(Y/X); 3044 | } 3045 | 3046 | if( X < 0 && Y >= 0 ) 3047 | { 3048 | return PI - atan(-Y/X); 3049 | } 3050 | 3051 | if( X < 0 && Y < 0 ) 3052 | { 3053 | return PI + atan(Y/X); 3054 | } 3055 | 3056 | if( X >= 0 && Y < 0 ) 3057 | { 3058 | return M2_PI - atan(-Y/X); 3059 | } 3060 | 3061 | return 0; 3062 | } 3063 | 3064 | //************************************************************************************* 3065 | //************************************************************************************* 3066 | 3067 | /** 3068 | * @brief This function performs descriptor clipping for a given keypoint 3069 | * @param keypoint Input keypoint 3070 | * @param iter Number of iterations 3071 | * @param ratio Clipping ratio 3072 | */ 3073 | inline void Clipping_Descriptor(Ipoint &keypoint, int niter, float ratio) 3074 | { 3075 | int dsize = keypoint.descriptor_size; 3076 | float cratio = ratio / std::sqrt(dsize); 3077 
| float len = 0.0; 3078 | 3079 | for( int i = 0; i < niter; i++ ) 3080 | { 3081 | len = 0.0; 3082 | for( int j = 0; j < dsize; j++ ) 3083 | { 3084 | if( keypoint.descriptor[j] > cratio ) 3085 | { 3086 | keypoint.descriptor[j] = cratio; 3087 | } 3088 | else if( keypoint.descriptor[j] < -cratio ) 3089 | { 3090 | keypoint.descriptor[j] = -cratio; 3091 | } 3092 | len += keypoint.descriptor[j]*keypoint.descriptor[j]; 3093 | } 3094 | 3095 | // Normalize again 3096 | len = sqrt(len); 3097 | 3098 | for( int j = 0; j < dsize; j++ ) 3099 | { 3100 | keypoint.descriptor[j] = keypoint.descriptor[j] / len; 3101 | } 3102 | } 3103 | } 3104 | 3105 | //************************************************************************************** 3106 | //************************************************************************************** 3107 | 3108 | /** 3109 | * @brief This function computes the value of a 2D Gaussian function 3110 | * @param x X Position 3111 | * @param y Y Position 3112 | * @param sig Standard Deviation 3113 | */ 3114 | inline float gaussian(float x, float y, float sig) 3115 | { 3116 | return exp(-(x*x+y*y)/(2.0f*sig*sig)); 3117 | } 3118 | 3119 | //************************************************************************************** 3120 | //************************************************************************************** 3121 | 3122 | /** 3123 | * @brief This function checks descriptor limits 3124 | * @param x X Position 3125 | * @param y Y Position 3126 | * @param width Image width 3127 | * @param height Image height 3128 | */ 3129 | inline void Check_Descriptor_Limits(int &x, int &y, int width, int height ) 3130 | { 3131 | if( x < 0 ) 3132 | { 3133 | x = 0; 3134 | } 3135 | 3136 | if( y < 0 ) 3137 | { 3138 | y = 0; 3139 | } 3140 | 3141 | if( x > width-1 ) 3142 | { 3143 | x = width-1; 3144 | } 3145 | 3146 | if( y > height-1 ) 3147 | { 3148 | y = height-1; 3149 | } 3150 | } 3151 | --------------------------------------------------------------------------------