├── .DS_Store
├── .gitignore
├── Descritores.pro
├── Descritores.pro.user
├── Makefile
├── README.md
├── imageprocessor.cpp
├── imageprocessor.h
├── kaze.cpp
├── lembretes.txt
├── main.cpp
├── mainwindow.cpp
├── mainwindow.h
├── mainwindow.ui
├── moc_imageprocessor.cpp
├── moc_mainwindow.cpp
├── temp.txt
├── ui_mainwindow.h
├── util.cpp
└── util.h
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/guidefreitas/mser_sift_image_search/bb7c9343a06f44f5ccb2e56e6edb6bad36088037/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Descritores.pro.user
--------------------------------------------------------------------------------
/Descritores.pro:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------
2 | #
3 | # Project created by QtCreator 2012-06-07T12:42:26
4 | #
5 | #-------------------------------------------------
6 |
7 |
8 | #-L$$PWD/../../../../../../../../usr/local/lib/ -lmorph4cpp16
9 |
10 | QT += core gui
11 |
12 | TARGET = Descritores
13 | TEMPLATE = app
14 |
15 |
16 | SOURCES += main.cpp\
17 | mainwindow.cpp \
18 | util.cpp \
19 | imageprocessor.cpp
20 |
21 | HEADERS += mainwindow.h \
22 | util.h \
23 | imageprocessor.h
24 |
25 | FORMS += mainwindow.ui
26 |
27 |
28 | LIBS += -L/usr/local/lib/ -lopencv_core\
29 | -L/usr/local/lib/ -lopencv_contrib\
30 | -L/usr/local/lib/ -lopencv_imgproc\
31 | -L/usr/local/lib/ -lopencv_ml\
32 | -L/usr/local/lib/ -lopencv_objdetect\
33 | -L/usr/local/lib/ -lopencv_video\
34 | -L/usr/local/lib/ -lopencv_photo\
35 | -L/usr/local/lib/ -lopencv_ts\
36 | -L/usr/local/lib/ -lopencv_highgui\
37 | -L/usr/local/lib/ -lopencv_features2d\
38 | -L/usr/local/lib/ -lopencv_nonfree\
39 | -L/usr/local/lib/ -lopencv_legacy\
40 | -L/usr/local/lib/ -lopencv_flann\
41 | -L/usr/local/lib/ -lopencv_ts\
42 |
43 | INCLUDEPATH += /usr/local/include
44 | DEPENDPATH += /usr/local/include
45 |
46 | #QMAKE_CC = gcc-4.7
47 | #QMAKE_CXX = g++-4.7
48 |
49 | QMAKE_CXXFLAGS += -fopenmp
50 | QMAKE_LFLAGS += -fopenmp
51 |
52 | QMAKE_CFLAGS_DEBUG += -fopenmp
53 | QMAKE_CFLAGS_RELEASE += -fopenmp
54 |
55 |
56 |
57 | #INCLUDEPATH += /usr/local/include/opencv2
58 | #DEPENDPATH += /usr/local/include/opencv2
59 |
60 | OTHER_FILES += \
61 | lembretes.txt \
62 | temp.txt
63 |
64 |
65 |
--------------------------------------------------------------------------------
/Descritores.pro.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | ProjectExplorer.Project.ActiveTarget
7 | 0
8 |
9 |
10 | ProjectExplorer.Project.EditorSettings
11 |
12 | true
13 | false
14 | true
15 |
16 | Cpp
17 |
18 | CppGlobal
19 |
20 |
21 |
22 | QmlJS
23 |
24 | QmlJSGlobal
25 |
26 |
27 | 2
28 | UTF-8
29 | false
30 | 4
31 | false
32 | 80
33 | true
34 | true
35 | 1
36 | true
37 | false
38 | 0
39 | true
40 | 0
41 | 8
42 | true
43 | 1
44 | true
45 | true
46 | true
47 | false
48 |
49 |
50 |
51 | ProjectExplorer.Project.PluginSettings
52 |
53 |
54 |
55 | ProjectExplorer.Project.Target.0
56 |
57 | Desktop Qt 5.3 clang 64bit
58 | Desktop Qt 5.3 clang 64bit
59 | qt.53.clang_64_kit
60 | 0
61 | 0
62 | 0
63 |
64 | /Volumes/Dados/GUILHERME/MESTRADO/UDESC/REC/Exercicios/Descritores/build-Descritores-Desktop_Qt_5_3_clang_64bit-Debug
65 |
66 |
67 | true
68 | qmake
69 |
70 | QtProjectManager.QMakeBuildStep
71 | false
72 | true
73 |
74 | false
75 |
76 |
77 | true
78 | Make
79 |
80 | Qt4ProjectManager.MakeStep
81 |
82 | -w
83 | -r
84 |
85 | false
86 |
87 |
88 |
89 | 2
90 | Build
91 |
92 | ProjectExplorer.BuildSteps.Build
93 |
94 |
95 |
96 | true
97 | Make
98 |
99 | Qt4ProjectManager.MakeStep
100 |
101 | -w
102 | -r
103 |
104 | true
105 | clean
106 |
107 |
108 | 1
109 | Clean
110 |
111 | ProjectExplorer.BuildSteps.Clean
112 |
113 | 2
114 | false
115 |
116 | Debug
117 |
118 | Qt4ProjectManager.Qt4BuildConfiguration
119 | 2
120 | true
121 |
122 |
123 | /Volumes/Dados/GUILHERME/MESTRADO/UDESC/REC/Exercicios/Descritores/build-Descritores-Desktop_Qt_5_3_clang_64bit-Release
124 |
125 |
126 | true
127 | qmake
128 |
129 | QtProjectManager.QMakeBuildStep
130 | false
131 | true
132 |
133 | false
134 |
135 |
136 | true
137 | Make
138 |
139 | Qt4ProjectManager.MakeStep
140 |
141 | -w
142 | -r
143 |
144 | false
145 |
146 |
147 |
148 | 2
149 | Build
150 |
151 | ProjectExplorer.BuildSteps.Build
152 |
153 |
154 |
155 | true
156 | Make
157 |
158 | Qt4ProjectManager.MakeStep
159 |
160 | -w
161 | -r
162 |
163 | true
164 | clean
165 |
166 |
167 | 1
168 | Clean
169 |
170 | ProjectExplorer.BuildSteps.Clean
171 |
172 | 2
173 | false
174 |
175 | Release
176 |
177 | Qt4ProjectManager.Qt4BuildConfiguration
178 | 0
179 | true
180 |
181 | 2
182 |
183 |
184 | 0
185 | Deploy
186 |
187 | ProjectExplorer.BuildSteps.Deploy
188 |
189 | 1
190 | Deploy locally
191 |
192 | ProjectExplorer.DefaultDeployConfiguration
193 |
194 | 1
195 |
196 |
197 |
198 | false
199 | false
200 | false
201 | false
202 | true
203 | 0.01
204 | 10
205 | true
206 | 1
207 | 25
208 |
209 | 1
210 | true
211 | false
212 | true
213 | valgrind
214 |
215 | 0
216 | 1
217 | 2
218 | 3
219 | 4
220 | 5
221 | 6
222 | 7
223 | 8
224 | 9
225 | 10
226 | 11
227 | 12
228 | 13
229 | 14
230 |
231 | 2
232 |
233 | Descritores
234 |
235 | Qt4ProjectManager.Qt4RunConfiguration:/Volumes/Dados/GUILHERME/MESTRADO/UDESC/REC/Exercicios/Descritores/Descritores/Descritores.pro
236 |
237 | Descritores.pro
238 | false
239 | false
240 |
241 | 3768
242 | false
243 | true
244 | false
245 | false
246 | true
247 |
248 | 1
249 |
250 |
251 |
252 | ProjectExplorer.Project.TargetCount
253 | 1
254 |
255 |
256 | ProjectExplorer.Project.Updater.EnvironmentId
257 | {ac81e359-6103-42e4-99fe-c770c596a0b1}
258 |
259 |
260 | ProjectExplorer.Project.Updater.FileVersion
261 | 15
262 |
263 |
264 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | #############################################################################
2 | # Makefile for building: Descritores.app/Contents/MacOS/Descritores
3 | # Generated by qmake (2.01a) (Qt 4.8.4) on: Wed May 15 12:51:41 2013
4 | # Project: Descritores.pro
5 | # Template: app
6 | # Command: /usr/bin/qmake -spec /usr/local/Qt4.8/mkspecs/macx-g++ CONFIG+=x86_64 CONFIG+=declarative_debug -o Makefile Descritores.pro
7 | #############################################################################
8 |
9 | ####### Compiler, tools and options
10 |
11 | CC = gcc
12 | CXX = g++
13 | DEFINES = -DQT_GUI_LIB -DQT_CORE_LIB -DQT_SHARED
14 | CFLAGS = -pipe -g -fopenmp -gdwarf-2 -arch x86_64 -Xarch_x86_64 -mmacosx-version-min=10.5 -Wall -W $(DEFINES)
15 | CXXFLAGS = -pipe -fopenmp -g -gdwarf-2 -arch x86_64 -Xarch_x86_64 -mmacosx-version-min=10.5 -Wall -W $(DEFINES)
16 | INCPATH = -I/usr/local/Qt4.8/mkspecs/macx-g++ -I. -I/Library/Frameworks/QtCore.framework/Versions/4/Headers -I/usr/include/QtCore -I/Library/Frameworks/QtGui.framework/Versions/4/Headers -I/usr/include/QtGui -I/usr/include -I/usr/local/include -I. -I. -F/Library/Frameworks
17 | LINK = g++
18 | LFLAGS = -headerpad_max_install_names -fopenmp -arch x86_64 -Xarch_x86_64 -mmacosx-version-min=10.5
19 | LIBS = $(SUBLIBS) -F/Library/Frameworks -L/Library/Frameworks -L/usr/local/lib/ -lopencv_core -lopencv_contrib -lopencv_imgproc -lopencv_ml -lopencv_objdetect -lopencv_video -lopencv_photo -lopencv_highgui -lopencv_features2d -lopencv_nonfree -lopencv_legacy -lopencv_flann -lopencv_ts -framework QtGui -framework QtCore
20 | AR = ar cq
21 | RANLIB = ranlib -s
22 | QMAKE = /usr/bin/qmake
23 | TAR = tar -cf
24 | COMPRESS = gzip -9f
25 | COPY = cp -f
26 | SED = sed
27 | COPY_FILE = cp -f
28 | COPY_DIR = cp -f -R
29 | STRIP =
30 | INSTALL_FILE = $(COPY_FILE)
31 | INSTALL_DIR = $(COPY_DIR)
32 | INSTALL_PROGRAM = $(COPY_FILE)
33 | DEL_FILE = rm -f
34 | SYMLINK = ln -f -s
35 | DEL_DIR = rmdir
36 | MOVE = mv -f
37 | CHK_DIR_EXISTS= test -d
38 | MKDIR = mkdir -p
39 | export MACOSX_DEPLOYMENT_TARGET = 10.4
40 |
41 | ####### Output directory
42 |
43 | OBJECTS_DIR = ./
44 |
45 | ####### Files
46 |
47 | SOURCES = main.cpp \
48 | mainwindow.cpp \
49 | util.cpp \
50 | imageprocessor.cpp moc_mainwindow.cpp \
51 | moc_imageprocessor.cpp
52 | OBJECTS = main.o \
53 | mainwindow.o \
54 | util.o \
55 | imageprocessor.o \
56 | moc_mainwindow.o \
57 | moc_imageprocessor.o
58 | DIST = /usr/local/Qt4.8/mkspecs/common/unix.conf \
59 | /usr/local/Qt4.8/mkspecs/common/mac.conf \
60 | /usr/local/Qt4.8/mkspecs/common/gcc-base.conf \
61 | /usr/local/Qt4.8/mkspecs/common/gcc-base-macx.conf \
62 | /usr/local/Qt4.8/mkspecs/common/g++-base.conf \
63 | /usr/local/Qt4.8/mkspecs/common/g++-macx.conf \
64 | /usr/local/Qt4.8/mkspecs/qconfig.pri \
65 | /usr/local/Qt4.8/mkspecs/modules/qt_webkit_version.pri \
66 | /usr/local/Qt4.8/mkspecs/features/qt_functions.prf \
67 | /usr/local/Qt4.8/mkspecs/features/qt_config.prf \
68 | /usr/local/Qt4.8/mkspecs/features/exclusive_builds.prf \
69 | /usr/local/Qt4.8/mkspecs/features/default_pre.prf \
70 | /usr/local/Qt4.8/mkspecs/features/mac/default_pre.prf \
71 | /usr/local/Qt4.8/mkspecs/features/mac/dwarf2.prf \
72 | /usr/local/Qt4.8/mkspecs/features/debug.prf \
73 | /usr/local/Qt4.8/mkspecs/features/default_post.prf \
74 | /usr/local/Qt4.8/mkspecs/features/mac/default_post.prf \
75 | /usr/local/Qt4.8/mkspecs/features/mac/x86_64.prf \
76 | /usr/local/Qt4.8/mkspecs/features/mac/objective_c.prf \
77 | /usr/local/Qt4.8/mkspecs/features/declarative_debug.prf \
78 | /usr/local/Qt4.8/mkspecs/features/warn_on.prf \
79 | /usr/local/Qt4.8/mkspecs/features/qt.prf \
80 | /usr/local/Qt4.8/mkspecs/features/unix/thread.prf \
81 | /usr/local/Qt4.8/mkspecs/features/moc.prf \
82 | /usr/local/Qt4.8/mkspecs/features/mac/rez.prf \
83 | /usr/local/Qt4.8/mkspecs/features/mac/sdk.prf \
84 | /usr/local/Qt4.8/mkspecs/features/resources.prf \
85 | /usr/local/Qt4.8/mkspecs/features/uic.prf \
86 | /usr/local/Qt4.8/mkspecs/features/yacc.prf \
87 | /usr/local/Qt4.8/mkspecs/features/lex.prf \
88 | /usr/local/Qt4.8/mkspecs/features/include_source_dir.prf \
89 | Descritores.pro
90 | QMAKE_TARGET = Descritores
91 | DESTDIR =
92 | TARGET = Descritores.app/Contents/MacOS/Descritores
93 |
94 | ####### Custom Compiler Variables
95 | QMAKE_COMP_QMAKE_OBJECTIVE_CFLAGS = -pipe \
96 | -g \
97 | -arch \
98 | x86_64 \
99 | -Xarch_x86_64 \
100 | -mmacosx-version-min=10.5 \
101 | -Wall \
102 | -W
103 |
104 |
105 | first: all
106 | ####### Implicit rules
107 |
108 | .SUFFIXES: .o .c .cpp .cc .cxx .C
109 |
110 | .cpp.o:
111 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"
112 |
113 | .cc.o:
114 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"
115 |
116 | .cxx.o:
117 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"
118 |
119 | .C.o:
120 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o "$@" "$<"
121 |
122 | .c.o:
123 | $(CC) -c $(CFLAGS) $(INCPATH) -o "$@" "$<"
124 |
125 | ####### Build rules
126 |
127 | all: Makefile Descritores.app/Contents/PkgInfo Descritores.app/Contents/Resources/empty.lproj Descritores.app/Contents/Info.plist $(TARGET)
128 |
129 | $(TARGET): ui_mainwindow.h $(OBJECTS)
130 | @$(CHK_DIR_EXISTS) Descritores.app/Contents/MacOS/ || $(MKDIR) Descritores.app/Contents/MacOS/
131 | $(LINK) $(LFLAGS) -o $(TARGET) $(OBJECTS) $(OBJCOMP) $(LIBS)
132 |
133 | Makefile: Descritores.pro /usr/local/Qt4.8/mkspecs/macx-g++/qmake.conf /usr/local/Qt4.8/mkspecs/common/unix.conf \
134 | /usr/local/Qt4.8/mkspecs/common/mac.conf \
135 | /usr/local/Qt4.8/mkspecs/common/gcc-base.conf \
136 | /usr/local/Qt4.8/mkspecs/common/gcc-base-macx.conf \
137 | /usr/local/Qt4.8/mkspecs/common/g++-base.conf \
138 | /usr/local/Qt4.8/mkspecs/common/g++-macx.conf \
139 | /usr/local/Qt4.8/mkspecs/qconfig.pri \
140 | /usr/local/Qt4.8/mkspecs/modules/qt_webkit_version.pri \
141 | /usr/local/Qt4.8/mkspecs/features/qt_functions.prf \
142 | /usr/local/Qt4.8/mkspecs/features/qt_config.prf \
143 | /usr/local/Qt4.8/mkspecs/features/exclusive_builds.prf \
144 | /usr/local/Qt4.8/mkspecs/features/default_pre.prf \
145 | /usr/local/Qt4.8/mkspecs/features/mac/default_pre.prf \
146 | /usr/local/Qt4.8/mkspecs/features/mac/dwarf2.prf \
147 | /usr/local/Qt4.8/mkspecs/features/debug.prf \
148 | /usr/local/Qt4.8/mkspecs/features/default_post.prf \
149 | /usr/local/Qt4.8/mkspecs/features/mac/default_post.prf \
150 | /usr/local/Qt4.8/mkspecs/features/mac/x86_64.prf \
151 | /usr/local/Qt4.8/mkspecs/features/mac/objective_c.prf \
152 | /usr/local/Qt4.8/mkspecs/features/declarative_debug.prf \
153 | /usr/local/Qt4.8/mkspecs/features/warn_on.prf \
154 | /usr/local/Qt4.8/mkspecs/features/qt.prf \
155 | /usr/local/Qt4.8/mkspecs/features/unix/thread.prf \
156 | /usr/local/Qt4.8/mkspecs/features/moc.prf \
157 | /usr/local/Qt4.8/mkspecs/features/mac/rez.prf \
158 | /usr/local/Qt4.8/mkspecs/features/mac/sdk.prf \
159 | /usr/local/Qt4.8/mkspecs/features/resources.prf \
160 | /usr/local/Qt4.8/mkspecs/features/uic.prf \
161 | /usr/local/Qt4.8/mkspecs/features/yacc.prf \
162 | /usr/local/Qt4.8/mkspecs/features/lex.prf \
163 | /usr/local/Qt4.8/mkspecs/features/include_source_dir.prf \
164 | /Library/Frameworks/QtGui.framework/QtGui.prl \
165 | /Library/Frameworks/QtCore.framework/QtCore.prl
166 | $(QMAKE) -spec /usr/local/Qt4.8/mkspecs/macx-g++ CONFIG+=x86_64 CONFIG+=declarative_debug -o Makefile Descritores.pro
167 | /usr/local/Qt4.8/mkspecs/common/unix.conf:
168 | /usr/local/Qt4.8/mkspecs/common/mac.conf:
169 | /usr/local/Qt4.8/mkspecs/common/gcc-base.conf:
170 | /usr/local/Qt4.8/mkspecs/common/gcc-base-macx.conf:
171 | /usr/local/Qt4.8/mkspecs/common/g++-base.conf:
172 | /usr/local/Qt4.8/mkspecs/common/g++-macx.conf:
173 | /usr/local/Qt4.8/mkspecs/qconfig.pri:
174 | /usr/local/Qt4.8/mkspecs/modules/qt_webkit_version.pri:
175 | /usr/local/Qt4.8/mkspecs/features/qt_functions.prf:
176 | /usr/local/Qt4.8/mkspecs/features/qt_config.prf:
177 | /usr/local/Qt4.8/mkspecs/features/exclusive_builds.prf:
178 | /usr/local/Qt4.8/mkspecs/features/default_pre.prf:
179 | /usr/local/Qt4.8/mkspecs/features/mac/default_pre.prf:
180 | /usr/local/Qt4.8/mkspecs/features/mac/dwarf2.prf:
181 | /usr/local/Qt4.8/mkspecs/features/debug.prf:
182 | /usr/local/Qt4.8/mkspecs/features/default_post.prf:
183 | /usr/local/Qt4.8/mkspecs/features/mac/default_post.prf:
184 | /usr/local/Qt4.8/mkspecs/features/mac/x86_64.prf:
185 | /usr/local/Qt4.8/mkspecs/features/mac/objective_c.prf:
186 | /usr/local/Qt4.8/mkspecs/features/declarative_debug.prf:
187 | /usr/local/Qt4.8/mkspecs/features/warn_on.prf:
188 | /usr/local/Qt4.8/mkspecs/features/qt.prf:
189 | /usr/local/Qt4.8/mkspecs/features/unix/thread.prf:
190 | /usr/local/Qt4.8/mkspecs/features/moc.prf:
191 | /usr/local/Qt4.8/mkspecs/features/mac/rez.prf:
192 | /usr/local/Qt4.8/mkspecs/features/mac/sdk.prf:
193 | /usr/local/Qt4.8/mkspecs/features/resources.prf:
194 | /usr/local/Qt4.8/mkspecs/features/uic.prf:
195 | /usr/local/Qt4.8/mkspecs/features/yacc.prf:
196 | /usr/local/Qt4.8/mkspecs/features/lex.prf:
197 | /usr/local/Qt4.8/mkspecs/features/include_source_dir.prf:
198 | /Library/Frameworks/QtGui.framework/QtGui.prl:
199 | /Library/Frameworks/QtCore.framework/QtCore.prl:
200 | qmake: FORCE
201 | @$(QMAKE) -spec /usr/local/Qt4.8/mkspecs/macx-g++ CONFIG+=x86_64 CONFIG+=declarative_debug -o Makefile Descritores.pro
202 |
203 | Descritores.app/Contents/PkgInfo:
204 | @$(CHK_DIR_EXISTS) Descritores.app/Contents || $(MKDIR) Descritores.app/Contents
205 | @$(DEL_FILE) Descritores.app/Contents/PkgInfo
206 | @echo "APPL????" >Descritores.app/Contents/PkgInfo
207 | Descritores.app/Contents/Resources/empty.lproj:
208 | @$(CHK_DIR_EXISTS) Descritores.app/Contents/Resources || $(MKDIR) Descritores.app/Contents/Resources
209 | @touch Descritores.app/Contents/Resources/empty.lproj
210 |
211 | Descritores.app/Contents/Info.plist:
212 | @$(CHK_DIR_EXISTS) Descritores.app/Contents || $(MKDIR) Descritores.app/Contents
213 | @$(DEL_FILE) Descritores.app/Contents/Info.plist
214 | @sed -e "s,@SHORT_VERSION@,1.0,g" -e "s,@TYPEINFO@,????,g" -e "s,@ICON@,,g" -e "s,@EXECUTABLE@,Descritores,g" -e "s,@TYPEINFO@,????,g" /usr/local/Qt4.8/mkspecs/macx-g++/Info.plist.app >Descritores.app/Contents/Info.plist
215 | dist:
216 | @$(CHK_DIR_EXISTS) .tmp/Descritores1.0.0 || $(MKDIR) .tmp/Descritores1.0.0
217 | $(COPY_FILE) --parents $(SOURCES) $(DIST) .tmp/Descritores1.0.0/ && $(COPY_FILE) --parents mainwindow.h util.h imageprocessor.h .tmp/Descritores1.0.0/ && $(COPY_FILE) --parents main.cpp mainwindow.cpp util.cpp imageprocessor.cpp .tmp/Descritores1.0.0/ && $(COPY_FILE) --parents mainwindow.ui .tmp/Descritores1.0.0/ && (cd `dirname .tmp/Descritores1.0.0` && $(TAR) Descritores1.0.0.tar Descritores1.0.0 && $(COMPRESS) Descritores1.0.0.tar) && $(MOVE) `dirname .tmp/Descritores1.0.0`/Descritores1.0.0.tar.gz . && $(DEL_FILE) -r .tmp/Descritores1.0.0
218 |
219 |
220 | clean:compiler_clean
221 | -$(DEL_FILE) $(OBJECTS)
222 | -$(DEL_FILE) *~ core *.core
223 |
224 |
225 | ####### Sub-libraries
226 |
227 | distclean: clean
228 | -$(DEL_FILE) -r Descritores.app
229 | -$(DEL_FILE) Makefile
230 |
231 |
232 | check: first
233 |
234 | mocclean: compiler_moc_header_clean compiler_moc_source_clean
235 |
236 | mocables: compiler_moc_header_make_all compiler_moc_source_make_all
237 |
238 | compiler_objective_c_make_all:
239 | compiler_objective_c_clean:
240 | compiler_moc_header_make_all: moc_mainwindow.cpp moc_imageprocessor.cpp
241 | compiler_moc_header_clean:
242 | -$(DEL_FILE) moc_mainwindow.cpp moc_imageprocessor.cpp
243 | moc_mainwindow.cpp: imageprocessor.h \
244 | /usr/local/include/opencv2/opencv.hpp \
245 | /usr/local/include/opencv2/core/core_c.h \
246 | /usr/local/include/opencv2/core/types_c.h \
247 | /usr/local/include/opencv2/core/core.hpp \
248 | /usr/local/include/opencv2/core/version.hpp \
249 | /usr/local/include/opencv2/core/operations.hpp \
250 | /usr/local/include/opencv2/core/mat.hpp \
251 | /usr/local/include/opencv2/flann/miniflann.hpp \
252 | /usr/local/include/opencv2/flann/defines.h \
253 | /usr/local/include/opencv2/flann/config.h \
254 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
255 | /usr/local/include/opencv2/imgproc/types_c.h \
256 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
257 | /usr/local/include/opencv2/photo/photo.hpp \
258 | /usr/local/include/opencv2/photo/photo_c.h \
259 | /usr/local/include/opencv2/video/video.hpp \
260 | /usr/local/include/opencv2/video/tracking.hpp \
261 | /usr/local/include/opencv2/video/background_segm.hpp \
262 | /usr/local/include/opencv2/features2d/features2d.hpp \
263 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
264 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
265 | /usr/local/include/opencv2/ml/ml.hpp \
266 | /usr/local/include/opencv2/highgui/highgui_c.h \
267 | /usr/local/include/opencv2/highgui/highgui.hpp \
268 | /usr/local/include/opencv2/contrib/contrib.hpp \
269 | /usr/local/include/opencv2/contrib/retina.hpp \
270 | /usr/local/include/opencv2/contrib/openfabmap.hpp \
271 | util.h \
272 | mainwindow.h
273 | /Developer/Tools/Qt/moc $(DEFINES) $(INCPATH) -D__APPLE__ -D__GNUC__ mainwindow.h -o moc_mainwindow.cpp
274 |
275 | moc_imageprocessor.cpp: /usr/local/include/opencv2/opencv.hpp \
276 | /usr/local/include/opencv2/core/core_c.h \
277 | /usr/local/include/opencv2/core/types_c.h \
278 | /usr/local/include/opencv2/core/core.hpp \
279 | /usr/local/include/opencv2/core/version.hpp \
280 | /usr/local/include/opencv2/core/operations.hpp \
281 | /usr/local/include/opencv2/core/mat.hpp \
282 | /usr/local/include/opencv2/flann/miniflann.hpp \
283 | /usr/local/include/opencv2/flann/defines.h \
284 | /usr/local/include/opencv2/flann/config.h \
285 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
286 | /usr/local/include/opencv2/imgproc/types_c.h \
287 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
288 | /usr/local/include/opencv2/photo/photo.hpp \
289 | /usr/local/include/opencv2/photo/photo_c.h \
290 | /usr/local/include/opencv2/video/video.hpp \
291 | /usr/local/include/opencv2/video/tracking.hpp \
292 | /usr/local/include/opencv2/video/background_segm.hpp \
293 | /usr/local/include/opencv2/features2d/features2d.hpp \
294 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
295 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
296 | /usr/local/include/opencv2/ml/ml.hpp \
297 | /usr/local/include/opencv2/highgui/highgui_c.h \
298 | /usr/local/include/opencv2/highgui/highgui.hpp \
299 | /usr/local/include/opencv2/contrib/contrib.hpp \
300 | /usr/local/include/opencv2/contrib/retina.hpp \
301 | /usr/local/include/opencv2/contrib/openfabmap.hpp \
302 | util.h \
303 | imageprocessor.h
304 | /Developer/Tools/Qt/moc $(DEFINES) $(INCPATH) -D__APPLE__ -D__GNUC__ imageprocessor.h -o moc_imageprocessor.cpp
305 |
306 | compiler_rcc_make_all:
307 | compiler_rcc_clean:
308 | compiler_image_collection_make_all: qmake_image_collection.cpp
309 | compiler_image_collection_clean:
310 | -$(DEL_FILE) qmake_image_collection.cpp
311 | compiler_moc_source_make_all:
312 | compiler_moc_source_clean:
313 | compiler_rez_source_make_all:
314 | compiler_rez_source_clean:
315 | compiler_uic_make_all: ui_mainwindow.h
316 | compiler_uic_clean:
317 | -$(DEL_FILE) ui_mainwindow.h
318 | ui_mainwindow.h: mainwindow.ui
319 | /Developer/Tools/Qt/uic mainwindow.ui -o ui_mainwindow.h
320 |
321 | compiler_yacc_decl_make_all:
322 | compiler_yacc_decl_clean:
323 | compiler_yacc_impl_make_all:
324 | compiler_yacc_impl_clean:
325 | compiler_lex_make_all:
326 | compiler_lex_clean:
327 | compiler_clean: compiler_moc_header_clean compiler_uic_clean
328 |
329 | ####### Compile
330 |
331 | main.o: main.cpp mainwindow.h \
332 | imageprocessor.h \
333 | /usr/local/include/opencv2/opencv.hpp \
334 | /usr/local/include/opencv2/core/core_c.h \
335 | /usr/local/include/opencv2/core/types_c.h \
336 | /usr/local/include/opencv2/core/core.hpp \
337 | /usr/local/include/opencv2/core/version.hpp \
338 | /usr/local/include/opencv2/core/operations.hpp \
339 | /usr/local/include/opencv2/core/mat.hpp \
340 | /usr/local/include/opencv2/flann/miniflann.hpp \
341 | /usr/local/include/opencv2/flann/defines.h \
342 | /usr/local/include/opencv2/flann/config.h \
343 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
344 | /usr/local/include/opencv2/imgproc/types_c.h \
345 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
346 | /usr/local/include/opencv2/photo/photo.hpp \
347 | /usr/local/include/opencv2/photo/photo_c.h \
348 | /usr/local/include/opencv2/video/video.hpp \
349 | /usr/local/include/opencv2/video/tracking.hpp \
350 | /usr/local/include/opencv2/video/background_segm.hpp \
351 | /usr/local/include/opencv2/features2d/features2d.hpp \
352 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
353 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
354 | /usr/local/include/opencv2/ml/ml.hpp \
355 | /usr/local/include/opencv2/highgui/highgui_c.h \
356 | /usr/local/include/opencv2/highgui/highgui.hpp \
357 | /usr/local/include/opencv2/contrib/contrib.hpp \
358 | /usr/local/include/opencv2/contrib/retina.hpp \
359 | /usr/local/include/opencv2/contrib/openfabmap.hpp \
360 | util.h
361 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o main.o main.cpp
362 |
363 | mainwindow.o: mainwindow.cpp mainwindow.h \
364 | imageprocessor.h \
365 | /usr/local/include/opencv2/opencv.hpp \
366 | /usr/local/include/opencv2/core/core_c.h \
367 | /usr/local/include/opencv2/core/types_c.h \
368 | /usr/local/include/opencv2/core/core.hpp \
369 | /usr/local/include/opencv2/core/version.hpp \
370 | /usr/local/include/opencv2/core/operations.hpp \
371 | /usr/local/include/opencv2/core/mat.hpp \
372 | /usr/local/include/opencv2/flann/miniflann.hpp \
373 | /usr/local/include/opencv2/flann/defines.h \
374 | /usr/local/include/opencv2/flann/config.h \
375 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
376 | /usr/local/include/opencv2/imgproc/types_c.h \
377 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
378 | /usr/local/include/opencv2/photo/photo.hpp \
379 | /usr/local/include/opencv2/photo/photo_c.h \
380 | /usr/local/include/opencv2/video/video.hpp \
381 | /usr/local/include/opencv2/video/tracking.hpp \
382 | /usr/local/include/opencv2/video/background_segm.hpp \
383 | /usr/local/include/opencv2/features2d/features2d.hpp \
384 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
385 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
386 | /usr/local/include/opencv2/ml/ml.hpp \
387 | /usr/local/include/opencv2/highgui/highgui_c.h \
388 | /usr/local/include/opencv2/highgui/highgui.hpp \
389 | /usr/local/include/opencv2/contrib/contrib.hpp \
390 | /usr/local/include/opencv2/contrib/retina.hpp \
391 | /usr/local/include/opencv2/contrib/openfabmap.hpp \
392 | util.h \
393 | ui_mainwindow.h
394 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o mainwindow.o mainwindow.cpp
395 |
396 | util.o: util.cpp util.h \
397 | /usr/local/include/opencv2/opencv.hpp \
398 | /usr/local/include/opencv2/core/core_c.h \
399 | /usr/local/include/opencv2/core/types_c.h \
400 | /usr/local/include/opencv2/core/core.hpp \
401 | /usr/local/include/opencv2/core/version.hpp \
402 | /usr/local/include/opencv2/core/operations.hpp \
403 | /usr/local/include/opencv2/core/mat.hpp \
404 | /usr/local/include/opencv2/flann/miniflann.hpp \
405 | /usr/local/include/opencv2/flann/defines.h \
406 | /usr/local/include/opencv2/flann/config.h \
407 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
408 | /usr/local/include/opencv2/imgproc/types_c.h \
409 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
410 | /usr/local/include/opencv2/photo/photo.hpp \
411 | /usr/local/include/opencv2/photo/photo_c.h \
412 | /usr/local/include/opencv2/video/video.hpp \
413 | /usr/local/include/opencv2/video/tracking.hpp \
414 | /usr/local/include/opencv2/video/background_segm.hpp \
415 | /usr/local/include/opencv2/features2d/features2d.hpp \
416 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
417 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
418 | /usr/local/include/opencv2/ml/ml.hpp \
419 | /usr/local/include/opencv2/highgui/highgui_c.h \
420 | /usr/local/include/opencv2/highgui/highgui.hpp \
421 | /usr/local/include/opencv2/contrib/contrib.hpp \
422 | /usr/local/include/opencv2/contrib/retina.hpp \
423 | /usr/local/include/opencv2/contrib/openfabmap.hpp
424 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o util.o util.cpp
425 |
426 | imageprocessor.o: imageprocessor.cpp imageprocessor.h \
427 | /usr/local/include/opencv2/opencv.hpp \
428 | /usr/local/include/opencv2/core/core_c.h \
429 | /usr/local/include/opencv2/core/types_c.h \
430 | /usr/local/include/opencv2/core/core.hpp \
431 | /usr/local/include/opencv2/core/version.hpp \
432 | /usr/local/include/opencv2/core/operations.hpp \
433 | /usr/local/include/opencv2/core/mat.hpp \
434 | /usr/local/include/opencv2/flann/miniflann.hpp \
435 | /usr/local/include/opencv2/flann/defines.h \
436 | /usr/local/include/opencv2/flann/config.h \
437 | /usr/local/include/opencv2/imgproc/imgproc_c.h \
438 | /usr/local/include/opencv2/imgproc/types_c.h \
439 | /usr/local/include/opencv2/imgproc/imgproc.hpp \
440 | /usr/local/include/opencv2/photo/photo.hpp \
441 | /usr/local/include/opencv2/photo/photo_c.h \
442 | /usr/local/include/opencv2/video/video.hpp \
443 | /usr/local/include/opencv2/video/tracking.hpp \
444 | /usr/local/include/opencv2/video/background_segm.hpp \
445 | /usr/local/include/opencv2/features2d/features2d.hpp \
446 | /usr/local/include/opencv2/objdetect/objdetect.hpp \
447 | /usr/local/include/opencv2/calib3d/calib3d.hpp \
448 | /usr/local/include/opencv2/ml/ml.hpp \
449 | /usr/local/include/opencv2/highgui/highgui_c.h \
450 | /usr/local/include/opencv2/highgui/highgui.hpp \
451 | /usr/local/include/opencv2/contrib/contrib.hpp \
452 | /usr/local/include/opencv2/contrib/retina.hpp \
453 | /usr/local/include/opencv2/contrib/openfabmap.hpp \
454 | util.h \
455 | /usr/local/include/opencv2/nonfree/features2d.hpp \
456 | /usr/local/include/opencv2/legacy/legacy.hpp
457 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o imageprocessor.o imageprocessor.cpp
458 |
459 | moc_mainwindow.o: moc_mainwindow.cpp
460 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o moc_mainwindow.o moc_mainwindow.cpp
461 |
462 | moc_imageprocessor.o: moc_imageprocessor.cpp
463 | $(CXX) -c $(CXXFLAGS) $(INCPATH) -o moc_imageprocessor.o moc_imageprocessor.cpp
464 |
465 | ####### Install
466 |
467 | install: FORCE
468 |
469 | uninstall: FORCE
470 |
471 | FORCE:
472 |
473 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Project using MSER and SIFT descriptors to find similar images.
2 |
3 | Every image must have a companion binary mask image (same name with a `_msk.jpg` suffix) delimiting the area of interest.
4 |
5 | Compile using gcc and Qt 4.8
6 |
7 | Youtube video demo
8 |
9 | [](https://www.youtube.com/watch?v=es3IgpfY-Js&list=UUk1U4Z7qvz3xPjFCeif7TvA)
10 |
11 | PS: It has not been verified against the latest Qt 5.x releases.
--------------------------------------------------------------------------------
/imageprocessor.cpp:
--------------------------------------------------------------------------------
1 | #include "imageprocessor.h"
2 | #include
3 | #include "opencv2/opencv.hpp"
4 | #include "opencv2/imgproc/imgproc.hpp"
5 | #include "opencv2/features2d/features2d.hpp"
6 | #include "opencv2/nonfree/features2d.hpp"
7 | #include "opencv2/highgui/highgui.hpp"
8 | #include
9 | #include
10 |
11 | ImageProcessor::ImageProcessor(QObject *parent) : // ctor: plain QObject initialization, no I/O performed here
12 |     QObject(parent)
13 | {
14 |     databaseTrainningComplete = false; // descriptor database starts untrained; DoWork() triggers TrainDatabase() later
15 | }
16 |
17 | QList ImageProcessor::getFilesToProcess(QString sDir){ // recursively collects image paths under sDir, excluding "_msk" mask files. NOTE(review): template argument lost in this dump — presumably QList<QString>; confirm against the original repo
18 |     QDir dir(sDir);
19 |     QFileInfoList list = dir.entryInfoList(); // default filters include "." and ".." entries; assumed skipped in the (garbled) loop below — TODO confirm
20 |     QList filesDB;
21 |
22 |     for (int iList=0;iList files = ImageProcessor::getFilesToProcess(sFilePath); // NOTE(review): extraction garbled this span — the loop header, directory check and recursive descent were collapsed onto one line; original logic not fully visible here
33 |         foreach(QString file, files){
34 |             filesDB.append(file); // accumulate results from the recursive call on a subdirectory
35 |         }
36 |     }
37 |     }
38 |     else
39 |     {
40 |         if(!sFilePath.contains("_msk",Qt::CaseInsensitive)){ // mask companion images (..._msk.jpg) are metadata, not search candidates
41 |             filesDB.append(sFilePath);
42 |         }
43 |     }
44 |     }
45 |
46 |     return filesDB; // flat list of every non-mask file found beneath sDir
47 | }
48 |
49 | void ImageProcessor::RecurseDirectory(const QString& sDir)
50 | {
51 | QDir dir(sDir);
52 | QFileInfoList list = dir.entryInfoList();
53 |
54 | for (int iList=0;iList filesDB = this->getFilesToProcess(sDirPath);
96 | qDebug() << "Files to process: " << filesDB.count();
97 |
98 | for(int i=0;i keypoints;
199 | featureDetector->detect(image, keypoints, mask);
200 | descriptorExtractor->compute(image,keypoints,descriptors);
201 | /*
202 | cv::drawKeypoints(imageProcessed,
203 | keypoints,
204 | imageProcessed,
205 | cv::Scalar(255,255,255),
206 | cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
207 | */
208 |
209 | IplImage iplimg = imageProcessed;
210 |
211 | QImage qImgProcessed = util->IplImage2QImage(&iplimg);
212 | emit captureFrame(qImgProcessed);
213 |
214 | metaDescResult.descriptor = descriptors;
215 | metaDescResult.keypoints = keypoints;
216 | metaDescResult.filePath = sImagePath;
217 | return metaDescResult;
218 | }
219 |
220 |
221 | void ImageProcessor::Search(){ // matches the query image's descriptors against every trained image and ranks results by good-match count
222 |     emit searchStart(); // notify UI that a (potentially long) search began
223 |     searchResult.clear(); // previous results are discarded; searchResult maps good-match count -> file path
224 |
225 |     BruteForceMatcher< L2 > matcher; // NOTE(review): template argument stripped in this dump — presumably L2<float>; confirm against the original repo
226 |     //BFMatcher matcher(NORM_L2);
227 |
228 |     //FlannBasedMatcher matcher;
229 |     //std::vector vetDescriptors;
230 |     //vector< DMatch > matches;
231 |     //cv::Mat imgPesquisada;
232 |     meta_descriptor metaDescSearch; // NOTE(review): default-constructed and never populated — the code that extracts the query descriptor (below) is commented out, so matching runs on an empty Mat; verify against the original repo
233 |
234 |     /*
235 |     foreach(meta_descriptor metaDesk, imgDescDatabase){
236 |         vetDescriptors.push_back(metaDesk.descriptor);
237 |     }
238 |
239 |
240 |     metaDescSearch = this->ExtractDescriptorImage(*searchImgFilePath);
241 |     imgPesquisada = imread(searchImgFilePath->toStdString(), CV_LOAD_IMAGE_COLOR);
242 |
243 |     matcher.add(vetDescriptors);
244 |     matcher.train();
245 |     matcher.knnMatch(metaDescSearch.descriptor,matches);
246 |
247 |     foreach(DMatch match, matches){
248 |         qDebug() << "Match Image: " << imgDescDatabase.at(match.trainIdx).filePath;
249 |         qDebug() << "Match: " << match.distance;
250 |
251 |     }
252 |     */
253 |
254 |
255 |     foreach(meta_descriptor metaDesk, imgDescDatabase){ // one brute-force match pass per trained image
256 |         vector< DMatch > matches;
257 |         matcher.match(metaDescSearch.descriptor,metaDesk.descriptor, matches);
258 |         //matcher.match(metaDescSearch.descriptor,trainDescriptors,matches);
259 |         double max_dist = 0; double min_dist = 100; // min_dist seeded at 100: SIFT L2 distances are assumed below that — TODO confirm
260 |         foreach(DMatch match, matches){ // first pass: find the distance extremes for this image pair
261 |
262 |             double dist = match.distance;
263 |             if( dist < min_dist ) min_dist = dist;
264 |             if( dist > max_dist ) max_dist = dist;
265 |         }
266 |
267 |         std::vector< DMatch > good_matches; // second pass: keep only matches close to the best one
268 |
269 |         foreach(DMatch match, matches){
270 |             //SIFT = 2.2
271 |             //SURF = 6
272 |             if( match.distance <=2.2*min_dist ) // descriptor-specific ratio threshold (2.2 tuned for SIFT per the note above)
273 |             {
274 |                 good_matches.push_back(match);
275 |             }
276 |
277 |         }
278 |         qDebug() << "Image: " << metaDesk.filePath;
279 |         qDebug() << "Good Matches: " << good_matches.size();
280 |
281 |         searchResult.insert(good_matches.size(), metaDesk.filePath); // keyed by count so iteration yields a ranking; presumably a QMultiMap — TODO confirm declaration
282 |     }
283 |
284 |     emit searchComplete(); // UI reads searchResult after this signal
285 | }
286 |
287 | void ImageProcessor::DoWork(){
288 |
289 | if(!databaseTrainningComplete){
290 | TrainDatabase(trainningDirPath);
291 | }
292 | }
293 |
--------------------------------------------------------------------------------
/imageprocessor.h:
--------------------------------------------------------------------------------
1 | #ifndef IMAGEPROCESSOR_H
2 | #define IMAGEPROCESSOR_H
3 |
4 | #include
5 | #include
6 | #include
7 | #include
8 | #include
9 | #include "util.h"
10 | #include "OpenCL/opencl.h"
11 |
12 | using namespace cv;
13 |
// Bundles an image file with its extracted feature data, so a trained
// database entry can be matched and reported back by file path.
struct meta_descriptor {
    QString filePath;        // path of the source image on disk
    cv::Mat descriptor;      // descriptor matrix (one row per keypoint)
    // NOTE(review): the element type appears stripped by the dump —
    // the original was probably std::vector<KeyPoint>.
    std::vector keypoints;
};
19 |
// Qt worker object that builds a feature-descriptor database from a directory
// of images and matches a query image against it.  Intended to run on a
// QThread (wired up via DoSetup) and driven through the DoWork() slot;
// progress/results are reported through the signals below.
//
// NOTE(review): several template argument lists in this declaration look
// stripped by the dump — the QList/QMap members were probably
// QList<QString>, QList<meta_descriptor> and QMap<int, QString>; confirm
// against the original header.
class ImageProcessor : public QObject
{
    Q_OBJECT
public:
    explicit ImageProcessor(QObject *parent = 0);
    // Collects image files under sDir, skipping mask files ("_msk" in name).
    QList getFilesToProcess(QString sDir);
    // Detects keypoints and computes descriptors for a single image file.
    meta_descriptor ExtractDescriptorImage(QString sImagePath);
    // Connects this worker to cThread before the thread is started.
    void DoSetup(QThread &cThread);
    void ClearTranningDatabase();
    // Matches the query image's descriptors against the trained database.
    void Search();
    QList imgDescDatabase;          // trained descriptors, one entry per image
    QMap searchResult;              // good-match count -> matched image path
    QString searchImgFilePath;      // query image chosen by the user
    QString trainningDirPath;       // root directory used for training
    bool Stop;                      // cooperative cancellation flag
    Util *util;                     // image conversion helpers (IplImage->QImage)
    bool databaseTrainningComplete; // guards DoWork() against re-training

private:
    Mat frameAnt;                   // previous frame (state between calls)
    Mat element;                    // morphology structuring element
    bool Interseccao(Mat img1, Mat img2);
    void RecurseDirectory(const QString& sDir);
    void TrainDatabase(QString sDirPath);



signals:
    void captureOriginalFrame(QImage);
    void captureFrame(QImage);      // processed frame for UI preview
    void trainningComplete();
    void trainningStart();
    void trainningClear();
    void searchStart();
    void searchComplete();



public slots:
    void DoWork();                  // thread entry point: trains the database

};
62 |
63 | #endif // IMAGEPROCESSOR_H
64 |
--------------------------------------------------------------------------------
/kaze.cpp:
--------------------------------------------------------------------------------
1 |
2 | //=============================================================================
3 | //
4 | // KAZE.cpp
5 | // Author: Pablo F. Alcantarilla
6 | // Institution: University d'Auvergne
7 | // Address: Clermont Ferrand, France
8 | // Date: 21/01/2012
9 | // Email: pablofdezalc@gmail.com
10 | //
11 | // KAZE Features Copyright 2012, Pablo F. Alcantarilla
12 | // All Rights Reserved
13 | // See LICENSE for the license information
14 | //=============================================================================
15 |
16 | /**
17 | * @file KAZE.cpp
18 | * @brief Main class for detecting and describing features in a nonlinear
19 | * scale space
20 | * @date Jan 21, 2012
21 | * @author Pablo F. Alcantarilla
22 | * @update 2013-03-28 by Yuhua Zou
23 | * Code optimization has been implemented via using
24 | * OpenMP and Boost Thread for multi-threading,
25 | * changing the way to access matrix elements, etc.
26 | */
27 |
28 | #include "kaze.h"
29 | #include "kaze_config.h"
30 | #include
31 | #include
32 |
33 | #if HAVE_BOOST_THREADING
34 | #include
35 | #include
36 | #include
37 | #endif
38 |
39 | // Namespaces
40 | using namespace std;
41 |
42 | /**
43 | * @brief KAZE default constructor
44 | * @note The constructor does not allocate memory for the nonlinear scale space
45 | */
46 | //KAZE::KAZE(void)
47 | //{
48 | // soffset = DEFAULT_SCALE_OFFSET;
49 | // sderivatives = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES;
50 | // omax = DEFAULT_OCTAVE_MAX;
51 | // nsublevels = DEFAULT_NSUBLEVELS;
52 | // save_scale_space = DEFAULT_SAVE_SCALE_SPACE;
53 | // verbosity = DEFAULT_VERBOSITY;
54 | // kcontrast = DEFAULT_KCONTRAST;
55 | // descriptor_mode = DEFAULT_DESCRIPTOR_MODE;
56 | // use_upright = DEFAULT_UPRIGHT;
57 | // use_extended = DEFAULT_EXTENDED;
58 | // diffusivity = DEFAULT_DIFFUSIVITY_TYPE;
59 | // tkcontrast = 0.0;
60 | // tnlscale = 0.0;
61 | // tdetector = 0.0;
62 | // tmderivatives = 0.0;
63 | // tdescriptor = 0.0;
64 | // tsubpixel = 0.0;
65 | // img_width = 0;
66 | // img_height = 0;
67 | //}
68 |
69 | //*******************************************************************************
70 | //*******************************************************************************
71 |
72 | /**
73 | * @brief KAZE constructor with input options
74 | * @param options KAZE configuration options
75 | * @note The constructor allocates memory for the nonlinear scale space
76 | */
77 | KAZE::KAZE(toptions &options)
78 | {
79 | img_width = options.img_width;
80 | img_height = options.img_height;
81 | soffset = options.soffset;
82 | sderivatives = options.sderivatives;
83 | omax = options.omax;
84 | nsublevels = options.nsublevels;
85 | save_scale_space = options.save_scale_space;
86 | verbosity = options.verbosity;
87 | dthreshold = options.dthreshold;
88 | diffusivity = options.diffusivity;
89 | descriptor_mode = options.descriptor;
90 | use_upright = options.upright;
91 | use_extended = options.extended;
92 |
93 | kcontrast = DEFAULT_KCONTRAST;
94 | tkcontrast = 0.0;
95 | tnlscale = 0.0;
96 | tdetector = 0.0;
97 | tmderivatives = 0.0;
98 | tdresponse = 0.0;
99 | tdescriptor = 0.0;
100 |
101 | omax = options.omax > 0 ? options.omax : cvRound(std::log( (double)std::min( options.img_height, options.img_width ) ) / std::log(2.) - 2);
102 | //dthreshold = DEFAULT_DETECTOR_THRESHOLD + floorf( img_width/256.0f ) * 0.0015;
103 |
104 | // Now allocate memory for the evolution
105 | Allocate_Memory_Evolution();
106 | }
107 |
108 | //*******************************************************************************
109 | //*******************************************************************************
110 |
111 | /**
112 | * @brief This method allocates the memory for the nonlinear diffusion evolution
113 | */
void KAZE::Allocate_Memory_Evolution(void)
{
    // Allocate the dimension of the matrices for the evolution.
    // One tevolution entry per (octave, sublevel) pair, all kept at full
    // image resolution.
    for( int i = 0; i <= omax-1; i++ )
    {
        for( int j = 0; j <= nsublevels-1; j++ )
        {
            tevolution aux;
            aux.Lx = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Ly = cv::Mat::zeros(img_height,img_width,CV_32F);

            aux.Lxx = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lxy = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lyy = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lflow = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lt = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lsmooth = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Lstep = cv::Mat::zeros(img_height,img_width,CV_32F);
            aux.Ldet = cv::Mat::zeros(img_height,img_width,CV_32F);

            // Scale grows geometrically with octave/sublevel; etime is the
            // equivalent diffusion time t = sigma^2 / 2.
            aux.esigma = soffset*pow((float)2.0,(float)(j)/(float)(nsublevels) + i);
            aux.etime = 0.5*(aux.esigma*aux.esigma);
            aux.sigma_size = fRound(aux.esigma);

            aux.octave = i;
            aux.sublevel = j;
            evolution.push_back(aux);
        }
    }

    // Allocate memory for the auxiliary variables that are used in the AOS scheme.
    // NOTE(review): Ltx is allocated transposed (width x height), unlike Lty —
    // presumably intentional for the column-wise AOS pass; confirm against the
    // AOS_Step_Scalar implementation.
    Ltx = cv::Mat::zeros(img_width,img_height,CV_32F);
    Lty = cv::Mat::zeros(img_height,img_width,CV_32F);
    px = cv::Mat::zeros(img_height,img_width,CV_32F);
    py = cv::Mat::zeros(img_height,img_width,CV_32F);
    ax = cv::Mat::zeros(img_height,img_width,CV_32F);
    ay = cv::Mat::zeros(img_height,img_width,CV_32F);
    bx = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    by = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    qr = cv::Mat::zeros(img_height-1,img_width,CV_32F);
    qc = cv::Mat::zeros(img_height,img_width-1,CV_32F);

}
157 |
158 | //*******************************************************************************
159 | //*******************************************************************************
160 |
161 | /**
162 | * @brief This method creates the nonlinear scale space for a given image
163 | * @param img Input image for which the nonlinear scale space needs to be created
164 | * @return 0 if the nonlinear scale space was created successfully. -1 otherwise
165 | */
int KAZE::Create_Nonlinear_Scale_Space(const cv::Mat &img)
{
    if( verbosity == true )
    {
        std::cout << "\n> Creating nonlinear scale space." << std::endl;
    }

    double t2 = 0.0, t1 = 0.0;

    // Allocate_Memory_Evolution() must have run first (normally from the ctor).
    if( evolution.size() == 0 )
    {
        std::cout << "---> Error generating the nonlinear scale space!!" << std::endl;
        std::cout << "---> Firstly you need to call KAZE::Allocate_Memory_Evolution()" << std::endl;
        return -1;
    }

    int64 start_t1 = cv::getTickCount();

    // Copy the original image to the first level of the evolution
    if( verbosity == true )
    {
        std::cout << "-> Perform the Gaussian smoothing." << std::endl;
    }

    // Base level: smooth the input with sigma = soffset, and keep a second
    // smoothing (sigma = sderivatives) for derivative computations.
    img.copyTo(evolution[0].Lt);
    Gaussian_2D_Convolution(evolution[0].Lt,evolution[0].Lt,0,0,soffset);
    Gaussian_2D_Convolution(evolution[0].Lt,evolution[0].Lsmooth,0,0,sderivatives);

    // Firstly compute the kcontrast factor
    Compute_KContrast(evolution[0].Lt,KCONTRAST_PERCENTILE);

    t2 = cv::getTickCount();
    tkcontrast = 1000.0 * (t2 - start_t1) / cv::getTickFrequency();

    if( verbosity == true )
    {
        std::cout << "-> Computed K-contrast factor. Execution time (ms): " << tkcontrast << std::endl;
        std::cout << "-> Now computing the nonlinear scale space!!" << std::endl;
    }

    // Now generate the rest of evolution levels: each level diffuses the
    // previous one forward by (etime[i] - etime[i-1]).
    for( unsigned int i = 1; i < evolution.size(); i++ )
    {
        Gaussian_2D_Convolution(evolution[i-1].Lt,evolution[i].Lsmooth,0,0,sderivatives);

        // Compute the Gaussian derivatives Lx and Ly
        Image_Derivatives_Scharr(evolution[i].Lsmooth,evolution[i].Lx,1,0);
        Image_Derivatives_Scharr(evolution[i].Lsmooth,evolution[i].Ly,0,1);

        // Compute the conductivity equation
        // (0 = Perona-Malik g1, 1 = Perona-Malik g2, 2 = Weickert).
        if( diffusivity == 0 )
        {
            PM_G1(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }
        else if( diffusivity == 1 )
        {
            PM_G2(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }
        else if( diffusivity == 2 )
        {
            Weickert_Diffusivity(evolution[i].Lsmooth,evolution[i].Lflow,evolution[i].Lx,evolution[i].Ly,kcontrast);
        }

        // Perform the evolution step with AOS
#if HAVE_BOOST_THREADING
        AOS_Step_Scalar_Parallel(evolution[i].Lt,evolution[i-1].Lt,evolution[i].Lflow,evolution[i].etime-evolution[i-1].etime);
#else
        AOS_Step_Scalar(evolution[i].Lt,evolution[i-1].Lt,evolution[i].Lflow,evolution[i].etime-evolution[i-1].etime);
#endif

        if( verbosity == true )
        {
            std::cout << "--> Computed image evolution step " << i << " Evolution time: " << evolution[i].etime <<
                         " Sigma: " << evolution[i].esigma << std::endl;
        }
    }


    t2 = cv::getTickCount();
    tnlscale = 1000.0*(t2-start_t1) / cv::getTickFrequency();

    if( verbosity == true )
    {
        std::cout << "> Computed the nonlinear scale space. Execution time (ms): " << tnlscale << std::endl;
    }

    return 0;
}
254 |
255 | //*************************************************************************************
256 | //*************************************************************************************
257 |
258 | /**
259 | * @brief This method computes the k contrast factor
260 | * @param img Input image
261 | * @param kpercentile Percentile of the gradient histogram
262 | */
263 | void KAZE::Compute_KContrast(const cv::Mat &img, const float &kpercentile)
264 | {
265 | if( verbosity == true )
266 | {
267 | std::cout << "-> Computing Kcontrast factor." << std::endl;
268 | }
269 |
270 | if( COMPUTE_KCONTRAST == true )
271 | {
272 | kcontrast = Compute_K_Percentile(img,kpercentile,sderivatives,KCONTRAST_NBINS,0,0);
273 | }
274 |
275 | if( verbosity == true )
276 | {
277 | std::cout << "--> kcontrast = " << kcontrast << std::endl;
278 | }
279 | }
280 |
281 | //*************************************************************************************
282 | //*************************************************************************************
283 |
284 | /**
285 | * @brief This method computes the multiscale derivatives for the nonlinear scale space
286 | */
// Computes the first and second order derivatives (Lx, Ly, Lxx, Lxy, Lyy)
// for every scale level, then scale-normalises them by sigma / sigma^2.
//
// NOTE(review): every Mat::ptr call below appears to have lost its template
// argument in this dump (the original was probably ptr<float>(0)); as
// written, ptr(0) returns uchar* and the code would not compile as shown.
void KAZE::Compute_Multiscale_Derivatives(void)
{
    int64 t1 = cv::getTickCount();

    int N = img_width * img_height;   // pixels per level

    // Levels are independent of each other, so process them in parallel.
    #pragma omp parallel for
    for( int i = 0; i < evolution.size(); i++ )
    {
        if( verbosity == true )
        {
            std::cout << "--> Multiscale derivatives. Sigma ("<< i <<"): " << evolution[i].sigma_size << ". Thread: " << omp_get_thread_num() << std::endl;
        }

        // Compute multiscale derivatives for the detector
        Compute_Scharr_Derivatives(evolution[i].Lsmooth,evolution[i].Lx,1,0,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lsmooth,evolution[i].Ly,0,1,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lx,evolution[i].Lxx,1,0,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Ly,evolution[i].Lyy,0,1,evolution[i].sigma_size);
        Compute_Scharr_Derivatives(evolution[i].Lx,evolution[i].Lxy,0,1,evolution[i].sigma_size);

        // Scale normalisation: first derivatives by sigma, second by sigma^2.
        int esigma = evolution[i].sigma_size, esigma2 = esigma*esigma;
        for ( int j = 0; j < N; j++ )
        {
            *( evolution[i].Lx.ptr(0)+j ) *= esigma;
            *( evolution[i].Ly.ptr(0)+j ) *= esigma;
            *( evolution[i].Lxx.ptr(0)+j ) *= esigma2;
            *( evolution[i].Lxy.ptr(0)+j ) *= esigma2;
            *( evolution[i].Lyy.ptr(0)+j ) *= esigma2;
        }
    }

    int64 t2 = cv::getTickCount();
    tmderivatives = 1000.0 * (t2-t1) / cv::getTickFrequency();

}
323 |
324 | //*************************************************************************************
325 | //*************************************************************************************
326 |
327 | /**
328 | * @brief This method computes the feature detector response for the nonlinear scale space
329 | * @note We use the Hessian determinant as feature detector
330 | */
// Fills Ldet for every level with the scale-normalised Hessian determinant
// (Lxx*Lyy - Lxy^2), which is the KAZE detector response.
//
// NOTE(review): the Mat::ptr calls appear stripped of a <float> template
// argument by this dump; as written ptr(0) would return uchar*.
void KAZE::Compute_Detector_Response(void)
{
    float lxx = 0.0, lxy = 0.0, lyy = 0.0;
    float *ptr;

    int64 t1 = cv::getTickCount(), t2 = 0;

    // Firstly compute the multiscale derivatives
    Compute_Multiscale_Derivatives();

    t2 = cv::getTickCount();
    tdresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed multiscale derivatives. Execution time (ms): " << tdresponse << std::endl;
    }
    t1 = cv::getTickCount();

    // Per-pixel determinant of the Hessian, iterating each level as a flat
    // buffer of N = width*height floats.
    int N = img_width * img_height;
    for( int i = 0; i < evolution.size(); i++ )
    {
        for( int jx = 0; jx < N; jx++ )
        {
            // Get values of lxx,lxy,and lyy
            ptr = evolution[i].Lxx.ptr(0);
            lxx = ptr[jx];

            ptr = evolution[i].Lxy.ptr(0);
            lxy = ptr[jx];

            ptr = evolution[i].Lyy.ptr(0);
            lyy = ptr[jx];

            // Compute ldet
            ptr = evolution[i].Ldet.ptr(0);
            ptr[jx] = (lxx*lyy-lxy*lxy);
        }
    }

    t2 = cv::getTickCount();
    // NOTE(review): tdresponse is overwritten here, discarding the derivative
    // timing recorded above — presumably only the last phase is reported.
    tdresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed Hessian determinant. Execution time (ms): " << tdresponse << std::endl;
    }
}
377 |
378 | //*************************************************************************************
379 | //*************************************************************************************
380 |
381 | /**
382 | * @brief This method selects interesting keypoints through the nonlinear scale space
383 | */
// Top-level detection pipeline: detector response -> scale-space extrema ->
// optional subpixel refinement.  Detected keypoints are appended to kpts.
// NOTE(review): the vector's element type looks stripped by the dump —
// the original signature was probably std::vector<Ipoint> &kpts.
void KAZE::Feature_Detection(std::vector &kpts)
{
    if( verbosity == true )
    {
        std::cout << "\n> Detecting features. " << std::endl;
    }
    int64 t1 = cv::getTickCount(), t2 = 0;

    // Firstly compute the detector response for each pixel and scale level
    Compute_Detector_Response();

    t2 = cv::getTickCount();
    double tresponse = 1000.0 * (t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed detector response. Execution time (ms):" << tresponse << std::endl;
    }
    int64 t13 = cv::getTickCount();

    // Find scale space extrema
    Determinant_Hessian_Parallel(kpts);

    t2 = cv::getTickCount();
    double thessian = 1000.0 * (t2-t13) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Computed Hessian determinant. Execution time (ms):" << thessian << std::endl;
    }

    // Perform some subpixel refinement
    if( SUBPIXEL_REFINEMENT == true )
    {
        Do_Subpixel_Refinement(kpts);
    }

    t2 = cv::getTickCount();
    tdetector = 1000.0*(t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "> Feature detection done. Execution time (ms): " << tdetector << std::endl;
    }

}
427 |
428 | //*************************************************************************************
429 | //*************************************************************************************
430 |
431 | /**
432 | * @brief This method performs the detection of keypoints by using the normalized
433 | * score of the Hessian determinant through the nonlinear scale space
434 | * @note We compute features for each of the nonlinear scale space level in a different processing thread
435 | */
// Finds scale-space extrema of the Hessian determinant at every interior
// level (the first/last levels have no lower/upper neighbour), one level per
// thread, then flattens the per-level results into kpts.
// NOTE(review): element types look stripped by the dump — the signature was
// probably std::vector<Ipoint>&, and the swap line below
// vector<vector<Ipoint> >(...).
void KAZE::Determinant_Hessian_Parallel(std::vector &kpts)
{
    // NOTE(review): most of these locals (level, dist, smax, id_repeated,
    // left_x/right_x/up_y/down_y, is_extremum/is_repeated/is_out) are unused
    // here — they look like leftovers from a non-parallel version.
    unsigned int level = 0;
    float dist = 0.0, smax = 3.0;
    int npoints = 0, id_repeated = 0;
    int left_x = 0, right_x = 0, up_y = 0, down_y = 0;
    bool is_extremum = false, is_repeated = false, is_out = false;
    int64 t1 = cv::getTickCount(), t2 = 0;

    // Delete the memory of the vector of keypoints vectors
    // In case we use the same kaze object for multiple images
    // (one slot per interior scale level).
    vector >(evolution.size()-2, vector()).swap(kpts_par);

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Init kpts_par time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << std::endl;
    }
    t1 = cv::getTickCount();

    // Find extremum at each scale level
#if HAVE_BOOST_THREADING
    // Create multi-thread
    boost::thread_group mthreads;

    for( unsigned int i = 1; i < evolution.size()-1; i++ )
    {
        // Create the thread for finding extremum at i scale level
        mthreads.create_thread(boost::bind(&KAZE::Find_Extremum_Threading,this,i));
    }

    // Wait for the threads
    mthreads.join_all();
#else
    #pragma omp parallel for
    for( int n = 1; n < evolution.size()-1; n++ )
    {
        Find_Extremum_Threading(n);
    }
#endif

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Find extremum time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << std::endl;
    }
    t1 = cv::getTickCount();

    // Now fill the vector of keypoints
    // Duplicate keypoints will be filtered out after
    // the whole Feature Detection procedure is finished
    for( int i = 0; i < kpts_par.size(); i++ )
    {
        for( int j = 0; j < kpts_par[i].size(); j++ )
        {
            kpts.push_back(kpts_par[i][j]);
        }
    }
    npoints = kpts.size();

    t2 = cv::getTickCount();
    if( verbosity == true )
    {
        std::cout << "--> Fill the vector of keypoints time: "<< 1000.0*(t2-t1)/cv::getTickFrequency() << ". kpts size: " << kpts.size() << std::endl;
    }

}
503 |
504 | //*************************************************************************************
505 | //*************************************************************************************
506 |
507 | /**
508 | * @brief This method is called by the thread which is responsible of finding extrema
509 | * at a given nonlinear scale level
510 | * @param level Index in the nonlinear scale space evolution
511 | */
// Scans one scale level for local maxima of the detector response (Ldet),
// checking the 3x3 neighbourhood at the same, lower and upper scales, and
// stores accepted points into kpts_par[level-1].
// NOTE(review): the Mat::ptr calls look stripped of a <float> template
// argument by this dump.
void KAZE::Find_Extremum_Threading(int level)
{
    float value = 0.0, smax = 3.0;   // smax scales the scan border
    bool is_extremum = false;

    // Keep a margin so neighbourhood checks never leave the image.
    int border = fRound(smax * evolution[level].esigma) + 1;
    int ix = border, jx = border;
    while (ix < img_height-border)
    {
        jx = border;
        while (jx < img_width-border)
        {
            is_extremum = false;
            value = *(evolution[level].Ldet.ptr(ix)+jx);

            // Filter the points with the detector threshold
            if( value > dthreshold && value >= DEFAULT_MIN_DETECTOR_THRESHOLD )
            {
                // Cheap pre-check against the left neighbour before the full
                // neighbourhood tests.
                if( value >= *(evolution[level].Ldet.ptr(ix)+jx-1) )
                {
                    // First check on the same scale
                    if( Check_Maximum_Neighbourhood(evolution[level].Ldet,1,value,ix,jx,1))
                    {
                        // Now check on the lower scale
                        if( Check_Maximum_Neighbourhood(evolution[level-1].Ldet,1,value,ix,jx,0) )
                        {
                            // Now check on the upper scale
                            if( Check_Maximum_Neighbourhood(evolution[level+1].Ldet,1,value,ix,jx,0) )
                            {
                                is_extremum = true;
                            }
                        }
                    }
                }
            }

            // Add the point of interest!!
            if( is_extremum == true )
            {
                Ipoint point;
                point.xf = jx; point.yf = ix;      // subpixel coords (refined later)
                point.x = jx; point.y = ix;        // integer pixel coords
                point.dresponse = fabs(value);
                point.scale = evolution[level].esigma;
                point.sigma_size = evolution[level].sigma_size;
                point.tevolution = evolution[level].etime;
                point.octave = evolution[level].octave;
                point.sublevel = evolution[level].sublevel;
                point.level = level;
                point.descriptor_mode = descriptor_mode;
                point.angle = 0.0;

                // Set the sign of the laplacian (trace of the Hessian)
                if( (*(evolution[level].Lxx.ptr(ix)+jx) + *(evolution[level].Lyy.ptr(ix)+jx)) > 0 )
                {
                    point.laplacian = 0;
                }
                else
                {
                    point.laplacian = 1;
                }

                // level-1 because kpts_par only holds interior levels.
                kpts_par[level-1].push_back(point);
            }
            jx++;
        }
        ix++;
    }
}
581 |
582 | //*************************************************************************************
583 | //*************************************************************************************
584 |
585 | /**
586 | * @brief This method performs subpixel refinement of the detected keypoints
587 | */
// Refines each keypoint's position and scale by fitting a quadratic to the
// detector response: solves A*d = b where A is the 3x3 Hessian of Ldet in
// (x, y, scale) and b is minus its gradient.  Offsets larger than one unit
// mark the point as unstable (response zeroed so it is filtered out later).
// NOTE(review): the Mat::ptr calls and the keypts vector element type look
// stripped of template arguments by this dump (probably ptr<float> and
// std::vector<Ipoint>).
void KAZE::Do_Subpixel_Refinement(std::vector &keypts)
{

    float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0;
    float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0;
    int x = 0, y = 0, step = 1;
    cv::Mat A = cv::Mat::zeros(3,3,CV_32F);
    cv::Mat b = cv::Mat::zeros(3,1,CV_32F);
    cv::Mat dst = cv::Mat::zeros(3,1,CV_32F);


    int64 t1 = cv::getTickCount();

    for( unsigned int i = 0; i < keypts.size(); i++ )
    {
        x = keypts[i].x;
        y = keypts[i].y;

        // Compute the gradient (central differences in x, y and scale)
        Dx = (1.0/(2.0*step))*(*(evolution[keypts[i].level].Ldet.ptr(y)+x+step)
            -*(evolution[keypts[i].level].Ldet.ptr(y)+x-step));
        Dy = (1.0/(2.0*step))*(*(evolution[keypts[i].level].Ldet.ptr(y+step)+x)
            -*(evolution[keypts[i].level].Ldet.ptr(y-step)+x));
        Ds = 0.5*(*(evolution[keypts[i].level+1].Ldet.ptr(y)+x)
            -*(evolution[keypts[i].level-1].Ldet.ptr(y)+x));

        // Compute the Hessian (second central differences)
        Dxx = (1.0/(step*step))*(*(evolution[keypts[i].level].Ldet.ptr(y)+x+step)
            + *(evolution[keypts[i].level].Ldet.ptr(y)+x-step)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr(y)+x)));

        Dyy = (1.0/(step*step))*(*(evolution[keypts[i].level].Ldet.ptr(y+step)+x)
            + *(evolution[keypts[i].level].Ldet.ptr(y-step)+x)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr(y)+x)));

        Dss = *(evolution[keypts[i].level+1].Ldet.ptr(y)+x)
            + *(evolution[keypts[i].level-1].Ldet.ptr(y)+x)
            -2.0*(*(evolution[keypts[i].level].Ldet.ptr(y)+x));

        Dxy = (1.0/(4.0*step))*(*(evolution[keypts[i].level].Ldet.ptr(y+step)+x+step)
            +(*(evolution[keypts[i].level].Ldet.ptr(y-step)+x-step)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level].Ldet.ptr(y-step)+x+step)
            +(*(evolution[keypts[i].level].Ldet.ptr(y+step)+x-step)));

        Dxs = (1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr(y)+x+step)
            +(*(evolution[keypts[i].level-1].Ldet.ptr(y)+x-step)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr(y)+x-step)
            +(*(evolution[keypts[i].level-1].Ldet.ptr(y)+x+step)));

        Dys = (1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr(y+step)+x)
            +(*(evolution[keypts[i].level-1].Ldet.ptr(y-step)+x)))
            -(1.0/(4.0*step))*(*(evolution[keypts[i].level+1].Ldet.ptr(y-step)+x)
            +(*(evolution[keypts[i].level-1].Ldet.ptr(y+step)+x)));

        // Solve the linear system (A is the symmetric 3x3 Hessian)
        *(A.ptr(0)) = Dxx;
        *(A.ptr(1)+1) = Dyy;
        *(A.ptr(2)+2) = Dss;

        *(A.ptr(0)+1) = *(A.ptr(1)) = Dxy;
        *(A.ptr(0)+2) = *(A.ptr(2)) = Dxs;
        *(A.ptr(1)+2) = *(A.ptr(2)+1) = Dys;

        *(b.ptr(0)) = -Dx;
        *(b.ptr(1)) = -Dy;
        *(b.ptr(2)) = -Ds;

        cv::solve(A,b,dst,cv::DECOMP_LU);

        // Accept only refinements within one pixel / one sublevel.
        if( fabs(*(dst.ptr(0))) <= 1.0
            && fabs(*(dst.ptr(1))) <= 1.0
            && fabs(*(dst.ptr(2))) <= 1.0 )
        {
            keypts[i].xf += *(dst.ptr(0));
            keypts[i].yf += *(dst.ptr(1));
            keypts[i].x = fRound(keypts[i].xf);
            keypts[i].y = fRound(keypts[i].yf);

            // Interpolated scale from the fractional sublevel offset.
            dsc = keypts[i].octave + (keypts[i].sublevel+*(dst.ptr(2)))/((float)(DEFAULT_NSUBLEVELS));
            keypts[i].scale = soffset*pow((float)2.0,dsc);
        }
        // Delete the point since its not stable
        else
        {
            keypts[i].dresponse = 0; // Keypoints with zero response will be filtered out
        }
    }
    int64 t2 = cv::getTickCount();
    tsubpixel = 1000.0*(t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "-> Subpixel refinement done. Execution time (ms): " << tsubpixel << std::endl;
    }

}
683 |
684 | //*************************************************************************************
685 | //*************************************************************************************
686 |
687 | /**
688 | * @brief This method performs feature suppression based on 2D distance
689 | * @param kpts Vector of keypoints
690 | * @param mdist Maximum distance in pixels
691 | */
// Non-maximum suppression in 2D: for every pair of keypoints closer than
// mdist pixels, keeps the one with the stronger detector response and
// discards the other.  kpts is rewritten in place with the survivors.
// NOTE(review): element types look stripped by the dump — probably
// std::vector<Ipoint> for kpts/aux and std::vector<int> for to_delete.
void KAZE::Feature_Suppression_Distance(std::vector &kpts, float mdist)
{
    std::vector aux;        // survivors
    std::vector to_delete;  // indices marked for removal (may repeat)
    float dist = 0.0, x1 = 0.0, y1 = 0.0, x2 = 0.0, y2 = 0.0;
    bool found = false;

    // O(n^2) pairwise comparison; only pairs (i, j>i) are examined.
    for( unsigned int i = 0; i < kpts.size(); i++ )
    {
        x1 = kpts[i].xf;
        y1 = kpts[i].yf;

        for( unsigned int j = i+1; j < kpts.size(); j++ )
        {
            x2 = kpts[j].xf;
            y2 = kpts[j].yf;

            dist = sqrt(pow(x1-x2,2)+pow(y1-y2,2));

            if( dist < mdist )
            {
                if( fabs(kpts[i].dresponse) >= fabs(kpts[j].dresponse) )
                {
                    to_delete.push_back(j);
                }
                else
                {
                    to_delete.push_back(i);
                    // i itself is suppressed — no need to compare it further.
                    break;
                }
            }
        }
    }

    // Keep only the points whose index was never marked for deletion.
    for( unsigned int i = 0; i < kpts.size(); i++ )
    {
        found = false;

        for( unsigned int j = 0; j < to_delete.size(); j++ )
        {
            if( i == to_delete[j] )
            {
                found = true;
                break;
            }
        }

        if( found == false )
        {
            aux.push_back(kpts[i]);
        }
    }

    kpts.clear();
    kpts = aux;
    aux.clear();
}
749 |
750 | //*************************************************************************************
751 | //*************************************************************************************
752 |
753 | /**
754 | * @brief This method computes the set of descriptors through the nonlinear scale space
755 | * @param kpts Vector of keypoints
756 | */
// Computes a descriptor for every keypoint, dispatching on three options:
// use_upright (skip orientation estimation), use_extended (64- vs
// 128-element descriptor) and descriptor_mode (0 = SURF, 1 = M-SURF,
// 2 = G-SURF).  Each branch processes the keypoints in parallel via OpenMP.
// NOTE(review): the kpts element type looks stripped by the dump —
// the original signature was probably std::vector<Ipoint> &kpts.
void KAZE::Feature_Description(std::vector &kpts)
{
    if( verbosity == true )
    {
        std::cout << "\n> Computing feature descriptors. " << std::endl;
    }

    int64 t1 = cv::getTickCount();

    // It is not necessary to compute the orientation
    if( use_upright == true )
    {
        // Compute the descriptor (upright, 64 elements)
        if( use_extended == false )
        {
            if( descriptor_mode == 0 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_SURF_Upright_Descriptor_64(kpts[i]);
                }
            }
            else if( descriptor_mode == 1 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_MSURF_Upright_Descriptor_64(kpts[i]);
                }
            }
            else if( descriptor_mode == 2 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_GSURF_Upright_Descriptor_64(kpts[i]);
                }
            }
        }
        // Upright, extended (128 elements)
        else
        {
            if( descriptor_mode == 0 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_SURF_Upright_Descriptor_128(kpts[i]);
                }
            }
            else if( descriptor_mode == 1 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_MSURF_Upright_Descriptor_128(kpts[i]);
                }
            }
            else if( descriptor_mode == 2 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    kpts[i].angle = 0.0;
                    Get_GSURF_Upright_Descriptor_128(kpts[i]);
                }
            }
        }
    }
    else
    {
        // Compute the descriptor with a dominant orientation (64 elements)
        if( use_extended == false )
        {
            if( descriptor_mode == 0 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_SURF_Descriptor_64(kpts[i]);
                }
            }
            else if( descriptor_mode == 1 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_MSURF_Descriptor_64(kpts[i]);
                }
            }
            else if( descriptor_mode == 2 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_GSURF_Descriptor_64(kpts[i]);
                }
            }
        }
        // Oriented, extended (128 elements)
        else
        {
            if( descriptor_mode == 0 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_SURF_Descriptor_128(kpts[i]);
                }
            }
            else if( descriptor_mode == 1 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_MSURF_Descriptor_128(kpts[i]);
                }
            }
            else if( descriptor_mode == 2 )
            {
                #pragma omp parallel for
                for( int i = 0; i < kpts.size(); i++ )
                {
                    Compute_Main_Orientation_SURF(kpts[i]);
                    Get_GSURF_Descriptor_128(kpts[i]);
                }
            }
        }
    }

    int64 t2 = cv::getTickCount();
    tdescriptor = 1000.0*(t2-t1) / cv::getTickFrequency();
    if( verbosity == true )
    {
        std::cout << "> Computed feature descriptors. Execution time (ms): " << tdescriptor << std::endl;
    }

}
904 |
905 | //*************************************************************************************
906 | //*************************************************************************************
907 |
908 | /**
909 | * @brief This method computes the main orientation for a given keypoint
910 | * @param kpt Input keypoint
911 | * @note The orientation is computed using a similar approach as described in the
912 | * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
913 | */
914 | void KAZE::Compute_Main_Orientation_SURF(Ipoint &kpt)
915 | {
916 | int ix = 0, iy = 0, idx = 0, s = 0;
917 | unsigned int level = kpt.level;
918 | float xf = 0.0, yf = 0.0, gweight = 0.0;
919 | std::vector resX(109), resY(109), Ang(109); // 109 is the maximum grids of size 1 in a circle of radius 6
920 |
921 | // Variables for computing the dominant direction
922 | float sumX = 0.0, sumY = 0.0, bestX = 0.0, bestY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0;
923 |
924 | // Get the information from the keypoint
925 | xf = kpt.xf;
926 | yf = kpt.yf;
927 | s = kpt.scale;
928 |
929 | // Calculate derivatives responses for points within radius of 6*scale
930 | for(int i = -6; i <= 6; ++i)
931 | {
932 | for(int j = -6; j <= 6; ++j)
933 | {
934 | if(i*i + j*j < 36) // the grid is in the circle
935 | {
936 | iy = fRound(yf + j*s);
937 | ix = fRound(xf + i*s);
938 |
939 | if( iy >= 0 && iy < img_height && ix >= 0 && ix < img_width )
940 | {
941 | gweight = gaussian(iy-yf,ix-xf,3.5*s);
942 | resX[idx] = gweight*(*(evolution[level].Lx.ptr(iy)+ix));
943 | resY[idx] = gweight*(*(evolution[level].Ly.ptr(iy)+ix));
944 | Ang[idx] = Get_Angle(resX[idx],resY[idx]);
945 | }
946 | else
947 | {
948 | resX[idx] = 0.0;
949 | resY[idx] = 0.0;
950 | Ang[idx] = 0.0;
951 | }
952 |
953 | ++idx;
954 | }
955 | }
956 | }
957 |
958 | // Loop slides pi/3 window around feature point
959 | for( ang1 = 0; ang1 < M2_PI; ang1+=0.15f)
960 | {
961 | ang2 =(ang1+PI/3.0f > M2_PI ? ang1-5.0f*PI/3.0f : ang1+PI/3.0f);
962 | sumX = sumY = 0.f;
963 |
964 | for( unsigned int k = 0; k < Ang.size(); ++k)
965 | {
966 | // Get angle from the x-axis of the sample point
967 | const float & ang = Ang[k];
968 |
969 | // Determine whether the point is within the window
970 | if( ang1 < ang2 && ang1 < ang && ang < ang2)
971 | {
972 | sumX+=resX[k];
973 | sumY+=resY[k];
974 | }
975 | else if (ang2 < ang1 &&
976 | ((ang > 0 && ang < ang2) || (ang > ang1 && ang < M2_PI) ))
977 | {
978 | sumX+=resX[k];
979 | sumY+=resY[k];
980 | }
981 | }
982 |
983 | // if the vector produced from this window is longer than all
984 | // previous vectors then this forms the new dominant direction
985 | float sumxy = sumX*sumX + sumY*sumY;
986 | if( sumxy > max )
987 | {
988 | // store largest orientation
989 | max = sumxy;
990 | bestX = sumX, bestY = sumY;
991 | }
992 | }
993 |
994 | kpt.angle = Get_Angle(bestX, bestY);
995 |
996 | }
997 |
998 | //*************************************************************************************
999 | //*************************************************************************************
1000 |
1001 | /**
1002 | * @brief This method computes the upright descriptor (no rotation invariant)
1003 | * of the provided keypoint
1004 | * @param kpt Input keypoint
1005 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional
1006 | * Gaussian weighting is performed. The descriptor is inspired from Bay et al.,
1007 | * Speeded Up Robust Features, ECCV, 2006
1008 | */
1009 | void KAZE::Get_SURF_Upright_Descriptor_64(Ipoint &kpt)
1010 | {
1011 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0;
1012 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0;
1013 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1014 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1015 | int dsize = 0, level = 0;
1016 |
1017 | // Set the descriptor size and the sample and pattern sizes
1018 | dsize = kpt.descriptor_size = 64;
1019 | sample_step = 5;
1020 | pattern_size = 10;
1021 |
1022 | // Get the information from the keypoint
1023 | yf = kpt.yf;
1024 | xf = kpt.xf;
1025 | scale = kpt.scale;
1026 | level = kpt.level;
1027 |
1028 | // Allocate the memory for the vector
1029 | kpt.descriptor = vector(kpt.descriptor_size);
1030 |
1031 | // Calculate descriptor for this interest point
1032 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1033 | {
1034 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1035 | {
1036 | dx=dy=mdx=mdy=0.0;
1037 |
1038 | for(float k = i; k < i + sample_step; k+=0.5)
1039 | {
1040 | for(float l = j; l < j + sample_step; l+=0.5)
1041 | {
1042 | sample_y = k*scale + yf;
1043 | sample_x = l*scale + xf;
1044 |
1045 | y1 = (int)(sample_y-.5);
1046 | x1 = (int)(sample_x-.5);
1047 |
1048 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1049 |
1050 | y2 = (int)(sample_y+.5);
1051 | x2 = (int)(sample_x+.5);
1052 |
1053 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1054 |
1055 | fx = sample_x-x1;
1056 | fy = sample_y-y1;
1057 |
1058 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1059 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1060 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1061 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1062 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1063 |
1064 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1065 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1066 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1067 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1068 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1069 |
1070 | // Sum the derivatives to the cumulative descriptor
1071 | dx += rx;
1072 | dy += ry;
1073 | mdx += fabs(rx);
1074 | mdy += fabs(ry);
1075 | }
1076 | }
1077 |
1078 | // Add the values to the descriptor vector
1079 | kpt.descriptor[dcount++] = dx;
1080 | kpt.descriptor[dcount++] = dy;
1081 | kpt.descriptor[dcount++] = mdx;
1082 | kpt.descriptor[dcount++] = mdy;
1083 |
1084 | // Store the current length^2 of the vector
1085 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy;
1086 | }
1087 | }
1088 |
1089 | // convert to unit vector
1090 | len = sqrt(len);
1091 |
1092 | for(int i = 0; i < dsize; i++)
1093 | {
1094 | kpt.descriptor[i] /= len;
1095 | }
1096 |
1097 | if( USE_CLIPPING_NORMALIZATION == true )
1098 | {
1099 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1100 | }
1101 | }
1102 |
1103 | //*************************************************************************************
1104 | //*************************************************************************************
1105 |
1106 | /**
1107 | * @brief This method computes the descriptor of the provided keypoint given the
1108 | * main orientation
1109 | * @param kpt Input keypoint
1110 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64. No additional
1111 | * Gaussian weighting is performed. The descriptor is inspired from Bay et al.,
1112 | * Speeded Up Robust Features, ECCV, 2006
1113 | */
1114 | void KAZE::Get_SURF_Descriptor_64(Ipoint &kpt)
1115 | {
1116 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0;
1117 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0;
1118 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
1119 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1120 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1121 | int dsize = 0, level = 0;
1122 |
1123 | // Set the descriptor size and the sample and pattern sizes
1124 | dsize = kpt.descriptor_size = 64;
1125 | sample_step = 5;
1126 | pattern_size = 10;
1127 |
1128 | // Get the information from the keypoint
1129 | yf = kpt.yf;
1130 | xf = kpt.xf;
1131 | scale = kpt.scale;
1132 | angle = kpt.angle;
1133 | level = kpt.level;
1134 | co = cos(angle);
1135 | si = sin(angle);
1136 |
1137 | // Allocate the memory for the vector
1138 | kpt.descriptor = vector(kpt.descriptor_size);
1139 |
1140 | // Calculate descriptor for this interest point
1141 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1142 | {
1143 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1144 | {
1145 | dx=dy=mdx=mdy=0.0;
1146 |
1147 | for(float k = i; k < i + sample_step; k+=0.5)
1148 | {
1149 | for(float l = j; l < j + sample_step; l+=0.5)
1150 | {
1151 | // Get the coordinates of the sample point on the rotated axis
1152 | sample_y = yf + (l*scale*co + k*scale*si);
1153 | sample_x = xf + (-l*scale*si + k*scale*co);
1154 |
1155 | y1 = (int)(sample_y-.5);
1156 | x1 = (int)(sample_x-.5);
1157 |
1158 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1159 |
1160 | y2 = (int)(sample_y+.5);
1161 | x2 = (int)(sample_x+.5);
1162 |
1163 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1164 |
1165 | fx = sample_x-x1;
1166 | fy = sample_y-y1;
1167 |
1168 |
1169 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1170 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1171 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1172 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1173 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1174 |
1175 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1176 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1177 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1178 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1179 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1180 |
1181 | // Get the x and y derivatives on the rotated axis
1182 | rry = rx*co + ry*si;
1183 | rrx = -rx*si + ry*co;
1184 |
1185 | // Sum the derivatives to the cumulative descriptor
1186 | dx += rrx;
1187 | dy += rry;
1188 | mdx += fabs(rrx);
1189 | mdy += fabs(rry);
1190 | }
1191 | }
1192 |
1193 | // Add the values to the descriptor vector
1194 | kpt.descriptor[dcount++] = dx;
1195 | kpt.descriptor[dcount++] = dy;
1196 | kpt.descriptor[dcount++] = mdx;
1197 | kpt.descriptor[dcount++] = mdy;
1198 |
1199 | // Store the current length^2 of the vector
1200 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy;
1201 | }
1202 | }
1203 |
1204 | // convert to unit vector
1205 | len = sqrt(len);
1206 |
1207 | for(int i = 0; i < dsize; i++)
1208 | {
1209 | kpt.descriptor[i] /= len;
1210 | }
1211 |
1212 | if( USE_CLIPPING_NORMALIZATION == true )
1213 | {
1214 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1215 | }
1216 |
1217 | }
1218 |
1219 | //*************************************************************************************
1220 | //*************************************************************************************
1221 |
1222 | /**
1223 | * @brief This method computes the upright descriptor (not rotation invariant) of
1224 | * the provided keypoint
1225 | * @param kpt Input keypoint
1226 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
1227 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
1228 | * ECCV 2008
1229 | */
1230 | void KAZE::Get_MSURF_Upright_Descriptor_64(Ipoint &kpt)
1231 | {
1232 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
1233 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
1234 | float sample_x = 0.0, sample_y = 0.0;
1235 | int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
1236 | int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
1237 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1238 | int dsize = 0, level = 0;
1239 |
1240 | // Subregion centers for the 4x4 gaussian weighting
1241 | float cx = -0.5, cy = 0.5;
1242 |
1243 | // Set the descriptor size and the sample and pattern sizes
1244 | dsize = kpt.descriptor_size = 64;
1245 | sample_step = 5;
1246 | pattern_size = 12;
1247 |
1248 | // Get the information from the keypoint
1249 | yf = kpt.yf;
1250 | xf = kpt.xf;
1251 | level = kpt.level;
1252 | scale = kpt.scale;
1253 |
1254 | // Allocate the memory for the vector
1255 | kpt.descriptor = vector(kpt.descriptor_size);
1256 |
1257 | i = -8;
1258 |
1259 | // Calculate descriptor for this interest point
1260 | // Area of size 24 s x 24 s
1261 | while(i < pattern_size)
1262 | {
1263 | j = -8;
1264 | i = i-4;
1265 |
1266 | cx += 1.0;
1267 | cy = -0.5;
1268 |
1269 | while(j < pattern_size)
1270 | {
1271 | dx=dy=mdx=mdy=0.0;
1272 | cy += 1.0;
1273 | j = j-4;
1274 |
1275 | ky = i + sample_step;
1276 | kx = j + sample_step;
1277 |
1278 | ys = yf + (ky*scale);
1279 | xs = xf + (kx*scale);
1280 |
1281 | for(int k = i; k < i+9; k++)
1282 | {
1283 | for (int l = j; l < j+9; l++)
1284 | {
1285 | sample_y = k*scale + yf;
1286 | sample_x = l*scale + xf;
1287 |
1288 | //Get the gaussian weighted x and y responses
1289 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale);
1290 |
1291 | y1 = (int)(sample_y-.5);
1292 | x1 = (int)(sample_x-.5);
1293 |
1294 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1295 |
1296 | y2 = (int)(sample_y+.5);
1297 | x2 = (int)(sample_x+.5);
1298 |
1299 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1300 |
1301 | fx = sample_x-x1;
1302 | fy = sample_y-y1;
1303 |
1304 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1305 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1306 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1307 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1308 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1309 |
1310 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1311 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1312 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1313 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1314 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1315 |
1316 | rx = gauss_s1*rx;
1317 | ry = gauss_s1*ry;
1318 |
1319 | // Sum the derivatives to the cumulative descriptor
1320 | dx += rx;
1321 | dy += ry;
1322 | mdx += fabs(rx);
1323 | mdy += fabs(ry);
1324 | }
1325 | }
1326 |
1327 | // Add the values to the descriptor vector
1328 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f);
1329 |
1330 | kpt.descriptor[dcount++] = dx*gauss_s2;
1331 | kpt.descriptor[dcount++] = dy*gauss_s2;
1332 | kpt.descriptor[dcount++] = mdx*gauss_s2;
1333 | kpt.descriptor[dcount++] = mdy*gauss_s2;
1334 |
1335 | len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;
1336 |
1337 | j += 9;
1338 | }
1339 |
1340 | i += 9;
1341 | }
1342 |
1343 | // convert to unit vector
1344 | len = sqrt(len);
1345 |
1346 | for(int i = 0; i < dsize; i++)
1347 | {
1348 | kpt.descriptor[i] /= len;
1349 | }
1350 |
1351 | if( USE_CLIPPING_NORMALIZATION == true )
1352 | {
1353 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1354 | }
1355 | }
1356 |
1357 | //*************************************************************************************
1358 | //*************************************************************************************
1359 |
1360 | /**
1361 | * @brief This method computes the descriptor of the provided keypoint given the
1362 | * main orientation of the keypoint
1363 | * @param kpt Input keypoint
1364 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
1365 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
1366 | * ECCV 2008
1367 | */
1368 | void KAZE::Get_MSURF_Descriptor_64(Ipoint &kpt)
1369 | {
1370 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
1371 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
1372 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
1373 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1374 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
1375 | int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
1376 | int dsize = 0, level = 0;
1377 |
1378 | // Subregion centers for the 4x4 gaussian weighting
1379 | float cx = -0.5, cy = 0.5;
1380 |
1381 | // Set the descriptor size and the sample and pattern sizes
1382 | dsize = kpt.descriptor_size = 64;
1383 | sample_step = 5;
1384 | pattern_size = 12;
1385 |
1386 | // Get the information from the keypoint
1387 | yf = kpt.yf;
1388 | xf = kpt.xf;
1389 | scale = kpt.scale;
1390 | angle = kpt.angle;
1391 | level = kpt.level;
1392 | co = cos(angle);
1393 | si = sin(angle);
1394 |
1395 | // Allocate the memory for the vector
1396 | kpt.descriptor = vector(kpt.descriptor_size);
1397 |
1398 | i = -8;
1399 |
1400 | // Calculate descriptor for this interest point
1401 | // Area of size 24 s x 24 s
1402 | while(i < pattern_size)
1403 | {
1404 | j = -8;
1405 | i = i-4;
1406 |
1407 | cx += 1.0;
1408 | cy = -0.5;
1409 |
1410 | while(j < pattern_size)
1411 | {
1412 | dx=dy=mdx=mdy=0.0;
1413 | cy += 1.0;
1414 | j = j - 4;
1415 |
1416 | ky = i + sample_step;
1417 | kx = j + sample_step;
1418 |
1419 | xs = xf + (-kx*scale*si + ky*scale*co);
1420 | ys = yf + (kx*scale*co + ky*scale*si);
1421 |
1422 | for (int k = i; k < i + 9; ++k)
1423 | {
1424 | for (int l = j; l < j + 9; ++l)
1425 | {
1426 | // Get coords of sample point on the rotated axis
1427 | sample_y = yf + (l*scale*co + k*scale*si);
1428 | sample_x = xf + (-l*scale*si + k*scale*co);
1429 |
1430 | // Get the gaussian weighted x and y responses
1431 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale);
1432 |
1433 | y1 = fRound(sample_y-.5);
1434 | x1 = fRound(sample_x-.5);
1435 |
1436 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1437 |
1438 | y2 = fRound(sample_y+.5);
1439 | x2 = fRound(sample_x+.5);
1440 |
1441 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1442 |
1443 | fx = sample_x-x1;
1444 | fy = sample_y-y1;
1445 |
1446 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1447 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1448 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1449 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1450 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1451 |
1452 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1453 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1454 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1455 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1456 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1457 |
1458 | // Get the x and y derivatives on the rotated axis
1459 | rry = gauss_s1*(rx*co + ry*si);
1460 | rrx = gauss_s1*(-rx*si + ry*co);
1461 |
1462 | // Sum the derivatives to the cumulative descriptor
1463 | dx += rrx;
1464 | dy += rry;
1465 | mdx += fabs(rrx);
1466 | mdy += fabs(rry);
1467 | }
1468 | }
1469 |
1470 | // Add the values to the descriptor vector
1471 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f);
1472 | kpt.descriptor[dcount++] = dx*gauss_s2;
1473 | kpt.descriptor[dcount++] = dy*gauss_s2;
1474 | kpt.descriptor[dcount++] = mdx*gauss_s2;
1475 | kpt.descriptor[dcount++] = mdy*gauss_s2;
1476 |
1477 | len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;
1478 |
1479 | j += 9;
1480 | }
1481 |
1482 | i += 9;
1483 | }
1484 |
1485 | // convert to unit vector
1486 | len = sqrt(len);
1487 |
1488 | for(int i = 0; i < dsize; i++)
1489 | {
1490 | kpt.descriptor[i] /= len;
1491 | }
1492 |
1493 | if( USE_CLIPPING_NORMALIZATION == true )
1494 | {
1495 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1496 | }
1497 | }
1498 |
1499 | //*************************************************************************************
1500 | //*************************************************************************************
1501 |
1502 | /**
1503 | * @brief This method computes the upright G-SURF descriptor of the provided keypoint
1504 | * given the main orientation
1505 | * @param kpt Input keypoint
 * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64.
 * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and
 * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013
1509 | */
1510 | void KAZE::Get_GSURF_Upright_Descriptor_64(Ipoint &kpt)
1511 | {
1512 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0;
1513 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0;
1514 | float sample_x = 0.0, sample_y = 0.0;
1515 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1516 | float lvv = 0.0, lww = 0.0, modg = 0.0;
1517 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1518 | int dsize = 0, level = 0;
1519 |
1520 | // Set the descriptor size and the sample and pattern sizes
1521 | dsize = kpt.descriptor_size = 64;
1522 | sample_step = 5;
1523 | pattern_size = 10;
1524 |
1525 | // Get the information from the keypoint
1526 | yf = kpt.yf;
1527 | xf = kpt.xf;
1528 | scale = kpt.scale;
1529 | level = kpt.level;
1530 |
1531 | // Allocate the memory for the vector
1532 | kpt.descriptor = vector(kpt.descriptor_size);
1533 |
1534 | // Calculate descriptor for this interest point
1535 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1536 | {
1537 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1538 | {
1539 | dx=dy=mdx=mdy=0.0;
1540 |
1541 | for(float k = i; k < i + sample_step; k+=0.5)
1542 | {
1543 | for(float l = j; l < j + sample_step; l+=0.5)
1544 | {
1545 | // Get the coordinates of the sample point on the rotated axis
1546 | sample_y = yf + l*scale;
1547 | sample_x = xf + k*scale;
1548 |
1549 | y1 = (int)(sample_y-.5);
1550 | x1 = (int)(sample_x-.5);
1551 |
1552 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1553 |
1554 | y2 = (int)(sample_y+.5);
1555 | x2 = (int)(sample_x+.5);
1556 |
1557 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1558 |
1559 | fx = sample_x-x1;
1560 | fy = sample_y-y1;
1561 |
1562 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1563 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1564 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1565 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1566 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1567 |
1568 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1569 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1570 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1571 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1572 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1573 |
1574 | modg = pow(rx,2) + pow(ry,2);
1575 |
1576 | if( modg != 0.0 )
1577 | {
1578 | res1 = *(evolution[level].Lxx.ptr(y1)+x1);
1579 | res2 = *(evolution[level].Lxx.ptr(y1)+x2);
1580 | res3 = *(evolution[level].Lxx.ptr(y2)+x1);
1581 | res4 = *(evolution[level].Lxx.ptr(y2)+x2);
1582 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1583 |
1584 | res1 = *(evolution[level].Lxy.ptr(y1)+x1);
1585 | res2 = *(evolution[level].Lxy.ptr(y1)+x2);
1586 | res3 = *(evolution[level].Lxy.ptr(y2)+x1);
1587 | res4 = *(evolution[level].Lxy.ptr(y2)+x2);
1588 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1589 |
1590 | res1 = *(evolution[level].Lyy.ptr(y1)+x1);
1591 | res2 = *(evolution[level].Lyy.ptr(y1)+x2);
1592 | res3 = *(evolution[level].Lyy.ptr(y2)+x1);
1593 | res4 = *(evolution[level].Lyy.ptr(y2)+x2);
1594 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1595 |
1596 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
1597 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg);
1598 |
1599 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
1600 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg);
1601 | }
1602 | else
1603 | {
1604 | lww = 0.0;
1605 | lvv = 0.0;
1606 | }
1607 |
1608 | // Sum the derivatives to the cumulative descriptor
1609 | dx += lww;
1610 | dy += lvv;
1611 | mdx += fabs(lww);
1612 | mdy += fabs(lvv);
1613 | }
1614 | }
1615 |
1616 | // Add the values to the descriptor vector
1617 | kpt.descriptor[dcount++] = dx;
1618 | kpt.descriptor[dcount++] = dy;
1619 | kpt.descriptor[dcount++] = mdx;
1620 | kpt.descriptor[dcount++] = mdy;
1621 |
1622 | // Store the current length^2 of the vector
1623 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy;
1624 | }
1625 | }
1626 |
1627 | // convert to unit vector
1628 | len = sqrt(len);
1629 |
1630 | for(int i = 0; i < dsize; i++)
1631 | {
1632 | kpt.descriptor[i] /= len;
1633 | }
1634 |
1635 | if( USE_CLIPPING_NORMALIZATION == true )
1636 | {
1637 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1638 | }
1639 |
1640 | }
1641 |
1642 | //*************************************************************************************
1643 | //*************************************************************************************
1644 |
1645 | /**
1646 | * @brief This method computes the G-SURF descriptor of the provided keypoint given the
1647 | * main orientation
1648 | * @param kpt Input keypoint
 * @note Rectangular grid of 20 s x 20 s. Descriptor Length 64.
 * G-SURF descriptor as described in Pablo F. Alcantarilla, Luis M. Bergasa and
 * Andrew J. Davison, Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013
1652 | */
1653 | void KAZE::Get_GSURF_Descriptor_64(Ipoint &kpt)
1654 | {
1655 | float scale = 0.0, dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0;
1656 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, len = 0.0, xf = 0.0, yf = 0.0;
1657 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
1658 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1659 | float lvv = 0.0, lww = 0.0, modg = 0.0;
1660 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1661 | int dsize = 0, level = 0;
1662 |
1663 | // Set the descriptor size and the sample and pattern sizes
1664 | dsize = kpt.descriptor_size = 64;
1665 | sample_step = 5;
1666 | pattern_size = 10;
1667 |
1668 | // Get the information from the keypoint
1669 | yf = kpt.yf;
1670 | xf = kpt.xf;
1671 | scale = kpt.scale;
1672 | angle = kpt.angle;
1673 | level = kpt.level;
1674 | co = cos(angle);
1675 | si = sin(angle);
1676 |
1677 | // Allocate the memory for the vector
1678 | kpt.descriptor = vector(kpt.descriptor_size);
1679 |
1680 | // Calculate descriptor for this interest point
1681 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1682 | {
1683 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1684 | {
1685 | dx=dy=mdx=mdy=0.0;
1686 |
1687 | for(float k = i; k < i + sample_step; k+=0.5)
1688 | {
1689 | for(float l = j; l < j + sample_step; l+=0.5)
1690 | {
1691 | // Get the coordinates of the sample point on the rotated axis
1692 | sample_y = yf + (l*scale*co + k*scale*si);
1693 | sample_x = xf + (-l*scale*si + k*scale*co);
1694 |
1695 | y1 = (int)(sample_y-.5);
1696 | x1 = (int)(sample_x-.5);
1697 |
1698 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1699 |
1700 | y2 = (int)(sample_y+.5);
1701 | x2 = (int)(sample_x+.5);
1702 |
1703 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1704 |
1705 | fx = sample_x-x1;
1706 | fy = sample_y-y1;
1707 |
1708 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1709 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1710 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1711 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1712 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1713 |
1714 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1715 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1716 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1717 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1718 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1719 |
1720 | modg = pow(rx,2) + pow(ry,2);
1721 |
1722 | if( modg != 0.0 )
1723 | {
1724 | res1 = *(evolution[level].Lxx.ptr(y1)+x1);
1725 | res2 = *(evolution[level].Lxx.ptr(y1)+x2);
1726 | res3 = *(evolution[level].Lxx.ptr(y2)+x1);
1727 | res4 = *(evolution[level].Lxx.ptr(y2)+x2);
1728 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1729 |
1730 | res1 = *(evolution[level].Lxy.ptr(y1)+x1);
1731 | res2 = *(evolution[level].Lxy.ptr(y1)+x2);
1732 | res3 = *(evolution[level].Lxy.ptr(y2)+x1);
1733 | res4 = *(evolution[level].Lxy.ptr(y2)+x2);
1734 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1735 |
1736 | res1 = *(evolution[level].Lyy.ptr(y1)+x1);
1737 | res2 = *(evolution[level].Lyy.ptr(y1)+x2);
1738 | res3 = *(evolution[level].Lyy.ptr(y2)+x1);
1739 | res4 = *(evolution[level].Lyy.ptr(y2)+x2);
1740 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1741 |
1742 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
1743 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg);
1744 |
1745 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
1746 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg);
1747 | }
1748 | else
1749 | {
1750 | lww = 0.0;
1751 | lvv = 0.0;
1752 | }
1753 |
1754 | // Sum the derivatives to the cumulative descriptor
1755 | dx += lww;
1756 | dy += lvv;
1757 | mdx += fabs(lww);
1758 | mdy += fabs(lvv);
1759 | }
1760 | }
1761 |
1762 | // Add the values to the descriptor vector
1763 | kpt.descriptor[dcount++] = dx;
1764 | kpt.descriptor[dcount++] = dy;
1765 | kpt.descriptor[dcount++] = mdx;
1766 | kpt.descriptor[dcount++] = mdy;
1767 |
1768 | // Store the current length^2 of the vector
1769 | len += dx*dx + dy*dy + mdx*mdx + mdy*mdy;
1770 | }
1771 | }
1772 |
1773 | // convert to unit vector
1774 | len = sqrt(len);
1775 |
1776 | for(int i = 0; i < dsize; i++)
1777 | {
1778 | kpt.descriptor[i] /= len;
1779 | }
1780 |
1781 | if( USE_CLIPPING_NORMALIZATION == true )
1782 | {
1783 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1784 | }
1785 |
1786 | }
1787 |
1788 | //*************************************************************************************
1789 | //*************************************************************************************
1790 |
1791 | /**
1792 | * @brief This method computes the upright extended descriptor (no rotation invariant)
1793 | * of the provided keypoint
1794 | * @param kpt Input keypoint
1795 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional
1796 | * Gaussian weighting is performed. The descriptor is inspired from Bay et al.,
1797 | * Speeded Up Robust Features, ECCV, 2006
1798 | */
1799 | void KAZE::Get_SURF_Upright_Descriptor_128(Ipoint &kpt)
1800 | {
1801 | float scale = 0.0;
1802 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0;
1803 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1804 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
1805 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
1806 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1807 | int dsize = 0, level = 0;
1808 |
1809 | // Set the descriptor size and the sample and pattern sizes
1810 | dsize = kpt.descriptor_size = 128;
1811 | sample_step = 5;
1812 | pattern_size = 10;
1813 |
1814 | // Get the information from the keypoint
1815 | yf = kpt.yf;
1816 | xf = kpt.xf;
1817 | scale = kpt.scale;
1818 | level = kpt.level;
1819 |
1820 | // Allocate the memory for the vector
1821 | kpt.descriptor = vector(kpt.descriptor_size);
1822 |
1823 | // Calculate descriptor for this interest point
1824 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1825 | {
1826 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1827 | {
1828 | dxp=dxn=mdxp=mdxn=0.0;
1829 | dyp=dyn=mdyp=mdyn=0.0;
1830 |
1831 | for(float k = i; k < i + sample_step; k+=0.5)
1832 | {
1833 | for(float l = j; l < j + sample_step; l+=0.5)
1834 | {
1835 | sample_y = k*scale + yf;
1836 | sample_x = l*scale + xf;
1837 |
1838 | y1 = (int)(sample_y-.5);
1839 | x1 = (int)(sample_x-.5);
1840 |
1841 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1842 |
1843 | y2 = (int)(sample_y+.5);
1844 | x2 = (int)(sample_x+.5);
1845 |
1846 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1847 |
1848 | fx = sample_x-x1;
1849 | fy = sample_y-y1;
1850 |
1851 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1852 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1853 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1854 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1855 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1856 |
1857 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1858 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1859 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1860 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1861 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1862 |
1863 | // Sum the derivatives to the cumulative descriptor
1864 | if( ry >= 0.0 )
1865 | {
1866 | dxp += rx;
1867 | mdxp += fabs(rx);
1868 | }
1869 | else
1870 | {
1871 | dxn += rx;
1872 | mdxn += fabs(rx);
1873 | }
1874 |
1875 | if( rx >= 0.0 )
1876 | {
1877 | dyp += ry;
1878 | mdyp += fabs(ry);
1879 | }
1880 | else
1881 | {
1882 | dyn += ry;
1883 | mdyn += fabs(ry);
1884 | }
1885 | }
1886 | }
1887 |
1888 | // Add the values to the descriptor vector
1889 | kpt.descriptor[dcount++] = dxp;
1890 | kpt.descriptor[dcount++] = dxn;
1891 | kpt.descriptor[dcount++] = mdxp;
1892 | kpt.descriptor[dcount++] = mdxn;
1893 | kpt.descriptor[dcount++] = dyp;
1894 | kpt.descriptor[dcount++] = dyn;
1895 | kpt.descriptor[dcount++] = mdyp;
1896 | kpt.descriptor[dcount++] = mdyn;
1897 |
1898 | // Store the current length^2 of the vector
1899 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
1900 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn;
1901 | }
1902 | }
1903 |
1904 | // convert to unit vector
1905 | len = sqrt(len);
1906 |
1907 | for(int i = 0; i < dsize; i++)
1908 | {
1909 | kpt.descriptor[i] /= len;
1910 | }
1911 |
1912 | if( USE_CLIPPING_NORMALIZATION == true )
1913 | {
1914 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
1915 | }
1916 | }
1917 |
1918 | //*************************************************************************************
1919 | //*************************************************************************************
1920 |
1921 | /**
1922 | * @brief This method computes the extended descriptor of the provided keypoint given the
1923 | * main orientation
1924 | * @param kpt Input keypoint
1925 | * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional
1926 | * Gaussian weighting is performed. The descriptor is inspired from Bay et al.,
1927 | * Speeded Up Robust Features, ECCV, 2006
1928 | */
1929 | void KAZE::Get_SURF_Descriptor_128(Ipoint &kpt)
1930 | {
1931 | float scale = 0.0;
1932 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0;
1933 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
1934 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1935 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
1936 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
1937 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
1938 | int dsize = 0, level = 0;
1939 |
1940 | // Set the descriptor size and the sample and pattern sizes
1941 | dsize = kpt.descriptor_size = 128;
1942 | sample_step = 5;
1943 | pattern_size = 10;
1944 |
1945 | // Get the information from the keypoint
1946 | yf = kpt.yf;
1947 | xf = kpt.xf;
1948 | scale = kpt.scale;
1949 | angle = kpt.angle;
1950 | level = kpt.level;
1951 | co = cos(angle);
1952 | si = sin(angle);
1953 |
1954 | // Allocate the memory for the vector
1955 | kpt.descriptor = vector(kpt.descriptor_size);
1956 |
1957 | // Calculate descriptor for this interest point
1958 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
1959 | {
1960 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
1961 | {
1962 | dxp=dxn=mdxp=mdxn=0.0;
1963 | dyp=dyn=mdyp=mdyn=0.0;
1964 |
1965 | for(float k = i; k < i + sample_step; k+=0.5)
1966 | {
1967 | for(float l = j; l < j + sample_step; l+=0.5)
1968 | {
1969 | // Get the coordinates of the sample point on the rotated axis
1970 | sample_y = yf + (l*scale*co + k*scale*si);
1971 | sample_x = xf + (-l*scale*si + k*scale*co);
1972 |
1973 | y1 = (int)(sample_y-.5);
1974 | x1 = (int)(sample_x-.5);
1975 |
1976 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
1977 |
1978 | y2 = (int)(sample_y+.5);
1979 | x2 = (int)(sample_x+.5);
1980 |
1981 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
1982 |
1983 | fx = sample_x-x1;
1984 | fy = sample_y-y1;
1985 |
1986 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
1987 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
1988 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
1989 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
1990 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1991 |
1992 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
1993 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
1994 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
1995 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
1996 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
1997 |
1998 | // Get the x and y derivatives on the rotated axis
1999 | rry = rx*co + ry*si;
2000 | rrx = -rx*si + ry*co;
2001 |
2002 | // Sum the derivatives to the cumulative descriptor
2003 | if( rry >= 0.0 )
2004 | {
2005 | dxp += rrx;
2006 | mdxp += fabs(rrx);
2007 | }
2008 | else
2009 | {
2010 | dxn += rrx;
2011 | mdxn += fabs(rrx);
2012 | }
2013 |
2014 | if( rrx >= 0.0 )
2015 | {
2016 | dyp += rry;
2017 | mdyp += fabs(rry);
2018 | }
2019 | else
2020 | {
2021 | dyn += rry;
2022 | mdyn += fabs(rry);
2023 | }
2024 | }
2025 | }
2026 |
2027 | // Add the values to the descriptor vector
2028 | kpt.descriptor[dcount++] = dxp;
2029 | kpt.descriptor[dcount++] = dxn;
2030 | kpt.descriptor[dcount++] = mdxp;
2031 | kpt.descriptor[dcount++] = mdxn;
2032 | kpt.descriptor[dcount++] = dyp;
2033 | kpt.descriptor[dcount++] = dyn;
2034 | kpt.descriptor[dcount++] = mdyp;
2035 | kpt.descriptor[dcount++] = mdyn;
2036 |
2037 | // Store the current length^2 of the vector
2038 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
2039 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn;
2040 | }
2041 | }
2042 |
2043 | // convert to unit vector
2044 | len = sqrt(len);
2045 |
2046 | for(int i = 0; i < dsize; i++)
2047 | {
2048 | kpt.descriptor[i] /= len;
2049 | }
2050 |
2051 | if( USE_CLIPPING_NORMALIZATION == true )
2052 | {
2053 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
2054 | }
2055 | }
2056 |
2057 | //*************************************************************************************
2058 | //*************************************************************************************
2059 |
2060 | /**
2061 | * @brief This method computes the extended upright descriptor (not rotation invariant) of
2062 | * the provided keypoint
2063 | * @param kpt Input keypoint
2064 | * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
2065 | * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
2066 | * ECCV 2008
2067 | */
2068 | void KAZE::Get_MSURF_Upright_Descriptor_128(Ipoint &kpt)
2069 | {
2070 | float scale = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
2071 | float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
2072 | float sample_x = 0.0, sample_y = 0.0;
2073 | int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
2074 | int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
2075 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
2076 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
2077 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
2078 | int dsize = 0, level = 0;
2079 |
2080 | // Subregion centers for the 4x4 gaussian weighting
2081 | float cx = -0.5, cy = 0.5;
2082 |
2083 | // Set the descriptor size and the sample and pattern sizes
2084 | dsize = kpt.descriptor_size = 128;
2085 | sample_step = 5;
2086 | pattern_size = 12;
2087 |
2088 | // Get the information from the keypoint
2089 | yf = kpt.yf;
2090 | xf = kpt.xf;
2091 | level = kpt.level;
2092 | scale = kpt.scale;
2093 |
2094 | // Allocate the memory for the vector
2095 | kpt.descriptor = vector(kpt.descriptor_size);
2096 |
2097 | i = -8;
2098 |
2099 | // Calculate descriptor for this interest point
2100 | // Area of size 24 s x 24 s
2101 | while(i < pattern_size)
2102 | {
2103 | j = -8;
2104 | i = i-4;
2105 |
2106 | cx += 1.0;
2107 | cy = -0.5;
2108 |
2109 | while(j < pattern_size)
2110 | {
2111 | dxp=dxn=mdxp=mdxn=0.0;
2112 | dyp=dyn=mdyp=mdyn=0.0;
2113 |
2114 | cy += 1.0;
2115 | j = j-4;
2116 |
2117 | ky = i + sample_step;
2118 | kx = j + sample_step;
2119 |
2120 | ys = yf + (ky*scale);
2121 | xs = xf + (kx*scale);
2122 |
2123 | for(int k = i; k < i+9; k++)
2124 | {
2125 | for (int l = j; l < j+9; l++)
2126 | {
2127 | sample_y = k*scale + yf;
2128 | sample_x = l*scale + xf;
2129 |
2130 | //Get the gaussian weighted x and y responses
2131 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.50*scale);
2132 |
2133 | y1 = (int)(sample_y-.5);
2134 | x1 = (int)(sample_x-.5);
2135 |
2136 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
2137 |
2138 | y2 = (int)(sample_y+.5);
2139 | x2 = (int)(sample_x+.5);
2140 |
2141 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
2142 |
2143 | fx = sample_x-x1;
2144 | fy = sample_y-y1;
2145 |
2146 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
2147 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
2148 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
2149 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
2150 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2151 |
2152 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
2153 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
2154 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
2155 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
2156 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2157 |
2158 | rx = gauss_s1*rx;
2159 | ry = gauss_s1*ry;
2160 |
2161 | // Sum the derivatives to the cumulative descriptor
2162 | if( ry >= 0.0 )
2163 | {
2164 | dxp += rx;
2165 | mdxp += fabs(rx);
2166 | }
2167 | else
2168 | {
2169 | dxn += rx;
2170 | mdxn += fabs(rx);
2171 | }
2172 |
2173 | if( rx >= 0.0 )
2174 | {
2175 | dyp += ry;
2176 | mdyp += fabs(ry);
2177 | }
2178 | else
2179 | {
2180 | dyn += ry;
2181 | mdyn += fabs(ry);
2182 | }
2183 | }
2184 | }
2185 |
2186 | // Add the values to the descriptor vector
2187 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f);
2188 |
2189 | kpt.descriptor[dcount++] = dxp*gauss_s2;
2190 | kpt.descriptor[dcount++] = dxn*gauss_s2;
2191 | kpt.descriptor[dcount++] = mdxp*gauss_s2;
2192 | kpt.descriptor[dcount++] = mdxn*gauss_s2;
2193 | kpt.descriptor[dcount++] = dyp*gauss_s2;
2194 | kpt.descriptor[dcount++] = dyn*gauss_s2;
2195 | kpt.descriptor[dcount++] = mdyp*gauss_s2;
2196 | kpt.descriptor[dcount++] = mdyn*gauss_s2;
2197 |
2198 | // Store the current length^2 of the vector
2199 | len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
2200 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;
2201 |
2202 | j += 9;
2203 | }
2204 |
2205 | i += 9;
2206 | }
2207 |
2208 | // convert to unit vector
2209 | len = sqrt(len);
2210 |
2211 | for(int i = 0; i < dsize; i++)
2212 | {
2213 | kpt.descriptor[i] /= len;
2214 | }
2215 |
2216 | if( USE_CLIPPING_NORMALIZATION == true )
2217 | {
2218 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
2219 | }
2220 | }
2221 |
2222 | //*************************************************************************************
2223 | //*************************************************************************************
2224 |
/**
 * @brief This method computes the extended M-SURF descriptor of the provided keypoint
 * given the main orientation of the keypoint
 * @param kpt Input keypoint
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
2233 | void KAZE::Get_MSURF_Descriptor_128(Ipoint &kpt)
2234 | {
2235 | float scale = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
2236 | float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
2237 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
2238 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
2239 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
2240 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
2241 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
2242 | int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
2243 | int dsize = 0, level = 0;
2244 |
2245 | // Subregion centers for the 4x4 gaussian weighting
2246 | float cx = -0.5, cy = 0.5;
2247 |
2248 | // Set the descriptor size and the sample and pattern sizes
2249 | dsize = kpt.descriptor_size = 128;
2250 | sample_step = 5;
2251 | pattern_size = 12;
2252 |
2253 | // Get the information from the keypoint
2254 | yf = kpt.yf;
2255 | xf = kpt.xf;
2256 | scale = kpt.scale;
2257 | angle = kpt.angle;
2258 | level = kpt.level;
2259 | co = cos(angle);
2260 | si = sin(angle);
2261 |
2262 | // Allocate the memory for the vector
2263 | kpt.descriptor = vector(kpt.descriptor_size);
2264 |
2265 | i = -8;
2266 |
2267 | // Calculate descriptor for this interest point
2268 | // Area of size 24 s x 24 s
2269 | while(i < pattern_size)
2270 | {
2271 | j = -8;
2272 | i = i-4;
2273 |
2274 | cx += 1.0;
2275 | cy = -0.5;
2276 |
2277 | while(j < pattern_size)
2278 | {
2279 | dxp=dxn=mdxp=mdxn=0.0;
2280 | dyp=dyn=mdyp=mdyn=0.0;
2281 |
2282 | cy += 1.0f;
2283 | j = j - 4;
2284 |
2285 | ky = i + sample_step;
2286 | kx = j + sample_step;
2287 |
2288 | xs = xf + (-kx*scale*si + ky*scale*co);
2289 | ys = yf + (kx*scale*co + ky*scale*si);
2290 |
2291 | for (int k = i; k < i + 9; ++k)
2292 | {
2293 | for (int l = j; l < j + 9; ++l)
2294 | {
2295 | // Get coords of sample point on the rotated axis
2296 | sample_y = yf + (l*scale*co + k*scale*si);
2297 | sample_x = xf + (-l*scale*si + k*scale*co);
2298 |
2299 | // Get the gaussian weighted x and y responses
2300 | gauss_s1 = gaussian(xs-sample_x,ys-sample_y,2.5*scale);
2301 |
2302 | y1 = fRound(sample_y-.5);
2303 | x1 = fRound(sample_x-.5);
2304 |
2305 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
2306 |
2307 | y2 = fRound(sample_y+.5);
2308 | x2 = fRound(sample_x+.5);
2309 |
2310 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
2311 |
2312 | fx = sample_x-x1;
2313 | fy = sample_y-y1;
2314 |
2315 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
2316 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
2317 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
2318 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
2319 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2320 |
2321 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
2322 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
2323 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
2324 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
2325 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2326 |
2327 | // Get the x and y derivatives on the rotated axis
2328 | rry = gauss_s1*(rx*co + ry*si);
2329 | rrx = gauss_s1*(-rx*si + ry*co);
2330 |
2331 | // Sum the derivatives to the cumulative descriptor
2332 | // Sum the derivatives to the cumulative descriptor
2333 | if( rry >= 0.0 )
2334 | {
2335 | dxp += rrx;
2336 | mdxp += fabs(rrx);
2337 | }
2338 | else
2339 | {
2340 | dxn += rrx;
2341 | mdxn += fabs(rrx);
2342 | }
2343 |
2344 | if( rrx >= 0.0 )
2345 | {
2346 | dyp += rry;
2347 | mdyp += fabs(rry);
2348 | }
2349 | else
2350 | {
2351 | dyn += rry;
2352 | mdyn += fabs(rry);
2353 | }
2354 | }
2355 | }
2356 |
2357 | // Add the values to the descriptor vector
2358 | gauss_s2 = gaussian(cx-2.0f,cy-2.0f,1.5f);
2359 |
2360 | kpt.descriptor[dcount++] = dxp*gauss_s2;
2361 | kpt.descriptor[dcount++] = dxn*gauss_s2;
2362 | kpt.descriptor[dcount++] = mdxp*gauss_s2;
2363 | kpt.descriptor[dcount++] = mdxn*gauss_s2;
2364 | kpt.descriptor[dcount++] = dyp*gauss_s2;
2365 | kpt.descriptor[dcount++] = dyn*gauss_s2;
2366 | kpt.descriptor[dcount++] = mdyp*gauss_s2;
2367 | kpt.descriptor[dcount++] = mdyn*gauss_s2;
2368 |
2369 | // Store the current length^2 of the vector
2370 | len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
2371 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;
2372 |
2373 | j += 9;
2374 | }
2375 |
2376 | i += 9;
2377 | }
2378 |
2379 | // convert to unit vector
2380 | len = sqrt(len);
2381 |
2382 | for(int i = 0; i < dsize; i++)
2383 | {
2384 | kpt.descriptor[i] /= len;
2385 | }
2386 |
2387 | if( USE_CLIPPING_NORMALIZATION == true )
2388 | {
2389 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
2390 | }
2391 |
2392 | }
2393 |
2394 | //*************************************************************************************
2395 | //*************************************************************************************
2396 |
/**
 * @brief This method computes the G-SURF upright extended descriptor
 * (no rotation invariant) of the provided keypoint
 * @param kpt Input keypoint
 * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional
 * Gaussian weighting is performed. G-SURF descriptor as described in
 * Pablo F. Alcantarilla, Luis M. Bergasa and Andrew J. Davison,
 * Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013
 */
2405 | void KAZE::Get_GSURF_Upright_Descriptor_128(Ipoint &kpt)
2406 | {
2407 | float scale = 0.0, len = 0.0, xf = 0.0, yf = 0.0, sample_x = 0.0, sample_y = 0.0;
2408 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0, modg = 0.0;
2409 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
2410 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
2411 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0, lvv = 0.0, lww = 0.0;
2412 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
2413 | int dsize = 0, level = 0;
2414 |
2415 | // Set the descriptor size and the sample and pattern sizes
2416 | dsize = kpt.descriptor_size = 128;
2417 | sample_step = 5;
2418 | pattern_size = 10;
2419 |
2420 | // Get the information from the keypoint
2421 | yf = kpt.yf;
2422 | xf = kpt.xf;
2423 | scale = kpt.scale;
2424 | level = kpt.level;
2425 |
2426 | // Allocate the memory for the vector
2427 | kpt.descriptor = vector(kpt.descriptor_size);
2428 |
2429 | // Calculate descriptor for this interest point
2430 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
2431 | {
2432 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
2433 | {
2434 | dxp=dxn=mdxp=mdxn=0.0;
2435 | dyp=dyn=mdyp=mdyn=0.0;
2436 |
2437 | for(float k = i; k < i + sample_step; k+=0.5)
2438 | {
2439 | for(float l = j; l < j + sample_step; l+=0.5)
2440 | {
2441 | sample_y = k*scale + yf;
2442 | sample_x = l*scale + xf;
2443 |
2444 | y1 = (int)(sample_y-.5);
2445 | x1 = (int)(sample_x-.5);
2446 |
2447 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
2448 |
2449 | y2 = (int)(sample_y+.5);
2450 | x2 = (int)(sample_x+.5);
2451 |
2452 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
2453 |
2454 | fx = sample_x-x1;
2455 | fy = sample_y-y1;
2456 |
2457 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
2458 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
2459 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
2460 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
2461 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2462 |
2463 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
2464 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
2465 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
2466 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
2467 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2468 |
2469 | modg = pow(rx,2) + pow(ry,2);
2470 |
2471 | if( modg != 0.0 )
2472 | {
2473 | res1 = *(evolution[level].Lxx.ptr(y1)+x1);
2474 | res2 = *(evolution[level].Lxx.ptr(y1)+x2);
2475 | res3 = *(evolution[level].Lxx.ptr(y2)+x1);
2476 | res4 = *(evolution[level].Lxx.ptr(y2)+x2);
2477 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2478 |
2479 | res1 = *(evolution[level].Lxy.ptr(y1)+x1);
2480 | res2 = *(evolution[level].Lxy.ptr(y1)+x2);
2481 | res3 = *(evolution[level].Lxy.ptr(y2)+x1);
2482 | res4 = *(evolution[level].Lxy.ptr(y2)+x2);
2483 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2484 |
2485 | res1 = *(evolution[level].Lyy.ptr(y1)+x1);
2486 | res2 = *(evolution[level].Lyy.ptr(y1)+x2);
2487 | res3 = *(evolution[level].Lyy.ptr(y2)+x1);
2488 | res4 = *(evolution[level].Lyy.ptr(y2)+x2);
2489 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2490 |
2491 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
2492 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg);
2493 |
2494 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
2495 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg);
2496 | }
2497 | else
2498 | {
2499 | lww = 0.0;
2500 | lvv = 0.0;
2501 | }
2502 |
2503 | // Sum the derivatives to the cumulative descriptor
2504 | if( lww >= 0.0 )
2505 | {
2506 | dxp += lvv;
2507 | mdxp += fabs(lvv);
2508 | }
2509 | else
2510 | {
2511 | dxn += lvv;
2512 | mdxn += fabs(lvv);
2513 | }
2514 |
2515 | if( lvv >= 0.0 )
2516 | {
2517 | dyp += lww;
2518 | mdyp += fabs(lww);
2519 | }
2520 | else
2521 | {
2522 | dyn += lww;
2523 | mdyn += fabs(lww);
2524 | }
2525 | }
2526 | }
2527 |
2528 | // Add the values to the descriptor vector
2529 | kpt.descriptor[dcount++] = dxp;
2530 | kpt.descriptor[dcount++] = dxn;
2531 | kpt.descriptor[dcount++] = mdxp;
2532 | kpt.descriptor[dcount++] = mdxn;
2533 | kpt.descriptor[dcount++] = dyp;
2534 | kpt.descriptor[dcount++] = dyn;
2535 | kpt.descriptor[dcount++] = mdyp;
2536 | kpt.descriptor[dcount++] = mdyn;
2537 |
2538 | // Store the current length^2 of the vector
2539 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
2540 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn;
2541 | }
2542 | }
2543 |
2544 | // convert to unit vector
2545 | len = sqrt(len);
2546 |
2547 | for(int i = 0; i < dsize; i++)
2548 | {
2549 | kpt.descriptor[i] /= len;
2550 | }
2551 |
2552 | if( USE_CLIPPING_NORMALIZATION == true )
2553 | {
2554 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
2555 | }
2556 | }
2557 |
2558 | //*************************************************************************************
2559 | //*************************************************************************************
2560 |
/**
 * @brief This method computes the extended G-SURF descriptor of the provided keypoint
 * given the main orientation
 * @param kpt Input keypoint
 * @note Rectangular grid of 20 s x 20 s. Descriptor Length 128. No additional
 * Gaussian weighting is performed. G-SURF descriptor as described in
 * Pablo F. Alcantarilla, Luis M. Bergasa and Andrew J. Davison,
 * Gauge-SURF Descriptors, Image and Vision Computing 31(1), 2013
 */
2569 | void KAZE::Get_GSURF_Descriptor_128(Ipoint &kpt)
2570 | {
2571 | float scale = 0.0, len = 0.0, xf = 0.0, yf = 0.0;
2572 | float rx = 0.0, ry = 0.0, rxx = 0.0, rxy = 0.0, ryy = 0.0;
2573 | float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
2574 | float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
2575 | float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
2576 | float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
2577 | float lvv = 0.0, lww = 0.0, modg = 0.0;
2578 | int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0, dcount = 0;
2579 | int dsize = 0, level = 0;
2580 |
2581 | // Set the descriptor size and the sample and pattern sizes
2582 | dsize = kpt.descriptor_size = 128;
2583 | sample_step = 5;
2584 | pattern_size = 10;
2585 |
2586 | // Get the information from the keypoint
2587 | yf = kpt.yf;
2588 | xf = kpt.xf;
2589 | scale = kpt.scale;
2590 | angle = kpt.angle;
2591 | level = kpt.level;
2592 | co = cos(angle);
2593 | si = sin(angle);
2594 |
2595 | // Allocate the memory for the vector
2596 | kpt.descriptor = vector(kpt.descriptor_size);
2597 |
2598 | // Calculate descriptor for this interest point
2599 | for(int i = -pattern_size; i < pattern_size; i+=sample_step)
2600 | {
2601 | for(int j = -pattern_size; j < pattern_size; j+=sample_step)
2602 | {
2603 | dxp=dxn=mdxp=mdxn=0.0;
2604 | dyp=dyn=mdyp=mdyn=0.0;
2605 |
2606 | for(float k = i; k < i + sample_step; k+=0.5)
2607 | {
2608 | for(float l = j; l < j + sample_step; l+=0.5)
2609 | {
2610 | // Get the coordinates of the sample point on the rotated axis
2611 | sample_y = yf + (l*scale*co + k*scale*si);
2612 | sample_x = xf + (-l*scale*si + k*scale*co);
2613 |
2614 | y1 = (int)(sample_y-.5);
2615 | x1 = (int)(sample_x-.5);
2616 |
2617 | Check_Descriptor_Limits(x1,y1,img_width,img_height);
2618 |
2619 | y2 = (int)(sample_y+.5);
2620 | x2 = (int)(sample_x+.5);
2621 |
2622 | Check_Descriptor_Limits(x2,y2,img_width,img_height);
2623 |
2624 | fx = sample_x-x1;
2625 | fy = sample_y-y1;
2626 |
2627 | res1 = *(evolution[level].Lx.ptr(y1)+x1);
2628 | res2 = *(evolution[level].Lx.ptr(y1)+x2);
2629 | res3 = *(evolution[level].Lx.ptr(y2)+x1);
2630 | res4 = *(evolution[level].Lx.ptr(y2)+x2);
2631 | rx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2632 |
2633 | res1 = *(evolution[level].Ly.ptr(y1)+x1);
2634 | res2 = *(evolution[level].Ly.ptr(y1)+x2);
2635 | res3 = *(evolution[level].Ly.ptr(y2)+x1);
2636 | res4 = *(evolution[level].Ly.ptr(y2)+x2);
2637 | ry = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2638 |
2639 | modg = pow(rx,2) + pow(ry,2);
2640 |
2641 | if( modg != 0.0 )
2642 | {
2643 | res1 = *(evolution[level].Lxx.ptr(y1)+x1);
2644 | res2 = *(evolution[level].Lxx.ptr(y1)+x2);
2645 | res3 = *(evolution[level].Lxx.ptr(y2)+x1);
2646 | res4 = *(evolution[level].Lxx.ptr(y2)+x2);
2647 | rxx = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2648 |
2649 | res1 = *(evolution[level].Lxy.ptr(y1)+x1);
2650 | res2 = *(evolution[level].Lxy.ptr(y1)+x2);
2651 | res3 = *(evolution[level].Lxy.ptr(y2)+x1);
2652 | res4 = *(evolution[level].Lxy.ptr(y2)+x2);
2653 | rxy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2654 |
2655 | res1 = *(evolution[level].Lyy.ptr(y1)+x1);
2656 | res2 = *(evolution[level].Lyy.ptr(y1)+x2);
2657 | res3 = *(evolution[level].Lyy.ptr(y2)+x1);
2658 | res4 = *(evolution[level].Lyy.ptr(y2)+x2);
2659 | ryy = (1.0-fx)*(1.0-fy)*res1 + fx*(1.0-fy)*res2 + (1.0-fx)*fy*res3 + fx*fy*res4;
2660 |
2661 | // Lww = (Lx^2 * Lxx + 2*Lx*Lxy*Ly + Ly^2*Lyy) / (Lx^2 + Ly^2)
2662 | lww = (pow(rx,2)*rxx + 2.0*rx*rxy*ry + pow(ry,2)*ryy) / (modg);
2663 |
2664 | // Lvv = (-2*Lx*Lxy*Ly + Lxx*Ly^2 + Lx^2*Lyy) / (Lx^2 + Ly^2)
2665 | lvv = (-2.0*rx*rxy*ry + rxx*pow(ry,2) + pow(rx,2)*ryy) /(modg);
2666 | }
2667 | else
2668 | {
2669 | lww = 0.0;
2670 | lvv = 0.0;
2671 | }
2672 |
2673 | // Sum the derivatives to the cumulative descriptor
2674 | if( lww >= 0.0 )
2675 | {
2676 | dxp += lvv;
2677 | mdxp += fabs(lvv);
2678 | }
2679 | else
2680 | {
2681 | dxn += lvv;
2682 | mdxn += fabs(lvv);
2683 | }
2684 |
2685 | if( lvv >= 0.0 )
2686 | {
2687 | dyp += lww;
2688 | mdyp += fabs(lww);
2689 | }
2690 | else
2691 | {
2692 | dyn += lww;
2693 | mdyn += fabs(lww);
2694 | }
2695 | }
2696 | }
2697 |
2698 | // Add the values to the descriptor vector
2699 | kpt.descriptor[dcount++] = dxp;
2700 | kpt.descriptor[dcount++] = dxn;
2701 | kpt.descriptor[dcount++] = mdxp;
2702 | kpt.descriptor[dcount++] = mdxn;
2703 | kpt.descriptor[dcount++] = dyp;
2704 | kpt.descriptor[dcount++] = dyn;
2705 | kpt.descriptor[dcount++] = mdyp;
2706 | kpt.descriptor[dcount++] = mdyn;
2707 |
2708 | // Store the current length^2 of the vector
2709 | len += dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
2710 | dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn;
2711 | }
2712 | }
2713 |
2714 | // convert to unit vector
2715 | len = sqrt(len);
2716 |
2717 | for(int i = 0; i < dsize; i++)
2718 | {
2719 | kpt.descriptor[i] /= len;
2720 | }
2721 |
2722 | if( USE_CLIPPING_NORMALIZATION == true )
2723 | {
2724 | Clipping_Descriptor(kpt,CLIPPING_NORMALIZATION_NITER,CLIPPING_NORMALIZATION_RATIO);
2725 | }
2726 | }
2727 |
2728 | //*************************************************************************************
2729 | //*************************************************************************************
2730 |
2731 | /**
2732 | * @brief This method performs a scalar non-linear diffusion step using AOS schemes
2733 | * @param Ld Image at a given evolution step
2734 | * @param Ldprev Image at a previous evolution step
2735 | * @param c Conductivity image
2736 | * @param stepsize Stepsize for the nonlinear diffusion evolution
2737 | * @note If c is constant, the diffusion will be linear
2738 | * If c is a matrix of the same size as Ld, the diffusion will be nonlinear
2739 | * The stepsize can be arbitrarilly large
2740 | */
2741 | void KAZE::AOS_Step_Scalar(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize)
2742 | {
2743 | //int64 t1 = cv::getTickCount(); cout << "Begin AOS schemes at " << t1 << endl;
2744 | AOS_Rows(Ldprev,c,stepsize);
2745 | AOS_Columns(Ldprev,c,stepsize);
2746 | //int64 t2 = cv::getTickCount(); cout << "Finish AOS schemes. Exec-time: " << 1000.0*(t2-t1)/cv::getTickFrequency() << endl;
2747 |
2748 | Ld = 0.5*(Lty + Ltx.t());
2749 | }
2750 |
2751 | //*************************************************************************************
2752 | //*************************************************************************************
2753 |
2754 | /**
2755 | * @brief This method performs a scalar non-linear diffusion step using AOS schemes
2756 | * Diffusion in each dimension is computed independently in a different thread
2757 | * @param Ld Image at a given evolution step
2758 | * @param Ldprev Image at a previous evolution step
2759 | * @param c Conductivity image
2760 | * @param stepsize Stepsize for the nonlinear diffusion evolution
2761 | * @note If c is constant, the diffusion will be linear
2762 | * If c is a matrix of the same size as Ld, the diffusion will be nonlinear
2763 | * The stepsize can be arbitrarilly large
2764 | */
#if HAVE_BOOST_THREADING
void KAZE::AOS_Step_Scalar_Parallel(cv::Mat &Ld, const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize)
{
    // Run the row-wise and column-wise 1D-AOS passes concurrently.
    // Stack-allocated threads replace the previous raw new/delete pair,
    // which leaked both thread objects if join() threw
    boost::thread AOSth1(&KAZE::AOS_Rows,this,Ldprev,c,stepsize);
    boost::thread AOSth2(&KAZE::AOS_Columns,this,Ldprev,c,stepsize);

    AOSth1.join();
    AOSth2.join();

    // The column-wise solution Ltx is stored transposed, so transpose it
    // back and average it with the row-wise solution Lty
    Ld = 0.5*(Lty + Ltx.t());
}
#endif
2782 |
2783 | //*************************************************************************************
2784 | //*************************************************************************************
2785 |
2786 | /**
2787 | * @brief This method performs performs 1D-AOS for the image rows
2788 | * @param Ldprev Image at a previous evolution step
2789 | * @param c Conductivity image
2790 | * @param stepsize Stepsize for the nonlinear diffusion evolution
2791 | */
2792 | void KAZE::AOS_Rows(const cv::Mat &Ldprev, const cv::Mat &c, const float stepsize)
2793 | {
2794 | //int64 t1 = cv::getTickCount(); cout << "Begin AOS_Rows at " << t1 << endl;
2795 | // Operate on rows
2796 | int qcols = qr.cols, qrows = qr.rows;
2797 | if (qr.isContinuous() && c.isContinuous())
2798 | {
2799 | qcols *= qrows;
2800 | qrows = 1;
2801 | }
2802 | for( int i = 0; i < qrows; i++ )
2803 | {
2804 | for( int j = 0; j < qcols; j++ )
2805 | {
2806 | *(qr.ptr(i)+j) = *(c.ptr(i)+j) + *(c.ptr(i+1)+j);
2807 | }
2808 | }
2809 |
2810 | for( int j = 0; j < py.cols; j++ )
2811 | {
2812 | *(py.ptr(0)+j) = *(qr.ptr