├── .github
└── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
├── .gitignore
├── .settings
└── language.settings.xml
├── CMakeLists.txt
├── LICENSE
├── Makefile
├── README.md
├── appveyor.yml
├── cmake
└── DetectPython.cmake
├── include
└── pyboostcvconverter
│ └── pyboostcvconverter.hpp
├── src
├── pyboost_cv2_converter.cpp
├── pyboost_cv3_converter.cpp
├── pyboost_cv4_converter.cpp
└── python_module.cpp
└── tests
├── CMakeLists.txt
├── memory_test_video_capture.py
├── test_project_source.cpp
└── tests.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is. Please include build output if the problem is with building/linking. Please, include the problematic sections of your own code if applicable.
12 |
13 | **Desktop (please complete the following information):**
14 | - OS & version: [e.g. Windows 11, MacOS 10.15.7, Linux Ubuntu 22.04]
15 | - Python version (e.g. output of `python --version` and/or `python3 --version`):
16 | - Numpy version (e.g. output of `pip show numpy` or equivalent):
17 | - For a build problem: If on Linux/MacOS, output of `cat CMakeCache.txt | grep NUMPY_INCLUDE_DIRS` and `cat CMakeCache.txt | grep PYTHON_DESIRED_VERSION` from your build folder.
18 |
19 | **Additional context**
20 | Add any other context about the problem here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context about the feature request here.
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Files marked as links
2 | *-lk
3 |
4 | # Qt Generated files
5 | *_automoc.cpp
6 | moc_*
7 | *.depends
8 | ui_*
9 | qrc_*
10 |
11 | # Custom output folders
12 | /bin/
13 | /lib/
14 |
15 | # Compiled binaries
16 | *.so
17 | *.a
18 | *.dll
19 | # Compiled CPython extension on Windows
20 | *.pyd
21 |
22 | # Compiled Object files
23 | *.slo
24 | *.lo
25 | *.o
26 | *.obj
27 |
28 | # Precompiled Headers
29 | *.gch
30 | *.pch
31 |
32 | # Fortran module files
33 | *.mod
34 |
35 |
36 | # temporary GEdit backups
37 |
38 | *~
39 |
40 | # temporary QTCreator backups
41 | *.autosave
42 |
43 | ## CMake Generated files
44 |
45 | Makefile.cmake
46 | CMakeOutput.log
47 | CMakeCache.txt
48 | CMakeFiles
49 | Makefile
50 | *.cmake
51 | !/cmake/*.cmake
52 | install_manifest.txt
53 | *.rule
54 |
55 | ## Generated config files
56 |
57 | /modules/engine/config.h
58 | /modules/demo/config.h
59 | /modules/ir_table/config.h
60 | /modules/kinect/config.h
61 |
62 | ## CMake Generated Tests directory
63 | /Testing/
64 |
65 |
66 | ## Eclipse Project Files
67 |
68 | .settings/
69 | *.project
70 | *.cproject
71 |
72 | ## Ignore Visual Studio project/solution files
73 |
74 | *.sln
75 | *.vcproj
76 | *.filters
77 | *.opensdf
78 | *.sdf
79 | *.vcxproj
80 |
81 | ## Ignore Visual Studio temporary files, build results, and
82 | ## files generated by popular Visual Studio add-ons.
83 |
84 | # User-specific files
85 | *.suo
86 | *.user
87 | *.userosscache
88 | *.sln.docstates
89 |
90 | # Build results
91 | [Dd]ebug/
92 | !/third_party/gtest/lib/vc12/Debug
93 | [Dd]ebugPublic/
94 | [Rr]elease/
95 | !/third_party/gtest/lib/vc12/Release
96 | [Rr]eleases/
97 | x64/
98 | x86/
99 | build/
100 | bld/
101 | [Bb]in/
102 | [Oo]bj/
103 |
104 | # Roslyn cache directories
105 | *.ide/
106 |
107 | # MSTest test Results
108 | [Tt]est[Rr]esult*/
109 | [Bb]uild[Ll]og.*
110 |
111 | #NUNIT
112 | *.VisualState.xml
113 | TestResult.xml
114 |
115 | # Build Results of an ATL Project
116 | /[Dd]ebugPS/
117 | /[Rr]eleasePS/
118 | dlldata.c
119 |
120 | *_i.c
121 | *_p.c
122 | *_i.h
123 | *.ilk
124 | *.meta
125 | *.obj
126 | *.pch
127 | *.pdb
128 | *.pgc
129 | *.pgd
130 | *.rsp
131 | *.sbr
132 | *.tlb
133 | *.tli
134 | *.tlh
135 | *.tmp
136 | *.tmp_proj
137 | *.log
138 | *.tlog
139 | *.vspscc
140 | *.vssscc
141 | .builds
142 | *.pidb
143 | *.svclog
144 | *.scc
145 | *.lastbuildstate
146 |
147 | # Chutzpah Test files
148 | _Chutzpah*
149 |
150 | # Visual C++ cache files
151 | ipch/
152 | *.aps
153 | *.ncb
154 | *.opensdf
155 | *.sdf
156 | *.cachefile
157 |
158 | # Visual Studio profiler
159 | *.psess
160 | *.vsp
161 | *.vspx
162 |
163 | # TFS 2012 Local Workspace
164 | $tf/
165 |
166 | # Guidance Automation Toolkit
167 | *.gpState
168 |
169 | # ReSharper is a .NET coding add-in
170 | _ReSharper*/
171 | *.[Rr]e[Ss]harper
172 | *.DotSettings.user
173 |
174 | # JustCode is a .NET coding addin-in
175 | .JustCode
176 |
177 | # TeamCity is a build add-in
178 | _TeamCity*
179 |
180 | # DotCover is a Code Coverage Tool
181 | *.dotCover
182 |
183 | # NCrunch
184 | _NCrunch_*
185 | .*crunch*.local.xml
186 |
187 | # MightyMoose
188 | *.mm.*
189 | AutoTest.Net/
190 |
191 | # Web workbench (sass)
192 | .sass-cache/
193 |
194 | # Installshield output folder
195 | [Ee]xpress/
196 |
197 | # DocProject is a documentation generator add-in
198 | DocProject/buildhelp/
199 | DocProject/Help/*.HxT
200 | DocProject/Help/*.HxC
201 | DocProject/Help/*.hhc
202 | DocProject/Help/*.hhk
203 | DocProject/Help/*.hhp
204 | DocProject/Help/Html2
205 | DocProject/Help/html
206 |
207 | # Click-Once directory
208 | publish/
209 |
210 | # Publish Web Output
211 | *.[Pp]ublish.xml
212 | *.azurePubxml
213 | # TODO: Comment the next line if you want to checkin your web deploy settings
214 | # but database connection strings (with potential passwords) will be unencrypted
215 | *.pubxml
216 | *.publishproj
217 |
218 | # NuGet Packages
219 | *.nupkg
220 | # The packages folder can be ignored because of Package Restore
221 | **/packages/*
222 | # except build/, which is used as an MSBuild target.
223 | !**/packages/build/
224 | # If using the old MSBuild-Integrated Package Restore, uncomment this:
225 | #!**/packages/repositories.config
226 |
227 | # Windows Azure Build Output
228 | csx/
229 | *.build.csdef
230 |
231 | # Windows Store app package directory
232 | AppPackages/
233 |
234 | #Class diagrams
235 | *.cd
236 |
237 | # Others
238 | sql/
239 | *.Cache
240 | ClientBin/
241 | [Ss]tyle[Cc]op.*
242 | ~$*
243 | *~
244 | *.dbmdl
245 | *.dbproj.schemaview
246 | *.pfx
247 | *.publishsettings
248 | node_modules/
249 | bower_components/
250 |
251 | # RIA/Silverlight projects
252 | Generated_Code/
253 |
254 | # Backup & report files from converting an old project file
255 | # to a newer Visual Studio version. Backup files are not needed,
256 | # because we have git ;-)
257 | _UpgradeReport_Files/
258 | Backup*/
259 | UpgradeLog*.XML
260 | UpgradeLog*.htm
261 |
262 | # SQL Server files
263 | *.mdf
264 | *.ldf
265 |
266 | # Business Intelligence projects
267 | *.rdl.data
268 | *.bim.layout
269 | *.bim_*.settings
270 |
271 | # Microsoft Fakes
272 | FakesAssemblies/
273 |
274 | # CLion project files and build folders
275 | .idea/
276 | cmake-build-debug/
277 | cmake-build-release/
278 |
279 | # Common CMake build folders
280 | build/
281 |
282 | # Python Cache
283 | __pycache__/
--------------------------------------------------------------------------------
/.settings/language.settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
2 | project("pbcvt")
3 |
4 | #----------------------------CMAKE & GLOBAL PROPERTIES-------------------------#
5 | list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
6 |
7 | ###============= C++11 support====================================
8 | if(${CMAKE_VERSION} VERSION_LESS "3.1")
9 | include(CheckCXXCompilerFlag)
10 | CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
11 | CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X)
12 | if (COMPILER_SUPPORTS_CXX11)
13 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
14 | elseif (COMPILER_SUPPORTS_CXX0X)
15 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
16 | else ()
17 | message(FATAL_ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
18 | endif ()
19 | else()
20 | set(CMAKE_CXX_STANDARD 11)
21 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
22 | endif()
23 | #=================================================================
24 | # PYTHON option
25 |
26 | set(PYTHON_OPTIONS "2.X" "3.X")
27 | set(PYTHON_DESIRED_VERSION "3.X" CACHE STRING "Choose which python version to use, options are: ${PYTHON_OPTIONS}.")
28 | set_property(CACHE PYTHON_DESIRED_VERSION PROPERTY STRINGS ${PYTHON_OPTIONS})
29 |
30 | #=============== Find Packages ====================================
31 | ## OpenCV
32 | find_package(OpenCV COMPONENTS core REQUIRED)
33 |
34 | ## Python
35 | include("DetectPython")
36 |
37 | ## Boost
38 | if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
39 | # Provisions for typical Boost compiled on Windows
40 | # Most commonly, Boost libraries are compiled statically on windows (change as necessary)
41 | set(Boost_USE_STATIC_LIBS TRUE)
42 | set(Boost_USE_STATIC_RUNTIME OFF)
43 | set(Boost_USE_MULTITHREADED ON)
44 | set(Boost_USE_DEBUG_PYTHON OFF)
45 | add_definitions(-DBOOST_PYTHON_STATIC_LIB)
46 | endif()
47 | if (${PYTHON_DESIRED_VERSION} STREQUAL "2.X")
48 | set(Python_ADDITIONAL_VERSIONS ${PYTHON2_VERSION_MAJOR}.${PYTHON2_VERSION_MINOR})
49 | message(STATUS "Trying Boost.Python component name, python${PYTHON2_VERSION_MAJOR}...")
50 | find_package(Boost COMPONENTS python${PYTHON2_VERSION_MAJOR} QUIET)
51 | if(NOT Boost_FOUND)
52 | message(STATUS "Trying alternative Boost.Python component name, python${PYTHON2_VERSION_MAJOR}${PYTHON2_VERSION_MINOR}...")
53 | find_package(Boost COMPONENTS python${PYTHON2_VERSION_MAJOR}${PYTHON2_VERSION_MINOR} QUIET)
54 | if(NOT Boost_FOUND)
55 | message(STATUS "Trying alternative Boost.Python component name, python-py${PYTHON2_VERSION_MAJOR}${PYTHON2_VERSION_MINOR}...")
56 | find_package(Boost COMPONENTS python-py${PYTHON2_VERSION_MAJOR}${PYTHON2_VERSION_MINOR} REQUIRED)
57 | endif()
58 | endif()
59 | else ()
60 | set(Python_ADDITIONAL_VERSIONS ${PYTHON3_VERSION_MAJOR}.${PYTHON3_VERSION_MINOR})
61 | message(STATUS "Trying Boost.Python component name, python${PYTHON3_VERSION_MAJOR}...")
62 | find_package(Boost COMPONENTS python${PYTHON3_VERSION_MAJOR} QUIET)
63 | if(NOT Boost_FOUND)
64 | message(STATUS "Trying alternative Boost.Python component name, python${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}...")
65 | find_package(Boost COMPONENTS python${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR} QUIET)
66 | if(NOT Boost_FOUND)
67 | message(STATUS "Trying alternative Boost.Python component name, python-py${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}...")
68 | find_package(Boost COMPONENTS python-py${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR} REQUIRED)
69 | endif()
70 | endif()
71 | endif ()
72 |
73 |
74 |
75 | #========pick python stuff========================================
76 | if (${PYTHON_DESIRED_VERSION} STREQUAL "2.X")
77 | set(PYTHON_INCLUDE_DIRS ${PYTHON2_INCLUDE_DIR} ${PYTHON2_INCLUDE_DIR2} ${PYTHON2_NUMPY_INCLUDE_DIRS})
78 | set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON2_NUMPY_INCLUDE_DIRS})
79 | set(PYTHON_LIBRARIES ${PYTHON2_LIBRARY})
80 | set(PYTHON_EXECUTABLE ${PYTHON2_EXECUTABLE})
81 | set(PYTHON_PACKAGES_PATH ${PYTHON2_PACKAGES_PATH})
82 | set(ARCHIVE_OUTPUT_NAME pbcvt_py2)
83 | set(ARCHIVE_OUTPUT_SUFFIX _py2)
84 | else ()
85 | set(PYTHON_INCLUDE_DIRS ${PYTHON3_INCLUDE_DIR} ${PYTHON3_INCLUDE_DIR2} ${PYTHON3_NUMPY_INCLUDE_DIRS})
86 | set(PYTHON_NUMPY_INCLUDE_DIRS ${PYTHON3_NUMPY_INCLUDE_DIRS})
87 | set(PYTHON_LIBRARIES ${PYTHON3_LIBRARY})
88 | set(PYTHON_EXECUTABLE ${PYTHON3_EXECUTABLE})
89 | set(PYTHON_PACKAGES_PATH ${PYTHON3_PACKAGES_PATH})
90 | set(ARCHIVE_OUTPUT_NAME pbcvt_py3)
91 | set(ARCHIVE_OUTPUT_SUFFIX _py3)
92 | endif ()
93 |
94 | find_package_handle_standard_args(Python DEFAULT_MSG PYTHON_INCLUDE_DIRS PYTHON_NUMPY_INCLUDE_DIRS PYTHON_LIBRARIES PYTHON_EXECUTABLE PYTHON_PACKAGES_PATH)
95 | if(NOT Python_FOUND)
96 | message(SEND_ERROR "Not all required components of Numpy/Python found.")
97 | endif()
98 |
99 | file(GLOB project_sources ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
100 |
101 |
102 | macro(pbcvt_add_pbcvt_library target_name STATIC)
103 | if(${STATIC})
104 | add_library(${target_name} STATIC ${project_sources} ${CMAKE_CURRENT_SOURCE_DIR}/include/pyboostcvconverter/pyboostcvconverter.hpp)
105 | else()
106 | add_library(${target_name} SHARED ${project_sources} ${CMAKE_CURRENT_SOURCE_DIR}/include/pyboostcvconverter/pyboostcvconverter.hpp)
107 | endif()
108 | target_include_directories(${target_name} PUBLIC
109 | "${CMAKE_CURRENT_SOURCE_DIR}/include"
110 | ${Boost_INCLUDE_DIRS}
111 | ${OpenCV_INCLUDE_DIRS}
112 | ${PYTHON_INCLUDE_DIRS}
113 | )
114 |
115 | target_link_libraries(${target_name}
116 | ${Boost_LIBRARIES}
117 | ${OpenCV_LIBRARIES}
118 | ${PYTHON_LIBRARIES}
119 | )
120 |
121 | if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
122 | # Provisions for typical Boost compiled on Windows
123 | # Unless some extra compile options are used on Windows, the libraries won't have prefixes (change as necessary)
124 | target_compile_definitions(${target_name} PUBLIC -DBOOST_ALL_NO_LIB -DBOOST_SYSTEM_NO_DEPRECATED)
125 | endif()
126 | endmacro()
127 |
128 | pbcvt_add_pbcvt_library(${PROJECT_NAME} OFF)
129 |
130 | #--------------------------- INSTALLATION -----------------------------------------------------
131 | #-get proper extension for python binary shared object on this platform
132 |
133 | set(__python_ext_suffix_var "EXT_SUFFIX")
134 | if(${PYTHON_DESIRED_VERSION} STREQUAL "2.X")
135 | set(__python_ext_suffix_var "SO")
136 | endif()
137 |
138 | execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('${__python_ext_suffix_var}'))"
139 | RESULT_VARIABLE PYTHON_${PROJECT_NAME}_PY_PROCESS
140 | OUTPUT_VARIABLE ${PROJECT_NAME}_PY_SUFFIX
141 | OUTPUT_STRIP_TRAILING_WHITESPACE)
142 | if(NOT ${PYTHON_${PROJECT_NAME}_PY_PROCESS} EQUAL 0)
143 | set(${PROJECT_NAME}_PY_SUFFIX ".so")
144 | endif()
145 |
146 |
147 | set_target_properties(${PROJECT_NAME} PROPERTIES
148 | ARCHIVE_OUTPUT_NAME ${ARCHIVE_OUTPUT_NAME} # prevent name conflict for python2/3 outputs
149 | PREFIX ""
150 | OUTPUT_NAME pbcvt
151 | SUFFIX ${${PROJECT_NAME}_PY_SUFFIX})
152 |
153 |
154 |
155 | if (MSVC AND NOT PYTHON_DEBUG_LIBRARIES)
156 | set(PYTHON_INSTALL_CONFIGURATIONS CONFIGURATIONS Release)
157 | else ()
158 | set(PYTHON_INSTALL_CONFIGURATIONS "")
159 | endif ()
160 |
161 | if (WIN32)
162 | set(PYTHON_INSTALL_ARCHIVE "")
163 | else ()
164 | set(PYTHON_INSTALL_ARCHIVE ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT python)
165 | endif ()
166 |
167 | install(TARGETS ${PROJECT_NAME}
168 | ${PYTHON_INSTALL_CONFIGURATIONS}
169 | RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT python
170 | LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT python
171 | ${PYTHON_INSTALL_ARCHIVE}
172 | )
173 |
174 | #--------------------------- TEST PROJECT -----------------------------------------------------
175 | # Test Project option
176 | option(BUILD_TEST_PROJECT "Build the test project that links against the static PBCVT library" OFF)
177 |
178 |
179 | if (BUILD_TEST_PROJECT)
180 | set(CMAKE_POSITION_INDEPENDENT_CODE ON)
181 | pbcvt_add_pbcvt_library(static_pbcvt ON)
182 | add_subdirectory(tests)
183 | endif()
184 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2018 Gregory Kramida
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # CMAKE generated file: DO NOT EDIT!
2 | # Generated by "Unix Makefiles" Generator, CMake Version 3.5
3 |
4 | # Default target executed when no arguments are given to make.
5 | default_target: all
6 |
7 | .PHONY : default_target
8 |
9 | # Allow only one "make -f Makefile2" at a time, but pass parallelism.
10 | .NOTPARALLEL:
11 |
12 |
13 | #=============================================================================
14 | # Special targets provided by cmake.
15 |
16 | # Disable implicit rules so canonical targets will work.
17 | .SUFFIXES:
18 |
19 |
20 | # Remove some rules from gmake that .SUFFIXES does not remove.
21 | SUFFIXES =
22 |
23 | .SUFFIXES: .hpux_make_needs_suffix_list
24 |
25 |
26 | # Suppress display of executed commands.
27 | $(VERBOSE).SILENT:
28 |
29 |
30 | # A target that is always out of date.
31 | cmake_force:
32 |
33 | .PHONY : cmake_force
34 |
35 | #=============================================================================
36 | # Set environment variables for the build.
37 |
38 | # The shell in which to execute make rules.
39 | SHELL = /bin/sh
40 |
41 | # The CMake executable.
42 | CMAKE_COMMAND = /usr/bin/cmake
43 |
44 | # The command to remove a file.
45 | RM = /usr/bin/cmake -E remove -f
46 |
47 | # Escaping for special characters.
48 | EQUALS = =
49 |
50 | # The top-level source directory on which CMake was run.
51 | CMAKE_SOURCE_DIR = /home/algomorph/Factory/pyboostcvconverter
52 |
53 | # The top-level build directory on which CMake was run.
54 | CMAKE_BINARY_DIR = /home/algomorph/Factory/pyboostcvconverter
55 |
56 | #=============================================================================
57 | # Targets provided globally by CMake.
58 |
59 | # Special rule for the target install
60 | install: preinstall
61 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Install the project..."
62 | /usr/bin/cmake -P cmake_install.cmake
63 | .PHONY : install
64 |
65 | # Special rule for the target install
66 | install/fast: preinstall/fast
67 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Install the project..."
68 | /usr/bin/cmake -P cmake_install.cmake
69 | .PHONY : install/fast
70 |
71 | # Special rule for the target list_install_components
72 | list_install_components:
73 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Available install components are: \"python\""
74 | .PHONY : list_install_components
75 |
76 | # Special rule for the target list_install_components
77 | list_install_components/fast: list_install_components
78 |
79 | .PHONY : list_install_components/fast
80 |
81 | # Special rule for the target rebuild_cache
82 | rebuild_cache:
83 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..."
84 | /usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
85 | .PHONY : rebuild_cache
86 |
87 | # Special rule for the target rebuild_cache
88 | rebuild_cache/fast: rebuild_cache
89 |
90 | .PHONY : rebuild_cache/fast
91 |
92 | # Special rule for the target install/strip
93 | install/strip: preinstall
94 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Installing the project stripped..."
95 | /usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake
96 | .PHONY : install/strip
97 |
98 | # Special rule for the target install/strip
99 | install/strip/fast: install/strip
100 |
101 | .PHONY : install/strip/fast
102 |
103 | # Special rule for the target install/local
104 | install/local: preinstall
105 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Installing only the local directory..."
106 | /usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake
107 | .PHONY : install/local
108 |
109 | # Special rule for the target install/local
110 | install/local/fast: install/local
111 |
112 | .PHONY : install/local/fast
113 |
114 | # Special rule for the target edit_cache
115 | edit_cache:
116 | @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake cache editor..."
117 | /usr/bin/cmake-gui -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR)
118 | .PHONY : edit_cache
119 |
120 | # Special rule for the target edit_cache
121 | edit_cache/fast: edit_cache
122 |
123 | .PHONY : edit_cache/fast
124 |
125 | # The main all target
126 | all: cmake_check_build_system
127 | $(CMAKE_COMMAND) -E cmake_progress_start /home/algomorph/Factory/pyboostcvconverter/CMakeFiles /home/algomorph/Factory/pyboostcvconverter/CMakeFiles/progress.marks
128 | $(MAKE) -f CMakeFiles/Makefile2 all
129 | $(CMAKE_COMMAND) -E cmake_progress_start /home/algomorph/Factory/pyboostcvconverter/CMakeFiles 0
130 | .PHONY : all
131 |
132 | # The main clean target
133 | clean:
134 | $(MAKE) -f CMakeFiles/Makefile2 clean
135 | .PHONY : clean
136 |
137 | # The main clean target
138 | clean/fast: clean
139 |
140 | .PHONY : clean/fast
141 |
142 | # Prepare targets for installation.
143 | preinstall: all
144 | $(MAKE) -f CMakeFiles/Makefile2 preinstall
145 | .PHONY : preinstall
146 |
147 | # Prepare targets for installation.
148 | preinstall/fast:
149 | $(MAKE) -f CMakeFiles/Makefile2 preinstall
150 | .PHONY : preinstall/fast
151 |
152 | # clear depends
153 | depend:
154 | $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1
155 | .PHONY : depend
156 |
157 | #=============================================================================
158 | # Target rules for targets named pbcvt
159 |
160 | # Build rule for target.
161 | pbcvt: cmake_check_build_system
162 | $(MAKE) -f CMakeFiles/Makefile2 pbcvt
163 | .PHONY : pbcvt
164 |
165 | # fast build rule for target.
166 | pbcvt/fast:
167 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/build
168 | .PHONY : pbcvt/fast
169 |
170 | src/pyboost_cv2_converter.o: src/pyboost_cv2_converter.cpp.o
171 |
172 | .PHONY : src/pyboost_cv2_converter.o
173 |
174 | # target to build an object file
175 | src/pyboost_cv2_converter.cpp.o:
176 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv2_converter.cpp.o
177 | .PHONY : src/pyboost_cv2_converter.cpp.o
178 |
179 | src/pyboost_cv2_converter.i: src/pyboost_cv2_converter.cpp.i
180 |
181 | .PHONY : src/pyboost_cv2_converter.i
182 |
183 | # target to preprocess a source file
184 | src/pyboost_cv2_converter.cpp.i:
185 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv2_converter.cpp.i
186 | .PHONY : src/pyboost_cv2_converter.cpp.i
187 |
188 | src/pyboost_cv2_converter.s: src/pyboost_cv2_converter.cpp.s
189 |
190 | .PHONY : src/pyboost_cv2_converter.s
191 |
192 | # target to generate assembly for a file
193 | src/pyboost_cv2_converter.cpp.s:
194 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv2_converter.cpp.s
195 | .PHONY : src/pyboost_cv2_converter.cpp.s
196 |
197 | src/pyboost_cv3_converter.o: src/pyboost_cv3_converter.cpp.o
198 |
199 | .PHONY : src/pyboost_cv3_converter.o
200 |
201 | # target to build an object file
202 | src/pyboost_cv3_converter.cpp.o:
203 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv3_converter.cpp.o
204 | .PHONY : src/pyboost_cv3_converter.cpp.o
205 |
206 | src/pyboost_cv3_converter.i: src/pyboost_cv3_converter.cpp.i
207 |
208 | .PHONY : src/pyboost_cv3_converter.i
209 |
210 | # target to preprocess a source file
211 | src/pyboost_cv3_converter.cpp.i:
212 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv3_converter.cpp.i
213 | .PHONY : src/pyboost_cv3_converter.cpp.i
214 |
215 | src/pyboost_cv3_converter.s: src/pyboost_cv3_converter.cpp.s
216 |
217 | .PHONY : src/pyboost_cv3_converter.s
218 |
219 | # target to generate assembly for a file
220 | src/pyboost_cv3_converter.cpp.s:
221 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/pyboost_cv3_converter.cpp.s
222 | .PHONY : src/pyboost_cv3_converter.cpp.s
223 |
224 | src/python_module.o: src/python_module.cpp.o
225 |
226 | .PHONY : src/python_module.o
227 |
228 | # target to build an object file
229 | src/python_module.cpp.o:
230 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/python_module.cpp.o
231 | .PHONY : src/python_module.cpp.o
232 |
233 | src/python_module.i: src/python_module.cpp.i
234 |
235 | .PHONY : src/python_module.i
236 |
237 | # target to preprocess a source file
238 | src/python_module.cpp.i:
239 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/python_module.cpp.i
240 | .PHONY : src/python_module.cpp.i
241 |
242 | src/python_module.s: src/python_module.cpp.s
243 |
244 | .PHONY : src/python_module.s
245 |
246 | # target to generate assembly for a file
247 | src/python_module.cpp.s:
248 | $(MAKE) -f CMakeFiles/pbcvt.dir/build.make CMakeFiles/pbcvt.dir/src/python_module.cpp.s
249 | .PHONY : src/python_module.cpp.s
250 |
251 | # Help Target
252 | help:
253 | @echo "The following are some of the valid targets for this Makefile:"
254 | @echo "... all (the default if no target is provided)"
255 | @echo "... clean"
256 | @echo "... depend"
257 | @echo "... install"
258 | @echo "... list_install_components"
259 | @echo "... rebuild_cache"
260 | @echo "... pbcvt"
261 | @echo "... install/strip"
262 | @echo "... install/local"
263 | @echo "... edit_cache"
264 | @echo "... src/pyboost_cv2_converter.o"
265 | @echo "... src/pyboost_cv2_converter.i"
266 | @echo "... src/pyboost_cv2_converter.s"
267 | @echo "... src/pyboost_cv3_converter.o"
268 | @echo "... src/pyboost_cv3_converter.i"
269 | @echo "... src/pyboost_cv3_converter.s"
270 | @echo "... src/python_module.o"
271 | @echo "... src/python_module.i"
272 | @echo "... src/python_module.s"
273 | .PHONY : help
274 |
275 |
276 |
277 | #=============================================================================
278 | # Special targets to cleanup operation of make.
279 |
280 | # Special rule to run CMake to check the build system integrity.
281 | # No rule that depends on this can have commands that come from listfiles
282 | # because they might be regenerated.
283 | cmake_check_build_system:
284 | $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0
285 | .PHONY : cmake_check_build_system
286 |
287 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | PBCVT (Python-Boost-OpenCV Converter)
2 | ==================
3 |
4 | [](https://ci.appveyor.com/project/Algomorph/pyboostcvconverter)
5 |
6 |
7 | This is minimalist C++ sample code for porting C++ functions/classes using OpenCV Mat as arguments directly (w/o explicit conversions) to python. It was originally inspired by [code by Yati Sagade](https://github.com/yati-sagade/blog-content/blob/master/content/numpy-boost-python-opencv.rst).
8 |
9 | It is mainly intended to be used as boilerplate code for your own C++ libraries which use OpenCV and which you'd like to call from Python code or a Python shell. ~Directly linking the generated library to other C++ code statically or dynamically is _not_ supported / has _not_ been tested.~ The provided CMake now _also_ demonstrates how to build another library and link a static version of the PBCVT. To do so, pass `-DBUILD_TEST_PROJECT=ON` to cmake or enable the corresponding entry in cmake-gui. For details, see **Usage** below.
10 |
11 | Compatibility
12 | -----------------
13 | This code is compatible with OpenCV 2.X, 3.X, and 4.X.
14 | This code supports Python 2.7 and Python 3.X. => You can pick one by passing `-DPYTHON_DESIRED_VERSION=3.X` or `=2.X` to cmake.
15 |
16 | Disclaimer
17 | -----------------
18 | Certain things in the code might be excessive/unneeded, so if you know something is not needed, please make a pull request with an update. Also, conversion errors aren't handled politically correct (i.e. just generates an empty matrix), please let me know if that bothers you or you'd like to fix that.
19 | The code has been tested for memory leaks. If you still find any errors, let me know by posting an issue!
20 |
21 | NOTE: PLEASE DO NOT TRY TO USE THE PYTHON API FROM WITHIN A C++ EXECUTABLE AND THEN MAKE AN ISSUE ABOUT IT NOT WORKING. (The all-caps is meant to be an attention grabber, because many people have done this in the past). That is _not_ what this code is for. This code is for using C++ libraries that employ OpenCV from a Python Script / shell.
22 |
23 | Compiling & Trying Out Sample Code
24 | ----------------------
25 | 1. Install CMake and/or CMake-gui (http://www.cmake.org/download/, ```sudo apt-get install cmake cmake-gui``` on Ubuntu/Debian)
26 | 2. Run CMake and/or CMake-gui with the git repository as the source and a build folder of your choice (in-source builds supported.) Choose desired generator, configure, and generate. Remember to set PYTHON_DESIRED_VERSION to 2.X for python 2 and 3.X for python 3.
27 | 3. Build (run the appropriate command ```make``` or ```ninja``` depending on your generator, or issue "Build All" on Windows+MSVC)
28 | 4. On *nix systems, ```make install``` run with root privileges will install the compiled library file. Alternatively, you can manually copy it to the pythonXX/dist-packages directory (replace XX with desired python version). On Windows+MSVC, build the INSTALL project.
29 | 5. Run python interpreter of your choice, issue the following commands:
30 | ```python
31 | import numpy
32 | import pbcvt # your module, also the name of your compiled dynamic library file w/o the extension
33 |
34 | a = numpy.array([[1., 2., 3.]])
35 | b = numpy.array([[1.],
36 | [2.],
37 | [3.]])
38 | print(pbcvt.dot(a, b)) # should print [[14.]]
39 | print(pbcvt.dot2(a, b)) # should also print [[14.]]
40 | ```
41 |
42 | Usage
43 | ----------------
44 | To use the project as boilerplate, the header and the source files need to be directly included in your project. Use the provided CMake as an example to properly detect your & link python, numpy, and boost, as well as make a proper install target for your project. Use the python_module.cpp for an example of how to organize your own module. All repository sources may serve well as project boilerplate.
45 |
46 | To learn how to _link_ your code to the static version of PBCVT, assuming you have built it successfully with `-DBUILD_TEST_PROJECT=ON`, check the code inside `/tests/test_project_source.cpp` and the corresponding `/tests/CMakeLists.txt`. Note: the OpenCV-style installation CMake code is provided for the dynamic `pbcvt` library in the root `CMakeLists.txt` only, not the "test_project" library, but it can easily be adapted for your custom project.
47 |
48 | **Windows users: please see note after the examples below.** **Troubleshooting CMake issues for older boost: also see note at the end.**
49 |
50 | Here is (some of the) C++ code in the sample pbcvt.so module (python_module.cpp):
51 |
52 | ```c++
53 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
54 |
55 | #include <pyboostcvconverter/pyboostcvconverter.hpp>
56 | #include <boost/python.hpp>
57 |
58 | namespace pbcvt {
59 |
60 | using namespace boost::python;
61 |
62 | /**
63 | * Example function. Basic inner matrix product using explicit matrix conversion.
64 | * @param left left-hand matrix operand (NdArray required)
65 | * @param right right-hand matrix operand (NdArray required)
66 | * @return an NdArray representing the dot-product of the left and right operands
67 | */
68 | PyObject *dot(PyObject *left, PyObject *right) {
69 |
70 | cv::Mat leftMat, rightMat;
71 | leftMat = pbcvt::fromNDArrayToMat(left);
72 | rightMat = pbcvt::fromNDArrayToMat(right);
73 | auto c1 = leftMat.cols, r2 = rightMat.rows;
74 | // Check that the 2-D matrices can be legally multiplied.
75 | if (c1 != r2) {
76 | PyErr_SetString(PyExc_TypeError,
77 | "Incompatible sizes for matrix multiplication.");
78 | throw_error_already_set();
79 | }
80 | cv::Mat result = leftMat * rightMat;
81 | PyObject *ret = pbcvt::fromMatToNDArray(result);
82 | return ret;
83 | }
84 |
85 | //This example uses Mat directly, but we won't need to worry about the conversion
86 | /**
87 | * Example function. Basic inner matrix product using implicit matrix conversion.
88 | * @param leftMat left-hand matrix operand
89 | * @param rightMat right-hand matrix operand
90 | * @return an NdArray representing the dot-product of the left and right operands
91 | */
92 | cv::Mat dot2(cv::Mat leftMat, cv::Mat rightMat) {
93 | auto c1 = leftMat.cols, r2 = rightMat.rows;
94 | if (c1 != r2) {
95 | PyErr_SetString(PyExc_TypeError,
96 | "Incompatible sizes for matrix multiplication.");
97 | throw_error_already_set();
98 | }
99 | cv::Mat result = leftMat * rightMat;
100 |
101 | return result;
102 | }
103 |
104 |
105 | #if (PY_VERSION_HEX >= 0x03000000)
106 |
107 | static void *init_ar() {
108 | #else
109 | static void init_ar(){
110 | #endif
111 | Py_Initialize();
112 |
113 | import_array();
114 | return NUMPY_IMPORT_ARRAY_RETVAL;
115 | }
116 |
117 | BOOST_PYTHON_MODULE (pbcvt) {
118 | //using namespace XM;
119 | init_ar();
120 |
121 | //initialize converters
122 | to_python_converter<cv::Mat, pbcvt::matToNDArrayBoostConverter>();
124 | pbcvt::matFromNDArrayBoostConverter();
125 |
126 | //expose module-level functions
127 | def("dot", dot);
128 | def("dot2", dot2);
129 |
130 | }
131 |
132 | } //end namespace pbcvt
133 | ```
134 | Notes for Windows Usage / Known Problems / Troubleshooting
135 | ----------------
136 | When building on windows, please make sure to go over the following checklist.
137 | - You have environment variable OpenCV_DIR set to the location of OpenCVModules.cmake file, e.g. ```C:\opencv\build\x64\vc15\lib``` in order for CMake to find OpenCV right away. "vc15" corresponds to the VisualStudio 2017, "vc14" to VS 2015, choose the one that matches your version.
138 | - You have the directory containing opencv_world\.dll, e.g. C:\opencv\build\x64\vc15\bin, in your Path environment variable.
139 | - You have boost properly built or downloaded as *static libraries* with *static runtime off*. A dynamic build would produce binaries such as boost_python37-vc141-mt-x64-1_68.lib and boost_python37-vc141-mt-x64-1_68.dll (**not what you need, notice the absence of the 'lib' prefix**), a static build with static runtime would produce files such as libboost_python37-vc141-mt-s-x64-1_68.lib (**not what you need, notice the 's' suffix**). What you need are files in the form: "libboost_python37-vc141-mt-x64-1_68.lib". If you're building boost from scratch, after running `bootstrap`, this command worked for me with Boost 1.68, 64-bit, Visual Studio 2017: ```b2 toolset=msvc-14.1 release debug runtime-link=shared link=static --build-type=complete --abbreviate-paths architecture=x86 address-model=64 install -j4 --prefix=\```.
140 | - Your boost installation directory is structured as follows: `\` which contains "lib" and "include" folders inside it. For older versions of Boost, e.g. 1.68 or 1.69, the path that you choose for `BOOST_ROOT` should *also* be in your environment variables. For some versions of CMake/Boost, the include directory should have a "boost" subdirectory with all the headers, *not boost-1_68/boost* as is done by the build automatically, and also (if my memory serves me right) you might need to define `Boost_DIR` instead of `BOOST_ROOT` (try this if all else fails). For 1.71 and above, Boost_DIR should be set to the location of BoostConfig.cmake, i.e. `\\lib\cmake\Boost-1.76.0`, while "include" folder no longer has to be restructured.
141 | - The memory address model / architecture (64 bit vs 32 bit, "x86_64" vs "x86") for all your binaries agree, i.e. your python installation needs to be a 64-bit one if your boost libraries have the "x64" suffix, likewise for your OpenCV, and finally for your choice of generator (i.e. Visual Studio ... Win64 for 64-bit) in CMake.
142 |
143 | **Troubleshooting note on python37_d.lib**: I am still at war with Windows on having the Debug configuration done 100% correctly. You might still need it for such cases as, for instance, you have C++ unit tests which test your library and you want to debug through the unit test case. It *is* possible to do it right now, but there is an issue that sometimes requires a work-around. I got it to work by (1) installing the debug version of Python through the official installer and (2) manually linking to the non-debug library in the debug project configuration within MSVC after the CMake generation.
144 |
145 | **Friendly reminder**: don't forget to build the INSTALL project in MSVC before trying to import your library in python.
146 |
147 | Credits
148 | ----------------
149 | Original code was inspired by [Yati Sagade's example](https://github.com/yati-sagade/blog-content/blob/master/content/numpy-boost-python-opencv.rst).
150 |
--------------------------------------------------------------------------------
/appveyor.yml:
--------------------------------------------------------------------------------
1 | # Build worker image (VM template)
2 | image:
3 | - Ubuntu1604
4 | - Ubuntu1804
5 | - Ubuntu2004
6 | - Visual Studio 2017
7 | - Visual Studio 2019
8 |
9 |
10 | platform: x64
11 |
12 | configuration: Release
13 |
14 | for:
15 | -
16 | matrix:
17 | only:
18 | - image: Ubuntu1604
19 | clone_folder: ~/pbcvt
20 | init:
21 | - sh: export REQUESTED_PYTHON_VERSION=2.X
22 | test_script:
23 | - sh: cd $APPVEYOR_BUILD_FOLDER/tests
24 | - python -m pytest tests.py
25 | -
26 | matrix:
27 | only:
28 | - image: Ubuntu1804
29 | clone_folder: ~/pbcvt
30 | init:
31 | - sh: export REQUESTED_PYTHON_VERSION=3.X
32 | test_script:
33 | - sh: cd $APPVEYOR_BUILD_FOLDER/tests
34 | - python3 -m pytest tests.py
35 | -
36 | matrix:
37 | only:
38 | - image: Ubuntu2004
39 | clone_folder: ~/pbcvt
40 | init:
41 | - sh: export REQUESTED_PYTHON_VERSION=3.X
42 | test_script:
43 | - sh: cd $APPVEYOR_BUILD_FOLDER/tests
44 | - python3 -m pytest tests.py
45 | -
46 | matrix:
47 | only:
48 | - image: Visual Studio 2017
49 | clone_folder: c:\projects\pbcvt
50 | test_script:
51 | - cmd: cd %APPVEYOR_BUILD_FOLDER%/tests
52 | - python -m pytest tests.py
53 |
54 | -
55 | matrix:
56 | only:
57 | - image: Visual Studio 2019
58 | clone_folder: c:\projects\pbcvt
59 | test_script:
60 | - cmd: cd %APPVEYOR_BUILD_FOLDER%/tests
61 | - python -m pytest tests.py
62 |
63 | init:
64 | # Windows 10
65 | # skip unsupported combinations
66 | - cmd: set REQUESTED_PYTHON_VERSION=3.X
67 | - cmd: set arch=
68 | - cmd: ECHO on
69 | - cmd: if "%PLATFORM%"=="x64" ( set arch=Win64)
70 | - cmd: echo %arch%
71 | - cmd: echo %APPVEYOR_BUILD_WORKER_IMAGE%
72 | - cmd: set VC2019ARCH=
73 | # VS 2019
74 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( set VC2019ARCH=-A "x64" )
75 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( set CMAKE_GENERATOR="Visual Studio 16 2019" )
76 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvars64.bat" )
77 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( set BOOST_ROOT2="C:\projects\boost_1_73")
78 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( set Boost_DIR="%BOOST_ROOT2%\lib\cmake\Boost-1.73.0" )
79 | # VS 2017
80 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" ( set CMAKE_GENERATOR="Visual Studio 15 2017 %arch%" )
81 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" ( call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat" )
82 | # Diagnostics
83 | - cmd: echo %CMAKE_GENERATOR%
84 | - cmd: cmake --version
85 | - cmd: msbuild /version
86 |
87 |
88 | # scripts that run after cloning repository
89 | install:
90 | # Windows
91 | # VS 2019
92 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( mkdir %BOOST_ROOT2% )
93 | - cmd: "if \"%APPVEYOR_BUILD_WORKER_IMAGE%\"==\"Visual Studio 2019\" ( echo using python : 3.7 : C:\\\\Python37-x64\\\\python.exe ; > %BOOST_ROOT2%\\user-config.jam )"
94 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( cd "C:\Libraries\boost_1_73_0" && bootstrap )
95 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( cd "C:\Libraries\boost_1_73_0" && b2 --user-config=%BOOST_ROOT2%\\user-config.jam python=3.7 toolset=msvc-14.2 release debug runtime-link=shared link=static --build-type=complete --abbreviate-paths architecture=x86 address-model=64 --with-python install -j4 -d0 --prefix=%BOOST_ROOT2%)
96 | # All Windows Images
97 | - cmd: choco install -y opencv
98 | - cmd: set PATH=C:\tools\opencv\build\bin;C:\tools\opencv\build\x64\vc15\bin;C:\Python37-x64;C:\Python37-x64\Scripts;%PATH%
99 | - cmd: set OpenCV_DIR=C:\tools\opencv\build\x64\vc15\lib
100 | # install numpy & pytest
101 | - cmd: pip install numpy pytest
102 | # Ubuntu (all)
103 | - sh: sudo DEBIAN_FRONTEND=noninteractive apt-get update -qq && sudo apt-get install -qq
104 | - sh: sudo DEBIAN_FRONTEND=noninteractive apt-get -y install python3-dev python3-numpy python3-pytest
105 | - sh: sudo DEBIAN_FRONTEND=noninteractive apt-get -y install python-dev python-numpy python-pytest
106 | - sh: sudo DEBIAN_FRONTEND=noninteractive apt-get -y install libboost-python-dev
107 | - sh: sudo DEBIAN_FRONTEND=noninteractive apt-get -y install libtbb-dev libopencv-dev
108 |
109 | build_script:
110 | # Windows 10
111 | - cmd: cd %APPVEYOR_BUILD_FOLDER%
112 | - cmd: mkdir build
113 | - cmd: cd build
114 | - cmd: cmake --version
115 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" ( cmake .. -G %CMAKE_GENERATOR% -DCMAKE_BUILD_TYPE=%Configuration% -DBUILD_TEST_PROJECT=ON -DPYTHON_DESIRED_VERSION=%REQUESTED_PYTHON_VERSION% -DBOOST_ROOT=C:\Libraries\boost_1_69_0 )
116 | - cmd: if "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2019" ( cmake .. -G %CMAKE_GENERATOR% %VC2019ARCH% -DCMAKE_BUILD_TYPE=%Configuration% -DBUILD_TEST_PROJECT=ON -DPYTHON_DESIRED_VERSION=%REQUESTED_PYTHON_VERSION% -DBoost_DIR=C:\projects\boost_1_73\lib\cmake\Boost-1.73.0 )
117 | - cmd: cmake --build . --target ALL_BUILD --config %Configuration% -- /maxcpucount:4
118 | # Ubuntu (all)
119 | - sh: echo Ubuntu build script
120 | - sh: echo Requested python version -
121 | - sh: echo $REQUESTED_PYTHON_VERSION
122 | - sh: mkdir build; cd build
123 | - sh: cmake -DCMAKE_BUILD_TYPE=$Configuration -DBUILD_TEST_PROJECT=ON -DPYTHON_DESIRED_VERSION=$REQUESTED_PYTHON_VERSION ..
124 | - sh: make -j4
125 |
126 |
127 |
128 |
129 | #build:
130 | # project: c:\projects\pbcvt\build\pbcvt.sln
131 | # verbosity: minimal
132 | # parallel: true
133 |
--------------------------------------------------------------------------------
/cmake/DetectPython.cmake:
--------------------------------------------------------------------------------
1 | # Search packages for host system instead of packages for target system
2 | # in case of cross compilation thess macro should be defined by toolchain file
3 | if(NOT COMMAND find_host_package)
4 | macro(find_host_package) # fallback definition: when no toolchain file provides one, host search == target search
5 | find_package(${ARGN})
6 | endmacro()
7 | endif()
8 | if(NOT COMMAND find_host_program)
9 | macro(find_host_program) # analogous fallback for locating programs on the build host
10 | find_program(${ARGN})
11 | endmacro()
12 | endif()
13 |
14 | macro(check_environment_variables) # for each named variable: if unset in CMake but present in the environment, import it
15 | foreach(_var ${ARGN})
16 | if(NOT DEFINED ${_var} AND DEFINED ENV{${_var}})
17 | set(__value "$ENV{${_var}}")
18 | file(TO_CMAKE_PATH "${__value}" __value) # Assume that we receive paths; normalize backslashes etc.
19 | set(${_var} "${__value}")
20 | message(STATUS "Update variable ${_var} from environment: ${${_var}}")
21 | endif()
22 | endforeach()
23 | endmacro()
24 |
25 | # clears all passed variables (both the normal-scope value and any CACHE entry)
26 | macro(clear_vars)
27 | foreach(_var ${ARGN})
28 | unset(${_var})
29 | unset(${_var} CACHE) # also drop the cached copy so stale detection results cannot leak into a re-run
30 | endforeach()
31 | endmacro()
32 |
33 | # Find specified Python version
34 | # Arguments:
35 | # preferred_version (value): Version to check for first
36 | # min_version (value): Minimum supported version
37 | # library_env (value): Name of Python library ENV variable to check
38 | # include_dir_env (value): Name of Python include directory ENV variable to check
39 | # found (variable): Set if interpreter found
40 | # executable (variable): Output of executable found
41 | # version_string (variable): Output of found version
42 | # version_major (variable): Output of found major version
43 | # version_minor (variable): Output of found minor version
44 | # libs_found (variable): Set if libs found
45 | # libs_version_string (variable): Output of found libs version
46 | # libraries (variable): Output of found Python libraries
47 | # library (variable): Output of found Python library
48 | # debug_libraries (variable): Output of found Python debug libraries
49 | # debug_library (variable): Output of found Python debug library
50 | # include_path (variable): Output of found Python include path
51 | # include_dir (variable): Output of found Python include dir
52 | # include_dir2 (variable): Output of found Python include dir2
53 | # packages_path (variable): Output of found Python packages path
54 | # numpy_include_dirs (variable): Output of found Python Numpy include dirs
55 | # numpy_version (variable): Output of found Python Numpy version
56 | function(find_python preferred_version min_version library_env include_dir_env
57 | found executable version_string version_major version_minor
58 | libs_found libs_version_string libraries library debug_libraries
59 | debug_library include_path include_dir include_dir2 packages_path
60 | numpy_include_dirs numpy_version)
61 | if(NOT ${found}) # re-run detection only if this interpreter was not found on a previous configure
62 | # if(" ${executable}" STREQUAL " PYTHON_EXECUTABLE")
63 | # set(__update_python_vars 0)
64 | # else()
65 | set(__update_python_vars 1) # always scrub find_package side effects from this scope afterwards (see unsets below)
66 | # endif()
67 | 
68 | check_environment_variables(${executable})
69 | if(${executable})
70 | set(PYTHON_EXECUTABLE "${${executable}}")
71 | endif()
72 | 
73 | if(WIN32 AND NOT ${executable} AND OPENCV_PYTHON_PREFER_WIN32_REGISTRY) # deprecated
74 | # search for executable with the same bitness as resulting binaries
75 | # standard FindPythonInterp always prefers executable from system path
76 | # this is really important because we are using the interpreter for numpy search and for choosing the install location
77 | foreach(_CURRENT_VERSION ${Python_ADDITIONAL_VERSIONS} "${preferred_version}" "${min_version}")
78 | find_host_program(PYTHON_EXECUTABLE
79 | NAMES python${_CURRENT_VERSION} python
80 | PATHS
81 | [HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Python\\\\PythonCore\\\\${_CURRENT_VERSION}\\\\InstallPath]
82 | [HKEY_CURRENT_USER\\\\SOFTWARE\\\\Python\\\\PythonCore\\\\${_CURRENT_VERSION}\\\\InstallPath]
83 | NO_SYSTEM_ENVIRONMENT_PATH
84 | )
85 | endforeach()
86 | endif()
87 | 
88 | if(preferred_version)
89 | set(__python_package_version "${preferred_version} EXACT")
90 | find_host_package(PythonInterp "${preferred_version}" EXACT)
91 | if(NOT PYTHONINTERP_FOUND)
92 | message(STATUS "Python is not found: ${preferred_version} EXACT")
93 | endif()
94 | elseif(min_version)
95 | set(__python_package_version "${min_version}")
96 | find_host_package(PythonInterp "${min_version}")
97 | else()
98 | set(__python_package_version "")
99 | find_host_package(PythonInterp)
100 | endif()
101 | 
102 | string(REGEX MATCH "^[0-9]+" _python_version_major "${min_version}") # major version implied by the caller's minimum (empty when no minimum given)
103 | 
104 | if(PYTHONINTERP_FOUND)
105 | # Check if python major version is correct
106 | if(" ${_python_version_major}" STREQUAL " ")
107 | set(_python_version_major "${PYTHON_VERSION_MAJOR}")
108 | endif()
109 | if(NOT "${_python_version_major}" STREQUAL "${PYTHON_VERSION_MAJOR}"
110 | AND NOT DEFINED ${executable}
111 | )
112 | if(NOT OPENCV_SKIP_PYTHON_WARNING)
113 | message(WARNING "CMake's 'find_host_package(PythonInterp ${__python_package_version})' found wrong Python version:\n"
114 | "PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}\n"
115 | "PYTHON_VERSION_STRING=${PYTHON_VERSION_STRING}\n"
116 | "Consider specify '${executable}' variable via CMake command line or environment variables\n")
117 | endif()
118 | clear_vars(PYTHONINTERP_FOUND PYTHON_EXECUTABLE PYTHON_VERSION_STRING PYTHON_VERSION_MAJOR PYTHON_VERSION_MINOR PYTHON_VERSION_PATCH)
119 | if(NOT CMAKE_VERSION VERSION_LESS "3.12")
120 | if(_python_version_major STREQUAL "2")
121 | set(__PYTHON_PREFIX Python2)
122 | else()
123 | set(__PYTHON_PREFIX Python3)
124 | endif()
125 | find_host_package(${__PYTHON_PREFIX} "${preferred_version}" COMPONENTS Interpreter)
126 | if(${__PYTHON_PREFIX}_EXECUTABLE)
127 | set(PYTHON_EXECUTABLE "${${__PYTHON_PREFIX}_EXECUTABLE}")
128 | find_host_package(PythonInterp "${preferred_version}") # Populate other variables
129 | endif()
130 | else()
131 | message(STATUS "Consider using CMake 3.12+ for better Python support")
132 | endif()
133 | endif()
134 | if(PYTHONINTERP_FOUND AND "${_python_version_major}" STREQUAL "${PYTHON_VERSION_MAJOR}")
135 | # Copy outputs
136 | set(_found ${PYTHONINTERP_FOUND})
137 | set(_executable ${PYTHON_EXECUTABLE})
138 | set(_version_string ${PYTHON_VERSION_STRING})
139 | set(_version_major ${PYTHON_VERSION_MAJOR})
140 | set(_version_minor ${PYTHON_VERSION_MINOR})
141 | set(_version_patch ${PYTHON_VERSION_PATCH})
142 | endif()
143 | endif()
144 | 
145 | if(__update_python_vars)
146 | # Clear find_host_package side effects
147 | unset(PYTHONINTERP_FOUND)
148 | unset(PYTHON_EXECUTABLE CACHE)
149 | unset(PYTHON_VERSION_STRING)
150 | unset(PYTHON_VERSION_MAJOR)
151 | unset(PYTHON_VERSION_MINOR)
152 | unset(PYTHON_VERSION_PATCH)
153 | endif()
154 | 
155 | if(_found)
156 | set(_version_major_minor "${_version_major}.${_version_minor}")
157 | 
158 | if(NOT ANDROID AND NOT APPLE_FRAMEWORK)
159 | check_environment_variables(${library_env} ${include_dir_env})
160 | if(NOT ${${library_env}} STREQUAL "")
161 | set(PYTHON_LIBRARY "${${library_env}}")
162 | endif()
163 | if(NOT ${${include_dir_env}} STREQUAL "")
164 | set(PYTHON_INCLUDE_DIR "${${include_dir_env}}")
165 | endif()
166 | 
167 | # not using _version_string here, because it might not conform to the CMake version format
168 | if(CMAKE_CROSSCOMPILING)
169 | # builder version can differ from target, matching base version (e.g. 2.7)
170 | find_package(PythonLibs "${_version_major_minor}")
171 | else()
172 | find_package(PythonLibs "${_version_major_minor}.${_version_patch}" EXACT)
173 | endif()
174 | 
175 | if(PYTHONLIBS_FOUND)
176 | # Copy outputs
177 | set(_libs_found ${PYTHONLIBS_FOUND})
178 | set(_libraries ${PYTHON_LIBRARIES})
179 | set(_include_path ${PYTHON_INCLUDE_PATH})
180 | set(_include_dirs ${PYTHON_INCLUDE_DIRS})
181 | set(_debug_libraries ${PYTHON_DEBUG_LIBRARIES})
182 | set(_libs_version_string ${PYTHONLIBS_VERSION_STRING})
183 | set(_debug_library ${PYTHON_DEBUG_LIBRARY})
184 | set(_library ${PYTHON_LIBRARY})
185 | set(_library_debug ${PYTHON_LIBRARY_DEBUG})
186 | set(_library_release ${PYTHON_LIBRARY_RELEASE})
187 | set(_include_dir ${PYTHON_INCLUDE_DIR})
188 | set(_include_dir2 ${PYTHON_INCLUDE_DIR2})
189 | endif()
190 | if(__update_python_vars)
191 | # Clear find_package side effects
192 | unset(PYTHONLIBS_FOUND)
193 | unset(PYTHON_LIBRARIES)
194 | unset(PYTHON_INCLUDE_PATH)
195 | unset(PYTHON_INCLUDE_DIRS)
196 | unset(PYTHON_DEBUG_LIBRARIES)
197 | unset(PYTHONLIBS_VERSION_STRING)
198 | unset(PYTHON_DEBUG_LIBRARY CACHE)
199 | unset(PYTHON_LIBRARY)
200 | unset(PYTHON_LIBRARY_DEBUG)
201 | unset(PYTHON_LIBRARY_RELEASE)
202 | unset(PYTHON_LIBRARY CACHE)
203 | unset(PYTHON_LIBRARY_DEBUG CACHE)
204 | unset(PYTHON_LIBRARY_RELEASE CACHE)
205 | unset(PYTHON_INCLUDE_DIR CACHE)
206 | unset(PYTHON_INCLUDE_DIR2 CACHE)
207 | endif()
208 | endif()
209 | 
210 | if(NOT ANDROID AND NOT IOS)
211 | if(CMAKE_HOST_UNIX)
212 | execute_process(COMMAND ${_executable} -c "from distutils.sysconfig import *; print(get_python_lib())"
213 | RESULT_VARIABLE _cvpy_process
214 | OUTPUT_VARIABLE _std_packages_path
215 | OUTPUT_STRIP_TRAILING_WHITESPACE)
216 | if("${_std_packages_path}" MATCHES "site-packages")
217 | set(_packages_path "python${_version_major_minor}/site-packages")
218 | else() #debian based assumed, install to the dist-packages.
219 | set(_packages_path "python${_version_major_minor}/dist-packages")
220 | endif()
221 | if(EXISTS "${CMAKE_INSTALL_PREFIX}/lib${LIB_SUFFIX}/${_packages_path}") # fixed: test the freshly computed _packages_path; the previous ${${packages_path}} read the output cache variable, which is empty on a first configure and already lib-prefixed on re-runs
222 | set(_packages_path "lib${LIB_SUFFIX}/${_packages_path}")
223 | else()
224 | set(_packages_path "lib/${_packages_path}")
225 | endif()
226 | elseif(CMAKE_HOST_WIN32)
227 | get_filename_component(_path "${_executable}" PATH)
228 | file(TO_CMAKE_PATH "${_path}" _path)
229 | if(NOT EXISTS "${_path}/Lib/site-packages")
230 | unset(_path)
231 | get_filename_component(_path "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\${_version_major_minor}\\InstallPath]" ABSOLUTE)
232 | if(NOT _path)
233 | get_filename_component(_path "[HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${_version_major_minor}\\InstallPath]" ABSOLUTE)
234 | endif()
235 | file(TO_CMAKE_PATH "${_path}" _path)
236 | endif()
237 | set(_packages_path "${_path}/Lib/site-packages")
238 | unset(_path)
239 | endif()
240 | 
241 | set(_numpy_include_dirs "${${numpy_include_dirs}}") # honor a user/cache-provided value before probing
242 | 
243 | if(NOT _numpy_include_dirs)
244 | if(CMAKE_CROSSCOMPILING)
245 | message(STATUS "Cannot probe for Python/Numpy support (because we are cross-compiling OpenCV)")
246 | message(STATUS "If you want to enable Python/Numpy support, set the following variables:")
247 | message(STATUS " PYTHON2_INCLUDE_PATH")
248 | message(STATUS " PYTHON2_LIBRARIES (optional on Unix-like systems)")
249 | message(STATUS " PYTHON2_NUMPY_INCLUDE_DIRS")
250 | message(STATUS " PYTHON3_INCLUDE_PATH")
251 | message(STATUS " PYTHON3_LIBRARIES (optional on Unix-like systems)")
252 | message(STATUS " PYTHON3_NUMPY_INCLUDE_DIRS")
253 | else()
254 | # Attempt to discover the NumPy include directory. If this succeeds, then build python API with NumPy
255 | execute_process(COMMAND "${_executable}" -c "import os; os.environ['DISTUTILS_USE_SDK']='1'; import numpy.distutils; print(os.pathsep.join(numpy.distutils.misc_util.get_numpy_include_dirs()))"
256 | RESULT_VARIABLE _numpy_process
257 | OUTPUT_VARIABLE _numpy_include_dirs
258 | OUTPUT_STRIP_TRAILING_WHITESPACE)
259 | if(NOT _numpy_process EQUAL 0)
260 | unset(_numpy_include_dirs)
261 | endif()
262 | endif()
263 | endif()
264 | 
265 | if(_numpy_include_dirs)
266 | file(TO_CMAKE_PATH "${_numpy_include_dirs}" _numpy_include_dirs)
267 | if(CMAKE_CROSSCOMPILING)
268 | if(NOT _numpy_version)
269 | set(_numpy_version "undefined - cannot be probed because of the cross-compilation")
270 | endif()
271 | else()
272 | execute_process(COMMAND "${_executable}" -c "import numpy; print(numpy.version.version)"
273 | RESULT_VARIABLE _numpy_process
274 | OUTPUT_VARIABLE _numpy_version
275 | OUTPUT_STRIP_TRAILING_WHITESPACE)
276 | endif()
277 | endif()
278 | endif(NOT ANDROID AND NOT IOS)
279 | endif()
280 | 
281 | # Export return values
282 | set(${found} "${_found}" CACHE INTERNAL "")
283 | set(${executable} "${_executable}" CACHE FILEPATH "Path to Python interpretor")
284 | set(${version_string} "${_version_string}" CACHE INTERNAL "")
285 | set(${version_major} "${_version_major}" CACHE INTERNAL "")
286 | set(${version_minor} "${_version_minor}" CACHE INTERNAL "")
287 | set(${libs_found} "${_libs_found}" CACHE INTERNAL "")
288 | set(${libs_version_string} "${_libs_version_string}" CACHE INTERNAL "")
289 | set(${libraries} "${_libraries}" CACHE INTERNAL "Python libraries")
290 | set(${library} "${_library}" CACHE FILEPATH "Path to Python library")
291 | set(${debug_libraries} "${_debug_libraries}" CACHE INTERNAL "")
292 | set(${debug_library} "${_debug_library}" CACHE FILEPATH "Path to Python debug")
293 | set(${include_path} "${_include_path}" CACHE INTERNAL "")
294 | set(${include_dir} "${_include_dir}" CACHE PATH "Python include dir")
295 | set(${include_dir2} "${_include_dir2}" CACHE PATH "Python include dir 2")
296 | set(${packages_path} "${_packages_path}" CACHE PATH "Where to install the python packages.")
297 | set(${numpy_include_dirs} ${_numpy_include_dirs} CACHE PATH "Path to numpy headers")
298 | set(${numpy_version} "${_numpy_version}" CACHE INTERNAL "")
299 | endif()
300 | endfunction(find_python)
301 |
302 | find_python("" 2.7 PYTHON2_LIBRARY PYTHON2_INCLUDE_DIR
303 | PYTHON2INTERP_FOUND PYTHON2_EXECUTABLE PYTHON2_VERSION_STRING
304 | PYTHON2_VERSION_MAJOR PYTHON2_VERSION_MINOR PYTHON2LIBS_FOUND
305 | PYTHON2LIBS_VERSION_STRING PYTHON2_LIBRARIES PYTHON2_LIBRARY
306 | PYTHON2_DEBUG_LIBRARIES PYTHON2_LIBRARY_DEBUG PYTHON2_INCLUDE_PATH
307 | PYTHON2_INCLUDE_DIR PYTHON2_INCLUDE_DIR2 PYTHON2_PACKAGES_PATH
308 | PYTHON2_NUMPY_INCLUDE_DIRS PYTHON2_NUMPY_VERSION) # probe for a Python 2 (minimum 2.7); results land in the PYTHON2_* cache variables
309 | 
310 | find_python("" "3.5" PYTHON3_LIBRARY PYTHON3_INCLUDE_DIR
311 | PYTHON3INTERP_FOUND PYTHON3_EXECUTABLE PYTHON3_VERSION_STRING
312 | PYTHON3_VERSION_MAJOR PYTHON3_VERSION_MINOR PYTHON3LIBS_FOUND
313 | PYTHON3LIBS_VERSION_STRING PYTHON3_LIBRARIES PYTHON3_LIBRARY
314 | PYTHON3_DEBUG_LIBRARIES PYTHON3_LIBRARY_DEBUG PYTHON3_INCLUDE_PATH
315 | PYTHON3_INCLUDE_DIR PYTHON3_INCLUDE_DIR2 PYTHON3_PACKAGES_PATH
316 | PYTHON3_NUMPY_INCLUDE_DIRS PYTHON3_NUMPY_VERSION) # probe for a Python 3 (minimum 3.5); results land in the PYTHON3_* cache variables
317 | 
318 | 
319 | if(PYTHON_DEFAULT_EXECUTABLE) # a user-specified default interpreter always wins
320 | set(PYTHON_DEFAULT_AVAILABLE "TRUE")
321 | elseif(PYTHON2_EXECUTABLE AND PYTHON2INTERP_FOUND)
322 | # Use Python 2 as default Python interpreter
323 | set(PYTHON_DEFAULT_AVAILABLE "TRUE")
324 | set(PYTHON_DEFAULT_EXECUTABLE "${PYTHON2_EXECUTABLE}")
325 | elseif(PYTHON3_EXECUTABLE AND PYTHON3INTERP_FOUND)
326 | # Use Python 3 as fallback Python interpreter (if there is no Python 2)
327 | set(PYTHON_DEFAULT_AVAILABLE "TRUE")
328 | set(PYTHON_DEFAULT_EXECUTABLE "${PYTHON3_EXECUTABLE}")
329 | endif()
330 |
331 |
--------------------------------------------------------------------------------
/include/pyboostcvconverter/pyboostcvconverter.hpp:
--------------------------------------------------------------------------------
1 | /*
2 | * CVBoostConverter.hpp
3 | *
4 | * Created on: Mar 20, 2014
5 | * Author: Gregory Kramida
6 | * Copyright: (c) 2014 Gregory Kramida
7 | * License: MIT
8 | */
9 |
10 | #ifndef CVBOOSTCONVERTER_HPP_
11 | #define CVBOOSTCONVERTER_HPP_
12 |
13 | #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
14 | #include <Python.h>
15 | #include <opencv2/core/core.hpp>
16 | #include <numpy/ndarrayobject.h>
17 | #include <boost/python.hpp>
18 | #include <cstdio>
19 |
20 | #if (PY_VERSION_HEX >= 0x03000000)
21 | #ifndef NUMPY_IMPORT_ARRAY_RETVAL
22 | #define NUMPY_IMPORT_ARRAY_RETVAL NULL
23 | #endif
24 | #else
25 | #ifndef NUMPY_IMPORT_ARRAY_RETVAL
26 | #define NUMPY_IMPORT_ARRAY_RETVAL
27 | #endif
28 | #endif
29 |
30 | #define BOOST_BIND_GLOBAL_PLACEHOLDERS
31 |
32 | namespace pbcvt{
33 | 
34 | using namespace cv;
35 | 
36 | 
37 | static PyObject* opencv_error = 0; // Python exception object used by ERRWRAP2 to report a caught cv::Exception
38 | 
39 | 
40 | //=================== MACROS (ERRWRAP2 runs expr with the GIL released and converts cv::Exception into a Python error) =====
41 | #define ERRWRAP2(expr) \
42 | try \
43 | { \
44 | PyAllowThreads allowThreads; \
45 | expr; \
46 | } \
47 | catch (const cv::Exception &e) \
48 | { \
49 | PyErr_SetString(opencv_error, e.what()); \
50 | return 0; \
51 | }
52 | 
53 | //=================== ERROR HANDLING =========================================================
54 | 
55 | static int failmsg(const char *fmt, ...); // printf-style helper: sets a Python error and returns 0 (defined in the pyboost_cv*_converter.cpp sources)
56 | 
57 | //=================== THREADING ==============================================================
58 | class PyAllowThreads; // scope guard releasing the GIL (defined in the converter .cpp)
59 | class PyEnsureGIL; // scope guard acquiring the GIL (defined in the converter .cpp)
60 | 
61 | static size_t REFCOUNT_OFFSET = (size_t)&(((PyObject*)0)->ob_refcnt) +
62 | (0x12345678 != *(const size_t*)"\x78\x56\x34\x12\0\0\0\0\0")*sizeof(int); // byte offset of ob_refcnt inside PyObject; the string-punning test adds sizeof(int) when the host byte order is not little-endian — NOTE(review): relies on implementation-defined behavior
63 | 
64 | static inline PyObject* pyObjectFromRefcount(const int* refcount) // recover the owning PyObject from a pointer to its refcount field
65 | {
66 | return (PyObject*)((size_t)refcount - REFCOUNT_OFFSET);
67 | }
68 | 
69 | static inline int* refcountFromPyObject(const PyObject* obj) // expose ob_refcnt as an int* (aliased as cv::Mat's refcount by the numpy allocator)
70 | {
71 | return (int*)((size_t)obj + REFCOUNT_OFFSET);
72 | }
73 | 
74 | //=================== NUMPY ALLOCATOR FOR OPENCV =============================================
75 | 
76 | class NumpyAllocator; // cv::MatAllocator backed by numpy arrays (defined in the converter .cpp)
77 | 
78 | //=================== STANDALONE CONVERTER FUNCTIONS =========================================
79 | 
80 | PyObject* fromMatToNDArray(const Mat& m); // convert a cv::Mat into a numpy ndarray PyObject
81 | Mat fromNDArrayToMat(PyObject* o); // convert a numpy ndarray PyObject into a cv::Mat
82 | 
83 | //=================== BOOST CONVERTERS =======================================================
84 | 
85 | struct matToNDArrayBoostConverter { // boost::python to-python converter: cv::Mat -> ndarray
86 | static PyObject* convert(Mat const& m);
87 | };
88 | 
89 | 
90 | struct matFromNDArrayBoostConverter { // boost::python from-python converter: ndarray -> cv::Mat (register by constructing one instance)
91 | 
92 | matFromNDArrayBoostConverter();
93 | 
94 | /// @brief Check if PyObject is an array and can be converted to OpenCV matrix.
95 | static void* convertible(PyObject* object);
96 | 
97 | /// @brief Construct a Mat from an NDArray object.
98 | static void construct(PyObject* object,
99 | boost::python::converter::rvalue_from_python_stage1_data* data);
100 | };
101 | } // end namespace pbcvt
102 | #endif /* CVBOOSTCONVERTER_HPP_ */
103 |
--------------------------------------------------------------------------------
/src/pyboost_cv2_converter.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * CV2BoostConverter.cpp
3 | *
4 | * Created on: May 21, 2015
5 | * Author: Gregory Kramida
6 | * Copyright: 2015 Gregory Kramida
7 | */
8 | #define NO_IMPORT_ARRAY
9 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
10 | #include <pyboostcvconverter/pyboostcvconverter.hpp>
11 | #if CV_VERSION_EPOCH == 2 || (!defined CV_VERSION_EPOCH && CV_VERSION_MAJOR == 2)
12 | namespace pbcvt{
13 | using namespace cv;
14 |
15 | //=================== ERROR HANDLING =========================================================
16 | static int failmsg(const char *fmt, ...) // printf-style helper: raises a Python TypeError with the formatted message and returns 0
17 | {
18 | char buf[1000]; // formatted message (silently truncated beyond this size)
19 | 
20 | va_list args;
21 | va_start(args, fmt);
22 | vsnprintf(buf, sizeof(buf), fmt, args);
23 | va_end(args);
24 | 
25 | PyErr_SetString(PyExc_TypeError, buf);
26 | return 0;
27 | }
28 |
29 | //=================== THREADING ==============================================================
30 | 
31 | class PyAllowThreads // RAII guard: releases the Python GIL for the lifetime of the object
32 | {
33 | public:
34 | PyAllowThreads() :
35 | _state(PyEval_SaveThread()) { // save the current thread state and release the GIL
36 | }
37 | ~PyAllowThreads()
38 | {
39 | PyEval_RestoreThread(_state); // re-acquire the GIL and restore the saved thread state
40 | }
41 | private:
42 | PyThreadState* _state; // thread state captured by PyEval_SaveThread
43 | };
44 |
45 | class PyEnsureGIL // RAII guard: ensures the GIL is held for the lifetime of the object
46 | {
47 | public:
48 | PyEnsureGIL() :
49 | _state(PyGILState_Ensure()) { // acquire the GIL (safe even if already held)
50 | }
51 | ~PyEnsureGIL()
52 | {
53 | PyGILState_Release(_state); // restore the GIL to its prior state
54 | }
55 | private:
56 | PyGILState_STATE _state; // token returned by PyGILState_Ensure
57 | };
58 |
59 | //=================== NUMPY ALLOCATOR FOR OPENCV =============================================
// Custom Mat allocator (OpenCV 2.x MatAllocator API): backs every Mat
// allocation with a new NumPy array, so buffer lifetime is governed by
// Python reference counting.
class NumpyAllocator:
		public MatAllocator
{
public:
	NumpyAllocator() {
	}
	~NumpyAllocator() {
	}

	// Allocate Mat storage by creating a NumPy array of matching shape and
	// dtype. The Mat's refcount pointer is aliased to the PyObject refcount
	// (see refcountFromPyObject), tying the two lifetimes together.
	void allocate(int dims, const int* sizes, int type, int*& refcount,
			uchar*& datastart, uchar*& data, size_t* step) {
		PyEnsureGIL gil;  // NumPy C-API calls require the GIL

		int depth = CV_MAT_DEPTH(type);
		int cn = CV_MAT_CN(type);
		// f == 1 on 64-bit platforms (sizeof(size_t) == 8), 0 on 32-bit:
		// unrecognized depths map to NPY_ULONGLONG or NPY_UINT accordingly.
		const int f = (int) (sizeof(size_t) / 8);
		int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
						depth == CV_16U ? NPY_USHORT :
						depth == CV_16S ? NPY_SHORT :
						depth == CV_32S ? NPY_INT :
						depth == CV_32F ? NPY_FLOAT :
						depth == CV_64F ? NPY_DOUBLE : f * NPY_ULONGLONG + (f ^ 1) * NPY_UINT;
		int i;
		npy_intp _sizes[CV_MAX_DIM + 1];
		for (i = 0; i < dims; i++) {
			_sizes[i] = sizes[i];
		}

		// multi-channel matrices get an extra trailing axis of length cn
		if (cn > 1) {
			_sizes[dims++] = cn;
		}

		PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);

		if (!o) {
			CV_Error_(CV_StsError,
					("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
		}
		refcount = refcountFromPyObject(o);
		npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
		// copy strides for all but the (implicit) channel axis
		for( i = 0; i < dims - (cn > 1); i++ )
			step[i] = (size_t)_strides[i];
		datastart = data = (uchar*)PyArray_DATA((PyArrayObject*) o);
	}

	// Drop a reference on the owning numpy array; Python frees the buffer
	// once no references remain.
	// NOTE(review): Py_INCREF immediately followed by Py_DECREF is a net
	// no-op; this mirrors upstream OpenCV 2.x cv2.cpp — confirm intent.
	void deallocate(int* refcount, uchar*, uchar*) {
		PyEnsureGIL gil;
		if (!refcount)
			return;
		PyObject* o = pyObjectFromRefcount(refcount);
		Py_INCREF(o);
		Py_DECREF(o);
	}
};
114 |
115 | //=================== ALLOCATOR INITIALIZATION ==================================================
116 | NumpyAllocator g_numpyAllocator;
117 |
118 | //=================== STANDALONE CONVERTER FUNCTIONS =========================================
119 |
// Wrap a cv::Mat as a NumPy ndarray (OpenCV 2.x refcount-based API).
// If the Mat's data was not allocated by g_numpyAllocator, the data is first
// copied into a fresh numpy-backed Mat. Empty Mats map to Python None.
PyObject* fromMatToNDArray(const Mat& m) {
	if( !m.data )
		Py_RETURN_NONE;
	Mat temp, *p = (Mat*)&m;
	if(!p->refcount || p->allocator != &g_numpyAllocator)
	{
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));  // copy allocates through NumpyAllocator
		p = &temp;
	}
	p->addref();  // Mat refcount aliases the PyObject refcount (see allocator)
	return pyObjectFromRefcount(p->refcount);
}
133 |
134 | Mat fromNDArrayToMat(PyObject* o) {
135 | cv::Mat m;
136 | if (!PyArray_Check(o)) {
137 | failmsg("argument is not a numpy array");
138 | if (!m.data)
139 | m.allocator = &g_numpyAllocator;
140 | } else {
141 | PyArrayObject* oarr = (PyArrayObject*) o;
142 |
143 | bool needcopy = false, needcast = false;
144 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
145 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
146 | typenum == NPY_USHORT ? CV_16U :
147 | typenum == NPY_SHORT ? CV_16S :
148 | typenum == NPY_INT ? CV_32S :
149 | typenum == NPY_INT32 ? CV_32S :
150 | typenum == NPY_FLOAT ? CV_32F :
151 | typenum == NPY_DOUBLE ? CV_64F : -1;
152 |
153 | if (type < 0) {
154 | if (typenum == NPY_INT64 || typenum == NPY_UINT64
155 | || type == NPY_LONG) {
156 | needcopy = needcast = true;
157 | new_typenum = NPY_INT;
158 | type = CV_32S;
159 | } else {
160 | failmsg("Argument data type is not supported");
161 | m.allocator = &g_numpyAllocator;
162 | return m;
163 | }
164 | }
165 |
166 | #ifndef CV_MAX_DIM
167 | const int CV_MAX_DIM = 32;
168 | #endif
169 |
170 | int ndims = PyArray_NDIM(oarr);
171 | if (ndims >= CV_MAX_DIM) {
172 | failmsg("Dimensionality of argument is too high");
173 | if (!m.data)
174 | m.allocator = &g_numpyAllocator;
175 | return m;
176 | }
177 | int size[CV_MAX_DIM + 1];
178 | size_t step[CV_MAX_DIM + 1];
179 | size_t elemsize = CV_ELEM_SIZE1(type);
180 | const npy_intp* _sizes = PyArray_DIMS(oarr);
181 | const npy_intp* _strides = PyArray_STRIDES(oarr);
182 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
183 |
184 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
185 | // these checks handle cases of
186 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
187 | // b) transposed arrays, where _strides[] elements go in non-descending order
188 | // c) flipped arrays, where some of _strides[] elements are negative
189 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
190 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
191 | needcopy = true;
192 | }
193 |
194 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
195 | needcopy = true;
196 | if (needcopy) {
197 |
198 | if (needcast) {
199 | o = PyArray_Cast(oarr, new_typenum);
200 | oarr = (PyArrayObject*) o;
201 | } else {
202 | oarr = PyArray_GETCONTIGUOUS(oarr);
203 | o = (PyObject*) oarr;
204 | }
205 |
206 | _strides = PyArray_STRIDES(oarr);
207 | }
208 |
209 | for (int i = 0; i < ndims; i++) {
210 | size[i] = (int) _sizes[i];
211 | step[i] = (size_t) _strides[i];
212 | }
213 |
214 | // handle degenerate case
215 | if (ndims == 0) {
216 | size[ndims] = 1;
217 | step[ndims] = elemsize;
218 | ndims++;
219 | }
220 | if (ismultichannel) {
221 | ndims--;
222 | type |= CV_MAKETYPE(0, size[2]);
223 | }
224 |
225 | m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
226 |
227 | if (m.data){
228 | m.refcount = refcountFromPyObject(o);
229 | if (!needcopy){
230 | m.addref(); // protect the original numpy array from deallocation
231 | // (since Mat destructor will decrement the reference counter)
232 | }
233 | };
234 | m.allocator = &g_numpyAllocator;
235 | }
236 | return m;
237 | }
238 |
239 | //=================== BOOST CONVERTERS =======================================================
240 |
// Boost.Python to-Python converter for cv::Mat. Intentionally identical in
// logic to fromMatToNDArray(): copy into a numpy-backed Mat if needed, then
// hand out the aliased PyObject with an extra reference.
PyObject* matToNDArrayBoostConverter::convert(Mat const& m) {
	if( !m.data )
		Py_RETURN_NONE;  // empty Mat maps to Python None
	Mat temp, *p = (Mat*)&m;
	if(!p->refcount || p->allocator != &g_numpyAllocator)
	{
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));
		p = &temp;
	}
	p->addref();
	return pyObjectFromRefcount(p->refcount);
}
254 |
255 | matFromNDArrayBoostConverter::matFromNDArrayBoostConverter() {
256 | boost::python::converter::registry::push_back(matFromNDArrayBoostConverter::convertible,
257 | matFromNDArrayBoostConverter::construct,
258 | boost::python::type_id());
259 | }
260 |
261 | /// @brief Check if PyObject is an array and can be converted to OpenCV matrix.
262 | void* matFromNDArrayBoostConverter::convertible(PyObject* object) {
263 |
264 | if (!PyArray_Check(object)) {
265 | return NULL;
266 | }
267 | #ifndef CV_MAX_DIM
268 | const int CV_MAX_DIM = 32;
269 | #endif
270 | PyArrayObject* oarr = (PyArrayObject*) object;
271 |
272 | int typenum = PyArray_TYPE(oarr);
273 | if (typenum != NPY_INT64 && typenum != NPY_UINT64 && typenum != NPY_LONG
274 | && typenum != NPY_UBYTE && typenum != NPY_BYTE
275 | && typenum != NPY_USHORT && typenum != NPY_SHORT
276 | && typenum != NPY_INT && typenum != NPY_INT32
277 | && typenum != NPY_FLOAT && typenum != NPY_DOUBLE) {
278 | return NULL;
279 | }
280 | int ndims = PyArray_NDIM(oarr); //data type not supported
281 |
282 | if (ndims >= CV_MAX_DIM) {
283 | return NULL; //too many dimensions
284 | }
285 |
286 | return object;
287 | }
288 |
289 | /// @brief Construct a Mat from an NDArray object.
290 | void matFromNDArrayBoostConverter::construct(PyObject* object,
291 | boost::python::converter::rvalue_from_python_stage1_data* data) {
292 |
293 | namespace python = boost::python;
294 | // Object is a borrowed reference, so create a handle indicting it is
295 | // borrowed for proper reference counting.
296 | python::handle<> handle(python::borrowed(object));
297 |
298 | // Obtain a handle to the memory block that the converter has allocated
299 | // for the C++ type.
300 | typedef python::converter::rvalue_from_python_storage storage_type;
301 | void* storage = reinterpret_cast(data)->storage.bytes;
302 |
303 | // Allocate the C++ type into the converter's memory block, and assign
304 | // its handle to the converter's convertible variable. The C++
305 | // container is populated by passing the begin and end iterators of
306 | // the python object to the container's constructor.
307 | PyArrayObject* oarr = (PyArrayObject*) object;
308 |
309 | bool needcopy = false, needcast = false;
310 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
311 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
312 | typenum == NPY_USHORT ? CV_16U :
313 | typenum == NPY_SHORT ? CV_16S :
314 | typenum == NPY_INT ? CV_32S :
315 | typenum == NPY_INT32 ? CV_32S :
316 | typenum == NPY_FLOAT ? CV_32F :
317 | typenum == NPY_DOUBLE ? CV_64F : -1;
318 |
319 | if (type < 0) {
320 | needcopy = needcast = true;
321 | new_typenum = NPY_INT;
322 | type = CV_32S;
323 | }
324 |
325 | #ifndef CV_MAX_DIM
326 | const int CV_MAX_DIM = 32;
327 | #endif
328 | int ndims = PyArray_NDIM(oarr);
329 |
330 | int size[CV_MAX_DIM + 1];
331 | size_t step[CV_MAX_DIM + 1];
332 | size_t elemsize = CV_ELEM_SIZE1(type);
333 | const npy_intp* _sizes = PyArray_DIMS(oarr);
334 | const npy_intp* _strides = PyArray_STRIDES(oarr);
335 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
336 |
337 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
338 | // these checks handle cases of
339 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
340 | // b) transposed arrays, where _strides[] elements go in non-descending order
341 | // c) flipped arrays, where some of _strides[] elements are negative
342 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
343 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
344 | needcopy = true;
345 | }
346 |
347 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
348 | needcopy = true;
349 |
350 | if (needcopy) {
351 |
352 | if (needcast) {
353 | object = PyArray_Cast(oarr, new_typenum);
354 | oarr = (PyArrayObject*) object;
355 | } else {
356 | oarr = PyArray_GETCONTIGUOUS(oarr);
357 | object = (PyObject*) oarr;
358 | }
359 |
360 | _strides = PyArray_STRIDES(oarr);
361 | }
362 |
363 | for (int i = 0; i < ndims; i++) {
364 | size[i] = (int) _sizes[i];
365 | step[i] = (size_t) _strides[i];
366 | }
367 |
368 | // handle degenerate case
369 | if (ndims == 0) {
370 | size[ndims] = 1;
371 | step[ndims] = elemsize;
372 | ndims++;
373 | }
374 |
375 | if (ismultichannel) {
376 | ndims--;
377 | type |= CV_MAKETYPE(0, size[2]);
378 | }
379 | if (!needcopy) {
380 | Py_INCREF(object);
381 | }
382 |
383 | cv::Mat* m = new (storage) cv::Mat(ndims, size, type, PyArray_DATA(oarr), step);
384 | if (m->data){
385 | m->refcount = refcountFromPyObject(object);
386 | };
387 |
388 | m->allocator = &g_numpyAllocator;
389 | data->convertible = storage;
390 | }
391 |
392 | } //end namespace pbcvt
393 |
394 | #endif
395 |
396 |
--------------------------------------------------------------------------------
/src/pyboost_cv3_converter.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * CV3BoostConverter.cpp
3 | *
4 | * Created on: May 21, 2015
5 | * Author: Gregory Kramida
6 | * Copyright: 2015 Gregory Kramida
7 | */
8 | #define NO_IMPORT_ARRAY
9 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
10 | #include <pyboostcvconverter/pyboostcvconverter.hpp>
11 | #if !defined CV_VERSION_EPOCH && CV_VERSION_MAJOR == 3
12 | namespace pbcvt {
13 | using namespace cv;
14 | //=================== ERROR HANDLING =========================================================
15 |
// Format a printf-style message, set it as a Python TypeError, and return 0.
static int failmsg(const char *fmt, ...) {
	char str[1000];  // fixed buffer; vsnprintf truncates longer messages

	va_list ap;
	va_start(ap, fmt);
	vsnprintf(str, sizeof(str), fmt, ap);
	va_end(ap);

	PyErr_SetString(PyExc_TypeError, str);
	return 0;
}
27 |
28 | //=================== THREADING ==============================================================
29 | class PyAllowThreads {
30 | public:
31 | PyAllowThreads() :
32 | _state(PyEval_SaveThread()) {
33 | }
34 | ~PyAllowThreads() {
35 | PyEval_RestoreThread(_state);
36 | }
37 | private:
38 | PyThreadState* _state;
39 | };
40 |
41 | class PyEnsureGIL {
42 | public:
43 | PyEnsureGIL() :
44 | _state(PyGILState_Ensure()) {
45 | }
46 | ~PyEnsureGIL() {
47 | PyGILState_Release(_state);
48 | }
49 | private:
50 | PyGILState_STATE _state;
51 | };
52 |
// Argument classification tags, kept for parity with OpenCV's own cv2
// module sources; not referenced elsewhere in the visible code.
enum {
	ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2
};
56 |
57 | class NumpyAllocator:
58 | public MatAllocator {
59 | public:
60 | NumpyAllocator() {
61 | stdAllocator = Mat::getStdAllocator();
62 | }
63 | ~NumpyAllocator() {
64 | }
65 |
66 | UMatData* allocate(PyObject* o, int dims, const int* sizes, int type,
67 | size_t* step) const {
68 | UMatData* u = new UMatData(this);
69 | u->data = u->origdata = (uchar*) PyArray_DATA((PyArrayObject*) o);
70 | npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
71 | for (int i = 0; i < dims - 1; i++)
72 | step[i] = (size_t) _strides[i];
73 | step[dims - 1] = CV_ELEM_SIZE(type);
74 | u->size = sizes[0] * step[0];
75 | u->userdata = o;
76 | return u;
77 | }
78 |
79 | UMatData* allocate(int dims0, const int* sizes, int type, void* data,
80 | size_t* step, int flags, UMatUsageFlags usageFlags) const {
81 | if (data != 0) {
82 | CV_Error(Error::StsAssert, "The data should normally be NULL!");
83 | // probably this is safe to do in such extreme case
84 | return stdAllocator->allocate(dims0, sizes, type, data, step, flags,
85 | usageFlags);
86 | }
87 | PyEnsureGIL gil;
88 |
89 | int depth = CV_MAT_DEPTH(type);
90 | int cn = CV_MAT_CN(type);
91 | const int f = (int) (sizeof(size_t) / 8);
92 | int typenum =
93 | depth == CV_8U ? NPY_UBYTE :
94 | depth == CV_8S ? NPY_BYTE :
95 | depth == CV_16U ? NPY_USHORT :
96 | depth == CV_16S ? NPY_SHORT :
97 | depth == CV_32S ? NPY_INT :
98 | depth == CV_32F ? NPY_FLOAT :
99 | depth == CV_64F ?
100 | NPY_DOUBLE :
101 | f * NPY_ULONGLONG + (f ^ 1) * NPY_UINT;
102 | int i, dims = dims0;
103 | cv::AutoBuffer _sizes(dims + 1);
104 | for (i = 0; i < dims; i++)
105 | _sizes[i] = sizes[i];
106 | if (cn > 1)
107 | _sizes[dims++] = cn;
108 | PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
109 | if (!o)
110 | CV_Error_(Error::StsError,
111 | ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
112 | return allocate(o, dims0, sizes, type, step);
113 | }
114 |
115 | bool allocate(UMatData* u, int accessFlags,
116 | UMatUsageFlags usageFlags) const {
117 | return stdAllocator->allocate(u, accessFlags, usageFlags);
118 | }
119 |
120 | void deallocate(UMatData* u) const {
121 | if (u) {
122 | PyEnsureGIL gil;
123 | PyObject* o = (PyObject*) u->userdata;
124 | Py_XDECREF(o);
125 | delete u;
126 | }
127 | }
128 |
129 | const MatAllocator* stdAllocator;
130 | };
131 |
132 | //=================== ALLOCATOR INITIALIZATION ==================================================
133 | NumpyAllocator g_numpyAllocator;
134 |
135 | //=================== STANDALONE CONVERTER FUNCTIONS =========================================
136 |
// Wrap a cv::Mat as a NumPy ndarray (OpenCV 3.x UMatData-based API).
// If the Mat's data was not allocated by g_numpyAllocator, the data is
// first copied into a fresh numpy-backed Mat. Empty Mats map to None.
PyObject* fromMatToNDArray(const Mat& m) {
	if (!m.data)
		Py_RETURN_NONE;
	Mat temp,
	*p = (Mat*) &m;
	if (!p->u || p->allocator != &g_numpyAllocator) {
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));  // copy allocates through NumpyAllocator
		p = &temp;
	}
	// the backing ndarray was stored in userdata by NumpyAllocator::allocate
	PyObject* o = (PyObject*) p->u->userdata;
	Py_INCREF(o);
	return o;
}
151 |
152 | Mat fromNDArrayToMat(PyObject* o) {
153 | cv::Mat m;
154 | bool allowND = true;
155 | if (!PyArray_Check(o)) {
156 | failmsg("argument is not a numpy array");
157 | if (!m.data)
158 | m.allocator = &g_numpyAllocator;
159 | } else {
160 | PyArrayObject* oarr = (PyArrayObject*) o;
161 |
162 | bool needcopy = false, needcast = false;
163 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
164 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
165 | typenum == NPY_USHORT ? CV_16U :
166 | typenum == NPY_SHORT ? CV_16S :
167 | typenum == NPY_INT ? CV_32S :
168 | typenum == NPY_INT32 ? CV_32S :
169 | typenum == NPY_FLOAT ? CV_32F :
170 | typenum == NPY_DOUBLE ? CV_64F : -1;
171 |
172 | if (type < 0) {
173 | if (typenum == NPY_INT64 || typenum == NPY_UINT64
174 | || type == NPY_LONG) {
175 | needcopy = needcast = true;
176 | new_typenum = NPY_INT;
177 | type = CV_32S;
178 | } else {
179 | failmsg("Argument data type is not supported");
180 | m.allocator = &g_numpyAllocator;
181 | return m;
182 | }
183 | }
184 |
185 | #ifndef CV_MAX_DIM
186 | const int CV_MAX_DIM = 32;
187 | #endif
188 |
189 | int ndims = PyArray_NDIM(oarr);
190 | if (ndims >= CV_MAX_DIM) {
191 | failmsg("Dimensionality of argument is too high");
192 | if (!m.data)
193 | m.allocator = &g_numpyAllocator;
194 | return m;
195 | }
196 |
197 | int size[CV_MAX_DIM + 1];
198 | size_t step[CV_MAX_DIM + 1];
199 | size_t elemsize = CV_ELEM_SIZE1(type);
200 | const npy_intp* _sizes = PyArray_DIMS(oarr);
201 | const npy_intp* _strides = PyArray_STRIDES(oarr);
202 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
203 |
204 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
205 | // these checks handle cases of
206 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
207 | // b) transposed arrays, where _strides[] elements go in non-descending order
208 | // c) flipped arrays, where some of _strides[] elements are negative
209 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
210 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
211 | needcopy = true;
212 | }
213 |
214 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
215 | needcopy = true;
216 |
217 | if (needcopy) {
218 |
219 | if (needcast) {
220 | o = PyArray_Cast(oarr, new_typenum);
221 | oarr = (PyArrayObject*) o;
222 | } else {
223 | oarr = PyArray_GETCONTIGUOUS(oarr);
224 | o = (PyObject*) oarr;
225 | }
226 |
227 | _strides = PyArray_STRIDES(oarr);
228 | }
229 |
230 | for (int i = 0; i < ndims; i++) {
231 | size[i] = (int) _sizes[i];
232 | step[i] = (size_t) _strides[i];
233 | }
234 |
235 | // handle degenerate case
236 | if (ndims == 0) {
237 | size[ndims] = 1;
238 | step[ndims] = elemsize;
239 | ndims++;
240 | }
241 |
242 | if (ismultichannel) {
243 | ndims--;
244 | type |= CV_MAKETYPE(0, size[2]);
245 | }
246 |
247 | if (ndims > 2 && !allowND) {
248 | failmsg("%s has more than 2 dimensions");
249 | } else {
250 |
251 | m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
252 | m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
253 | m.addref();
254 |
255 | if (!needcopy) {
256 | Py_INCREF(o);
257 | }
258 | }
259 | m.allocator = &g_numpyAllocator;
260 | }
261 | return m;
262 | }
263 |
264 | //=================== BOOST CONVERTERS =======================================================
265 |
// Boost.Python to-Python converter for cv::Mat. Intentionally identical in
// logic to fromMatToNDArray() above.
PyObject* matToNDArrayBoostConverter::convert(Mat const& m) {
	if (!m.data)
		Py_RETURN_NONE;  // empty Mat maps to Python None
	Mat temp,
	*p = (Mat*) &m;
	if (!p->u || p->allocator != &g_numpyAllocator)
	{
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));
		p = &temp;
	}
	PyObject* o = (PyObject*) p->u->userdata;
	Py_INCREF(o);
	return o;
}
281 |
282 | matFromNDArrayBoostConverter::matFromNDArrayBoostConverter() {
283 | boost::python::converter::registry::push_back(convertible, construct,
284 | boost::python::type_id());
285 | }
286 |
/// @brief Check if PyObject is an array and can be converted to OpenCV matrix.
/// Returns the object itself when convertible, NULL otherwise.
void* matFromNDArrayBoostConverter::convertible(PyObject* object) {
	if (!PyArray_Check(object)) {
		return NULL;
	}
#ifndef CV_MAX_DIM
	const int CV_MAX_DIM = 32;
#endif
	PyArrayObject* oarr = (PyArrayObject*) object;

	// only dtypes that construct() can map to a cv::Mat depth are accepted
	int typenum = PyArray_TYPE(oarr);
	if (typenum != NPY_INT64 && typenum != NPY_UINT64 && typenum != NPY_LONG
			&& typenum != NPY_UBYTE && typenum != NPY_BYTE
			&& typenum != NPY_USHORT && typenum != NPY_SHORT
			&& typenum != NPY_INT && typenum != NPY_INT32
			&& typenum != NPY_FLOAT && typenum != NPY_DOUBLE) {
		return NULL;  // data type not supported
	}
	int ndims = PyArray_NDIM(oarr);

	if (ndims >= CV_MAX_DIM) {
		return NULL; //too many dimensions
	}
	return object;
}
312 |
313 | /// @brief Construct a Mat from an NDArray object.
314 | void matFromNDArrayBoostConverter::construct(PyObject* object,
315 | boost::python::converter::rvalue_from_python_stage1_data* data) {
316 | namespace python = boost::python;
317 | // Object is a borrowed reference, so create a handle indicting it is
318 | // borrowed for proper reference counting.
319 | python::handle<> handle(python::borrowed(object));
320 |
321 | // Obtain a handle to the memory block that the converter has allocated
322 | // for the C++ type.
323 | typedef python::converter::rvalue_from_python_storage storage_type;
324 | void* storage = reinterpret_cast(data)->storage.bytes;
325 |
326 | // Allocate the C++ type into the converter's memory block, and assign
327 | // its handle to the converter's convertible variable. The C++
328 | // container is populated by passing the begin and end iterators of
329 | // the python object to the container's constructor.
330 | PyArrayObject* oarr = (PyArrayObject*) object;
331 |
332 | bool needcopy = false, needcast = false;
333 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
334 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
335 | typenum == NPY_USHORT ? CV_16U :
336 | typenum == NPY_SHORT ? CV_16S :
337 | typenum == NPY_INT ? CV_32S :
338 | typenum == NPY_INT32 ? CV_32S :
339 | typenum == NPY_FLOAT ? CV_32F :
340 | typenum == NPY_DOUBLE ? CV_64F : -1;
341 |
342 | if (type < 0) {
343 | needcopy = needcast = true;
344 | new_typenum = NPY_INT;
345 | type = CV_32S;
346 | }
347 |
348 | #ifndef CV_MAX_DIM
349 | const int CV_MAX_DIM = 32;
350 | #endif
351 | int ndims = PyArray_NDIM(oarr);
352 |
353 | int size[CV_MAX_DIM + 1];
354 | size_t step[CV_MAX_DIM + 1];
355 | size_t elemsize = CV_ELEM_SIZE1(type);
356 | const npy_intp* _sizes = PyArray_DIMS(oarr);
357 | const npy_intp* _strides = PyArray_STRIDES(oarr);
358 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
359 |
360 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
361 | // these checks handle cases of
362 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
363 | // b) transposed arrays, where _strides[] elements go in non-descending order
364 | // c) flipped arrays, where some of _strides[] elements are negative
365 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
366 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
367 | needcopy = true;
368 | }
369 |
370 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
371 | needcopy = true;
372 |
373 | if (needcopy) {
374 |
375 | if (needcast) {
376 | object = PyArray_Cast(oarr, new_typenum);
377 | oarr = (PyArrayObject*) object;
378 | } else {
379 | oarr = PyArray_GETCONTIGUOUS(oarr);
380 | object = (PyObject*) oarr;
381 | }
382 |
383 | _strides = PyArray_STRIDES(oarr);
384 | }
385 |
386 | for (int i = 0; i < ndims; i++) {
387 | size[i] = (int) _sizes[i];
388 | step[i] = (size_t) _strides[i];
389 | }
390 |
391 | // handle degenerate case
392 | if (ndims == 0) {
393 | size[ndims] = 1;
394 | step[ndims] = elemsize;
395 | ndims++;
396 | }
397 |
398 | if (ismultichannel) {
399 | ndims--;
400 | type |= CV_MAKETYPE(0, size[2]);
401 | }
402 | if (!needcopy) {
403 | Py_INCREF(object);
404 | }
405 |
406 | cv::Mat* m = new (storage) cv::Mat(ndims, size, type, PyArray_DATA(oarr), step);
407 | m->u = g_numpyAllocator.allocate(object, ndims, size, type, step);
408 | m->allocator = &g_numpyAllocator;
409 | m->addref();
410 | data->convertible = storage;
411 | }
412 |
413 | } //end namespace pbcvt
414 | #endif
415 |
--------------------------------------------------------------------------------
/src/pyboost_cv4_converter.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * CV4BoostConverter.cpp
3 | *
4 | */
5 | #define NO_IMPORT_ARRAY
6 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
7 | #include <pyboostcvconverter/pyboostcvconverter.hpp>
8 | #if !defined CV_VERSION_EPOCH && CV_VERSION_MAJOR == 4
9 | namespace pbcvt {
10 | using namespace cv;
11 | //=================== ERROR HANDLING =========================================================
12 |
// Format a printf-style message, set it as a Python TypeError, and return 0.
static int failmsg(const char *fmt, ...) {
	char str[1000];  // fixed buffer; vsnprintf truncates longer messages

	va_list ap;
	va_start(ap, fmt);
	vsnprintf(str, sizeof(str), fmt, ap);
	va_end(ap);

	PyErr_SetString(PyExc_TypeError, str);
	return 0;
}
24 |
25 | //=================== THREADING ==============================================================
// RAII guard releasing the Python GIL for the lifetime of the object.
class PyAllowThreads {
public:
	PyAllowThreads() :
			_state(PyEval_SaveThread()) {
	}
	~PyAllowThreads() {
		PyEval_RestoreThread(_state);
	}
private:
	PyThreadState* _state;  // saved thread state, restored on destruction
};
37 |
// RAII guard acquiring the Python GIL for the lifetime of the object.
class PyEnsureGIL {
public:
	PyEnsureGIL() :
			_state(PyGILState_Ensure()) {
	}
	~PyEnsureGIL() {
		PyGILState_Release(_state);
	}
private:
	PyGILState_STATE _state;  // token returned by PyGILState_Ensure
};
49 |
// Argument classification tags, kept for parity with OpenCV's own cv2
// module sources; not referenced elsewhere in the visible code.
enum {
	ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2
};
53 |
54 | class NumpyAllocator:
55 | public MatAllocator {
56 | public:
57 | NumpyAllocator() {
58 | stdAllocator = Mat::getStdAllocator();
59 | }
60 | ~NumpyAllocator() {
61 | }
62 |
63 | UMatData* allocate(PyObject* o, int dims, const int* sizes, int type,
64 | size_t* step) const {
65 | UMatData* u = new UMatData(this);
66 | u->data = u->origdata = (uchar*) PyArray_DATA((PyArrayObject*) o);
67 | npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
68 | for (int i = 0; i < dims - 1; i++)
69 | step[i] = (size_t) _strides[i];
70 | step[dims - 1] = CV_ELEM_SIZE(type);
71 | u->size = sizes[0] * step[0];
72 | u->userdata = o;
73 | return u;
74 | }
75 |
76 | UMatData* allocate(int dims0, const int* sizes, int type, void* data,
77 | size_t* step, cv::AccessFlag flags, UMatUsageFlags usageFlags) const {
78 | if (data != 0) {
79 | CV_Error(Error::StsAssert, "The data should normally be NULL!");
80 | // probably this is safe to do in such extreme case
81 | return stdAllocator->allocate(dims0, sizes, type, data, step, flags,
82 | usageFlags);
83 | }
84 | PyEnsureGIL gil;
85 |
86 | int depth = CV_MAT_DEPTH(type);
87 | int cn = CV_MAT_CN(type);
88 | const int f = (int) (sizeof(size_t) / 8);
89 | int typenum =
90 | depth == CV_8U ? NPY_UBYTE :
91 | depth == CV_8S ? NPY_BYTE :
92 | depth == CV_16U ? NPY_USHORT :
93 | depth == CV_16S ? NPY_SHORT :
94 | depth == CV_32S ? NPY_INT :
95 | depth == CV_32F ? NPY_FLOAT :
96 | depth == CV_64F ?
97 | NPY_DOUBLE :
98 | f * NPY_ULONGLONG + (f ^ 1) * NPY_UINT;
99 | int i, dims = dims0;
100 | cv::AutoBuffer _sizes(dims + 1);
101 | for (i = 0; i < dims; i++)
102 | _sizes[i] = sizes[i];
103 | if (cn > 1)
104 | _sizes[dims++] = cn;
105 | PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
106 | if (!o)
107 | CV_Error_(Error::StsError,
108 | ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
109 | return allocate(o, dims0, sizes, type, step);
110 | }
111 |
112 | bool allocate(UMatData* u, cv::AccessFlag accessFlags,
113 | UMatUsageFlags usageFlags) const {
114 | return stdAllocator->allocate(u, accessFlags, usageFlags);
115 | }
116 |
117 | void deallocate(UMatData* u) const {
118 | if (u) {
119 | PyEnsureGIL gil;
120 | PyObject* o = (PyObject*) u->userdata;
121 | Py_XDECREF(o);
122 | delete u;
123 | }
124 | }
125 |
126 | const MatAllocator* stdAllocator;
127 | };
128 |
129 | //=================== ALLOCATOR INITIALIZATION ==================================================
130 | NumpyAllocator g_numpyAllocator;
131 |
132 | //=================== STANDALONE CONVERTER FUNCTIONS =========================================
133 |
// Wrap a cv::Mat as a NumPy ndarray (OpenCV 4.x UMatData-based API).
// If the Mat's data was not allocated by g_numpyAllocator, the data is
// first copied into a fresh numpy-backed Mat. Empty Mats map to None.
PyObject* fromMatToNDArray(const Mat& m) {
	if (!m.data)
		Py_RETURN_NONE;
	Mat temp,
	*p = (Mat*) &m;
	if (!p->u || p->allocator != &g_numpyAllocator) {
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));  // copy allocates through NumpyAllocator
		p = &temp;
	}
	// the backing ndarray was stored in userdata by NumpyAllocator::allocate
	PyObject* o = (PyObject*) p->u->userdata;
	Py_INCREF(o);
	return o;
}
148 |
149 | Mat fromNDArrayToMat(PyObject* o) {
150 | cv::Mat m;
151 | bool allowND = true;
152 | if (!PyArray_Check(o)) {
153 | failmsg("argument is not a numpy array");
154 | if (!m.data)
155 | m.allocator = &g_numpyAllocator;
156 | } else {
157 | PyArrayObject* oarr = (PyArrayObject*) o;
158 |
159 | bool needcopy = false, needcast = false;
160 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
161 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
162 | typenum == NPY_USHORT ? CV_16U :
163 | typenum == NPY_SHORT ? CV_16S :
164 | typenum == NPY_INT ? CV_32S :
165 | typenum == NPY_INT32 ? CV_32S :
166 | typenum == NPY_FLOAT ? CV_32F :
167 | typenum == NPY_DOUBLE ? CV_64F : -1;
168 |
169 | if (type < 0) {
170 | if (typenum == NPY_INT64 || typenum == NPY_UINT64
171 | || type == NPY_LONG) {
172 | needcopy = needcast = true;
173 | new_typenum = NPY_INT;
174 | type = CV_32S;
175 | } else {
176 | failmsg("Argument data type is not supported");
177 | m.allocator = &g_numpyAllocator;
178 | return m;
179 | }
180 | }
181 |
182 | #ifndef CV_MAX_DIM
183 | const int CV_MAX_DIM = 32;
184 | #endif
185 |
186 | int ndims = PyArray_NDIM(oarr);
187 | if (ndims >= CV_MAX_DIM) {
188 | failmsg("Dimensionality of argument is too high");
189 | if (!m.data)
190 | m.allocator = &g_numpyAllocator;
191 | return m;
192 | }
193 |
194 | int size[CV_MAX_DIM + 1];
195 | size_t step[CV_MAX_DIM + 1];
196 | size_t elemsize = CV_ELEM_SIZE1(type);
197 | const npy_intp* _sizes = PyArray_DIMS(oarr);
198 | const npy_intp* _strides = PyArray_STRIDES(oarr);
199 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
200 |
201 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
202 | // these checks handle cases of
203 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
204 | // b) transposed arrays, where _strides[] elements go in non-descending order
205 | // c) flipped arrays, where some of _strides[] elements are negative
206 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
207 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
208 | needcopy = true;
209 | }
210 |
211 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
212 | needcopy = true;
213 |
214 | if (needcopy) {
215 |
216 | if (needcast) {
217 | o = PyArray_Cast(oarr, new_typenum);
218 | oarr = (PyArrayObject*) o;
219 | } else {
220 | oarr = PyArray_GETCONTIGUOUS(oarr);
221 | o = (PyObject*) oarr;
222 | }
223 |
224 | _strides = PyArray_STRIDES(oarr);
225 | }
226 |
227 | for (int i = 0; i < ndims; i++) {
228 | size[i] = (int) _sizes[i];
229 | step[i] = (size_t) _strides[i];
230 | }
231 |
232 | // handle degenerate case
233 | if (ndims == 0) {
234 | size[ndims] = 1;
235 | step[ndims] = elemsize;
236 | ndims++;
237 | }
238 |
239 | if (ismultichannel) {
240 | ndims--;
241 | type |= CV_MAKETYPE(0, size[2]);
242 | }
243 |
244 | if (ndims > 2 && !allowND) {
245 | failmsg("%s has more than 2 dimensions");
246 | } else {
247 |
248 | m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
249 | m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
250 | m.addref();
251 |
252 | if (!needcopy) {
253 | Py_INCREF(o);
254 | }
255 | }
256 | m.allocator = &g_numpyAllocator;
257 | }
258 | return m;
259 | }
260 |
261 | //=================== BOOST CONVERTERS =======================================================
262 |
// Boost.Python to-Python converter for cv::Mat. Intentionally identical in
// logic to fromMatToNDArray() above.
PyObject* matToNDArrayBoostConverter::convert(Mat const& m) {
	if (!m.data)
		Py_RETURN_NONE;  // empty Mat maps to Python None
	Mat temp,
	*p = (Mat*) &m;
	if (!p->u || p->allocator != &g_numpyAllocator)
	{
		temp.allocator = &g_numpyAllocator;
		ERRWRAP2(m.copyTo(temp));
		p = &temp;
	}
	PyObject* o = (PyObject*) p->u->userdata;
	Py_INCREF(o);
	return o;
}
278 |
279 | matFromNDArrayBoostConverter::matFromNDArrayBoostConverter() {
280 | boost::python::converter::registry::push_back(convertible, construct,
281 | boost::python::type_id());
282 | }
283 |
284 | /// @brief Check if PyObject is an array and can be converted to OpenCV matrix.
285 | void* matFromNDArrayBoostConverter::convertible(PyObject* object) {
286 | if (!PyArray_Check(object)) {
287 | return NULL;
288 | }
289 | #ifndef CV_MAX_DIM
290 | const int CV_MAX_DIM = 32;
291 | #endif
292 | PyArrayObject* oarr = (PyArrayObject*) object;
293 |
294 | int typenum = PyArray_TYPE(oarr);
295 | if (typenum != NPY_INT64 && typenum != NPY_UINT64 && typenum != NPY_LONG
296 | && typenum != NPY_UBYTE && typenum != NPY_BYTE
297 | && typenum != NPY_USHORT && typenum != NPY_SHORT
298 | && typenum != NPY_INT && typenum != NPY_INT32
299 | && typenum != NPY_FLOAT && typenum != NPY_DOUBLE) {
300 | return NULL;
301 | }
302 | int ndims = PyArray_NDIM(oarr); //data type not supported
303 |
304 | if (ndims >= CV_MAX_DIM) {
305 | return NULL; //too many dimensions
306 | }
307 | return object;
308 | }
309 |
310 | /// @brief Construct a Mat from an NDArray object.
311 | void matFromNDArrayBoostConverter::construct(PyObject* object,
312 | boost::python::converter::rvalue_from_python_stage1_data* data) {
313 | namespace python = boost::python;
314 | // Object is a borrowed reference, so create a handle indicting it is
315 | // borrowed for proper reference counting.
316 | python::handle<> handle(python::borrowed(object));
317 |
318 | // Obtain a handle to the memory block that the converter has allocated
319 | // for the C++ type.
320 | typedef python::converter::rvalue_from_python_storage storage_type;
321 | void* storage = reinterpret_cast(data)->storage.bytes;
322 |
323 | // Allocate the C++ type into the converter's memory block, and assign
324 | // its handle to the converter's convertible variable. The C++
325 | // container is populated by passing the begin and end iterators of
326 | // the python object to the container's constructor.
327 | PyArrayObject* oarr = (PyArrayObject*) object;
328 |
329 | bool needcopy = false, needcast = false;
330 | int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
331 | int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
332 | typenum == NPY_USHORT ? CV_16U :
333 | typenum == NPY_SHORT ? CV_16S :
334 | typenum == NPY_INT ? CV_32S :
335 | typenum == NPY_INT32 ? CV_32S :
336 | typenum == NPY_FLOAT ? CV_32F :
337 | typenum == NPY_DOUBLE ? CV_64F : -1;
338 |
339 | if (type < 0) {
340 | needcopy = needcast = true;
341 | new_typenum = NPY_INT;
342 | type = CV_32S;
343 | }
344 |
345 | #ifndef CV_MAX_DIM
346 | const int CV_MAX_DIM = 32;
347 | #endif
348 | int ndims = PyArray_NDIM(oarr);
349 |
350 | int size[CV_MAX_DIM + 1];
351 | size_t step[CV_MAX_DIM + 1];
352 | size_t elemsize = CV_ELEM_SIZE1(type);
353 | const npy_intp* _sizes = PyArray_DIMS(oarr);
354 | const npy_intp* _strides = PyArray_STRIDES(oarr);
355 | bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
356 |
357 | for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
358 | // these checks handle cases of
359 | // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
360 | // b) transposed arrays, where _strides[] elements go in non-descending order
361 | // c) flipped arrays, where some of _strides[] elements are negative
362 | if ((i == ndims - 1 && (size_t) _strides[i] != elemsize)
363 | || (i < ndims - 1 && _strides[i] < _strides[i + 1]))
364 | needcopy = true;
365 | }
366 |
367 | if (ismultichannel && _strides[1] != (npy_intp) elemsize * _sizes[2])
368 | needcopy = true;
369 |
370 | if (needcopy) {
371 |
372 | if (needcast) {
373 | object = PyArray_Cast(oarr, new_typenum);
374 | oarr = (PyArrayObject*) object;
375 | } else {
376 | oarr = PyArray_GETCONTIGUOUS(oarr);
377 | object = (PyObject*) oarr;
378 | }
379 |
380 | _strides = PyArray_STRIDES(oarr);
381 | }
382 |
383 | for (int i = 0; i < ndims; i++) {
384 | size[i] = (int) _sizes[i];
385 | step[i] = (size_t) _strides[i];
386 | }
387 |
388 | // handle degenerate case
389 | if (ndims == 0) {
390 | size[ndims] = 1;
391 | step[ndims] = elemsize;
392 | ndims++;
393 | }
394 |
395 | if (ismultichannel) {
396 | ndims--;
397 | type |= CV_MAKETYPE(0, size[2]);
398 | }
399 | if (!needcopy) {
400 | Py_INCREF(object);
401 | }
402 |
403 | cv::Mat* m = new (storage) cv::Mat(ndims, size, type, PyArray_DATA(oarr), step);
404 | m->u = g_numpyAllocator.allocate(object, ndims, size, type, step);
405 | m->allocator = &g_numpyAllocator;
406 | m->addref();
407 | data->convertible = storage;
408 | }
409 |
410 | } //end namespace pbcvt
411 | #endif
412 |
--------------------------------------------------------------------------------
/src/python_module.cpp:
--------------------------------------------------------------------------------
1 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
2 |
#include <pyboostcvconverter/pyboostcvconverter.hpp>
#include <boost/python.hpp>
5 |
6 | namespace pbcvt {
7 |
8 | using namespace boost::python;
9 |
10 | /**
11 | * @brief Example function. Basic inner matrix product using explicit matrix conversion.
12 | * @param left left-hand matrix operand (NdArray required)
13 | * @param right right-hand matrix operand (NdArray required)
14 | * @return an NdArray representing the dot-product of the left and right operands
15 | */
16 | PyObject *dot(PyObject *left, PyObject *right) {
17 |
18 | cv::Mat leftMat, rightMat;
19 | leftMat = pbcvt::fromNDArrayToMat(left);
20 | rightMat = pbcvt::fromNDArrayToMat(right);
21 | auto c1 = leftMat.cols, r2 = rightMat.rows;
22 | // Check that the 2-D matrices can be legally multiplied.
23 | if (c1 != r2) {
24 | PyErr_SetString(PyExc_TypeError,
25 | "Incompatible sizes for matrix multiplication.");
26 | throw_error_already_set();
27 | }
28 | cv::Mat result = leftMat * rightMat;
29 | PyObject *ret = pbcvt::fromMatToNDArray(result);
30 | return ret;
31 | }
32 | /**
33 | * @brief Example function. Simply makes a new CV_16UC3 matrix and returns it as a numpy array.
34 | * @return The resulting numpy array.
35 | */
36 |
37 | PyObject* makeCV_16UC3Matrix(){
38 | cv::Mat image = cv::Mat::zeros(240,320, CV_16UC3);
39 | PyObject* py_image = pbcvt::fromMatToNDArray(image);
40 | return py_image;
41 | }
42 |
43 | //
44 | /**
45 | * @brief Example function. Basic inner matrix product using implicit matrix conversion.
46 | * @details This example uses Mat directly, but we won't need to worry about the conversion in the body of the function.
47 | * @param leftMat left-hand matrix operand
48 | * @param rightMat right-hand matrix operand
49 | * @return an NdArray representing the dot-product of the left and right operands
50 | */
51 | cv::Mat dot2(cv::Mat leftMat, cv::Mat rightMat) {
52 | auto c1 = leftMat.cols, r2 = rightMat.rows;
53 | if (c1 != r2) {
54 | PyErr_SetString(PyExc_TypeError,
55 | "Incompatible sizes for matrix multiplication.");
56 | throw_error_already_set();
57 | }
58 | cv::Mat result = leftMat * rightMat;
59 |
60 | return result;
61 | }
62 |
63 | /**
64 | * \brief Example function. Increments all elements of the given matrix by one.
65 | * @details This example uses Mat directly, but we won't need to worry about the conversion anywhere at all,
66 | * it is handled automatically by boost.
67 | * \param matrix (numpy array) to increment
68 | * \return
69 | */
70 | cv::Mat increment_elements_by_one(cv::Mat matrix){
71 | matrix += 1.0;
72 | return matrix;
73 | }
74 |
75 |
// Initialize the Python interpreter and the numpy C array API.
// numpy's import_array() macro expands to a `return` statement on failure;
// under Python 3 it returns NULL (so the wrapper must return void*), under
// Python 2 it is a bare `return;` (so the wrapper returns void).
// NUMPY_IMPORT_ARRAY_RETVAL is defined to match whenever numpy itself does
// not provide it.
#if (PY_VERSION_HEX >= 0x03000000)
#ifndef NUMPY_IMPORT_ARRAY_RETVAL
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#endif
static void* init_ar() {
#else
#ifndef NUMPY_IMPORT_ARRAY_RETVAL
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
static void init_ar(){
#endif
	Py_Initialize();

	// May expand to `return NUMPY_IMPORT_ARRAY_RETVAL;` internally on error.
	import_array();
	return NUMPY_IMPORT_ARRAY_RETVAL;
}
92 |
93 | BOOST_PYTHON_MODULE (pbcvt) {
94 | //using namespace XM;
95 | init_ar();
96 |
97 | //initialize converters
98 | to_python_converter();
99 | matFromNDArrayBoostConverter();
100 |
101 | //expose module-level functions
102 | def("dot", dot);
103 | def("dot2", dot2);
104 | def("makeCV_16UC3Matrix", makeCV_16UC3Matrix);
105 |
106 | //from PEP8 (https://www.python.org/dev/peps/pep-0008/?#prescriptive-naming-conventions)
107 | //"Function names should be lowercase, with words separated by underscores as necessary to improve readability."
108 | def("increment_elements_by_one", increment_elements_by_one);
109 | }
110 |
111 | } //end namespace pbcvt
112 |
--------------------------------------------------------------------------------
/tests/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
# Build the test extension module that exercises the static pbcvt converters.
add_library(test_project SHARED test_project_source.cpp)

target_link_libraries(test_project static_pbcvt)
target_include_directories(test_project PRIVATE ${PROJECT_SOURCE_DIR}/include)

set_target_properties(test_project PROPERTIES
        ARCHIVE_OUTPUT_NAME test_project_${ARCHIVE_OUTPUT_SUFFIX} # prevent name conflict for python2/3 outputs
        PREFIX ""                                 # no "lib" prefix, so python can import it
        OUTPUT_NAME test_project
        SUFFIX ${${PROJECT_NAME}_PY_SUFFIX})

# Copy the built module next to tests.py so `import test_project` works when
# pytest runs from the tests directory. Generator expressions restored: the
# $<TARGET_FILE:...> / $<TARGET_FILE_NAME:...> arguments had been stripped.
# (Also removed a get_target_property(TEST_OUT_PATH ...) call whose result
# was never used.)
add_custom_command(TARGET test_project POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:test_project> "${CMAKE_CURRENT_SOURCE_DIR}/../tests/$<TARGET_FILE_NAME:test_project>"
        )
--------------------------------------------------------------------------------
/tests/memory_test_video_capture.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # ================================================================
3 | # Created by Gregory Kramida on 7/20/18.
4 | # Copyright (c) 2018 Gregory Kramida
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | # ================================================================
17 |
18 | import cv2
19 | import numpy as np
20 | import pbcvt
21 |
22 |
23 | # Used to test the code for memory leaks: requires an OpenCV-compatible webcam to be connected to the system
24 | # If there is a memory leak in the conversion, memory used by the program should grow to eventually overwhelm
25 | # the system; memory usage monitors may be used to check the behavior
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    try:
        while True:
            can_capture, frame = cap.read()
            # Stop cleanly when the camera yields no frame; the original
            # displayed the None frame first, crashing in cv2.imshow.
            if not can_capture:
                break
            cv2.imshow('video', frame)
            ch = 0xFF & cv2.waitKey(1)
            frame_copy = frame.copy()
            if ch == 27:  # ESC key
                break
            pbcvt.increment_elements_by_one(frame)
            # Average per-pixel change across the 3 channels.
            # (Renamed from `sum`, which shadowed the builtin.)
            mean_delta = (np.sum(frame - frame_copy) / (frame.size / 3))
            # print(mean_delta)
    finally:
        # Release the camera and close the preview window even on error.
        cap.release()
        cv2.destroyAllWindows()
    print("exiting...")
40 |
--------------------------------------------------------------------------------
/tests/test_project_source.cpp:
--------------------------------------------------------------------------------
1 | // ================================================================
2 | // Created by Gregory Kramida on 6/11/19.
3 | // Copyright (c) 2019 Gregory Kramida
4 | // Licensed under the Apache License, Version 2.0 (the "License");
5 | // you may not use this file except in compliance with the License.
6 | // You may obtain a copy of the License at
7 |
8 | // http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | // Unless required by applicable law or agreed to in writing, software
11 | // distributed under the License is distributed on an "AS IS" BASIS,
12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | // See the License for the specific language governing permissions and
14 | // limitations under the License.
15 | // ================================================================
16 | #define PY_ARRAY_UNIQUE_SYMBOL pbcvt_ARRAY_API
17 |
#include <pyboostcvconverter/pyboostcvconverter.hpp>
#include <boost/python.hpp>
20 |
21 | namespace test_namespace {
22 |
23 | namespace bp = boost::python;
24 |
// Initialize the Python interpreter and the numpy C array API.
// Under Python 3, numpy's import_array() macro returns NULL on failure, so
// the wrapper must return void*; under Python 2 it is a bare return, so the
// wrapper returns void. NUMPY_IMPORT_ARRAY_RETVAL is presumably supplied by
// numpy or an included header here — TODO confirm, it is not defined in this
// translation unit.
#if (PY_VERSION_HEX >= 0x03000000)
static void *init_ar() {
#else
static void init_ar(){
#endif
	Py_Initialize();

	// May expand to `return NUMPY_IMPORT_ARRAY_RETVAL;` internally on error.
	import_array();
	return NUMPY_IMPORT_ARRAY_RETVAL;
}
35 |
36 |
37 | /**
38 | * @brief Example function. Basic inner matrix product using explicit matrix conversion.
39 | * @param left left-hand matrix operand (NdArray required)
40 | * @param right right-hand matrix operand (NdArray required)
41 | * @return an NdArray representing the dot-product of the left and right operands
42 | */
43 | PyObject *dot(PyObject *left, PyObject *right) {
44 |
45 | cv::Mat leftMat, rightMat;
46 | leftMat = pbcvt::fromNDArrayToMat(left);
47 | rightMat = pbcvt::fromNDArrayToMat(right);
48 | auto c1 = leftMat.cols, r2 = rightMat.rows;
49 | // Check that the 2-D matrices can be legally multiplied.
50 | if (c1 != r2) {
51 | PyErr_SetString(PyExc_TypeError,
52 | "Incompatible sizes for matrix multiplication.");
53 | bp::throw_error_already_set();
54 | }
55 | cv::Mat result = leftMat * rightMat;
56 | PyObject *ret = pbcvt::fromMatToNDArray(result);
57 | return ret;
58 | }
59 | /**
60 | * @brief Example function. Simply makes a new CV_16UC3 matrix and returns it as a numpy array.
61 | * @return The resulting numpy array.
62 | */
63 |
64 | PyObject* makeCV_16UC3Matrix(){
65 | cv::Mat image = cv::Mat::zeros(240,320, CV_16UC3);
66 | PyObject* py_image = pbcvt::fromMatToNDArray(image);
67 | return py_image;
68 | }
69 |
70 | //
71 | /**
72 | * @brief Example function. Basic inner matrix product using implicit matrix conversion.
73 | * @details This example uses Mat directly, but we won't need to worry about the conversion in the body of the function.
74 | * @param leftMat left-hand matrix operand
75 | * @param rightMat right-hand matrix operand
76 | * @return an NdArray representing the dot-product of the left and right operands
77 | */
78 | cv::Mat dot2(cv::Mat leftMat, cv::Mat rightMat) {
79 | auto c1 = leftMat.cols, r2 = rightMat.rows;
80 | if (c1 != r2) {
81 | PyErr_SetString(PyExc_TypeError,
82 | "Incompatible sizes for matrix multiplication.");
83 | bp::throw_error_already_set();
84 | }
85 | cv::Mat result = leftMat * rightMat;
86 |
87 | return result;
88 | }
89 |
90 | /**
91 | * \brief Example function. Increments all elements of the given matrix by one.
92 | * @details This example uses Mat directly, but we won't need to worry about the conversion anywhere at all,
93 | * it is handled automatically by boost.
94 | * \param matrix (numpy array) to increment
95 | * \return
96 | */
97 | cv::Mat increment_elements_by_one(cv::Mat matrix){
98 | matrix += 1.0;
99 | return matrix;
100 | }
101 |
102 | //NOTE: the argument to the BOOST_PYTHON_MODULE has to correspond with the generated dynamic library file name.
103 | //TO see how to avoid the "lib" prefix and append a standard python extension file suffix, see how the
104 | //pbcvt library is handled in the CMakeLists.txt file in the root of the repository.
105 | BOOST_PYTHON_MODULE (test_project) {
106 | //using namespace XM;
107 | init_ar();
108 |
109 | //initialize converters
110 | bp::to_python_converter();
111 | pbcvt::matFromNDArrayBoostConverter();
112 |
113 | //expose module-level functions
114 | bp::def("dot", dot);
115 | bp::def("dot2", dot2);
116 | bp::def("makeCV_16UC3Matrix", makeCV_16UC3Matrix);
117 |
118 | //from PEP8 (https://www.python.org/dev/peps/pep-0008/?#prescriptive-naming-conventions)
119 | //"Function names should be lowercase, with words separated by underscores as necessary to improve readability."
120 | bp::def("increment_elements_by_one", increment_elements_by_one);
121 | }
122 |
123 | } // end namespace test_namespace
--------------------------------------------------------------------------------
/tests/tests.py:
--------------------------------------------------------------------------------
1 | # ================================================================
2 | # Created by Gregory Kramida on 6/11/19.
3 | # Copyright (c) 2019 Gregory Kramida
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | # ================================================================
16 |
17 | import numpy as np
18 | import pytest
19 | import test_project as tp
20 |
21 |
def test_dot_vec():
    """Column-by-row vector product from tp.dot matches numpy's dot."""
    col = np.array([1.0, 3.4, 30.2]).reshape(-1, 1)
    row = np.array([33.0, 49.0, 1928.23]).reshape(1, 3)
    assert np.allclose(tp.dot(col, row), col.dot(row))
27 |
28 |
def test_dot_mat():
    """tp.dot on random 3x4 / 4x3 matrices agrees with numpy's dot."""
    lhs = np.random.rand(3, 4)
    rhs = np.random.rand(4, 3)
    assert np.allclose(tp.dot(lhs, rhs), lhs.dot(rhs))
34 |
35 |
def test_dot2_mat():
    """tp.dot2 (implicit conversion) agrees with numpy's dot."""
    lhs = np.random.rand(3, 4)
    rhs = np.random.rand(4, 3)
    assert np.allclose(tp.dot2(lhs, rhs), lhs.dot(rhs))
41 |
42 |
def test_increment_elements_by_one_int():
    """Integer array: every element is incremented by exactly one."""
    # Renamed local from `input`, which shadowed the builtin.
    arr = np.arange(-20, 20)
    expected = np.arange(-19, 21)
    assert np.allclose(tp.increment_elements_by_one(arr), expected)
47 |
48 |
def test_increment_elements_by_one_float():
    """float32 array: every element is incremented by exactly one."""
    # Renamed local from `input`, which shadowed the builtin.
    arr = np.arange(0.0, 25.0, dtype=np.float32)
    expected = np.arange(1.0, 26.0, dtype=np.float32)
    assert np.allclose(tp.increment_elements_by_one(arr), expected)
53 |
54 |
def test_increment_elements_by_one_double():
    """float64 array: every element is incremented by exactly one."""
    # Renamed local from `input`, which shadowed the builtin.
    arr = np.arange(0.0, 25.0, dtype=np.float64)
    expected = np.arange(1.0, 26.0, dtype=np.float64)
    assert np.allclose(tp.increment_elements_by_one(arr), expected)
59 |
--------------------------------------------------------------------------------