├── .gitignore
├── LICENSE
├── README.md
├── Tensorflow.NET.OpencvAdapter.sln
├── Tensorflow.NET.OpencvAdapter
├── APIs
│ ├── OpencvAPIs.cs
│ ├── cv2.core.cs
│ ├── cv2.cs
│ ├── cv2.highgui.cs
│ ├── cv2.imgcodecs.cs
│ ├── cv2.imgproc.cs
│ ├── cv2.objdetect.cs
│ ├── cv2.photo.cs
│ └── cv2.video.cs
├── AdapterUtils.cs
├── CvNDArray.cs
├── Extensions
│ ├── ConversionExtensions.cs
│ ├── DTypeExtensions.cs
│ └── NDArrayExtensions.cs
├── FakeMat.cs
├── OpencvAdapterContext.cs
├── OpencvAdapterMode.cs
└── Tensorflow.OpencvAdapter.csproj
└── Tensorflow.OpencvAdapter.Unittest
├── Assets
├── img.npy
└── test1.JPEG
├── ImageCodecsTest.cs
├── MemoryTest.cs
├── Tensorflow.OpencvAdapter.Unittest.csproj
└── Usings.cs
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.suo
8 | *.user
9 | *.userosscache
10 | *.sln.docstates
11 |
12 | # User-specific files (MonoDevelop/Xamarin Studio)
13 | *.userprefs
14 |
15 | # Build results
16 | [Dd]ebug/
17 | [Dd]ebugPublic/
18 | [Rr]elease/
19 | [Rr]eleases/
20 | x64/
21 | x86/
22 | bld/
23 | [Bb]in/
24 | [Oo]bj/
25 | [Ll]og/
26 |
27 | # Visual Studio 2015/2017 cache/options directory
28 | .vs/
29 | # Uncomment if you have tasks that create the project's static files in wwwroot
30 | #wwwroot/
31 |
32 | # Visual Studio 2017 auto generated files
33 | Generated\ Files/
34 |
35 | # MSTest test Results
36 | [Tt]est[Rr]esult*/
37 | [Bb]uild[Ll]og.*
38 |
39 | # NUNIT
40 | *.VisualState.xml
41 | TestResult.xml
42 |
43 | # Build Results of an ATL Project
44 | [Dd]ebugPS/
45 | [Rr]eleasePS/
46 | dlldata.c
47 |
48 | # Benchmark Results
49 | BenchmarkDotNet.Artifacts/
50 |
51 | # .NET Core
52 | project.lock.json
53 | project.fragment.lock.json
54 | artifacts/
55 | **/Properties/launchSettings.json
56 |
57 | # StyleCop
58 | StyleCopReport.xml
59 |
60 | # Files built by Visual Studio
61 | *_i.c
62 | *_p.c
63 | *_i.h
64 | *.ilk
65 | *.obj
66 | *.iobj
67 | *.pch
68 | *.pdb
69 | *.ipdb
70 | *.pgc
71 | *.pgd
72 | *.rsp
73 | *.sbr
74 | *.tlb
75 | *.tli
76 | *.tlh
77 | *.tmp
78 | *.tmp_proj
79 | *.log
80 | *.vspscc
81 | *.vssscc
82 | .builds
83 | *.pidb
84 | *.svclog
85 | *.scc
86 |
87 | # Chutzpah Test files
88 | _Chutzpah*
89 |
90 | # Visual C++ cache files
91 | ipch/
92 | *.aps
93 | *.ncb
94 | *.opendb
95 | *.opensdf
96 | *.sdf
97 | *.cachefile
98 | *.VC.db
99 | *.VC.VC.opendb
100 |
101 | # Visual Studio profiler
102 | *.psess
103 | *.vsp
104 | *.vspx
105 | *.sap
106 |
107 | # Visual Studio Trace Files
108 | *.e2e
109 |
110 | # TFS 2012 Local Workspace
111 | $tf/
112 |
113 | # Guidance Automation Toolkit
114 | *.gpState
115 |
116 | # ReSharper is a .NET coding add-in
117 | _ReSharper*/
118 | *.[Rr]e[Ss]harper
119 | *.DotSettings.user
120 |
121 | # JustCode is a .NET coding add-in
122 | .JustCode
123 |
124 | # TeamCity is a build add-in
125 | _TeamCity*
126 |
127 | # DotCover is a Code Coverage Tool
128 | *.dotCover
129 |
130 | # AxoCover is a Code Coverage Tool
131 | .axoCover/*
132 | !.axoCover/settings.json
133 |
134 | # Visual Studio code coverage results
135 | *.coverage
136 | *.coveragexml
137 |
138 | # NCrunch
139 | _NCrunch_*
140 | .*crunch*.local.xml
141 | nCrunchTemp_*
142 |
143 | # MightyMoose
144 | *.mm.*
145 | AutoTest.Net/
146 |
147 | # Web workbench (sass)
148 | .sass-cache/
149 |
150 | # Installshield output folder
151 | [Ee]xpress/
152 |
153 | # DocProject is a documentation generator add-in
154 | DocProject/buildhelp/
155 | DocProject/Help/*.HxT
156 | DocProject/Help/*.HxC
157 | DocProject/Help/*.hhc
158 | DocProject/Help/*.hhk
159 | DocProject/Help/*.hhp
160 | DocProject/Help/Html2
161 | DocProject/Help/html
162 |
163 | # Click-Once directory
164 | publish/
165 |
166 | # Publish Web Output
167 | *.[Pp]ublish.xml
168 | *.azurePubxml
169 | # Note: Comment the next line if you want to checkin your web deploy settings,
170 | # but database connection strings (with potential passwords) will be unencrypted
171 | *.pubxml
172 | *.publishproj
173 |
174 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
175 | # checkin your Azure Web App publish settings, but sensitive information contained
176 | # in these scripts will be unencrypted
177 | PublishScripts/
178 |
179 | # NuGet Packages
180 | *.nupkg
181 | # The packages folder can be ignored because of Package Restore
182 | **/[Pp]ackages/*
183 | # except build/, which is used as an MSBuild target.
184 | !**/[Pp]ackages/build/
185 | # Uncomment if necessary however generally it will be regenerated when needed
186 | #!**/[Pp]ackages/repositories.config
187 | # NuGet v3's project.json files produces more ignorable files
188 | *.nuget.props
189 | *.nuget.targets
190 |
191 | # Microsoft Azure Build Output
192 | csx/
193 | *.build.csdef
194 |
195 | # Microsoft Azure Emulator
196 | ecf/
197 | rcf/
198 |
199 | # Windows Store app package directories and files
200 | AppPackages/
201 | BundleArtifacts/
202 | Package.StoreAssociation.xml
203 | _pkginfo.txt
204 | *.appx
205 |
206 | # Visual Studio cache files
207 | # files ending in .cache can be ignored
208 | *.[Cc]ache
209 | # but keep track of directories ending in .cache
210 | !*.[Cc]ache/
211 |
212 | # Others
213 | ClientBin/
214 | ~$*
215 | *~
216 | *.dbmdl
217 | *.dbproj.schemaview
218 | *.jfm
219 | *.pfx
220 | *.publishsettings
221 | orleans.codegen.cs
222 |
223 | # Including strong name files can present a security risk
224 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
225 | #*.snk
226 |
227 | # Since there are multiple workflows, uncomment next line to ignore bower_components
228 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
229 | #bower_components/
230 |
231 | # RIA/Silverlight projects
232 | Generated_Code/
233 |
234 | # Backup & report files from converting an old project file
235 | # to a newer Visual Studio version. Backup files are not needed,
236 | # because we have git ;-)
237 | _UpgradeReport_Files/
238 | Backup*/
239 | UpgradeLog*.XML
240 | UpgradeLog*.htm
241 | ServiceFabricBackup/
242 | *.rptproj.bak
243 |
244 | # SQL Server files
245 | *.mdf
246 | *.ldf
247 | *.ndf
248 |
249 | # Business Intelligence projects
250 | *.rdl.data
251 | *.bim.layout
252 | *.bim_*.settings
253 | *.rptproj.rsuser
254 |
255 | # Microsoft Fakes
256 | FakesAssemblies/
257 |
258 | # GhostDoc plugin setting file
259 | *.GhostDoc.xml
260 |
261 | # Node.js Tools for Visual Studio
262 | .ntvs_analysis.dat
263 | node_modules/
264 |
265 | # Visual Studio 6 build log
266 | *.plg
267 |
268 | # Visual Studio 6 workspace options file
269 | *.opt
270 |
271 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
272 | *.vbw
273 |
274 | # Visual Studio LightSwitch build output
275 | **/*.HTMLClient/GeneratedArtifacts
276 | **/*.DesktopClient/GeneratedArtifacts
277 | **/*.DesktopClient/ModelManifest.xml
278 | **/*.Server/GeneratedArtifacts
279 | **/*.Server/ModelManifest.xml
280 | _Pvt_Extensions
281 |
282 | # Paket dependency manager
283 | .paket/paket.exe
284 | paket-files/
285 |
286 | # FAKE - F# Make
287 | .fake/
288 |
289 | # JetBrains Rider
290 | .idea/
291 | *.sln.iml
292 |
293 | # CodeRush
294 | .cr/
295 |
296 | # Python Tools for Visual Studio (PTVS)
297 | __pycache__/
298 | *.pyc
299 |
300 | # Cake - Uncomment if you are using it
301 | # tools/**
302 | # !tools/packages.config
303 |
304 | # Tabs Studio
305 | *.tss
306 |
307 | # Telerik's JustMock configuration file
308 | *.jmconfig
309 |
310 | # BizTalk build output
311 | *.btp.cs
312 | *.btm.cs
313 | *.odx.cs
314 | *.xsd.cs
315 |
316 | # OpenCover UI analysis results
317 | OpenCover/
318 |
319 | # Azure Stream Analytics local run output
320 | ASALocalRun/
321 |
322 | # MSBuild Binary and Structured Log
323 | *.binlog
324 |
325 | # NVidia Nsight GPU debugger configuration file
326 | *.nvuser
327 |
328 | # MFractors (Xamarin productivity tool) working folder
329 | .mfractor/
330 | /docs/build
331 | src/TensorFlowNET.Native/bazel-*
332 | src/TensorFlowNET.Native/c_api.h
333 | /.vscode
334 | test/TensorFlowNET.Examples/mnist
335 |
336 |
337 | # training model resources
338 | .resources
339 | /redist
340 | *.xml
341 | *.xsd
342 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TensorFlow.OpencvAdapter
2 | An efficient library which enables using tensorflow.net with opencvsharp. It reuses the memory of Mat to provide a good performance.
3 |
4 | ## Introduction
5 |
6 | [Tensorflow.NET](https://github.com/SciSharp/TensorFlow.NET) is the dotnet binding of tensorflow, which is a deep-learning framework. [OpencvSharp](https://github.com/shimat/opencvsharp) is a good framework to provide opencv APIs in .NET.
7 |
8 | Tensorflow.NET uses `NDArray/Tensor` as data structure, while OpencvSharp uses `Mat` as data structure. This once became a gap between Tensorflow.NET and OpencvSharp, causing some inconvenience for CV works with Tensorflow.NET.
9 |
10 | The aim of Tensorflow.OpencvAdapter is to make the two libraries compatible, and provide an extra set of APIs in Tensorflow.NET style. With Tensorflow.OpencvAdapter, a `Mat` can be converted to an NDArray without memory copying and vice versa.
11 |
12 | ## Usages
13 |
14 | There are currently two ways to use Tensorflow.OpencvAdapter to combine Tensorflow.NET and OpencvSharp:
15 |
16 | 1. Do all the opencv manipulations with `Mat` and finally convert them to NDArrays (without copying):
17 |
18 | ```cs
19 | Mat m = ...
20 | NDArray array1 = m.ToNDArray(copy: false); // C# style API
21 | NDArray array2 = m.numpy(); // python style API
22 | Mat n1 = array1.AsMat(); // Convert back to Mat without copying
23 | Mat n2 = array1.ToMat(copy: true); // Convert back to Mat with copying
24 | ```
25 |
26 | 2. Use the cv2 APIs provided in `Tensorflow.OpencvAdapter`, which are in Tensorflow.NET style (python style). In this way the abstraction of `Mat` can be hidden to some degree.
27 |
28 | ```cs
29 | using static Tensorflow.OpencvAPIs;
30 |
31 | NDArray img = cv2.imread("xxx.jpg");
32 | img = cv2.resize(img, new Size(2, 3));
33 | ```
34 |
35 | ## Installation
36 |
37 | Currently the Tensorflow.OpencvAdapter has not been published. It will be published along with `Tensorflow.NET v1.0.0`.
38 |
39 | ## API progress rate
40 |
41 | - [x] Conversion between `Mat` and `NDArray`
42 | - [x] cv2.core APIs
43 | - [x] cv2.imgproc APIs
44 | - [x] cv2.photo APIs
45 | - [x] cv2.video APIs
47 | - [x] cv2.imgcodecs APIs
48 | - [x] cv2.objdetect APIs
49 | - [x] cv2.highgui APIs
50 | - [ ] cv2.features2d APIs
51 | - [ ] cv2.calib3d APIs
52 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.5.33424.131
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.OpencvAdapter", "Tensorflow.NET.OpencvAdapter\Tensorflow.OpencvAdapter.csproj", "{674EEB91-C749-4E60-A634-A1CF2F9EBA57}"
7 | EndProject
8 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "OpenCvSharp", "..\opencvsharp\src\OpenCvSharp\OpenCvSharp.csproj", "{CF348AB5-8DAD-4813-8BC1-3CB964A51603}"
9 | EndProject
10 | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "..\TensorFlow.NET\src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{C01ACE68-325B-4503-B0B1-4597EAEB1506}"
11 | EndProject
12 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.OpencvAdapter.Unittest", "Tensorflow.OpencvAdapter.Unittest\Tensorflow.OpencvAdapter.Unittest.csproj", "{D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}"
13 | EndProject
14 | Global
15 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
16 | Debug|Any CPU = Debug|Any CPU
17 | Debug|x64 = Debug|x64
18 | GPU|Any CPU = GPU|Any CPU
19 | GPU|x64 = GPU|x64
20 | Release|Any CPU = Release|Any CPU
21 | Release|x64 = Release|x64
22 | Release-JP|Any CPU = Release-JP|Any CPU
23 | Release-JP|x64 = Release-JP|x64
24 | EndGlobalSection
25 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
26 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
27 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Debug|Any CPU.Build.0 = Debug|Any CPU
28 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Debug|x64.ActiveCfg = Debug|Any CPU
29 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Debug|x64.Build.0 = Debug|Any CPU
30 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
31 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.GPU|Any CPU.Build.0 = Debug|Any CPU
32 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.GPU|x64.ActiveCfg = Debug|Any CPU
33 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.GPU|x64.Build.0 = Debug|Any CPU
34 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release|Any CPU.ActiveCfg = Release|Any CPU
35 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release|Any CPU.Build.0 = Release|Any CPU
36 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release|x64.ActiveCfg = Release|Any CPU
37 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release|x64.Build.0 = Release|Any CPU
38 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release-JP|Any CPU.ActiveCfg = Release|Any CPU
39 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release-JP|Any CPU.Build.0 = Release|Any CPU
40 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release-JP|x64.ActiveCfg = Release|Any CPU
41 | {674EEB91-C749-4E60-A634-A1CF2F9EBA57}.Release-JP|x64.Build.0 = Release|Any CPU
42 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
43 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Debug|Any CPU.Build.0 = Debug|Any CPU
44 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Debug|x64.ActiveCfg = Debug|Any CPU
45 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Debug|x64.Build.0 = Debug|Any CPU
46 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
47 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.GPU|Any CPU.Build.0 = Debug|Any CPU
48 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.GPU|x64.ActiveCfg = Debug|Any CPU
49 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.GPU|x64.Build.0 = Debug|Any CPU
50 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release|Any CPU.ActiveCfg = Release|Any CPU
51 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release|Any CPU.Build.0 = Release|Any CPU
52 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release|x64.ActiveCfg = Release|Any CPU
53 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release|x64.Build.0 = Release|Any CPU
54 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release-JP|Any CPU.ActiveCfg = Release-JP|Any CPU
55 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release-JP|Any CPU.Build.0 = Release-JP|Any CPU
56 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release-JP|x64.ActiveCfg = Release-JP|Any CPU
57 | {CF348AB5-8DAD-4813-8BC1-3CB964A51603}.Release-JP|x64.Build.0 = Release-JP|Any CPU
58 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
59 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Debug|Any CPU.Build.0 = Debug|Any CPU
60 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Debug|x64.ActiveCfg = Debug|x64
61 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Debug|x64.Build.0 = Debug|x64
62 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.GPU|Any CPU.ActiveCfg = GPU|Any CPU
63 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.GPU|Any CPU.Build.0 = GPU|Any CPU
64 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.GPU|x64.ActiveCfg = GPU|x64
65 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.GPU|x64.Build.0 = GPU|x64
66 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release|Any CPU.ActiveCfg = Release|Any CPU
67 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release|Any CPU.Build.0 = Release|Any CPU
68 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release|x64.ActiveCfg = Release|x64
69 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release|x64.Build.0 = Release|x64
70 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release-JP|Any CPU.ActiveCfg = Release|Any CPU
71 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release-JP|Any CPU.Build.0 = Release|Any CPU
72 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release-JP|x64.ActiveCfg = Release|x64
73 | {C01ACE68-325B-4503-B0B1-4597EAEB1506}.Release-JP|x64.Build.0 = Release|x64
74 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
75 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Debug|Any CPU.Build.0 = Debug|Any CPU
76 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Debug|x64.ActiveCfg = Debug|Any CPU
77 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Debug|x64.Build.0 = Debug|Any CPU
78 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
79 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.GPU|Any CPU.Build.0 = Debug|Any CPU
80 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.GPU|x64.ActiveCfg = Debug|Any CPU
81 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.GPU|x64.Build.0 = Debug|Any CPU
82 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release|Any CPU.ActiveCfg = Release|Any CPU
83 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release|Any CPU.Build.0 = Release|Any CPU
84 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release|x64.ActiveCfg = Release|Any CPU
85 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release|x64.Build.0 = Release|Any CPU
86 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release-JP|Any CPU.ActiveCfg = Release|Any CPU
87 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release-JP|Any CPU.Build.0 = Release|Any CPU
88 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release-JP|x64.ActiveCfg = Release|Any CPU
89 | {D73867B1-4622-4ADF-8AF2-B6D74AC71F3A}.Release-JP|x64.Build.0 = Release|Any CPU
90 | EndGlobalSection
91 | GlobalSection(SolutionProperties) = preSolution
92 | HideSolutionNode = FALSE
93 | EndGlobalSection
94 | GlobalSection(ExtensibilityGlobals) = postSolution
95 | SolutionGuid = {C21C7C17-04F2-4FFD-81EA-BB765A198D69}
96 | EndGlobalSection
97 | EndGlobal
98 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/OpencvAPIs.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 | using Tensorflow.OpencvAdapter.APIs;
5 |
6 | namespace Tensorflow
7 | {
/// <summary>
/// Static entry point exposing the python-style cv2 API set provided by the adapter.
/// Use <c>using static Tensorflow.OpencvAPIs;</c> to access <see cref="cv2"/> directly.
/// </summary>
public class OpencvAPIs
{
    /// <summary>The shared cv2 API surface (python-style wrappers over OpenCvSharp).</summary>
    public static Cv2API cv2 { get; } = new();
}
12 | }
13 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.core.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Security.Cryptography;
6 | using System.Text;
7 | using System.Threading.Tasks;
8 | using Tensorflow.Checkpoint;
9 | using Tensorflow.NumPy;
10 | using Tensorflow.OpencvAdapter.Extensions;
11 |
12 | namespace Tensorflow.OpencvAdapter.APIs
13 | {
14 | public partial class Cv2API
15 | {
/// <summary>
/// Computes the source location of an extrapolated pixel.
/// </summary>
/// <param name="p">0-based coordinate of the extrapolated pixel along one of the axes, likely &lt;0 or &gt;= <paramref name="len"/>.</param>
/// <param name="len">Length of the array along the corresponding axis.</param>
/// <param name="borderType">Border type, one of the <see cref="BorderTypes"/>, except for BORDER_TRANSPARENT and BORDER_ISOLATED.</param>
/// <returns>The mapped source coordinate. When borderType == BORDER_CONSTANT, the function always returns -1, regardless.</returns>
public int borderInterpolate(int p, int len, BorderTypes borderType)
    => Cv2.BorderInterpolate(p, len, borderType);
28 |
/// <summary>
/// Forms a border around the image.
/// </summary>
/// <param name="src">The source image.</param>
/// <param name="top">How many pixels upward from the source image rectangle to extrapolate.</param>
/// <param name="bottom">How many pixels downward from the source image rectangle to extrapolate.</param>
/// <param name="left">How many pixels leftward from the source image rectangle to extrapolate.</param>
/// <param name="right">How many pixels rightward from the source image rectangle to extrapolate.</param>
/// <param name="borderType">The border type.</param>
/// <param name="value">The border value if borderType == Constant.</param>
/// <returns>A new array wrapping the bordered image (no copy of the underlying Mat data).</returns>
public NDArray copyMakeBorder(NDArray src, int top, int bottom, int left, int right,
    BorderTypes borderType, Scalar? value = null)
{
    // Let OpenCV allocate the output, then wrap it without copying.
    var bordered = new Mat();
    Cv2.CopyMakeBorder(src.AsMat(), bordered, top, bottom, left, right, borderType, value);
    return new CvNDArray(bordered);
}
46 |
47 | ///
48 | /// Computes the per-element sum of two arrays or an array and a scalar.
49 | ///
50 | /// The first source array
51 | /// The second source array. It must have the same size and same type as src1
52 | /// The destination array; it will have the same size and same type as src1
53 | /// The optional operation mask, 8-bit single channel array; specifies elements of the destination array to be changed. [By default this is null]
54 | ///
55 | public NDArray add(NDArray src1, NDArray src2, NDArray? dst = null, NDArray? mask = null,
56 | TF_DataType dtype = TF_DataType.DtInvalid)
57 | {
58 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
59 | var src1Mat = src1.AsMat();
60 | var src2Mat = src2.AsMat();
61 | Cv2.Add(src1Mat, src2Mat, dstMat, mask.ToInputArray(),
62 | dtype.ToMatTypeNumber(Math.Max(src1Mat.Channels(), src2Mat.Channels())));
63 | return dst ?? new CvNDArray(dstMat);
64 | }
65 |
66 | ///
67 | /// Calculates per-element difference between two arrays or array and a scalar
68 | ///
69 | /// The first source array
70 | /// The second source array. It must have the same size and same type as src1
71 | /// The destination array; it will have the same size and same type as src1
72 | /// The optional operation mask, 8-bit single channel array; specifies elements of the destination array to be changed. [By default this is null]
73 | ///
74 | public NDArray subtract(NDArray src1, NDArray src2, NDArray? dst = null, NDArray? mask = null,
75 | TF_DataType dtype = TF_DataType.DtInvalid)
76 | {
77 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
78 | var src1Mat = src1.AsMat();
79 | var src2Mat = src2.AsMat();
80 | Cv2.Subtract(src1Mat, src2Mat, dstMat, mask.ToInputArray(),
81 | dtype.ToMatTypeNumber(Math.Max(src1Mat.Channels(), src2Mat.Channels())));
82 | return dst ?? new CvNDArray(dstMat);
83 | }
84 |
85 | ///
86 | /// Calculates per-element difference between two arrays or array and a scalar
87 | ///
88 | /// The first source array
89 | /// The second source array. It must have the same size and same type as src1
90 | /// The destination array; it will have the same size and same type as src1
91 | /// The optional operation mask, 8-bit single channel array; specifies elements of the destination array to be changed. [By default this is null]
92 | ///
93 | public NDArray subtract(NDArray src1, Scalar src2, NDArray? dst = null, NDArray? mask = null,
94 | TF_DataType dtype = TF_DataType.DtInvalid)
95 | {
96 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
97 | var src1Mat = src1.AsMat();
98 | Cv2.Subtract(src1Mat, src2, dstMat, mask.ToInputArray(), dtype.ToMatTypeNumber(src1Mat.Channels()));
99 | return dst ?? new CvNDArray(dstMat);
100 | }
101 |
102 | ///
103 | /// Calculates per-element difference between two arrays or array and a scalar
104 | ///
105 | /// The first source array
106 | /// The second source array. It must have the same size and same type as src1
107 | /// The destination array; it will have the same size and same type as src1
108 | /// The optional operation mask, 8-bit single channel array; specifies elements of the destination array to be changed. [By default this is null]
109 | ///
110 | public NDArray subtract(Scalar src1, NDArray src2, NDArray? dst = null, NDArray? mask = null,
111 | TF_DataType dtype = TF_DataType.DtInvalid)
112 | {
113 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
114 | var src2Mat = src2.AsMat();
115 | Cv2.Subtract(src1, src2Mat, dstMat, mask.ToInputArray(), dtype.ToMatTypeNumber(src2Mat.Channels()));
116 | return dst ?? new CvNDArray(dstMat);
117 | }
118 |
119 | ///
120 | /// Calculates the per-element scaled product of two arrays
121 | ///
122 | /// The first source array
123 | /// The second source array of the same size and the same type as src1
124 | /// The destination array; will have the same size and the same type as src1
125 | /// The optional scale factor. [By default this is 1]
126 | ///
127 | public NDArray multiply(NDArray src1, NDArray src2, NDArray? dst = null, double scale = 1,
128 | TF_DataType dtype = TF_DataType.DtInvalid)
129 | {
130 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
131 | var src1Mat = src1.AsMat();
132 | var src2Mat = src2.AsMat();
133 | Cv2.Multiply(src1Mat, src2Mat, dstMat, scale,
134 | dtype.ToMatTypeNumber(Math.Max(src1Mat.Channels(), src2Mat.Channels())));
135 | return dst ?? new CvNDArray(dstMat);
136 | }
137 |
138 | ///
139 | /// Performs per-element division of two arrays or a scalar by an array.
140 | ///
141 | /// The first source array
142 | /// The second source array; should have the same size and same type as src1
143 | /// The destination array; will have the same size and same type as src2
144 | /// Scale factor [By default this is 1]
145 | ///
146 | public NDArray divide(NDArray src1, NDArray src2, NDArray? dst = null, double scale = 1,
147 | TF_DataType dtype = TF_DataType.DtInvalid)
148 | {
149 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
150 | var src1Mat = src1.AsMat();
151 | var src2Mat = src2.AsMat();
152 | Cv2.Divide(src1Mat, src2Mat, dstMat, scale,
153 | dtype.ToMatTypeNumber(Math.Max(src1Mat.Channels(), src2Mat.Channels())));
154 | return dst ?? new CvNDArray(dstMat);
155 | }
156 |
157 | ///
158 | /// Performs per-element division of two arrays or a scalar by an array.
159 | ///
160 | /// Scale factor
161 | /// The first source array
162 | /// The destination array; will have the same size and same type as src2
163 | ///
164 | public NDArray divide(double scale, NDArray src2, NDArray? dst = null,
165 | TF_DataType dtype = TF_DataType.DtInvalid)
166 | {
167 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
168 | var src2Mat = src2.AsMat();
169 | Cv2.Divide(scale, src2Mat, dstMat, dtype.ToMatTypeNumber(src2Mat.Channels()));
170 | return dst ?? new CvNDArray(dstMat);
171 | }
172 |
173 | ///
174 | /// adds scaled array to another one (dst = alpha*src1 + src2)
175 | ///
176 | ///
177 | ///
178 | ///
179 | ///
180 | public NDArray scaleAdd(NDArray src1, double alpha, NDArray src2, NDArray? dst = null)
181 | {
182 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
183 | var src1Mat = src1.AsMat();
184 | var src2Mat = src2.AsMat();
185 | Cv2.ScaleAdd(src1Mat, alpha, src2Mat, dstMat);
186 | return dst ?? new CvNDArray(dstMat);
187 | }
188 |
189 | ///
190 | /// computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
191 | ///
192 | ///
193 | ///
194 | ///
195 | ///
196 | ///
197 | ///
198 | ///
199 | public NDArray addWeighted(NDArray src1, double alpha, NDArray src2, double beta,
200 | double gamma, NDArray? dst = null, TF_DataType dtype = TF_DataType.DtInvalid)
201 | {
202 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
203 | var src1Mat = src1.AsMat();
204 | var src2Mat = src2.AsMat();
205 | Cv2.AddWeighted(src1Mat, alpha, src2Mat, beta, gamma, dstMat,
206 | dtype.ToMatTypeNumber(Math.Max(src1Mat.Channels(), src2Mat.Channels())));
207 | return dst ?? new CvNDArray(dstMat);
208 | }
209 |
210 | ///
211 | /// Scales, computes absolute values and converts the result to 8-bit.
212 | ///
213 | /// The source array
214 | /// The destination array
215 | /// The optional scale factor. [By default this is 1]
216 | /// The optional delta added to the scaled values. [By default this is 0]
217 | public NDArray convertScaleAbs(NDArray src, NDArray? dst = null, double alpha = 1,
218 | double beta = 0)
219 | {
220 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
221 | var srcMat = src.AsMat();
222 | Cv2.ConvertScaleAbs(srcMat, dstMat, alpha, beta);
223 | return dst ?? new CvNDArray(dstMat);
224 | }
225 |
226 | ///
227 | /// Converts an array to half precision floating number.
228 | ///
229 | /// This function converts FP32(single precision floating point) from/to FP16(half precision floating point). CV_16S format is used to represent FP16 data.
230 | /// There are two use modes(src -> dst) : CV_32F -> CV_16S and CV_16S -> CV_32F.The input array has to have type of CV_32F or
231 | /// CV_16S to represent the bit depth.If the input array is neither of them, the function will raise an error.
232 | /// The format of half precision floating point is defined in IEEE 754-2008.
233 | ///
234 | /// input array.
235 | /// output array.
236 | public NDArray convertFp16(NDArray src, NDArray? dst = null)
237 | {
238 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
239 | var srcMat = src.AsMat();
240 | Cv2.ConvertFp16(srcMat, dstMat);
241 | return dst ?? new CvNDArray(dstMat);
242 | }
243 |
244 | ///
245 | /// transforms array of numbers using a lookup table: dst(i)=lut(src(i))
246 | ///
247 | /// Source array of 8-bit elements
248 | /// Look-up table of 256 elements.
249 | /// In the case of multi-channel source array, the table should either have
250 | /// a single channel (in this case the same table is used for all channels)
251 | /// or the same number of channels as in the source array
252 | /// Destination array;
253 | /// will have the same size and the same number of channels as src,
254 | /// and the same depth as lut
255 | public NDArray LUT(NDArray src, NDArray lut, NDArray? dst = null)
256 | {
257 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
258 | var srcMat = src.AsMat();
259 | var lutMat = lut.AsMat();
260 | Cv2.LUT(srcMat, lutMat, dstMat);
261 | return dst ?? new CvNDArray(dstMat);
262 | }
263 |
264 | ///
265 | /// transforms array of numbers using a lookup table: dst(i)=lut(src(i))
266 | ///
267 | /// Source array of 8-bit elements
268 | /// Look-up table of 256 elements.
269 | /// In the case of multi-channel source array, the table should either have
270 | /// a single channel (in this case the same table is used for all channels)
271 | /// or the same number of channels as in the source array
272 | /// Destination array;
273 | /// will have the same size and the same number of channels as src,
274 | /// and the same depth as lut
275 | public NDArray LUT(NDArray src, byte[] lut, NDArray? dst = null)
276 | {
277 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
278 | var srcMat = src.AsMat();
279 | Cv2.LUT(srcMat, lut, dstMat);
280 | return dst ?? new CvNDArray(dstMat);
281 | }
282 |
283 | ///
284 | /// computes sum of array elements
285 | ///
286 | /// The source array; must have 1 to 4 channels
287 | ///
288 | public Scalar sum(NDArray src)
289 | {
290 | return Cv2.Sum(src.AsMat());
291 | }
292 |
293 | ///
294 | /// computes the number of nonzero array elements
295 | ///
296 | /// Single-channel array
297 | /// number of non-zero elements in mtx
298 | public int countNonZero(NDArray mtx)
299 | {
300 | return Cv2.CountNonZero(mtx.AsMat());
301 | }
302 |
303 | ///
304 | /// returns the list of locations of non-zero pixels
305 | ///
306 | ///
307 | ///
308 | public NDArray findNonZero(NDArray src, NDArray? idx = null)
309 | {
310 | Mat dstMat = idx is null ? new Mat() : idx.AsMat();
311 | var srcMat = src.AsMat();
312 | Cv2.FindNonZero(srcMat, dstMat);
313 | return idx ?? new CvNDArray(dstMat);
314 | }
315 |
316 | ///
317 | /// computes mean value of selected array elements
318 | ///
319 | /// The source array; it should have 1 to 4 channels
320 | /// (so that the result can be stored in Scalar)
321 | /// The optional operation mask
322 | ///
323 | public Scalar mean(NDArray src, NDArray? mask = null)
324 | {
325 | var srcMat = src.AsMat();
326 | return Cv2.Mean(srcMat, mask.ToInputArray());
327 | }
328 |
329 | ///
330 | /// computes mean value and standard deviation of all or selected array elements
331 | ///
332 | /// The source array; it should have 1 to 4 channels
333 | /// (so that the results can be stored in Scalar's)
334 | /// The output parameter: computed mean value
335 | /// The output parameter: computed standard deviation
336 | /// The optional operation mask
337 | public (NDArray, NDArray) meanStdDev(NDArray src, NDArray? mean = null,
338 | NDArray? stddev = null, NDArray? mask = null)
339 | {
340 | var srcMat = src.AsMat();
341 | var meanMat = mean?.AsMat();
342 | var stddevMat = stddev?.AsMat();
343 | Cv2.MeanStdDev(srcMat, meanMat, stddevMat, mask.ToInputArray());
344 | var meanRes = mean ?? new CvNDArray(meanMat);
345 | var stddevRes = stddev ?? new CvNDArray(stddevMat);
346 | return (meanRes, stddevRes);
347 | }
348 |
349 | ///
350 | /// Calculates absolute array norm, absolute difference norm, or relative difference norm.
351 | ///
352 | /// The first source array
353 | /// Type of the norm
354 | /// The optional operation mask
355 | public double norm(NDArray src1, NormTypes normType = NormTypes.L2, NDArray? mask = null)
356 | {
357 | var src1Mat = src1.AsMat();
358 | return Cv2.Norm(src1Mat, normType, mask.ToInputArray());
359 | }
360 |
361 | ///
362 | /// computes norm of selected part of the difference between two arrays
363 | ///
364 | /// The first source array
365 | /// The second source array of the same size and the same type as src1
366 | /// Type of the norm
367 | /// The optional operation mask
368 | public double norm(NDArray src1, NDArray src2, NormTypes normType = NormTypes.L2,
369 | NDArray? mask = null)
370 | {
371 | var src1Mat = src1.AsMat();
372 | var src2Mat = src2.AsMat();
373 | return Cv2.Norm(src1Mat, src2Mat, normType, mask.ToInputArray());
374 | }
375 |
376 | ///
377 | /// Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
378 | ///
379 | /// This function calculates the Peak Signal-to-Noise Ratio(PSNR) image quality metric in decibels(dB),
380 | /// between two input arrays src1 and src2.The arrays must have the same type.
381 | ///
382 | /// first input array.
383 | /// second input array of the same size as src1.
384 | /// the maximum pixel value (255 by default)
385 | ///
386 | public double PSNR(NDArray src1, NDArray src2, double r = 255.0)
387 | {
388 | var src1Mat = src1.AsMat();
389 | var src2Mat = src2.AsMat();
390 | return Cv2.PSNR(src1Mat, src2Mat);
391 | }
392 |
393 | ///
394 | /// naive nearest neighbor finder
395 | ///
396 | ///
397 | ///
398 | ///
399 | ///
400 | ///
401 | ///
402 | ///
403 | ///
404 | /// dist and nidx
405 | public (NDArray, NDArray) batchDistance(NDArray src1, NDArray src2,
406 | // ReSharper disable once IdentifierTypo
407 | int dtype, NormTypes normType = NormTypes.L2,
408 | int k = 0, NDArray? mask = null,
409 | int update = 0, bool crosscheck = false)
410 | {
411 | var src1Mat = src1.AsMat();
412 | var src2Mat = src2.AsMat();
413 | Mat dist = new();
414 | Mat nidx = new();
415 | Cv2.BatchDistance(src1Mat, src2Mat, dist, dtype, nidx, normType,
416 | k, mask.ToInputArray(), update, crosscheck);
417 | return (dist.numpy(), nidx.numpy());
418 | }
419 |
420 | ///
421 | /// scales and shifts array elements so that either the specified norm (alpha)
422 | /// or the minimum (alpha) and maximum (beta) array values get the specified values
423 | ///
424 | /// The source array
425 | /// The destination array; will have the same size as src
426 | /// The norm value to normalize to or the lower range boundary
427 | /// in the case of range normalization
428 | /// The upper range boundary in the case of range normalization;
429 | /// not used for norm normalization
430 | /// The normalization type
431 | /// When the parameter is negative,
432 | /// the destination array will have the same type as src,
433 | /// otherwise it will have the same number of channels as src and the depth =CV_MAT_DEPTH(rtype)
434 | /// The optional operation mask
435 | /// dst
436 | public NDArray normalize(NDArray src, NDArray? dst = null, double alpha = 1, double beta = 0,
437 | NormTypes normType = NormTypes.L2, int dtype = -1, InputArray? mask = null)
438 | {
439 | if (!dst.CanConvertToMatWithouyCopy())
440 | {
441 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
442 | "normalize needs that. Please consider change the adapter mode.");
443 | }
444 | var srcMat = src.AsMat();
445 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
446 | Cv2.Normalize(srcMat, dstMat, alpha, beta, normType, dtype, mask);
447 | if(dst is null)
448 | {
449 | dst = dstMat.numpy();
450 | }
451 | return dst;
452 | }
453 |
454 | ///
455 | /// Finds indices of max elements along provided axis
456 | ///
457 | /// Input single-channel array
458 | /// Axis to reduce along
459 | /// Whether to get the index of first or last occurrence of max
460 | /// Output array of type CV_32SC1 with the same dimensionality as src,
461 | /// except for axis being reduced - it should be set to 1.
462 | public NDArray reduceArgMax(NDArray src, int axis, bool lastIndex = false)
463 | {
464 | var srcMat = src.AsMat();
465 | Mat dstMat = new Mat();
466 | Cv2.ReduceArgMax(srcMat, dstMat, axis, lastIndex);
467 | return dstMat.numpy();
468 | }
469 |
470 | ///
471 | /// Finds indices of min elements along provided axis
472 | ///
473 | /// Input single-channel array
474 | /// Axis to reduce along
475 | /// Whether to get the index of first or last occurrence of max
476 | /// Output array of type CV_32SC1 with the same dimensionality as src,
477 | /// except for axis being reduced - it should be set to 1.
478 | public NDArray reduceArgMin(NDArray src, int axis, bool lastIndex = false)
479 | {
480 | var srcMat = src.AsMat();
481 | Mat dstMat = new Mat();
482 | Cv2.ReduceArgMin(srcMat, dstMat, axis, lastIndex);
483 | return dstMat.numpy();
484 | }
485 |
486 | ///
487 | /// finds global minimum and maximum array elements and returns their values and their locations
488 | ///
489 | /// The source single-channel array
490 | /// Pointer to returned minimum value and maximum value.
491 | public (double, double) minMaxLoc(NDArray src)
492 | {
493 | Cv2.MinMaxLoc(src.AsMat(), out double minVal, out double maxVal);
494 | return (minVal, maxVal);
495 | }
496 |
497 | ///
498 | /// finds global minimum and maximum array elements and returns their values and their locations
499 | ///
500 | /// The source single-channel array
501 | /// The optional mask used to select a sub-array
502 | /// Pointer to returned minimum value, maximum value, minimum location, maximum location.
503 | public (double, double, Point, Point) minMaxLoc(NDArray src, NDArray? mask = null)
504 | {
505 | Cv2.MinMaxLoc(src.AsMat(), out double minVal, out double maxVal, out Point minLoc, out Point maxLoc, mask.ToInputArray());
506 | return (minVal, maxVal, minLoc, maxLoc);
507 | }
508 |
509 | ///
510 | /// finds global minimum and maximum array elements and returns their values and their locations
511 | ///
512 | /// The source single-channel array
513 | /// Pointer to returned minimum value and maximum value.
514 | public (double, double) minMaxIdx(NDArray src)
515 | {
516 | Cv2.MinMaxIdx(src.AsMat(), out double minVal, out double maxVal);
517 | return (minVal, maxVal);
518 | }
519 |
520 | ///
521 | /// finds global minimum and maximum array elements and returns their values and their locations
522 | ///
523 | /// The source single-channel array
524 | ///
525 | /// Pointer to returned minimum value, maximum value, minimum idx, maximum idx.
526 | public (double, double, int[], int[]) minMaxIdx(NDArray src, NDArray? mask = null)
527 | {
528 | int[] minIdx = new int[src.AsMat().Dims];
529 | int[] maxIdx = new int[src.AsMat().Dims];
530 | Cv2.MinMaxIdx(src.AsMat(), out double minVal, out double maxVal,minIdx, maxIdx, mask.ToInputArray());
531 | return (minVal, maxVal, minIdx, maxIdx);
532 | }
533 |
534 | ///
535 | /// transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows
536 | ///
537 | /// The source 2D matrix
538 | /// The dimension index along which the matrix is reduced.
539 | /// 0 means that the matrix is reduced to a single row and 1 means that the matrix is reduced to a single column
540 | ///
541 | /// When it is negative, the destination vector will have
542 | /// the same type as the source matrix, otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), mtx.channels())
543 | /// The destination vector.
544 | public NDArray reduce(NDArray src, ReduceDimension dim, ReduceTypes rtype, TF_DataType dtype)
545 | {
546 | var srcMat = src.AsMat();
547 | Mat dstMat = new();
548 | Cv2.Reduce(srcMat, dstMat, dim, rtype, dtype.ToMatTypeNumber(srcMat.Channels()));
549 | return dstMat.numpy();
550 | }
551 |
552 | ///
553 | /// makes multi-channel array out of several single-channel arrays
554 | ///
555 | ///
556 | ///
557 | public NDArray merge(IEnumerable mv)
558 | {
559 | Mat dstMat = new();
560 | Cv2.Merge(mv.Select(x => x.AsMat()).ToArray(), dstMat);
561 | return dstMat.numpy();
562 | }
563 |
564 | ///
565 | /// Copies each plane of a multi-channel array to a dedicated array
566 | ///
567 | /// The source multi-channel array
568 | /// The number of arrays must match mtx.channels() .
569 | /// The arrays themselves will be reallocated if needed
570 | public NDArray[] split(NDArray src)
571 | {
572 | return Cv2.Split(src.AsMat()).Select(x => x.numpy()).ToArray();
573 | }
574 |
575 | ///
576 | /// extracts a single channel from src (coi is 0-based index)
577 | ///
578 | ///
579 | ///
580 | ///
581 | public NDArray extractChannel(NDArray src, int coi)
582 | {
583 | Mat dstMat = new();
584 | Cv2.ExtractChannel(src.AsMat(), dstMat, coi);
585 | return dstMat.numpy();
586 | }
587 |
588 | ///
589 | /// inserts a single channel to dst (coi is 0-based index)
590 | ///
591 | ///
592 | ///
593 | ///
594 | public NDArray insertChannel(NDArray src, int coi)
595 | {
596 | Mat dstMat = new();
597 | Cv2.InsertChannel(src.AsMat(), dstMat, coi);
598 | return dstMat.numpy();
599 | }
600 |
601 | ///
602 | /// reverses the order of the rows, columns or both in a matrix
603 | ///
604 | /// The source array
605 | /// Specifies how to flip the array:
606 | /// 0 means flipping around the x-axis, positive (e.g., 1) means flipping around y-axis,
607 | /// and negative (e.g., -1) means flipping around both axes. See also the discussion below for the formulas.
608 | /// The destination array; will have the same size and same type as src
609 | public NDArray flip(NDArray src, FlipMode flipCode)
610 | {
611 | Mat dstMat = new();
612 | Cv2.Flip(src.AsMat(), dstMat, flipCode);
613 | return dstMat.numpy();
614 | }
615 |
616 | ///
617 | /// Rotates a 2D array in multiples of 90 degrees.
618 | ///
619 | /// input array.
620 | /// an enum to specify how to rotate the array.
621 | /// output array of the same type as src.
622 | /// The size is the same with ROTATE_180, and the rows and cols are switched for
623 | /// ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE.
624 | public NDArray rotate(NDArray src, RotateFlags rotateCode)
625 | {
626 | Mat dstMat = new();
627 | Cv2.Rotate(src.AsMat(), dstMat, rotateCode);
628 | return dstMat.numpy();
629 | }
630 |
631 | ///
632 | /// replicates the input matrix the specified number of times in the horizontal and/or vertical direction
633 | ///
634 | /// The source array to replicate
635 | /// How many times the src is repeated along the vertical axis
636 | /// How many times the src is repeated along the horizontal axis
637 | /// The destination array; will have the same type as src
638 | public NDArray repeat(NDArray src, int ny, int nx)
639 | {
640 | Mat dstMat = new();
641 | Cv2.Repeat(src.AsMat(), ny, nx);
642 | return dstMat.numpy();
643 | }
644 |
645 | ///
646 | /// Applies horizontal concatenation to given matrices.
647 | ///
648 | /// input array or vector of matrices. all of the matrices must have the same number of rows and the same depth.
649 | /// output array. It has the same number of rows and depth as the src, and the sum of cols of the src.
650 | public NDArray hconcat(IEnumerable src)
651 | {
652 | Mat dstMat = new();
653 | Cv2.HConcat(src.Select(x => x.AsMat()), dstMat);
654 | return dstMat.numpy();
655 | }
656 |
657 | ///
658 | /// Applies horizontal concatenation to given matrices.
659 | ///
660 | /// first input array to be considered for horizontal concatenation.
661 | /// second input array to be considered for horizontal concatenation.
662 | /// output array. It has the same number of rows and depth as the src1 and src2, and the sum of cols of the src1 and src2.
663 | public NDArray hconcat(NDArray src1, NDArray src2)
664 | {
665 | Mat dstMat = new();
666 | Cv2.HConcat(src1.AsMat(), src2.AsMat(), dstMat);
667 | return dstMat.numpy();
668 | }
669 |
670 | ///
671 | /// Applies vertical concatenation to given matrices.
672 | ///
673 | /// input array or vector of matrices. all of the matrices must have the same number of cols and the same depth.
674 | /// output array. It has the same number of cols and depth as the src, and the sum of rows of the src.
675 | public NDArray vconcat(IEnumerable src)
676 | {
677 | Mat dstMat = new();
678 | Cv2.VConcat(src.Select(x => x.AsMat()), dstMat);
679 | return dstMat.numpy();
680 | }
681 |
682 | ///
683 | /// Applies vertical concatenation to given matrices.
684 | ///
685 | /// first input array to be considered for vertical concatenation.
686 | /// second input array to be considered for vertical concatenation.
687 | /// output array. It has the same number of cols and depth as the src1 and src2, and the sum of rows of the src1 and src2.
688 | public NDArray vconcat(NDArray src1, NDArray src2)
689 | {
690 | Mat dstMat = new();
691 | Cv2.VConcat(src1.AsMat(), src2.AsMat(), dstMat);
692 | return dstMat.numpy();
693 | }
694 |
695 | ///
696 | /// computes bitwise conjunction of the two arrays (dst = src1 & src2)
697 | ///
698 | /// first input array or a scalar.
699 | /// second input array or a scalar.
700 | /// optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
701 | /// output array that has the same size and type as the input
702 | public NDArray bitwise_and(NDArray src1, NDArray src2, NDArray? mask = null)
703 | {
704 | Mat dstMat = new();
705 | Cv2.BitwiseAnd(src1.AsMat(), src2.AsMat(), dstMat, mask.ToInputArray());
706 | return dstMat.numpy();
707 | }
708 |
709 | ///
710 | /// computes bitwise conjunction of the two arrays (dst = src1 | src2)
711 | ///
712 | /// first input array or a scalar.
713 | /// second input array or a scalar.
714 | /// optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
715 | /// output array that has the same size and type as the input
716 | public NDArray bitwise_or(NDArray src1, NDArray src2, NDArray? mask = null)
717 | {
718 | Mat dstMat = new();
719 | Cv2.BitwiseOr(src1.AsMat(), src2.AsMat(), dstMat, mask.ToInputArray());
720 | return dstMat.numpy();
721 | }
722 |
723 | ///
724 | /// computes bitwise conjunction of the two arrays (dst = src1 ^ src2)
725 | ///
726 | /// first input array or a scalar.
727 | /// second input array or a scalar.
728 | /// optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
729 | /// output array that has the same size and type as the input
730 | public NDArray bitwise_xor(NDArray src1, NDArray src2, NDArray? mask = null)
731 | {
732 | Mat dstMat = new();
733 | Cv2.BitwiseXor(src1.AsMat(), src2.AsMat(), dstMat, mask.ToInputArray());
734 | return dstMat.numpy();
735 | }
736 |
737 | ///
738 | /// inverts each bit of array (dst = ~src)
739 | ///
740 | /// input array.
741 | /// optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
742 | /// output array that has the same size and type as the input
743 | public NDArray bitwise_not(NDArray src, NDArray? mask = null)
744 | {
745 | Mat dstMat = new();
746 | Cv2.BitwiseNot(src.AsMat(), dstMat, mask.ToInputArray());
747 | return dstMat.numpy();
748 | }
749 |
750 | ///
751 | /// Calculates the per-element absolute difference between two arrays or between an array and a scalar.
752 | ///
753 | /// first input array or a scalar.
754 | /// second input array or a scalar.
755 | /// output array that has the same size and type as input arrays.
756 | public NDArray absdiff(NDArray src1, NDArray src2)
757 | {
758 | Mat dstMat = new();
759 | Cv2.Absdiff(src1.AsMat(), src2.AsMat(), dstMat);
760 | return dstMat.numpy();
761 | }
762 |
763 | ///
764 | /// Copies the matrix to another one.
765 | /// When the operation mask is specified, if the Mat::create call shown above reallocates the matrix, the newly allocated matrix is initialized with all zeros before copying the data.
766 | ///
767 | /// Source matrix.
768 | /// Destination matrix. If it does not have a proper size or type before the operation, it is reallocated.
769 | /// Operation mask of the same size as \*this. Its non-zero elements indicate which matrix
770 | /// elements need to be copied.The mask has to be of type CV_8U and can have 1 or multiple channels.
771 | /// dst
772 | public NDArray copyTo(NDArray src, NDArray? mask = null, NDArray? dst = null)
773 | {
774 | Mat dstMat = dst is null ? new Mat() : dst.AsMat();
775 | Cv2.CopyTo(src.AsMat(), dstMat, mask.ToInputArray());
776 | if(dst is null)
777 | {
778 | dst = dstMat.numpy();
779 | }
780 | return dst;
781 | }
782 |
783 | ///
784 | /// Checks if array elements lie between the elements of two other arrays.
785 | ///
786 | /// first input array.
787 | /// inclusive lower boundary array or a scalar.
788 | /// inclusive upper boundary array or a scalar.
789 | /// output array of the same size as src and CV_8U type.
790 | public NDArray inRange(NDArray src, NDArray lowerb, NDArray upperb)
791 | {
792 | Mat dstMat = new();
793 | Cv2.InRange(src.AsMat(), lowerb.AsMat(), upperb.AsMat(), dstMat);
794 | return dstMat.numpy();
795 | }
796 |
797 | ///
798 | /// Checks if array elements lie between the elements of two other arrays.
799 | ///
800 | /// first input array.
801 | /// inclusive lower boundary array or a scalar.
802 | /// inclusive upper boundary array or a scalar.
803 | /// output array of the same size as src and CV_8U type.
804 | public NDArray inRange(NDArray src, Scalar lowerb, Scalar upperb)
805 | {
806 | Mat dstMat = new();
807 | Cv2.InRange(src.AsMat(), lowerb, upperb, dstMat);
808 | return dstMat.numpy();
809 | }
810 |
811 | ///
812 | /// Performs the per-element comparison of two arrays or an array and scalar value.
813 | ///
814 | /// first input array or a scalar; when it is an array, it must have a single channel.
815 | /// second input array or a scalar; when it is an array, it must have a single channel.
816 | /// a flag, that specifies correspondence between the arrays (cv::CmpTypes)
817 | /// output array of type ref CV_8U that has the same size and the same number of channels as the input arrays.
818 | public NDArray compare(NDArray src1, NDArray src2, CmpType cmpop)
819 | {
820 | Mat dstMat = new();
821 | Cv2.Compare(src1.AsMat(), src2.AsMat(), dstMat, cmpop);
822 | return dstMat.numpy();
823 | }
824 |
825 | public NDArray min(NDArray src1, NDArray src2)
826 | {
827 | Mat dstMat = new();
828 | Cv2.Min(src1.AsMat(), src2.AsMat(), dstMat);
829 | return dstMat.numpy();
830 | }
831 |
832 | public NDArray min(NDArray src1, double src2)
833 | {
834 | Mat dstMat = new();
835 | Cv2.Min(src1.AsMat(), src2, dstMat);
836 | return dstMat.numpy();
837 | }
838 |
839 | public NDArray max(NDArray src1, NDArray src2)
840 | {
841 | Mat dstMat = new();
842 | Cv2.Max(src1.AsMat(), src2.AsMat(), dstMat);
843 | return dstMat.numpy();
844 | }
845 |
846 | public NDArray max(NDArray src1, double src2)
847 | {
848 | Mat dstMat = new();
849 | Cv2.Max(src1.AsMat(), src2, dstMat);
850 | return dstMat.numpy();
851 | }
852 |
853 | public NDArray sqrt(NDArray src)
854 | {
855 | Mat dstMat = new();
856 | Cv2.Sqrt(src.AsMat(), dstMat);
857 | return dstMat.numpy();
858 | }
859 |
860 | public NDArray pow(NDArray src, double power)
861 | {
862 | Mat dstMat = new();
863 | Cv2.Pow(src.AsMat(), power, dstMat);
864 | return dstMat.numpy();
865 | }
866 |
867 | public NDArray exp(NDArray src)
868 | {
869 | Mat dstMat = new();
870 | Cv2.Exp(src.AsMat(), dstMat);
871 | return dstMat.numpy();
872 | }
873 |
874 | public NDArray log(NDArray src)
875 | {
876 | Mat dstMat = new();
877 | Cv2.Log(src.AsMat(), dstMat);
878 | return dstMat.numpy();
879 | }
880 |
881 | ///
882 | /// Calculates x and y coordinates of 2D vectors from their magnitude and angle.
883 | ///
884 | /// input floating-point array of magnitudes of 2D vectors;
885 | /// it can be an empty matrix(=Mat()), in this case, the function assumes that all the magnitudes are = 1; if it is not empty,
886 | /// it must have the same size and type as angle.
887 | /// input floating-point array of angles of 2D vectors.
888 | /// when true, the input angles are measured in degrees, otherwise, they are measured in radians.
889 | /// output arrays of x-coordinates and y-coordinates of 2D vectors
890 | public (NDArray, NDArray) polarToCart(NDArray magnitude, NDArray angle, bool angleInDegrees = false)
891 | {
892 | Mat xMat = new();
893 | Mat yMat = new();
894 | Cv2.PolarToCart(magnitude.AsMat(), angle.AsMat(), xMat, yMat, angleInDegrees);
895 | return (xMat.numpy(), yMat.numpy());
896 | }
897 |
898 | ///
899 | /// Calculates the magnitude and angle of 2D vectors.
900 | ///
901 | /// array of x-coordinates; this must be a single-precision or double-precision floating-point array.
902 | /// array of y-coordinates, that must have the same size and same type as x.
903 | /// the angles are measured in radians(from 0 to 2\*Pi) or in degrees(0 to 360 degrees).
904 | /// a flag, indicating whether the angles are measured in radians(which is by default), or in degrees.
905 | /// output arrays of magnitudes and angles of the same size and type as x.
906 | public (NDArray, NDArray) cartToPolar(NDArray x, NDArray y, bool angleInDegrees = false)
907 | {
908 | Mat magnitudeMat = new();
909 | Mat angleMat = new();
910 | Cv2.CartToPolar(x.AsMat(), y.AsMat(), magnitudeMat, angleMat, angleInDegrees);
911 | return (magnitudeMat.numpy(), angleMat.numpy());
912 | }
913 |
914 | ///
915 | /// Calculates the rotation angle of 2D vectors.
916 | ///
917 | /// input floating-point array of x-coordinates of 2D vectors.
918 | /// input array of y-coordinates of 2D vectors; it must have the same size and the same type as x.
919 | /// when true, the function calculates the angle in degrees, otherwise, they are measured in radians.
920 | /// output array of vector angles; it has the same size and same type as x.
921 | public NDArray phase(NDArray x, NDArray y, bool angleInDegrees = false)
922 | {
923 | Mat angleMat = new();
924 | Cv2.Phase(x.AsMat(), y.AsMat(), angleMat, angleInDegrees);
925 | return angleMat.numpy();
926 | }
927 |
928 | ///
929 | /// Calculates the magnitude of 2D vectors.
930 | ///
931 | /// floating-point array of x-coordinates of the vectors.
932 | /// floating-point array of y-coordinates of the vectors; it must have the same size as x.
933 | /// output array of the same size and type as x.
934 | public NDArray magnitude(NDArray x, NDArray y)
935 | {
936 | Mat magnitudeMat = new();
937 | Cv2.Magnitude(x.AsMat(), y.AsMat(), magnitudeMat);
938 | return magnitudeMat.numpy();
939 | }
940 |
941 | ///
942 | /// checks that each matrix element is within the specified range.
943 | ///
944 | /// The array to check
945 | /// The flag indicating whether the functions quietly
946 | /// return false when the array elements are out of range,
947 | /// or they throw an exception.
948 | ///
949 | public bool checkRange(NDArray src, bool quiet = true)
950 | {
951 | return Cv2.CheckRange(src.AsMat(), quiet);
952 | }
953 |
954 | ///
955 | /// checks that each matrix element is within the specified range.
956 | ///
957 | /// The array to check
958 | /// The flag indicating whether the functions quietly
959 | /// return false when the array elements are out of range,
960 | /// or they throw an exception.
961 | /// The inclusive lower boundary of valid values range
962 | /// The exclusive upper boundary of valid values range
963 | /// The optional output parameter, where the position of
964 | /// the first outlier is stored.
965 | ///
966 | public (bool, Point) checkRange(NDArray src, bool quiet,
967 | double minVal = double.MinValue, double maxVal = double.MaxValue)
968 | {
969 | bool retVal = Cv2.CheckRange(src.AsMat(), quiet, out var pos, minVal, maxVal);
970 | return (retVal, pos);
971 | }
972 |
973 | ///
974 | /// converts NaN's to the given number
975 | ///
976 | ///
977 | ///
978 | ///
979 | public NDArray patchNaNs(NDArray a, double val = 0)
980 | {
981 | if (!a.CanConvertToMatWithouyCopy())
982 | {
983 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
984 | "patchNaNs needs that. Please consider change the adapter mode.");
985 | }
986 | Cv2.PatchNaNs(a.AsMat(), val);
987 | return a;
988 | }
989 |
990 | ///
991 | /// implements generalized matrix product algorithm GEMM from BLAS
992 | ///
993 | ///
994 | ///
995 | ///
996 | ///
997 | ///
998 | ///
999 | ///
1000 | public NDArray gemm(NDArray src1, NDArray src2, double alpha, NDArray src3,
1001 | double gamma, GemmFlags flags = GemmFlags.None)
1002 | {
1003 | Mat dstMat = new();
1004 | Cv2.Gemm(src1.AsMat(), src2.AsMat(), alpha, src3.AsMat(), gamma, dstMat, flags);
1005 | return dstMat.numpy();
1006 | }
1007 |
1008 | ///
1009 | /// multiplies matrix by its transposition from the left or from the right
1010 | ///
1011 | /// The source matrix
1012 | /// Specifies the multiplication ordering; see the description below
1013 | /// The optional delta matrix, subtracted from src before the
1014 | /// multiplication. When the matrix is empty ( delta=Mat() ), it’s assumed to be
1015 | /// zero, i.e. nothing is subtracted, otherwise if it has the same size as src,
1016 | /// then it’s simply subtracted, otherwise it is "repeated" to cover the full src
1017 | /// and then subtracted. Type of the delta matrix, when it's not empty, must be the
1018 | /// same as the type of created destination matrix, see the rtype description
1019 | /// The optional scale factor for the matrix product
1020 | /// When it’s negative, the destination matrix will have the
1021 | /// same type as src . Otherwise, it will have type=CV_MAT_DEPTH(rtype),
1022 | /// which should be either CV_32F or CV_64F
1023 | /// The destination square matrix
1024 | public NDArray mulTransposed(NDArray src, bool aTa, NDArray? delta = null, double scale = 1,
1025 | TF_DataType dtype = TF_DataType.DtInvalid)
1026 | {
1027 | Mat dstMat = new();
1028 | var srcMat = src.AsMat();
1029 | Cv2.MulTransposed(srcMat, dstMat, aTa, delta.ToInputArray(), scale, dtype.ToMatTypeNumber(srcMat.Channels()));
1030 | return dstMat.numpy();
1031 | }
1032 |
1033 | ///
1034 | /// transposes the matrix
1035 | ///
1036 | /// The source array
1037 | /// The destination array of the same type as src
1038 | public NDArray transpose(NDArray src)
1039 | {
1040 | Mat dstMat = new();
1041 | Cv2.Transpose(src.AsMat(), dstMat);
1042 | return dstMat.numpy();
1043 | }
1044 |
1045 | ///
1046 | /// performs affine transformation of each element of multi-channel input matrix
1047 | ///
1048 | /// The source array; must have as many channels (1 to 4) as mtx.cols or mtx.cols-1
1049 | /// The transformation matrix
1050 | /// The destination array; will have the same size and depth as src and as many channels as mtx.rows
1051 | public NDArray transform(NDArray src, NDArray m)
1052 | {
1053 | Mat dstMat = new();
1054 | Cv2.Transform(src.AsMat(), dstMat, m.AsMat());
1055 | return dstMat.numpy();
1056 | }
1057 |
1058 | ///
1059 | /// performs perspective transformation of each element of multi-channel input matrix
1060 | ///
1061 | /// The source two-channel or three-channel floating-point array;
1062 | /// each element is 2D/3D vector to be transformed
1063 | /// 3x3 or 4x4 transformation matrix
1064 | /// The destination array; it will have the same size and same type as src
1065 | public NDArray perspectiveTransform(NDArray src, NDArray m)
1066 | {
1067 | Mat dstMat = new();
1068 | Cv2.PerspectiveTransform(src.AsMat(), dstMat, m.AsMat());
1069 | return dstMat.numpy();
1070 | }
1071 |
1072 | ///
1073 | /// performs perspective transformation of each element of multi-channel input matrix
1074 | ///
1075 | /// The source two-channel or three-channel floating-point array;
1076 | /// each element is 2D/3D vector to be transformed
1077 | /// 3x3 or 4x4 transformation matrix
1078 | /// The destination array; it will have the same size and same type as src
1079 | public Point2f[] perspectiveTransform(IEnumerable src, NDArray m)
1080 | {
1081 | return Cv2.PerspectiveTransform(src, m.AsMat());
1082 | }
1083 |
1084 | ///
1085 | /// performs perspective transformation of each element of multi-channel input matrix
1086 | ///
1087 | /// The source two-channel or three-channel floating-point array;
1088 | /// each element is 2D/3D vector to be transformed
1089 | /// 3x3 or 4x4 transformation matrix
1090 | /// The destination array; it will have the same size and same type as src
1091 | public Point2d[] perspectiveTransform(IEnumerable src, NDArray m)
1092 | {
1093 | return Cv2.PerspectiveTransform(src, m.AsMat());
1094 | }
1095 |
1096 | ///
1097 | /// performs perspective transformation of each element of multi-channel input matrix
1098 | ///
1099 | /// The source two-channel or three-channel floating-point array;
1100 | /// each element is 2D/3D vector to be transformed
1101 | /// 3x3 or 4x4 transformation matrix
1102 | /// The destination array; it will have the same size and same type as src
1103 | public Point3f[] perspectiveTransform(IEnumerable src, NDArray m)
1104 | {
1105 | return Cv2.PerspectiveTransform(src, m.AsMat());
1106 | }
1107 |
1108 | ///
1109 | /// performs perspective transformation of each element of multi-channel input matrix
1110 | ///
1111 | /// The source two-channel or three-channel floating-point array;
1112 | /// each element is 2D/3D vector to be transformed
1113 | /// 3x3 or 4x4 transformation matrix
1114 | /// The destination array; it will have the same size and same type as src
1115 | public Point3d[] perspectiveTransform(IEnumerable src, NDArray m)
1116 | {
1117 | return Cv2.PerspectiveTransform(src, m.AsMat());
1118 | }
1119 |
1120 | ///
1121 | /// extends the symmetrical matrix from the lower half or from the upper half
1122 | ///
1123 | /// Input-output floating-point square matrix
1124 | /// If true, the lower half is copied to the upper half,
1125 | /// otherwise the upper half is copied to the lower half
1126 | /// mtx
1127 | public NDArray completeSymm(NDArray mtx, bool lowerToUpper = false)
1128 | {
1129 | if (!mtx.CanConvertToMatWithouyCopy())
1130 | {
1131 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1132 | "completeSymm needs that. Please consider change the adapter mode.");
1133 | }
1134 | Cv2.CompleteSymm(mtx.AsMat(), lowerToUpper);
1135 | return mtx;
1136 | }
1137 |
1138 | ///
1139 | /// initializes scaled identity matrix
1140 | ///
1141 | /// The matrix to initialize (not necessarily square)
1142 | /// The value to assign to the diagonal elements
1143 | /// mtx
1144 | public NDArray setIdentity(NDArray mtx, Scalar? s = null)
1145 | {
1146 | if (!mtx.CanConvertToMatWithouyCopy())
1147 | {
1148 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1149 | "setIdentity needs that. Please consider change the adapter mode.");
1150 | }
1151 | Cv2.SetIdentity(mtx.AsMat(), s);
1152 | return mtx;
1153 | }
1154 |
1155 | ///
1156 | /// computes determinant of a square matrix
1157 | ///
1158 | /// The input matrix; must have CV_32FC1 or CV_64FC1 type and square size
1159 | /// determinant of the specified matrix.
1160 | public double determinant(NDArray mtx)
1161 | {
1162 | return Cv2.Determinant(mtx.AsMat());
1163 | }
1164 |
1165 | ///
1166 | /// computes trace of a matrix
1167 | ///
1168 | /// The source matrix
1169 | ///
1170 | public Scalar trace(NDArray mtx)
1171 | {
1172 | return Cv2.Trace(mtx.AsMat());
1173 | }
1174 |
1175 | ///
1176 | /// computes inverse or pseudo-inverse matrix
1177 | ///
1178 | /// The source floating-point MxN matrix
1179 | /// The destination matrix; will have NxM size and the same type as src
1180 | /// The inversion method
1181 | ///
1182 | public (double, NDArray) invert(NDArray src, DecompTypes flags = DecompTypes.LU)
1183 | {
1184 | Mat dstMat = new();
1185 | double retVal = Cv2.Invert(src.AsMat(), dstMat, flags);
1186 | return (retVal, dstMat.numpy());
1187 | }
1188 |
1189 | ///
1190 | /// solves linear system or a least-square problem
1191 | ///
1192 | ///
1193 | ///
1194 | ///
1195 | /// retVal and dst
1196 | public (bool, NDArray) solve(NDArray src1, NDArray src2, DecompTypes flags = DecompTypes.LU)
1197 | {
1198 | Mat dstMat = new();
1199 | bool retVal = Cv2.Solve(src1.AsMat(), src2.AsMat(), dstMat, flags);
1200 | return (retVal, dstMat.numpy());
1201 | }
1202 |
1203 | ///
1204 | /// Solve given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).
1205 | ///
1206 | /// This row-vector corresponds to \f$c\f$ in the LP problem formulation (see above).
1207 | /// It should contain 32- or 64-bit floating point numbers.As a convenience, column-vector may be also submitted,
1208 | /// in the latter case it is understood to correspond to \f$c^T\f$.
1209 | /// `m`-by-`n+1` matrix, whose rightmost column corresponds to \f$b\f$ in formulation above
1210 | /// and the remaining to \f$A\f$. It should containt 32- or 64-bit floating point numbers.
1211 | /// solve result and the solution will be returned here as a column-vector - it corresponds to \f$c\f$ in the
1212 | /// formulation above.It will contain 64-bit floating point numbers.
1213 | public (SolveLPResult, NDArray) solveLP(NDArray func, NDArray constr)
1214 | {
1215 | Mat dstMat = new();
1216 | var retVal = Cv2.SolveLP(func.AsMat(), constr.AsMat(), dstMat);
1217 | return (retVal, dstMat.numpy());
1218 | }
1219 |
1220 | public NDArray sort(NDArray src, SortFlags flags)
1221 | {
1222 | Mat dstMat = new();
1223 | Cv2.Sort(src.AsMat(), dstMat, flags);
1224 | return dstMat.numpy();
1225 | }
1226 |
1227 | public NDArray sortIdx(NDArray src, SortFlags flags)
1228 | {
1229 | Mat dstMat = new();
1230 | Cv2.SortIdx(src.AsMat(), dstMat, flags);
1231 | return dstMat.numpy();
1232 | }
1233 |
1234 | ///
1235 | /// finds real roots of a cubic polynomial
1236 | ///
1237 | /// The equation coefficients, an array of 3 or 4 elements
1238 | /// solve result and the destination array of real roots which will have 1 or 3 elements
1239 | public (int, NDArray) solveCubic(NDArray coeffs)
1240 | {
1241 | Mat dstMat = new();
1242 | int retVal = Cv2.SolveCubic(coeffs.AsMat(), dstMat);
1243 | return (retVal, dstMat.numpy());
1244 | }
1245 |
1246 | public NDArray solvePoly(NDArray coeffs, int maxIters = 300)
1247 | {
1248 | Mat dstMat = new();
1249 | Cv2.SolvePoly(coeffs.AsMat(), dstMat, maxIters);
1250 | return dstMat.numpy();
1251 | }
1252 |
1253 | ///
1254 | /// Computes eigenvalues and eigenvectors of a symmetric matrix.
1255 | ///
1256 | /// The input matrix; must have CV_32FC1 or CV_64FC1 type,
1257 | /// square size and be symmetric: src^T == src
1258 | /// The output vector of eigenvalues of the same type as src;
1259 | /// The eigenvalues are stored in the descending order.
1260 | /// The output matrix of eigenvectors;
1261 | /// It will have the same size and the same type as src; The eigenvectors are stored
1262 | /// as subsequent matrix rows, in the same order as the corresponding eigenvalues
1263 | /// solve result; the output vector of eigenvalues of the same type as src,
1264 | /// which are stored in the descending order; the output matrix of eigenvectors;
1265 | /// It will have the same size and the same type as src, which are stored
1266 | /// as subsequent matrix rows, in the same order as the corresponding eigenvalues
1267 | public (bool, NDArray, NDArray) eigen(NDArray src)
1268 | {
1269 | Mat valuesMat = new();
1270 | Mat vectorsMat = new();
1271 | bool retVal = Cv2.Eigen(src.AsMat(), valuesMat, vectorsMat);
1272 | return (retVal, valuesMat.numpy(), vectorsMat.numpy());
1273 | }
1274 |
1275 | public (NDArray, NDArray) eigenNonSymmetric(NDArray src)
1276 | {
1277 | Mat valuesMat = new();
1278 | Mat vectorsMat = new();
1279 | Cv2.EigenNonSymmetric(src.AsMat(), valuesMat, vectorsMat);
1280 | return (valuesMat.numpy(), vectorsMat.numpy());
1281 | }
1282 |
1283 | public (NDArray, NDArray, NDArray) PCACompute(NDArray data, NDArray mean, int maxComponents = 0)
1284 | {
1285 | if (!mean.CanConvertToMatWithouyCopy())
1286 | {
1287 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1288 | "PCACompute needs that. Please consider change the adapter mode.");
1289 | }
1290 | Mat vectorsMat = new();
1291 | Mat valuesMat = new();
1292 | Cv2.PCACompute(data.AsMat(), mean.AsMat(), vectorsMat, valuesMat, maxComponents);
1293 | return (mean, vectorsMat.numpy(), valuesMat.numpy());
1294 | }
1295 |
1296 | public (NDArray, NDArray, NDArray) PCAComputeVar(NDArray data, NDArray mean, double retainedVariance)
1297 | {
1298 | if (!mean.CanConvertToMatWithouyCopy())
1299 | {
1300 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1301 | "PCAComputeVar needs that. Please consider change the adapter mode.");
1302 | }
1303 | Mat vectorsMat = new();
1304 | Mat valuesMat = new();
1305 | Cv2.PCAComputeVar(data.AsMat(), mean.AsMat(), vectorsMat, valuesMat, retainedVariance);
1306 | return (mean, vectorsMat.numpy(), valuesMat.numpy());
1307 | }
1308 |
1309 | public NDArray PCAProject(NDArray data, NDArray mean, NDArray eigenvectors)
1310 | {
1311 | Mat dstMat = new();
1312 | Cv2.PCAProject(data.AsMat(), mean.AsMat(), eigenvectors.AsMat(), dstMat);
1313 | return dstMat.numpy();
1314 | }
1315 |
1316 | public NDArray PCABackProject(NDArray data, NDArray mean, NDArray eigenvectors)
1317 | {
1318 | Mat dstMat = new();
1319 | Cv2.PCABackProject(data.AsMat(), mean.AsMat(), eigenvectors.AsMat(), dstMat);
1320 | return dstMat.numpy();
1321 | }
1322 |
1323 | public (NDArray, NDArray, NDArray) SVDecomp(NDArray src, SVD.Flags flags = SVD.Flags.None)
1324 | {
1325 | Mat wMat = new();
1326 | Mat uMat = new();
1327 | Mat vtMat = new();
1328 | Cv2.SVDecomp(src.AsMat(), wMat, uMat, vtMat, flags);
1329 | return (wMat.numpy(), uMat.numpy(), vtMat.numpy());
1330 | }
1331 |
1332 | public NDArray SVBackSubst(NDArray w, NDArray u, NDArray vt, NDArray rhs)
1333 | {
1334 | Mat dstMat = new();
1335 | Cv2.SVBackSubst(w.AsMat(), u.AsMat(), vt.AsMat(), rhs.AsMat(), dstMat);
1336 | return dstMat.numpy();
1337 | }
1338 |
1339 | public double Mahalanobis(NDArray v1, NDArray v2, NDArray icover)
1340 | {
1341 | return Cv2.Mahalanobis(v1.AsMat(), v2.AsMat(), icover.AsMat());
1342 | }
1343 |
1344 | public NDArray dft(NDArray src, DftFlags flags = DftFlags.None, int nonzeroRows = 0)
1345 | {
1346 | Mat dstMat = new();
1347 | Cv2.Dft(src.AsMat(), dstMat, flags, nonzeroRows);
1348 | return dstMat.numpy();
1349 | }
1350 |
1351 | public NDArray idft(NDArray src, DftFlags flags = DftFlags.None, int nonzeroRows = 0)
1352 | {
1353 | Mat dstMat = new();
1354 | Cv2.Idft(src.AsMat(), dstMat, flags, nonzeroRows);
1355 | return dstMat.numpy();
1356 | }
1357 |
1358 | public NDArray dct(NDArray src, DctFlags flags = DctFlags.None)
1359 | {
1360 | Mat dstMat = new();
1361 | Cv2.Dct(src.AsMat(), dstMat, flags);
1362 | return dstMat.numpy();
1363 | }
1364 |
1365 | public NDArray idct(NDArray src, DctFlags flags = DctFlags.None)
1366 | {
1367 | Mat dstMat = new();
1368 | Cv2.Idct(src.AsMat(), dstMat, flags);
1369 | return dstMat.numpy();
1370 | }
1371 |
1372 | public NDArray mulSpectrums(NDArray a, NDArray b, DftFlags flags, bool conjB = false)
1373 | {
1374 | Mat dstMat = new();
1375 | Cv2.MulSpectrums(a.AsMat(), b.AsMat(), dstMat, flags, conjB);
1376 | return dstMat.numpy();
1377 | }
1378 |
1379 | public int getOptimalDFTSize(int vecSize)
1380 | {
1381 | return Cv2.GetOptimalDFTSize(vecSize);
1382 | }
1383 |
1384 | public NDArray randu(NDArray dst, NDArray low, NDArray high)
1385 | {
1386 | if (!dst.CanConvertToMatWithouyCopy())
1387 | {
1388 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1389 | "randu needs that. Please consider change the adapter mode.");
1390 | }
1391 | Cv2.Randu(dst.AsMat(), low.AsMat(), high.AsMat());
1392 | return dst;
1393 | }
1394 |
1395 | public NDArray randu(NDArray dst, Scalar low, Scalar high)
1396 | {
1397 | if (!dst.CanConvertToMatWithouyCopy())
1398 | {
1399 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1400 | "randu needs that. Please consider change the adapter mode.");
1401 | }
1402 | Cv2.Randu(dst.AsMat(), low, high);
1403 | return dst;
1404 | }
1405 |
1406 | public NDArray randn(NDArray dst, NDArray mean, NDArray stddev)
1407 | {
1408 | if (!dst.CanConvertToMatWithouyCopy())
1409 | {
1410 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1411 | "randu needs that. Please consider change the adapter mode.");
1412 | }
1413 | Cv2.Randn(dst.AsMat(), mean.AsMat(), stddev.AsMat());
1414 | return dst;
1415 | }
1416 |
1417 | public NDArray randn(NDArray dst, Scalar mean, Scalar stddev)
1418 | {
1419 | if (!dst.CanConvertToMatWithouyCopy())
1420 | {
1421 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1422 | "randu needs that. Please consider change the adapter mode.");
1423 | }
1424 | Cv2.Randn(dst.AsMat(), mean, stddev);
1425 | return dst;
1426 | }
1427 |
1428 | public (NDArray, NDArray?) kmeans(NDArray data, int k, NDArray bestLabels, TermCriteria criteria,
1429 | int attempts, KMeansFlags flags, NDArray? centers)
1430 | {
1431 | if (!bestLabels.CanConvertToMatWithouyCopy())
1432 | {
1433 | throw new ValueError("Cannot convert the NDArray to Mat without copy but the method " +
1434 | "kmeans needs that. Please consider change the adapter mode.");
1435 | }
1436 | Mat? centersMat = centers is null ? null : centers.AsMat();
1437 | Cv2.Kmeans(data.AsMat(), k, bestLabels.AsMat(), criteria, attempts, flags, centersMat);
1438 | if(centersMat is null)
1439 | {
1440 | return (bestLabels, null);
1441 | }
1442 | else
1443 | {
1444 | return (bestLabels, centersMat.ToNDArray(true));
1445 | }
1446 | }
1447 |
1448 | public float fastAten2(float y, float x)
1449 | {
1450 | return Cv2.FastAtan2(y, x);
1451 | }
1452 |
1453 | public float cubeRoot(float val)
1454 | {
1455 | return Cv2.CubeRoot(val);
1456 | }
1457 |
1458 | public void setNumThreads(int nThreads)
1459 | {
1460 | Cv2.SetNumThreads(nThreads);
1461 | }
1462 |
1463 | public int getNumThreads()
1464 | {
1465 | return Cv2.GetNumThreads();
1466 | }
1467 |
1468 | public int getThreadNum()
1469 | {
1470 | return Cv2.GetThreadNum();
1471 | }
1472 |
1473 | public string getBuildInformation()
1474 | {
1475 | return Cv2.GetBuildInformation();
1476 | }
1477 |
1478 | public string? getVersionString()
1479 | {
1480 | return Cv2.GetVersionString();
1481 | }
1482 |
1483 | public int getVersionMajor()
1484 | {
1485 | return Cv2.GetVersionMajor();
1486 | }
1487 |
1488 | public int getVersionMinor()
1489 | {
1490 | return Cv2.GetVersionMinor();
1491 | }
1492 |
1493 | public int getVersionRevision()
1494 | {
1495 | return Cv2.GetVersionRevision();
1496 | }
1497 |
1498 | public long getTickCount()
1499 | {
1500 | return Cv2.GetTickCount();
1501 | }
1502 |
1503 | public double getTickFrequency()
1504 | {
1505 | return Cv2.GetTickFrequency();
1506 | }
1507 |
1508 | public long getCpuTickCount()
1509 | {
1510 | return Cv2.GetCpuTickCount();
1511 | }
1512 |
1513 | public bool checkHardwareSupport(CpuFeatures feature)
1514 | {
1515 | return Cv2.CheckHardwareSupport(feature);
1516 | }
1517 |
1518 | public string getHardwareFeatureName(CpuFeatures feature)
1519 | {
1520 | return Cv2.GetHardwareFeatureName(feature);
1521 | }
1522 |
1523 | public string getCpuFeaturesLine()
1524 | {
1525 | return Cv2.GetCpuFeaturesLine();
1526 | }
1527 |
1528 | public int getNumberOfCpus()
1529 | {
1530 | return Cv2.GetNumberOfCpus();
1531 | }
1532 |
1533 | public void setUseOptimized(bool onoff)
1534 | {
1535 | Cv2.SetUseOptimized(onoff);
1536 | }
1537 |
1538 | public bool useOptimized()
1539 | {
1540 | return Cv2.UseOptimized();
1541 | }
1542 |
1543 | public int alignSize(int sz, int n)
1544 | {
1545 | return Cv2.AlignSize(sz, n);
1546 | }
1547 |
1548 | public bool setBreakOnError(bool flag)
1549 | {
1550 | return Cv2.SetBreakOnError(flag);
1551 | }
1552 |
1553 | public string format(NDArray mtx, FormatType format = FormatType.Default)
1554 | {
1555 | return Cv2.Format(mtx.AsMat(), format);
1556 | }
1557 | }
1558 | }
1559 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 |
5 | namespace Tensorflow.OpencvAdapter.APIs
6 | {
    /// <summary>
    /// The python style cv2 API.
    /// </summary>
    public partial class Cv2API
    {
        /// <summary>
        /// The ratio of a circle's circumference to its diameter.
        /// </summary>
        public const double PI = 3.1415926535897932384626433832795;

        /// <summary>
        /// The natural logarithm of 2 (ln 2 ≈ 0.6931).
        /// </summary>
        public const double LOG2 = 0.69314718055994530941723212145818;

        /// <summary>
        /// Special thickness value meaning a drawn shape should be filled.
        /// </summary>
        public const int FILLED = -1;
    }
27 | }
28 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.highgui.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using Tensorflow.NumPy;
6 | using Tensorflow.OpencvAdapter.Extensions;
7 |
8 | namespace Tensorflow.OpencvAdapter.APIs
9 | {
10 | public partial class Cv2API
11 | {
12 | ///
13 | /// Creates a window.
14 | ///
15 | /// Name of the window in the window caption that may be used as a window identifier.
16 | ///
17 | /// Flags of the window. Currently the only supported flag is CV WINDOW AUTOSIZE. If this is set,
18 | /// the window size is automatically adjusted to fit the displayed image (see imshow ), and the user can not change the window size manually.
19 | ///
20 | public void namedWindow(string winName, WindowFlags flags = WindowFlags.Normal)
21 | {
22 | Cv2.NamedWindow(winName, flags);
23 | }
24 |
25 | ///
26 | /// Destroys the specified window.
27 | ///
28 | ///
29 | public void destroyWindow(string winName)
30 | {
31 | Cv2.DestroyWindow(winName);
32 | }
33 |
34 | ///
35 | /// Destroys all of the HighGUI windows.
36 | ///
37 | public void destroyAllWindows()
38 | {
39 | Cv2.DestroyAllWindows();
40 | }
41 |
42 | public void startWindowThread()
43 | {
44 | Cv2.StartWindowThread();
45 | }
46 |
47 | ///
48 | /// Waits for a pressed key.
49 | /// Similar to #waitKey, but returns full key code.
50 | /// Key code is implementation specific and depends on used backend: QT/GTK/Win32/etc
51 | ///
52 | /// Delay in milliseconds. 0 is the special value that means ”forever”
53 | /// Returns the code of the pressed key or -1 if no key was pressed before the specified time had elapsed.
54 | public void waitKeyEx(int delay = 0)
55 | {
56 | Cv2.WaitKeyEx(delay);
57 | }
58 |
59 | ///
60 | /// Waits for a pressed key.
61 | ///
62 | /// Delay in milliseconds. 0 is the special value that means ”forever”
63 | /// Returns the code of the pressed key or -1 if no key was pressed before the specified time had elapsed.
64 | public void waitKey(int delay = 0)
65 | {
66 | Cv2.WaitKey(delay);
67 | }
68 |
69 | ///
70 | /// Displays the image in the specified window
71 | ///
72 | /// Name of the window.
73 | /// Image to be shown.
74 | public void imshow(string winName, NDArray mat)
75 | {
76 | Cv2.ImShow(winName, mat.AsMat());
77 | }
78 |
79 | ///
80 | /// Resizes window to the specified size
81 | ///
82 | /// Window name
83 | /// The new window width
84 | /// The new window height
85 | public void resizeWindow(string winName, int width, int height)
86 | {
87 | Cv2.ResizeWindow(winName, width, height);
88 | }
89 |
90 | ///
91 | /// Resizes window to the specified size
92 | ///
93 | /// Window name
94 | /// The new window size
95 | public void resizeWindow(string winName, Size size)
96 | {
97 | Cv2.ResizeWindow(winName, size);
98 | }
99 |
100 | ///
101 | /// Moves window to the specified position
102 | ///
103 | /// Window name
104 | /// The new x-coordinate of the window
105 | /// The new y-coordinate of the window
106 | public void moveWindow(string winName, int x, int y)
107 | {
108 | Cv2.ResizeWindow(winName, x, y);
109 | }
110 |
111 | ///
112 | /// Changes parameters of a window dynamically.
113 | ///
114 | /// Name of the window.
115 | /// Window property to retrieve.
116 | /// New value of the window property.
117 | public void setWindowProperty(string winName, WindowPropertyFlags propId, double propValue)
118 | {
119 | Cv2.SetWindowProperty(winName, propId, propValue);
120 | }
121 |
122 | ///
123 | /// Updates window title
124 | ///
125 | /// Name of the window
126 | /// New title
127 | public void setWindowTitle(string winName, string title)
128 | {
129 | Cv2.SetWindowTitle(winName, title);
130 | }
131 |
132 | ///
133 | /// Provides parameters of a window.
134 | ///
135 | /// Name of the window.
136 | /// Window property to retrieve.
137 | ///
138 | public double getWindowProperty(string winName, WindowPropertyFlags propId)
139 | {
140 | return Cv2.GetWindowProperty(winName, propId);
141 | }
142 |
143 | ///
144 | /// Provides rectangle of image in the window.
145 | /// The function getWindowImageRect returns the client screen coordinates, width and height of the image rendering area.
146 | ///
147 | /// Name of the window.
148 | ///
149 | public Rect GetWindowImageRect(string winName)
150 | {
151 | return GetWindowImageRect(winName);
152 | }
153 |
154 | ///
155 | /// Sets the callback function for mouse events occuring within the specified window.
156 | ///
157 | /// Name of the window.
158 | /// Reference to the function to be called every time mouse event occurs in the specified window.
159 | ///
160 | public void setMouseCallback(string windowName, MouseCallback onMouse, IntPtr userData = default)
161 | {
162 | Cv2.SetMouseCallback(windowName, onMouse, userData);
163 | }
164 |
165 | ///
166 | /// Gets the mouse-wheel motion delta, when handling mouse-wheel events cv::EVENT_MOUSEWHEEL and cv::EVENT_MOUSEHWHEEL.
167 | ///
168 | /// For regular mice with a scroll-wheel, delta will be a multiple of 120. The value 120 corresponds to
169 | /// a one notch rotation of the wheel or the threshold for action to be taken and one such action should
170 | /// occur for each delta.Some high-precision mice with higher-resolution freely-rotating wheels may
171 | /// generate smaller values.
172 | ///
173 | /// For cv::EVENT_MOUSEWHEEL positive and negative values mean forward and backward scrolling,
174 | /// respectively.For cv::EVENT_MOUSEHWHEEL, where available, positive and negative values mean right and
175 | /// left scrolling, respectively.
176 | ///
177 | /// The mouse callback flags parameter.
178 | ///
179 | public int getMouseWheelDelta(MouseEventFlags flags)
180 | {
181 | return Cv2.GetMouseWheelDelta(flags);
182 | }
183 |
184 | ///
185 | /// Selects ROI on the given image.
186 | /// Function creates a window and allows user to select a ROI using mouse.
187 | /// Controls: use `space` or `enter` to finish selection, use key `c` to cancel selection (function will return the zero cv::Rect).
188 | ///
189 | /// name of the window where selection process will be shown.
190 | /// image to select a ROI.
191 | /// if true crosshair of selection rectangle will be shown.
192 | /// if true center of selection will match initial mouse position. In opposite case a corner of
193 | /// selection rectangle will correspond to the initial mouse position.
194 | /// selected ROI or empty rect if selection canceled.
195 | public Rect selectROI(string windowName, NDArray img, bool showCrosshair = true, bool fromCenter = false)
196 | {
197 | return Cv2.SelectROI(windowName, img.AsMat(), showCrosshair, fromCenter);
198 | }
199 |
200 | ///
201 | /// Selects ROI on the given image.
202 | /// Function creates a window and allows user to select a ROI using mouse.
203 | /// Controls: use `space` or `enter` to finish selection, use key `c` to cancel selection (function will return the zero cv::Rect).
204 | ///
205 | /// image to select a ROI.
206 | /// if true crosshair of selection rectangle will be shown.
207 | /// if true center of selection will match initial mouse position. In opposite case a corner of
208 | /// selection rectangle will correspond to the initial mouse position.
209 | /// selected ROI or empty rect if selection canceled.
210 | public Rect selectROI(NDArray img, bool showCrosshair = true, bool fromCenter = false)
211 | {
212 | return Cv2.SelectROI(img.AsMat(), showCrosshair, fromCenter);
213 | }
214 |
215 | ///
216 | /// Selects ROIs on the given image.
217 | /// Function creates a window and allows user to select a ROIs using mouse.
218 | /// Controls: use `space` or `enter` to finish current selection and start a new one,
219 | /// use `esc` to terminate multiple ROI selection process.
220 | ///
221 | /// name of the window where selection process will be shown.
222 | /// image to select a ROI.
223 | /// if true crosshair of selection rectangle will be shown.
224 | /// if true center of selection will match initial mouse position. In opposite case a corner of
225 | /// selection rectangle will correspond to the initial mouse position.
226 | /// selected ROIs.
227 | public static Rect[] selectROIs(string windowName, NDArray img, bool showCrosshair = true, bool fromCenter = false)
228 | {
229 | return Cv2.SelectROIs(windowName, img.AsMat(), showCrosshair, fromCenter);
230 | }
231 |
232 | ///
233 | /// Creates a trackbar and attaches it to the specified window.
234 | /// The function createTrackbar creates a trackbar(a slider or range control) with the specified name
235 | /// and range, assigns a variable value to be a position synchronized with the trackbar and specifies
236 | /// the callback function onChange to be called on the trackbar position change.The created trackbar is
237 | /// displayed in the specified window winName.
238 | ///
239 | /// Name of the created trackbar.
240 | /// Name of the window that will be used as a parent of the created trackbar.
241 | /// Optional pointer to an integer variable whose value reflects the position of the slider.Upon creation,
242 | /// the slider position is defined by this variable.
243 | /// Maximal position of the slider. The minimal position is always 0.
244 | /// Pointer to the function to be called every time the slider changes position.
245 | /// This function should be prototyped as void Foo(int, void\*); , where the first parameter is the trackbar
246 | /// position and the second parameter is the user data(see the next parameter). If the callback is
247 | /// the NULL pointer, no callbacks are called, but only value is updated.
248 | /// User data that is passed as is to the callback. It can be used to handle trackbar events without using global variables.
249 | ///
250 | public int createTrackbar(string trackbarName, string winName,
251 | ref int value, int count, TrackbarCallbackNative? onChange = null, IntPtr userData = default)
252 | {
253 | return Cv2.CreateTrackbar(trackbarName, winName, ref value, count, onChange, userData);
254 | }
255 |
256 | ///
257 | /// Creates a trackbar and attaches it to the specified window.
258 | /// The function createTrackbar creates a trackbar(a slider or range control) with the specified name
259 | /// and range, assigns a variable value to be a position synchronized with the trackbar and specifies
260 | /// the callback function onChange to be called on the trackbar position change.The created trackbar is
261 | /// displayed in the specified window winName.
262 | ///
263 | /// Name of the created trackbar.
264 | /// Name of the window that will be used as a parent of the created trackbar.
265 | /// Maximal position of the slider. The minimal position is always 0.
266 | /// Pointer to the function to be called every time the slider changes position.
267 | /// This function should be prototyped as void Foo(int, void\*); , where the first parameter is the trackbar
268 | /// position and the second parameter is the user data(see the next parameter). If the callback is
269 | /// the NULL pointer, no callbacks are called, but only value is updated.
270 | /// User data that is passed as is to the callback. It can be used to handle trackbar events without using global variables.
271 | ///
272 | public int createTrackbar(string trackbarName, string winName,
273 | int count, TrackbarCallbackNative? onChange = null, IntPtr userData = default)
274 | {
275 | return Cv2.CreateTrackbar(trackbarName, winName, count, onChange, userData);
276 | }
277 |
278 | ///
279 | /// Returns the trackbar position.
280 | ///
281 | /// Name of the trackbar.
282 | /// Name of the window that is the parent of the trackbar.
283 | /// trackbar position
284 | public int getTrackbarPos(string trackbarName, string winName)
285 | {
286 | return Cv2.GetTrackbarPos(trackbarName, winName);
287 | }
288 |
289 | ///
290 | /// Sets the trackbar position.
291 | ///
292 | /// Name of the trackbar.
293 | /// Name of the window that is the parent of trackbar.
294 | /// New position.
295 | public void setTrackbarPos(string trackbarName, string winName, int pos)
296 | {
297 | Cv2.SetTrackbarPos(trackbarName, winName, pos);
298 | }
299 |
300 | ///
301 | /// Sets the trackbar maximum position.
302 | /// The function sets the maximum position of the specified trackbar in the specified window.
303 | ///
304 | /// Name of the trackbar.
305 | /// Name of the window that is the parent of trackbar.
306 | /// New maximum position.
307 | public void setTrackbarMax(string trackbarName, string winName, int maxVal)
308 | {
309 | Cv2.SetTrackbarMax(trackbarName, winName, maxVal);
310 | }
311 |
312 | ///
313 | /// Sets the trackbar minimum position.
314 | /// The function sets the minimum position of the specified trackbar in the specified window.
315 | ///
316 | /// Name of the trackbar.
317 | /// Name of the window that is the parent of trackbar.
318 | /// New minimum position.
319 | public void setTrackbarMin(string trackbarName, string winName, int maxVal)
320 | {
321 | Cv2.SetTrackbarMin(trackbarName, winName, maxVal);
322 | }
323 |
324 | public IntPtr getWindowHandle(string windowName)
325 | {
326 | return Cv2.GetWindowHandle(windowName);
327 | }
328 | }
329 | }
330 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.imgcodecs.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 | using Tensorflow.NumPy;
7 | using Tensorflow.OpencvAdapter.Extensions;
8 |
9 | namespace Tensorflow.OpencvAdapter.APIs
10 | {
11 | public partial class Cv2API
12 | {
13 | ///
14 | /// Loads an image from a file.
15 | ///
16 | /// Name of file to be loaded.
17 | /// Specifies color type of the loaded image
18 | ///
19 | public NDArray imread(string filename, ImreadModes flags = ImreadModes.Color)
20 | {
21 | return Cv2.ImRead(filename, flags).ToNDArray(clone: false);
22 | }
23 |
24 | ///
25 | /// Loads a multi-page image from a file.
26 | ///
27 | /// Name of file to be loaded.
28 | /// A vector of Mat objects holding each page, if more than one.
29 | /// Flag that can take values of @ref cv::ImreadModes, default with IMREAD_ANYCOLOR.
30 | ///
31 | public bool imreadmulti(string filename, out NDArray[] arrays, ImreadModes flags = ImreadModes.AnyColor)
32 | {
33 | bool res = Cv2.ImReadMulti(filename, out var mats, flags);
34 | arrays = mats.Select(m => m.ToNDArray(clone: false)).ToArray();
35 | return res;
36 | }
37 |
38 | ///
39 | /// Saves an image to a specified file.
40 | ///
41 | /// Name of the file.
42 | /// Image to be saved.
43 | /// Format-specific save parameters encoded as pairs
44 | ///
45 | public bool imwrite(string filename, NDArray img, int[]? prms = null)
46 | {
47 | return Cv2.ImWrite(filename, img.AsMat(), prms);
48 | }
49 |
50 | ///
51 | /// Saves an image to a specified file.
52 | ///
53 | /// Name of the file.
54 | /// Image to be saved.
55 | /// Format-specific save parameters encoded as pairs
56 | ///
57 | public bool imwrite(string filename, NDArray img, params ImageEncodingParam[] prms)
58 | {
59 | return Cv2.ImWrite(filename, img.AsMat(), prms);
60 | }
61 |
62 | ///
63 | /// Saves an image to a specified file.
64 | ///
65 | /// Name of the file.
66 | /// Image to be saved.
67 | /// Format-specific save parameters encoded as pairs
68 | ///
69 | public bool imwrite(string filename, IEnumerable img, int[]? prms = null)
70 | {
71 | return Cv2.ImWrite(filename, img.Select(x => x.AsMat()), prms);
72 | }
73 |
74 | ///
75 | /// Saves an image to a specified file.
76 | ///
77 | /// Name of the file.
78 | /// Image to be saved.
79 | /// Format-specific save parameters encoded as pairs
80 | ///
81 | public bool imwrite(string filename, IEnumerable img, params ImageEncodingParam[] prms)
82 | {
83 | return Cv2.ImWrite(filename, img.Select(x => x.AsMat()), prms);
84 | }
85 |
86 | ///
87 | /// Reads image from the specified buffer in memory.
88 | ///
89 | /// The input array of vector of bytes.
90 | /// The same flags as in imread
91 | ///
92 | public NDArray imdecode(NDArray buf, ImreadModes flags)
93 | {
94 | return Cv2.ImDecode(buf.AsMat(), flags).ToNDArray(clone: false);
95 | }
96 |
97 | ///
98 | /// Reads image from the specified buffer in memory.
99 | ///
100 | /// The input array of vector of bytes.
101 | /// The same flags as in imread
102 | ///
103 | public NDArray imdecode(byte[] buf, ImreadModes flags)
104 | {
105 | return Cv2.ImDecode(buf, flags).ToNDArray(clone: false);
106 | }
107 |
108 | ///
109 | /// Reads image from the specified buffer in memory.
110 | ///
111 | /// The input slice of bytes.
112 | /// The same flags as in imread
113 | ///
114 | public NDArray imdecode(ReadOnlySpan buf, ImreadModes flags)
115 | {
116 | return Cv2.ImDecode(buf, flags).ToNDArray(clone: false);
117 | }
118 |
119 | ///
120 | /// Compresses the image and stores it in the memory buffer
121 | ///
122 | /// The file extension that defines the output format
123 | /// The image to be written
124 | /// Output buffer resized to fit the compressed image.
125 | /// Format-specific parameters.
126 | public bool imencode(string ext, NDArray img, out byte[] buf, int[]? prms = null)
127 | {
128 | return Cv2.ImEncode(ext, img.AsMat(), out buf, prms);
129 | }
130 |
131 | ///
132 | /// Compresses the image and stores it in the memory buffer
133 | ///
134 | /// The file extension that defines the output format
135 | /// The image to be written
136 | /// Output buffer resized to fit the compressed image.
137 | /// Format-specific parameters.
138 | public void imencode(string ext, NDArray img, out byte[] buf, params ImageEncodingParam[] prms)
139 | {
140 | Cv2.ImEncode(ext, img.AsMat(), out buf, prms);
141 | }
142 |
143 | ///
144 | ///
145 | ///
146 | ///
147 | ///
148 | public bool haveImageReader(string filename)
149 | {
150 | return Cv2.HaveImageReader(filename);
151 | }
152 |
153 | ///
154 | ///
155 | ///
156 | ///
157 | ///
158 | public bool haveImageWriter(string filename)
159 | {
160 | return Cv2.HaveImageWriter(filename);
161 | }
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.objdetect.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 |
6 | namespace Tensorflow.OpencvAdapter.APIs
7 | {
8 | public partial class Cv2API
9 | {
10 | ///
11 | /// Groups the object candidate rectangles.
12 | ///
13 | /// Input/output vector of rectangles. Output vector includes retained and grouped rectangles.
14 | /// Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
15 | ///
16 | public void groupRectangles(IList rectList, int groupThreshold, double eps = 0.2)
17 | {
18 | Cv2.GroupRectangles(rectList, groupThreshold, eps);
19 | }
20 |
21 | ///
22 | /// Groups the object candidate rectangles.
23 | ///
24 | ///
25 | ///
26 | ///
27 | ///
28 | ///
29 | public void groupRectangles(IList rectList, int groupThreshold, double eps, out int[] weights, out double[] levelWeights)
30 | {
31 | Cv2.GroupRectangles(rectList, groupThreshold, eps, out weights, out levelWeights);
32 | }
33 |
34 | ///
35 | /// Groups the object candidate rectangles.
36 | ///
37 | ///
38 | ///
39 | ///
40 | ///
41 | ///
42 | public void groupRectangles(IList rectList, out int[] rejectLevels, out double[] levelWeights, int groupThreshold, double eps = 0.2)
43 | {
44 | Cv2.GroupRectangles(rectList, out rejectLevels, out levelWeights, groupThreshold, eps);
45 | }
46 |
47 | ///
48 | ///
49 | ///
50 | ///
51 | ///
52 | ///
53 | ///
54 | ///
55 | public static void groupRectanglesMeanshift(IList rectList, out double[] foundWeights,
56 | out double[] foundScales, double detectThreshold = 0.0, Size? winDetSize = null)
57 | {
58 | Cv2.GroupRectanglesMeanshift(rectList, out foundWeights, out foundScales, detectThreshold, winDetSize);
59 | }
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.photo.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 | using Tensorflow.NumPy;
7 | using Tensorflow.OpencvAdapter.Extensions;
8 |
9 | namespace Tensorflow.OpencvAdapter.APIs
10 | {
11 | public partial class Cv2API
12 | {
13 | ///
14 | /// Restores the selected region in an image using the region neighborhood.
15 | ///
16 | /// Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
17 | /// Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted.
18 | /// Radius of a circular neighborhood of each point inpainted that is considered by the algorithm.
19 | /// Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
20 | /// Output image with the same size and type as src.
21 | public NDArray inpaint(NDArray src, NDArray inpaintMask, double inpaintRadius, InpaintMethod flags)
22 | {
23 | Mat dstMat = new();
24 | Cv2.Inpaint(src.AsMat(), inpaintMask.AsMat(), dstMat, inpaintRadius, flags);
25 | return dstMat.numpy();
26 | }
27 |
28 | ///
29 | /// Perform image denoising using Non-local Means Denoising algorithm
30 | /// with several computational optimizations. Noise expected to be a gaussian white noise
31 | ///
32 | /// Input 8-bit 1-channel, 2-channel or 3-channel image.
33 | ///
34 | /// Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details,
35 | /// smaller h value preserves details but also preserves some noise
36 | ///
37 | /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels
38 | ///
39 | /// Size in pixels of the window that is used to compute weighted average for given pixel.
40 | /// Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels
41 | /// Output image with the same size and type as src .
42 | public NDArray fastNlMeansDenoising(NDArray src, float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
43 | {
44 | Mat dstMat = new();
45 | Cv2.FastNlMeansDenoising(src.AsMat(), dstMat, h, templateWindowSize, searchWindowSize);
46 | return dstMat.numpy();
47 | }
48 |
49 | ///
50 | /// Modification of fastNlMeansDenoising function for colored images
51 | ///
52 | /// Input 8-bit 3-channel image.
53 | /// Parameter regulating filter strength for luminance component.
54 | /// Bigger h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise
55 | /// The same as h but for color components. For most images value equals 10 will be enought
56 | /// to remove colored noise and do not distort colors
57 | ///
58 | /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels
59 | ///
60 | /// Size in pixels of the window that is used to compute weighted average for given pixel. Should be odd.
61 | /// Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels
62 | /// Output image with the same size and type as src.
63 | public NDArray fastNlMeansDenoisingColored(NDArray src, float h = 3, float hColor = 3,
64 | int templateWindowSize = 7, int searchWindowSize = 21)
65 | {
66 | Mat dstMat = new();
67 | Cv2.FastNlMeansDenoisingColored(src.AsMat(), dstMat, h, hColor, templateWindowSize, searchWindowSize);
68 | return dstMat.numpy();
69 | }
70 |
71 | ///
72 | /// Modification of fastNlMeansDenoising function for images sequence where consequtive images have been captured
73 | /// in small period of time. For example video. This version of the function is for grayscale images or for manual manipulation with colorspaces.
74 | ///
75 | /// Input 8-bit 1-channel, 2-channel or 3-channel images sequence. All images should have the same type and size.
76 | /// Target image to denoise index in srcImgs sequence
77 | /// Number of surrounding images to use for target image denoising.
78 | /// Should be odd. Images from imgToDenoiseIndex - temporalWindowSize / 2 to imgToDenoiseIndex - temporalWindowSize / 2
79 | /// from srcImgs will be used to denoise srcImgs[imgToDenoiseIndex] image.
80 | /// Parameter regulating filter strength for luminance component. Bigger h value perfectly removes noise but also removes image details,
81 | /// smaller h value preserves details but also preserves some noise
82 | /// Size in pixels of the template patch that is used to compute weights. Should be odd. Recommended value 7 pixels
83 | /// Size in pixels of the window that is used to compute weighted average for given pixel.
84 | /// Should be odd. Affect performance linearly: greater searchWindowsSize - greater denoising time. Recommended value 21 pixels
85 | /// Output image with the same size and type as srcImgs images.
86 | public NDArray fastNlMeansDenoisingMulti(IEnumerable srcImgs, int imgToDenoiseIndex, int temporalWindowSize,
87 | float h = 3, int templateWindowSize = 7, int searchWindowSize = 21)
88 | {
89 | Mat dstMat = new();
90 | Cv2.FastNlMeansDenoisingMulti(srcImgs.Select(x => x.AsMat()), dstMat, imgToDenoiseIndex, temporalWindowSize,
91 | h, templateWindowSize, searchWindowSize);
92 | return dstMat.numpy();
93 | }
94 |
95 | ///
96 | /// Primal-dual algorithm is an algorithm for solving special types of variational problems
97 | /// (that is, finding a function to minimize some functional). As the image denoising,
98 | /// in particular, may be seen as the variational problem, primal-dual algorithm then
99 | /// can be used to perform denoising and this is exactly what is implemented.
100 | ///
101 | /// This array should contain one or more noised versions
102 | /// of the image that is to be restored.
103 | /// Corresponds to \f$\lambda\f$ in the formulas above.
104 | /// As it is enlarged, the smooth (blurred) images are treated more favorably than
105 | /// detailed (but maybe more noised) ones. Roughly speaking, as it becomes smaller,
106 | /// the result will be more blur but more sever outliers will be removed.
107 | /// Number of iterations that the algorithm will run.
108 | /// Of course, as more iterations as better, but it is hard to quantitatively
109 | /// refine this statement, so just use the default and increase it if the results are poor.
110 | /// Here the denoised image will be stored. There is no need to
111 | /// do pre-allocation of storage space, as it will be automatically allocated, if necessary.
112 | public NDArray denoiseTVL1(IEnumerable observations, double lambda = 1.0, int niters = 30)
113 | {
114 | Mat dstMat = new();
115 | Cv2.DenoiseTVL1(observations.Select(x => x.AsMat()), dstMat, lambda, niters);
116 | return dstMat.numpy();
117 | }
118 |
119 | ///
120 | /// Transforms a color image to a grayscale image. It is a basic tool in digital
121 | /// printing, stylized black-and-white photograph rendering, and in many single
122 | /// channel image processing applications @cite CL12 .
123 | ///
124 | /// Input 8-bit 3-channel image.
125 | /// grayscale Mat and color boost Mat
126 | public (NDArray, NDArray) decolor(NDArray src)
127 | {
128 | Mat grayscaleMat = new();
129 | Mat colorBoostMat = new();
130 | Cv2.Decolor(src.AsMat(), grayscaleMat, colorBoostMat);
131 | return (grayscaleMat.numpy(), colorBoostMat.numpy());
132 | }
133 |
134 | ///
135 | /// Image editing tasks concern either global changes (color/intensity corrections,
136 | /// filters, deformations) or local changes concerned to a selection. Here we are
137 | /// interested in achieving local changes, ones that are restricted to a region
138 | /// manually selected (ROI), in a seamless and effortless manner. The extent of
139 | /// the changes ranges from slight distortions to complete replacement by novel
140 | /// content @cite PM03 .
141 | ///
142 | /// Input 8-bit 3-channel image.
143 | /// Input 8-bit 3-channel image.
144 | /// Input 8-bit 1 or 3-channel image.
145 | /// Point in dst image where object is placed.
146 | /// Cloning method
147 | /// Output image with the same size and type as dst.
148 | public NDArray seamlessClone(NDArray src, NDArray dst, NDArray? mask, Point p, SeamlessCloneMethods flags)
149 | {
150 | Mat blendMat = new();
151 | Cv2.SeamlessClone(src.AsMat(), dst.AsMat(), mask.ToInputArray(), p, blendMat, flags);
152 | return blendMat.numpy();
153 | }
154 |
155 | ///
156 | /// Given an original color image, two differently colored versions of this
157 | /// image can be mixed seamlessly. Multiplication factor is between 0.5 to 2.5.
158 | ///
159 | /// Input 8-bit 3-channel image.
160 | /// Input 8-bit 1 or 3-channel image.
161 | /// R-channel multiply factor.
162 | /// G-channel multiply factor.
163 | /// B-channel multiply factor.
164 | /// Output image with the same size and type as src.
165 | public NDArray colorChange(NDArray src, NDArray? mask, float redMul = 1.0f, float greenMul = 1.0f, float blueMul = 1.0f)
166 | {
167 | Mat dstMat = new();
168 | Cv2.ColorChange(src.AsMat(), mask.ToInputArray(), dstMat, redMul, greenMul, blueMul);
169 | return dstMat.numpy();
170 | }
171 |
172 | ///
173 | /// Applying an appropriate non-linear transformation to the gradient field inside
174 | /// the selection and then integrating back with a Poisson solver, modifies locally
175 | /// the apparent illumination of an image.
176 | ///
177 | /// Input 8-bit 3-channel image.
178 | /// Input 8-bit 1 or 3-channel image.
179 | /// Value ranges between 0-2.
180 | /// Value ranges between 0-2.
181 | /// Output image with the same size and type as src.
182 | ///
183 | /// This is useful to highlight under-exposed foreground objects or to reduce specular reflections.
184 | ///
185 | public NDArray illuminationChange(NDArray src, NDArray? mask, float alpha = 0.2f, float beta = 0.4f)
186 | {
187 | Mat dstMat = new();
188 | Cv2.IlluminationChange(src.AsMat(), mask.ToInputArray(), dstMat, alpha, beta);
189 | return dstMat.numpy();
190 | }
191 |
192 | ///
193 | /// By retaining only the gradients at edge locations, before integrating with the
194 | /// Poisson solver, one washes out the texture of the selected region, giving its
195 | /// contents a flat aspect. Here Canny Edge Detector is used.
196 | ///
197 | /// Input 8-bit 3-channel image.
198 | /// Input 8-bit 1 or 3-channel image.
199 | /// Range from 0 to 100.
200 | /// Value > 100.
201 | /// The size of the Sobel kernel to be used.
202 | /// Output image with the same size and type as src.
203 | public NDArray textureFlattening(NDArray src, NDArray? mask, float lowThreshold = 30, float highThreshold = 45,
204 | int kernelSize = 3)
205 | {
206 | Mat dstMat = new();
207 | Cv2.TextureFlattening(src.AsMat(), mask.ToInputArray(), dstMat, lowThreshold, highThreshold, kernelSize);
208 | return dstMat.numpy();
209 | }
210 |
211 | ///
212 | /// Filtering is the fundamental operation in image and video processing.
213 | /// Edge-preserving smoothing filters are used in many different applications @cite EM11 .
214 | ///
215 | /// Input 8-bit 3-channel image.
216 | /// Edge preserving filters
217 | /// Range between 0 to 200.
218 | /// Range between 0 to 1.
219 | /// Output 8-bit 3-channel image.
220 | public NDArray edgePreservingFilter(NDArray src, EdgePreservingMethods flags = EdgePreservingMethods.RecursFilter,
221 | float sigmaS = 60, float sigmaR = 0.4f)
222 | {
223 | Mat dstMat = new();
224 | Cv2.EdgePreservingFilter(src.AsMat(), dstMat, flags, sigmaS, sigmaR);
225 | return dstMat.numpy();
226 | }
227 |
228 | ///
229 | /// This filter enhances the details of a particular image.
230 | ///
231 | /// Input 8-bit 3-channel image.
232 | /// Output image with the same size and type as src.
233 | /// Range between 0 to 200.
234 | /// Range between 0 to 1.
235 | public NDArray detailEnhance(NDArray src, float sigmaS = 60, float sigmaR = 0.4f)
236 | {
237 | Mat dstMat = new();
238 | Cv2.DetailEnhance(src.AsMat(), dstMat, sigmaS, sigmaR);
239 | return dstMat.numpy();
240 | }
241 |
242 | ///
243 | /// Pencil-like non-photorealistic line drawing
244 | ///
245 | /// Input 8-bit 3-channel image.
246 | /// Range between 0 to 200.
247 | /// Range between 0 to 1.
248 | /// Range between 0 to 0.1.
249 | /// output 8-bit 1-channel image and output image with the same size and type as src
250 | public (NDArray, NDArray) pencilSketch(NDArray src, float sigmaS = 60, float sigmaR = 0.07f, float shadeFactor = 0.02f)
251 | {
252 | Mat dst1Mat = new();
253 | Mat dst2Mat = new();
254 | Cv2.PencilSketch(src.AsMat(), dst1Mat, dst2Mat, sigmaS, sigmaR, shadeFactor);
255 | return (dst1Mat.numpy(), dst2Mat.numpy());
256 | }
257 |
258 | ///
259 | /// Stylization aims to produce digital imagery with a wide variety of effects
260 | /// not focused on photorealism. Edge-aware filters are ideal for stylization,
261 | /// as they can abstract regions of low contrast while preserving, or enhancing,
262 | /// high-contrast features.
263 | ///
264 | /// Input 8-bit 3-channel image.
265 | /// Range between 0 to 200.
266 | /// Range between 0 to 1.
267 | /// Output image with the same size and type as src.
268 | public NDArray stylization(NDArray src, float sigmaS = 60, float sigmaR = 0.45f)
269 | {
270 | Mat dstMat = new();
271 | Cv2.Stylization(src.AsMat(), dstMat, sigmaS, sigmaR);
272 | return dstMat.numpy();
273 | }
274 | }
275 | }
276 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/APIs/cv2.video.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using Tensorflow.NumPy;
6 | using Tensorflow.OpencvAdapter.Extensions;
7 |
8 | namespace Tensorflow.OpencvAdapter.APIs
9 | {
10 | public partial class Cv2API
11 | {
12 | ///
13 | /// Finds an object center, size, and orientation.
14 | ///
15 | /// Back projection of the object histogram.
16 | /// Initial search window.
17 | /// Stop criteria for the underlying MeanShift() .
18 | ///
19 | public RotatedRect camShift(NDArray probImage, ref Rect window, TermCriteria criteria)
20 | {
21 | return Cv2.CamShift(probImage.AsMat(), ref window, criteria);
22 | }
23 |
24 | ///
25 | /// Finds an object on a back projection image.
26 | ///
27 | /// Back projection of the object histogram.
28 | /// Initial search window.
29 | /// Stop criteria for the iterative search algorithm.
30 | /// Number of iterations CAMSHIFT took to converge.
31 | public int meanShift(NDArray probImage, ref Rect window, TermCriteria criteria)
32 | {
33 | return Cv2.MeanShift(probImage.AsMat(), ref window, criteria);
34 | }
35 |
36 | ///
37 | /// Constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
38 | ///
39 | /// 8-bit input image.
40 | /// window size of optical flow algorithm.
41 | /// Must be not less than winSize argument of calcOpticalFlowPyrLK().
42 | /// It is needed to calculate required padding for pyramid levels.
43 | /// 0-based maximal pyramid level number.
44 | /// set to precompute gradients for the every pyramid level.
45 | /// If pyramid is constructed without the gradients then calcOpticalFlowPyrLK() will
46 | /// calculate them internally.
47 | /// the border mode for pyramid layers.
48 | /// the border mode for gradients.
49 | /// put ROI of input image into the pyramid if possible.
50 | /// You can pass false to force data copying.
51 | /// 1. number of levels in constructed pyramid. Can be less than maxLevel.
52 | /// 2. output pyramid.
53 | public (int, NDArray) buildOpticalFlowPyramid(NDArray img, Size winSize, int maxLevel,
54 | bool withDerivatives = true,
55 | BorderTypes pyrBorder = BorderTypes.Reflect101,
56 | BorderTypes derivBorder = BorderTypes.Constant,
57 | bool tryReuseInputImage = true)
58 | {
59 | Mat dstMat = new();
60 | var retVal = Cv2.BuildOpticalFlowPyramid(img.AsMat(), dstMat, winSize, maxLevel,
61 | withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
62 | return (retVal, dstMat.numpy());
63 | }
64 |
65 | ///
66 | /// computes sparse optical flow using multi-scale Lucas-Kanade algorithm
67 | ///
68 | ///
69 | ///
70 | ///
71 | ///
72 | ///
73 | ///
74 | ///
75 | ///
76 | ///
77 | public (NDArray, NDArray, NDArray) calcOpticalFlowPyrLK(NDArray prevImg, NDArray nextImg, NDArray prevPts, NDArray nextPts,
78 | Size? winSize = null,
79 | int maxLevel = 3,
80 | TermCriteria? criteria = null,
81 | OpticalFlowFlags flags = OpticalFlowFlags.None,
82 | double minEigThreshold = 1e-4)
83 | {
84 | Mat statusMat = new();
85 | Mat errMat = new();
86 | Cv2.CalcOpticalFlowPyrLK(prevImg.AsMat(), nextImg.AsMat(), prevPts.AsMat(), nextPts.AsMat(),
87 | statusMat, errMat, winSize, maxLevel, criteria, flags, minEigThreshold);
88 | return (nextPts, statusMat.numpy(), errMat.numpy());
89 | }
90 |
91 | ///
92 | /// Computes a dense optical flow using the Gunnar Farneback's algorithm.
93 | ///
94 | /// first 8-bit single-channel input image.
95 | /// second input image of the same size and the same type as prev.
96 | /// computed flow image that has the same size as prev and type CV_32FC2.
97 | /// parameter, specifying the image scale (<1) to build pyramids for each image;
98 | /// pyrScale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one.
99 | /// number of pyramid layers including the initial image;
100 | /// levels=1 means that no extra layers are created and only the original images are used.
101 | /// averaging window size; larger values increase the algorithm robustness to
102 | /// image noise and give more chances for fast motion detection, but yield more blurred motion field.
103 | /// number of iterations the algorithm does at each pyramid level.
104 | /// size of the pixel neighborhood used to find polynomial expansion in each pixel;
105 | /// larger values mean that the image will be approximated with smoother surfaces,
106 | /// yielding more robust algorithm and more blurred motion field, typically poly_n =5 or 7.
107 | /// standard deviation of the Gaussian that is used to smooth derivatives used as
108 | /// a basis for the polynomial expansion; for polyN=5, you can set polySigma=1.1,
109 | /// for polyN=7, a good value would be polySigma=1.5.
110 | /// operation flags that can be a combination of OPTFLOW_USE_INITIAL_FLOW and/or OPTFLOW_FARNEBACK_GAUSSIAN
111 | public NDArray calcOpticalFlowFarneback(NDArray prev, NDArray next, NDArray flow,
112 | double pyrScale, int levels, int winsize, int iterations, int polyN, double polySigma, OpticalFlowFlags flags)
113 | {
114 | Cv2.CalcOpticalFlowFarneback(prev.AsMat(), next.AsMat(), flow.AsMat(), pyrScale,
115 | levels, winsize, iterations, polyN, polySigma, flags);
116 | return flow;
117 | }
118 |
119 | ///
120 | /// Computes the Enhanced Correlation Coefficient value between two images @cite EP08 .
121 | ///
122 | /// single-channel template image; CV_8U or CV_32F array.
123 | /// single-channel input image to be warped to provide an image similar to templateImage, same type as templateImage.
124 | /// An optional mask to indicate valid values of inputImage.
125 | ///
126 | public double computeECC(NDArray templateImage, NDArray inputImage, NDArray? inputMask = null)
127 | {
128 | return Cv2.ComputeECC(templateImage.AsMat(), inputImage.AsMat(), inputMask?.AsMat());
129 | }
130 |
131 | ///
132 | /// Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .
133 | ///
134 | /// single-channel template image; CV_8U or CV_32F array.
135 | /// single-channel input image which should be warped with the final warpMatrix in
136 | /// order to provide an image similar to templateImage, same type as templateImage.
137 | /// floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
138 | /// parameter, specifying the type of motion
139 | /// parameter, specifying the termination criteria of the ECC algorithm;
140 | /// criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
141 | /// iterations(a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
142 | /// Default values are shown in the declaration above.
143 | /// An optional mask to indicate valid values of inputImage.
144 | /// An optional value indicating size of gaussian blur filter; (DEFAULT: 5)
145 | ///
146 | public (double, NDArray) findTransformECC(NDArray templateImage, NDArray inputImage, NDArray warpMatrix, MotionTypes motionType,
147 | TermCriteria criteria, NDArray? inputMask = null, int gaussFiltSize = 5)
148 | {
149 | var retVal = Cv2.FindTransformECC(templateImage.AsMat(), inputImage.AsMat(), warpMatrix.AsMat(),
150 | motionType, criteria, inputMask.ToInputArray(), gaussFiltSize);
151 | return (retVal, warpMatrix);
152 | }
153 |
154 | ///
155 | /// Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .
156 | ///
157 | /// single-channel template image; CV_8U or CV_32F array.
158 | /// single-channel input image which should be warped with the final warpMatrix in
159 | /// order to provide an image similar to templateImage, same type as templateImage.
160 | /// floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
161 | /// parameter, specifying the type of motion
162 | /// parameter, specifying the termination criteria of the ECC algorithm;
163 | /// criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
164 | /// iterations(a negative criteria.epsilon makes criteria.maxcount the only termination criterion).
165 | /// Default values are shown in the declaration above.
166 | /// An optional mask to indicate valid values of inputImage.
167 | ///
168 | public (double, NDArray) findTransformECC(NDArray templateImage, NDArray inputImage, NDArray warpMatrix, MotionTypes motionType,
169 | TermCriteria criteria, NDArray? inputMask = null)
170 | {
171 | var retVal = Cv2.FindTransformECC(templateImage.AsMat(), inputImage.AsMat(), warpMatrix.AsMat(),
172 | motionType, criteria, inputMask.ToInputArray());
173 | return (retVal, warpMatrix);
174 | }
175 |
176 |
177 | }
178 | }
179 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/AdapterUtils.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 | using Tensorflow.NumPy;
7 |
8 | namespace Tensorflow.OpencvAdapter
9 | {
10 | internal static class AdapterUtils
11 | {
12 | private static readonly IReadOnlyDictionary _matTypeMappingToTFDataType = new Dictionary
13 | {
14 | [MatType.CV_8UC1] = TF_DataType.TF_UINT8,
15 | [MatType.CV_8SC1] = TF_DataType.TF_INT8,
16 | [MatType.CV_16SC1] = TF_DataType.TF_INT16,
17 | [MatType.CV_16UC1] = TF_DataType.TF_UINT16,
18 | [MatType.CV_32SC1] = TF_DataType.TF_INT32,
19 | [MatType.CV_32FC1] = TF_DataType.TF_FLOAT,
20 | [MatType.CV_64FC1] = TF_DataType.TF_DOUBLE,
21 |
22 | [MatType.CV_8UC2] = TF_DataType.TF_UINT8,
23 | [MatType.CV_8UC3] = TF_DataType.TF_UINT8,
24 | [MatType.CV_8UC4] = TF_DataType.TF_UINT8,
25 |
26 | [MatType.CV_16SC2] = TF_DataType.TF_INT16,
27 | [MatType.CV_16SC3] = TF_DataType.TF_INT16,
28 | [MatType.CV_16SC4] = TF_DataType.TF_INT16,
29 |
30 | [MatType.CV_16UC2] = TF_DataType.TF_UINT16,
31 | [MatType.CV_16UC3] = TF_DataType.TF_UINT16,
32 | [MatType.CV_16UC4] = TF_DataType.TF_UINT16,
33 |
34 | [MatType.CV_32SC2] = TF_DataType.TF_INT32,
35 | [MatType.CV_32SC3] = TF_DataType.TF_INT32,
36 | [MatType.CV_32SC4] = TF_DataType.TF_INT32,
37 |
38 | [MatType.CV_32FC2] = TF_DataType.TF_FLOAT,
39 | [MatType.CV_32FC3] = TF_DataType.TF_FLOAT,
40 | [MatType.CV_32FC4] = TF_DataType.TF_FLOAT,
41 |
42 | [MatType.CV_64FC2] = TF_DataType.TF_DOUBLE,
43 | [MatType.CV_64FC3] = TF_DataType.TF_DOUBLE,
44 | [MatType.CV_64FC4] = TF_DataType.TF_DOUBLE,
45 | };
46 |
47 | internal static TF_DataType MatTypeToTFDataType(MatType type)
48 | {
49 | if(_matTypeMappingToTFDataType.TryGetValue(type, out var res))
50 | {
51 | return res;
52 | }
53 | else
54 | {
55 | throw new TypeError($"MatType {Enum.GetName(typeof(MatType), type)} is invalid " +
56 | $"or is not supported in tensorflow opencv adapter. The developers of tensorflow " +
57 | $"opencv adapter cannot decide which types to support at the beginning so that only some " +
58 | $"basic types is supported. For example, the using of vec4 and vec6 seems to be rare. " +
59 | $"Please submit an issue to tell us the condition of this MatType " +
60 | $"and we will add support for it");
61 | }
62 | }
63 |
64 | internal static MatType TFDataTypeToMatType(TF_DataType type, int channels)
65 | {
66 | if(channels == 1)
67 | {
68 | return type switch
69 | {
70 | TF_DataType.TF_UINT8 => MatType.CV_8UC1,
71 | TF_DataType.TF_UINT16 => MatType.CV_16UC1,
72 | TF_DataType.TF_INT8 => MatType.CV_8SC1,
73 | TF_DataType.TF_INT16 => MatType.CV_16SC1,
74 | TF_DataType.TF_INT32 => MatType.CV_32SC1,
75 | TF_DataType.TF_FLOAT => MatType.CV_32FC1,
76 | TF_DataType.TF_DOUBLE => MatType.CV_64FC1
77 | };
78 | }
79 | else if(channels == 2)
80 | {
81 | return type switch
82 | {
83 | TF_DataType.TF_UINT8 => MatType.CV_8UC2,
84 | TF_DataType.TF_UINT16 => MatType.CV_16UC2,
85 | TF_DataType.TF_INT8 => MatType.CV_8SC2,
86 | TF_DataType.TF_INT16 => MatType.CV_16SC2,
87 | TF_DataType.TF_INT32 => MatType.CV_32SC2,
88 | TF_DataType.TF_FLOAT => MatType.CV_32FC2,
89 | TF_DataType.TF_DOUBLE => MatType.CV_64FC2
90 | };
91 | }
92 | else if(channels == 3)
93 | {
94 | return type switch
95 | {
96 | TF_DataType.TF_UINT8 => MatType.CV_8UC3,
97 | TF_DataType.TF_UINT16 => MatType.CV_16UC3,
98 | TF_DataType.TF_INT8 => MatType.CV_8SC3,
99 | TF_DataType.TF_INT16 => MatType.CV_16SC3,
100 | TF_DataType.TF_INT32 => MatType.CV_32SC3,
101 | TF_DataType.TF_FLOAT => MatType.CV_32FC3,
102 | TF_DataType.TF_DOUBLE => MatType.CV_64FC3
103 | };
104 | }
105 | else if(channels == 4)
106 | {
107 | return type switch
108 | {
109 | TF_DataType.TF_UINT8 => MatType.CV_8UC4,
110 | TF_DataType.TF_UINT16 => MatType.CV_16UC4,
111 | TF_DataType.TF_INT8 => MatType.CV_8SC4,
112 | TF_DataType.TF_INT16 => MatType.CV_16SC4,
113 | TF_DataType.TF_INT32 => MatType.CV_32SC4,
114 | TF_DataType.TF_FLOAT => MatType.CV_32FC4,
115 | TF_DataType.TF_DOUBLE => MatType.CV_64FC4
116 | };
117 | }
118 | else
119 | {
120 | throw new ValueError($"{channels} channels data is not supported by tensorflow.net opencv adapter. " +
121 | $"If you think it's an expected behavior, please submit an issue to tell us.");
122 | }
123 | }
124 |
125 | internal static void SetMatFromNDArrayData(NDArray array, Mat mat)
126 | {
127 | if(array.dtype == TF_DataType.TF_FLOAT)
128 | {
129 | mat.SetArray(array.ToArray());
130 | }
131 | else if(array.dtype == TF_DataType.TF_DOUBLE)
132 | {
133 | mat.SetArray(array.ToArray());
134 | }
135 | else if(array.dtype == TF_DataType.TF_INT32)
136 | {
137 | mat.SetArray(array.ToArray());
138 | }
139 | else if(array.dtype == TF_DataType.TF_INT16)
140 | {
141 | mat.SetArray(array.ToArray());
142 | }
143 | else if(array.dtype == TF_DataType.TF_INT8)
144 | {
145 | mat.SetArray(array.ToArray());
146 | }
147 | else if(array.dtype == TF_DataType.TF_UINT16)
148 | {
149 | mat.SetArray(array.ToArray());
150 | }
151 | else if(array.dtype == TF_DataType.TF_UINT8)
152 | {
153 | mat.SetArray(array.ToArray());
154 | }
155 | else
156 | {
157 | throw new ValueError($"Type {array.dtype.as_numpy_name()} is not supported to convert to Mat.");
158 | }
159 | }
160 |
161 | ///
162 | /// The layout should be "hwc"
163 | ///
164 | ///
165 | internal static Mat ConvertNDArrayToMat(NDArray array)
166 | {
167 | if (CvNDArray.AdapterMode == OpencvAdapterMode.StrictNoCopy || CvNDArray.AdapterMode == OpencvAdapterMode.AllowCopy)
168 | {
169 | var dataPointer = array.TensorDataPointer;
170 | var (matType, rows, cols) = DeduceMatInfoFromNDArray(array.shape, array.dtype);
171 | return new Mat(rows, cols, matType, dataPointer);
172 | }
173 | else // AdapterMode == OpencvAdapterMode.AlwaysCopy
174 | {
175 | var (matType, rows, cols) = DeduceMatInfoFromNDArray(array.shape, array.dtype);
176 | Mat m = new Mat(rows, cols, matType);
177 | SetMatFromNDArrayData(array, m);
178 | return m;
179 | }
180 | }
181 |
182 | internal static (MatType, int, int) DeduceMatInfoFromNDArray(Shape shape, TF_DataType dtype)
183 | {
184 | if (shape.rank <= 1 || shape.rank >= 4)
185 | {
186 | throw new ValueError($"Converting from NDArray to Mat with shape with rank {shape.rank} has not been supported. If it's expected to work with you, " +
187 | $"please submit an issue and we'll add it.");
188 | }
189 | if (shape[0] > int.MaxValue || shape[1] > int.MaxValue)
190 | {
191 | throw new ValueError($"The shape {shape} is too large to convert to CvNDArray");
192 | }
193 | int rows = (int)shape[0];
194 | int cols = (int)shape[1];
195 | MatType matType;
196 | if (shape.rank == 2)
197 | {
198 | matType = TFDataTypeToMatType(dtype, 1);
199 | }
200 | else // shape.rank == 3
201 | {
202 | matType = TFDataTypeToMatType(dtype, (int)shape[2]);
203 | }
204 | return (matType, rows, cols);
205 | }
206 | }
207 | }
208 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/CvNDArray.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using OpenCvSharp.Internal;
3 | using System;
4 | using Tensorflow.NumPy;
5 |
6 | namespace Tensorflow.OpencvAdapter
7 | {
8 | public class CvNDArray: NDArray
9 | {
10 | public static OpencvAdapterMode AdapterMode { get; set; } = OpencvAdapterMode.AllowCopy;
11 | protected Mat _mat;
12 | public unsafe CvNDArray(Mat mat)
13 | {
14 | // If mode is AlwaysCopy, then just copy it.
15 | if(AdapterMode == OpencvAdapterMode.AlwaysCopy)
16 | {
17 | _mat = mat.Clone();
18 | }
19 | // If the mat is not contiguous, then a memory copy will happen to get a contiguous mat.
20 | else if(!mat.IsContinuous())
21 | {
22 | if(AdapterMode == OpencvAdapterMode.AllowCopy)
23 | {
24 | _mat = mat.Clone();
25 | }
26 | else if(AdapterMode == OpencvAdapterMode.StrictNoCopy)
27 | {
28 | throw new RuntimeError($"The CvNDarray cannot be constructed because the mat is not " +
29 | $"contiguous and the mode is set to `StrictNoCopy`. Please consider changing the mode or " +
30 | $"avoiding incontiguous Mat.");
31 | }
32 | else
33 | {
34 | throw new ValueError($"Cannot recognize the mode {Enum.GetName(typeof(OpencvAdapterMode), AdapterMode)}");
35 | }
36 | }
37 | else
38 | {
39 | _mat = mat;
40 | }
41 | InitWithExistingMemory(new IntPtr(_mat.DataPointer), new Shape(_mat.Rows, _mat.Cols, _mat.Channels()),
42 | AdapterUtils.MatTypeToTFDataType(_mat.Type()), (x, y, z) => { if(_mat is not null) _mat.Release(); _mat = null; });
43 | }
44 |
45 | public Mat AsMat()
46 | {
47 | return _mat;
48 | }
49 |
50 | public override string ToString()
51 | {
52 | return "NDarray which shares memory with Mat: " + base.ToString();
53 | }
54 |
55 | protected override void DisposeManagedResources()
56 | {
57 | if(_mat is not null)
58 | {
59 | _mat.Release();
60 | _mat = null;
61 | }
62 | base.DisposeManagedResources();
63 | }
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/Extensions/ConversionExtensions.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Runtime.CompilerServices;
5 | using System.Text;
6 | using Tensorflow.NumPy;
7 |
8 | namespace Tensorflow.OpencvAdapter.Extensions
9 | {
10 | public static class ConversionExtensions
11 | {
12 | ///
13 | /// Convert the NDArray to Opencv Mat object. If the parameter clone is set to true,
14 | /// then memory copying will happen. If it's set to false, the returned Mat object will
15 | /// share the memory with the source NDArray. So be careful if the NDArray will still be
16 | /// modified after calling this method.
17 | ///
18 | /// The source NDArray.
19 | /// Whether to perform memory copying.
20 | ///
21 | public static Mat ToMat(this NDArray array, bool clone)
22 | {
23 | if (clone)
24 | {
25 | using(new OpencvAdapterContext(OpencvAdapterMode.AlwaysCopy))
26 | {
27 | return AdapterUtils.ConvertNDArrayToMat(array);
28 | }
29 | }
30 | else
31 | {
32 | return AdapterUtils.ConvertNDArrayToMat(array);
33 | }
34 | }
35 |
36 | ///
37 | /// Convert the opencv Mat to NDArray. If the parameter clone is set to true,
38 | /// then memory copying will happen. If it's set to false, the returned NDArray will
39 | /// share the memory with the source Mat. So be careful if the Mat will still be
40 | /// modified after calling this method.
41 | ///
42 | /// The source Opencv Mat object.
43 | /// Whether to perform memory copying.
44 | ///
45 | public static NDArray ToNDArray(this Mat mat, bool clone)
46 | {
47 | if (clone)
48 | {
49 | using (new OpencvAdapterContext(OpencvAdapterMode.AlwaysCopy))
50 | {
51 | return new CvNDArray(mat);
52 | }
53 | }
54 | else
55 | {
56 | return new CvNDArray(mat);
57 | }
58 | }
59 |
60 | ///
61 | /// Convert the opencv Mat to NDArray. If the parameter clone is set to true,
62 | /// then memory copying will happen. If it's set to false, the returned NDArray will
63 | /// share the memory with the source Mat. By default `clone` is set to false.
64 | /// So be careful if the Mat will still be modified after calling this method.
65 | ///
66 | /// The source Opencv Mat object.
67 | /// Whether to perform memory copying.
68 | ///
69 | public static NDArray numpy(this Mat mat, bool clone = false)
70 | {
71 | if (clone)
72 | {
73 | using (new OpencvAdapterContext(OpencvAdapterMode.AlwaysCopy))
74 | {
75 | return new CvNDArray(mat);
76 | }
77 | }
78 | else
79 | {
80 | return new CvNDArray(mat);
81 | }
82 | }
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/Extensions/DTypeExtensions.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 |
5 | namespace Tensorflow.OpencvAdapter.Extensions
6 | {
7 | internal static class DTypeExtensions
8 | {
9 | internal static int ToMatTypeNumber(this TF_DataType dtype, int channels)
10 | {
11 | if(dtype == TF_DataType.DtInvalid)
12 | {
13 | return -1;
14 | }
15 | else
16 | {
17 | return AdapterUtils.TFDataTypeToMatType(dtype, channels);
18 | }
19 | }
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/Extensions/NDArrayExtensions.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using Tensorflow.NumPy;
6 |
7 | namespace Tensorflow.OpencvAdapter.Extensions
8 | {
9 | public static class NDArrayExtensions
10 | {
11 | ///
12 | /// If the array already has a shared Mat, then return it.
13 | /// Otherwise return the result of `NDArray.ToMat()`.
14 | ///
15 | ///
16 | ///
17 | public static Mat AsMat(this NDArray array)
18 | {
19 | if(array is CvNDArray cvarray)
20 | {
21 | return cvarray.AsMat();
22 | }
23 | else
24 | {
25 | return array.ToMat(false);
26 | }
27 | }
28 |
29 | public static bool CanConvertToMatWithouyCopy(this NDArray array)
30 | {
31 | return CvNDArray.AdapterMode != OpencvAdapterMode.StrictNoCopy;
32 | }
33 |
34 | internal static InputArray? ToInputArray(this NDArray? array)
35 | {
36 | if(array is null)
37 | {
38 | return null;
39 | }
40 | return (InputArray)(array.AsMat());
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/FakeMat.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Text;
5 | using Tensorflow.NumPy;
6 |
7 | namespace Tensorflow.OpencvAdapter
8 | {
9 | ///
10 | /// A class to enable implicit conversion from NDArray to Mat.
11 | ///
12 | public class FakeMat: Mat
13 | {
14 | internal FakeMat(int rows, int cols, MatType type, IntPtr data, long step = 0) :
15 | base(rows, cols, type, data, step)
16 | {
17 |
18 | }
19 |
20 | internal FakeMat(int rows, int cols, MatType type) : base(rows, cols, type)
21 | {
22 |
23 | }
24 |
25 | internal static FakeMat FromNDArray(NDArray array)
26 | {
27 | if (CvNDArray.AdapterMode == OpencvAdapterMode.StrictNoCopy || CvNDArray.AdapterMode == OpencvAdapterMode.AllowCopy)
28 | {
29 | var dataPointer = array.TensorDataPointer;
30 | var (matType, rows, cols) = AdapterUtils.DeduceMatInfoFromNDArray(array.shape, array.dtype);
31 | return new FakeMat(rows, cols, matType, dataPointer);
32 | }
33 | else // AdapterMode == OpencvAdapterMode.AlwaysCopy
34 | {
35 | var (matType, rows, cols) = AdapterUtils.DeduceMatInfoFromNDArray(array.shape, array.dtype);
36 | FakeMat m = new FakeMat(rows, cols, matType);
37 | AdapterUtils.SetMatFromNDArrayData(array, m);
38 | return m;
39 | }
40 | }
41 |
42 | public static implicit operator FakeMat(NDArray array)
43 | {
44 | return FromNDArray(array);
45 | }
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/OpencvAdapterContext.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 |
5 | namespace Tensorflow.OpencvAdapter
6 | {
7 | public class OpencvAdapterContext: IDisposable
8 | {
9 | OpencvAdapterMode _oldMode;
10 | public OpencvAdapterContext(OpencvAdapterMode mode)
11 | {
12 | _oldMode = CvNDArray.AdapterMode;
13 | CvNDArray.AdapterMode = mode;
14 | }
15 | public void Dispose()
16 | {
17 | CvNDArray.AdapterMode = _oldMode;
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/OpencvAdapterMode.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Text;
4 |
5 | namespace Tensorflow.OpencvAdapter
6 | {
7 | ///
8 | /// Decide the behavior of CvNDArray, which combines Mat and NDArray to provide APIs.
9 | ///
10 | public enum OpencvAdapterMode
11 | {
12 | ///
13 | /// Memory copying in the adapter is never allowed. This mode minimizes the cost of memory copying.
14 | /// However, sometimes the memory sharing between NDArray and Mat are not allowed because the mat
15 | /// is not contiguous. In this case under this mode, an exception will be thrown.
16 | ///
17 | StrictNoCopy,
18 | ///
19 | /// Memory copying is allowed when necessary. If the mat is not contiguous, then a new Mat will
20 | /// be cloned for data sharing.
21 | ///
22 | AllowCopy,
23 | ///
24 | /// Momery copying is always done. Under this mode, the mat data will be copied every timr when a
25 | /// CvNDarray is created.
26 | ///
27 | AlwaysCopy
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/Tensorflow.NET.OpencvAdapter/Tensorflow.OpencvAdapter.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netstandard2.0
5 | 10
6 | true
7 | enable
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/Assets/img.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SciSharp/TensorFlow.NET.OpencvAdapter/1754e2d2970d97757f2c28808b182e381258408e/Tensorflow.OpencvAdapter.Unittest/Assets/img.npy
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/Assets/test1.JPEG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SciSharp/TensorFlow.NET.OpencvAdapter/1754e2d2970d97757f2c28808b182e381258408e/Tensorflow.OpencvAdapter.Unittest/Assets/test1.JPEG
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/ImageCodecsTest.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using System;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Text;
6 | using System.Threading.Tasks;
7 | using static Tensorflow.Binding;
8 | using static Tensorflow.OpencvAPIs;
9 |
10 | namespace Tensorflow.OpencvAdapter.Unittest
11 | {
12 | [TestClass]
13 | public class ImageCodecsTest
14 | {
15 | [TestMethod]
16 | public void LoadImageAndSave()
17 | {
18 | string filename = "Assets/test1.JPEG";
19 | var img = cv2.imread(filename);
20 | Console.WriteLine(img.ToString());
21 |
22 | Assert.AreEqual(17, (int)img[0, 0, 0]);
23 | Assert.AreEqual(184, (int)img[0, 0, 1]);
24 | Assert.AreEqual(197, (int)img[0, 0, 2]);
25 | Assert.AreEqual(13, (int)img[0, 1, 0]);
26 | Assert.AreEqual(181, (int)img[0, 1, 1]);
27 | Assert.AreEqual(192, (int)img[0, 1, 2]);
28 |
29 | cv2.imwrite("Assets/saved_test1.jpg", cv2.subtract(img, Scalar.FromDouble(1.0)));
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/MemoryTest.cs:
--------------------------------------------------------------------------------
1 | using OpenCvSharp;
2 | using Tensorflow.NumPy;
3 | using Tensorflow.OpencvAdapter.Extensions;
4 |
5 | namespace Tensorflow.OpencvAdapter.Unittest
6 | {
7 | [TestClass]
8 | public class MemoryTest
9 | {
10 | [TestMethod]
11 | public void BasicUsage()
12 | {
13 | var img = Cv2.ImRead(@"Assets/test1.JPEG");
14 | var n = new CvNDArray(img);
15 | img.Set(0, 0, 111);
16 | Assert.AreEqual(111, (byte)n[0, 0, 0]);
17 | }
18 |
19 | [TestMethod]
20 | public void MemoryRelease()
21 | {
22 | var img = Cv2.ImRead(@"Assets/test1.JPEG");
23 | var n = new CvNDArray(img);
24 | n.Dispose();
25 | GC.Collect();
26 | Assert.ThrowsException(() => { img.CvPtr.ToString(); });
27 | }
28 |
29 | [TestMethod]
30 | public void MatFromNDArray()
31 | {
32 | var array = np.load(@"Assets/img.npy");
33 | Mat m = array.ToMat(clone: false);
34 | m.Set(5, 6, 111);
35 | Assert.AreEqual(111, (byte)array[5, 6, 0]);
36 | Cv2.ImWrite(@"Assets/img.jpg", m);
37 |
38 | m.Release();
39 | GC.Collect();
40 | Assert.AreEqual(111, (byte)array[5, 6, 0]);
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/Tensorflow.OpencvAdapter.Unittest.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | net6.0
5 | enable
6 | enable
7 |
8 | false
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 | PreserveNewest
26 |
27 |
28 | PreserveNewest
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/Tensorflow.OpencvAdapter.Unittest/Usings.cs:
--------------------------------------------------------------------------------
1 | global using Microsoft.VisualStudio.TestTools.UnitTesting;
--------------------------------------------------------------------------------