├── .github └── workflows │ └── main.yml ├── .gitignore ├── .prettierrc ├── LICENSE ├── README.md ├── package.json ├── src ├── ImageProcessing │ ├── ColorMap.d.ts │ ├── DrawingFunctions.d.ts │ ├── Feature.d.ts │ ├── GeometricImageTransformations.d.ts │ ├── Histograms.d.ts │ ├── ImageFiltering.d.ts │ ├── Misc.d.ts │ ├── Object.d.ts │ ├── Segmentation.d.ts │ ├── Shape.d.ts │ └── Subdiv2D.d.ts ├── ObjectDetection │ └── ObjectDetection.d.ts ├── core │ ├── ColorConversion.d.ts │ ├── Core.d.ts │ ├── CoreArray.d.ts │ ├── HalInterface.d.ts │ ├── Mat.d.ts │ ├── MatVector.d.ts │ ├── Moments.d.ts │ ├── Point.d.ts │ ├── Range.d.ts │ ├── Rect.d.ts │ ├── RotatedRect.d.ts │ ├── Scalar.d.ts │ ├── Size.d.ts │ ├── TermCriteria.d.ts │ └── Utils.d.ts ├── dnn │ └── dnn.d.ts ├── helper.ts ├── opencv.d.ts ├── opencv.js └── video │ ├── BackgroundSubtractor.d.ts │ ├── BackgroundSubtractorMOG2.d.ts │ └── track.d.ts ├── tsconfig.json └── yarn.lock /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Publish to NPM 2 | on: 3 | release: 4 | types: [created] 5 | jobs: 6 | publish: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - name: Checkout 10 | uses: actions/checkout@v2 11 | - name: Setup Node 12 | uses: actions/setup-node@v2 13 | with: 14 | node-version: '14.x' 15 | registry-url: 'https://registry.npmjs.org' 16 | - name: Publish package on NPM 📦 17 | run: npm publish 18 | env: 19 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "printWidth": 100, 3 | "singleQuote": true, 4 | "tabWidth": 4, 5 | "useTabs": false, 6 | "bracketSpacing": true, 7 | 
"arrowParens": "avoid", 8 | "semi": true, 9 | "trailingComma": "es5" 10 | } 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # opencv-ts 2 | Package for opencvjs in typescript (WIP), adding class and function definitions progressively. 
3 | 4 | ## compatibility 5 | 6 | Compatible with OpenCV 4.5 version 7 | 8 | ## example use 9 | 10 | ```typescript 11 | import cv, { Mat, Rect } from "opencv-ts"; 12 | 13 | cv.onRuntimeInitialized = () => { 14 | const src = cv.imread("inputCanvas"); 15 | const dst: Mat = new cv.Mat(src.cols, src.rows, cv.CV_8UC4); 16 | 17 | cv.resize(src, dst, new cv.Size(500, 500), 0, 0, cv.INTER_AREA); 18 | 19 | const roiRect: Rect = new cv.Rect(0, 0, 200, 200); 20 | 21 | const roi = dst.roi(roiRect); 22 | 23 | cv.imshow('outputCanvas', roi); 24 | }; 25 | 26 | ``` 27 | 28 | ## Typings Progress 29 | * Modules 30 | * Core 31 | * ~~Basic Structures~~ 32 | * ~~Operations on Arrays~~ 33 | * Image Processing 34 | * ~~Image Filtering~~ 35 | * ~~Geometric Image Transformations~~ 36 | * ~~Drawing Functions~~ 37 | * ~~Color Space Conversions~~ 38 | * ~~ColorMaps in OpenCV~~ 39 | * ~~Planar Subdivision~~ 40 | * ~~Histograms~~ 41 | * ~~Miscellaneous Image Transformations~~ 42 | * ~~Structural Analysis and Shape Descriptors~~ 43 | * ~~Feature Detection~~ 44 | * ~~Object Detection~~ 45 | * ~~Image Segmentation~~ 46 | * ~~Video: Object Tracking~~ 47 | * 2D Features Framework 48 | * Computational Photography 49 | * Inpainting 50 | * Camera Calibration and 3D Reconstruction 51 | * Fisheye camera model 52 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "opencv-ts", 3 | "version": "1.3.6", 4 | "main": "src/opencv.js", 5 | "types": "src/opencv.d.ts", 6 | "description": "This package is a work in progress to translate opencv with a typescript definition", 7 | "license": "Apache-2.0", 8 | "dependencies": { 9 | "typescript": "^4.2.3" 10 | }, 11 | "keywords": [ 12 | "typescript", 13 | "opencvjs", 14 | "opencv" 15 | ], 16 | "author": "Grant Davidson", 17 | "repository": { 18 | "type": "git", 19 | "url": "https://github.com/theothergrantdavidson/opencv-ts" 20 | }, 21 
| "browser": { 22 | "fs": false, 23 | "crypto": false, 24 | "path": false 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/ImageProcessing/ColorMap.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from "../core/Mat"; 2 | 3 | declare module ColorMap { 4 | enum ColormapTypes { 5 | COLORMAP_AUTUMN = 0, 6 | COLORMAP_BONE = 1, 7 | COLORMAP_JET = 2, 8 | COLORMAP_WINTER = 3, 9 | COLORMAP_RAINBOW = 4, 10 | COLORMAP_OCEAN = 5, 11 | COLORMAP_SUMMER = 6, 12 | COLORMAP_SPRING = 7, 13 | COLORMAP_COOL = 8, 14 | COLORMAP_HSV = 9, 15 | COLORMAP_PINK = 10, 16 | COLORMAP_HOT = 11, 17 | COLORMAP_PARULA = 12, 18 | COLORMAP_MAGMA = 13, 19 | COLORMAP_INFERNO = 14, 20 | COLORMAP_PLASMA = 15, 21 | COLORMAP_VIRIDIS = 16, 22 | COLORMAP_CIVIDIS = 17, 23 | COLORMAP_TWILIGHT = 18, 24 | COLORMAP_TWILIGHT_SHIFTED = 19, 25 | COLORMAP_TURBO = 20, 26 | COLORMAP_DEEPGREEN = 21, 27 | } 28 | 29 | interface _ColormapTypes { 30 | COLORMAP_AUTUMN: ColormapTypes.COLORMAP_AUTUMN; 31 | COLORMAP_BONE: ColormapTypes.COLORMAP_BONE; 32 | COLORMAP_JET: ColormapTypes.COLORMAP_JET; 33 | COLORMAP_WINTER: ColormapTypes.COLORMAP_WINTER; 34 | COLORMAP_RAINBOW: ColormapTypes.COLORMAP_RAINBOW; 35 | COLORMAP_OCEAN: ColormapTypes.COLORMAP_OCEAN; 36 | COLORMAP_SUMMER: ColormapTypes.COLORMAP_SUMMER; 37 | COLORMAP_SPRING: ColormapTypes.COLORMAP_SPRING; 38 | COLORMAP_COOL: ColormapTypes.COLORMAP_COOL; 39 | COLORMAP_HSV: ColormapTypes.COLORMAP_HSV; 40 | COLORMAP_PINK: ColormapTypes.COLORMAP_PINK; 41 | COLORMAP_HOT: ColormapTypes.COLORMAP_HOT; 42 | COLORMAP_PARULA: ColormapTypes.COLORMAP_PARULA; 43 | COLORMAP_MAGMA: ColormapTypes.COLORMAP_MAGMA; 44 | COLORMAP_INFERNO: ColormapTypes.COLORMAP_INFERNO; 45 | COLORMAP_PLASMA: ColormapTypes.COLORMAP_PLASMA; 46 | COLORMAP_VIRIDIS: ColormapTypes.COLORMAP_VIRIDIS; 47 | COLORMAP_CIVIDIS: ColormapTypes.COLORMAP_CIVIDIS; 48 | COLORMAP_TWILIGHT: ColormapTypes.COLORMAP_TWILIGHT; 
49 | COLORMAP_TWILIGHT_SHIFTED: ColormapTypes.COLORMAP_TWILIGHT_SHIFTED; 50 | COLORMAP_TURBO: ColormapTypes.COLORMAP_TURBO; 51 | COLORMAP_DEEPGREEN: ColormapTypes.COLORMAP_DEEPGREEN; 52 | } 53 | 54 | interface ColorMap { 55 | applyColorMap(src: Mat, dst: Mat, colormap: ColormapTypes): void; 56 | } 57 | } 58 | export = ColorMap; 59 | -------------------------------------------------------------------------------- /src/ImageProcessing/DrawingFunctions.d.ts: -------------------------------------------------------------------------------- 1 | import { NDArray } from '../core/Core'; 2 | import { Mat } from '../core/Mat'; 3 | import { Point } from '../core/Point'; 4 | import { Rect } from '../core/Rect'; 5 | import { RotatedRect } from '../core/RotatedRect'; 6 | import { Scalar } from '../core/Scalar'; 7 | import { Size } from '../core/Size'; 8 | import { MatVector } from '../core/MatVector'; 9 | 10 | declare module DrawingFunctions { 11 | enum HersheyFonts { 12 | FONT_HERSHEY_SIMPLEX = 0, 13 | FONT_HERSHEY_PLAIN = 1, 14 | FONT_HERSHEY_DUPLEX = 2, 15 | FONT_HERSHEY_COMPLEX = 3, 16 | FONT_HERSHEY_TRIPLEX = 4, 17 | FONT_HERSHEY_COMPLEX_SMALL = 5, 18 | FONT_HERSHEY_SCRIPT_SIMPLEX = 6, 19 | FONT_HERSHEY_SCRIPT_COMPLEX = 7, 20 | FONT_ITALIC = 16, 21 | } 22 | 23 | interface _HersheyFonts { 24 | FONT_HERSHEY_SIMPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 25 | FONT_HERSHEY_PLAIN: HersheyFonts.FONT_HERSHEY_SIMPLEX; 26 | FONT_HERSHEY_DUPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 27 | FONT_HERSHEY_COMPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 28 | FONT_HERSHEY_TRIPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 29 | FONT_HERSHEY_COMPLEX_SMALL: HersheyFonts.FONT_HERSHEY_SIMPLEX; 30 | FONT_HERSHEY_SCRIPT_SIMPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 31 | FONT_HERSHEY_SCRIPT_COMPLEX: HersheyFonts.FONT_HERSHEY_SIMPLEX; 32 | FONT_ITALIC: HersheyFonts.FONT_HERSHEY_SIMPLEX; 33 | } 34 | 35 | enum LineTypes { 36 | FILLED = -1, 37 | LINE_4 = 4, 38 | LINE_8 = 8, 39 | LINE_AA = 16, 40 | } 41 | 42 | interface 
_LineTypes { 43 | FILLED: LineTypes.FILLED; 44 | LINE_4: LineTypes.LINE_4; 45 | LINE_8: LineTypes.LINE_8; 46 | LINE_AA: LineTypes.LINE_AA; 47 | } 48 | 49 | enum MarkerTypes { 50 | MARKER_CROSS = 0, 51 | MARKER_TILTED_CROSS = 1, 52 | MARKER_STAR = 2, 53 | MARKER_DIAMOND = 3, 54 | MARKER_SQUARE = 4, 55 | MARKER_TRIANGLE_UP = 5, 56 | MARKER_TRIANGLE_DOWN = 6, 57 | } 58 | 59 | interface _MarkerTypes { 60 | MARKER_CROSS: MarkerTypes.MARKER_CROSS; 61 | MARKER_TILTED_CROSS: MarkerTypes.MARKER_TILTED_CROSS; 62 | MARKER_STAR: MarkerTypes.MARKER_STAR; 63 | MARKER_DIAMOND: MarkerTypes.MARKER_DIAMOND; 64 | MARKER_SQUARE: MarkerTypes.MARKER_SQUARE; 65 | MARKER_TRIANGLE_UP: MarkerTypes.MARKER_TRIANGLE_UP; 66 | MARKER_TRIANGLE_DOWN: MarkerTypes.MARKER_TRIANGLE_DOWN; 67 | } 68 | 69 | interface DrawingFunctions { 70 | /** 71 | * Draws a arrow segment pointing from the first point to the second one. 72 | * @param img Image 73 | * @param pt1 The point the arrow starts from. 74 | * @param pt2 The point the arrow points to. 75 | * @param color Line color 76 | * @param thickness Line thickness 77 | * @param line_type Type of the line. See LineTypes 78 | * @param shift Number of fractional bits in the point coordinates. 
79 | * @param tipLength The length of the arrow tip in relation to the arrow length 80 | */ 81 | arrowedLine( 82 | img: Mat, 83 | pt1: Point, 84 | pt2: Point, 85 | color: Scalar, 86 | thickness: number, 87 | line_type: LineTypes, 88 | shift: number, 89 | tipLength: number 90 | ): void; 91 | arrowedLine( 92 | img: Mat, 93 | pt1: Point, 94 | pt2: Point, 95 | color: Scalar, 96 | thickness: number, 97 | line_type: LineTypes, 98 | shift: number 99 | ): void; 100 | arrowedLine( 101 | img: Mat, 102 | pt1: Point, 103 | pt2: Point, 104 | color: Scalar, 105 | thickness: number, 106 | line_type: LineTypes 107 | ): void; 108 | arrowedLine(img: Mat, pt1: Point, pt2: Point, color: Scalar, thickness: number): void; 109 | arrowedLine(img: Mat, pt1: Point, pt2: Point, color: Scalar): void; 110 | /** 111 | * Draws a circle. 112 | * @param img Image where the circle is drawn. 113 | * @param center Center of the circle. 114 | * @param radius Radius of the circle. 115 | * @param color Circle color. 116 | * @param thickness Thickness of the circle outline, if positive. Negative values, like FILLED, mean that a filled circle is to be drawn 117 | * @param line_type Type of the circle boundary. See LineTypes 118 | * @param shift Number of fractional bits in the coordinates of the center and in the radius value. 119 | */ 120 | circle( 121 | img: Mat, 122 | center: Point, 123 | radius: number, 124 | color: Scalar, 125 | thickness: number, 126 | line_type: LineTypes, 127 | shift: number 128 | ): void; 129 | circle( 130 | img: Mat, 131 | center: Point, 132 | radius: number, 133 | color: Scalar, 134 | thickness: number, 135 | line_type: LineTypes 136 | ): void; 137 | circle(img: Mat, center: Point, radius: number, color: Scalar, thickness: number): void; 138 | circle(img: Mat, center: Point, radius: number, color: Scalar): void; 139 | /** 140 | * Clips the line against the image rectangle. 
141 | * The function cv::clipLine calculates a part of the line segment that is entirely within the specified rectangle. it returns false if the line segment is completely outside the rectangle. Otherwise, it returns true . 142 | * @param imgSize Image size 143 | * @param pt1 First line point. 144 | * @param pt2 Second line point. 145 | */ 146 | clipLine(imgSize: Size, pt1: Point, pt2: Point): boolean; 147 | /** 148 | * Clips the line against the image rectangle. 149 | * The function cv::clipLine calculates a part of the line segment that is entirely within the specified rectangle. it returns false if the line segment is completely outside the rectangle. Otherwise, it returns true . 150 | * @param imgRect Image rectangle 151 | * @param pt1 First line point. 152 | * @param pt2 Second line point. 153 | */ 154 | clipLine(imgRect: Rect, pt1: Point, pt2: Point): boolean; 155 | /** 156 | * Draws contours outlines or filled contours. 157 | * The function draws contour outlines in the image if 𝚝𝚑𝚒𝚌𝚔𝚗𝚎𝚜𝚜≥0 or fills the area bounded by the contours if 𝚝𝚑𝚒𝚌𝚔𝚗𝚎𝚜𝚜<0. 158 | * @param image Destination image. 159 | * @param contours All the input contours. Each contour is stored as a point vector 160 | * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 161 | * @param color Color of the contours. 162 | * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, thickness=FILLED ), the contour interiors are drawn 163 | * @param lineType Line connectivity. See LineTypes 164 | * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see maxLevel ). 165 | * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. 
If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is hierarchy available. 166 | * @param offset Optional contour shift parameter. Shift all the drawn contours by the specified 𝚘𝚏𝚏𝚜𝚎𝚝=(dx,dy) . 167 | */ 168 | drawContours( 169 | image: Mat, 170 | contours: MatVector, 171 | contourIdx: number, 172 | color: Scalar, 173 | thickness: number, 174 | lineType: LineTypes, 175 | hierarchy: Mat, 176 | maxLevel: number, 177 | offset: Point 178 | ): void; 179 | drawContours( 180 | image: Mat, 181 | contours: MatVector, 182 | contourIdx: number, 183 | color: Scalar, 184 | thickness: number, 185 | lineType: LineTypes, 186 | hierarchy: Mat, 187 | maxLevel: number 188 | ): void; 189 | drawContours( 190 | image: Mat, 191 | contours: MatVector, 192 | contourIdx: number, 193 | color: Scalar, 194 | thickness: number, 195 | lineType: LineTypes, 196 | hierarchy: Mat 197 | ): void; 198 | drawContours( 199 | image: Mat, 200 | contours: MatVector, 201 | contourIdx: number, 202 | color: Scalar, 203 | thickness: number, 204 | lineType: LineTypes 205 | ): void; 206 | drawContours( 207 | image: Mat, 208 | contours: MatVector, 209 | contourIdx: number, 210 | color: Scalar, 211 | thickness: number 212 | ): void; 213 | drawContours( 214 | image: Mat, 215 | contours: MatVector, 216 | contourIdx: number, 217 | color: Scalar 218 | ): void; 219 | /** 220 | * Draws a marker on a predefined position in an image. 
221 | * @param img Image 222 | * @param position The point where the crosshair is positioned 223 | * @param color Line color 224 | * @param markerType The specific type of marker you want to use, see MarkerTypes 225 | * @param markerSize Line thickness 226 | * @param thickness Type of the line, See LineTypes 227 | * @param line_type The length of the marker axis [default = 20 pixels] 228 | */ 229 | drawMarker( 230 | img: Mat, 231 | position: Point, 232 | color: Scalar, 233 | markerType: MarkerTypes, 234 | markerSize: number, 235 | thickness: number, 236 | line_type: LineTypes 237 | ): void; 238 | drawMarker( 239 | img: Mat, 240 | position: Point, 241 | color: Scalar, 242 | markerType: MarkerTypes, 243 | markerSize: number, 244 | thickness: number 245 | ): void; 246 | drawMarker( 247 | img: Mat, 248 | position: Point, 249 | color: Scalar, 250 | markerType: MarkerTypes, 251 | markerSize: number 252 | ): void; 253 | drawMarker(img: Mat, position: Point, color: Scalar, markerType: MarkerTypes): void; 254 | drawMarker(img: Mat, position: Point, color: Scalar): void; 255 | /** 256 | * Draws a simple or thick elliptic arc or fills an ellipse sector. 257 | * @param img Image 258 | * @param center Center of the ellipse 259 | * @param axes Half of the size of the ellipse main axes 260 | * @param angle Ellipse rotation angle in degrees 261 | * @param startAngle Starting angle of the elliptic arc in degrees 262 | * @param endAngle Ending angle of the elliptic arc in degrees 263 | * @param color Ellipse color. 264 | * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn 265 | * @param lineType Type of the ellipse boundary. See LineTypes 266 | * @param shift Number of fractional bits in the coordinates of the center and values of axes. 
267 | */ 268 | ellipse( 269 | img: Mat, 270 | center: Point, 271 | axes: Size, 272 | angle: number, 273 | startAngle: number, 274 | endAngle: number, 275 | color: Scalar, 276 | thickness: number, 277 | lineType: LineTypes, 278 | shift: number 279 | ): void; 280 | ellipse( 281 | img: Mat, 282 | center: Point, 283 | axes: Size, 284 | angle: number, 285 | startAngle: number, 286 | endAngle: number, 287 | color: Scalar, 288 | thickness: number, 289 | lineType: LineTypes 290 | ): void; 291 | ellipse( 292 | img: Mat, 293 | center: Point, 294 | axes: Size, 295 | angle: number, 296 | startAngle: number, 297 | endAngle: number, 298 | color: Scalar, 299 | thickness: number 300 | ): void; 301 | ellipse( 302 | img: Mat, 303 | center: Point, 304 | axes: Size, 305 | angle: number, 306 | startAngle: number, 307 | endAngle: number, 308 | color: Scalar 309 | ): void; 310 | /** 311 | * Draws a simple or thick elliptic arc or fills an ellipse sector. 312 | * @param img Image 313 | * @param box Alternative ellipse representation via RotatedRect. This means that the function draws an ellipse inscribed in the rotated rectangle 314 | * @param color Ellipse color 315 | * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn. 316 | * @param lineType Type of the ellipse boundary. See LineTypes 317 | */ 318 | ellipse( 319 | img: Mat, 320 | box: RotatedRect, 321 | color: Scalar, 322 | thickness: number, 323 | lineType: LineTypes 324 | ): void; 325 | ellipse(img: Mat, box: RotatedRect, color: Scalar, thickness: number): void; 326 | ellipse(img: Mat, box: RotatedRect, color: Scalar): void; 327 | /** 328 | * Approximates an elliptic arc with a polyline. 329 | * @param center Center of the arc. 330 | * @param axes Half of the size of the ellipse main axes. See ellipse for details. 331 | * @param angle Rotation angle of the ellipse in degrees. See ellipse for details. 
332 | * @param arcStart Starting angle of the elliptic arc in degrees 333 | * @param arcEnd Ending angle of the elliptic arc in degrees 334 | * @param delta Angle between the subsequent polyline vertices. It defines the approximation accuracy 335 | * @param pts Output vector of polyline vertices 336 | */ 337 | ellipse2Poly( 338 | center: Point, 339 | angle: number, 340 | axes: Size, 341 | arcStart: number, 342 | arcEnd: number, 343 | delta: number, 344 | pts: MatVector 345 | ): void; 346 | /** 347 | * Fills a convex polygon. 348 | * @param img Image 349 | * @param pts Polygon vertices. 350 | * @param color Polygon color 351 | * @param lineType Type of the polygon boundaries. See LineTypes 352 | * @param shift Number of fractional bits in the vertex coordinates 353 | */ 354 | fillConvexPoly( 355 | img: Mat, 356 | pts: MatVector, 357 | color: Scalar, 358 | lineType: LineTypes, 359 | shift: number 360 | ): void; 361 | fillConvexPoly(img: Mat, pts: MatVector, color: Scalar, lineType: LineTypes): void; 362 | fillConvexPoly(img: Mat, pts: MatVector, color: Scalar): void; 363 | /** 364 | * Fills the area bounded by one or more polygons 365 | * @param img Image 366 | * @param pts Array of polygons where each polygon is represented as an array of points 367 | * @param color Polygon colo 368 | * @param lineType Type of the polygon boundaries. 
See LineTypes 369 | * @param shift Number of fractional bits in the vertex coordinates 370 | * @param offset Optional offset of all points of the contours 371 | */ 372 | fillPoly( 373 | img: Mat, 374 | pts: MatVector, 375 | color: Scalar, 376 | lineType: LineTypes, 377 | shift: number, 378 | offset: Point 379 | ): void; 380 | fillPoly(img: Mat, pts: MatVector, color: Scalar, lineType: LineTypes, shift: number): void; 381 | fillPoly(img: Mat, pts: MatVector, color: Scalar, lineType: LineTypes): void; 382 | fillPoly(img: Mat, pts: MatVector, color: Scalar): void; 383 | /** 384 | * Calculates the font-specific size to use to achieve a given height in pixels. 385 | * @param fontFace Font to use, see HersheyFonts. 386 | * @param pixelHeight Pixel height to compute the fontScale for 387 | * @param thickness Thickness of lines used to render the text. 388 | * @returns The fontSize to use for cv::putText 389 | */ 390 | getFontScaleFromHeight( 391 | fontFace: HersheyFonts, 392 | pixelHeight: number, 393 | thickness: number 394 | ): number; 395 | getFontScaleFromHeight(fontFace: HersheyFonts, pixelHeight: number): number; 396 | /** 397 | * Calculates the width and height of a text string. 398 | * @param text Input text string 399 | * @param fontFace Font to use, see HersheyFonts. 400 | * @param fontScale Font scale factor that is multiplied by the font-specific base size 401 | * @param thickness Thickness of lines used to render the text 402 | * @param baseLine y-coordinate of the baseline relative to the bottom-most text point. 403 | * @returns The size of a box that contains the specified text. 404 | */ 405 | getTextSize( 406 | text: string, 407 | fontFace: HersheyFonts, 408 | fontScale: number, 409 | thickness: number, 410 | baseLine: number 411 | ): Size; 412 | /** 413 | * Draws a line segment connecting two points. 
414 | * @param img Image 415 | * @param pt1 First point of the line segment 416 | * @param pt2 Second point of the line segment 417 | * @param color Line color 418 | * @param thickness Line thickness 419 | * @param lineType Type of the line. See LineTypes. 420 | * @param shift Number of fractional bits in the point coordinates 421 | */ 422 | line( 423 | img: Mat, 424 | pt1: Point, 425 | pt2: Point, 426 | color: Scalar, 427 | thickness: number, 428 | lineType: LineTypes, 429 | shift: number 430 | ): void; 431 | line( 432 | img: Mat, 433 | pt1: Point, 434 | pt2: Point, 435 | color: Scalar, 436 | thickness: number, 437 | lineType: LineTypes 438 | ): void; 439 | line(img: Mat, pt1: Point, pt2: Point, color: Scalar, thickness: number): void; 440 | line(img: Mat, pt1: Point, pt2: Point, color: Scalar): void; 441 | /** 442 | * Draws several polygonal curves 443 | * @param img Image 444 | * @param pts Array of polygonal curves. 445 | * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, the function draws a line from the last vertex of each curve to its first vertex 446 | * @param color Polyline color 447 | * @param thickness Thickness of the polyline edges 448 | * @param lineType Type of the line segments. See LineTypes 449 | * @param shift Number of fractional bits in the vertex coordinates 450 | */ 451 | polylines( 452 | img: Mat, 453 | pts: MatVector, 454 | isClosed: boolean, 455 | color: Scalar, 456 | thickness: number, 457 | lineType: LineTypes, 458 | shift: number 459 | ): void; 460 | polylines( 461 | img: Mat, 462 | pts: MatVector, 463 | isClosed: boolean, 464 | color: Scalar, 465 | thickness: number, 466 | lineType: LineTypes 467 | ): void; 468 | polylines( 469 | img: Mat, 470 | pts: NDArray, 471 | isClosed: boolean, 472 | color: Scalar, 473 | thickness: number 474 | ): void; 475 | polylines(img: Mat, pts: MatVector, isClosed: boolean, color: Scalar): void; 476 | /** 477 | * Draws a text string. 
478 | * @param img Image 479 | * @param text Text string to be drawn 480 | * @param org Bottom-left corner of the text string in the image 481 | * @param fontFace Font type, see HersheyFonts. 482 | * @param fontScale Font scale factor that is multiplied by the font-specific base size 483 | * @param color Text color 484 | * @param thickness Thickness of the lines used to draw a text 485 | * @param lineType Line type. See LineTypes 486 | * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner 487 | */ 488 | putText( 489 | img: Mat, 490 | text: string, 491 | org: Point, 492 | fontFace: HersheyFonts, 493 | fontScale: number, 494 | color: Scalar, 495 | thickness: number, 496 | lineType: LineTypes, 497 | bottomLeftOrigin: boolean 498 | ): void; 499 | putText( 500 | img: Mat, 501 | text: string, 502 | org: Point, 503 | fontFace: HersheyFonts, 504 | fontScale: number, 505 | color: Scalar, 506 | thickness: number, 507 | lineType: LineTypes 508 | ): void; 509 | putText( 510 | img: Mat, 511 | text: string, 512 | org: Point, 513 | fontFace: HersheyFonts, 514 | fontScale: number, 515 | color: Scalar, 516 | thickness: number 517 | ): void; 518 | putText( 519 | img: Mat, 520 | text: string, 521 | org: Point, 522 | fontFace: HersheyFonts, 523 | fontScale: number, 524 | color: Scalar 525 | ): void; 526 | /** 527 | * Draws a simple, thick, or filled up-right rectangle. 528 | * @param img Image 529 | * @param pt1 Vertex of the rectangle 530 | * @param pt2 Vertex of the rectangle opposite to pt1 531 | * @param color Rectangle color or brightness (grayscale image). 532 | * @param thickness Thickness of lines that make up the rectangle. Negative values, like FILLED, mean that the function has to draw a filled rectangle 533 | * @param lineType Type of the line. See LineTypes 534 | * @param shift Number of fractional bits in the point coordinates. 
535 | */ 536 | rectangle( 537 | img: Mat, 538 | pt1: Point, 539 | pt2: Point, 540 | color: Scalar, 541 | thickness: number, 542 | lineType: LineTypes, 543 | shift: number 544 | ): void; 545 | rectangle( 546 | img: Mat, 547 | pt1: Point, 548 | pt2: Point, 549 | color: Scalar, 550 | thickness: number, 551 | lineType: LineTypes 552 | ): void; 553 | rectangle(img: Mat, pt1: Point, pt2: Point, color: Scalar | number[], thickness: number): void; 554 | rectangle(img: Mat, pt1: Point, pt2: Point, color: Scalar | number[]): void; 555 | } 556 | } 557 | export = DrawingFunctions; 558 | -------------------------------------------------------------------------------- /src/ImageProcessing/Feature.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | 3 | declare module FeatureDetection { 4 | enum HoughModes { 5 | HOUGH_STANDARD = 0, 6 | HOUGH_PROBABILISTIC = 1, 7 | HOUGH_MULTI_SCALE = 2, 8 | HOUGH_GRADIENT = 3, 9 | HOUGH_GRADIENT_ALT = 4, 10 | } 11 | 12 | interface _HoughModes { 13 | HOUGH_STANDARD: HoughModes.HOUGH_STANDARD; 14 | HOUGH_PROBABILISTIC: HoughModes.HOUGH_PROBABILISTIC; 15 | HOUGH_MULTI_SCALE: HoughModes.HOUGH_MULTI_SCALE; 16 | HOUGH_GRADIENT: HoughModes.HOUGH_GRADIENT; 17 | HOUGH_GRADIENT_ALT: HoughModes.HOUGH_GRADIENT_ALT; 18 | } 19 | 20 | enum LineSegmentDetectorModes { 21 | LSD_REFINE_NONE = 0, 22 | LSD_REFINE_STD = 1, 23 | LSD_REFINE_ADV = 2, 24 | } 25 | 26 | interface _LineSegmentDetectorModes { 27 | LSD_REFINE_NONE: LineSegmentDetectorModes.LSD_REFINE_NONE; 28 | LSD_REFINE_STD: LineSegmentDetectorModes.LSD_REFINE_STD; 29 | LSD_REFINE_ADV: LineSegmentDetectorModes.LSD_REFINE_ADV; 30 | } 31 | 32 | interface FeatureDetection { 33 | /** 34 | * Finds edges in an image using the Canny algorithm 35 | * @param image 8-bit input image. 
36 | * @param edges output edge map; single channels 8-bit image, which has the same size as image 37 | * @param threshold1 first threshold for the hysteresis procedure 38 | * @param threshold2 second threshold for the hysteresis procedure 39 | * @param apertureSize aperture size for the Sobel operator 40 | */ 41 | Canny(image: Mat, edges: Mat, threshold1: number, threshold2: number): void; 42 | /** 43 | * Finds edges in an image using the Canny algorithm 44 | * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3). 45 | * @param dy 16-bit y derivative of input image (same type as dx). 46 | * @param edges output edge map; single channels 8-bit image, which has the same size as image 47 | * @param threshold1 first threshold for the hysteresis procedure 48 | * @param threshold2 second threshold for the hysteresis procedure 49 | */ 50 | Canny1(dx: Mat, dy: Mat, edges: Mat, threshold1: number, threshold2: number): void; 51 | /** 52 | * Harris corner detector 53 | * @param src Input single-channel 8-bit or floating-point image. 54 | * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same size as src 55 | * @param blockSize Neighborhood size 56 | * @param ksize Aperture parameter for the Sobel operator 57 | * @param k Harris detector free parameter. See the formula above 58 | */ 59 | cornerHarris(src: Mat, dst: Mat, blockSize: number, ksize: number, k: number): void; 60 | /** 61 | * Calculates the minimal eigenvalue of gradient matrices for corner detection 62 | * @param src Input single-channel 8-bit or floating-point image 63 | * @param dst Image to store the minimal eigenvalues. 
It has the type CV_32FC1 and the same size as src 64 | * @param blockSize Neighborhood size 65 | */ 66 | cornerMinEigenVal(src: Mat, dst: Mat, blockSize: number): void; 67 | /** 68 | * Determines strong corners on an image 69 | * @param image Input 8-bit or floating-point 32-bit, single-channel image 70 | * @param corners Output vector of detected corners 71 | * @param maxCorners Maximum number of corners to return. If there are more corners than are found, the strongest of them is returned. maxCorners <= 0 implies that no limit on the maximum is set and all detected corners are returned 72 | * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue (see cornerMinEigenVal ) or the Harris function response (see cornerHarris ). The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure less than 15 are rejected. 73 | * @param minDistance Minimum possible Euclidean distance between the returned corners 74 | */ 75 | goodFeaturesToTrack( 76 | image: Mat, 77 | corners: Mat, 78 | maxCorners: number, 79 | qualityLevel: number, 80 | minDistance: number 81 | ): void; 82 | /** 83 | *Determines strong corners on an image 84 | * @param image Input 8-bit or floating-point 32-bit, single-channel image 85 | * @param corners Output vector of detected corners 86 | * @param maxCorners Maximum number of corners to return. If there are more corners than are found, the strongest of them is returned. maxCorners <= 0 implies that no limit on the maximum is set and all detected corners are returned 87 | * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. 
The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue (see cornerMinEigenVal ) or the Harris function response (see cornerHarris ). The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure less than 15 are rejected 88 | * @param minDistance Minimum possible Euclidean distance between the returned corners 89 | * @param mask Region of interest. If the image is not empty (it needs to have the type CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected 90 | * @param blockSize Size of an average block for computing a derivative covariation matrix over each pixel neighborhood 91 | * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation. Defaults to 3 92 | */ 93 | goodFeaturesToTrack1( 94 | image: Mat, 95 | corners: Mat, 96 | maxCorners: number, 97 | qualityLevel: number, 98 | minDistance: number, 99 | mask: Mat, 100 | blockSize: number, 101 | gradientSize?: number 102 | ): void; 103 | /** 104 | * Finds circles in a grayscale image using the Hough transform 105 | * @param image 8-bit, single-channel, grayscale input image. 106 | * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element floating-point vector (x,y,radius) or (x,y,radius,votes) 107 | * @param method Detection method, see HoughModes. The available methods are HOUGH_GRADIENT and HOUGH_GRADIENT_ALT. 108 | * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has half as big width and height. 
For HOUGH_GRADIENT_ALT the recommended value is dp=1.5, unless some small very circles need to be detected 109 | * @param minDist Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed 110 | * this two parameter will be helpful to increase the accuracy of the circle detection 111 | * this two parameter are also available in opencv original documentation 112 | * @param param1 it is the higher threshold of the two passed to the Canny edge detector (the lower canny threshold is twice smaller) 113 | * @param param2 it is the accumulator threshold for the circle centers at the detection stage as discussed above. 114 | */ 115 | HoughCircles( 116 | image: Mat, 117 | circles: Mat, 118 | method: HoughModes, 119 | dp: number, 120 | minDist: number, 121 | param1?: number, 122 | param2?: number 123 | ): void; 124 | /** 125 | * Finds lines in a binary image using the standard Hough transform 126 | * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 127 | * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector (ρ,θ) or (ρ,θ,votes) . ρ is the distance from the coordinate origin (0,0) (top-left corner of the image). θ is the line rotation angle in radians ( 0∼vertical line,π/2∼horizontal line ). votes is the value of accumulator 128 | * @param rho Distance resolution of the accumulator in pixels 129 | * @param theta Angle resolution of the accumulator in radians 130 | * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough votes ( >𝚝𝚑𝚛𝚎𝚜𝚑𝚘𝚕𝚍 ). 
131 | */ 132 | HoughLines(image: Mat, lines: Mat, rho: number, theta: number, threshold: number): void; 133 | /** 134 | * Finds line segments in a binary image using the probabilistic Hough transform 135 | * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 136 | * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector (ρ,θ) or (ρ,θ,votes) . ρ is the distance from the coordinate origin (0,0) (top-left corner of the image). θ is the line rotation angle in radians ( 0∼vertical line,π/2∼horizontal line ). votes is the value of accumulator 137 | * @param rho Distance resolution of the accumulator in pixels 138 | * @param theta Angle resolution of the accumulator in radians 139 | * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough votes ( >𝚝𝚑𝚛𝚎𝚜𝚑𝚘𝚕𝚍 ). 140 | */ 141 | HoughLinesP(image: Mat, lines: Mat, rho: number, theta: number, threshold: number): void; 142 | } 143 | } 144 | 145 | export = FeatureDetection; 146 | -------------------------------------------------------------------------------- /src/ImageProcessing/GeometricImageTransformations.d.ts: -------------------------------------------------------------------------------- 1 | import { BorderTypes, DecompTypes } from '../core/CoreArray'; 2 | import { DataTypes } from '../core/HalInterface'; 3 | import { Mat } from '../core/Mat'; 4 | import { Point } from '../core/Point'; 5 | import { Scalar, Size } from '../opencv'; 6 | 7 | declare module GeometricImageTransformations { 8 | enum InterpolationFlags { 9 | INTER_NEAREST = 0, 10 | INTER_LINEAR = 1, 11 | INTER_CUBIC = 2, 12 | INTER_AREA = 3, 13 | INTER_LANCZOS4 = 4, 14 | INTER_LINEAR_EXACT = 5, 15 | INTER_NEAREST_EXACT = 6, 16 | INTER_MAX = 7, 17 | WARP_FILL_OUTLIERS = 8, 18 | WARP_INVERSE_MAP = 16, 19 | } 20 | 21 | interface _InterpolationFlags { 22 | INTER_NEAREST: InterpolationFlags.INTER_NEAREST; 23 | INTER_LINEAR: 
InterpolationFlags.INTER_LINEAR; 24 | INTER_CUBIC: InterpolationFlags.INTER_CUBIC; 25 | INTER_AREA: InterpolationFlags.INTER_AREA; 26 | INTER_LANCZOS4: InterpolationFlags.INTER_LANCZOS4; 27 | INTER_LINEAR_EXACT: InterpolationFlags.INTER_LINEAR_EXACT; 28 | INTER_NEAREST_EXACT: InterpolationFlags.INTER_NEAREST_EXACT; 29 | INTER_MAX: InterpolationFlags.INTER_MAX; 30 | WARP_FILL_OUTLIERS: InterpolationFlags.WARP_FILL_OUTLIERS; 31 | WARP_INVERSE_MAP: InterpolationFlags.WARP_INVERSE_MAP; 32 | } 33 | 34 | enum InterpolationMasks { 35 | INTER_BITS = 5, 36 | INTER_BITS2 = 10, 37 | INTER_TAB_SIZE = 32, 38 | INTER_TAB_SIZE2 = 1024, 39 | } 40 | 41 | interface _InterpolationMasks { 42 | INTER_BITS: InterpolationMasks.INTER_BITS; 43 | INTER_BITS2: InterpolationMasks.INTER_BITS2; 44 | INTER_TAB_SIZE: InterpolationMasks.INTER_TAB_SIZE; 45 | INTER_TAB_SIZE2: InterpolationMasks.INTER_TAB_SIZE2; 46 | } 47 | 48 | enum WarpPolarMode { 49 | WARP_POLAR_LINEAR = 0, 50 | WARP_POLAR_LOG = 256, 51 | } 52 | 53 | interface _WarpPolarMode { 54 | WARP_POLAR_LINEAR: WarpPolarMode.WARP_POLAR_LINEAR; 55 | WARP_POLAR_LOG: WarpPolarMode.WARP_POLAR_LOG; 56 | } 57 | 58 | interface GeometricImageTransformations { 59 | /** 60 | * Converts image transformation maps from one representation to another 61 | * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 . 62 | * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix), respectively. 63 | * @param dstmap1 The first output map that has the type dstmap1type and the same size as src . 64 | * @param dstmap2 The second output map 65 | * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or CV_32FC2 . 
66 | * @param nninterpolation Flag indicating whether the fixed-point maps are used for the nearest-neighbor or for a more complex interpolation 67 | */ 68 | convertMaps( 69 | map1: Mat, 70 | map2: Mat, 71 | dstmap1: Mat, 72 | dstmap2: Mat, 73 | dstmap1type: number | DataTypes, 74 | nninterpolation: boolean 75 | ): void; 76 | /** 77 | * Calculates an affine transform from three pairs of the corresponding points 78 | * @param src Coordinates of triangle vertices in the source image 79 | * @param dst Coordinates of the corresponding triangle vertices in the destination image 80 | */ 81 | getAffineTransform(src: Point, dst: Point): Mat; 82 | /** 83 | * @todo update documentation when added to source 84 | * @param src 85 | * @param dst 86 | */ 87 | getAffineTransform(src: Mat, dst: Mat): Mat; 88 | /** 89 | * Calculates a perspective transform from four pairs of the corresponding points 90 | * @param src Coordinates of quadrangle vertices in the source image 91 | * @param dst Coordinates of the corresponding quadrangle vertices in the destination image 92 | * @param solveMethod method passed to cv::solve (DecompTypes) 93 | */ 94 | getPerspectiveTransform(src: Mat, dst: Mat, solveMethod?: number | DecompTypes): Mat; 95 | /** 96 | * Retrieves a pixel rectangle from an image with sub-pixel accuracy. 97 | * @param image Source image 98 | * @param patchSize Size of the extracted patch 99 | * @param center Floating point coordinates of the center of the extracted rectangle within the source image. The center must be inside the image 100 | * @param patch Extracted patch that has the size patchSize and the same number of channels as src 101 | * @param patchType Depth of the extracted pixels. 
By default, they have the same depth as src 102 | */ 103 | getRectSubPix( 104 | image: Mat, 105 | patchSize: Size, 106 | center: Point, 107 | patch: Mat, 108 | patchType?: number | DataTypes 109 | ): void; 110 | /** 111 | * Calculates an affine matrix of 2D rotation 112 | * @param center Center of the rotation in the source image 113 | * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the coordinate origin is assumed to be the top-left corner). 114 | * @param scale Isotropic scale factor. 115 | */ 116 | getRotationMatrix2D(center: Point, angle: number, scale: number): Mat; 117 | /** 118 | * Inverts an affine transformation 119 | * @param M Original affine transformation 120 | * @param iM Output reverse affine transformation 121 | */ 122 | invertAffineTransform(M: Mat, iM: Mat): void; 123 | /** 124 | * Remaps an image to polar coordinates space 125 | * @param src 126 | * @param dst 127 | * @param center 128 | * @param maxRadius 129 | * @param flags 130 | */ 131 | linearPolar( 132 | src: Mat, 133 | dst: Mat, 134 | center: Point, 135 | maxRadius: number, 136 | flags: WarpPolarMode 137 | ): void; 138 | /** 139 | * Remaps an image to semilog-polar coordinates space 140 | * @param src 141 | * @param dst 142 | * @param center 143 | * @param M 144 | * @param flags 145 | */ 146 | logPolar(src: Mat, dst: Mat, center: Point, M: number, flags: WarpPolarMode): void; 147 | /** 148 | * Applies a generic geometrical transformation to an image 149 | * @param src Source image 150 | * @param dst Destination image. It has the same size as map1 and the same type as src . 151 | * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 , CV_32FC1, or CV_32FC2. 
See convertMaps for details on converting a floating point representation to fixed-point for speed 152 | * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively 153 | * @param interpolation Interpolation method (see InterpolationFlags). The methods INTER_AREA and INTER_LINEAR_EXACT are not supported by this function 154 | * @param borderMode Pixel extrapolation method (see BorderTypes). When borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that corresponds to the "outliers" in the source image are not modified by the function 155 | * @param borderValue Value used in case of a constant border. By default, it is 0. 156 | */ 157 | remap( 158 | src: Mat, 159 | dst: Mat, 160 | map1: Mat, 161 | map2: Mat, 162 | interpolation: number | InterpolationFlags, 163 | borderMode: number | BorderTypes, 164 | borderValue: Scalar 165 | ): void; 166 | remap( 167 | src: Mat, 168 | dst: Mat, 169 | map1: Mat, 170 | map2: Mat, 171 | interpolation: number | InterpolationFlags, 172 | borderMode: number | BorderTypes 173 | ): void; 174 | remap( 175 | src: Mat, 176 | dst: Mat, 177 | map1: Mat, 178 | map2: Mat, 179 | interpolation: number | InterpolationFlags 180 | ): void; 181 | /** 182 | * Resizes an image 183 | * @param src Source image 184 | * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from src.size(), fx, and fy; the type of dst is the same as of src. 
185 | * @param dsize output image size 186 | * @param fx scale factor along the horizontal axis 187 | * @param fy scale factor along the vertical axis 188 | * @param interpolation interpolation method, see InterpolationFlags 189 | * @example 190 | * let src = cv.imread('canvasInput'); 191 | * let dst = new cv.Mat(); 192 | * let dsize = new cv.Size(300, 300); 193 | * // You can try more different parameters 194 | * cv.resize(src, dst, dsize, 0, 0, cv.INTER_AREA); 195 | * cv.imshow('canvasOutput', dst); 196 | * src.delete(); dst.delete(); 197 | */ 198 | resize( 199 | src: Mat, 200 | dst: Mat, 201 | dsize: Size, 202 | fx: number, 203 | fy: number, 204 | interpolation: number | InterpolationFlags 205 | ): void; 206 | resize(src: Mat, dst: Mat, dsize: Size, fx: number, fy: number): void; 207 | resize(src: Mat, dst: Mat, dsize: Size): void; 208 | /** 209 | * Applies an affine transformation to an image. 210 | * @param src input image 211 | * @param dst output image that has the size dsize and the same type as src 212 | * @param M 2×3 transformation matrix. 213 | * @param dsize size of the output image 214 | * @param flags combination of interpolation methods (see InterpolationFlags) and the optional flag WARP_INVERSE_MAP that means that M is the inverse transformation 215 | * @param borderMode pixel extrapolation method (see BorderTypes); when borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to the "outliers" in the source image are not modified by the function 216 | * @param borderValue value used in case of a constant border; by default, it is 0. 
217 | */ 218 | warpAffine( 219 | src: Mat, 220 | dst: Mat, 221 | M: Mat, 222 | dsize: Size, 223 | flags: InterpolationFlags, 224 | borderMode: BorderTypes, 225 | borderValue: Scalar 226 | ): void; 227 | warpAffine( 228 | src: Mat, 229 | dst: Mat, 230 | M: Mat, 231 | dsize: Size, 232 | flags: InterpolationFlags, 233 | borderMode: BorderTypes 234 | ): void; 235 | warpAffine(src: Mat, dst: Mat, M: Mat, dsize: Size, flags: InterpolationFlags): void; 236 | warpAffine(src: Mat, dst: Mat, M: Mat, dsize: Size): void; 237 | /** 238 | * Applies a perspective transformation to an image 239 | * @param src input image 240 | * @param dst output image that has the size dsize and the same type as src 241 | * @param M output image that has the size dsize and the same type as src 242 | * @param dsize size of the output image 243 | * @param flags combination of interpolation methods (INTER_LINEAR or INTER_NEAREST) and the optional flag WARP_INVERSE_MAP, that sets M as the inverse transformation ( 𝚍𝚜𝚝→𝚜𝚛𝚌 ). 244 | * @param borderMode pixel extrapolation method (BORDER_CONSTANT or BORDER_REPLICATE). 245 | * @param borderValue value used in case of a constant border; by default, it equals 0. 246 | */ 247 | warpPerspective( 248 | src: Mat, 249 | dst: Mat, 250 | M: Mat, 251 | dsize: Size, 252 | flags: InterpolationFlags, 253 | borderMode: BorderTypes, 254 | borderValue: Scalar 255 | ): void; 256 | warpPerspective( 257 | src: Mat, 258 | dst: Mat, 259 | M: Mat, 260 | dsize: Size, 261 | flags: InterpolationFlags, 262 | borderMode: BorderTypes 263 | ): void; 264 | warpPerspective(src: Mat, dst: Mat, M: Mat, dsize: Size, flags: InterpolationFlags): void; 265 | warpPerspective(src: Mat, dst: Mat, M: Mat, dsize: Size): void; 266 | /** 267 | * 268 | * @param src Source image 269 | * @param dst Destination image. 
It will have same type as src 270 | * @param dsize The destination image size (see description for valid options) 271 | * @param center The transformation center 272 | * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too 273 | * @param flags A combination of interpolation methods, InterpolationFlags + WarpPolarMode. 274 | * - Add cv.WARP_POLAR_LINEAR to select linear polar mapping (default) 275 | * - Add cv.WARP_POLAR_LOG to select semilog polar mapping 276 | * - Add cv.WARP_INVERSE_MAP for reverse mapping. 277 | */ 278 | warpPolar( 279 | src: Mat, 280 | dst: Mat, 281 | dsize: Size, 282 | center: Point, 283 | maxRadius: number, 284 | flags: number 285 | ): void; 286 | } 287 | } 288 | export = GeometricImageTransformations; 289 | -------------------------------------------------------------------------------- /src/ImageProcessing/Histograms.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | import { MatVector } from '../core/MatVector'; 3 | import { DistanceTypes } from './Misc'; 4 | 5 | declare module Histograms { 6 | enum HistCompMethods { 7 | HISTCMP_CORREL = 0, 8 | HISTCMP_CHISQR = 1, 9 | HISTCMP_INTERSECT = 2, 10 | HISTCMP_BHATTACHARYYA = 3, 11 | HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA, 12 | HISTCMP_CHISQR_ALT = 4, 13 | HISTCMP_KL_DIV = 5, 14 | } 15 | 16 | interface _HistCompMethods { 17 | HISTCMP_CORREL: HistCompMethods.HISTCMP_CORREL; 18 | HISTCMP_CHISQR: HistCompMethods.HISTCMP_CHISQR; 19 | HISTCMP_INTERSECT: HistCompMethods.HISTCMP_INTERSECT; 20 | HISTCMP_BHATTACHARYYA: HistCompMethods.HISTCMP_BHATTACHARYYA; 21 | HISTCMP_HELLINGER: HistCompMethods.HISTCMP_HELLINGER; 22 | HISTCMP_CHISQR_ALT: HistCompMethods.HISTCMP_CHISQR_ALT; 23 | HISTCMP_KL_DIV: HistCompMethods.HISTCMP_KL_DIV; 24 | } 25 | 26 | interface Histograms { 27 | /** 28 | * 29 | * @param images source arrays. 
They all should have the same depth, cv.CV_8U, cv.CV_16U or cv.CV_32F , and the same size. Each of them can have an arbitrary number of channels 30 | * @param channels the list of channels used to compute the back projection. The number of channels must match the histogram dimensionality 31 | * @param hist input histogram that can be dense or sparse 32 | * @param dst destination back projection array that is a single-channel array of the same size and depth as images[0]. 33 | * @param ranges array of arrays of the histogram bin boundaries in each dimension 34 | * @param scale optional scale factor for the output back projection. 35 | * @example 36 | * let src = cv.imread('srcCanvasInput'); 37 | * let dst = cv.imread('dstCanvasInput'); 38 | * cv.cvtColor(src, src, cv.COLOR_RGB2HSV, 0); 39 | * cv.cvtColor(dst, dst, cv.COLOR_RGB2HSV, 0); 40 | * let srcVec = new cv.MatVector(); 41 | * let dstVec = new cv.MatVector(); 42 | * srcVec.push_back(src); dstVec.push_back(dst); 43 | * let backproj = new cv.Mat(); 44 | * let none = new cv.Mat(); 45 | * let mask = new cv.Mat(); 46 | * let hist = new cv.Mat(); 47 | * let channels = [0]; 48 | * let histSize = [50]; 49 | * let ranges = [0, 180]; 50 | * let accumulate = false; 51 | * cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate); 52 | * cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX, -1, none); 53 | * cv.calcBackProject(dstVec, channels, hist, backproj, ranges, 1); 54 | * cv.imshow('canvasOutput', backproj); 55 | * src.delete(); dst.delete(); srcVec.delete(); dstVec.delete(); 56 | * backproj.delete(); mask.delete(); hist.delete(); none.delete(); 57 | */ 58 | calcBackProject( 59 | images: Mat | MatVector, 60 | channels: number | number[], 61 | hist: Mat, 62 | dst: Mat, 63 | ranges: MatVector | number[], 64 | scale?: number 65 | ): void; 66 | /** 67 | * Calculates a histogram of a set of arrays 68 | * @param image Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the same size. 
Each of them can have an arbitrary number of channels 69 | * @param channels List of the dims channels used to compute the histogram. The first array channels are numerated from 0 to images[0].channels()-1 , the second array channels are counted from images[0].channels() to images[0].channels() + images[1].channels()-1, and so on. 70 | * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram 71 | * @param hist Output histogram, which is a dense or sparse dims -dimensional array 72 | * @param histSize Array of histogram sizes in each dimension 73 | * @param ranges Array of the dims arrays of the histogram bin boundaries in each dimension 74 | * @param accumulate Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time 75 | * @example 76 | * let src = cv.imread('canvasInput'); 77 | * cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0); 78 | * let srcVec = new cv.MatVector(); 79 | * srcVec.push_back(src); 80 | * let accumulate = false; 81 | * let channels = [0]; 82 | * let histSize = [256]; 83 | * let ranges = [0, 255]; 84 | * let hist = new cv.Mat(); 85 | * let mask = new cv.Mat(); 86 | * let color = new cv.Scalar(255, 255, 255); 87 | * let scale = 2; 88 | * // You can try more different parameters 89 | * cv.calcHist(srcVec, channels, mask, hist, histSize, ranges, accumulate); 90 | * let result = cv.minMaxLoc(hist, mask); 91 | * let max = result.maxVal; 92 | * let dst = new cv.Mat.zeros(src.rows, histSize[0] * scale, cv.CV_8UC3); 93 | * // draw histogram 94 | * for (let i = 0; i < histSize[0]; i++) { 95 | * let binVal = hist.data32F[i] * src.rows / max; 96 | * let point1 = new cv.Point(i * scale, src.rows - 1); 97 | * let point2 = new cv.Point((i + 1) * scale - 1, src.rows - 
binVal); 98 | * cv.rectangle(dst, point1, point2, color, cv.FILLED); 99 | * } 100 | * cv.imshow('canvasOutput', dst); 101 | * src.delete(); dst.delete(); srcVec.delete(); mask.delete(); hist.delete(); 102 | */ 103 | calcHist( 104 | image: Mat | MatVector, 105 | channels: number | number[], 106 | mask: Mat, 107 | hist: Mat, 108 | histSize: number | number[], 109 | ranges: number | number[], 110 | accumulate?: boolean 111 | ): void; 112 | /** 113 | * 114 | * @param H1 First compared histogram 115 | * @param H2 Second compared histogram of the same size as H1 116 | * @param method Comparison method, see HistCompMethods 117 | * @returns returns d(H1,H2) 118 | */ 119 | compareHist(H1: Mat, H2: Mat, method: HistCompMethods): number; 120 | /** 121 | * Computes the "minimal work" distance between two weighted point configurations 122 | * @param signature1 First signature, a 𝚜𝚒𝚣𝚎𝟷×𝚍𝚒𝚖𝚜+1 floating-point matrix. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a single column (weights only) if the user-defined cost matrix is used. The weights must be non-negative and have at least one non-zero value 123 | * @param signature2 Second signature of the same format as signature1 , though the number of rows may be different. The total weights may be different. In this case an extra "dummy" point is added to either signature1 or signature2. The weights must be non-negative and have at least one non-zero value 124 | * @param distType Used metric. See DistanceTypes. 125 | * @param cost User-defined 𝚜𝚒𝚣𝚎𝟷×𝚜𝚒𝚣𝚎𝟸 cost matrix. Also, if a cost matrix is used, lower boundary lowerBound cannot be calculated because it needs a metric function 126 | * @param lowerBound Optional input/output parameter: lower boundary of a distance between the two signatures that is a distance between mass centers. 
The lower boundary may not be calculated if the user-defined cost matrix is used, the total weights of point configurations are not equal, or if the signatures consist of weights only (the signature matrices have a single column). You must** initialize *lowerBound . If the calculated distance between mass centers is greater or equal to *lowerBound (it means that the signatures are far enough), the function does not calculate EMD. In any case *lowerBound is set to the calculated distance between mass centers on return. Thus, if you want to calculate both distance between mass centers and EMD, *lowerBound should be set to 0. 127 | * @param flow Resultant 𝚜𝚒𝚣𝚎𝟷×𝚜𝚒𝚣𝚎𝟸 flow matrix: 𝚏𝚕𝚘𝚠i,j is a flow from i -th point of signature1 to j -th point of signature2 128 | */ 129 | EMD( 130 | signature1: Mat, 131 | signature2: Mat, 132 | distType: DistanceTypes, 133 | cost: MatVector | Mat, 134 | lowerBound: number, 135 | flow?: Mat 136 | ): number; 137 | /** 138 | * Equalizes the histogram of a grayscale image 139 | * @param src Source 8-bit single channel image 140 | * @param dst Destination image of the same size and type as src 141 | * let src = cv.imread('canvasInput'); 142 | * @example 143 | * let dst = new cv.Mat(); 144 | * cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0); 145 | * cv.equalizeHist(src, dst); 146 | * cv.imshow('canvasOutput', src); 147 | * cv.imshow('canvasOutput', dst); 148 | * src.delete(); dst.delete(); 149 | */ 150 | equalizeHist(src: Mat, dst: Mat): void; 151 | } 152 | } 153 | export = Histograms; 154 | -------------------------------------------------------------------------------- /src/ImageProcessing/ImageFiltering.d.ts: -------------------------------------------------------------------------------- 1 | import { NDArray } from '../core/Core'; 2 | import { BorderTypes } from '../core/CoreArray'; 3 | import { DataTypes } from '../core/HalInterface'; 4 | import { Mat } from '../core/Mat'; 5 | import { Scalar } from '../core/Scalar'; 6 | import { 
TermCriteria } from '../core/TermCriteria'; 7 | import { Point, Size } from '../opencv'; 8 | import { MatVector } from '../core/MatVector' 9 | 10 | declare module ImageFiltering { 11 | enum MorphShapes { 12 | MORPH_RECT = 'MORPH_RECT', 13 | MORPH_CROSS = 'MORPH_CROSS', 14 | MORPH_ELLIPSE = 'MORPH_ELLIPSE', 15 | } 16 | 17 | interface _MorphShapes { 18 | MORPH_RECT: MorphShapes.MORPH_RECT; 19 | MORPH_CROSS: MorphShapes.MORPH_CROSS; 20 | MORPH_ELLIPSE: MorphShapes.MORPH_ELLIPSE; 21 | } 22 | 23 | enum MorphTypes { 24 | MORPH_ERODE = 'MORPH_ERODE', 25 | MORPH_DILATE = 'MORPH_DILATE', 26 | MORPH_OPEN = 'MORPH_OPEN', 27 | MORPH_CLOSE = 'MORPH_CLOSE', 28 | MORPH_GRADIENT = 'MORPH_GRADIENT', 29 | MORPH_TOPHAT = 'MORPH_TOPHAT', 30 | MORPH_BLACKHAT = 'MORPH_BLACKHAT', 31 | MORPH_HITMISS = 'MORPH_HITMISS', 32 | } 33 | 34 | interface _MorphTypes { 35 | MORPH_ERODE: MorphTypes.MORPH_ERODE; 36 | MORPH_DILATE: MorphTypes.MORPH_DILATE; MORPH_OPEN: MorphTypes.MORPH_OPEN; // MORPH_OPEN was missing although MorphTypes declares it 37 | MORPH_CLOSE: MorphTypes.MORPH_CLOSE; 38 | MORPH_GRADIENT: MorphTypes.MORPH_GRADIENT; 39 | MORPH_TOPHAT: MorphTypes.MORPH_TOPHAT; 40 | MORPH_BLACKHAT: MorphTypes.MORPH_BLACKHAT; 41 | MORPH_HITMISS: MorphTypes.MORPH_HITMISS; 42 | } 43 | 44 | enum SpecialFilter { 45 | FILTER_SCHARR = 'FILTER_SCHARR', 46 | } 47 | 48 | interface _SpecialFilter { 49 | FILTER_SCHARR: SpecialFilter.FILTER_SCHARR; 50 | } 51 | 52 | interface ImageFiltering { 53 | /** 54 | * Applies the bilateral filter to an image. 55 | * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. 56 | * @param dst Destination image of the same size and type as src . 57 | * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace. 58 | * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color. 
59 | * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace. 60 | * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes 61 | */ 62 | bilateralFilter( 63 | src: Mat, 64 | dst: Mat, 65 | d: number, 66 | sigmaColor: number, 67 | sigmaSpace: number, 68 | borderType: BorderTypes 69 | ): void; 70 | /** 71 | * 72 | * @param src input image; it can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 73 | * @param dst output image of the same size and type as src. 74 | * @param ksize blurring kernel size. 75 | * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel center. 76 | * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes. BORDER_WRAP is not supported. 77 | */ 78 | blur(src: Mat, dst: Mat, ksize: Size, anchor: Point, borderType: BorderTypes): void; 79 | /** 80 | * Blurs an image using the box filter. 81 | * @param src input image. 82 | * @param dst output image of the same size and type as src. 83 | * @param ddepth the output image depth (-1 to use src.depth()). 84 | * @param ksize blurring kernel size. 85 | * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel center. 86 | * @param normalize flag, specifying whether the kernel is normalized by its area or not. 87 | * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes. BORDER_WRAP is not supported. 
88 | */ 89 | boxFilter( 90 | src: Mat, 91 | dst: Mat, 92 | ddepth: number | DataTypes, 93 | ksize: Size, 94 | anchor: Point, 95 | normalize: boolean, 96 | borderType: BorderTypes 97 | ): void; 98 | /** 99 | * Constructs the Gaussian pyramid for an image. 100 | * @param src Source image. Check pyrDown for the list of supported types. 101 | * @param dst Destination vector of maxlevel+1 images of the same type as src. dst[0] will be the same as src. dst[1] is the next pyramid layer, a smoothed and down-sized src, and so on. 102 | * @param maxlevel 0-based index of the last (the smallest) pyramid layer. It must be non-negative. 103 | * @param borderType Pixel extrapolation method, see BorderTypes (BORDER_CONSTANT isn't supported) 104 | */ 105 | buildPyramid(src: Mat, dst: Mat, maxlevel: number, borderType: BorderTypes): void; 106 | /** 107 | * Dilates an image by using a specific structuring element. 108 | * @param src input image; the number of channels can be arbitrary, but the depth should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F 109 | * @param dst output image of the same size and type as src. 110 | * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular structuring element is used. Kernel can be created using getStructuringElement 111 | * @param anchor position of the anchor within the element; default value (-1, -1) means that the anchor is at the element center. 112 | * @param iterations number of times dilation is applied. 113 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not suported. 114 | * @param borderValue border value in case of a constant border 115 | */ 116 | dilate( 117 | src: Mat, 118 | dst: Mat, 119 | kernel: Mat, 120 | anchor: Point, 121 | iterations: number, 122 | borderType: BorderTypes, 123 | borderValue: Scalar 124 | ): void; 125 | /** 126 | * Erodes an image by using a specific structuring element. 
127 | * @param src input image; the number of channels can be arbitrary, but the depth should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F 128 | * @param dst output image of the same size and type as src. 129 | * @param kernel structuring element used for erosion; if element=Mat(), a 3 x 3 rectangular structuring element is used. Kernel can be created using getStructuringElement 130 | * @param anchor position of the anchor within the element; default value (-1, -1) means that the anchor is at the element center. 131 | * @param iterations number of times erosion is applied. 132 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 133 | * @param borderValue border value in case of a constant border 134 | */ 135 | erode( 136 | src: Mat, 137 | dst: Mat, 138 | kernel: Mat, 139 | anchor: Point, 140 | iterations: number, 141 | borderType: BorderTypes, 142 | borderValue: Scalar 143 | ): void; 144 | /** 145 | * Convolves an image with the kernel. 146 | * @param src input image. 147 | * @param dst output image of the same size and the same number of channels as src. 148 | * @param ddepth desired depth of the destination image 149 | * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point matrix; if you want to apply different kernels to different channels, split the image into separate color planes using split and process them individually. 150 | * @param anchor anchor of the kernel that indicates the relative position of a filtered point within the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor is at the kernel center. 151 | * @param delta optional value added to the filtered pixels before storing them in dst. 152 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 
153 | */ 154 | filter2D( 155 | src: Mat, 156 | dst: Mat, 157 | ddepth: number | DataTypes, 158 | kernel: MatVector | Mat, 159 | anchor: Point, 160 | delta: number, 161 | borderType: BorderTypes 162 | ): void; 163 | /** 164 | * Blurs an image using a Gaussian filter. 165 | * @param src input image; the image can have any number of channels, which are processed independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 166 | * @param dst output image of the same size and type as src. 167 | * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be positive and odd. Or, they can be zero's and then they are computed from sigma. 168 | * @param sigmaX Gaussian kernel standard deviation in X direction. 169 | * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, respectively (see getGaussianKernel for details); to fully control the result regardless of possible future modifications of all this semantics, it is recommended to specify all of ksize, sigmaX, and sigmaY. 170 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 171 | */ 172 | GaussianBlur( 173 | src: Mat, 174 | dst: Mat, 175 | ksize: number | Size, 176 | sigmaX: number, 177 | sigmaY: number, 178 | borderType: BorderTypes 179 | ): void; 180 | /** 181 | * Returns filter coefficients for computing spatial image derivatives. 182 | * @param kx Output matrix of row filter coefficients. It has the type ktype . 183 | * @param ky Output matrix of column filter coefficients. It has the type ktype . 184 | * @param dx Derivative order in respect of x. 185 | * @param dy Derivative order in respect of y. 186 | * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7. 
187 | * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not. Theoretically, the coefficients should have the denominator =2ksize∗2−dx−dy−2. If you are going to filter floating-point images, you are likely to use the normalized kernels. But if you compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve all the fractional bits, you may want to set normalize=false . 188 | * @param ktype Type of filter coefficients. It can be CV_32f or CV_64F . 189 | */ 190 | getDerivKernels( 191 | kx: MatVector | Mat, 192 | ky: MatVector | Mat, 193 | dx: number, 194 | dy: number, 195 | ksize: 1 | 3 | 5 | 7 | SpecialFilter.FILTER_SCHARR, 196 | normalize: boolean, 197 | ktype: DataTypes.CV_32F | DataTypes.CV_64F 198 | ): void; 199 | /** 200 | * 201 | * @param ksize Size of the filter returned. 202 | * @param sigma Standard deviation of the gaussian envelope. 203 | * @param theta Orientation of the normal to the parallel stripes of a Gabor function. 204 | * @param lambd Wavelength of the sinusoidal factor. 205 | * @param gamma Spatial aspect ratio. 206 | * @param psi Phase offset. 207 | * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F . 208 | */ 209 | getGaborKernel( 210 | ksize: Size, 211 | sigma: number, 212 | theta: number, 213 | lambd: number, 214 | gamma: number, 215 | psi: number, 216 | ktype: DataTypes.CV_32F | DataTypes.CV_64F 217 | ): Mat; 218 | /** 219 | * Returns Gaussian filter coefficients. 220 | * @param ksize Aperture size. It should be odd ( 𝚔𝚜𝚒𝚣𝚎mod2=1 ) and positive. 221 | * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8. 222 | * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F . 
223 | */ 224 | getGaussianKernel( 225 | ksize: number, 226 | sigma: number, 227 | ktype: DataTypes.CV_32F | DataTypes.CV_64F 228 | ): Mat; 229 | /** 230 | * Returns a structuring element of the specified size and shape for morphological operations. 231 | * @param shape Element shape that could be one of MorphShapes 232 | * @param ksize Size of the structuring element. 233 | * @param anchor Anchor position within the element. The default value (−1,−1) means that the anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor position. In other cases the anchor just regulates how much the result of the morphological operation is shifted. 234 | */ 235 | getStructuringElement(shape: MorphShapes, ksize: Size, anchor: Point): Mat; 236 | /** 237 | * Calculates the Laplacian of an image. 238 | * @param src Source image. 239 | * @param dst Destination image of the same size and the same number of channels as src . 240 | * @param ddepth Desired depth of the destination image. 241 | * @param ksize Aperture size used to compute the second-derivative filters. See getDerivKernels for details. The size must be positive and odd. 242 | * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is applied. See getDerivKernels for details. 243 | * @param delta Optional delta value that is added to the results prior to storing them in dst 244 | * @param borderType Pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 245 | */ 246 | Laplacian( 247 | src: Mat, 248 | dst: Mat, 249 | ddepth: number | DataTypes, 250 | ksize: number, 251 | scale: number, 252 | delta: number, 253 | borderType: BorderTypes 254 | ): void; 255 | /** 256 | * Blurs an image using the median filter. 257 | * The function smoothes an image using the median filter with the 𝚔𝚜𝚒𝚣𝚎×𝚔𝚜𝚒𝚣𝚎 aperture. Each channel of a multi-channel image is processed independently. In-place operation is supported. 
258 | * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U. 259 | * @param dst destination array of the same size and type as src. 260 | * @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ... 261 | */ 262 | medianBlur(src: Mat, dst: Mat, ksize: number): void; 263 | /** 264 | * returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. 265 | */ 266 | morphologyDefaultBorderValue(): Scalar; 267 | /** 268 | * Performs advanced morphological transformations. 269 | * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as basic operations. 270 | * Any of the operations can be done in-place. In case of multi-channel images, each channel is processed independently. 271 | * @param src Source image. The number of channels can be arbitrary. The depth should be one of CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 272 | * @param dst Destination image of the same size and type as source image. 273 | * @param op Type of a morphological operation, see MorphTypes 274 | * @param kernel Structuring element. It can be created using getStructuringElement. 275 | * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the kernel center. 276 | * @param iterations Number of times erosion and dilation are applied. 277 | * @param borderType Pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 278 | * @param borderValue Border value in case of a constant border. The default value has a special meaning. 
279 | */ 280 | morphologyEx( 281 | src: Mat, 282 | dst: Mat, 283 | op: MorphTypes, 284 | kernel: Mat, 285 | anchor: Point, 286 | iterations: number, 287 | borderType: BorderTypes, 288 | borderValue: Scalar 289 | ): void; 290 | /** 291 | * Blurs an image and downsamples it. 292 | * @param src input image. 293 | * @param dst output image; it has the specified size and the same type as src. 294 | * @param dstsize size of the output image. 295 | * @param borderType Pixel extrapolation method, see BorderTypes (BORDER_CONSTANT isn't supported) 296 | */ 297 | pyrDown(src: Mat, dst: Mat, dstsize: Size, borderType: BorderTypes): void; 298 | /** 299 | * Performs initial step of meanshift segmentation of an image. 300 | * @param src The source 8-bit, 3-channel image. 301 | * @param dst The destination image of the same format and the same size as the source 302 | * @param sp The spatial window radius 303 | * @param sr The color window radius 304 | * @param maxLevel Maximum level of the pyramid for the segmentation 305 | * @param termcrit Termination criteria: when to stop meanshift iterations 306 | */ 307 | pyrMeanShiftFiltering( 308 | src: Mat, 309 | dst: Mat, 310 | sp: number, 311 | sr: number, 312 | maxLevel: number, 313 | termcrit: TermCriteria 314 | ): void; 315 | /** 316 | * Upsamples an image and then blurs it. 317 | * @param src input image 318 | * @param dst output image. It has the specified size and the same type as src 319 | * @param dstsize size of the output image 320 | * @param borderType Pixel extrapolation method, see BorderTypes (only BORDER_DEFAULT is supported) 321 | */ 322 | pyrUp(src: Mat, dst: Mat, dstsize: Size, borderType: BorderTypes): void; 323 | /** 324 | * Calculates the first x- or y- image derivative using Scharr operator. 325 | * @param src input image. 326 | * @param dst output image of the same size and the same number of channels as src. 
327 | * @param ddepth output image depth, @see https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#filter_depths 328 | * @param dx order of the derivative x. 329 | * @param dy order of the derivative y. 330 | * @param scale optional scale factor for the computed derivative values; by default, no scaling is applied (see getDerivKernels for details). 331 | * @param delta optional delta value that is added to the results prior to storing them in dst. 332 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 333 | */ 334 | Scharr( 335 | src: Mat, 336 | dst: Mat, 337 | ddepth: number | DataTypes, 338 | dx: number, 339 | dy: number, 340 | scale: number, 341 | delta: number, 342 | borderType: BorderTypes 343 | ): void; 344 | /** 345 | * Applies a separable linear filter to an image. 346 | * 347 | * Depth combinations 348 | * when ddepth=-1, the output image will have the same depth as the source. 349 | * 350 | * -------------------------------------------------------- 351 | * | Input depth (src.depth()) | Output depth (ddepth) | 352 | * |======================================================= 353 | * | CV_8U | -1/CV_16S/CV_32F/CV_64F | 354 | * | CV_16U/CV_16S | -1/CV_32F/CV_64F | 355 | * | CV_32F | -1/CV_32F/CV_64F | 356 | * | CV_64F | -1/CV_64F | 357 | * -------------------------------------------------------- 358 | * 359 | * @param src Source image. 360 | * @param dst Destination image of the same size and the same number of channels as src . 361 | * @param ddepth Destination image depth, @see https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#filter_depths 362 | * @param kernelX Coefficients for filtering each row. 363 | * @param kernelY Coefficients for filtering each column. 364 | * @param anchor Anchor position within the kernel. 
The default value (−1,−1) means that the anchor is at the kernel center 365 | * @param delta Value added to the filtered results before storing them 366 | * @param borderType Pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 367 | */ 368 | sepFilter2D( 369 | src: Mat, 370 | dst: Mat, 371 | ddepth: number | DataTypes, 372 | kernelX: MatVector | Mat, 373 | kernelY: MatVector | Mat, 374 | anchor: Point, 375 | delta: number, 376 | borderType: BorderTypes 377 | ): void; 378 | /** 379 | * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 380 | * Depth combinations 381 | * when ddepth=-1, the output image will have the same depth as the source. 382 | * 383 | * -------------------------------------------------------- 384 | * | Input depth (src.depth()) | Output depth (ddepth) | 385 | * |======================================================= 386 | * | CV_8U | -1/CV_16S/CV_32F/CV_64F | 387 | * | CV_16U/CV_16S | -1/CV_32F/CV_64F | 388 | * | CV_32F | -1/CV_32F/CV_64F | 389 | * | CV_64F | -1/CV_64F | 390 | * ------------------------------------------------------- 391 | * @param src input image. 392 | * @param dst output image of the same size and the same number of channels as src . 393 | * @param ddepth output image depth, @see https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#filter_depths in the case of 8-bit input images it will result in truncated derivatives. 394 | * @param dx order of the derivative x. 395 | * @param dy order of the derivative y. 396 | * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. 397 | * @param scale optional scale factor for the computed derivative values; by default, no scaling is applied (see getDerivKernels for details). 398 | * @param delta optional delta value that is added to the results prior to storing them in dst. 
399 | * @param borderType pixel extrapolation method, see BorderTypes. BORDER_WRAP is not supported. 400 | */ 401 | Sobel( 402 | src: Mat, 403 | dst: Mat, 404 | ddepth: number | DataTypes, 405 | dx: number, 406 | dy: number, 407 | ksize: number, 408 | scale: number, 409 | delta: number, 410 | borderType: BorderTypes 411 | ): void; 412 | /** 413 | * Calculates the first order image derivative in both x and y using a Sobel operator. 414 | * @param src input image. 415 | * @param dx output image with first-order derivative in x. 416 | * @param dy output image with first-order derivative in y. 417 | * @param ksize size of Sobel kernel. It must be 3. 418 | * @param borderType pixel extrapolation method, see BorderTypes. Only BORDER_DEFAULT=BORDER_REFLECT_101 and BORDER_REPLICATE are supported. 419 | */ 420 | spatialGradient( 421 | src: Mat, 422 | dx: MatVector | Mat, 423 | dy: MatVector | Mat, 424 | ksize: number, 425 | borderType: BorderTypes 426 | ): void; 427 | /** 428 | * Calculates the normalized sum of squares of the pixel values overlapping the filter. 429 | * @param src input image 430 | * @param dst output image of the same size and type as _src 431 | * @param ddepth output image depth, @see https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#filter_depths in the case of 8-bit input images it will result in truncated derivatives. 432 | * @param ksize kernel size 433 | * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel center 434 | * @param normalize flag, specifying whether the kernel is to be normalized by its area or not 435 | * @param borderType border mode used to extrapolate pixels outside of the image, see BorderTypes. 
BORDER_WRAP is not supported 436 | */ 437 | sqrBoxFilter( 438 | src: Mat, 439 | dst: Mat, 440 | ddepth: number | DataTypes, 441 | ksize: Size, 442 | anchor: Point, 443 | normalize: boolean, 444 | borderType: BorderTypes 445 | ): void; 446 | } 447 | } 448 | export = ImageFiltering; 449 | -------------------------------------------------------------------------------- /src/ImageProcessing/Misc.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | import { Point } from '../core/Point'; 3 | import { Rect } from '../core/Rect'; 4 | import { Scalar } from '../core/Scalar'; 5 | 6 | declare module Misc { 7 | enum AdaptiveThresholdTypes { 8 | ADAPTIVE_THRESH_MEAN_C = 0, 9 | ADAPTIVE_THRESH_GAUSSIAN_C = 1, 10 | } 11 | 12 | interface _AdaptiveThresholdTypes { 13 | ADAPTIVE_THRESH_MEAN_C: AdaptiveThresholdTypes.ADAPTIVE_THRESH_MEAN_C; 14 | ADAPTIVE_THRESH_GAUSSIAN_C: AdaptiveThresholdTypes.ADAPTIVE_THRESH_GAUSSIAN_C; 15 | } 16 | 17 | enum DistanceTransformLabelTypes { 18 | DIST_LABEL_CCOMP = 0, 19 | DIST_LABEL_PIXEL = 1, 20 | } 21 | 22 | interface _DistanceTransformLabelTypes { 23 | DIST_LABEL_CCOMP: DistanceTransformLabelTypes.DIST_LABEL_CCOMP; 24 | DIST_LABEL_PIXEL: DistanceTransformLabelTypes.DIST_LABEL_PIXEL; 25 | } 26 | 27 | enum DistanceTransformMasks { 28 | DIST_MASK_3 = 3, 29 | DIST_MASK_5 = 5, 30 | DIST_MASK_PRECISE = 0, 31 | } 32 | 33 | interface _DistanceTransformMasks { 34 | DIST_MASK_3: DistanceTransformMasks.DIST_MASK_3; 35 | DIST_MASK_5: DistanceTransformMasks.DIST_MASK_5; 36 | DIST_MASK_PRECISE: DistanceTransformMasks.DIST_MASK_PRECISE; 37 | } 38 | 39 | enum DistanceTypes { 40 | DIST_USER = -1, 41 | DIST_L1 = 1, 42 | DIST_L2 = 2, 43 | DIST_C = 3, 44 | DIST_L12 = 4, 45 | DIST_FAIR = 5, 46 | DIST_WELSCH = 6, 47 | DIST_HUBER = 7, 48 | } 49 | 50 | interface _DistanceTypes { 51 | DIST_USER: DistanceTypes.DIST_USER; 52 | DIST_L1: DistanceTypes.DIST_L1; 53 | DIST_L2: DistanceTypes.DIST_L2; 54 
| DIST_C: DistanceTypes.DIST_C; 55 | DIST_L12: DistanceTypes.DIST_L12; 56 | DIST_FAIR: DistanceTypes.DIST_FAIR; 57 | DIST_WELSCH: DistanceTypes.DIST_WELSCH; 58 | DIST_HUBER: DistanceTypes.DIST_HUBER; 59 | } 60 | 61 | enum FloodFillFlags { 62 | FLOODFILL_FIXED_RANGE = 65536, 63 | FLOODFILL_MASK_ONLY = 131072, 64 | } 65 | 66 | interface _FloodFillFlags { 67 | FLOODFILL_FIXED_RANGE: FloodFillFlags.FLOODFILL_FIXED_RANGE; 68 | FLOODFILL_MASK_ONLY: FloodFillFlags.FLOODFILL_MASK_ONLY; 69 | } 70 | 71 | enum GrabCutClasses { 72 | GC_BGD = 0, 73 | GC_FGD = 1, 74 | GC_PR_BGD = 2, 75 | GC_PR_FGD = 3, 76 | } 77 | 78 | interface _GrabCutClasses { 79 | GC_BGD: GrabCutClasses.GC_BGD; 80 | GC_FGD: GrabCutClasses.GC_FGD; 81 | GC_PR_BGD: GrabCutClasses.GC_PR_BGD; 82 | GC_PR_FGD: GrabCutClasses.GC_PR_FGD; 83 | } 84 | 85 | enum GrabCutModes { 86 | GC_INIT_WITH_RECT = 0, 87 | GC_INIT_WITH_MASK = 1, 88 | GC_EVAL = 2, 89 | GC_EVAL_FREEZE_MODEL = 3, 90 | } 91 | 92 | interface _GrabCutModes { 93 | GC_INIT_WITH_RECT: GrabCutModes.GC_INIT_WITH_RECT; 94 | GC_INIT_WITH_MASK: GrabCutModes.GC_INIT_WITH_MASK; 95 | GC_EVAL: GrabCutModes.GC_EVAL; 96 | GC_EVAL_FREEZE_MODEL: GrabCutModes.GC_EVAL_FREEZE_MODEL; 97 | } 98 | 99 | enum ThresholdTypes { 100 | THRESH_BINARY = 0, 101 | THRESH_BINARY_INV = 1, 102 | THRESH_TRUNC = 2, 103 | THRESH_TOZERO = 3, 104 | THRESH_TOZERO_INV = 4, 105 | THRESH_MASK = 7, 106 | THRESH_OTSU = 8, 107 | THRESH_TRIANGLE = 16, 108 | } 109 | 110 | interface _ThresholdTypes { 111 | THRESH_BINARY: ThresholdTypes.THRESH_BINARY; 112 | THRESH_BINARY_INV: ThresholdTypes.THRESH_BINARY_INV; 113 | THRESH_TRUNC: ThresholdTypes.THRESH_TRUNC; 114 | THRESH_TOZERO: ThresholdTypes.THRESH_TOZERO; 115 | THRESH_TOZERO_INV: ThresholdTypes.THRESH_TOZERO_INV; 116 | THRESH_MASK: ThresholdTypes.THRESH_MASK; 117 | THRESH_OTSU: ThresholdTypes.THRESH_OTSU; 118 | THRESH_TRIANGLE: ThresholdTypes.THRESH_TRIANGLE; 119 | } 120 | 121 | interface Misc { 122 | /** 123 | * Applies an adaptive threshold to an array 
124 | * @param src Source 8-bit single-channel image. 125 | * @param dst Destination image of the same size and the same type as src 126 | * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied 127 | * @param adaptiveMethod Adaptive thresholding algorithm to use, @see AdaptiveThresholdTypes. The BORDER_REPLICATE | BORDER_ISOLATED is used to process boundaries 128 | * @param thresholdType Thresholding type that must be either THRESH_BINARY or THRESH_BINARY_INV, @see ThresholdTypes. 129 | * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on. 130 | * @param C Constant subtracted from the mean or weighted mean (@see the details below). Normally, it is positive but may be zero or negative as well) 131 | */ 132 | adaptiveThreshold( 133 | src: Mat, 134 | dst: Mat, 135 | maxValue: number, 136 | adaptiveMethod: AdaptiveThresholdTypes, 137 | thresholdType: ThresholdTypes.THRESH_BINARY | ThresholdTypes.THRESH_BINARY_INV, 138 | blockSize: number, 139 | C: number 140 | ): void; 141 | /** 142 | * Calculates the distance to the closest zero pixel for each pixel of the source image 143 | * @param src 8-bit, single-channel (binary) source image 144 | * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, single-channel image of the same size as src 145 | * @param distanceType Type of distance, @see DistanceTypes 146 | * @param maskSize Size of the distance transform mask, @see DistanceTransformMasks. In case of the DIST_L1 or DIST_C distance type, the parameter is forced to 3 because a 3×3 mask gives the same result as 5×5 or any larger aperture. 
147 | */ 148 | distanceTransform( 149 | src: Mat, 150 | dst: Mat, 151 | distanceType: DistanceTypes, 152 | maskSize: DistanceTransformMasks 153 | ): void; 154 | /** 155 | * Fills a connected component with the given color 156 | * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the function unless the FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See the details below 157 | * @param seedPoint Starting point 158 | * @param newVal New value of the repainted domain pixels 159 | * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the repainted domain 160 | */ 161 | floodFill(image: Mat, seedPoint: Point, newVal: Scalar, rect: Rect): void; 162 | /** 163 | * Calculates the integral of an image 164 | * @param src input image as W×H, 8-bit or floating-point (32f or 64f). 165 | * @param sum integral image as (W+1)×(H+1) , 32-bit integer or floating-point (32f or 64f). 166 | */ 167 | integral(src: Mat,sum: Mat): void; 168 | /** 169 | * Applies a fixed-level threshold to each array element 170 | * @param src input array (multiple-channel, 8-bit or 32-bit floating point). 171 | * @param dst output array of the same size and type and the same number of channels as src. 172 | * @param thresh threshold value 173 | * @param maxval maximum value to use with the THRESH_BINARY and THRESH_BINARY_INV thresholding types 174 | * @param type thresholding type (@see ThresholdTypes). 
175 | */ 176 | threshold(src: Mat,dst: Mat,thresh: number,maxval: number,type: number): void; 177 | } 178 | } 179 | export = Misc; 180 | -------------------------------------------------------------------------------- /src/ImageProcessing/Object.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | 3 | declare module ObjectDetection { 4 | enum TemplateMatchModes { 5 | TM_SQDIFF = 0, 6 | TM_SQDIFF_NORMED = 1, 7 | TM_CCORR = 2, 8 | TM_CCORR_NORMED = 3, 9 | TM_CCOEFF = 4, 10 | TM_CCOEFF_NORMED = 5, 11 | } 12 | 13 | interface _TemplateMatchModes { 14 | TM_SQDIFF: TemplateMatchModes.TM_SQDIFF; 15 | TM_SQDIFF_NORMED: TemplateMatchModes.TM_SQDIFF_NORMED; 16 | TM_CCORR: TemplateMatchModes.TM_CCORR; 17 | TM_CCORR_NORMED: TemplateMatchModes.TM_CCORR_NORMED; 18 | TM_CCOEFF: TemplateMatchModes.TM_CCOEFF; 19 | TM_CCOEFF_NORMED: TemplateMatchModes.TM_CCOEFF_NORMED; 20 | } 21 | 22 | interface ObjectDetection { 23 | /** 24 | * Compares a template against overlapped image regions 25 | * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point 26 | * @param templ Searched template. It must be not greater than the source image and have the same data type. 27 | * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image is W×H and templ is w×h , then result is (W−w+1)×(H−h+1) . 28 | * @param method Parameter specifying the comparison method, @see TemplateMatchModes 29 | * @param mask Mask of searched template. It must have the same datatype and size with templ. It is not set by default. 
Currently, only the TM_SQDIFF and TM_CCORR_NORMED methods are supported 30 | */ 31 | matchTemplate( 32 | image: Mat, 33 | templ: Mat, 34 | result: Mat, 35 | method: TemplateMatchModes, 36 | mask: Mat 37 | ): void; 38 | } 39 | } 40 | 41 | export = ObjectDetection; 42 | -------------------------------------------------------------------------------- /src/ImageProcessing/Segmentation.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | import { Rect } from '../core/Rect'; 3 | import { GrabCutModes } from './Misc'; 4 | 5 | declare module ImageSegmentation { 6 | interface ImageSegmentation { 7 | /** 8 | * Runs the GrabCut algorithm 9 | * @param img Input 8-bit 3-channel image 10 | * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when mode is set to GC_INIT_WITH_RECT. Its elements may have one of the GrabCutClasses. 11 | * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as "obvious background". 12 | * @param bgdModel Temporary array for the background model. Do not modify it while you are processing the same image. 13 | * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are processing the same image 14 | * @param iterCount Number of iterations the algorithm should make before returning the result 15 | */ 16 | grabCut( 17 | img: Mat, 18 | mask: Mat, 19 | rect: Rect, 20 | bgdModel: Mat, 21 | fgdModel: Mat, 22 | iterCount: number, 23 | mode: GrabCutModes 24 | ): void; 25 | /** 26 | * Performs a marker-based image segmentation using the watershed algorithm 27 | * @param image Input 8-bit 3-channel image 28 | * @param markers Input/output 32-bit single-channel image (map) of markers. 
It should have the same size as image 29 | */ 30 | watershed(image: Mat, markers: Mat): void; 31 | } 32 | } 33 | 34 | export = ImageSegmentation; 35 | -------------------------------------------------------------------------------- /src/ImageProcessing/Shape.d.ts: -------------------------------------------------------------------------------- 1 | import { NDArray } from '../core/Core'; 2 | import { Mat } from '../core/Mat'; 3 | import { Moments } from '../core/Moments'; 4 | import { Rect } from '../core/Rect'; 5 | import { RotatedRect } from '../core/RotatedRect'; 6 | import { Point } from '../core/Point'; 7 | import { DistanceTypes } from './Misc'; 8 | import { MatVector } from '../core/MatVector'; 9 | 10 | declare module StructuralAnalysisShapeDescriptors { 11 | enum ConnectedComponentsAlgorithmsTypes { 12 | CCL_DEFAULT = -1, 13 | CCL_WU = 0, 14 | CCL_GRANA = 1, 15 | CCL_BOLELLI = 2, 16 | CCL_SAUF = 3, 17 | CCL_BBDT = 4, 18 | CCL_SPAGHETTI = 5, 19 | } 20 | 21 | interface _ConnectedComponentsAlgorithmsTypes { 22 | CCL_DEFAULT: ConnectedComponentsAlgorithmsTypes.CCL_DEFAULT; 23 | CCL_WU: ConnectedComponentsAlgorithmsTypes.CCL_WU; 24 | CCL_GRANA: ConnectedComponentsAlgorithmsTypes.CCL_GRANA; 25 | CCL_BOLELLI: ConnectedComponentsAlgorithmsTypes.CCL_BOLELLI; 26 | CCL_SAUF: ConnectedComponentsAlgorithmsTypes.CCL_SAUF; 27 | CCL_BBDT: ConnectedComponentsAlgorithmsTypes.CCL_BBDT; 28 | CCL_SPAGHETTI: ConnectedComponentsAlgorithmsTypes.CCL_SPAGHETTI; 29 | } 30 | 31 | enum ConnectedComponentsTypes { 32 | CC_STAT_LEFT = 0, 33 | CC_STAT_TOP = 1, 34 | CC_STAT_WIDTH = 2, 35 | CC_STAT_HEIGHT = 3, 36 | CC_STAT_AREA = 4, 37 | } 38 | 39 | interface _ConnectedComponentsTypes { 40 | CC_STAT_LEFT: ConnectedComponentsTypes.CC_STAT_LEFT; 41 | CC_STAT_TOP: ConnectedComponentsTypes.CC_STAT_TOP; 42 | CC_STAT_WIDTH: ConnectedComponentsTypes.CC_STAT_WIDTH; 43 | CC_STAT_HEIGHT: ConnectedComponentsTypes.CC_STAT_HEIGHT; 44 | CC_STAT_AREA: ConnectedComponentsTypes.CC_STAT_AREA; 45 | } 46 | 47 | 
enum ContourApproximationModes { 48 | CHAIN_APPROX_NONE = 1, 49 | CHAIN_APPROX_SIMPLE = 2, 50 | CHAIN_APPROX_TC89_L1 = 3, 51 | CHAIN_APPROX_TC89_KCOS = 4, 52 | } 53 | 54 | interface _ContourApproximationModes { 55 | CHAIN_APPROX_NONE: ContourApproximationModes.CHAIN_APPROX_NONE; 56 | CHAIN_APPROX_SIMPLE: ContourApproximationModes.CHAIN_APPROX_SIMPLE; 57 | CHAIN_APPROX_TC89_L1: ContourApproximationModes.CHAIN_APPROX_TC89_L1; 58 | CHAIN_APPROX_TC89_KCOS: ContourApproximationModes.CHAIN_APPROX_TC89_KCOS; 59 | } 60 | 61 | enum RectanglesIntersectTypes { 62 | INTERSECT_NONE = 0, 63 | INTERSECT_PARTIAL = 1, 64 | INTERSECT_FULL = 2, 65 | } 66 | 67 | interface _RectanglesIntersectTypes { 68 | INTERSECT_NONE: RectanglesIntersectTypes.INTERSECT_NONE; 69 | INTERSECT_PARTIAL: RectanglesIntersectTypes.INTERSECT_PARTIAL; 70 | INTERSECT_FULL: RectanglesIntersectTypes.INTERSECT_FULL; 71 | } 72 | 73 | enum RetrievalModes { 74 | RETR_EXTERNAL = 0, 75 | RETR_LIST = 1, 76 | RETR_CCOMP = 2, 77 | RETR_TREE = 3, 78 | RETR_FLOODFILL = 4, 79 | } 80 | 81 | interface _RetrievalModes { 82 | RETR_EXTERNAL: RetrievalModes.RETR_EXTERNAL; 83 | RETR_LIST: RetrievalModes.RETR_LIST; 84 | RETR_CCOMP: RetrievalModes.RETR_CCOMP; 85 | RETR_TREE: RetrievalModes.RETR_TREE; 86 | RETR_FLOODFILL: RetrievalModes.RETR_FLOODFILL; 87 | } 88 | 89 | enum ShapeMatchModes { 90 | CONTOURS_MATCH_I1 = 1, 91 | CONTOURS_MATCH_I2 = 2, 92 | CONTOURS_MATCH_I3 = 3, 93 | } 94 | 95 | interface _ShapeMatchModes { 96 | CONTOURS_MATCH_I1: ShapeMatchModes.CONTOURS_MATCH_I1; 97 | CONTOURS_MATCH_I2: ShapeMatchModes.CONTOURS_MATCH_I2; 98 | CONTOURS_MATCH_I3: ShapeMatchModes.CONTOURS_MATCH_I3; 99 | } 100 | 101 | interface StructuralAnalysisShapeDescriptors { 102 | /** 103 | * Approximates a polygonal curve(s) with the specified precision 104 | * @param curve Input vector of a 2D point stored in std::vector or Mat 105 | * @param approxCurveResult of the approximation. 
The type should match the type of the input curve 106 | * @param epsilonParameter specifying the approximation accuracy. This is the maximum distance between the original curve and its approximation 107 | * @param boolIf true, the approximated curve is closed (its first and last vertices are connected). Otherwise, it is not closed 108 | */ 109 | approxPolyDP( 110 | curve: Mat | MatVector, 111 | approxCurve: Mat, 112 | epsilon: number, 113 | bool: boolean 114 | ): void; 115 | /** 116 | * Calculates a contour perimeter or a curve length. 117 | * @param curve Input vector of 2D points, stored in std::vector or Mat. 118 | * @param closed Flag indicating whether the curve is closed or not. 119 | * @returns a curve length or a closed contour perimeter. 120 | */ 121 | arcLength(curve: Mat | MatVector, closed: boolean): number; 122 | /** 123 | * Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image. 124 | * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat. 125 | * @returns the minimal up-right bounding rectangle for the specified point set or non-zero pixels of gray-scale image 126 | */ 127 | boundingRect(array: Mat | MatVector): Rect; 128 | /** 129 | * computes the connected components labeled image of boolean image 130 | * @param image the 8-bit single-channel image to be labeled 131 | * @param labels destination labeled image 132 | * @returns N, the total number of labels [0, N-1] where 0 represents the background label 133 | */ 134 | connectedComponents(image: Mat, labels: Mat): number; 135 | /** 136 | * computes the connected components labeled image of boolean image and also produces a statistics output for each label 137 | * @param image the 8-bit single-channel image to be labeled 138 | * @param labels destination labeled image 139 | * @param stats statistics output for each label, including the background label. 
Statistics are accessed via stats(label, COLUMN) where COLUMN is one of ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. 140 | * @param centroids centroid output for each label, including the background label. Centroids are accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F 141 | * @returns N, the total number of labels [0, N-1] where 0 represents the background label 142 | */ 143 | connectedComponentsWithStats(image: Mat, labels: Mat, stats: Mat, centroids: Mat): number; 144 | /** 145 | * Calculates a contour area 146 | * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat. 147 | * @param oriented Oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine orientation of a contour by taking the sign of an area. By default, the parameter is false, which means that the absolute value is returned 148 | * @returns area and the number of non-zero pixels, 149 | */ 150 | contourArea(contour: Mat | MatVector, oriented?: boolean): number; 151 | /** 152 | * Finds the convex hull of a point set. 153 | * @param points Input 2D point set, stored in std::vector or Mat. 154 | * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In the first case, the hull elements are 0-based indices of the convex hull points in the original array (since the set of convex hull points is a subset of the original point set). In the second case, hull elements are the convex hull points themselves 155 | */ 156 | convexHull(points: Mat, hull: Mat): void; 157 | /** 158 | * Finds the convexity defects of a contour. 
159 | * @param contour Input contour 160 | * @param convexHull Convex hull obtained using convexHull that should contain indices of the contour points that make the hull 161 | * @param convexityDefects output vector of convexity defects. In C++ and the new Python/Java interface each convexity defect is represented as 4-element integer vector (a.k.a. Vec4i): (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices in the original contour of the convexity defect beginning, end and the farthest point, and fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the farthest contour point and the hull. That is, to get the floating-point value of the depth will be fixpt_depth/256.0. 162 | */ 163 | convexityDefects(contour: Mat, convexHull: Mat, convexityDefects: Mat): void; 164 | /** 165 | * Finds contours in a binary image 166 | * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero pixels remain 0's, so the image is treated as binary . You can use compare, inRange, threshold , adaptiveThreshold, Canny, and others to create a binary image out of a grayscale or color one. If mode equals to RETR_CCOMP or RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1). 167 | * @param contours Detected contours. Each contour is stored as a vector of points 168 | * @param hierarchy Optional output vector (e.g. std::vector), containing information about the image topology. It has as many elements as the number of contours. For each i-th contour contours[i], the elements hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices in contours of the next and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. 
If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative 169 | * @param mode Contour retrieval mode, @see RetrievalModes 170 | * @param method Contour approximation method, @see ContourApproximationModes 171 | */ 172 | findContours( 173 | image: Mat | MatVector, 174 | contours: Mat | MatVector, 175 | hierarchy: Mat | MatVector, 176 | mode: RetrievalModes, 177 | method: ContourApproximationModes 178 | ): void; 179 | /** 180 | * Fits an ellipse around a set of 2D points. 181 | * @param points 182 | */ 183 | fitEllipse(points: Mat): void; 184 | /** 185 | * Fits a line to a 2D or 3D point set. 186 | * @param points Input vector of 2D or 3D points, stored in a Mat. 187 | * @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on the line 188 | * @param disType Distance used by the M-estimator, @see DistanceTypes 189 | * @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value is chosen. 190 | * @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line). 191 | * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps. 192 | */ 193 | fitLine( 194 | points: Mat, 195 | line: Mat, 196 | disType: DistanceTypes, 197 | param: number, 198 | reps: number, 199 | aeps: number 200 | ): void; 201 | /** 202 | * Tests a contour convexity. 
203 | * @param contour 204 | * @returns whether the input contour is convex or not 205 | */ 206 | isContourConvex(contour: Mat): boolean; 207 | /** 208 | * Compares two shapes 209 | * @param contour1 First contour or grayscale image 210 | * @param contour2 Second contour or grayscale image 211 | * @param method Comparison method, @see ShapeMatchModes 212 | * @param parameter Method-specific parameter (not supported now). 213 | * @returns 214 | */ 215 | matchShapes( 216 | contour1: Mat, 217 | contour2: Mat, 218 | method: ShapeMatchModes, 219 | parameter: number 220 | ): number; 221 | /** 222 | * Finds a rotated rectangle of the minimum area enclosing the input 2D point set. 223 | * @param points Input vector of 2D points, stored in a Mat 224 | * @returns the minimum-area bounding rectangle (possibly rotated) for a specified point set 225 | */ 226 | minAreaRect(points: Mat): RotatedRect; 227 | /** 228 | * Finds a circle of the minimum area enclosing a 2D point set. 229 | * @param points Input vector of 2D points, stored in a Mat 230 | */ 231 | minEnclosingCircle(points: Mat): void; 232 | /** 233 | * Calculates all of the moments up to the third order of a polygon or rasterized shape 234 | * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( 1×N or N×1 ) of 2D points (Point or Point2f ). 235 | * @returns results are returned in the structure cv::Moments. 236 | */ 237 | moments(array: Mat): Moments; 238 | /** 239 | * Performs a point-in-contour test. 240 | * @param contour Input contour 241 | * @param pt Point tested against the contour 242 | * @param measureDist If true, the function estimates the signed distance from the point to the nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not. 243 | * @returns positive (inside), negative (outside), or zero (on an edge) value, correspondingly. 
244 | */ 245 | pointPolygonTest(contour: Mat, pt: Point, measureDist: boolean): number; 246 | } 247 | } 248 | 249 | export = StructuralAnalysisShapeDescriptors; 250 | -------------------------------------------------------------------------------- /src/ImageProcessing/Subdiv2D.d.ts: -------------------------------------------------------------------------------- 1 | import { NDArray } from '../core/Core'; 2 | import { MatVector } from '../core/MatVector'; 3 | import { Point } from '../core/Point'; 4 | import { Rect } from '../core/Rect'; 5 | 6 | declare module Subdiv2D { 7 | enum EdgeType { 8 | NEXT_AROUND_ORG = 0x00, 9 | NEXT_AROUND_DST = 0x22, 10 | PREV_AROUND_ORG = 0x11, 11 | PREV_AROUND_DST = 0x33, 12 | NEXT_AROUND_LEFT = 0x13, 13 | NEXT_AROUND_RIGHT = 0x31, 14 | PREV_AROUND_LEFT = 0x20, 15 | PREV_AROUND_RIGHT = 0x02, 16 | } 17 | 18 | interface _EdgeType { 19 | NEXT_AROUND_ORG: EdgeType.NEXT_AROUND_ORG; 20 | NEXT_AROUND_DST: EdgeType.NEXT_AROUND_DST; 21 | PREV_AROUND_ORG: EdgeType.PREV_AROUND_ORG; 22 | PREV_AROUND_DST: EdgeType.PREV_AROUND_DST; 23 | NEXT_AROUND_LEFT: EdgeType.NEXT_AROUND_LEFT; 24 | NEXT_AROUND_RIGHT: EdgeType.NEXT_AROUND_RIGHT; 25 | PREV_AROUND_LEFT: EdgeType.PREV_AROUND_LEFT; 26 | PREV_AROUND_RIGHT: EdgeType.PREV_AROUND_RIGHT; 27 | } 28 | 29 | 30 | interface QuadEdge { 31 | new(): QuadEdge; 32 | new(edgeidx: number): QuadEdge; 33 | isfree(): boolean; 34 | next: QuadEdge; 35 | pt: number; 36 | } 37 | 38 | interface Vertex { 39 | new(): Vertex; 40 | new(pt: Point, _isvirtual: boolean, _firstEdge?: number): Vertex; 41 | isfree(): boolean; 42 | isvirtual(): boolean; 43 | firstEdge: number; 44 | pt: Point; 45 | } 46 | 47 | interface Subdiv2D { 48 | new (): Subdiv2D; 49 | new (rect: Rect): Subdiv2D; 50 | calcVoronoi(): void; 51 | checkSubdiv(): void; 52 | clearVoronoi(): void; 53 | connectEdges(edgeA: number, edgeB: number): void; 54 | deleteEdge(edge: number): void; 55 | deletePoint(vtx: number): void; 56 | /** 57 | * Returns the edge origin 58 | * 
@param edge Subdivision edge ID. 59 | * @param dstpt Output vertex location 60 | * @returns vertex ID 61 | */ 62 | edgeDst(edge: number, dstpt: Point): number; 63 | /** 64 | * Returns the edge origin 65 | * @param edge Subdivision edge ID. 66 | * @param orgpt Output vertex location 67 | * @returns vertex ID 68 | */ 69 | edgeOrg(edge: number, orgpt: Point): number; 70 | /** 71 | * Finds the subdivision vertex closest to the given point 72 | * @param pt Input point 73 | * @param nearestPt Output subdivision vertex point 74 | * @returns vertex ID. 75 | */ 76 | findNearest(pt: Point, nearestPt: Point): number; 77 | /** 78 | * Returns one of the edges related to the given edge 79 | * @param edge Subdivision edge ID 80 | * @param nextEdgeType Parameter specifying which of the related edges to return. The following values are possible 81 | * - NEXT_AROUND_ORG next around the edge origin ( eOnext on the picture below if e is the input edge) 82 | * - NEXT_AROUND_DST next around the edge vertex ( eDnext ) 83 | * - PREV_AROUND_ORG previous around the edge origin (reversed eRnext ) 84 | * - PREV_AROUND_DST previous around the edge destination (reversed eLnext ) 85 | * - NEXT_AROUND_LEFT next around the left facet ( eLnext ) 86 | * - NEXT_AROUND_RIGHT next around the right facet ( eRnext ) 87 | * - PREV_AROUND_LEFT previous around the left facet (reversed eOnext ) 88 | * - PREV_AROUND_RIGHT previous around the right facet (reversed eDnext ) 89 | * @returns edge ID related to the input edge 90 | */ 91 | getEdge(edge: number, nextEdgeType: EdgeType): number; 92 | /** 93 | * Returns a list of all edges 94 | * @param edgeList Output vector 95 | */ 96 | getEdgeList(edgeList: MatVector): void; 97 | /** 98 | * Returns a list of the leading edge ID connected to each triangle 99 | * The function gives one edge ID for each triangle 100 | * @param leadingEdgeList Output vector 101 | */ 102 | getLeadingEdgeList(leadingEdgeList: MatVector): void; 103 | /** 104 | * Returns a list of all 
triangles. 105 | * @param triangleList Output vector 106 | */ 107 | getTriangleList(triangleList: MatVector): void; 108 | /** 109 | *Returns vertex location from vertex ID. 110 | * @param vertex vertex ID. 111 | * @param firstEdge Optional. The first edge ID which is connected to the vertex. 112 | * @returns vertex (x,y) 113 | */ 114 | getVertex(vertex: number, firstEdge?: number): Point; 115 | /** 116 | * Returns a list of all Voronoi facets. 117 | * @param idx Vector of vertices IDs to consider. For all vertices you can pass empty vector 118 | * @param facetList Output vector of the Voronoi facets. 119 | * @param facetCenters Output vector of the Voronoi facets center points 120 | */ 121 | getVoronoiFacetList( 122 | idx: MatVector, 123 | facetList: NDArray, 124 | facetCenters: NDArray 125 | ): void; 126 | /** 127 | * Creates a new empty Delaunay subdivision 128 | */ 129 | initDelaunay(): void; 130 | /** 131 | * Insert a single point into a Delaunay triangulation 132 | * @param pt Point to insert 133 | * @returns the ID of the point. 134 | */ 135 | insert(pt: Point): number; 136 | /** 137 | * Insert multiple points into a Delaunay triangulation 138 | * @param ptvec Points to insert 139 | */ 140 | insert(ptvec: NDArray): void; 141 | isRightOf(pt: Point, edge: number): number; 142 | /** 143 | * Returns the location of a point within a Delaunay triangulation 144 | * @param pt Point to locate 145 | * @param edge Output edge that the point belongs to or is located to the right of it 146 | * @param vertex Optional output vertex the input point coincides with 147 | */ 148 | locate(pt: Point, edge: number, vertex: number): number; 149 | newEdge(): number; 150 | newPoint(pt: Point, isvirtual: boolean, firstEdge?: number): number; 151 | /** 152 | * Returns next edge around the edge origin 153 | * @param edge Subdivision edge ID 154 | * @returns an integer which is next edge ID around the edge origin: eOnext on the picture above if e is the input edge). 
155 | */ 156 | nextEdge(edge: number): number; 157 | /** 158 | * Returns another edge of the same quad-edge 159 | * @param edge Subdivision edge ID 160 | * @param rotate Parameter specifying which of the edges of the same quad-edge as the input one to return. The following values are possible: 161 | * 0 - the input edge ( e on the picture below if e is the input edge) 162 | * 1 - the rotated edge ( eRot ) 163 | * 2 - the reversed edge (reversed e (in green)) 164 | * 3 - the reversed rotated edge (reversed eRot (in green)) 165 | * @returns one of the edges ID of the same quad-edge as the input edge 166 | */ 167 | rotateEdge(edge: number, rotate: number): number; 168 | setEdgePoints(edge: number, orgPt: number, dstPt: number): void; 169 | splice(edgeA: number, edgeB: number): void; 170 | swapEdges(edge: number): void; 171 | symEdge(edge: number): void; 172 | bottomRight: Point; 173 | freePoint: number; 174 | qedges: NDArray; 175 | recentEdge: number; 176 | topLeft: Point; 177 | validGeometry: boolean; 178 | vtx: NDArray; 179 | } 180 | } 181 | export = Subdiv2D; 182 | -------------------------------------------------------------------------------- /src/ObjectDetection/ObjectDetection.d.ts: -------------------------------------------------------------------------------- 1 | import { NDArray } from "../core/Core"; 2 | import { MatVector } from "../core/MatVector"; 3 | import { Rect } from "../core/Rect"; 4 | 5 | declare module ObjectDetection { 6 | interface ObjectDetection { 7 | /** 8 | * Groups the object candidate rectangles 9 | * @param rectList Input/output vector of rectangles. Output vector includes retained and grouped rectangles. (The Python list is not modified in place.) 10 | * @param weights Minimum possible number of rectangles minus 1. 
The threshold is used in a group of rectangles to retain it 11 | * @param groupThreshold 12 | */ 13 | groupRectangles(rectList: NDArray, weights: MatVector, groupThreshold: number): void; 14 | } 15 | } 16 | 17 | export = ObjectDetection; -------------------------------------------------------------------------------- /src/core/ColorConversion.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from './Mat'; 2 | 3 | declare module ColorConversions { 4 | enum ColorConversionCodes { 5 | COLOR_BGR2BGRA = 0, 6 | COLOR_RGB2RGBA = COLOR_BGR2BGRA, 7 | COLOR_BGRA2BGR = 1, 8 | COLOR_RGBA2RGB = COLOR_BGRA2BGR, 9 | COLOR_BGR2RGBA = 2, 10 | COLOR_RGB2BGRA = COLOR_BGR2RGBA, 11 | COLOR_RGBA2BGR = 3, 12 | COLOR_BGRA2RGB = COLOR_RGBA2BGR, 13 | COLOR_BGR2RGB = 4, 14 | COLOR_RGB2BGR = COLOR_BGR2RGB, 15 | COLOR_BGRA2RGBA = 5, 16 | COLOR_RGBA2BGRA = COLOR_BGRA2RGBA, 17 | COLOR_BGR2GRAY = 6, 18 | COLOR_RGB2GRAY = 7, 19 | COLOR_GRAY2BGR = 8, 20 | COLOR_GRAY2RGB = COLOR_GRAY2BGR, 21 | COLOR_GRAY2BGRA = 9, 22 | COLOR_GRAY2RGBA = COLOR_GRAY2BGRA, 23 | COLOR_BGRA2GRAY = 10, 24 | COLOR_RGBA2GRAY = 11, 25 | COLOR_BGR2BGR565 = 12, 26 | COLOR_RGB2BGR565 = 13, 27 | COLOR_BGR5652BGR = 14, 28 | COLOR_BGR5652RGB = 15, 29 | COLOR_BGRA2BGR565 = 16, 30 | COLOR_RGBA2BGR565 = 17, 31 | COLOR_BGR5652BGRA = 18, 32 | COLOR_BGR5652RGBA = 19, 33 | COLOR_GRAY2BGR565 = 20, 34 | COLOR_BGR5652GRAY = 21, 35 | COLOR_BGR2BGR555 = 22, 36 | COLOR_RGB2BGR555 = 23, 37 | COLOR_BGR5552BGR = 24, 38 | COLOR_BGR5552RGB = 25, 39 | COLOR_BGRA2BGR555 = 26, 40 | COLOR_RGBA2BGR555 = 27, 41 | COLOR_BGR5552BGRA = 28, 42 | COLOR_BGR5552RGBA = 29, 43 | COLOR_GRAY2BGR555 = 30, 44 | COLOR_BGR5552GRAY = 31, 45 | COLOR_BGR2XYZ = 32, 46 | COLOR_RGB2XYZ = 33, 47 | COLOR_XYZ2BGR = 34, 48 | COLOR_XYZ2RGB = 35, 49 | COLOR_BGR2YCrCb = 36, 50 | COLOR_RGB2YCrCb = 37, 51 | COLOR_YCrCb2BGR = 38, 52 | COLOR_YCrCb2RGB = 39, 53 | COLOR_BGR2HSV = 40, 54 | COLOR_RGB2HSV = 41, 55 | COLOR_BGR2Lab = 44, 
56 | COLOR_RGB2Lab = 45, 57 | COLOR_BGR2Luv = 50, 58 | COLOR_RGB2Luv = 51, 59 | COLOR_BGR2HLS = 52, 60 | COLOR_RGB2HLS = 53, 61 | COLOR_HSV2BGR = 54, 62 | COLOR_HSV2RGB = 55, 63 | COLOR_Lab2BGR = 56, 64 | COLOR_Lab2RGB = 57, 65 | COLOR_Luv2BGR = 58, 66 | COLOR_Luv2RGB = 59, 67 | COLOR_HLS2BGR = 60, 68 | COLOR_HLS2RGB = 61, 69 | COLOR_BGR2HSV_FULL = 66, 70 | COLOR_RGB2HSV_FULL = 67, 71 | COLOR_BGR2HLS_FULL = 68, 72 | COLOR_RGB2HLS_FULL = 69, 73 | COLOR_HSV2BGR_FULL = 70, 74 | COLOR_HSV2RGB_FULL = 71, 75 | COLOR_HLS2BGR_FULL = 72, 76 | COLOR_HLS2RGB_FULL = 73, 77 | COLOR_LBGR2Lab = 74, 78 | COLOR_LRGB2Lab = 75, 79 | COLOR_LBGR2Luv = 76, 80 | COLOR_LRGB2Luv = 77, 81 | COLOR_Lab2LBGR = 78, 82 | COLOR_Lab2LRGB = 79, 83 | COLOR_Luv2LBGR = 80, 84 | COLOR_Luv2LRGB = 81, 85 | COLOR_BGR2YUV = 82, 86 | COLOR_RGB2YUV = 83, 87 | COLOR_YUV2BGR = 84, 88 | COLOR_YUV2RGB = 85, 89 | COLOR_YUV2RGB_NV12 = 90, 90 | COLOR_YUV2BGR_NV12 = 91, 91 | COLOR_YUV2RGB_NV21 = 92, 92 | COLOR_YUV2BGR_NV21 = 93, 93 | COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, 94 | COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, 95 | COLOR_YUV2RGBA_NV12 = 94, 96 | COLOR_YUV2BGRA_NV12 = 95, 97 | COLOR_YUV2RGBA_NV21 = 96, 98 | COLOR_YUV2BGRA_NV21 = 97, 99 | COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, 100 | COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, 101 | COLOR_YUV2RGB_YV12 = 98, 102 | COLOR_YUV2BGR_YV12 = 99, 103 | COLOR_YUV2RGB_IYUV = 100, 104 | COLOR_YUV2BGR_IYUV = 101, 105 | COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, 106 | COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, 107 | COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, 108 | COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, 109 | COLOR_YUV2RGBA_YV12 = 102, 110 | COLOR_YUV2BGRA_YV12 = 103, 111 | COLOR_YUV2RGBA_IYUV = 104, 112 | COLOR_YUV2BGRA_IYUV = 105, 113 | COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, 114 | COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, 115 | COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, 116 | COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, 117 | COLOR_YUV2GRAY_420 = 106, 118 | 
COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, 119 | COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, 120 | COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, 121 | COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, 122 | COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, 123 | COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, 124 | COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, 125 | COLOR_YUV2RGB_UYVY = 107, 126 | COLOR_YUV2BGR_UYVY = 108, 127 | COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, 128 | COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, 129 | COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, 130 | COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, 131 | COLOR_YUV2RGBA_UYVY = 111, 132 | COLOR_YUV2BGRA_UYVY = 112, 133 | COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, 134 | COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, 135 | COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, 136 | COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, 137 | COLOR_YUV2RGB_YUY2 = 115, 138 | COLOR_YUV2BGR_YUY2 = 116, 139 | COLOR_YUV2RGB_YVYU = 117, 140 | COLOR_YUV2BGR_YVYU = 118, 141 | COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, 142 | COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, 143 | COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, 144 | COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, 145 | COLOR_YUV2RGBA_YUY2 = 119, 146 | COLOR_YUV2BGRA_YUY2 = 120, 147 | COLOR_YUV2RGBA_YVYU = 121, 148 | COLOR_YUV2BGRA_YVYU = 122, 149 | COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, 150 | COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, 151 | COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, 152 | COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, 153 | COLOR_YUV2GRAY_UYVY = 123, 154 | COLOR_YUV2GRAY_YUY2 = 124, 155 | COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, 156 | COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, 157 | COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, 158 | COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, 159 | COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, 160 | COLOR_RGBA2mRGBA = 125, 161 | COLOR_mRGBA2RGBA = 126, 162 | COLOR_RGB2YUV_I420 = 127, 163 | COLOR_BGR2YUV_I420 = 128, 164 | COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, 165 | COLOR_BGR2YUV_IYUV = 
COLOR_BGR2YUV_I420, 166 | COLOR_RGBA2YUV_I420 = 129, 167 | COLOR_BGRA2YUV_I420 = 130, 168 | COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, 169 | COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, 170 | COLOR_RGB2YUV_YV12 = 131, 171 | COLOR_BGR2YUV_YV12 = 132, 172 | COLOR_RGBA2YUV_YV12 = 133, 173 | COLOR_BGRA2YUV_YV12 = 134, 174 | COLOR_BayerBG2BGR = 46, 175 | COLOR_BayerGB2BGR = 47, 176 | COLOR_BayerRG2BGR = 48, 177 | COLOR_BayerGR2BGR = 49, 178 | COLOR_BayerBG2RGB = COLOR_BayerRG2BGR, 179 | COLOR_BayerGB2RGB = COLOR_BayerGR2BGR, 180 | COLOR_BayerRG2RGB = COLOR_BayerBG2BGR, 181 | COLOR_BayerGR2RGB = COLOR_BayerGB2BGR, 182 | COLOR_BayerBG2GRAY = 86, 183 | COLOR_BayerGB2GRAY = 87, 184 | COLOR_BayerRG2GRAY = 88, 185 | COLOR_BayerGR2GRAY = 89, 186 | COLOR_BayerBG2BGR_VNG = 62, 187 | COLOR_BayerGB2BGR_VNG = 63, 188 | COLOR_BayerRG2BGR_VNG = 64, 189 | COLOR_BayerGR2BGR_VNG = 65, 190 | COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG, 191 | COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG, 192 | COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG, 193 | COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG, 194 | COLOR_BayerBG2BGR_EA = 135, 195 | COLOR_BayerGB2BGR_EA = 136, 196 | COLOR_BayerRG2BGR_EA = 137, 197 | COLOR_BayerGR2BGR_EA = 138, 198 | COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA, 199 | COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA, 200 | COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA, 201 | COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA, 202 | COLOR_BayerBG2BGRA = 139, 203 | COLOR_BayerGB2BGRA = 140, 204 | COLOR_BayerRG2BGRA = 141, 205 | COLOR_BayerGR2BGRA = 142, 206 | COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA, 207 | COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA, 208 | COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA, 209 | COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA, 210 | COLOR_COLORCVT_MAX = 143 211 | } 212 | 213 | interface _ColorConversionCodes { 214 | COLOR_BGR2BGRA: ColorConversionCodes.COLOR_BGR2BGRA; 215 | COLOR_RGB2RGBA: ColorConversionCodes.COLOR_RGB2RGBA; 216 | COLOR_BGRA2BGR: 
ColorConversionCodes.COLOR_BGRA2BGR; 217 | COLOR_RGBA2RGB: ColorConversionCodes.COLOR_RGBA2RGB; 218 | COLOR_BGR2RGBA: ColorConversionCodes.COLOR_BGR2RGBA; 219 | COLOR_RGB2BGRA: ColorConversionCodes.COLOR_RGB2BGRA; 220 | COLOR_RGBA2BGR: ColorConversionCodes.COLOR_RGBA2BGR; 221 | COLOR_BGRA2RGB: ColorConversionCodes.COLOR_BGRA2RGB; 222 | COLOR_BGR2RGB: ColorConversionCodes.COLOR_BGR2RGB; 223 | COLOR_RGB2BGR: ColorConversionCodes.COLOR_RGB2BGR; 224 | COLOR_BGRA2RGBA: ColorConversionCodes.COLOR_BGRA2RGBA; 225 | COLOR_RGBA2BGRA: ColorConversionCodes.COLOR_RGBA2BGRA; 226 | COLOR_BGR2GRAY: ColorConversionCodes.COLOR_BGR2GRAY; 227 | COLOR_RGB2GRAY: ColorConversionCodes.COLOR_RGB2GRAY; 228 | COLOR_GRAY2BGR: ColorConversionCodes.COLOR_GRAY2BGR; 229 | COLOR_GRAY2RGB: ColorConversionCodes.COLOR_GRAY2RGB; 230 | COLOR_GRAY2BGRA: ColorConversionCodes.COLOR_GRAY2BGRA; 231 | COLOR_GRAY2RGBA: ColorConversionCodes.COLOR_GRAY2RGBA; 232 | COLOR_BGRA2GRAY: ColorConversionCodes.COLOR_BGRA2GRAY; 233 | COLOR_RGBA2GRAY: ColorConversionCodes.COLOR_RGBA2GRAY; 234 | COLOR_BGR2BGR565: ColorConversionCodes.COLOR_BGR2BGR565; 235 | COLOR_RGB2BGR565: ColorConversionCodes.COLOR_RGB2BGR565; 236 | COLOR_BGR5652BGR: ColorConversionCodes.COLOR_BGR5652BGR; 237 | COLOR_BGR5652RGB: ColorConversionCodes.COLOR_BGR5652RGB; 238 | COLOR_BGRA2BGR565: ColorConversionCodes.COLOR_BGRA2BGR565; 239 | COLOR_RGBA2BGR565: ColorConversionCodes.COLOR_RGBA2BGR565; 240 | COLOR_BGR5652BGRA: ColorConversionCodes.COLOR_BGR5652BGRA; 241 | COLOR_BGR5652RGBA: ColorConversionCodes.COLOR_BGR5652RGBA; 242 | COLOR_GRAY2BGR565: ColorConversionCodes.COLOR_GRAY2BGR565; 243 | COLOR_BGR5652GRAY: ColorConversionCodes.COLOR_BGR5652GRAY; 244 | COLOR_BGR2BGR555: ColorConversionCodes.COLOR_BGR2BGR555; 245 | COLOR_RGB2BGR555: ColorConversionCodes.COLOR_RGB2BGR555; 246 | COLOR_BGR5552BGR: ColorConversionCodes.COLOR_BGR5552BGR; 247 | COLOR_BGR5552RGB: ColorConversionCodes.COLOR_BGR5552RGB; 248 | COLOR_BGRA2BGR555: 
ColorConversionCodes.COLOR_BGRA2BGR555; 249 | COLOR_RGBA2BGR555: ColorConversionCodes.COLOR_RGBA2BGR555; 250 | COLOR_BGR5552BGRA: ColorConversionCodes.COLOR_BGR5552BGRA; 251 | COLOR_BGR5552RGBA: ColorConversionCodes.COLOR_BGR5552RGBA; 252 | COLOR_GRAY2BGR555: ColorConversionCodes.COLOR_GRAY2BGR555; 253 | COLOR_BGR5552GRAY: ColorConversionCodes.COLOR_BGR5552GRAY; 254 | COLOR_BGR2XYZ: ColorConversionCodes.COLOR_BGR2XYZ; 255 | COLOR_RGB2XYZ: ColorConversionCodes.COLOR_RGB2XYZ; 256 | COLOR_XYZ2BGR: ColorConversionCodes.COLOR_XYZ2BGR; 257 | COLOR_XYZ2RGB: ColorConversionCodes.COLOR_XYZ2RGB; 258 | COLOR_BGR2YCrCb: ColorConversionCodes.COLOR_BGR2YCrCb; 259 | COLOR_RGB2YCrCb: ColorConversionCodes.COLOR_RGB2YCrCb; 260 | COLOR_YCrCb2BGR: ColorConversionCodes.COLOR_YCrCb2BGR; 261 | COLOR_YCrCb2RGB: ColorConversionCodes.COLOR_YCrCb2RGB; 262 | COLOR_BGR2HSV: ColorConversionCodes.COLOR_BGR2HSV; 263 | COLOR_RGB2HSV: ColorConversionCodes.COLOR_RGB2HSV; 264 | COLOR_BGR2Lab: ColorConversionCodes.COLOR_BGR2Lab; 265 | COLOR_RGB2Lab: ColorConversionCodes.COLOR_RGB2Lab; 266 | COLOR_BGR2Luv: ColorConversionCodes.COLOR_BGR2Luv; 267 | COLOR_RGB2Luv: ColorConversionCodes.COLOR_RGB2Luv; 268 | COLOR_BGR2HLS: ColorConversionCodes.COLOR_BGR2HLS; 269 | COLOR_RGB2HLS: ColorConversionCodes.COLOR_RGB2HLS; 270 | COLOR_HSV2BGR: ColorConversionCodes.COLOR_HSV2BGR; 271 | COLOR_HSV2RGB: ColorConversionCodes.COLOR_HSV2RGB; 272 | COLOR_Lab2BGR: ColorConversionCodes.COLOR_Lab2BGR; 273 | COLOR_Lab2RGB: ColorConversionCodes.COLOR_Lab2RGB; 274 | COLOR_Luv2BGR: ColorConversionCodes.COLOR_Luv2BGR; 275 | COLOR_Luv2RGB: ColorConversionCodes.COLOR_Luv2RGB; 276 | COLOR_HLS2BGR: ColorConversionCodes.COLOR_HLS2BGR; 277 | COLOR_HLS2RGB: ColorConversionCodes.COLOR_HLS2RGB; 278 | COLOR_BGR2HSV_FULL: ColorConversionCodes.COLOR_BGR2HSV_FULL; 279 | COLOR_RGB2HSV_FULL: ColorConversionCodes.COLOR_RGB2HSV_FULL; 280 | COLOR_BGR2HLS_FULL: ColorConversionCodes.COLOR_BGR2HLS_FULL; 281 | COLOR_RGB2HLS_FULL: 
ColorConversionCodes.COLOR_RGB2HLS_FULL; 282 | COLOR_HSV2BGR_FULL: ColorConversionCodes.COLOR_HSV2BGR_FULL; 283 | COLOR_HSV2RGB_FULL: ColorConversionCodes.COLOR_HSV2RGB_FULL; 284 | COLOR_HLS2BGR_FULL: ColorConversionCodes.COLOR_HLS2BGR_FULL; 285 | COLOR_HLS2RGB_FULL: ColorConversionCodes.COLOR_HLS2RGB_FULL; 286 | COLOR_LBGR2Lab: ColorConversionCodes.COLOR_LBGR2Lab; 287 | COLOR_LRGB2Lab: ColorConversionCodes.COLOR_LRGB2Lab; 288 | COLOR_LBGR2Luv: ColorConversionCodes.COLOR_LBGR2Luv; 289 | COLOR_LRGB2Luv: ColorConversionCodes.COLOR_LRGB2Luv; 290 | COLOR_Lab2LBGR: ColorConversionCodes.COLOR_Lab2LBGR; 291 | COLOR_Lab2LRGB: ColorConversionCodes.COLOR_Lab2LRGB; 292 | COLOR_Luv2LBGR: ColorConversionCodes.COLOR_Luv2LBGR; 293 | COLOR_Luv2LRGB: ColorConversionCodes.COLOR_Luv2LRGB; 294 | COLOR_BGR2YUV: ColorConversionCodes.COLOR_BGR2YUV; 295 | COLOR_RGB2YUV: ColorConversionCodes.COLOR_RGB2YUV; 296 | COLOR_YUV2BGR: ColorConversionCodes.COLOR_YUV2BGR; 297 | COLOR_YUV2RGB: ColorConversionCodes.COLOR_YUV2RGB; 298 | COLOR_YUV2RGB_NV12: ColorConversionCodes.COLOR_YUV2RGB_NV12; 299 | COLOR_YUV2BGR_NV12: ColorConversionCodes.COLOR_YUV2BGR_NV12; 300 | COLOR_YUV2RGB_NV21: ColorConversionCodes.COLOR_YUV2RGB_NV21; 301 | COLOR_YUV2BGR_NV21: ColorConversionCodes.COLOR_YUV2BGR_NV21; 302 | COLOR_YUV420sp2RGB: ColorConversionCodes.COLOR_YUV420sp2RGB; 303 | COLOR_YUV420sp2BGR: ColorConversionCodes.COLOR_YUV420sp2BGR; 304 | COLOR_YUV2RGBA_NV12: ColorConversionCodes.COLOR_YUV2RGBA_NV12; 305 | COLOR_YUV2BGRA_NV12: ColorConversionCodes.COLOR_YUV2BGRA_NV12; 306 | COLOR_YUV2RGBA_NV21: ColorConversionCodes.COLOR_YUV2RGBA_NV21; 307 | COLOR_YUV2BGRA_NV21: ColorConversionCodes.COLOR_YUV2BGRA_NV21; 308 | COLOR_YUV420sp2RGBA: ColorConversionCodes.COLOR_YUV420sp2RGBA; 309 | COLOR_YUV420sp2BGRA: ColorConversionCodes.COLOR_YUV420sp2BGRA; 310 | COLOR_YUV2RGB_YV12: ColorConversionCodes.COLOR_YUV2RGB_YV12; 311 | COLOR_YUV2BGR_YV12: ColorConversionCodes.COLOR_YUV2BGR_YV12; 312 | COLOR_YUV2RGB_IYUV: 
ColorConversionCodes.COLOR_YUV2RGB_IYUV; 313 | COLOR_YUV2BGR_IYUV: ColorConversionCodes.COLOR_YUV2BGR_IYUV; 314 | COLOR_YUV2RGB_I420: ColorConversionCodes.COLOR_YUV2RGB_I420; 315 | COLOR_YUV2BGR_I420: ColorConversionCodes.COLOR_YUV2BGR_I420; 316 | COLOR_YUV420p2RGB: ColorConversionCodes.COLOR_YUV420p2RGB; 317 | COLOR_YUV420p2BGR: ColorConversionCodes.COLOR_YUV420p2BGR; 318 | COLOR_YUV2RGBA_YV12: ColorConversionCodes.COLOR_YUV2RGBA_YV12; 319 | COLOR_YUV2BGRA_YV12: ColorConversionCodes.COLOR_YUV2BGRA_YV12; 320 | COLOR_YUV2RGBA_IYUV: ColorConversionCodes.COLOR_YUV2RGBA_IYUV; 321 | COLOR_YUV2BGRA_IYUV: ColorConversionCodes.COLOR_YUV2BGRA_IYUV; 322 | COLOR_YUV2RGBA_I420: ColorConversionCodes.COLOR_YUV2RGBA_I420; 323 | COLOR_YUV2BGRA_I420: ColorConversionCodes.COLOR_YUV2BGRA_I420; 324 | COLOR_YUV420p2RGBA: ColorConversionCodes.COLOR_YUV420p2RGBA; 325 | COLOR_YUV420p2BGRA: ColorConversionCodes.COLOR_YUV420p2BGRA; 326 | COLOR_YUV2GRAY_420: ColorConversionCodes.COLOR_YUV2GRAY_420; 327 | COLOR_YUV2GRAY_NV21: ColorConversionCodes.COLOR_YUV2GRAY_NV21; 328 | COLOR_YUV2GRAY_NV12: ColorConversionCodes.COLOR_YUV2GRAY_NV12; 329 | COLOR_YUV2GRAY_YV12: ColorConversionCodes.COLOR_YUV2GRAY_YV12; 330 | COLOR_YUV2GRAY_IYUV: ColorConversionCodes.COLOR_YUV2GRAY_IYUV; 331 | COLOR_YUV2GRAY_I420: ColorConversionCodes.COLOR_YUV2GRAY_I420; 332 | COLOR_YUV420sp2GRAY: ColorConversionCodes.COLOR_YUV420sp2GRAY; 333 | COLOR_YUV420p2GRAY: ColorConversionCodes.COLOR_YUV420p2GRAY; 334 | COLOR_YUV2RGB_UYVY: ColorConversionCodes.COLOR_YUV2RGB_UYVY; 335 | COLOR_YUV2BGR_UYVY: ColorConversionCodes.COLOR_YUV2BGR_UYVY; 336 | COLOR_YUV2RGB_Y422: ColorConversionCodes.COLOR_YUV2RGB_Y422; 337 | COLOR_YUV2BGR_Y422: ColorConversionCodes.COLOR_YUV2BGR_Y422; 338 | COLOR_YUV2RGB_UYNV: ColorConversionCodes.COLOR_YUV2RGB_UYNV; 339 | COLOR_YUV2BGR_UYNV: ColorConversionCodes.COLOR_YUV2BGR_UYNV; 340 | COLOR_YUV2RGBA_UYVY: ColorConversionCodes.COLOR_YUV2RGBA_UYVY; 341 | COLOR_YUV2BGRA_UYVY: 
ColorConversionCodes.COLOR_YUV2BGRA_UYVY; 342 | COLOR_YUV2RGBA_Y422: ColorConversionCodes.COLOR_YUV2RGBA_Y422; 343 | COLOR_YUV2BGRA_Y422: ColorConversionCodes.COLOR_YUV2BGRA_Y422; 344 | COLOR_YUV2RGBA_UYNV: ColorConversionCodes.COLOR_YUV2RGBA_UYNV; 345 | COLOR_YUV2BGRA_UYNV: ColorConversionCodes.COLOR_YUV2BGRA_UYNV; 346 | COLOR_YUV2RGB_YUY2: ColorConversionCodes.COLOR_YUV2RGB_YUY2; 347 | COLOR_YUV2BGR_YUY2: ColorConversionCodes.COLOR_YUV2BGR_YUY2; 348 | COLOR_YUV2RGB_YVYU: ColorConversionCodes.COLOR_YUV2RGB_YVYU; 349 | COLOR_YUV2BGR_YVYU: ColorConversionCodes.COLOR_YUV2BGR_YVYU; 350 | COLOR_YUV2RGB_YUYV: ColorConversionCodes.COLOR_YUV2RGB_YUYV; 351 | COLOR_YUV2BGR_YUYV: ColorConversionCodes.COLOR_YUV2BGR_YUYV; 352 | COLOR_YUV2RGB_YUNV: ColorConversionCodes.COLOR_YUV2RGB_YUNV; 353 | COLOR_YUV2BGR_YUNV: ColorConversionCodes.COLOR_YUV2BGR_YUNV; 354 | COLOR_YUV2RGBA_YUY2: ColorConversionCodes.COLOR_YUV2RGBA_YUY2; 355 | COLOR_YUV2BGRA_YUY2: ColorConversionCodes.COLOR_YUV2BGRA_YUY2; 356 | COLOR_YUV2RGBA_YVYU: ColorConversionCodes.COLOR_YUV2RGBA_YVYU; 357 | COLOR_YUV2BGRA_YVYU: ColorConversionCodes.COLOR_YUV2BGRA_YVYU; 358 | COLOR_YUV2RGBA_YUYV: ColorConversionCodes.COLOR_YUV2RGBA_YUYV; 359 | COLOR_YUV2BGRA_YUYV: ColorConversionCodes.COLOR_YUV2BGRA_YUYV; 360 | COLOR_YUV2RGBA_YUNV: ColorConversionCodes.COLOR_YUV2RGBA_YUNV; 361 | COLOR_YUV2BGRA_YUNV: ColorConversionCodes.COLOR_YUV2BGRA_YUNV; 362 | COLOR_YUV2GRAY_UYVY: ColorConversionCodes.COLOR_YUV2GRAY_UYVY; 363 | COLOR_YUV2GRAY_YUY2: ColorConversionCodes.COLOR_YUV2GRAY_YUY2; 364 | COLOR_YUV2GRAY_Y422: ColorConversionCodes.COLOR_YUV2GRAY_Y422; 365 | COLOR_YUV2GRAY_UYNV: ColorConversionCodes.COLOR_YUV2GRAY_UYNV; 366 | COLOR_YUV2GRAY_YVYU: ColorConversionCodes.COLOR_YUV2GRAY_YVYU; 367 | COLOR_YUV2GRAY_YUYV: ColorConversionCodes.COLOR_YUV2GRAY_YUYV; 368 | COLOR_YUV2GRAY_YUNV: ColorConversionCodes.COLOR_YUV2GRAY_YUNV; 369 | COLOR_RGBA2mRGBA: ColorConversionCodes.COLOR_RGBA2mRGBA; 370 | COLOR_mRGBA2RGBA: 
ColorConversionCodes.COLOR_mRGBA2RGBA; 371 | COLOR_RGB2YUV_I420: ColorConversionCodes.COLOR_RGB2YUV_I420; 372 | COLOR_BGR2YUV_I420: ColorConversionCodes.COLOR_BGR2YUV_I420; 373 | COLOR_RGB2YUV_IYUV: ColorConversionCodes.COLOR_RGB2YUV_IYUV; 374 | COLOR_BGR2YUV_IYUV: ColorConversionCodes.COLOR_BGR2YUV_IYUV; 375 | COLOR_RGBA2YUV_I420: ColorConversionCodes.COLOR_RGBA2YUV_I420; 376 | COLOR_BGRA2YUV_I420: ColorConversionCodes.COLOR_BGRA2YUV_I420; 377 | COLOR_RGBA2YUV_IYUV: ColorConversionCodes.COLOR_RGBA2YUV_IYUV; 378 | COLOR_BGRA2YUV_IYUV: ColorConversionCodes.COLOR_BGRA2YUV_IYUV; 379 | COLOR_RGB2YUV_YV12: ColorConversionCodes.COLOR_RGB2YUV_YV12; 380 | COLOR_BGR2YUV_YV12: ColorConversionCodes.COLOR_BGR2YUV_YV12; 381 | COLOR_RGBA2YUV_YV12: ColorConversionCodes.COLOR_RGBA2YUV_YV12; 382 | COLOR_BGRA2YUV_YV12: ColorConversionCodes.COLOR_BGRA2YUV_YV12; 383 | COLOR_BayerBG2BGR: ColorConversionCodes.COLOR_BayerBG2BGR; 384 | COLOR_BayerGB2BGR: ColorConversionCodes.COLOR_BayerGB2BGR; 385 | COLOR_BayerRG2BGR: ColorConversionCodes.COLOR_BayerRG2BGR; 386 | COLOR_BayerGR2BGR: ColorConversionCodes.COLOR_BayerGR2BGR; 387 | COLOR_BayerBG2RGB: ColorConversionCodes.COLOR_BayerBG2RGB; 388 | COLOR_BayerGB2RGB: ColorConversionCodes.COLOR_BayerGB2RGB; 389 | COLOR_BayerRG2RGB: ColorConversionCodes.COLOR_BayerRG2RGB; 390 | COLOR_BayerGR2RGB: ColorConversionCodes.COLOR_BayerGR2RGB; 391 | COLOR_BayerBG2GRAY: ColorConversionCodes.COLOR_BayerBG2GRAY; 392 | COLOR_BayerGB2GRAY: ColorConversionCodes.COLOR_BayerGB2GRAY; 393 | COLOR_BayerRG2GRAY: ColorConversionCodes.COLOR_BayerRG2GRAY; 394 | COLOR_BayerGR2GRAY: ColorConversionCodes.COLOR_BayerGR2GRAY; 395 | COLOR_BayerBG2BGR_VNG: ColorConversionCodes.COLOR_BayerBG2BGR_VNG; 396 | COLOR_BayerGB2BGR_VNG: ColorConversionCodes.COLOR_BayerGB2BGR_VNG; 397 | COLOR_BayerRG2BGR_VNG: ColorConversionCodes.COLOR_BayerRG2BGR_VNG; 398 | COLOR_BayerGR2BGR_VNG: ColorConversionCodes.COLOR_BayerGR2BGR_VNG; 399 | COLOR_BayerBG2RGB_VNG: 
ColorConversionCodes.COLOR_BayerBG2RGB_VNG; 400 | COLOR_BayerGB2RGB_VNG: ColorConversionCodes.COLOR_BayerGB2RGB_VNG; 401 | COLOR_BayerRG2RGB_VNG: ColorConversionCodes.COLOR_BayerRG2RGB_VNG; 402 | COLOR_BayerGR2RGB_VNG: ColorConversionCodes.COLOR_BayerGR2RGB_VNG; 403 | COLOR_BayerBG2BGR_EA: ColorConversionCodes.COLOR_BayerBG2BGR_EA; 404 | COLOR_BayerGB2BGR_EA: ColorConversionCodes.COLOR_BayerGB2BGR_EA; 405 | COLOR_BayerRG2BGR_EA: ColorConversionCodes.COLOR_BayerRG2BGR_EA; 406 | COLOR_BayerGR2BGR_EA: ColorConversionCodes.COLOR_BayerGR2BGR_EA; 407 | COLOR_BayerBG2RGB_EA: ColorConversionCodes.COLOR_BayerBG2RGB_EA; 408 | COLOR_BayerGB2RGB_EA: ColorConversionCodes.COLOR_BayerGB2RGB_EA; 409 | COLOR_BayerRG2RGB_EA: ColorConversionCodes.COLOR_BayerRG2RGB_EA; 410 | COLOR_BayerGR2RGB_EA: ColorConversionCodes.COLOR_BayerGR2RGB_EA; 411 | COLOR_BayerBG2BGRA: ColorConversionCodes.COLOR_BayerBG2BGRA; 412 | COLOR_BayerGB2BGRA: ColorConversionCodes.COLOR_BayerGB2BGRA; 413 | COLOR_BayerRG2BGRA: ColorConversionCodes.COLOR_BayerRG2BGRA; 414 | COLOR_BayerGR2BGRA: ColorConversionCodes.COLOR_BayerGR2BGRA; 415 | COLOR_BayerBG2RGBA: ColorConversionCodes.COLOR_BayerBG2RGBA; 416 | COLOR_BayerGB2RGBA: ColorConversionCodes.COLOR_BayerGB2RGBA; 417 | COLOR_BayerRG2RGBA: ColorConversionCodes.COLOR_BayerRG2RGBA; 418 | COLOR_BayerGR2RGBA: ColorConversionCodes.COLOR_BayerGR2RGBA; 419 | COLORCOLORCVT_MAX: ColorConversionCodes.COLOR_COLORCVT_MAX; 420 | } 421 | 422 | interface ColorConversions { 423 | /** 424 | * Converts an image from one color space to another. 425 | * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision floating-point. 426 | * @param dst output image of the same size and depth as src. 427 | * @param code color space conversion code. 428 | * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the channels is derived automatically from src and code. 
429 | */ 430 | cvtColor(src: Mat, dst: Mat, code: ColorConversionCodes, dstCn?: number): void; 431 | /** 432 | * Converts an image from one color space to another where the source image is stored in two planes. This function only supports YUV420 to RGB conversion as of now. 433 | * @param src1 8-bit image (CV_8U) of the Y plane. 434 | * @param src2 image containing interleaved U/V plane. 435 | * @param dst output image. 436 | * @param code Specifies the type of conversion 437 | */ 438 | cvtColorTwoPlane( 439 | src1: Mat, 440 | src2: Mat, 441 | dst: Mat, 442 | code: 443 | | ColorConversionCodes.COLOR_YUV2BGR_NV12 444 | | ColorConversionCodes.COLOR_YUV2RGB_NV12 445 | | ColorConversionCodes.COLOR_YUV2BGRA_NV12 446 | | ColorConversionCodes.COLOR_YUV2RGBA_NV12 447 | | ColorConversionCodes.COLOR_YUV2BGR_NV21 448 | | ColorConversionCodes.COLOR_YUV2RGB_NV21 449 | | ColorConversionCodes.COLOR_YUV2BGRA_NV21 450 | | ColorConversionCodes.COLOR_YUV2RGBA_NV21 451 | ): void; 452 | /** 453 | * main function for all demosaicing processes 454 | * @param src input image: 8-bit unsigned or 16-bit unsigned. 455 | * @param dst output image of the same size and depth as src. 456 | * @param code Color space conversion code (see the description below). 457 | * @param dstCn number of channels in the destination image 458 | */ 459 | demosaicing( 460 | src: Mat, 461 | dst: Mat, 462 | code: //Demosaicing using bilinear interpolation 463 | | ColorConversionCodes.COLOR_BayerBG2BGR 464 | | ColorConversionCodes.COLOR_BayerGB2BGR 465 | | ColorConversionCodes.COLOR_BayerRG2BGR 466 | | ColorConversionCodes.COLOR_BayerGR2BGR 467 | | ColorConversionCodes.COLOR_BayerBG2GRAY 468 | | ColorConversionCodes.COLOR_BayerGB2GRAY 469 | | ColorConversionCodes.COLOR_BayerRG2GRAY 470 | | ColorConversionCodes.COLOR_BayerGR2GRAY 471 | // Demosaicing using Variable Number of Gradients. 
472 | | ColorConversionCodes.COLOR_BayerBG2BGR_VNG 473 | | ColorConversionCodes.COLOR_BayerGB2BGR_VNG 474 | | ColorConversionCodes.COLOR_BayerRG2BGR_VNG 475 | | ColorConversionCodes.COLOR_BayerGR2BGR_VNG 476 | // Edge-Aware Demosaicing. 477 | | ColorConversionCodes.COLOR_BayerBG2BGR_EA 478 | | ColorConversionCodes.COLOR_BayerGB2BGR_EA 479 | | ColorConversionCodes.COLOR_BayerRG2BGR_EA 480 | | ColorConversionCodes.COLOR_BayerGR2BGR_EA 481 | // Demosaicing with alpha channel 482 | | ColorConversionCodes.COLOR_BayerBG2BGRA 483 | | ColorConversionCodes.COLOR_BayerGB2BGRA 484 | | ColorConversionCodes.COLOR_BayerRG2BGRA 485 | | ColorConversionCodes.COLOR_BayerGR2BGRA, 486 | dstCn?: number 487 | ): void; 488 | } 489 | } 490 | 491 | export = ColorConversions; 492 | -------------------------------------------------------------------------------- /src/core/Core.d.ts: -------------------------------------------------------------------------------- 1 | import { Point } from './Point'; 2 | 3 | declare module Core { 4 | type NDArray = NDimensionArray; 5 | interface NDimensionArray extends Array | T > {} 6 | 7 | enum CovarFlags { 8 | COVAR_SCRAMBLED = 0, 9 | COVAR_NORMAL = 1, 10 | COVAR_USE_AVG = 2, 11 | COVAR_SCALE = 4, 12 | COVAR_ROWS = 8, 13 | COVAR_COLS = 16, 14 | } 15 | 16 | interface _CovarFlags { 17 | COVAR_SCRAMBLED: CovarFlags.COVAR_SCRAMBLED; 18 | COVAR_NORMAL: CovarFlags.COVAR_NORMAL; 19 | COVAR_USE_AVG: CovarFlags.COVAR_USE_AVG; 20 | COVAR_SCALE: CovarFlags.COVAR_SCALE; 21 | COVAR_ROWS: CovarFlags.COVAR_ROWS; 22 | COVAR_COLS: CovarFlags.COVAR_COLS; 23 | } 24 | 25 | enum QuatAssumeType { 26 | QUAT_ASSUME_NOT_UNIT = 'QUAT_ASSUME_NOT_UNIT', 27 | QUAT_ASSUME_UNIT = 'QUAT_ASSUME_UNIT', 28 | } 29 | 30 | interface _QuatAssumeType { 31 | QUAT_ASSUME_NOT_UNIT: QuatAssumeType.QUAT_ASSUME_NOT_UNIT; 32 | QUAT_ASSUME_UNIT: QuatAssumeType.QUAT_ASSUME_UNIT; 33 | } 34 | } 35 | export = Core; 36 | -------------------------------------------------------------------------------- 
/src/core/HalInterface.d.ts: -------------------------------------------------------------------------------- 1 | declare module HalInterface { 2 | enum DataTypes { 3 | CV_8U = 0, 4 | CV_8S = 1, 5 | CV_16U = 2, 6 | CV_16S = 3, 7 | CV_32S = 4, 8 | CV_32F = 5, 9 | CV_64F = 6, 10 | CV_16F = 7, 11 | CV_8UC1 = 0, 12 | CV_8UC2 = 8, 13 | CV_8UC3 = 16, 14 | CV_8UC4 = 24, 15 | CV_8SC1 = 1, 16 | CV_8SC2 = 9, 17 | CV_8SC3 = 17, 18 | CV_8SC4 = 25, 19 | CV_16UC1 = 2, 20 | CV_16UC2 = 10, 21 | CV_16UC3 = 18, 22 | CV_16UC4 = 26, 23 | CV_16SC1 = 3, 24 | CV_16SC2 = 11, 25 | CV_16SC3 = 19, 26 | CV_16SC4 = 27, 27 | CV_32SC1 = 4, 28 | CV_32SC2 = 12, 29 | CV_32SC3 = 20, 30 | CV_32SC4 = 28, 31 | CV_32FC1 = 5, 32 | CV_32FC2 = 13, 33 | CV_32FC3 = 21, 34 | CV_32FC4 = 29, 35 | CV_64FC1 = 6, 36 | CV_64FC2 = 14, 37 | CV_64FC3 = 22, 38 | CV_64FC4 = 30, 39 | CV_16FC1 = 7, 40 | CV_16FC2 = 15, 41 | CV_16FC3 = 23, 42 | CV_16FC4 = 31, 43 | } 44 | 45 | interface _DataTypes { 46 | CV_8U: DataTypes.CV_8U; 47 | CV_8S: DataTypes.CV_8S; 48 | CV_16U: DataTypes.CV_16U; 49 | CV_16S: DataTypes.CV_16S; 50 | CV_32S: DataTypes.CV_32S; 51 | CV_32F: DataTypes.CV_32F; 52 | CV_64F: DataTypes.CV_64F; 53 | CV_16F: DataTypes.CV_16F; 54 | CV_8UC1: DataTypes.CV_8UC1; 55 | CV_8UC2: DataTypes.CV_8UC2; 56 | CV_8UC3: DataTypes.CV_8UC3; 57 | CV_8UC4: DataTypes.CV_8UC4; 58 | CV_8SC1: DataTypes.CV_8SC1; 59 | CV_8SC2: DataTypes.CV_8SC2; 60 | CV_8SC3: DataTypes.CV_8SC3; 61 | CV_8SC4: DataTypes.CV_8SC4; 62 | CV_16UC1: DataTypes.CV_16UC1; 63 | CV_16UC2: DataTypes.CV_16UC2; 64 | CV_16UC3: DataTypes.CV_16UC3; 65 | CV_16UC4: DataTypes.CV_16UC4; 66 | CV_16SC1: DataTypes.CV_16SC1; 67 | CV_16SC2: DataTypes.CV_16SC2; 68 | CV_16SC3: DataTypes.CV_16SC3; 69 | CV_16SC4: DataTypes.CV_16SC4; 70 | CV_32SC1: DataTypes.CV_32SC1; 71 | CV_32SC2: DataTypes.CV_32SC2; 72 | CV_32SC3: DataTypes.CV_32SC3; 73 | CV_32SC4: DataTypes.CV_32SC4; 74 | CV_32FC1: DataTypes.CV_32FC1; 75 | CV_32FC2: DataTypes.CV_32FC2; 76 | CV_32FC3: DataTypes.CV_32FC3; 77 | 
CV_32FC4: DataTypes.CV_32FC4; 78 | CV_64FC1: DataTypes.CV_64FC1; 79 | CV_64FC2: DataTypes.CV_64FC2; 80 | CV_64FC3: DataTypes.CV_64FC3; 81 | CV_64FC4: DataTypes.CV_64FC4; 82 | CV_16FC1: DataTypes.CV_16FC1; 83 | CV_16FC2: DataTypes.CV_16FC2; 84 | CV_16FC3: DataTypes.CV_16FC3; 85 | CV_16FC4: DataTypes.CV_16FC4; 86 | } 87 | } 88 | export = HalInterface; 89 | -------------------------------------------------------------------------------- /src/core/Mat.d.ts: -------------------------------------------------------------------------------- 1 | import { ColorConversionCodes } from './ColorConversion'; 2 | import { DataTypes, _DataTypes } from './HalInterface'; 3 | import { Scalar } from './Scalar'; 4 | import { Size } from './Size'; 5 | import { Range } from './Range'; 6 | import { DecompTypes } from './CoreArray'; 7 | import { Rect } from './Rect'; 8 | 9 | declare module Mat { 10 | interface zeros { 11 | /** 12 | * The method returns a Matlab-style zero array initializer. It can be used to quickly form a constant 13 | * array as a function parameter, part of a matrix expression, or as a matrix initializer: 14 | * @example 15 | * const m: Mat = cv.Mat.zeros(new cv.Size(3, 3), cv.CV_8UC3); 16 | * @param size Alternative to the matrix size specification Size(cols, rows) . 17 | * @param type Created matrix type. 18 | * @returns Returns a zero array of the specified size and type. 19 | */ 20 | new (size: Size, type: DataTypes): Mat; 21 | /** 22 | * The method returns a Matlab-style zero array initializer. It can be used to quickly form a constant 23 | * array as a function parameter, part of a matrix expression, or as a matrix initializer: 24 | * @example 25 | * const m: Mat = cv.Mat.zeros(3, 3, cv.CV_8UC3); 26 | * @param rows Number of rows. 27 | * @param cols Number of columns. 28 | * @param type Created matrix type. 29 | * @returns Returns a zero array of the specified size and type. 
30 | */ 31 | new (rows: number, cols: number, type: DataTypes): Mat; 32 | } 33 | 34 | interface ones { 35 | /** 36 | * The method returns a Matlab-style 1's array initializer, similarly to Mat::zeros. Note that using 37 | * this method you can initialize an array with an arbitrary value, using the following Matlab idiom: 38 | * Note: In case of multi-channels type, only the first channel will be initialized with 1's, the 39 | * others will be set to 0's. 40 | * @example 41 | * const m: Mat = cv.Mat.ones(new cv.Size(3, 3), cv.CV_8UC3); 42 | * @param size Alternative to the matrix size specification Size(cols, rows) . 43 | * @param type Created matrix type. 44 | * @return Returns a zero array of the specified size and type. 45 | */ 46 | new (size: Size, type: DataTypes): Mat; 47 | /** 48 | * The method returns a Matlab-style 1's array initializer, similarly to Mat::zeros. Note that using 49 | * this method you can initialize an array with an arbitrary value, using the following Matlab idiom: 50 | * Note: In case of multi-channels type, only the first channel will be initialized with 1's, the 51 | * others will be set to 0's. 52 | * @example 53 | * const m: Mat = cv.Mat.ones(3, 3, cv.CV_8UC3); 54 | * @param rows Number of rows. 55 | * @param cols Number of columns. 56 | * @param type Created matrix type. 57 | * @returns Returns a zero array of the specified size and type. 58 | */ 59 | new (rows: number, cols: number, type: DataTypes): Mat; 60 | } 61 | interface Mat { 62 | new (): Mat; 63 | new (mat: Mat): Mat; 64 | new (rows: number, cols: number, type: DataTypes, s?: Scalar): Mat; 65 | zeros: zeros; 66 | ones: ones; 67 | delete(): void; 68 | /** 69 | * The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. 
Similarly to 70 | * Mat::ones, you can use a scale operation to create a scaled identity matrix efficiently: 71 | * Note: In case of multi-channels type, identity matrix will be initialized only for the first channel, 72 | * the others will be set to 0's 73 | * @example 74 | * const m: Mat = cv.Mat.eye(new cv.Size(3, 3), cv.CV_8UC3); 75 | * @param size Alternative matrix size specification as Size(cols, rows) . 76 | * @param type Created matrix type. 77 | * @returns The method returns a Matlab-style identity matrix initializer 78 | */ 79 | eye(size: Size, type: DataTypes): Mat; 80 | /** 81 | * The method returns a Matlab-style identity matrix initializer, similarly to Mat::zeros. Similarly to 82 | * Mat::ones, you can use a scale operation to create a scaled identity matrix efficiently: 83 | * Note: In case of multi-channels type, identity matrix will be initialized only for the first channel, 84 | * @example 85 | * const m: Mat = cv.Mat.eye(3, 3, cv.CV_8UC3); 86 | * @param rows Number of rows. 87 | * @param cols Number of columns. 88 | * @param type Created matrix type. 89 | * @returns Returns an identity matrix of the specified size and type. 90 | */ 91 | eye(rows: number, cols: number, type: DataTypes): Mat; 92 | 93 | /** 94 | * @returns Type of Mat 95 | */ 96 | type(): DataTypes; 97 | /** 98 | * The number of rows in the matrix 99 | */ 100 | rows: number; 101 | /** 102 | * The number of cols in the matrix 103 | */ 104 | cols: number; 105 | /** 106 | * size of the matrix (rows, cols) // (width, height) 107 | */ 108 | matSize: Array; 109 | step: Array; 110 | data: Uint8Array; 111 | data8S: Int8Array; 112 | data16U: Uint16Array; 113 | data16S: Int16Array; 114 | data32S: Int32Array; 115 | data32F: Float32Array; 116 | data64F: Float64Array; 117 | /** 118 | * The method returns the matrix element size in bytes. For example, if the matrix type is CV_16SC3 , the method returns 3*sizeof(short) or 6. 119 | * @returns matrix element size in bytes. 
120 | */ 121 | elemSize(): number; 122 | /** 123 | * The method returns the matrix element channel size in bytes, that is, it ignores the number of channels. For example, if the matrix type is CV_16SC3 , the method returns sizeof(short) or 2. 124 | * @returns size of each matrix element channel in bytes. 125 | */ 126 | elemSize1(): number; 127 | /** 128 | * The method returns the number of matrix channels. 129 | * @returns the number of matrix channels. 130 | */ 131 | channels(): number; 132 | /** 133 | * Converts an array to another data type with optional scaling. 134 | * The method converts source pixel values to the target data type. 135 | * @param m output matrix; if it does not have a proper size or type before the operation, it is 136 | * reallocated. 137 | * @param rtype desired output matrix type or, rather, the depth since the number of channels are the 138 | * same as the input has; if rtype is negative, the output matrix will have the same type as the input. 139 | * @param alpha optional scale factor. 140 | * @param beta optional delta added to the scaled values. 141 | */ 142 | convertTo(m: Mat, rtype: DataTypes, alpha?: number, beta?: number): void; 143 | convertTo(m: Mat, rtype: DataTypes, alpha: number, beta?: number): void; 144 | convertTo(m: Mat, rtype: DataTypes, alpha: number, beta: number): void; 145 | /** 146 | * The method returns the number of array elements (a number of pixels if the array represents an image). 147 | * @returns the total number of array elements. 148 | */ 149 | total(): number; 150 | /** 151 | * @param y row to return 152 | * @returns the specificed matrix row 153 | */ 154 | row(y: number): Array; 155 | /** 156 | * This is one of the key Mat methods. Most new-style OpenCV functions and methods that produce arrays call this method for each output array. The method uses the following algorithm: 157 | * 1 - If the current array shape and the type match the new ones, return immediately. 
Otherwise, de-reference the previous data by calling Mat::release. 158 | * 2- Initialize the new header. 159 | * 3 - Allocate the new data of total()*elemSize() bytes. 160 | * 4 - Allocate the new, associated with the data, reference counter and set it to 1. 161 | * @param rows New number of rows. 162 | * @param cols New number of columns. 163 | * @param type New matrix type. 164 | */ 165 | create(rows: number, cols: number, type: DataTypes): void; 166 | /** 167 | * This is one of the key Mat methods. Most new-style OpenCV functions and methods that produce arrays call this method for each output array. The method uses the following algorithm: 168 | * 1 - If the current array shape and the type match the new ones, return immediately. Otherwise, de-reference the previous data by calling Mat::release. 169 | * 2- Initialize the new header. 170 | * 3 - Allocate the new data of total()*elemSize() bytes. 171 | * 4 - Allocate the new, associated with the data, reference counter and set it to 1. 172 | * @param size Alternative new matrix size specification: Size(cols, rows) 173 | * @param type New matrix type. 174 | */ 175 | create(size: Size, type: DataTypes): void; 176 | /** 177 | * The method makes a new header for the specified row span of the matrix. Similarly to Mat::row and 178 | * Mat::col , this is an O(1) operation. 179 | * @param startrow An inclusive 0-based start index of the row span. 180 | * @param endrow An exclusive 0-based ending index of the row span. 181 | * @returns A Mat which is a copy of the row range 182 | */ 183 | rowRange(startrow: number, endrow: number): Mat; 184 | /** 185 | * The method makes a new header for the specified row span of the matrix. Similarly to Mat::row and 186 | * Mat::col , this is an O(1) operation. 187 | * @param r Range structure containing both the start and the end indices. 
188 | * @returns A Mat which is a copy of the row range 189 | */ 190 | rowRange(r: Range): Mat; 191 | /** 192 | * The method copies the matrix data to another matrix. 193 | * When the operation mask is specified, if the Mat::create call shown above reallocates the matrix, 194 | * the newly allocated matrix is initialized with all zeros before copying the data. 195 | * @param m Destination matrix. If it does not have a proper size or type before the operation, it is reallocated. 196 | * @param mask Optional, Operation mask of the same size as \*this. Its non-zero elements indicate which matrix 197 | * elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels. 198 | */ 199 | copyTo(m: Mat, mask?: Mat): void; 200 | /** 201 | * @returns true if the array has no elements. 202 | */ 203 | empty(): boolean; 204 | /** 205 | * The method makes a new header for the specified column span of the matrix. Similarly to Mat::column and 206 | * Mat::col , this is an O(1) operation. 207 | * @param startcol An inclusive 0-based start index of the column span. 208 | * @param endcol An exclusive 0-based ending index of the column span. 209 | * @returns A Mat which is a copy of the column range 210 | */ 211 | colRange(startcol: number, endcol: number): Mat; 212 | /** 213 | * The method makes a new header for the specified column span of the matrix. Similarly to Mat::column and 214 | * Mat::col , this is an O(1) operation. 215 | * @param r Range structure containing both the start and the end indices. 216 | * @returns A Mat which is a copy of the column range 217 | */ 218 | colRange(r: Range): Mat; 219 | /** 220 | * The method returns a matrix step divided by Mat::elemSize1() . It can be useful to quickly access an arbitrary matrix element. 221 | * @returns a normalized step. 222 | */ 223 | step1(): number; 224 | /** 225 | * Creates a full copy of the array and the underlying data. 
226 | * @returns a Mat copy of the underlying data 227 | */ 228 | clone(): Mat; 229 | /** 230 | * @returns the depth of a matrix element. 231 | */ 232 | depth(): number; 233 | /** 234 | * Creates a matrix header for the specified matrix column. 235 | * @param x A 0-based column index 236 | * @returns a Mat which represenst the specified matrix column 237 | */ 238 | col(x: number): Mat; 239 | /** 240 | * Computes a dot-product of two vectors. 241 | * @param m another dot-product operand. 242 | * @returns dot-product of two vectors. 243 | */ 244 | dot(m: Mat): number; 245 | /** 246 | * Performs an element-wise multiplication or division of the two matrices. 247 | * @param m Another array of the same type and the same size as \*this, or a matrix expression. 248 | * @param scale Optional scale factor. 249 | * @returns a temporary object encoding per-element array multiplication, with optional scale 250 | */ 251 | mul(m: Mat, scale?: number): Mat; 252 | /** 253 | * Inverses a matrix. 254 | * The method performs a matrix inversion by means of matrix expressions. This means that a temporary 255 | * matrix inversion object is returned by the method and can be used further as a part of more complex 256 | * matrix expressions or can be assigned to a matrix. 257 | * @param method Matrix inversion method. One of cv::DecompTypes 258 | */ 259 | inv(method: DecompTypes): Mat; 260 | t(): Mat; 261 | /** 262 | * Creats a submatrix of Matrix 263 | * @param r the area to create the submatrix of, cannot be lareger than the original Matrix 264 | * @returns submatrix of matrix 265 | */ 266 | roi(r: Rect): Mat; 267 | /** 268 | * creates a diagonal matrix 269 | * The method creates a square diagonal matrix from specified main diagonal. 270 | * @param d One-dimensional matrix that represents the main diagonal. 271 | */ 272 | diag(d: number): Mat; 273 | /** 274 | * Creates a full copy of the array and the underlying data. 275 | * The method creates a full copy of the array. 
The original step[] is not taken into account. So, the 276 | * array copy is a continuous array occupying total()*elemSize() bytes. 277 | * @param d One-dimensional matrix that represents the main diagonal. 278 | */ 279 | diag(d: Mat): Mat; 280 | /** 281 | * Reports whether the matrix is continuous or not. 282 | * @returns true if the matrix elements are stored continuously without gaps at the end of 283 | * each row. Otherwise, it returns false. 284 | */ 285 | isContinuous(): boolean; 286 | /** 287 | * Sets all or some of the array elements to the specified value. 288 | * @param s Assigned scalar converted to the actual array type. 289 | */ 290 | setTo(s: Scalar): void; 291 | /** 292 | * Sets all or some of the array elements to the specified value. 293 | * @param s Assigned scalar converted to the actual array type. 294 | * @param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix 295 | * elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels 296 | */ 297 | setTo(s: Scalar, mask: Mat): void; 298 | /** 299 | * Gets the size of the matrix and returns it as a cv.Size object 300 | * @returns Size of the Matrix 301 | */ 302 | size(): Size; 303 | /** 304 | * Returns a pointer to the specified matrix row. 305 | * @param i0 A 0-based row index. 306 | * @returns pointer to the specified element (1D case) 307 | */ 308 | ptr(i0: number): Uint8Array; 309 | /** 310 | * Returns a pointer to the specified matrix row. 311 | * @param i0 A 0-based row index. 312 | * @param i1 A 0-based column index. 313 | * @returns pointer to the specified element (2D case) 314 | */ 315 | ptr(i0: number, i1: number): Uint8Array; 316 | /** 317 | * Returns a pointer to the specified matrix row. 318 | * @param i0 A 0-based row index. 319 | * @returns the specified matrix row. 320 | */ 321 | ucharPtr(i0: number): Uint8Array; 322 | /** 323 | * Returns a pointer to the specified matrix row. 
324 | * @param row Index along the dimension 0 325 | * @param col Index along the dimension 1 326 | * @returns the specified matrix row. 327 | */ 328 | ucharPtr(row: number, col: number): Uint8Array; 329 | /** 330 | * Returns a pointer to the specified matrix row. 331 | * @param i0 A 0-based row index. 332 | * @returns the specified matrix row. 333 | */ 334 | charPtr(i0: number): Int8Array; 335 | /** 336 | * Returns a pointer to the specified matrix row. 337 | * @param row Index along the dimension 0 338 | * @param col Index along the dimension 1 339 | * @returns the specified matrix row. 340 | */ 341 | charPtr(row: number, col: number): Int8Array; 342 | /** 343 | * Returns a pointer to the specified matrix row. 344 | * @param i0 A 0-based row index. 345 | * @returns the specified matrix row. 346 | */ 347 | shortPtr(i0: number): Int16Array; 348 | /** 349 | * Returns a pointer to the specified matrix row. 350 | * @param row Index along the dimension 0 351 | * @param col Index along the dimension 1 352 | * @returns the specified matrix row. 353 | */ 354 | shortPtr(row: number, col: number): Int16Array; 355 | /** 356 | * Returns a pointer to the specified matrix row. 357 | * @param i0 A 0-based row index. 358 | * @returns the specified matrix row. 359 | */ 360 | ushortPtr(i0: number): Uint16Array; 361 | /** 362 | * Returns a pointer to the specified matrix row. 363 | * @param row Index along the dimension 0 364 | * @param col Index along the dimension 1 365 | * @returns the specified matrix row. 366 | */ 367 | ushortPtr(row: number, col: number): Uint16Array; 368 | /** 369 | * Returns a pointer to the specified matrix row. 370 | * @param i0 A 0-based row index. 371 | * @returns the specified matrix row. 372 | */ 373 | intPtr(i0: number): Int8Array; 374 | /** 375 | * Returns a pointer to the specified matrix row. 376 | * @param row Index along the dimension 0 377 | * @param col Index along the dimension 1 378 | * @returns the specified matrix row. 
379 | */ 380 | intPtr(row: number, col: number): Int8Array; 381 | /** 382 | * Returns a pointer to the specified matrix row. 383 | * @param i0 A 0-based row index. 384 | * @returns the specified matrix row. 385 | */ 386 | floatPtr(i0: number): Float32Array; 387 | /** 388 | * Returns a pointer to the specified matrix row. 389 | * @param row Index along the dimension 0 390 | * @param col Index along the dimension 1 391 | * @returns the specified matrix row. 392 | */ 393 | floatPtr(row: number, col: number): Float32Array; 394 | /** 395 | * Returns a pointer to the specified matrix row. 396 | * @param i0 A 0-based row index. 397 | * @returns the specified matrix row. 398 | */ 399 | doublePtr(i0: number): Float64Array; 400 | /** 401 | * Returns a pointer to the specified matrix row. 402 | * @param row Index along the dimension 0 403 | * @param col Index along the dimension 1 404 | * @returns the specified matrix row. 405 | */ 406 | doublePtr(row: number, col: number): Float64Array; 407 | 408 | charAt(i0: number): number; 409 | charAt(i0: number, i1: number): number; 410 | charAt(i0: number, i1: number, i2: number): number; 411 | 412 | ucharAt(i0: number): number; 413 | ucharAt(i0: number, i1: number): number; 414 | ucharAt(i0: number, i1: number, i2: number): number; 415 | 416 | shortAt(i0: number): number; 417 | shortAt(i0: number, i1: number): number; 418 | shortAt(i0: number, i1: number, i2: number): number; 419 | 420 | ushortAt(i0: number): number; 421 | ushortAt(i0: number, i1: number): number; 422 | ushortAt(i0: number, i1: number, i2: number): number; 423 | 424 | intAt(i0: number): number; 425 | intAt(i0: number, i1: number): number; 426 | intAt(i0: number, i1: number, i2: number): number; 427 | 428 | floatAt(i0: number): number; 429 | floatAt(i0: number, i1: number): number; 430 | floatAt(i0: number, i1: number, i2: number): number; 431 | 432 | doubleAt(i0: number): number; 433 | doubleAt(i0: number, i1: number): number; 434 | doubleAt(i0: number, i1: number, 
i2: number): number; 435 | } 436 | } 437 | export = Mat; 438 | -------------------------------------------------------------------------------- /src/core/MatVector.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from "./Mat"; 2 | 3 | declare module MatVector { 4 | interface MatVector { 5 | new (): MatVector; 6 | push_back(m: Mat): void; 7 | delete(): void; 8 | get(n: number): Mat; 9 | size(): number; 10 | } 11 | } 12 | export = MatVector; 13 | -------------------------------------------------------------------------------- /src/core/Moments.d.ts: -------------------------------------------------------------------------------- 1 | declare module Moments { 2 | interface Moments { 3 | new() : Moments; 4 | 5 | // spatial moments 6 | m00: number; 7 | m10: number; 8 | m01: number; 9 | m20: number; 10 | m11: number; 11 | m02: number; 12 | m30: number; 13 | m21: number; 14 | m12: number; 15 | m03: number; 16 | // central moments 17 | mu20: number; 18 | mu11: number; 19 | mu02: number; 20 | mu30: number; 21 | mu21: number; 22 | mu12: number; 23 | mu03: number; 24 | // central normalized moments 25 | nu20: number; 26 | nu11: number; 27 | nu02: number; 28 | nu30: number; 29 | nu21: number; 30 | nu12: number; 31 | nu03: number; 32 | } 33 | } 34 | 35 | export = Moments; 36 | -------------------------------------------------------------------------------- /src/core/Point.d.ts: -------------------------------------------------------------------------------- 1 | declare module Point { 2 | interface Point { 3 | new(x: number, y: number): Point; 4 | x: number; 5 | y: number; 6 | } 7 | } 8 | export = Point; -------------------------------------------------------------------------------- /src/core/Range.d.ts: -------------------------------------------------------------------------------- 1 | declare module Range { 2 | interface Range { 3 | new(): Range; 4 | new(start: number, end: number): Range; 5 | start: number, 6 | end: 
number 7 | } 8 | } 9 | export = Range; -------------------------------------------------------------------------------- /src/core/Rect.d.ts: -------------------------------------------------------------------------------- 1 | import { Point } from "./Point"; 2 | import { Size } from "./Size"; 3 | 4 | declare module Rect { 5 | interface Rect { 6 | new(): Rect; 7 | new(x: number, y: number, width: number, height: number): Rect; 8 | new(r: Rect): Rect; 9 | new(org: Point, sz: Size): Rect; 10 | new(pt1: Point, pt2: Point): Rect; 11 | /** 12 | * x coordinate of the top-left corner 13 | */ 14 | x: number; 15 | /** 16 | * y coordinate of the top-left corner 17 | */ 18 | y: number; 19 | /** 20 | * width of the rectangle 21 | */ 22 | width: number; 23 | /** 24 | * height of the rectangle 25 | */ 26 | height: number; 27 | /** 28 | * Top left corner 29 | */ 30 | tl(): Point; 31 | /** 32 | * Bottom right corner 33 | */ 34 | br(): Point; 35 | /** 36 | * Size of the rect (width, height) 37 | */ 38 | size(): Size; 39 | /** 40 | * area of the Rectangle (width * height) 41 | */ 42 | area(): number; 43 | /** 44 | * true if empty 45 | */ 46 | empty(): boolean; 47 | /** 48 | * checks whether the rectangle contains the point 49 | * @param pt Point to check inside rectangle 50 | */ 51 | contains(pt: Point): boolean; 52 | } 53 | } 54 | 55 | export = Rect; -------------------------------------------------------------------------------- /src/core/RotatedRect.d.ts: -------------------------------------------------------------------------------- 1 | import { Point } from './Point'; 2 | import { Rect } from './Rect'; 3 | import { Size } from './Size'; 4 | 5 | declare module RotatedRect { 6 | interface RotatedRect { 7 | new (): RotatedRect; 8 | new (rect: Rect): RotatedRect; 9 | new (pt: Point, sz: Size): RotatedRect; 10 | new (x: number, y: number, width: number, height: number): RotatedRect; 11 | points(pts: Point[]): void; 12 | boundingRect(): Rect; 13 | boundingRect2f(): Rect; 14 | angle: 
number; 15 | center: Point; 16 | size: Size; 17 | } 18 | } 19 | export = RotatedRect; 20 | -------------------------------------------------------------------------------- /src/core/Scalar.d.ts: -------------------------------------------------------------------------------- 1 | declare module Scalar { 2 | interface Scalar { 3 | new(): Scalar; 4 | new(s1: number): Scalar; 5 | new(s1: number, s2: number, s3: number, s4?: number): Scalar; 6 | 7 | //returns a scalar with all elements set to v0 8 | all(v0: number): Scalar; 9 | } 10 | } 11 | export = Scalar 12 | -------------------------------------------------------------------------------- /src/core/Size.d.ts: -------------------------------------------------------------------------------- 1 | declare module Size { 2 | interface Size { 3 | new(width: number, height: number): Size; 4 | width: number; 5 | height: number; 6 | } 7 | } 8 | export = Size; -------------------------------------------------------------------------------- /src/core/TermCriteria.d.ts: -------------------------------------------------------------------------------- 1 | declare module TermCriteria { 2 | enum Type { 3 | TERM_CRITERIA_COUNT = 1, 4 | TERM_CRITERIA_MAX_ITER = 1, 5 | TERM_CRITERIA_EPS = 2 6 | } 7 | 8 | interface _Type { 9 | TERM_CRITERIA_COUNT: Type.TERM_CRITERIA_COUNT, 10 | TERM_CRITERIA_MAX_ITER: Type.TERM_CRITERIA_MAX_ITER; 11 | TERM_CRITERIA_EPS: Type.TERM_CRITERIA_EPS 12 | } 13 | 14 | interface TermCriteria { 15 | new (): TermCriteria; 16 | /** 17 | * @param type The type of termination criteria, one of TermCriteria::Type 18 | * @param maxCount The maximum number of iterations or elements to compute. 19 | * @param epsilon The desired accuracy or change in parameters at which the iterative algorithm stops. 
20 | 21 | */ 22 | new (type: Type , maxCount: number, epsilon: number): TermCriteria; 23 | Type: Type; 24 | isValid(): boolean; 25 | epsilon: number; 26 | maxCount: number; 27 | type: Type 28 | } 29 | } 30 | export = TermCriteria; -------------------------------------------------------------------------------- /src/core/Utils.d.ts: -------------------------------------------------------------------------------- 1 | declare module Utils { 2 | enum SortFlags { 3 | SORT_EVERY_ROW = 0, 4 | SORT_EVERY_COLUMN = 1, 5 | SORT_ASCENDING = 0, 6 | SORT_DESCENDING = 16, 7 | } 8 | 9 | interface _SortFlags { 10 | SORT_EVERY_ROW: SortFlags.SORT_EVERY_ROW, 11 | SORT_EVERY_COLUMN: SortFlags.SORT_EVERY_COLUMN, 12 | SORT_ASCENDING: SortFlags.SORT_ASCENDING, 13 | SORT_DESCENDING: SortFlags.SORT_DESCENDING, 14 | } 15 | } 16 | export = Utils; 17 | -------------------------------------------------------------------------------- /src/dnn/dnn.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | import { Scalar } from '../core/Scalar'; 3 | import { Size } from '../core/Size'; 4 | 5 | declare module DNN { 6 | interface dnn_Net { 7 | new(): dnn_Net 8 | /** 9 | * Sets the new input value for the network 10 | * @param input input mat 11 | */ 12 | setInput(input: Mat): void; 13 | /** 14 | * Runs forward pass to compute outputs of layers 15 | */ 16 | forward(): number[]; 17 | } 18 | 19 | interface dnn { 20 | /** 21 | *Creates 4-dimensional blob from image. Optionally resizes and crops image from center, subtract mean values, scales values by scalefactor, swap Blue and Red channels 22 | * @param image input image (with 1-, 3- or 4-channels). 23 | * @param scalefactor multiplier for image values. 24 | * @param size spatial size for output image 25 | * @param mean scalar with mean values which are subtracted from channels. 
Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true 26 | * @param swapRB flag which indicates that swap first and last channels in 3-channel image is necessary 27 | */ 28 | blobFromImage( 29 | image: Mat, 30 | scalefactor: number, 31 | size: Size, 32 | mean: Scalar, 33 | swapRB: boolean 34 | ): Mat; 35 | /** 36 | * Read deep learning network represented in one of the supported formats 37 | * @param config Text file contains network configuration. It could be a file with the following extensions: 38 | * *.prototxt (Caffe, @see http://caffe.berkeleyvision.org/) 39 | * *.pbtxt (TensorFlow, @see https://www.tensorflow.org/) 40 | * *.cfg (Darknet, @see https://pjreddie.com/darknet/) 41 | * *.xml (DLDT, @see https://software.intel.com/openvino-toolkit) 42 | * @param model Binary file contains trained weights. The following file extensions are expected for models from different frameworks: 43 | * *.prototxt (Caffe, http://caffe.berkeleyvision.org/) 44 | * *.pbtxt (TensorFlow, https://www.tensorflow.org/) 45 | * *.cfg (Darknet, https://pjreddie.com/darknet/) 46 | * *.xml (DLDT, https://software.intel.com/openvino-toolkit) 47 | */ 48 | readNet(config: string, model: string): dnn_Net; 49 | } 50 | } 51 | export = DNN; 52 | -------------------------------------------------------------------------------- /src/helper.ts: -------------------------------------------------------------------------------- 1 | 2 | enum data { 3 | THRESH_BINARY = 0, 4 | THRESH_BINARY_INV = 1, 5 | THRESH_TRUNC = 2, 6 | THRESH_TOZERO = 3, 7 | THRESH_TOZERO_INV = 4, 8 | THRESH_MASK = 7, 9 | THRESH_OTSU = 8, 10 | THRESH_TRIANGLE = 16, 11 | } 12 | 13 | const n = "ThresholdTypes"; 14 | 15 | let str = ""; 16 | 17 | Object.keys(data).forEach(key => { 18 | str += `${key}: ${n}.${key};\n` 19 | }) 20 | 21 | 22 | const fnStr = (`adaptiveThreshold ( InputArray src, 23 | OutputArray dst, 24 | double maxValue, 25 | int adaptiveMethod, 26 | int thresholdType, 27 | 
int blockSize, 28 | double C 29 | ) `).replace(/\n/g, " ").replace(/[^a-zA-Z ]/g, " ").replace(/\s\s+/g, ' ').split(' '); 30 | 31 | let replaceWords:{ [index:string] : {} } = { 32 | 'InputArray': 'Mat', 33 | 'OutputArray': 'Mat', 34 | 'int': 'number', 35 | 'double': 'number' 36 | }; 37 | 38 | 39 | let formattedFnStr = `${fnStr[0]}(` 40 | fnStr.shift(); 41 | 42 | let argType = undefined; 43 | 44 | for(var i = 0; i <= fnStr.length; i+=2 ) { 45 | const argType = replaceWords[fnStr[i]]; 46 | const arg = fnStr[i+1]; 47 | 48 | if(argType && arg) { 49 | formattedFnStr += `${arg}: ${argType},` 50 | } 51 | } 52 | 53 | console.log(`${formattedFnStr.substring(0, formattedFnStr.length - 1)}):`) 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /src/video/BackgroundSubtractor.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | 3 | declare module BackgroundSubtractor { 4 | interface BackgroundSubtractor { 5 | new (): BackgroundSubtractor; 6 | apply(image: Mat): void; 7 | getBackgroundImage(backgroundImage: Mat): void; 8 | } 9 | } 10 | export = BackgroundSubtractor; 11 | -------------------------------------------------------------------------------- /src/video/BackgroundSubtractorMOG2.d.ts: -------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | 3 | declare module BackgroundSubtractorMOG2 { 4 | interface BackgroundSubtractorMOG2 { 5 | new (history: number, varThreshold: number, detectShadows: boolean): BackgroundSubtractorMOG2; 6 | new (image: number, fgmask: Mat, learningRate: [0,1]): BackgroundSubtractorMOG2; 7 | apply(image: Mat, dst: Mat): void; 8 | getBackgroundImage(backgroundImage: Mat): void; 9 | } 10 | } 11 | export = BackgroundSubtractorMOG2; 12 | -------------------------------------------------------------------------------- /src/video/track.d.ts: 
-------------------------------------------------------------------------------- 1 | import { Mat } from '../core/Mat'; 2 | import { Rect } from '../core/Rect'; 3 | import { Size } from '../core/Size'; 4 | import { RotatedRect } from '../core/RotatedRect'; 5 | import { TermCriteria } from '../core/TermCriteria'; 6 | 7 | declare module ObjectTracking { 8 | enum Optflow { 9 | OPTFLOW_USE_INITIAL_FLOW = 4, 10 | OPTFLOW_LK_GET_MIN_EIGENVALS = 8, 11 | OPTFLOW_FARNEBACK_GAUSSIAN = 256, 12 | } 13 | 14 | interface _Optflow { 15 | OPTFLOW_USE_INITIAL_FLOW: Optflow.OPTFLOW_USE_INITIAL_FLOW; 16 | OPTFLOW_LK_GET_MIN_EIGENVALS: Optflow.OPTFLOW_LK_GET_MIN_EIGENVALS; 17 | OPTFLOW_FARNEBACK_GAUSSIAN: Optflow.OPTFLOW_FARNEBACK_GAUSSIAN; 18 | } 19 | 20 | enum Motion { 21 | MOTION_TRANSLATION = 0, 22 | MOTION_EUCLIDEAN = 1, 23 | MOTION_AFFINE = 2, 24 | MOTION_HOMOGRAPHY = 3, 25 | } 26 | 27 | interface _Motion { 28 | MOTION_TRANSLATION: Motion.MOTION_TRANSLATION; 29 | MOTION_EUCLIDEAN: Motion.MOTION_EUCLIDEAN; 30 | MOTION_AFFINE: Motion.MOTION_AFFINE; 31 | MOTION_HOMOGRAPHY: Motion.MOTION_HOMOGRAPHY; 32 | } 33 | 34 | interface ObjectTracking { 35 | /** 36 | * Computes a dense optical flow using the Gunnar Farneback's algorithm 37 | * @param prev first 8-bit single-channel input image. 
38 | * @param next second input image of the same size and the same type as prev 39 | * @param flow computed flow image that has the same size as prev and type CV_32FC2 40 | * @param pyr_scale parameter, specifying the image scale (<1) to build pyramids for each image; pyr_scale=0.5 means a classical pyramid, where each next layer is twice smaller than the previous one 41 | * @param levels number of pyramid layers including the initial image; levels=1 means that no extra layers are created and only the original images are used 42 | * @param winsize averaging window size; larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field 43 | * @param iterations number of iterations the algorithm does at each pyramid level 44 | * @param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel; larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field, typically poly_n =5 or 7. 45 | * @param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a good value would be poly_sigma=1.5. 46 | * @param flags operation flags that can be a combination of the following: 47 | * - OPTFLOW_USE_INITIAL_FLOW uses the input flow as an initial flow approximation. 48 | * - OPTFLOW_FARNEBACK_GAUSSIAN uses the Gaussian 𝚠𝚒𝚗𝚜𝚒𝚣𝚎×𝚠𝚒𝚗𝚜𝚒𝚣𝚎 filter instead of a box filter of the same size for optical flow estimation; usually, this option gives z more accurate flow than with a box filter, at the cost of lower speed; normally, winsize for a Gaussian window should be set to a larger value to achieve the same level of robustness. 
49 | */ 50 | calcOpticalFlowFarneback( 51 | prev: Mat, 52 | next: Mat, 53 | flow: Mat, 54 | pyr_scale: number, 55 | levels: number, 56 | winsize: number, 57 | iterations: number, 58 | poly_n: number, 59 | poly_sigma: number, 60 | flags: number 61 | ): void; 62 | /** 63 | * Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with pyramids 64 | * @param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid. 65 | * @param nextImg second input image or pyramid of the same size and the same type as prevImg 66 | * @param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be single-precision floating-point numbers 67 | * @param nextPts output vector of 2D points (with single-precision floating-point coordinates) containing the calculated new positions of input features in the second image; when OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input 68 | * @param status output status vector (of unsigned chars); each element of the vector is set to 1 if the flow for the corresponding features has been found, otherwise, it is set to 0 69 | * @param err output vector of errors; each element of the vector is set to an error for the corresponding feature, type of the error measure can be set in flags parameter; if the flow wasn't found then the error is not defined (use the status parameter to find such cases). 70 | * @param winSize size of the search window at each pyramid level 71 | * @param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single level), if set to 1, two levels are used, and so on; if pyramids are passed to input then algorithm will use as many levels as pyramids have but no more than maxLevel. 
72 | * @param criteria parameter, specifying the termination criteria of the iterative search algorithm (after the specified maximum number of iterations criteria.maxCount or when the search window moves by less than criteria.epsilon 73 | */ 74 | calcOpticalFlowPyrLK( 75 | prevImg: Mat, 76 | nextImg: Mat, 77 | prevPts: Mat, 78 | nextPts: Mat, 79 | status: Mat, 80 | err: Mat, 81 | winSize: Size, 82 | maxLevel: number, 83 | criteria: TermCriteria 84 | ): void; 85 | /** 86 | * Finds an object center, size, and orientation 87 | * @param probImage Back projection of the object histogram. See calcBackProject 88 | * @param window Initial search window 89 | * @param criteria Stop criteria for the underlying meanShift. returns (in old interfaces) Number of iterations CAMSHIFT took to converge The function implements the CAMSHIFT object tracking algorithm [33] . First, it finds an object center using meanShift and then adjusts the window size and finds the optimal rotation. The function returns the rotated rectangle structure that includes the object position, size, and orientation. The next position of the search window can be obtained with RotatedRect::boundingRect() 90 | * @returns returns the rotated rectangle structure that includes the object position, size, and orientation. 91 | */ 92 | CamShift(probImage: Mat, window: Rect, criteria: TermCriteria): [RotatedRect, Rect]; 93 | /** 94 | * Finds the geometric transform (warp) between two images in terms of the ECC criterion 95 | * @param templateImage single-channel template image; CV_8U or CV_32F array. 
96 | * @param inputImage single-channel input image which should be warped with the final warpMatrix in order to provide an image similar to templateImage, same type as templateImage 97 | * @param warpMatrix floating-point 2×3 or 3×3 mapping matrix (warp 98 | * @param motionType parameter, specifying the type of motion: 99 | * - MOTION_TRANSLATION sets a translational motion model; warpMatrix is 2×3 with the first 2×2 part being the unity matrix and the rest two parameters being estimated. 100 | * - MOTION_EUCLIDEAN sets a Euclidean (rigid) transformation as motion model; three parameters are estimated; warpMatrix is 2×3. 101 | * - MOTION_AFFINE sets an affine motion model (DEFAULT); six parameters are estimated; warpMatrix is 2×3. 102 | * - MOTION_HOMOGRAPHY sets a homography as a motion model; eight parameters are estimated;`warpMatrix` is 3×3 103 | * @param criteria parameter, specifying the termination criteria of the ECC algorithm; criteria.epsilon defines the threshold of the increment in the correlation coefficient between two iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion). Default values are shown in the declaration above 104 | * @param inputMask An optional mask to indicate valid values of inputImage 105 | * @param gaussFiltSize An optional value indicating size of gaussian blur filter 106 | */ 107 | /** 108 | * Finds an object on a back projection image 109 | * @param probImage Back projection of the object histogram. See calcBackProject for details 110 | * @param window Initial search window 111 | * @param criteria Stop criteria for the iterative search algorithm. returns : Number of iterations CAMSHIFT took to converge. The function implements the iterative object search algorithm. It takes the input back projection of an object and the initial position. The mass center in window of the back projection image is computed and the search window center shifts to the mass center. 
The procedure is repeated until the specified number of iterations criteria.maxCount is done or until the window center shifts by less than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift , the search window size or orientation do not change during the search. You can simply pass the output of calcBackProject to this function. But better results can be obtained if you pre-filter the back projection and remove the noise. For example, you can do this by retrieving connected components with findContours , throwing away contours with small area ( contourArea ), and rendering the remaining contours with drawContours 112 | */ 113 | meanShift(probImage: Mat, window: Rect, criteria: TermCriteria): [number, Rect]; 114 | findTransformECC( 115 | templateImage: Mat, 116 | inputImage: Mat, 117 | warpMatrix: Mat, 118 | motionType: number, 119 | criteria: TermCriteria, 120 | inputMask: Mat, 121 | gaussFiltSize: number 122 | ): void; 123 | } 124 | } 125 | export = ObjectTracking; 126 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es6", 4 | "rootDir": "src", 5 | "declaration": true, 6 | "outDir": "dist", 7 | "strict": true, 8 | "esModuleInterop": true, 9 | "noEmit": false 10 | }, 11 | "exclude": [ 12 | "node_modules" 13 | ] 14 | } -------------------------------------------------------------------------------- /yarn.lock: -------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
2 | # yarn lockfile v1 3 | 4 | 5 | crypto-js@^4.0.0: 6 | version "4.0.0" 7 | resolved "https://registry.yarnpkg.com/crypto-js/-/crypto-js-4.0.0.tgz#2904ab2677a9d042856a2ea2ef80de92e4a36dcc" 8 | integrity sha512-bzHZN8Pn+gS7DQA6n+iUmBfl0hO5DJq++QP3U6uTucDtk/0iGpXd/Gg7CGR0p8tJhofJyaKoWBuJI4eAO00BBg== 9 | 10 | crypto@^1.0.1: 11 | version "1.0.1" 12 | resolved "https://registry.yarnpkg.com/crypto/-/crypto-1.0.1.tgz#2af1b7cad8175d24c8a1b0778255794a21803037" 13 | integrity sha512-VxBKmeNcqQdiUQUW2Tzq0t377b54N2bMtXO/qiLa+6eRRmmC4qT3D4OnTGoT/U6O9aklQ/jTwbOtRMTTY8G0Ig== 14 | 15 | inherits@2.0.3: 16 | version "2.0.3" 17 | resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" 18 | integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= 19 | 20 | path@^0.12.7: 21 | version "0.12.7" 22 | resolved "https://registry.yarnpkg.com/path/-/path-0.12.7.tgz#d4dc2a506c4ce2197eb481ebfcd5b36c0140b10f" 23 | integrity sha1-1NwqUGxM4hl+tIHr/NWzbAFAsQ8= 24 | dependencies: 25 | process "^0.11.1" 26 | util "^0.10.3" 27 | 28 | process@^0.11.1: 29 | version "0.11.10" 30 | resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" 31 | integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= 32 | 33 | typescript@^4.2.3: 34 | version "4.2.3" 35 | resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.2.3.tgz#39062d8019912d43726298f09493d598048c1ce3" 36 | integrity sha512-qOcYwxaByStAWrBf4x0fibwZvMRG+r4cQoTjbPtUlrWjBHbmCAww1i448U0GJ+3cNNEtebDteo/cHOR3xJ4wEw== 37 | 38 | util@^0.10.3: 39 | version "0.10.4" 40 | resolved "https://registry.yarnpkg.com/util/-/util-0.10.4.tgz#3aa0125bfe668a4672de58857d3ace27ecb76901" 41 | integrity sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A== 42 | dependencies: 43 | inherits "2.0.3" 44 | --------------------------------------------------------------------------------