├── _config.yml
├── src
│   ├── index.ts
│   └── types
│       ├── opencv
│       │   ├── Tracker.ts
│       │   ├── index.ts
│       │   ├── TrackerMIL.ts
│       │   ├── Feature2D.ts
│       │   ├── ORB.ts
│       │   ├── Node.ts
│       │   ├── Logger.ts
│       │   ├── BackgroundSubtractorMOG2.ts
│       │   ├── fisheye.ts
│       │   ├── Exception.ts
│       │   ├── softfloat.ts
│       │   ├── photo_inpaint.ts
│       │   ├── softdouble.ts
│       │   ├── BackgroundSubtractor.ts
│       │   ├── BOWTrainer.ts
│       │   ├── DynamicBitset.ts
│       │   ├── _types.ts
│       │   ├── AutoBuffer.ts
│       │   ├── BFMatcher.ts
│       │   ├── MatOp.ts
│       │   ├── FlannBasedMatcher.ts
│       │   ├── LshTable.ts
│       │   ├── RotatedRect.ts
│       │   ├── imgproc_colormap.ts
│       │   ├── imgproc_object.ts
│       │   ├── core_cluster.ts
│       │   ├── QRCodeDetector.ts
│       │   ├── features2d_draw.ts
│       │   ├── MatExpr.ts
│       │   ├── core_hal_interface.ts
│       │   ├── Algorithm.ts
│       │   ├── QRCodeDetectorAruco.ts
│       │   ├── Matx.ts
│       │   ├── CascadeClassifier.ts
│       │   ├── objdetect.ts
│       │   ├── Affine3.ts
│       │   ├── PCA.ts
│       │   ├── DescriptorMatcher.ts
│       │   ├── _hacks.ts
│       │   ├── HOGDescriptor.ts
│       │   └── imgproc_hist.ts
│       ├── _cv.ts
│       └── emscripten.ts
├── .prettierrc.json
├── .gitignore
├── opencv.ico
├── test
│   ├── Lenna.png
│   ├── test-qr.png
│   ├── Tracker.test.ts
│   ├── cv.ts
│   ├── cvKeys.test.ts
│   ├── rect.test.ts
│   ├── Mat.test.ts
│   ├── BackgroundSubtractorMOG2.test.ts
│   ├── applyColorMap.test.ts
│   └── QRCodeDetector.test.ts
├── doc
│   └── README.md
├── jest.config.js
├── tsconfig.json
├── dist
│   ├── opencv.js.patch
│   └── README.md
├── .github
│   ├── workflows
│   │   ├── unit-test.yml
│   │   ├── build-opencv.yml
│   │   └── npm-publish.yml
│   └── copilot-instructions.md
├── package.json
├── README.md
└── LICENSE
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-cayman
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | export * from "./types/opencv";
2 |
--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------
1 | {
2 |   "trailingComma": "all"
3 | }
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /node_modules/
2 | /dist/src/
3 | .idea/
4 |
--------------------------------------------------------------------------------
/src/types/opencv/Tracker.ts:
--------------------------------------------------------------------------------
1 | export declare class Tracker {}
2 |
--------------------------------------------------------------------------------
/opencv.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TechStark/opencv-js/HEAD/opencv.ico
--------------------------------------------------------------------------------
/test/Lenna.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TechStark/opencv-js/HEAD/test/Lenna.png
--------------------------------------------------------------------------------
/test/test-qr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TechStark/opencv-js/HEAD/test/test-qr.png
--------------------------------------------------------------------------------
/doc/README.md:
--------------------------------------------------------------------------------
1 | ## Runtime/real methods and properties on CV objects
2 |
3 | `cvKeys.json` is generated by `test/cvKeys.test.ts`
4 |
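5 | For example, to check at runtime whether a particular function made it into this opencv.js build (a minimal sketch, assuming `cv` has already been initialized):
6 |
7 | ```js
8 | // true if cvtColor exists in the loaded build
9 | console.log(typeof cv.cvtColor === "function");
10 | ```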
--------------------------------------------------------------------------------
/src/types/opencv/index.ts:
--------------------------------------------------------------------------------
1 | import * as _cv from "./_types";
2 | export type CV = typeof _cv;
3 |
4 | export * from "./_types";
5 |
--------------------------------------------------------------------------------
/src/types/opencv/TrackerMIL.ts:
--------------------------------------------------------------------------------
1 | import type { Tracker } from "./_types";
2 |
3 | export declare class TrackerMIL extends Tracker {}
4 |
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('ts-jest').JestConfigWithTsJest} */
2 | module.exports = {
3 |   preset: 'ts-jest',
4 |   testEnvironment: 'node',
5 | };
--------------------------------------------------------------------------------
/src/types/_cv.ts:
--------------------------------------------------------------------------------
1 | import type { FS } from "./emscripten";
2 | import type { CV } from "./opencv";
3 |
4 | declare global {
5 |   var cv: CV & { FS: FS };
6 | }
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "compilerOptions": {
3 |     "target": "esnext",
4 |     "module": "commonjs",
5 |     "lib": ["esnext", "dom"],
6 |     "strict": true,
7 |     "esModuleInterop": true,
8 |     "sourceMap": true,
9 |     "outDir": "./dist",
10 |     "rootDir": ".",
11 |     "skipLibCheck": true,
12 |     "declaration": true
13 |   },
14 |   "include": ["src"]
15 | }
16 |
--------------------------------------------------------------------------------
/test/Tracker.test.ts:
--------------------------------------------------------------------------------
1 | import Jimp from "jimp";
2 | import path from "path";
3 | import { setupOpenCv, translateException } from "./cv";
4 |
5 | beforeAll(setupOpenCv);
6 |
7 | describe("Tracker", () => {
8 |   it("should pass TS type validations", async () => {
9 |     try {
10 |       const tracker = new cv.TrackerMIL();
11 |     } catch (err) {
12 |       throw translateException(err);
13 |     }
14 |   });
15 | });
--------------------------------------------------------------------------------
/dist/opencv.js.patch:
--------------------------------------------------------------------------------
1 | diff --git a/dist/opencv.js b/dist/opencv.js
2 | index af4111b..3ba8a69 100644
3 | --- a/dist/opencv.js
4 | +++ b/dist/opencv.js
5 | @@ -41,7 +41,7 @@ else if (typeof define === 'function' && define['amd'])
6 |      define([], () => cv);
7 |
8 |  if (typeof Module === 'undefined')
9 | -    Module = {};
10 | +    var Module = {};
11 |      return cv(Module);
12 |  }));
13 |
14 | \ No newline at end of file
--------------------------------------------------------------------------------
/src/types/opencv/Feature2D.ts:
--------------------------------------------------------------------------------
1 | import type { Algorithm, KeyPointVector, Mat, OutputArray } from "./_types";
2 |
3 | /**
4 |  * https://docs.opencv.org/master/d0/d13/classcv_1_1Feature2D.html
5 |  */
6 | export declare class Feature2D extends Algorithm {
7 |   /**
8 |    * Detects keypoints and computes the descriptors
9 |    * @param img Input image.
10 |    * @param mask Mask specifying where to look for keypoints.
11 |    * @param keypoints Detected keypoints (output).
12 |    * @param descriptors Computed descriptors (output).
13 |    */
14 |   public detectAndCompute(
15 |     img: Mat,
16 |     mask: Mat,
17 |     keypoints: KeyPointVector,
18 |     descriptors: OutputArray,
19 |   ): void;
20 | }
--------------------------------------------------------------------------------
/.github/workflows/unit-test.yml:
--------------------------------------------------------------------------------
1 | name: "Unit Test"
2 | on:
3 |   push:
4 |     branches:
5 |       - main
6 |   pull_request:
7 |     types:
8 |       - opened
9 |       - synchronize
10 |       - reopened
11 |       - ready_for_review
12 |
13 | jobs:
14 |   test:
15 |     runs-on: ubuntu-latest
16 |
17 |     env:
18 |       NODE_OPTIONS: --max_old_space_size=4096
19 |
20 |     steps:
21 |       - uses: actions/checkout@v4
22 |         with:
23 |           fetch-depth: 0
24 |
25 |       - name: Install dependencies
26 |         run: npm install
27 |
28 |       - name: Run test
29 |         run: npm test
--------------------------------------------------------------------------------
/src/types/opencv/ORB.ts:
--------------------------------------------------------------------------------
1 | import type { Feature2D, float, int } from "./_types";
2 |
3 | /**
4 |  * https://docs.opencv.org/master/db/d95/classcv_1_1ORB.html
5 |  */
6 | export declare class ORB extends Feature2D {
7 |   public constructor(
8 |     nfeatures?: int,
9 |     scaleFactor?: float,
10 |     nlevels?: int,
11 |     edgeThreshold?: int,
12 |     firstLevel?: int,
13 |     WTA_K?: int,
14 |     scoreType?: ORBScoreType,
15 |     patchSize?: int,
16 |     fastThreshold?: int,
17 |   );
18 | }
19 |
20 | type ORBScoreType = int;
21 | export declare const ORB_HARRIS_SCORE: ORBScoreType;
22 | export declare const ORB_FAST_SCORE: ORBScoreType;
--------------------------------------------------------------------------------
/dist/README.md:
--------------------------------------------------------------------------------
1 | ## Build opencv.js
2 |
3 | - see https://github.com/opencv/opencv/blob/4.x/platforms/js/README.md
4 | - also https://docs.opencv.org/4.7.0/d4/da1/tutorial_js_setup.html
5 |
6 | ```sh
7 | cd ~/apps/emsdk
8 | ./emsdk update
9 | ./emsdk install 2.0.10
10 | ./emsdk activate 2.0.10
11 | ```
12 |
13 | - build
14 |
15 | ```sh
16 | source ~/apps/emsdk/emsdk_env.sh
17 | emcmake python ./platforms/js/build_js.py build_js --build_wasm
18 | ```
19 |
20 | ## Patch opencv.js
21 |
22 | - To create a patch for the current version of opencv.js, run:
23 |
24 | ```sh
25 | git diff > temp.patch
26 | mv temp.patch dist/opencv.js.patch
27 | ```
28 |
29 | - To apply the patch, run:
30 |
31 | ```sh
32 | git apply dist/opencv.js.patch
33 | ```
--------------------------------------------------------------------------------
/test/cv.ts:
--------------------------------------------------------------------------------
1 | import "../src";
2 |
3 | export async function setupOpenCv() {
4 |   const cvModule = require("../dist/opencv.js");
5 |
6 |   // Support both Promise and onRuntimeInitialized callback APIs
7 |   let cv;
8 |   if (cvModule instanceof Promise) {
9 |     // Promise API
10 |     cv = await cvModule;
11 |   } else {
12 |     // Callback API
13 |     await new Promise<void>((resolve) => {
14 |       cvModule.onRuntimeInitialized = () => {
15 |         resolve();
16 |       };
17 |     });
18 |     cv = cvModule;
19 |   }
20 |   global.cv = cv;
21 | }
22 |
23 | export function translateException(err: any) {
24 |   if (typeof err === "number") {
25 |     try {
26 |       const exception = cv.exceptionFromPtr(err);
27 |       return exception;
28 |     } catch (error) {
29 |       // ignore
30 |     }
31 |   }
32 |   return err;
33 | }
--------------------------------------------------------------------------------
/src/types/opencv/Node.ts:
--------------------------------------------------------------------------------
1 | import type { double, int } from "./_types";
2 |
3 | export declare class Node {
4 |   /**
5 |    * Class index normalized to 0..class_count-1 range and assigned to the node. It is used internally
6 |    * in classification trees and tree ensembles.
7 |    *
8 |    */
9 |   public classIdx: int;
10 |
11 |   /**
12 |    * Default direction where to go (-1: left or +1: right). It helps in the case of missing values.
13 |    *
14 |    */
15 |   public defaultDir: int;
16 |
17 |   public left: int;
18 |
19 |   public parent: int;
20 |
21 |   public right: int;
22 |
23 |   public split: int;
24 |
25 |   /**
26 |    * Value at the node: a class label in case of classification or estimated function value in case of
27 |    * regression.
28 |    *
29 |    */
30 |   public value: double;
31 |
32 |   public constructor();
33 | }
--------------------------------------------------------------------------------
/src/types/opencv/Logger.ts:
--------------------------------------------------------------------------------
1 | import type { int } from "./_types";
2 |
3 | export declare class Logger {
4 |   public static error(fmt: any, arg121: any): int;
5 |
6 |   public static fatal(fmt: any, arg122: any): int;
7 |
8 |   public static info(fmt: any, arg123: any): int;
9 |
10 |   /**
11 |    * Print log message
12 |    *
13 |    * @param level Log level
14 |    *
15 |    * @param fmt Message format
16 |    */
17 |   public static log(level: int, fmt: any, arg124: any): int;
18 |
19 |   /**
20 |    * Sets the logging destination
21 |    *
22 |    * @param name Filename or NULL for console
23 |    */
24 |   public static setDestination(name: any): void;
25 |
26 |   /**
27 |    * Sets the logging level. All messages with lower priority will be ignored.
28 |    *
29 |    * @param level Logging level
30 |    */
31 |   public static setLevel(level: int): void;
32 |
33 |   public static warn(fmt: any, arg125: any): int;
34 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "@techstark/opencv-js",
3 |   "version": "4.12.0-release.1",
4 |   "description": "OpenCV JavaScript version for node.js or browser",
5 |   "main": "dist/opencv.js",
6 |   "types": "dist/src/index.d.ts",
7 |   "files": [
8 |     "dist/",
9 |     "src/"
10 |   ],
11 |   "scripts": {
12 |     "build": "tsc",
13 |     "prepack": "npm run build",
14 |     "format": "prettier --write \"src/**/*.ts\"",
15 |     "test": "jest"
16 |   },
17 |   "devDependencies": {
18 |     "@types/jest": "^29.5.14",
19 |     "jest": "^29.7.0",
20 |     "jimp": "^1.6.0",
21 |     "prettier": "^3.5.3",
22 |     "ts-jest": "^29.3.4",
23 |     "typescript": "^5.8.3"
24 |   },
25 |   "repository": {
26 |     "type": "git",
27 |     "url": "git+https://github.com/TechStark/opencv-js.git"
28 |   },
29 |   "keywords": [
30 |     "opencv",
31 |     "javascript",
32 |     "computer vision"
33 |   ],
34 |   "author": "Wilson",
35 |   "license": "Apache-2.0",
36 |   "bugs": {
37 |     "url": "https://github.com/TechStark/opencv-js/issues"
38 |   },
39 |   "homepage": "https://github.com/TechStark/opencv-js#readme"
40 | }
41 |
--------------------------------------------------------------------------------
/src/types/opencv/BackgroundSubtractorMOG2.ts:
--------------------------------------------------------------------------------
1 | import type { BackgroundSubtractor, bool, double, int } from "./_types";
2 |
3 | /**
4 |  * Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
5 |  *
6 |  * The class implements the Gaussian mixture model background subtraction described in [Zivkovic2004]
7 |  * and [Zivkovic2006].
8 |  *
9 |  * Source:
10 |  * [opencv2/video.hpp](https://github.com/opencv/opencv/tree/master/modules/video/include/opencv2/video/background_segm.hpp).
11 |  */
12 | export declare class BackgroundSubtractorMOG2 extends BackgroundSubtractor {
13 |   /**
14 |    * @param history Length of the history.
15 |    * @param varThreshold Threshold on the squared Mahalanobis distance between the pixel and the model
16 |    * to decide whether a pixel is well described by the background model. This parameter does not
17 |    * affect the background update.
18 |    * @param detectShadows If true, the algorithm will detect shadows and mark them. It decreases the
19 |    * speed a bit, so if you do not need this feature, set the parameter to false.
20 |    */
21 |   public constructor(history?: int, varThreshold?: double, detectShadows?: bool);
22 | }
--------------------------------------------------------------------------------
/src/types/opencv/fisheye.ts:
--------------------------------------------------------------------------------
1 | import type { InputArray, OutputArray, int, Size } from "./_types";
2 |
3 | /**
4 |  * Computes the undistortion and rectification maps for the image transform using remap.
5 |  * If D is empty, zero distortion is used. If R or P is empty, identity matrices are used.
6 |  *
7 |  * @param {InputArray} K - Camera intrinsic matrix.
8 |  * @param {InputArray} D - Input vector of distortion coefficients (k1, k2, k3, k4).
9 |  * @param {InputArray} R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 1-channel or 1x1 3-channel.
10 |  * @param {InputArray} P - New camera intrinsic matrix (3x3) or new projection matrix (3x4).
11 |  * @param {Size} size - Undistorted image size.
12 |  * @param {int} m1type - Type of the first output map that can be CV_32FC1 or CV_16SC2. See convertMaps for details.
13 |  * @param {OutputArray} map1 - The first output map.
14 |  * @param {OutputArray} map2 - The second output map.
15 |  * @return {void}
16 |  */
17 | export declare function fisheye_initUndistortRectifyMap(
18 |   K: InputArray,
19 |   D: InputArray,
20 |   R: InputArray,
21 |   P: InputArray,
22 |   size: Size,
23 |   m1type: int,
24 |   map1: OutputArray,
25 |   map2: OutputArray,
26 | ): void;
--------------------------------------------------------------------------------
/test/cvKeys.test.ts:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import fs from "fs";
3 | import { setupOpenCv } from "./cv";
4 |
5 | beforeAll(setupOpenCv);
6 |
7 | describe("CV keys", () => {
8 |   function getObjectKeys(obj: any) {
9 |     const keys: string[] = [];
10 |     for (let key in obj) {
11 |       if (!key.startsWith("dynCall")) {
12 |         keys.push(key);
13 |       }
14 |     }
15 |     // console.log(keys);
16 |     keys.sort((a, b) => a.localeCompare(b));
17 |     return keys;
18 |   }
19 |
20 |   it("output CV keys", async () => {
21 |     const objectNameMap: { [key: string]: any } = {
22 |       cv: cv,
23 |       "cv.Mat": new cv.Mat(),
24 |     };
25 |
26 |     const objectKeyMap: { [key: string]: string[] | string } = {
27 |       buildInformation: cv.getBuildInformation(),
28 |     };
29 |
30 |     for (const objName in objectNameMap) {
31 |       const obj = objectNameMap[objName];
32 |       const keys = getObjectKeys(obj);
33 |       objectKeyMap[objName] = keys;
34 |     }
35 |
36 |     // write the objectKeyMap to JSON file
37 |     const jsonString = JSON.stringify(objectKeyMap, null, 2);
38 |     const fileName = "../doc/cvKeys.json";
39 |     const filePath = path.join(__dirname, fileName);
40 |     fs.writeFileSync(filePath, jsonString);
41 |   });
42 | });
--------------------------------------------------------------------------------
/src/types/opencv/Exception.ts:
--------------------------------------------------------------------------------
1 | import type { int } from "./_types";
2 |
3 | /**
4 |  * This class encapsulates all or almost all necessary information about the error happened in the
5 |  * program. The exception is usually constructed and thrown implicitly via CV_Error and CV_Error_
6 |  * macros.
7 |  *
8 |  * [error](#db/de0/group__core__utils_1gacbd081fdb20423a63cf731569ba70b2b})
9 |  *
10 |  * Source:
11 |  * [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L135).
12 |  *
13 |  */
14 | export declare class Exception {
15 |   /**
16 |    * CVStatus
17 |    *
18 |    */
19 |   public code: int;
20 |
21 |   public err: String;
22 |
23 |   public file: String;
24 |
25 |   public func: String;
26 |
27 |   public line: int;
28 |
29 |   public msg: String;
30 |
31 |   /**
32 |    * Default constructor
33 |    */
34 |   public constructor();
35 |
36 |   /**
37 |    * Full constructor. Normally the constructor is not called explicitly. Instead, the macros
38 |    * [CV_Error()], [CV_Error_()] and [CV_Assert()] are used.
39 |    */
40 |   public constructor(
41 |     _code: int,
42 |     _err: String,
43 |     _func: String,
44 |     _file: String,
45 |     _line: int,
46 |   );
47 |
48 |   public formatMessage(): void;
49 |
50 |   /**
51 |    * the error description and the context as a text string.
52 |    */
53 |   public what(): any;
54 | }
--------------------------------------------------------------------------------
/.github/workflows/build-opencv.yml:
--------------------------------------------------------------------------------
1 | name: Build OpenCV.js
2 |
3 | on:
4 |   # push:
5 |   #   branches:
6 |   #     - build-opencv
7 |   #     - main
8 |   workflow_dispatch:
9 |     inputs: {}
10 |
11 | jobs:
12 |   build-opencv:
13 |     runs-on: ubuntu-latest
14 |     steps:
15 |       - name: Install dependencies
16 |         run: |
17 |           sudo apt-get update
18 |           sudo apt-get install -y libv4l-dev
19 |
20 |       - name: Checkout emsdk
21 |         uses: actions/checkout@v4
22 |         with:
23 |           repository: emscripten-core/emsdk
24 |           path: emsdk
25 |       - name: Install an emsdk version
26 |         run: |
27 |           cd emsdk
28 |           ./emsdk install 2.0.10
29 |           ./emsdk activate 2.0.10
30 |
31 |       - name: Checkout opencv
32 |         uses: actions/checkout@v4
33 |         with:
34 |           repository: opencv/opencv
35 |           ref: 4.10.0
36 |           path: opencv
37 |       - name: Build opencv.js
38 |         run: |
39 |           source emsdk/emsdk_env.sh
40 |           emcmake python opencv/platforms/js/build_js.py build_js --build_flags="-s WASM_ASYNC_COMPILATION=0"
41 |
42 |       - name: Upload opencv_js
43 |         uses: actions/upload-artifact@v4
44 |         with:
45 |           name: opencv.js
46 |           path: build_js/bin/opencv.js
47 |           retention-days: 30
48 |
49 |       # - name: Check out repository code
50 |       #   uses: actions/checkout@v4
--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/publishing-nodejs-packages
3 |
4 | name: Publish NPM Package
5 |
6 | on:
7 |   release:
8 |     types: [created]
9 |   workflow_dispatch:
10 |     inputs: {}
11 |
12 | jobs:
13 |   # build:
14 |   #   runs-on: ubuntu-latest
15 |   #   steps:
16 |   #     - uses: actions/checkout@v4
17 |   #     - uses: actions/setup-node@v4
18 |   #       with:
19 |   #         node-version: "20.x"
20 |   #     - run: npm ci
21 |   #     - run: npm run build
22 |
23 |   publish-npm:
24 |     # needs: build
25 |     runs-on: ubuntu-latest
26 |     steps:
27 |       - uses: actions/checkout@v4
28 |       - uses: actions/setup-node@v4
29 |         with:
30 |           node-version: "20.x"
31 |           registry-url: https://registry.npmjs.org/
32 |       - run: npm ci
33 |       - run: npm publish --access=public
34 |         env:
35 |           NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}}
36 |
37 | # publish-gpr:
38 | #   needs: build
39 | #   runs-on: ubuntu-latest
40 | #   steps:
41 | #     - uses: actions/checkout@v2
42 | #     - uses: actions/setup-node@v1
43 | #       with:
44 | #         node-version: 14
45 | #         registry-url: https://npm.pkg.github.com/
46 | #     - run: npm ci
47 | #     - run: npm publish
48 | #     env:
49 | #       NODE_AUTH_TOKEN: ${{secrets.GITHUB_TOKEN}}
--------------------------------------------------------------------------------
/src/types/opencv/softfloat.ts:
--------------------------------------------------------------------------------
1 | import type { bool, int, int32_t, int64_t, uint32_t, uint64_t } from "./_types";
2 |
3 | export declare class softfloat {
4 |   public v: uint32_t;
5 |
6 |   public constructor();
7 |
8 |   public constructor(c: softfloat);
9 |
10 |   public constructor(arg174: uint32_t);
11 |
12 |   public constructor(arg175: uint64_t);
13 |
14 |   public constructor(arg176: int32_t);
15 |
16 |   public constructor(arg177: int64_t);
17 |
18 |   public constructor(a: any);
19 |
20 |   public getExp(): int;
21 |
22 |   /**
23 |    * Returns a number 1 <= x < 2 with the same significand
24 |    */
25 |   public getFrac(): softfloat;
26 |
27 |   public getSign(): bool;
28 |
29 |   public isInf(): bool;
30 |
31 |   public isNaN(): bool;
32 |
33 |   public isSubnormal(): bool;
34 |
35 |   public setExp(e: int): softfloat;
36 |
37 |   /**
38 |    * Constructs a copy of a number with significand taken from parameter
39 |    */
40 |   public setFrac(s: softfloat): softfloat;
41 |
42 |   public setSign(sign: bool): softfloat;
43 |
44 |   public static eps(): softfloat;
45 |
46 |   /**
47 |    * Builds new value from raw binary representation
48 |    */
49 |   public static fromRaw(a: uint32_t): softfloat;
50 |
51 |   public static inf(): softfloat;
52 |
53 |   public static max(): softfloat;
54 |
55 |   public static min(): softfloat;
56 |
57 |   public static nan(): softfloat;
58 |
59 |   public static one(): softfloat;
60 |
61 |   public static pi(): softfloat;
62 |
63 |   public static zero(): softfloat;
64 | }
--------------------------------------------------------------------------------
/src/types/opencv/photo_inpaint.ts:
--------------------------------------------------------------------------------
1 | import type { double, InputArray, int, OutputArray } from "./_types";
2 | /*
3 |  * # Inpainting
4 |  * the inpainting algorithm
5 |  */
6 | /**
7 |  * The function reconstructs the selected image area from the pixel near the area boundary. The
8 |  * function may be used to remove dust and scratches from a scanned photo, or to remove undesirable
9 |  * objects from still images or video. See for more details.
10 |  *
11 |  * An example using the inpainting technique can be found at opencv_source_code/samples/cpp/inpaint.cpp
12 |  * (Python) An example using the inpainting technique can be found at
13 |  * opencv_source_code/samples/python/inpaint.py
14 |  *
15 |  * @param src Input 8-bit, 16-bit unsigned or 32-bit float 1-channel or 8-bit 3-channel image.
16 |  *
17 |  * @param inpaintMask Inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that
18 |  * needs to be inpainted.
19 |  *
20 |  * @param dst Output image with the same size and type as src .
21 |  *
22 |  * @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered by
23 |  * the algorithm.
24 |  *
25 |  * @param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA
26 |  */
27 | export declare function inpaint(
28 |   src: InputArray,
29 |   inpaintMask: InputArray,
30 |   dst: OutputArray,
31 |   inpaintRadius: double,
32 |   flags: int,
33 | ): void;
34 |
35 | export declare const INPAINT_NS: any; // initializer: = 0
36 |
37 | export declare const INPAINT_TELEA: any; // initializer: = 1
--------------------------------------------------------------------------------
/src/types/opencv/softdouble.ts:
--------------------------------------------------------------------------------
1 | import type { bool, int, int32_t, int64_t, uint32_t, uint64_t } from "./_types";
2 |
3 | export declare class softdouble {
4 |   public v: uint64_t;
5 |
6 |   public constructor();
7 |
8 |   public constructor(c: softdouble);
9 |
10 |   public constructor(arg159: uint32_t);
11 |
12 |   public constructor(arg160: uint64_t);
13 |
14 |   public constructor(arg161: int32_t);
15 |
16 |   public constructor(arg162: int64_t);
17 |
18 |   public constructor(a: any);
19 |
20 |   public getExp(): int;
21 |
22 |   /**
23 |    * Returns a number 1 <= x < 2 with the same significand
24 |    */
25 |   public getFrac(): softdouble;
26 |
27 |   public getSign(): bool;
28 |
29 |   public isInf(): bool;
30 |
31 |   public isNaN(): bool;
32 |
33 |   public isSubnormal(): bool;
34 |
35 |   public setExp(e: int): softdouble;
36 |
37 |   /**
38 |    * Constructs a copy of a number with significand taken from parameter
39 |    */
40 |   public setFrac(s: softdouble): softdouble;
41 |
42 |   public setSign(sign: bool): softdouble;
43 |
44 |   public static eps(): softdouble;
45 |
46 |   /**
47 |    * Builds new value from raw binary representation
48 |    */
49 |   public static fromRaw(a: uint64_t): softdouble;
50 |
51 |   public static inf(): softdouble;
52 |
53 |   public static max(): softdouble;
54 |
55 |   public static min(): softdouble;
56 |
57 |   public static nan(): softdouble;
58 |
59 |   public static one(): softdouble;
60 |
61 |   public static pi(): softdouble;
62 |
63 |   public static zero(): softdouble;
64 | }
--------------------------------------------------------------------------------
/src/types/opencv/BackgroundSubtractor.ts:
--------------------------------------------------------------------------------
1 | import type { Algorithm, bool, double, InputArray, OutputArray } from "./_types";
2 |
3 | /**
4 |  * Base class for background/foreground segmentation algorithms.
5 |  *
6 |  * The class is only used to define the common interface for the whole family of background/foreground
7 |  * segmentation algorithms.
8 |  *
9 |  * Source:
10 |  * [opencv2/video.hpp](https://github.com/opencv/opencv/tree/master/modules/video/include/opencv2/video/background_segm.hpp).
11 |  */
12 | export declare class BackgroundSubtractor extends Algorithm {
13 |   public constructor();
14 |
15 |   /**
16 |    * Computes a foreground mask.
17 |    *
18 |    * @param image Next video frame.
19 |    * @param fgmask The output foreground mask as an 8-bit binary image.
20 |    * @param learningRate The value between 0 and 1 that indicates how fast the background model is learnt.
21 |    * Negative parameter value makes the algorithm use some automatically chosen learning rate.
22 |    * 0 means that the background model is not updated at all, 1 means that the background model is
23 |    * completely reinitialized from the last frame.
24 |    */
25 |   public apply(image: InputArray, fgmask: OutputArray, learningRate?: double): void;
26 |
27 |   /**
28 |    * Computes a background image.
29 |    *
30 |    * @param backgroundImage The output background image.
31 |    *
32 |    * @note Sometimes the background image can be very blurry, as it contains the average background
33 |    * statistics.
34 |    */
35 |   public getBackgroundImage(backgroundImage: OutputArray): void;
36 | }
--------------------------------------------------------------------------------
/src/types/opencv/BOWTrainer.ts:
--------------------------------------------------------------------------------
1 | import type { int, Mat } from "./_types";
2 |
3 | /**
4 |  * For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
5 |  * Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
6 |  *
7 |  * Source:
8 |  * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1339).
9 |  *
10 |  */
11 | export declare class BOWTrainer {
12 |   public constructor();
13 |
14 |   /**
15 |    * The training set is clustered using the cluster method to construct the vocabulary.
16 |    *
17 |    * @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
18 |    * descriptor.
19 |    */
20 |   public add(descriptors: Mat): Mat;
21 |
22 |   public clear(): void;
23 |
24 |   /**
25 |    * This is an overloaded member function, provided for convenience. It differs from the above
26 |    * function only in what argument(s) it accepts.
27 |    */
28 |   public cluster(): Mat;
29 |
30 |   /**
31 |    * The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
32 |    * variant of the method, train descriptors stored in the object are clustered. In the second variant,
33 |    * input descriptors are clustered.
34 |    *
35 |    * @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
36 |    * Descriptors are not added to the inner train descriptor set.
37 |    */
38 |   public cluster(descriptors: Mat): Mat;
39 |
40 |   public descriptorsCount(): int;
41 |
42 |   public getDescriptors(): Mat;
43 | }
--------------------------------------------------------------------------------
/src/types/opencv/DynamicBitset.ts:
--------------------------------------------------------------------------------
1 | import type { bool, size_t } from "./_types";
2 |
3 | /**
4 |  * Class re-implementing the boost version of it This helps not depending on boost, it also does not do
5 |  * the bound checks and has a way to reset a block for speed
6 |  *
7 |  * Source:
8 |  * [opencv2/flann/dynamic_bitset.h](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/flann/dynamic_bitset.h#L150).
9 |  *
10 |  */
11 | export declare class DynamicBitset {
12 |   /**
13 |    * default constructor
14 |    */
15 |   public constructor();
16 |
17 |   /**
18 |    * only constructor we use in our code
19 |    *
20 |    * @param sz the size of the bitset (in bits)
21 |    */
22 |   public constructor(sz: size_t);
23 |
24 |   /**
25 |    * Sets all the bits to 0
26 |    */
27 |   public clear(): void;
28 |
29 |   /**
30 |    * true if the bitset is empty
31 |    */
32 |   public empty(): bool;
33 |
34 |   /**
35 |    * set all the bits to 0
36 |    */
37 |   public reset(): void;
38 |
39 |   public reset(index: size_t): void;
40 |
41 |   public reset_block(index: size_t): void;
42 |
43 |   /**
44 |    * resize the bitset so that it contains at least sz bits
45 |    */
46 |   public resize(sz: size_t): void;
47 |
48 |   /**
49 |    * set a bit to true
50 |    *
51 |    * @param index the index of the bit to set to 1
52 |    */
53 |   public set(index: size_t): void;
54 |
55 |   /**
56 |    * gives the number of contained bits
57 |    */
58 |   public size(): size_t;
59 |
60 |   /**
61 |    * check if a bit is set
62 |    *
63 |    * true if the bit is set
64 |    *
65 |    * @param index the index of the bit to check
66 |    */
67 |   public test(index: size_t): bool;
68 | }
--------------------------------------------------------------------------------
/src/types/opencv/_types.ts:
--------------------------------------------------------------------------------
1 | export * from "./Affine3";
2 | export * from "./Algorithm";
3 | export * from "./AutoBuffer";
4 | export * from "./BackgroundSubtractor";
5 | export * from "./BackgroundSubtractorMOG2";
6 | export * from "./BFMatcher";
7 | export * from "./BOWTrainer";
8 | export * from "./calib3d";
9 | export * from "./CascadeClassifier";
10 | export * from "./core_array";
11 | export * from "./core_cluster";
12 | export * from "./core_hal_interface";
13 | export * from "./core_utils";
14 | export * from "./DescriptorMatcher";
15 | export * from "./dnn";
16 | export * from "./DynamicBitset";
17 | export * from "./Exception";
18 | export * from "./Feature2D";
19 | export * from "./features2d_draw";
20 | export * from "./fisheye";
21 | export * from "./FlannBasedMatcher";
22 | export * from "./HOGDescriptor";
23 | export * from "./imgproc_color_conversions";
24 | export * from "./imgproc_colormap";
25 | export * from "./imgproc_draw";
26 | export * from "./imgproc_feature";
27 | export * from "./imgproc_filter";
28 | export * from "./imgproc_hist";
29 | export * from "./imgproc_misc";
30 | export * from "./imgproc_object";
31 | export * from "./imgproc_shape";
32 | export * from "./imgproc_transform";
33 | export * from "./Logger";
34 | export * from "./LshTable";
35 | export * from "./Mat";
36 | export * from "./MatExpr";
37 | export * from "./MatOp";
38 | export * from "./Matx";
39 | export * from "./Node";
40 | export * from "./objdetect";
41 | export * from "./ORB";
42 | export * from "./PCA";
43 | export * from "./photo_inpaint";
44 | export * from "./RotatedRect";
45 | export * from "./softdouble";
46 | export * from "./softfloat";
47 | export * from "./video_track";
48 | export * from "./_hacks";
49 | export * from "./Tracker";
50 | export * from "./TrackerMIL";
51 |
--------------------------------------------------------------------------------
/test/rect.test.ts:
--------------------------------------------------------------------------------
1 | import { setupOpenCv } from "./cv";
2 |
3 | beforeAll(setupOpenCv);
4 |
5 | describe("rect", () => {
6 |   it("test rotated rect", async () => {
7 |     const point = new cv.Point(100, 200);
8 |     const size = new cv.Size(200, 300);
9 |     const rect = new cv.RotatedRect(point, size, 30);
10 |
11 |     const points = cv.RotatedRect.points(rect);
12 |
13 |     expect(points[0].x).toBe(cv.RotatedRect.boundingRect2f(rect).x);
14 |     expect(points[1].y).toBe(cv.RotatedRect.boundingRect2f(rect).y);
15 |
16 |     expect(Math.round(points[0].x)).toBe(cv.RotatedRect.boundingRect(rect).x);
17 |     expect(Math.round(points[1].y)).toBe(cv.RotatedRect.boundingRect(rect).y);
18 |   });
19 |
20 |   it("test boxPoints function", async () => {
21 |     const center = new cv.Point(50, 40);
22 |     const size = new cv.Size(80, 30);
23 |     const angle = 25;
24 |     const rotatedRect = new cv.RotatedRect(center, size, angle);
25 |
26 |     // Test that boxPoints accepts one argument and returns Point2f[]
27 |     const points = cv.boxPoints(rotatedRect);
28 |
29 |     // Verify it returns 4 points
30 |     expect(points.length).toBe(4);
31 |
32 |     // Check the actual values - boxPoints should return the same result as RotatedRect.points
33 |     const expectedPoints = cv.RotatedRect.points(rotatedRect);
34 |     expect(points[0].x).toBeCloseTo(expectedPoints[0].x, 5);
35 |     expect(points[0].y).toBeCloseTo(expectedPoints[0].y, 5);
36 |     expect(points[1].x).toBeCloseTo(expectedPoints[1].x, 5);
37 |     expect(points[1].y).toBeCloseTo(expectedPoints[1].y, 5);
38 |     expect(points[2].x).toBeCloseTo(expectedPoints[2].x, 5);
39 |     expect(points[2].y).toBeCloseTo(expectedPoints[2].y, 5);
40 |     expect(points[3].x).toBeCloseTo(expectedPoints[3].x, 5);
41 |     expect(points[3].y).toBeCloseTo(expectedPoints[3].y, 5);
42 |   });
43 | });
--------------------------------------------------------------------------------
/src/types/opencv/AutoBuffer.ts:
--------------------------------------------------------------------------------
1 | import type { size_t } from "./_types";
2 |
3 | /**
4 |  * The class is used for temporary buffers in functions and methods. If a temporary buffer is usually
5 |  * small (a few K's of memory), but its size depends on the parameters, it makes sense to create a
6 |  * small fixed-size array on stack and use it if it's large enough. If the required buffer size is
7 |  * larger than the fixed size, another buffer of sufficient size is allocated dynamically and released
8 |  * after the processing. Therefore, in typical cases, when the buffer size is small, there is no
9 |  * overhead associated with malloc()/free(). At the same time, there is no limit on the size of
10 |  * processed data.
11 |  *
12 |  * This is what [AutoBuffer](#d8/dd0/classcv_1_1AutoBuffer}) does. The template takes 2 parameters -
13 |  * type of the buffer elements and the number of stack-allocated elements. Here is how the class is
14 |  * used:
15 |  *
16 |  * ```cpp
17 |  * void my_func(const cv::Mat& m)
18 |  * {
19 |  *    cv::AutoBuffer buf(1000); // create automatic buffer containing 1000 floats
20 |  *
21 |  *    buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used,
22 |  *    // otherwise the buffer of "m.rows" floats will be allocated
23 |  *    // dynamically and deallocated in cv::AutoBuffer destructor
24 |  *    ...
25 |  * }
26 |  * ```
27 |  *
28 |  * Source:
29 |  * [opencv2/core/utility.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/utility.hpp#L128).
30 |  *
31 |  */
32 | export declare class AutoBuffer {
33 |   public constructor();
34 |
35 |   public constructor(_size: size_t);
36 |
37 |   public constructor(buf: AutoBuffer);
38 |
39 |   public allocate(_size: size_t): void;
40 |
41 |   public data(): any;
42 |
43 |   public data(): any;
44 |
45 |   public deallocate(): void;
46 |
47 |   public resize(_size: size_t): void;
48 |
49 |   public size(): size_t;
50 | }
--------------------------------------------------------------------------------
/src/types/opencv/BFMatcher.ts:
--------------------------------------------------------------------------------
1 | import type { bool, DescriptorMatcher, int, Ptr } from "./_types";
2 |
3 | /**
4 |  * For each descriptor in the first set, this matcher finds the closest descriptor in the second set by
5 |  * trying each one. This descriptor matcher supports masking permissible matches of descriptor sets.
6 |  *
7 |  * Source:
8 |  * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1140).
9 |  *
10 |  */
11 | export declare class BFMatcher extends DescriptorMatcher {
12 |   public constructor(normType?: int, crossCheck?: bool);
13 |
14 |   /**
15 |    * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
16 |    * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
17 |    * object copy with the current parameters but with empty train data.
18 |    */
19 |   public clone(emptyTrainData?: bool): Ptr;
20 |
21 |   public isMaskSupported(): bool;
22 |
23 |   /**
24 |    * @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
25 |    * preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
26 |    * BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
27 |    * description).
28 |    *
29 |    * @param crossCheck If it is false, this will be the default BFMatcher behaviour when it finds the k
30 |    * nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
31 |    * k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the
32 |    * matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
33 |    * pairs. Such technique usually produces best results with minimal number of outliers when there are
34 |    * enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
35 |    */
36 |   public static create(normType?: int, crossCheck?: bool): Ptr;
37 | }
--------------------------------------------------------------------------------
/src/types/opencv/MatOp.ts:
--------------------------------------------------------------------------------
1 | import type { double, int, Mat, MatExpr, Range, Scalar, Size } from "./_types";
2 |
3 | export declare class MatOp {
4 |   public constructor();
5 |
6 |   public abs(expr: MatExpr, res: MatExpr): MatExpr;
7 |
8 |   public add(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
9 |
10 |   public add(expr1: MatExpr, s: Scalar, res: MatExpr): MatExpr;
11 |
12 |   public assign(expr: MatExpr, m: Mat, type?: int): MatExpr;
13 |
14 |   public augAssignAdd(expr: MatExpr, m: Mat): MatExpr;
15 |
16 |   public augAssignAnd(expr: MatExpr, m: Mat): MatExpr;
17 |
18 |   public augAssignDivide(expr: MatExpr, m: Mat): MatExpr;
19 |
20 |   public augAssignMultiply(expr: MatExpr, m: Mat): MatExpr;
21 |
22 |   public augAssignOr(expr: MatExpr, m: Mat): MatExpr;
23 |
24 |   public augAssignSubtract(expr: MatExpr, m: Mat): MatExpr;
25 |
26 |   public augAssignXor(expr: MatExpr, m: Mat): MatExpr;
27 |
28 |   public diag(expr: MatExpr, d: int, res: MatExpr): MatExpr;
29 |
30 |   public divide(
31 |     expr1: MatExpr,
32 |     expr2: MatExpr,
33 |     res: MatExpr,
34 |     scale?: double,
35 |   ): MatExpr;
36 |
37 |   public divide(s: double, expr: MatExpr, res: MatExpr): MatExpr;
38 |
39 |   public elementWise(expr: MatExpr): MatExpr;
40 |
41 |   public invert(expr: MatExpr, method: int, res: MatExpr): MatExpr;
42 |
43 |   public matmul(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
44 |
45 |   public multiply(
46 |     expr1: MatExpr,
47 |     expr2: MatExpr,
48 |     res: MatExpr,
49 |     scale?: double,
50 |   ): MatExpr;
51 |
52 |   public multiply(expr1: MatExpr, s: double, res: MatExpr): MatExpr;
53 |
54 |   public roi(
55 |     expr: MatExpr,
56 |     rowRange: Range,
57 |     colRange: Range,
58 |     res: MatExpr,
59 |   ): MatExpr;
60 |
61 |   public size(expr: MatExpr): Size;
62 |
63 |   public subtract(expr1: MatExpr, expr2: MatExpr, res: MatExpr): MatExpr;
64 |
65 |   public subtract(s: Scalar, expr: MatExpr, res: MatExpr): Scalar;
66 |
67 |   public transpose(expr: MatExpr, res: MatExpr): MatExpr;
68 |
69 |   public type(expr: MatExpr): MatExpr;
70 | }
--------------------------------------------------------------------------------
/src/types/opencv/FlannBasedMatcher.ts:
--------------------------------------------------------------------------------
1 | import type {
2 |   bool,
3 |   DescriptorMatcher,
4 |   FileNode,
5 |   FileStorage,
6 |   InputArrayOfArrays,
7 |   Ptr,
8 | } from "./_types";
9 |
10 | /**
11 |  * This matcher trains [cv::flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) on a train descriptor
12 |  * collection and calls its nearest search methods to find the best matches. So, this matcher may be
13 |  * faster when matching a large train collection than the brute force matcher.
14 |  * [FlannBasedMatcher](#dc/de2/classcv_1_1FlannBasedMatcher}) does not support masking permissible
15 |  * matches of descriptor sets because [flann::Index](#d1/db2/classcv_1_1flann_1_1Index}) does not
16 |  * support this. :
17 |  *
18 |  * Source:
19 |  * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L1187).
20 |  *
21 |  */
22 | export declare class FlannBasedMatcher extends DescriptorMatcher {
23 |   public constructor(indexParams?: Ptr, searchParams?: Ptr);
24 |
25 |   /**
26 |    * If the collection is not empty, the new descriptors are added to existing train descriptors.
27 |    *
28 |    * @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
29 |    * train image.
30 |    */
31 |   public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
32 |
33 |   public clear(): void;
34 |
35 |   /**
36 |    * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
37 |    * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
38 |    * object copy with the current parameters but with empty train data.
39 |    */
40 |   public clone(emptyTrainData?: bool): Ptr;
41 |
42 |   public isMaskSupported(): bool;
43 |
44 |   public read(fn: FileNode): FileNode;
45 |
46 |   /**
47 |    * Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
48 |    * [train()] is run every time before matching. Some descriptor matchers (for example,
49 |    * BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
50 |    * inner structures (for example, [FlannBasedMatcher] trains [flann::Index] ).
51 |    */
52 |   public train(): void;
53 |
54 |   public write(fs: FileStorage): FileStorage;
55 |
56 |   public static create(): Ptr;
57 | }
--------------------------------------------------------------------------------
/test/Mat.test.ts:
--------------------------------------------------------------------------------
1 | import { Jimp } from "jimp";
2 | import path from "path";
3 | import { setupOpenCv, translateException } from "./cv";
4 |
5 | beforeAll(setupOpenCv);
6 |
7 | describe("Mat", () => {
8 |   it("should pass TS type validations", async () => {
9 |     try {
10 |       // load local image file with jimp. It supports jpg, png, bmp, tiff and gif:
11 |       const jimpSrc = await Jimp.read(path.resolve(__dirname, "Lenna.png"));
12 |
13 |       // `jimpImage.bitmap` property has the decoded ImageData that we can use to create a cv:Mat
14 |       const img = cv.matFromImageData(jimpSrc.bitmap);
15 |       expect(img.channels()).toEqual(4);
16 |
17 |       const imgGray = new cv.Mat();
18 |       cv.cvtColor(img, imgGray, cv.COLOR_RGBA2GRAY);
19 |       expect(imgGray.channels()).toEqual(1);
20 |
21 |       const imgBlur = new cv.Mat();
22 |       cv.GaussianBlur(
23 |         imgGray,
24 |         imgBlur,
25 |         new cv.Size(5, 5),
26 |         0,
27 |         0,
28 |         cv.BORDER_DEFAULT,
29 |       );
30 |
31 |       const imgThresh = new cv.Mat();
32 |       cv.threshold(
33 |         imgBlur,
34 |         imgThresh,
35 |         0,
36 |         255,
37 |         cv.THRESH_BINARY + cv.THRESH_OTSU,
38 |       );
39 |
40 |       const contours = new cv.MatVector();
41 |       const hierarchy = new cv.Mat();
42 |
43 |       cv.findContours(
44 |         imgThresh,
45 |         contours,
46 |         hierarchy,
47 |         cv.RETR_CCOMP,
48 |         cv.CHAIN_APPROX_SIMPLE,
49 |       );
50 |
51 |       const channels = new cv.MatVector();
52 |       cv.split(img, channels);
53 |       cv.merge(channels, img);
54 |     } catch (err) {
55 |       throw translateException(err);
56 |     }
57 |   });
58 |
59 |   it("should allow ucharPtr with optional second parameter", async () => {
60 |     try {
61 |       // Create a simple test matrix
62 |       const mat = new cv.Mat(3, 3, cv.CV_8UC1);
63 |
64 |       // Test that ucharPtr works with just one parameter (row index)
65 |       // This should compile without TypeScript errors due to optional j parameter
66 |       const rowPtr = mat.ucharPtr(0);
67 |       expect(rowPtr).toBeDefined();
68 |
69 |       // Test that ucharPtr works with two parameters (row and column)
70 |       const elementPtr = mat.ucharPtr(0, 0);
71 |       expect(elementPtr).toBeDefined();
72 |
73 |       mat.delete();
74 |     } catch (err) {
75 |       throw translateException(err);
76 |     }
77 |   });
78 | });
--------------------------------------------------------------------------------
/src/types/opencv/LshTable.ts:
--------------------------------------------------------------------------------
1 | import type { Bucket, BucketKey, LshStats, Matrix, size_t } from "./_types";
2 |
3 | /**
4 |  * Lsh hash table. As its key is a sub-feature, and as usually the size of it is pretty small, we keep
5 |  * it as a continuous memory array. The value is an index in the corpus of features (we keep it as an
6 |  * unsigned int for pure memory reasons, it could be a size_t)
7 |  *
8 |  * Source:
9 |  * [opencv2/flann/lsh_table.h](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/flann/lsh_table.h#L261).
10 |  *
11 |  */
12 | export declare class LshTable {
13 |   /**
14 |    * Default constructor
15 |    */
16 |   public constructor();
17 |
18 |   /**
19 |    * Default constructor Create the mask and allocate the memory
20 |    *
21 |    * @param feature_size is the size of the feature (considered as a ElementType[])
22 |    *
23 |    * @param key_size is the number of bits that are turned on in the feature
24 |    */
25 |   public constructor(feature_size: any, key_size: any);
26 |
27 |   public constructor(feature_size: any, subsignature_size: any);
28 |
29 |   /**
30 |    * Add a feature to the table
31 |    *
32 |    * @param value the value to store for that feature
33 |    *
34 |    * @param feature the feature itself
35 |    */
36 |   public add(value: any, feature: any): void;
37 |
38 |   /**
39 |    * Add a set of features to the table
40 |    *
41 |    * @param dataset the values to store
42 |    */
43 |   public add(dataset: Matrix): Matrix;
44 |
45 |   /**
46 |    * Get a bucket given the key
47 |    */
48 |   public getBucketFromKey(key: BucketKey): Bucket;
49 |
50 |   /**
51 |    * Compute the sub-signature of a feature
52 |    */
53 |   public getKey(arg50: any): size_t;
54 |
55 |   /**
56 |    * Return the Subsignature of a feature
57 |    *
58 |    * @param feature the feature to analyze
59 |    */
60 |   public getKey(feature: any): size_t;
61 |
62 |   /**
63 |    * Get statistics about the table
64 |    */
65 |   public getStats(): LshStats;
66 |
67 |   public getStats(): LshStats;
68 | }
69 |
70 | export declare const kArray: SpeedLevel; // initializer:
71 |
72 | export declare const kBitsetHash: SpeedLevel; // initializer:
73 |
74 | export declare const kHash: SpeedLevel; // initializer:
75 |
76 | /**
77 |  * defines the speed of the implementation kArray uses a vector for storing data kBitsetHash uses a
78 |  * hash map but checks for the validity of a key with a bitset kHash uses a hash map only
79 |  *
80 |  */
81 | export type SpeedLevel = any;
--------------------------------------------------------------------------------
/test/BackgroundSubtractorMOG2.test.ts:
--------------------------------------------------------------------------------
1 | import { setupOpenCv } from "./cv";
2 |
3 | beforeAll(async () => {
4 |   await setupOpenCv();
5 | });
6 |
7 | describe("BackgroundSubtractorMOG2", () => {
8 |   it("should have correct TypeScript definitions for constructor", () => {
9 |     // Test constructor without parameters
10 |     const bs1 = new cv.BackgroundSubtractorMOG2();
11 |     expect(bs1).toBeDefined();
12 |     bs1.delete();
13 |
14 |     // Test constructor with history parameter
15 |     const bs2 = new cv.BackgroundSubtractorMOG2(500);
16 |     expect(bs2).toBeDefined();
17 |     bs2.delete();
18 |
19 |     // Test constructor with history and varThreshold
20 |     const bs3 = new cv.BackgroundSubtractorMOG2(500, 16);
21 |     expect(bs3).toBeDefined();
22 |     bs3.delete();
23 |
24 |     // Test constructor with all parameters
25 |     const bs4 = new cv.BackgroundSubtractorMOG2(500, 16, true);
26 |     expect(bs4).toBeDefined();
27 |     bs4.delete();
28 |   });
29 |
30 |   it("should have correct TypeScript definitions for inherited methods", () => {
31 |     const bs = new cv.BackgroundSubtractorMOG2();
32 |
33 |     // Test inherited methods from BackgroundSubtractor
34 |     expect(typeof bs.apply).toBe("function");
35 |     expect(typeof bs.getBackgroundImage).toBe("function");
36 |
37 |     // Test apply method with a real Mat
38 |     const testImage = new cv.Mat(100, 100, cv.CV_8UC3);
39 |     const fgMask = new cv.Mat();
40 |
41 |     // This should not throw TypeScript errors
42 |     bs.apply(testImage, fgMask);
43 |     bs.apply(testImage, fgMask, 0.1); // with learning rate
44 |
45 |     // Test getBackgroundImage method
46 |     const bgImage = new cv.Mat();
47 |     bs.getBackgroundImage(bgImage);
48 |
49 |     // Clean up
50 |     testImage.delete();
51 |     fgMask.delete();
52 |     bgImage.delete();
53 |     bs.delete();
54 |   });
55 |
56 |   it("should work in TypeScript usage scenarios from the issue", () => {
57 |     // This test verifies the original issue is resolved
58 |     // These should compile without TypeScript errors
59 |
60 |     // Test the main usage pattern mentioned in the issue
61 |     const backgroundSubtractor = new cv.BackgroundSubtractorMOG2();
62 |     expect(backgroundSubtractor).toBeDefined();
63 |
64 |     // Test with parameters
65 |     const backgroundSubtractorWithParams = new cv.BackgroundSubtractorMOG2(500, 16, true);
66 |     expect(backgroundSubtractorWithParams).toBeDefined();
67 |
68 |     // Clean up
69 |     backgroundSubtractor.delete();
70 |     backgroundSubtractorWithParams.delete();
71 |   });
72 | });
--------------------------------------------------------------------------------
/src/types/opencv/RotatedRect.ts:
--------------------------------------------------------------------------------
1 | import type { float, Point2f, Rect, Rect_, Size2f } from "./_types";
2 |
3 | /**
4 |  * Each rectangle is specified by the center point (mass center), length of each side (represented by
5 |  * [Size2f](#dc/d84/group__core__basic_1gab34496d2466b5f69930ab74c70f117d4}) structure) and the
6 |  * rotation angle in degrees.
7 |  *
8 |  * The sample below demonstrates how to use [RotatedRect](#db/dd6/classcv_1_1RotatedRect}):
9 |  *
10 |  * ```cpp
11 |  * Mat test_image(200, 200, CV_8UC3, Scalar(0));
12 |  * RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
13 |  *
14 |  * Point2f vertices[4];
15 |  * rRect.points(vertices);
16 |  * for (int i = 0; i < 4; i++)
17 |  *     line(test_image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0), 2);
18 |  *
19 |  * Rect brect = rRect.boundingRect();
20 |  * rectangle(test_image, brect, Scalar(255,0,0), 2);
21 |  *
22 |  * imshow("rectangles", test_image);
23 |  * waitKey(0);
24 |  * ```
25 |  *
26 |  * [CamShift](#dc/d6b/group__video__track_1gaef2bd39c8356f423124f1fe7c44d54a1}),
27 |  * [fitEllipse](#d3/dc0/group__imgproc__shape_1gaf259efaad93098103d6c27b9e4900ffa}),
28 |  * [minAreaRect](#d3/dc0/group__imgproc__shape_1ga3d476a3417130ae5154aea421ca7ead9}), CvBox2D
29 |  *
30 |  * Source:
31 |  * [opencv2/core/types.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/types.hpp#L534).
32 |  *
33 |  */
34 | export declare class RotatedRect {
35 |   public angle: float;
36 |
37 |   public center: Point2f;
38 |
39 |   public size: Size2f;
40 |
41 |   public constructor();
42 |
43 |   /**
44 |    * full constructor
45 |    *
46 |    * @param center The rectangle mass center.
47 |    *
48 |    * @param size Width and height of the rectangle.
49 |    *
50 |    * @param angle The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc.,
51 |    * the rectangle becomes an up-right rectangle.
52 |    */
53 |   public constructor(center: Point2f, size: Size2f, angle: float);
54 |
55 |   /**
56 |    * Any 3 end points of the [RotatedRect]. They must be given in order (either clockwise or
57 |    * anticlockwise).
58 |    */
59 |   public constructor(point1: Point2f, point2: Point2f, point3: Point2f);
60 |
61 |   public static boundingRect(rect: RotatedRect): Rect;
62 |
63 |   public static boundingRect2f(rect: RotatedRect): Rect;
64 |
65 |   /**
66 |    * returns 4 vertices of the rectangle
67 |    * @param rect The rotated rectangle
68 |    * @returns Array of 4 points in order: bottomLeft, topLeft, topRight, bottomRight
69 |    */
70 |   public static points(rect: RotatedRect): Point2f[];
71 | }
--------------------------------------------------------------------------------
/src/types/opencv/imgproc_colormap.ts:
--------------------------------------------------------------------------------
1 | import type { InputArray, int, OutputArray } from "./_types";
2 |
3 | /*
4 |  * # Colormap Transformations
5 |  *
6 |  */
7 |
8 | /**
9 |  * Applies a colormap on a given image.
10 |  *
11 |  * @param src The source image, which should be grayscale. Should be 8-bit, 16-bit, or floating-point.
12 |  * @param dst The result is the colored image.
13 |  * @param colormap The colormap to apply.
14 |  */
15 | export declare function applyColorMap(
16 |   src: InputArray,
17 |   dst: OutputArray,
18 |   colormap: int,
19 | ): void;
20 |
21 | /**
22 |  * Applies a user colormap on a given image.
23 |  *
24 |  * @param src The source image, which should be grayscale. Should be 8-bit, 16-bit, or floating-point.
25 |  * @param dst The result is the colored image.
26 |  * @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256.
27 |  */
28 | export declare function applyColorMap(
29 |   src: InputArray,
30 |   dst: OutputArray,
31 |   userColor: InputArray,
32 | ): void;
33 |
34 | /**
35 |  * Colormap types used by the applyColorMap function.
36 |  */
37 | export type ColormapTypes = any;
38 |
39 | export declare const COLORMAP_AUTUMN: ColormapTypes; // initializer: = 0
40 | export declare const COLORMAP_BONE: ColormapTypes; // initializer: = 1
41 | export declare const COLORMAP_JET: ColormapTypes; // initializer: = 2
42 | export declare const COLORMAP_WINTER: ColormapTypes; // initializer: = 3
43 | export declare const COLORMAP_RAINBOW: ColormapTypes; // initializer: = 4
44 | export declare const COLORMAP_OCEAN: ColormapTypes; // initializer: = 5
45 | export declare const COLORMAP_SUMMER: ColormapTypes; // initializer: = 6
46 | export declare const COLORMAP_SPRING: ColormapTypes; // initializer: = 7
47 | export declare const COLORMAP_COOL: ColormapTypes; // initializer: = 8
48 | export declare const COLORMAP_HSV: ColormapTypes; // initializer: = 9
49 | export declare const COLORMAP_PINK: ColormapTypes; // initializer: = 10
50 | export declare const COLORMAP_HOT: ColormapTypes; // initializer: = 11
51 | export declare const COLORMAP_PARULA: ColormapTypes; // initializer: = 12
52 | export declare const COLORMAP_MAGMA: ColormapTypes; // initializer: = 13
53 | export declare const COLORMAP_INFERNO: ColormapTypes; // initializer: = 14
54 | export declare const COLORMAP_PLASMA: ColormapTypes; // initializer: = 15
55 | export declare const COLORMAP_VIRIDIS: ColormapTypes; // initializer: = 16
56 | export declare const COLORMAP_CIVIDIS: ColormapTypes; // initializer: = 17
57 | export declare const COLORMAP_TWILIGHT: ColormapTypes; // initializer: = 18
58 | export declare const COLORMAP_TWILIGHT_SHIFTED: ColormapTypes; // initializer: = 19
59 | export declare const COLORMAP_TURBO: ColormapTypes; // initializer: = 20
60 | export declare const COLORMAP_DEEPGREEN: ColormapTypes; // initializer: = 21
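61 |
62 | // A minimal usage sketch (illustrative only, not part of the generated declarations):
63 | // it assumes `cv` is an initialized OpenCV.js module and `srcGray` is a
64 | // single-channel grayscale Mat.
65 | //
66 | //   const dst = new cv.Mat();
67 | //   cv.applyColorMap(srcGray, dst, cv.COLORMAP_JET);
68 | //   // ... use dst, then release both Mats
69 | //   dst.delete();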
-------------------------------------------------------------------------------- /src/types/opencv/imgproc_object.ts: -------------------------------------------------------------------------------- 1 | import type { InputArray, int, OutputArray } from "./_types"; 2 | /* 3 | * # Object Detection 4 | * 5 | */ 6 | /** 7 | * The function slides through image, compares the overlapped patches of size `$w \\times h$` against 8 | * templ using the specified method and stores the comparison results in result. Here are the formulae 9 | * for the available comparison methods ( `$I$` denotes image, `$T$` template, `$R$` result ). The 10 | * summation is done over template and/or the image patch: `$x' = 0...w-1, y' = 0...h-1$` 11 | * 12 | * After the function finishes the comparison, the best matches can be found as global minimums (when 13 | * [TM_SQDIFF] was used) or maximums (when [TM_CCORR] or [TM_CCOEFF] was used) using the [minMaxLoc] 14 | * function. In case of a color image, template summation in the numerator and each sum in the 15 | * denominator is done over all of the channels and separate mean values are used for each channel. 16 | * That is, the function can take a color template and a color image. The result will still be a 17 | * single-channel image, which is easier to analyze. 18 | * 19 | * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point. 20 | * 21 | * @param templ Searched template. It must not be greater than the source image and must have the same 22 | * data type. 23 | * 24 | * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image 25 | * is $W \times H$ and templ is $w \times h$ , then result is $(W-w+1) \times (H-h+1)$ . 26 | * 27 | * @param method Parameter specifying the comparison method, see TemplateMatchModes 28 | * 29 | * @param mask Mask of searched template. It must have the same data type and size as templ. It is not 30 | * set by default. Currently, only the TM_SQDIFF and TM_CCORR_NORMED methods are supported. 31 | */ 32 | export declare function matchTemplate( 33 | image: InputArray, 34 | templ: InputArray, 35 | result: OutputArray, 36 | method: int, 37 | mask?: InputArray, 38 | ): void; 39 | 40 | export declare const TM_SQDIFF: TemplateMatchModes; // initializer: = 0 41 | 42 | export declare const TM_SQDIFF_NORMED: TemplateMatchModes; // initializer: = 1 43 | 44 | export declare const TM_CCORR: TemplateMatchModes; // initializer: = 2 45 | 46 | export declare const TM_CCORR_NORMED: TemplateMatchModes; // initializer: = 3 47 | 48 | /** 49 | * `\\[R(x,y)= \\sum _{x',y'} (T'(x',y') \\cdot I'(x+x',y+y'))\\]` where `\\[\\begin{array}{l} 50 | * T'(x',y')=T(x',y') - 1/(w \\cdot h) \\cdot \\sum _{x'',y''} T(x'',y'') \\\\ 51 | * I'(x+x',y+y')=I(x+x',y+y') - 1/(w \\cdot h) \\cdot \\sum _{x'',y''} I(x+x'',y+y'') \\end{array}\\]` 52 | * 53 | */ 54 | export declare const TM_CCOEFF: TemplateMatchModes; // initializer: = 4 55 | 56 | export declare const TM_CCOEFF_NORMED: TemplateMatchModes; // initializer: = 5 57 | 58 | export type TemplateMatchModes = any; 59 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # opencv-js 2 | 3 | OpenCV JavaScript version (NPM package) for Node.js or the browser. To get started, see the [OpenCV.js Tutorials](https://docs.opencv.org/4.12.0/#:~:text=OpenCV%2DPython%20Tutorials-,OpenCV.js%20Tutorials,-Tutorials%20for%20contrib).
4 | 5 | The file `opencv.js` was downloaded from https://docs.opencv.org/4.12.0/opencv.js 6 | 7 | TypeScript is supported (thanks to `mirada`). 8 | 9 | # Basic Usage 10 | 11 | ```js 12 | import cvModule from "@techstark/opencv-js"; 13 | 14 | async function getOpenCv() { 15 | let cv; 16 | if (cvModule instanceof Promise) { // some bundlers resolve the module to a promise of the ready cv object 17 | cv = await cvModule; 18 | } else { 19 | if (cvModule.Mat) { // the WASM runtime is already initialized 20 | cv = cvModule; 21 | } else { // otherwise wait for the runtime to signal that it is ready 22 | await new Promise((resolve) => { 23 | cvModule.onRuntimeInitialized = () => resolve(); 24 | }); 25 | cv = cvModule; 26 | } 27 | } 28 | return { cv }; 29 | } 30 | 31 | async function main() { 32 | const { cv } = await getOpenCv(); 33 | console.log("OpenCV.js is ready!"); 34 | // You can now use OpenCV functions here 35 | console.log(cv.getBuildInformation()); 36 | } 37 | 38 | main(); 39 | ``` 40 | 41 | # Code Examples 42 | 43 | - See code examples (React, Angular, Node.js) in [opencv-js-examples](https://github.com/TechStark/opencv-js-examples) 44 | 45 | # Live Demo 46 | 47 | ## Using in React project 48 | 49 | - See [live demo and code here](https://codesandbox.io/s/techstarkopencv-js-demo-page-f7gvk?file=/src/TestPage.jsx) 50 | Live demo screenshot 51 | - Get the test image from here: [Lenna.png](test/Lenna.png) 52 | 53 | ## Using in Angular project 54 | 55 | - See [code here](https://codesandbox.io/s/techstark-opencv-js-angular-demo-hkmc1n?file=/src/app/app.component.ts) 56 | 57 | ## Real-time face detection 58 | 59 | - See [live demo and code here](https://codesandbox.io/s/opencv-js-face-detection-i1i3u) 60 | 61 | ![Real-time face detection](https://user-images.githubusercontent.com/132509/160820773-cdb023a6-77a2-4f2e-a0e9-fb06931c8f9f.gif) 62 | 63 | # How to Use 64 | 65 | - `npm install @techstark/opencv-js` 66 | - or `yarn add @techstark/opencv-js` 67 | - `import cv from "@techstark/opencv-js"` 68 | - for TypeScript, set `"esModuleInterop": true` in `tsconfig.json` 69 | - or `import * as cv from "@techstark/opencv-js"` 70 | 71 | # Webpack Configuration (for browser usage) 72 | 73 | If you use this package in the browser, you need to tell webpack not to polyfill some Node.js core modules. In the file "webpack.config.js", set 74 | 75 | ```js 76 | module.exports = { 77 | resolve: { 78 | modules: [...], 79 | fallback: { 80 | fs: false, 81 | path: false, 82 | crypto: false 83 | } 84 | } 85 | }; 86 | ``` 87 | 88 | # What methods and properties are available 89 | 90 | The TypeScript type declarations may not be up to date with the latest OpenCV.js. Refer to [cvKeys.json](doc/cvKeys.json) to check the available methods and properties at runtime.
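
As a quick runtime probe (a minimal sketch; `TrackerMIL` is just one of the classes declared in this package's typings and exercised in `test/Tracker.test.ts`):

```js
import cv from "@techstark/opencv-js";

// Run this after the runtime has initialized (see "Basic Usage" above).
console.log(typeof cv.TrackerMIL); // "function" when this build exposes it
console.log(Object.keys(cv).length); // rough count of runtime properties
```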
91 | 92 | # Star History 93 | 94 | [![Star History Chart](https://api.star-history.com/svg?repos=techstark/opencv-js&type=Date)](https://star-history.com/#techstark/opencv-js&Date) 95 | -------------------------------------------------------------------------------- /test/applyColorMap.test.ts: -------------------------------------------------------------------------------- 1 | import { setupOpenCv } from "./cv"; 2 | 3 | beforeAll(setupOpenCv); 4 | 5 | describe("applyColorMap", () => { 6 | it("should apply COLORMAP_JET to a grayscale image", async () => { 7 | // Create a simple grayscale image 8 | const src = new cv.Mat(100, 100, cv.CV_8UC1); 9 | 10 | // Fill with gradient values 11 | for (let i = 0; i < 100; i++) { 12 | for (let j = 0; j < 100; j++) { 13 | src.ucharPtr(i, j)[0] = Math.floor((i + j) * 255 / 200); 14 | } 15 | } 16 | 17 | const dst = new cv.Mat(); 18 | 19 | // Apply JET colormap 20 | cv.applyColorMap(src, dst, cv.COLORMAP_JET); 21 | 22 | // Verify the output is a 3-channel color image 23 | expect(dst.channels()).toBe(3); 24 | expect(dst.rows).toBe(100); 25 | expect(dst.cols).toBe(100); 26 | expect(dst.type()).toBe(cv.CV_8UC3); 27 | 28 | // Clean up 29 | src.delete(); 30 | dst.delete(); 31 | }); 32 | 33 | it("should have all COLORMAP constants available", () => { 34 | // Test that all colormap constants are defined 35 | expect(typeof cv.COLORMAP_JET).toBe('number'); 36 | expect(typeof cv.COLORMAP_AUTUMN).toBe('number'); 37 | expect(typeof cv.COLORMAP_BONE).toBe('number'); 38 | expect(typeof cv.COLORMAP_WINTER).toBe('number'); 39 | expect(typeof cv.COLORMAP_RAINBOW).toBe('number'); 40 | expect(typeof cv.COLORMAP_OCEAN).toBe('number'); 41 | expect(typeof cv.COLORMAP_SUMMER).toBe('number'); 42 | expect(typeof cv.COLORMAP_SPRING).toBe('number'); 43 | expect(typeof cv.COLORMAP_COOL).toBe('number'); 44 | expect(typeof cv.COLORMAP_HSV).toBe('number'); 45 | expect(typeof cv.COLORMAP_PINK).toBe('number'); 46 | expect(typeof cv.COLORMAP_HOT).toBe('number'); 47 | expect(typeof cv.COLORMAP_PARULA).toBe('number'); 48 | expect(typeof cv.COLORMAP_MAGMA).toBe('number'); 49 | expect(typeof cv.COLORMAP_INFERNO).toBe('number'); 50 | expect(typeof cv.COLORMAP_PLASMA).toBe('number'); 51 | expect(typeof cv.COLORMAP_VIRIDIS).toBe('number'); 52 | expect(typeof cv.COLORMAP_CIVIDIS).toBe('number'); 53 | expect(typeof cv.COLORMAP_TWILIGHT).toBe('number'); 54 | expect(typeof cv.COLORMAP_TWILIGHT_SHIFTED).toBe('number'); 55 | expect(typeof cv.COLORMAP_TURBO).toBe('number'); 56 | expect(typeof cv.COLORMAP_DEEPGREEN).toBe('number'); 57 | }); 58 | 59 | it("should apply different colormaps correctly", async () => { 60 | // Create a simple grayscale image 61 | const src = new cv.Mat(50, 50, cv.CV_8UC1, new cv.Scalar(128)); 62 | const dst1 = new cv.Mat(); 63 | const dst2 = new cv.Mat(); 64 | 65 | // Apply different colormaps 66 | cv.applyColorMap(src, dst1, cv.COLORMAP_JET); 67 | cv.applyColorMap(src, dst2, cv.COLORMAP_VIRIDIS); 68 | 69 | // Both should be 3-channel color images 70 | expect(dst1.channels()).toBe(3); 71 | expect(dst2.channels()).toBe(3); 72 | 73 | // Different colormaps should produce different results 74 | const data1 = dst1.data; 75 | const data2 = dst2.data; 76 | let different = false; 77 | for (let i = 0; i < Math.min(data1.length, data2.length); i++) { 78 | if (data1[i] !== data2[i]) { 79 | different = true; 80 | break; 81 | } 82 | } 83 | expect(different).toBe(true); 84 | 85 | // Clean up 86 | src.delete(); 87 | dst1.delete(); 88 | dst2.delete(); 89 | }); 90 | }); 
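
// Sketch of one more case (an assumption, not verified against this wasm build):
// src/types/opencv/imgproc_colormap.ts also declares a user-colormap overload
// that takes a 256-entry lookup table instead of a COLORMAP_* constant.
describe("applyColorMap with a user colormap (sketch)", () => {
  it("should accept a 256x1 CV_8UC3 lookup table", () => {
    const src = new cv.Mat(10, 10, cv.CV_8UC1, new cv.Scalar(42));
    const lut = new cv.Mat(256, 1, cv.CV_8UC3, new cv.Scalar(255, 0, 0));
    const dst = new cv.Mat();
    cv.applyColorMap(src, dst, lut); // user-colormap overload
    expect(dst.channels()).toBe(3); // output should be a 3-channel color image
    src.delete();
    lut.delete();
    dst.delete();
  });
});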
-------------------------------------------------------------------------------- /src/types/opencv/core_cluster.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | double, 3 | InputArray, 4 | InputOutputArray, 5 | int, 6 | OutputArray, 7 | TermCriteria, 8 | _EqPredicate, 9 | } from "./_types"; 10 | /* 11 | * # Clustering 12 | * 13 | */ 14 | /** 15 | * The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters 16 | * and groups the input samples around the clusters. As an output, `$\\texttt{bestLabels}_i$` contains 17 | * a 0-based cluster index for the sample stored in the `$i^{th}$` row of the samples matrix. 18 | * 19 | * (Python) An example on K-means clustering can be found at 20 | * opencv_source_code/samples/python/kmeans.py 21 | * 22 | * The function returns the compactness measure that is computed as `\\[\\sum _i \\| \\texttt{samples} 23 | * _i - \\texttt{centers} _{ \\texttt{labels} _i} \\| ^2\\]` after every attempt. The best (minimum) 24 | * value is chosen and the corresponding labels and the compactness value are returned by the function. 25 | * Basically, you can use only the core of the function, set the number of attempts to 1, initialize 26 | * labels each time using a custom algorithm, pass them with the ( flags = [KMEANS_USE_INITIAL_LABELS] 27 | * ) flag, and then choose the best (most-compact) clustering. 28 | * 29 | * @param data Data for clustering. An array of N-dimensional points with float coordinates is needed. 30 | * Examples of this array can be: `Mat points(count, 2, CV_32F);`, `Mat points(count, 1, CV_32FC2);`, 31 | * `Mat points(1, count, CV_32FC2);`, or `std::vector<Point2f> points(sampleCount);` 32 | * 33 | * 34 | * @param K Number of clusters to split the set by. 35 | * 36 | * @param bestLabels Input/output integer array that stores the cluster indices for every sample. 37 | * 38 | * @param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or 39 | * the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster 40 | * centers moves by less than criteria.epsilon on some iteration, the algorithm stops. 41 | * 42 | * @param attempts Flag to specify the number of times the algorithm is executed using different 43 | * initial labellings. The algorithm returns the labels that yield the best compactness (see the last 44 | * function parameter). 45 | * 46 | * @param flags Flag that can take values of cv::KmeansFlags 47 | * 48 | * @param centers Output matrix of the cluster centers, one row per each cluster center. 49 | */ 50 | export declare function kmeans( 51 | data: InputArray, 52 | K: int, 53 | bestLabels: InputOutputArray, 54 | criteria: TermCriteria, 55 | attempts: int, 56 | flags: int, 57 | centers?: OutputArray, 58 | ): double; 59 | 60 | /** 61 | * The generic function partition implements an `$O(N^2)$` algorithm for splitting a set of `$N$` 62 | * elements into one or more equivalency classes. The function returns the number of 63 | * equivalency classes. 64 | * 65 | * @param _vec Set of elements stored as a vector. 66 | * 67 | * @param labels Output vector of labels. It contains as many elements as vec. Each label labels[i] is 68 | * a 0-based cluster index of vec[i]. 69 | * 70 | * @param predicate Equivalence predicate (pointer to a boolean function of two arguments or an 71 | * instance of the class that has the method bool operator()(const _Tp& a, const _Tp& b) ).
The 72 | * predicate returns true when the elements are certainly in the same class, and returns false if they 73 | * may or may not be in the same class. 74 | */ 75 | export declare function partition( 76 | arg119: any, 77 | arg120: any, 78 | _vec: any, 79 | labels: any, 80 | predicate?: _EqPredicate, 81 | ): any; 82 | -------------------------------------------------------------------------------- /src/types/opencv/QRCodeDetector.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | bool, 3 | InputArray, 4 | InputOutputArray, 5 | OutputArray, 6 | OutputArrayOfArrays, 7 | Point2f, 8 | } from "./_types"; 9 | 10 | /** 11 | * QR Code detection and decoding class. 12 | * 13 | * This class implements QR code detection and decoding functionality. 14 | * It can detect QR codes in an image and decode their content. 15 | * 16 | * Source: 17 | * [opencv2/objdetect.hpp](https://github.com/opencv/opencv/tree/master/modules/objdetect/include/opencv2/objdetect.hpp). 18 | */ 19 | export declare class QRCodeDetector { 20 | /** 21 | * QRCodeDetector constructor 22 | */ 23 | public constructor(); 24 | 25 | /** 26 | * Detects QR code in image and returns the quadrangle containing the code. 27 | * 28 | * @param img grayscale or color (BGR) image containing (or not) QR code. 29 | * @param points Output vector of vertices of the minimum-area quadrangle containing the code. 30 | */ 31 | public detect(img: InputArray, points: OutputArray): bool; 32 | 33 | /** 34 | * Decodes QR code in image once it's found by the detect() method. 35 | * 36 | * @param img grayscale or color (BGR) image containing QR code. 37 | * @param points Quadrangle vertices found by detect() method (or some other algorithm). 38 | * @param straight_qrcode The optional output image containing rectified and binarized QR code 39 | */ 40 | public decode( 41 | img: InputArray, 42 | points: InputArray, 43 | straight_qrcode?: OutputArray, 44 | ): String; 45 | 46 | /** 47 | * Both detects and decodes QR code 48 | * 49 | * @param img grayscale or color (BGR) image containing QR code. 50 | * @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found. 51 | * @param straight_qrcode The optional output image containing rectified and binarized QR code 52 | */ 53 | public detectAndDecode( 54 | img: InputArray, 55 | points?: OutputArray, 56 | straight_qrcode?: OutputArray, 57 | ): String; 58 | 59 | /** 60 | * Detects QR codes in image and returns the vector of the quadrangles containing the codes. 61 | * 62 | * @param img grayscale or color (BGR) image containing (or not) QR codes. 63 | * @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes. 64 | */ 65 | public detectMulti(img: InputArray, points: OutputArrayOfArrays): bool; 66 | 67 | /** 68 | * Decodes QR codes in image once it's found by the detectMulti() method. 69 | * 70 | * @param img grayscale or color (BGR) image containing QR codes. 71 | * @param points vector of Quadrangle vertices found by detectMulti() method (or some other algorithm). 72 | * @param decoded_info UTF8-encoded output vector of String or empty vector of String if the codes cannot be decoded. 
73 | * @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes 74 | */ 75 | public decodeMulti( 76 | img: InputArray, 77 | points: InputArray, 78 | decoded_info: any, 79 | straight_qrcode?: OutputArrayOfArrays, 80 | ): bool; 81 | 82 | /** 83 | * Both detects and decodes QR codes 84 | * 85 | * @param img grayscale or color (BGR) image containing QR codes. 86 | * @param decoded_info UTF8-encoded output vector of String or empty vector of String if the codes cannot be decoded. 87 | * @param points optional output vector of vertices of the found QR code quadrangles. Will be empty if not found. 88 | * @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes 89 | */ 90 | public detectAndDecodeMulti( 91 | img: InputArray, 92 | decoded_info: any, 93 | points?: OutputArrayOfArrays, 94 | straight_qrcode?: OutputArrayOfArrays, 95 | ): bool; 96 | 97 | /** 98 | * Aruco-based QR code detector 99 | */ 100 | public setUseAruco(use_aruco: bool): void; 101 | 102 | /** 103 | * Get if Aruco-based QR code detector is used 104 | */ 105 | public getUseAruco(): bool; 106 | 107 | /** 108 | * Releases the object 109 | */ 110 | public delete(): void; 111 | } 112 | -------------------------------------------------------------------------------- /src/types/opencv/features2d_draw.ts: -------------------------------------------------------------------------------- 1 | import type { InputArray, InputOutputArray } from "./_types"; 2 | /* 3 | * # Drawing Function of Keypoints and Matches 4 | * 5 | */ 6 | /** 7 | * For Python API, flags are modified as cv.DRAW_MATCHES_FLAGS_DEFAULT, 8 | * cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, 9 | * cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS 10 | * 11 | * @param image Source image. 12 | * 13 | * @param keypoints Keypoints from the source image. 14 | * 15 | * @param outImage Output image. Its content depends on the flags value defining what is drawn in the 16 | * output image. See possible flags bit values below. 17 | * 18 | * @param color Color of keypoints. 19 | * 20 | * @param flags Flags setting drawing features. Possible flags bit values are defined by 21 | * DrawMatchesFlags. See details above in drawMatches . 22 | */ 23 | export declare function drawKeypoints( 24 | image: InputArray, 25 | keypoints: any, 26 | outImage: InputOutputArray, 27 | color?: any, 28 | flags?: DrawMatchesFlags, 29 | ): void; 30 | 31 | /** 32 | * This function draws matches of keypoints from two images in the output image. Match is a line 33 | * connecting two keypoints (circles). See [cv::DrawMatchesFlags]. 34 | * 35 | * @param img1 First source image. 36 | * 37 | * @param keypoints1 Keypoints from the first source image. 38 | * 39 | * @param img2 Second source image. 40 | * 41 | * @param keypoints2 Keypoints from the second source image. 42 | * 43 | * @param matches1to2 Matches from the first image to the second one, which means that keypoints1[i] 44 | * has a corresponding point in keypoints2[matches[i]] . 45 | * 46 | * @param outImg Output image. Its content depends on the flags value defining what is drawn in the 47 | * output image. See possible flags bit values below. 48 | * 49 | * @param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1) , 50 | * the color is generated randomly. 51 | * 52 | * @param singlePointColor Color of single keypoints (circles), which means that keypoints do not have 53 | * the matches. 
If singlePointColor==Scalar::all(-1), the color is generated randomly. 54 | * 55 | * @param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are 56 | * drawn. 57 | * 58 | * @param flags Flags setting drawing features. Possible flags bit values are defined by 59 | * DrawMatchesFlags. 60 | */ 61 | export declare function drawMatches( 62 | img1: InputArray, 63 | keypoints1: any, 64 | img2: InputArray, 65 | keypoints2: any, 66 | matches1to2: any, 67 | outImg: InputOutputArray, 68 | matchColor?: any, 69 | singlePointColor?: any, 70 | matchesMask?: any, 71 | flags?: DrawMatchesFlags, 72 | ): void; 73 | 74 | /** 75 | * This is an overloaded member function, provided for convenience. It differs from the above function 76 | * only in what argument(s) it accepts. 77 | */ 78 | export declare function drawMatches( 79 | img1: InputArray, 80 | keypoints1: any, 81 | img2: InputArray, 82 | keypoints2: any, 83 | matches1to2: any, 84 | outImg: InputOutputArray, 85 | matchColor?: any, 86 | singlePointColor?: any, 87 | matchesMask?: any, 88 | flags?: DrawMatchesFlags, 89 | ): void; 90 | 91 | /** 92 | * Output image matrix will be created ([Mat::create]), i.e. existing memory of output image may be 93 | * reused. Two source images, matches, and single keypoints will be drawn. For each keypoint only the 94 | * center point will be drawn (without the circle around keypoint with keypoint size and orientation). 95 | * 96 | */ 97 | export declare const DEFAULT: DrawMatchesFlags; // initializer: = 0 98 | 99 | /** 100 | * Output image matrix will not be created ([Mat::create]). Matches will be drawn on existing content 101 | * of output image. 102 | * 103 | */ 104 | export declare const DRAW_OVER_OUTIMG: DrawMatchesFlags; // initializer: = 1 105 | 106 | export declare const NOT_DRAW_SINGLE_POINTS: DrawMatchesFlags; // initializer: = 2 107 | 108 | /** 109 | * For each keypoint the circle around keypoint with keypoint size and orientation will be drawn. 110 | * 111 | */ 112 | export declare const DRAW_RICH_KEYPOINTS: DrawMatchesFlags; // initializer: = 4 113 | 114 | export type DrawMatchesFlags = any; 115 | -------------------------------------------------------------------------------- /src/types/opencv/MatExpr.ts: -------------------------------------------------------------------------------- 1 | import type { double, int, Mat, MatOp, Scalar } from "./_types"; 2 | 3 | /** 4 | * This is a list of implemented matrix 5 | * operations that can be combined in arbitrary complex expressions (here A, B stand for matrices ( 6 | * [Mat](#d3/d63/classcv_1_1Mat}) ), s for a scalar ( Scalar ), alpha for a real-valued scalar ( double 7 | * )): 8 | * 9 | * Addition, subtraction, negation: `A+B`, `A-B`, `A+s`, `A-s`, `s+A`, `s-A`, `-A` 10 | * Scaling: `A*alpha` 11 | * Per-element multiplication and division: `A.mul(B)`, `A/B`, `alpha/A` 12 | * Matrix multiplication: `A*B` 13 | * Transposition: `A.t()` (means A^T) 14 | * Matrix inversion and pseudo-inversion, solving linear systems and least-squares problems: 15 | * `A.inv([method]) (~ A^-1)`, `A.inv([method])*B (~ X: AX=B)` 16 | * Comparison: `A cmpop B`, `A cmpop alpha`, `alpha cmpop A`, where *cmpop* is one of `>`, `>=`, `==`, 17 | * `!=`, `<=`, `<`. The result of comparison is an 8-bit single channel mask whose elements are set to 18 | * 255 (if the particular element or pair of elements satisfy the condition) or 0.
19 | * Bitwise logical operations: `A logicop B`, `A logicop s`, `s logicop A`, `~A`, where *logicop* is 20 | * one of `&`, `|`, `^`. 21 | * Element-wise minimum and maximum: `min(A, B)`, `min(A, alpha)`, `max(A, B)`, `max(A, alpha)` 22 | * Element-wise absolute value: `abs(A)` 23 | * Cross-product, dot-product: `A.cross(B)`, `A.dot(B)` 24 | * Any function of matrix or matrices and scalars that returns a matrix or a scalar, such as norm, 25 | * mean, sum, countNonZero, trace, determinant, repeat, and others. 26 | * Matrix initializers ( [Mat::eye()](#d3/d63/classcv_1_1Mat_1a2cf9b9acde7a9852542bbc20ef851ed2}), 27 | * [Mat::zeros()](#d3/d63/classcv_1_1Mat_1a0b57b6a326c8876d944d188a46e0f556}), 28 | * [Mat::ones()](#d3/d63/classcv_1_1Mat_1a69ae0402d116fc9c71908d8508dc2f09}) ), matrix comma-separated 29 | * initializers, matrix constructors and operators that extract sub-matrices (see 30 | * [Mat](#d3/d63/classcv_1_1Mat}) description). 31 | * Mat_<destination_type>() constructors to cast the result to the proper type. 32 | * 33 | * Comma-separated initializers and probably some other operations may require additional explicit 34 | * Mat() or Mat_<T>() constructor calls to resolve a possible ambiguity. 35 | * Here are examples of matrix expressions: 36 | * 37 | * ```cpp 38 | * // compute pseudo-inverse of A, equivalent to A.inv(DECOMP_SVD) 39 | * SVD svd(A); 40 | * Mat pinvA = svd.vt.t()*Mat::diag(1./svd.w)*svd.u.t(); 41 | * 42 | * // compute the new vector of parameters in the Levenberg-Marquardt algorithm 43 | * x -= (A.t()*A + lambda*Mat::eye(A.cols,A.cols,A.type())).inv(DECOMP_CHOLESKY)*(A.t()*err); 44 | * 45 | * // sharpen image using "unsharp mask" algorithm 46 | * Mat blurred; double sigma = 1, threshold = 5, amount = 1; 47 | * GaussianBlur(img, blurred, Size(), sigma, sigma); 48 | * Mat lowContrastMask = abs(img - blurred) < threshold; 49 | * Mat sharpened = img*(1+amount) + blurred*(-amount); 50 | * img.copyTo(sharpened, lowContrastMask); 51 | * ``` 52 | * 53 | * Source: 54 | * [opencv2/core/mat.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/mat.hpp#L3557).
55 | * 56 | */ 57 | export declare class MatExpr extends Mat { 58 | public a: Mat; 59 | 60 | public alpha: double; 61 | 62 | public b: Mat; 63 | 64 | public beta: double; 65 | 66 | public c: Mat; 67 | 68 | public flags: int; 69 | 70 | public op: MatOp; 71 | 72 | public s: Scalar; 73 | 74 | public constructor(); 75 | 76 | public constructor(m: Mat); 77 | 78 | public constructor( 79 | _op: MatOp, 80 | _flags: int, 81 | _a?: Mat, 82 | _b?: Mat, 83 | _c?: Mat, 84 | _alpha?: double, 85 | _beta?: double, 86 | _s?: Scalar, 87 | ); 88 | 89 | public col(x: int): MatExpr; 90 | 91 | public cross(m: Mat): Mat; 92 | 93 | public diag(d?: int): MatExpr; 94 | 95 | public dot(m: Mat): Mat; 96 | 97 | public inv(method?: int): MatExpr; 98 | 99 | public mul(e: MatExpr, scale?: double): MatExpr; 100 | 101 | public mul(m: Mat, scale?: double): MatExpr; 102 | 103 | public row(y: int): MatExpr; 104 | public t(): MatExpr; 105 | 106 | public type(): int; 107 | } 108 | -------------------------------------------------------------------------------- /src/types/opencv/core_hal_interface.ts: -------------------------------------------------------------------------------- 1 | import type { cvhalDFT, int, size_t, uchar } from "./_types"; 2 | /* 3 | * # Interface 4 | * Define your functions to override default implementations: 5 | * 6 | * ```cpp 7 | * #undef hal_add8u 8 | * #define hal_add8u my_add8u 9 | * ``` 10 | */ 11 | /** 12 | * @param context pointer to context storing all necessary data 13 | * 14 | * @param src_data source image data and step 15 | * 16 | * @param dst_data destination image data and step 17 | */ 18 | export declare function hal_ni_dct2D( 19 | context: cvhalDFT, 20 | src_data: uchar, 21 | src_step: size_t, 22 | dst_data: uchar, 23 | dst_step: size_t, 24 | ): cvhalDFT; 25 | 26 | /** 27 | * @param context pointer to context storing all necessary data 28 | */ 29 | export declare function hal_ni_dctFree2D(context: cvhalDFT): cvhalDFT; 30 | 31 | /** 32 | * @param context double pointer to context storing all necessary data 33 | * 34 | * @param width image dimensions 35 | * 36 | * @param depth image type (CV_32F or CV64F) 37 | * 38 | * @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...) 
39 | */ 40 | export declare function hal_ni_dctInit2D( 41 | context: cvhalDFT, 42 | width: int, 43 | height: int, 44 | depth: int, 45 | flags: int, 46 | ): cvhalDFT; 47 | 48 | /** 49 | * @param context pointer to context storing all necessary data 50 | * 51 | * @param src source data 52 | * 53 | * @param dst destination data 54 | */ 55 | export declare function hal_ni_dft1D( 56 | context: cvhalDFT, 57 | src: uchar, 58 | dst: uchar, 59 | ): cvhalDFT; 60 | 61 | /** 62 | * @param context pointer to context storing all necessary data 63 | * 64 | * @param src_data source image data and step 65 | * 66 | * @param dst_data destination image data and step 67 | */ 68 | export declare function hal_ni_dft2D( 69 | context: cvhalDFT, 70 | src_data: uchar, 71 | src_step: size_t, 72 | dst_data: uchar, 73 | dst_step: size_t, 74 | ): cvhalDFT; 75 | 76 | /** 77 | * @param context pointer to context storing all necessary data 78 | */ 79 | export declare function hal_ni_dftFree1D(context: cvhalDFT): cvhalDFT; 80 | 81 | /** 82 | * @param context pointer to context storing all necessary data 83 | */ 84 | export declare function hal_ni_dftFree2D(context: cvhalDFT): cvhalDFT; 85 | 86 | /** 87 | * @param context double pointer to context storing all necessary data 88 | * 89 | * @param len transformed array length 90 | * 91 | * @param count estimated transformation count 92 | * 93 | * @param depth array type (CV_32F or CV_64F) 94 | * 95 | * @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, CV_HAL_DFT_SCALE, ...) 96 | * 97 | * @param needBuffer pointer to boolean variable, if valid pointer provided, then variable value should 98 | * be set to true to signal that additional memory buffer is needed for operations 99 | */ 100 | export declare function hal_ni_dftInit1D( 101 | context: cvhalDFT, 102 | len: int, 103 | count: int, 104 | depth: int, 105 | flags: int, 106 | needBuffer: any, 107 | ): cvhalDFT; 108 | 109 | /** 110 | * @param context double pointer to context storing all necessary data 111 | * 112 | * @param width image dimensions 113 | * 114 | * @param depth image type (CV_32F or CV64F) 115 | * 116 | * @param src_channels number of channels in input image 117 | * 118 | * @param dst_channels number of channels in output image 119 | * 120 | * @param flags algorithm options (combination of CV_HAL_DFT_INVERSE, ...) 121 | * 122 | * @param nonzero_rows number of nonzero rows in image, can be used for optimization 123 | */ 124 | export declare function hal_ni_dftInit2D( 125 | context: cvhalDFT, 126 | width: int, 127 | height: int, 128 | depth: int, 129 | src_channels: int, 130 | dst_channels: int, 131 | flags: int, 132 | nonzero_rows: int, 133 | ): cvhalDFT; 134 | 135 | /** 136 | * @param src_data Source image 137 | * 138 | * @param width Source image dimensions 139 | * 140 | * @param depth Depth of source image 141 | * 142 | * @param minVal Pointer to the returned global minimum and maximum in an array. 143 | * 144 | * @param minIdx Pointer to the returned minimum and maximum location. 145 | * 146 | * @param mask Specified array region. 
147 | */ 148 | export declare function hal_ni_minMaxIdx( 149 | src_data: uchar, 150 | src_step: size_t, 151 | width: int, 152 | height: int, 153 | depth: int, 154 | minVal: any, 155 | maxVal: any, 156 | minIdx: any, 157 | maxIdx: any, 158 | mask: uchar, 159 | ): uchar; 160 | -------------------------------------------------------------------------------- /src/types/opencv/Algorithm.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | bool, 3 | EmscriptenEmbindInstance, 4 | FileNode, 5 | FileStorage, 6 | Ptr, 7 | } from "./_types"; 8 | 9 | /** 10 | * This is a base class for all more or less complex algorithms in OpenCV, especially for classes of 11 | * algorithms for which there can be multiple implementations. The examples 12 | * are stereo correspondence (for which there are algorithms like block matching, semi-global block 13 | * matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians 14 | * models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck 15 | * etc.). 16 | * 17 | * Here is an example of [SimpleBlobDetector](#d0/d7a/classcv_1_1SimpleBlobDetector}) use in your 18 | * application via the [Algorithm](#d3/d46/classcv_1_1Algorithm}) interface: 19 | * 20 | * ```cpp 21 | * Ptr<SimpleBlobDetector> sbd = SimpleBlobDetector::create(); 22 | * FileStorage fs_read("SimpleBlobDetector_params.xml", FileStorage::READ); 23 | * 24 | * if (fs_read.isOpened()) // if we have file with parameters, read them 25 | * { 26 | * sbd->read(fs_read.root()); 27 | * fs_read.release(); 28 | * } 29 | * else // else modify the parameters and store them; user can later edit the file to use different 30 | * parameters 31 | * { 32 | * fs_read.release(); 33 | * FileStorage fs_write("SimpleBlobDetector_params.xml", FileStorage::WRITE); 34 | * sbd->write(fs_write); 35 | * fs_write.release(); 36 | * } 37 | * 38 | * Mat result, image = imread("../data/detect_blob.png", IMREAD_COLOR); 39 | * vector<KeyPoint> keypoints; 40 | * sbd->detect(image, keypoints, Mat()); 41 | * 42 | * drawKeypoints(image, keypoints, result); 43 | * for (vector<KeyPoint>::iterator k = keypoints.begin(); k != keypoints.end(); ++k) 44 | * circle(result, k->pt, (int)k->size, Scalar(0, 0, 255), 2); 45 | * 46 | * imshow("result", result); 47 | * waitKey(0); 48 | * ``` 49 | * 50 | * Source: 51 | * [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L3077). 52 | * 53 | */ 54 | export declare class Algorithm extends EmscriptenEmbindInstance { 55 | public constructor(); 56 | 57 | public clear(): void; 58 | 59 | public empty(): bool; 60 | 61 | /** 62 | * Returns the algorithm string identifier. This string is used as top level xml/yml node tag when 63 | * the object is saved to a file or string. 64 | */ 65 | public getDefaultName(): String; 66 | 67 | public read(fn: FileNode): FileNode; 68 | 69 | /** 70 | * Saves the algorithm to a file. In order to make this method work, the derived class must implement 71 | * Algorithm::write(FileStorage& fs). 72 | */ 73 | public save(filename: String): String; 74 | 75 | public write(fs: FileStorage): FileStorage; 76 | 77 | public write(fs: Ptr, name?: String): Ptr; 78 | 79 | /** 80 | * This is a static template method of [Algorithm]. Its usage is as follows (in the case of SVM): 81 | * 82 | * ```cpp 83 | * Ptr<SVM> svm = Algorithm::load<SVM>("my_svm_model.xml"); 84 | * ``` 85 | * 86 | * In order to make this method work, the derived class must overwrite [Algorithm::read](const 87 | * [FileNode]& fn). 88 | * 89 | * @param filename Name of the file to read.
89 | * 90 | * @param objname The optional name of the node to read (if empty, the first top-level node will be 91 | * used) 92 | */ 93 | public static load(arg0: any, filename: String, objname?: String): Ptr; 94 | 95 | /** 96 | * This is a static template method of [Algorithm]. Its usage is as follows (in the case of SVM): 97 | * 98 | * ```cpp 99 | * Ptr<SVM> svm = Algorithm::loadFromString<SVM>(myStringModel); 100 | * ``` 101 | * 102 | * @param strModel The string variable containing the model you want to load. 103 | * 104 | * @param objname The optional name of the node to read (if empty, the first top-level node will be 105 | * used) 106 | */ 107 | public static loadFromString( 108 | arg1: any, 109 | strModel: String, 110 | objname?: String, 111 | ): Ptr; 112 | 113 | /** 114 | * This is a static template method of [Algorithm]. Its usage is as follows (in the case of SVM): 115 | * 116 | * ```cpp 117 | * cv::FileStorage fsRead("example.xml", FileStorage::READ); 118 | * Ptr<SVM> svm = Algorithm::read<SVM>(fsRead.root()); 119 | * ``` 120 | * 121 | * In order to make this method work, the derived class must overwrite [Algorithm::read](const 122 | * [FileNode]& fn) and also have a static create() method without parameters (or with all the optional 123 | * parameters) 124 | */ 125 | public static read(arg2: any, fn: FileNode): Ptr; 126 | } 127 | -------------------------------------------------------------------------------- /src/types/opencv/QRCodeDetectorAruco.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | bool, 3 | float, 4 | InputArray, 5 | OutputArray, 6 | OutputArrayOfArrays, 7 | } from "./_types"; 8 | 9 | /** 10 | * Parameters for QRCodeDetectorAruco 11 | */ 12 | export declare class QRCodeDetectorAruco_Params { 13 | public minModuleSizeInPyramid: float; 14 | public maxRotation: float; 15 | public maxModuleSizeMismatch: float; 16 | public maxTimingPatternMismatch: float; 17 | public maxPenalties: float; 18 | public maxColorsMismatch: float; 19 | public scaleTimingPatternScore: float; 20 | 21 | public constructor(); 22 | 23 | /** 24 | * Releases the object 25 | */ 26 | public delete(): void; 27 | } 28 | 29 | /** 30 | * QR Code detection and decoding class using Aruco-based detection. 31 | * 32 | * This class implements QR code detection and decoding functionality using 33 | * Aruco marker detection techniques for improved robustness. 34 | * 35 | * Source: 36 | * [opencv2/objdetect.hpp](https://github.com/opencv/opencv/tree/master/modules/objdetect/include/opencv2/objdetect.hpp). 37 | */ 38 | export declare class QRCodeDetectorAruco { 39 | /** 40 | * QRCodeDetectorAruco constructor 41 | */ 42 | public constructor(); 43 | 44 | /** 45 | * QRCodeDetectorAruco constructor with parameters 46 | * 47 | * @param params QRCodeDetectorAruco parameters 48 | */ 49 | public constructor(params: QRCodeDetectorAruco_Params); 50 | 51 | /** 52 | * Detects QR code in image and returns the quadrangle containing the code. 53 | * 54 | * @param img grayscale or color (BGR) image containing (or not) QR code. 55 | * @param points Output vector of vertices of the minimum-area quadrangle containing the code. 56 | */ 57 | public detect(img: InputArray, points: OutputArray): bool; 58 | 59 | /** 60 | * Decodes QR code in image once it's found by the detect() method. 61 | * 62 | * @param img grayscale or color (BGR) image containing QR code. 63 | * @param points Quadrangle vertices found by detect() method (or some other algorithm).
64 | * @param straight_qrcode The optional output image containing rectified and binarized QR code 65 | */ 66 | public decode( 67 | img: InputArray, 68 | points: InputArray, 69 | straight_qrcode?: OutputArray, 70 | ): String; 71 | 72 | /** 73 | * Both detects and decodes QR code 74 | * 75 | * @param img grayscale or color (BGR) image containing QR code. 76 | * @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found. 77 | * @param straight_qrcode The optional output image containing rectified and binarized QR code 78 | */ 79 | public detectAndDecode( 80 | img: InputArray, 81 | points?: OutputArray, 82 | straight_qrcode?: OutputArray, 83 | ): String; 84 | 85 | /** 86 | * Detects QR codes in image and returns the vector of the quadrangles containing the codes. 87 | * 88 | * @param img grayscale or color (BGR) image containing (or not) QR codes. 89 | * @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes. 90 | */ 91 | public detectMulti(img: InputArray, points: OutputArrayOfArrays): bool; 92 | 93 | /** 94 | * Decodes QR codes in image once it's found by the detectMulti() method. 95 | * 96 | * @param img grayscale or color (BGR) image containing QR codes. 97 | * @param points vector of Quadrangle vertices found by detectMulti() method (or some other algorithm). 98 | * @param decoded_info UTF8-encoded output vector of String or empty vector of String if the codes cannot be decoded. 99 | * @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes 100 | */ 101 | public decodeMulti( 102 | img: InputArray, 103 | points: InputArray, 104 | decoded_info: any, 105 | straight_qrcode?: OutputArrayOfArrays, 106 | ): bool; 107 | 108 | /** 109 | * Both detects and decodes QR codes 110 | * 111 | * @param img grayscale or color (BGR) image containing QR codes. 112 | * @param decoded_info UTF8-encoded output vector of String or empty vector of String if the codes cannot be decoded. 113 | * @param points optional output vector of vertices of the found QR code quadrangles. Will be empty if not found. 114 | * @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes 115 | */ 116 | public detectAndDecodeMulti( 117 | img: InputArray, 118 | decoded_info: any, 119 | points?: OutputArrayOfArrays, 120 | straight_qrcode?: OutputArrayOfArrays, 121 | ): bool; 122 | 123 | /** 124 | * Get detector parameters 125 | */ 126 | public getDetectorParameters(): QRCodeDetectorAruco_Params; 127 | 128 | /** 129 | * Set detector parameters 130 | * 131 | * @param params QRCodeDetectorAruco parameters 132 | */ 133 | public setDetectorParameters(params: QRCodeDetectorAruco_Params): void; 134 | 135 | /** 136 | * Releases the object 137 | */ 138 | public delete(): void; 139 | } 140 | -------------------------------------------------------------------------------- /src/types/opencv/Matx.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | diag_type, 3 | int, 4 | Matx_AddOp, 5 | Matx_DivOp, 6 | Matx_MatMulOp, 7 | Matx_MulOp, 8 | Matx_ScaleOp, 9 | Matx_SubOp, 10 | Matx_TOp, 11 | Vec, 12 | _T2, 13 | _Tp, 14 | } from "./_types"; 15 | 16 | /** 17 | * If you need a more flexible type, use [Mat](#d3/d63/classcv_1_1Mat}) . The elements of the matrix M 18 | * are accessible using the M(i,j) notation. 
Most of the common matrix operations (see also 19 | * [MatrixExpressions](#d1/d10/classcv_1_1MatExpr_1MatrixExpressions}) ) are available. To do an 20 | * operation on [Matx](#de/de1/classcv_1_1Matx}) that is not implemented, you can easily convert the 21 | * matrix to [Mat](#d3/d63/classcv_1_1Mat}) and backwards: 22 | * 23 | * ```cpp 24 | * Matx33f m(1, 2, 3, 25 | * 4, 5, 6, 26 | * 7, 8, 9); 27 | * cout << sum(Mat(m*m.t())) << endl; 28 | * ``` 29 | * 30 | * Except of the plain constructor which takes a list of elements, [Matx](#de/de1/classcv_1_1Matx}) 31 | * can be initialized from a C-array: 32 | * 33 | * ```cpp 34 | * float values[] = { 1, 2, 3}; 35 | * Matx31f m(values); 36 | * ``` 37 | * 38 | * In case if C++11 features are available, std::initializer_list can be also used to initialize 39 | * [Matx](#de/de1/classcv_1_1Matx}): 40 | * 41 | * ```cpp 42 | * Matx31f m = { 1, 2, 3}; 43 | * ``` 44 | * 45 | * Source: 46 | * [opencv2/core/matx.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/matx.hpp#L1185). 47 | * 48 | */ 49 | export declare class Matx { 50 | public val: _Tp; 51 | 52 | public constructor(); 53 | 54 | public constructor(v0: _Tp); 55 | 56 | public constructor(v0: _Tp, v1: _Tp); 57 | 58 | public constructor(v0: _Tp, v1: _Tp, v2: _Tp); 59 | 60 | public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp); 61 | 62 | public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp, v4: _Tp); 63 | 64 | public constructor(v0: _Tp, v1: _Tp, v2: _Tp, v3: _Tp, v4: _Tp, v5: _Tp); 65 | 66 | public constructor( 67 | v0: _Tp, 68 | v1: _Tp, 69 | v2: _Tp, 70 | v3: _Tp, 71 | v4: _Tp, 72 | v5: _Tp, 73 | v6: _Tp, 74 | ); 75 | 76 | public constructor( 77 | v0: _Tp, 78 | v1: _Tp, 79 | v2: _Tp, 80 | v3: _Tp, 81 | v4: _Tp, 82 | v5: _Tp, 83 | v6: _Tp, 84 | v7: _Tp, 85 | ); 86 | 87 | public constructor( 88 | v0: _Tp, 89 | v1: _Tp, 90 | v2: _Tp, 91 | v3: _Tp, 92 | v4: _Tp, 93 | v5: _Tp, 94 | v6: _Tp, 95 | v7: _Tp, 96 | v8: _Tp, 97 | ); 98 | 99 | public constructor( 100 | v0: _Tp, 101 | v1: _Tp, 102 | v2: _Tp, 103 | v3: _Tp, 104 | v4: _Tp, 105 | v5: _Tp, 106 | v6: _Tp, 107 | v7: _Tp, 108 | v8: _Tp, 109 | v9: _Tp, 110 | ); 111 | 112 | public constructor( 113 | v0: _Tp, 114 | v1: _Tp, 115 | v2: _Tp, 116 | v3: _Tp, 117 | v4: _Tp, 118 | v5: _Tp, 119 | v6: _Tp, 120 | v7: _Tp, 121 | v8: _Tp, 122 | v9: _Tp, 123 | v10: _Tp, 124 | v11: _Tp, 125 | ); 126 | 127 | public constructor( 128 | v0: _Tp, 129 | v1: _Tp, 130 | v2: _Tp, 131 | v3: _Tp, 132 | v4: _Tp, 133 | v5: _Tp, 134 | v6: _Tp, 135 | v7: _Tp, 136 | v8: _Tp, 137 | v9: _Tp, 138 | v10: _Tp, 139 | v11: _Tp, 140 | v12: _Tp, 141 | v13: _Tp, 142 | ); 143 | 144 | public constructor( 145 | v0: _Tp, 146 | v1: _Tp, 147 | v2: _Tp, 148 | v3: _Tp, 149 | v4: _Tp, 150 | v5: _Tp, 151 | v6: _Tp, 152 | v7: _Tp, 153 | v8: _Tp, 154 | v9: _Tp, 155 | v10: _Tp, 156 | v11: _Tp, 157 | v12: _Tp, 158 | v13: _Tp, 159 | v14: _Tp, 160 | v15: _Tp, 161 | ); 162 | 163 | public constructor(vals: any); 164 | 165 | public constructor(arg334: any); 166 | 167 | public constructor(a: Matx, b: Matx, arg335: Matx_AddOp); 168 | 169 | public constructor(a: Matx, b: Matx, arg336: Matx_SubOp); 170 | 171 | public constructor(arg337: any, a: Matx, alpha: _T2, arg338: Matx_ScaleOp); 172 | 173 | public constructor(a: Matx, b: Matx, arg339: Matx_MulOp); 174 | 175 | public constructor(a: Matx, b: Matx, arg340: Matx_DivOp); 176 | 177 | public constructor(l: int, a: Matx, b: Matx, arg341: Matx_MatMulOp); 178 | 179 | public constructor(a: Matx, arg342: Matx_TOp); 180 | 181 | public col(i: 
int): Matx; 182 | 183 | public ddot(v: Matx): Matx; 184 | 185 | public diag(): diag_type; 186 | 187 | public div(a: Matx): Matx; 188 | 189 | public dot(v: Matx): Matx; 190 | 191 | public get_minor(m1: int, n1: int, base_row: int, base_col: int): Matx; 192 | 193 | public inv(method?: int, p_is_ok?: any): Matx; 194 | 195 | public mul(a: Matx): Matx; 196 | 197 | public reshape(m1: int, n1: int): Matx; 198 | 199 | public row(i: int): Matx; 200 | 201 | public solve(l: int, rhs: Matx, flags?: int): Matx; 202 | 203 | public solve(rhs: Vec, method: int): Vec; 204 | 205 | public t(): Matx; 206 | 207 | public static all(alpha: _Tp): Matx; 208 | 209 | public static diag(d: diag_type): Matx; 210 | 211 | public static eye(): Matx; 212 | 213 | public static ones(): Matx; 214 | 215 | public static randn(a: _Tp, b: _Tp): Matx; 216 | 217 | public static randu(a: _Tp, b: _Tp): Matx; 218 | 219 | public static zeros(): Matx; 220 | } 221 | 222 | export declare const rows: any; // initializer: = m 223 | 224 | export declare const cols: any; // initializer: = n 225 | 226 | export declare const channels: any; // initializer: = rows*cols 227 | 228 | export declare const shortdim: any; // initializer: = (m < n ? m : n) 229 | -------------------------------------------------------------------------------- /src/types/opencv/CascadeClassifier.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | bool, 3 | double, 4 | FileNode, 5 | InputArray, 6 | int, 7 | Mat, 8 | Ptr, 9 | Size, 10 | } from "./_types"; 11 | 12 | export declare class CascadeClassifier extends Mat { 13 | public cc: Ptr; 14 | 15 | public constructor(); 16 | 17 | /** 18 | * @param filename Name of the file from which the classifier is loaded. 19 | */ 20 | public constructor(filename: String); 21 | 22 | /** 23 | * The function is parallelized with the TBB library. 24 | * 25 | * (Python) A face detection example using cascade classifiers can be found at 26 | * opencv_source_code/samples/python/facedetect.py 27 | * 28 | * @param image Matrix of the type CV_8U containing an image where objects are detected. 29 | * 30 | * @param objects Vector of rectangles where each rectangle contains the detected object, the 31 | * rectangles may be partially outside the original image. 32 | * 33 | * @param scaleFactor Parameter specifying how much the image size is reduced at each image scale. 34 | * 35 | * @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have 36 | * to retain it. 37 | * 38 | * @param flags Parameter with the same meaning for an old cascade as in the function 39 | * cvHaarDetectObjects. It is not used for a new cascade. 40 | * 41 | * @param minSize Minimum possible object size. Objects smaller than that are ignored. 42 | * 43 | * @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize == 44 | * minSize model is evaluated on single scale. 45 | */ 46 | public detectMultiScale( 47 | image: InputArray, 48 | objects: any, 49 | scaleFactor?: double, 50 | minNeighbors?: int, 51 | flags?: int, 52 | minSize?: Size, 53 | maxSize?: Size, 54 | ): InputArray; 55 | 56 | /** 57 | * This is an overloaded member function, provided for convenience. It differs from the above 58 | * function only in what argument(s) it accepts. 59 | * 60 | * @param image Matrix of the type CV_8U containing an image where objects are detected. 
61 | * 62 | * @param objects Vector of rectangles where each rectangle contains the detected object; the 63 | * rectangles may be partially outside the original image. 64 | * 65 | * @param numDetections Vector of detection numbers for the corresponding objects. An object's number 66 | * of detections is the number of neighboring positively classified rectangles that were joined 67 | * together to form the object. 68 | * 69 | * @param scaleFactor Parameter specifying how much the image size is reduced at each image scale. 70 | * 71 | * @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have 72 | * to retain it. 73 | * 74 | * @param flags Parameter with the same meaning for an old cascade as in the function 75 | * cvHaarDetectObjects. It is not used for a new cascade. 76 | * 77 | * @param minSize Minimum possible object size. Objects smaller than that are ignored. 78 | * 79 | * @param maxSize Maximum possible object size. Objects larger than that are ignored. If maxSize == 80 | * minSize model is evaluated on single scale. 81 | */ 82 | public detectMultiScale( 83 | image: InputArray, 84 | objects: any, 85 | numDetections: any, 86 | scaleFactor?: double, 87 | minNeighbors?: int, 88 | flags?: int, 89 | minSize?: Size, 90 | maxSize?: Size, 91 | ): InputArray; 92 | 93 | /** 94 | * This is an overloaded member function, provided for convenience. It differs from the above 95 | * function only in what argument(s) it accepts. This function allows you to retrieve the final stage 96 | * decision certainty of classification. For this, one needs to set `outputRejectLevels` to true and 97 | * provide the `rejectLevels` and `levelWeights` parameters. For each resulting detection, 98 | * `levelWeights` will then contain the certainty of classification at the final stage. This value can 99 | * then be used to separate strong from weaker classifications. 100 | * 101 | * A code sample on how to use it efficiently can be found below: 102 | * 103 | * ```cpp 104 | * Mat img; 105 | * vector<double> weights; 106 | * vector<int> levels; 107 | * vector<Rect> detections; 108 | * CascadeClassifier model("/path/to/your/model.xml"); 109 | * model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true); 110 | * cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl; 111 | * ``` 112 | */ 113 | public detectMultiScale( 114 | image: InputArray, 115 | objects: any, 116 | rejectLevels: any, 117 | levelWeights: any, 118 | scaleFactor?: double, 119 | minNeighbors?: int, 120 | flags?: int, 121 | minSize?: Size, 122 | maxSize?: Size, 123 | outputRejectLevels?: bool, 124 | ): InputArray; 125 | 126 | public empty(): bool; 127 | 128 | public getFeatureType(): int; 129 | 130 | public getMaskGenerator(): Ptr; 131 | 132 | public getOldCascade(): any; 133 | 134 | public getOriginalWindowSize(): Size; 135 | 136 | public isOldFormatCascade(): bool; 137 | 138 | /** 139 | * @param filename Name of the file from which the classifier is loaded. The file may contain an old 140 | * HAAR classifier trained by the haartraining application or a new cascade classifier trained by the 141 | * traincascade application. 142 | */ 143 | public load(filename: String): String; 144 | 145 | /** 146 | * The file may contain a new cascade classifier (trained by the traincascade application) only.
147 | */ 148 | public read(node: FileNode): FileNode; 149 | 150 | public setMaskGenerator(maskGenerator: Ptr): Ptr; 151 | 152 | public static convert(oldcascade: String, newcascade: String): String; 153 | } 154 | -------------------------------------------------------------------------------- /src/types/opencv/objdetect.ts: -------------------------------------------------------------------------------- 1 | import type { double, int, Size } from "./_types"; 2 | /* 3 | * # Object Detection 4 | * ## Haar Feature-based Cascade Classifier for Object Detection 5 | * 6 | * 7 | * The object detector described below has been initially proposed by Paul Viola [Viola01] and improved by Rainer Lienhart [Lienhart02]. 8 | * 9 | * First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is trained with a few hundred sample views of a particular object (i.e., a face or a car), called positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary images of the same size. 10 | * 11 | * After a classifier is trained, it can be applied to a region of interest (of the same size as used during the training) in an input image. The classifier outputs a "1" if the region is likely to show the object (i.e., face/car), and "0" otherwise. To search for the object in the whole image one can move the search window across the image and check every location using the classifier. The classifier is designed so that it can be easily "resized" in order to be able to find the objects of interest at different sizes, which is more efficient than resizing the image itself. So, to find an object of an unknown size in the image the scan procedure should be done several times at different scales. 12 | * 13 | * The word "cascade" in the classifier name means that the resultant classifier consists of several simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some stage the candidate is rejected or all the stages are passed. The word "boosted" means that the classifiers at every stage of the cascade are complex themselves and they are built out of basic classifiers using one of four different boosting techniques (weighted voting). Currently Discrete Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic classifiers, and are calculated as described below. The current algorithm uses the following Haar-like features: 14 | * 15 | * 16 | * The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within the region of interest and the scale (this scale is not the same as the scale used at the detection stage, though these two scales are multiplied). For example, in the case of the third line feature (2c) the response is calculated as the difference between the sum of image pixels under the rectangle covering the whole feature (including the two white stripes and the black stripe in the middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to compensate for the differences in the size of areas. The sums of pixel values over rectangular regions are calculated rapidly using integral images (see below and the integral description). 17 | * 18 | * To see the object detector at work, have a look at the facedetect demo. 19 | * 20 | * The following reference is for the detection part only.
There is a separate application called opencv_traincascade that can train a cascade of boosted classifiers from a set of samples. 21 | * 22 | * 23 | * 24 | * In the new C++ interface it is also possible to use LBP (local binary pattern) features in addition to Haar-like features. [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. 25 | */ 26 | export declare function createFaceDetectionMaskGenerator(): any; 27 | 28 | /** 29 | * The function is a wrapper for the generic function partition. It clusters all the input rectangles 30 | * using the rectangle equivalence criteria that combines rectangles with similar sizes and similar 31 | * locations. The similarity is defined by eps. When eps=0, no clustering is done at all. If 32 | * `$\\texttt{eps}\\rightarrow +\\inf$`, all the rectangles are put in one cluster. Then, the small 33 | * clusters containing less than or equal to groupThreshold rectangles are rejected. In each other 34 | * cluster, the average rectangle is computed and put into the output rectangle list. 35 | * 36 | * @param rectList Input/output vector of rectangles. Output vector includes retained and grouped 37 | * rectangles. (The Python list is not modified in place.) 38 | * 39 | * @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a 40 | * group of rectangles to retain it. 41 | * 42 | * @param eps Relative difference between sides of the rectangles to merge them into a group. 43 | */ 44 | export declare function groupRectangles( 45 | rectList: any, 46 | groupThreshold: int, 47 | eps?: double, 48 | ): void; 49 | 50 | /** 51 | * This is an overloaded member function, provided for convenience. It differs from the above function 52 | * only in what argument(s) it accepts. 53 | */ 54 | export declare function groupRectangles( 55 | rectList: any, 56 | weights: any, 57 | groupThreshold: int, 58 | eps?: double, 59 | ): void; 60 | 61 | /** 62 | * This is an overloaded member function, provided for convenience. It differs from the above function 63 | * only in what argument(s) it accepts. 64 | */ 65 | export declare function groupRectangles( 66 | rectList: any, 67 | groupThreshold: int, 68 | eps: double, 69 | weights: any, 70 | levelWeights: any, 71 | ): void; 72 | 73 | /** 74 | * This is an overloaded member function, provided for convenience. It differs from the above function 75 | * only in what argument(s) it accepts. 76 | */ 77 | export declare function groupRectangles( 78 | rectList: any, 79 | rejectLevels: any, 80 | levelWeights: any, 81 | groupThreshold: int, 82 | eps?: double, 83 | ): void; 84 | 85 | /** 86 | * This is an overloaded member function, provided for convenience. It differs from the above function 87 | * only in what argument(s) it accepts.
88 |  */
89 | export declare function groupRectangles_meanshift(
90 |   rectList: any,
91 |   foundWeights: any,
92 |   foundScales: any,
93 |   detectThreshold?: double,
94 |   winDetSize?: Size,
95 | ): void;
96 |
97 | export declare const CASCADE_DO_CANNY_PRUNING: any; // initializer: = 1
98 |
99 | export declare const CASCADE_SCALE_IMAGE: any; // initializer: = 2
100 |
101 | export declare const CASCADE_FIND_BIGGEST_OBJECT: any; // initializer: = 4
102 |
103 | export declare const CASCADE_DO_ROUGH_SEARCH: any; // initializer: = 8
104 |
105 | export { QRCodeDetector } from "./QRCodeDetector";
106 | export {
107 |   QRCodeDetectorAruco,
108 |   QRCodeDetectorAruco_Params,
109 | } from "./QRCodeDetectorAruco";
110 |
--------------------------------------------------------------------------------
/src/types/opencv/Affine3.ts:
--------------------------------------------------------------------------------
1 | import type { float_type, int, Mat, Mat3, Mat4, Vec3 } from "./_types";
2 |
3 | /**
4 |  * It represents a 4x4 homogeneous transformation matrix `$T$`.
5 |  *
6 |  * `\\[T = \\begin{bmatrix} R & t\\\\ 0 & 1\\\\ \\end{bmatrix} \\]`
7 |  *
8 |  * where `$R$` is a 3x3 rotation matrix and `$t$` is a 3x1 translation vector.
9 |  *
10 |  * You can specify `$R$` either by a 3x3 rotation matrix or by a 3x1 rotation vector, which is
11 |  * converted to a 3x3 rotation matrix by the Rodrigues formula.
12 |  *
13 |  * To construct a matrix `$T$` representing first rotation around the axis `$r$` with rotation angle
14 |  * `$|r|$` in radian (right hand rule) and then translation by the vector `$t$`, you can use
15 |  *
16 |  * ```cpp
17 |  * cv::Vec3f r, t;
18 |  * cv::Affine3f T(r, t);
19 |  * ```
20 |  *
21 |  * If you already have the rotation matrix `$R$`, then you can use
22 |  *
23 |  * ```cpp
24 |  * cv::Matx33f R;
25 |  * cv::Affine3f T(R, t);
26 |  * ```
27 |  *
28 |  * To extract the rotation matrix `$R$` from `$T$`, use
29 |  *
30 |  * ```cpp
31 |  * cv::Matx33f R = T.rotation();
32 |  * ```
33 |  *
34 |  * To extract the translation vector `$t$` from `$T$`, use
35 |  *
36 |  * ```cpp
37 |  * cv::Vec3f t = T.translation();
38 |  * ```
39 |  *
40 |  * To extract the rotation vector `$r$` from `$T$`, use
41 |  *
42 |  * ```cpp
43 |  * cv::Vec3f r = T.rvec();
44 |  * ```
45 |  *
46 |  * Note that the mapping from rotation vectors to rotation matrices is many to one, so the returned
47 |  * rotation vector is not necessarily the one you used before to set the matrix.
48 |  *
49 |  * If you have two transformations `$T = T_1 * T_2$`, use
50 |  *
51 |  * ```cpp
52 |  * cv::Affine3f T, T1, T2;
53 |  * T = T2.concatenate(T1);
54 |  * ```
55 |  *
56 |  * To get the inverse transform of `$T$`, use
57 |  *
58 |  * ```cpp
59 |  * cv::Affine3f T, T_inv;
60 |  * T_inv = T.inv();
61 |  * ```
62 |  *
63 |  * Source:
64 |  * [opencv2/core/affine.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core/affine.hpp#L129).
65 |  *
66 |  */
67 | export declare class Affine3 {
68 |   public matrix: Mat4;
69 |
70 |   public constructor();
71 |
72 |   public constructor(affine: Mat4);
73 |
74 |   /**
75 |    * The resulting 4x4 matrix is
76 |    *
77 |    * `\\[ \\begin{bmatrix} R & t\\\\ 0 & 1\\\\ \\end{bmatrix} \\]`
78 |    *
79 |    * @param R 3x3 rotation matrix.
80 |    *
81 |    * @param t 3x1 translation vector.
82 |    */
83 |   public constructor(R: Mat3, t?: Vec3);
84 |
85 |   /**
86 |    * Rodrigues vector.
87 |    *
88 |    * The last row of the current matrix is set to [0,0,0,1].
89 |    *
90 |    * @param rvec 3x1 rotation vector.
Its direction indicates the rotation axis and its length 91 | * indicates the rotation angle in radian (using right hand rule). 92 | * 93 | * @param t 3x1 translation vector. 94 | */ 95 | public constructor(rvec: Vec3, t?: Vec3); 96 | 97 | /** 98 | * Combines all constructors above. Supports 4x4, 3x4, 3x3, 1x3, 3x1 sizes of data matrix. 99 | * 100 | * The last row of the current matrix is set to [0,0,0,1] when data is not 4x4. 101 | * 102 | * @param data 1-channel matrix. when it is 4x4, it is copied to the current matrix and t is not 103 | * used. When it is 3x4, it is copied to the upper part 3x4 of the current matrix and t is not used. 104 | * When it is 3x3, it is copied to the upper left 3x3 part of the current matrix. When it is 3x1 or 105 | * 1x3, it is treated as a rotation vector and the Rodrigues formula is used to compute a 3x3 rotation 106 | * matrix. 107 | * 108 | * @param t 3x1 translation vector. It is used only when data is neither 4x4 nor 3x4. 109 | */ 110 | public constructor(data: Mat, t?: Vec3); 111 | 112 | public constructor(vals: float_type); 113 | 114 | public cast(arg401: any): Affine3; 115 | 116 | public concatenate(affine: Affine3): Affine3; 117 | 118 | /** 119 | * the inverse of the current matrix. 120 | */ 121 | public inv(method?: int): Affine3; 122 | 123 | /** 124 | * Copy the 3x3 matrix L to the upper left part of the current matrix 125 | * 126 | * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. 127 | * 128 | * @param L 3x3 matrix. 129 | */ 130 | public linear(L: Mat3): Mat3; 131 | 132 | /** 133 | * the upper left 3x3 part 134 | */ 135 | public linear(): Mat3; 136 | 137 | public rotate(R: Mat3): Affine3; 138 | 139 | public rotate(rvec: Vec3): Affine3; 140 | 141 | /** 142 | * Rotation matrix. 143 | * 144 | * Copy the rotation matrix to the upper left 3x3 part of the current matrix. The remaining elements 145 | * of the current matrix are not changed. 146 | * 147 | * @param R 3x3 rotation matrix. 148 | */ 149 | public rotation(R: Mat3): Mat3; 150 | 151 | /** 152 | * Rodrigues vector. 153 | * 154 | * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. 155 | * 156 | * @param rvec 3x1 rotation vector. The direction indicates the rotation axis and its length 157 | * indicates the rotation angle in radian (using the right thumb convention). 158 | */ 159 | public rotation(rvec: Vec3): Vec3; 160 | 161 | /** 162 | * Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix. 163 | * 164 | * It sets the upper left 3x3 part of the matrix. The remaining part is unaffected. 165 | * 166 | * @param data 1-channel matrix. When it is a 3x3 matrix, it sets the upper left 3x3 part of the 167 | * current matrix. When it is a 1x3 or 3x1 matrix, it is used as a rotation vector. The Rodrigues 168 | * formula is used to compute the rotation matrix and sets the upper left 3x3 part of the current 169 | * matrix. 170 | */ 171 | public rotation(data: Mat): Mat; 172 | 173 | /** 174 | * the upper left 3x3 part 175 | */ 176 | public rotation(): Mat3; 177 | 178 | /** 179 | * Rodrigues vector. 180 | * 181 | * a vector representing the upper left 3x3 rotation matrix of the current matrix. 182 | * 183 | * Since the mapping between rotation vectors and rotation matrices is many to one, this function 184 | * returns only one rotation vector that represents the current rotation matrix, which is not 185 | * necessarily the same one set by `[rotation(const Vec3& rvec)]`. 
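 *
 * For example, the rotation vectors `$r$` and `$r (1 + 2\\pi / |r|)$` produce the
 * same rotation matrix, since both rotate about the same axis by angles that
 * differ by exactly `$2\\pi$`.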
186 |    */
187 |   public rvec(): Vec3;
188 |
189 |   public translate(t: Vec3): Affine3;
190 |
191 |   /**
192 |    * Copy t to the first three elements of the last column of the current matrix.
193 |    *
194 |    * It sets the upper right 3x1 part of the matrix. The remaining part is unaffected.
195 |    *
196 |    * @param t 3x1 translation vector.
197 |    */
198 |   public translation(t: Vec3): Vec3;
199 |
200 |   /**
201 |    * the upper right 3x1 part
202 |    */
203 |   public translation(): Vec3;
204 |
205 |   public static Identity(): Affine3;
206 | }
207 |
--------------------------------------------------------------------------------
/.github/copilot-instructions.md:
--------------------------------------------------------------------------------
1 | # OpenCV-JS Package Development Instructions
2 |
3 | Always reference these instructions first and fall back to search or bash commands only when you encounter unexpected information that does not match the info here.
4 |
5 | OpenCV-JS is a TypeScript NPM package that provides OpenCV.js (JavaScript/WebAssembly version of OpenCV) for both Node.js and browser environments. The package wraps a pre-built 11MB OpenCV.js WASM binary with TypeScript type definitions.
6 |
7 | ## Working Effectively
8 |
9 | ### Initial Setup and Build
10 | - Install dependencies: `npm install` -- takes ~15 seconds
11 | - Build TypeScript: `npm run build` -- takes ~2 seconds. NEVER CANCEL. Set timeout to 60+ seconds.
12 | - Run tests: `npm test` -- takes ~8 seconds. NEVER CANCEL. Set timeout to 300+ seconds.
13 | - Format code: `npm run format` -- takes ~1 second
14 |
15 | ### Build Process Validation
16 | - ALWAYS run the complete build process: `npm install && npm run build && npm test`
17 | - Test package creation: `npm pack` -- creates .tgz file for distribution testing
18 | - ALWAYS run `npm audit fix` to address security vulnerabilities before committing
19 |
20 | ### Manual Testing and Validation
21 | - ALWAYS test OpenCV functionality after making changes using this pattern:
22 | ```javascript
23 | const cv = await require('./dist/opencv.js');
24 | global.cv = cv;
25 | const mat = new cv.Mat(3, 3, cv.CV_8UC1);
26 | console.log(`Mat: ${mat.rows}x${mat.cols}, channels: ${mat.channels()}`);
27 | mat.delete(); // CRITICAL: Always call delete() for memory management
28 | ```
29 |
30 | ### Memory Management Requirements
31 | - ALWAYS call `.delete()` on OpenCV objects (Mat, Size, etc.)
to prevent memory leaks 32 | - NEVER forget memory cleanup in tests and examples 33 | - Use try/catch with proper cleanup in finally blocks when appropriate 34 | 35 | ## Package Usage Patterns 36 | 37 | ### v4.11+ API (Current) 38 | ```javascript 39 | import cvReadyPromise from "@techstark/opencv-js"; 40 | const cv = await cvReadyPromise; 41 | // Use cv here 42 | ``` 43 | 44 | ### v4.10- API (Legacy) 45 | ```javascript 46 | import cv from "@techstark/opencv-js"; 47 | cv.onRuntimeInitialized = () => { 48 | // Use cv here 49 | }; 50 | ``` 51 | 52 | ### Browser Configuration 53 | - ALWAYS include webpack polyfills for browser usage: 54 | ```javascript 55 | module.exports = { 56 | resolve: { 57 | fallback: { 58 | fs: false, 59 | path: false, 60 | crypto: false 61 | } 62 | } 63 | }; 64 | ``` 65 | 66 | ## Testing and Validation 67 | 68 | ### Running Tests 69 | - Unit tests use Jest with TypeScript preset 70 | - Tests validate: Mat operations, image processing, color conversions, tracking 71 | - Test files include: `Mat.test.ts`, `Tracker.test.ts`, `rect.test.ts`, `cvKeys.test.ts` 72 | - ALWAYS wait for async OpenCV initialization in tests using `setupOpenCv()` helper 73 | 74 | ### Key Validation Scenarios 75 | After making changes, ALWAYS test these scenarios: 76 | 1. Basic Mat creation and property access 77 | 2. Color space conversion (RGBA2GRAY) 78 | 3. Image filtering operations (GaussianBlur, threshold) 79 | 4. Contour detection and processing 80 | 5. Memory cleanup with .delete() calls 81 | 82 | ### Testing with Real Images 83 | - Use `test/Lenna.png` for image processing tests 84 | - Use Jimp library for loading images in Node.js environment 85 | - Pattern: `const jimpSrc = await Jimp.read(path); const img = cv.matFromImageData(jimpSrc.bitmap);` 86 | 87 | ## File Structure and Navigation 88 | 89 | ### Key Directories 90 | - `src/` - TypeScript source (mainly type definitions) 91 | - `src/index.ts` - Main export (32 bytes, exports from types/opencv) 92 | - `src/types/` - Comprehensive OpenCV type definitions 93 | - `test/` - Jest test files with OpenCV functionality validation 94 | - `dist/` - Build output including the 11MB opencv.js binary 95 | - `.github/workflows/` - CI/CD configuration 96 | 97 | ### Important Files 98 | - `dist/opencv.js` - Pre-built OpenCV.js WASM binary (11MB, core functionality) 99 | - `package.json` - NPM configuration with build/test scripts 100 | - `tsconfig.json` - TypeScript compilation settings 101 | - `jest.config.js` - Jest testing configuration 102 | - `doc/cvKeys.json` - Runtime OpenCV methods and properties reference 103 | 104 | ### Type Definitions Structure 105 | - Over 100 TypeScript definition files in `src/types/opencv/` 106 | - Core modules: Mat.ts, core_array.ts, imgproc_*.ts, calib3d.ts 107 | - Always check existing type definitions before adding new ones 108 | 109 | ## CI/CD and Publishing 110 | 111 | ### GitHub Workflows 112 | - `unit-test.yml` - Runs on PRs/pushes, uses Node.js 20.x, sets NODE_OPTIONS for memory 113 | - `build-opencv.yml` - Manual workflow for building opencv.js from source (uses Emscripten 2.0.10) 114 | - `npm-publish.yml` - Publishes to NPM on releases 115 | 116 | ### Build Requirements 117 | - Node.js 20.x (as used in CI) 118 | - NEVER CANCEL builds or tests - they may take longer than expected 119 | - Set explicit timeouts: 60+ seconds for builds, 300+ seconds for tests 120 | 121 | ## Common Development Tasks 122 | 123 | ### Adding New OpenCV Features 124 | 1. Check if types exist in `src/types/opencv/` 125 | 2. 
Add type definitions following existing patterns 126 | 3. Create test in appropriate test file 127 | 4. Run full validation: `npm run build && npm test` 128 | 5. Test with real OpenCV operations, not just TypeScript compilation 129 | 130 | ### Updating Dependencies 131 | - Use `npm audit fix` for security updates 132 | - Test package creation with `npm pack` after updates 133 | - ALWAYS run full test suite after dependency changes 134 | 135 | ### Memory Debugging 136 | - Check for missing `.delete()` calls in tests and examples 137 | - Use `translateException()` helper for OpenCV error handling 138 | - Monitor memory usage in long-running operations 139 | 140 | ## Browser vs Node.js Differences 141 | 142 | ### Node.js Environment 143 | - Direct require() of opencv.js works 144 | - Can use filesystem for loading images 145 | - Full OpenCV functionality available 146 | 147 | ### Browser Environment 148 | - Requires webpack polyfills (fs: false, path: false, crypto: false) 149 | - Must handle async WASM loading 150 | - Limited to browser-compatible image loading methods 151 | 152 | ## Performance and Timing Expectations 153 | 154 | ### Build Times (NEVER CANCEL) 155 | - `npm install`: ~15 seconds 156 | - `npm run build`: ~2 seconds 157 | - `npm test`: ~8 seconds 158 | - `npm run format`: ~1 second 159 | - Full CI pipeline: ~2-3 minutes 160 | 161 | ### Package Size 162 | - Source: ~12.8MB unpacked, ~4.1MB packed 163 | - Main contributor: dist/opencv.js (11MB WASM binary) 164 | - 214 files total in package 165 | 166 | ## Troubleshooting 167 | 168 | ### Common Issues 169 | - **TypeError: cv.Mat is not a constructor** - OpenCV not properly initialized, use await pattern 170 | - **Memory issues** - Missing .delete() calls on OpenCV objects 171 | - **Browser webpack errors** - Missing fallback polyfills configuration 172 | - **Test failures** - OpenCV async initialization not awaited in setupOpenCv() 173 | 174 | ### Debug Commands 175 | - `node -e "console.log(require('./dist/opencv.js'))"` - Test opencv.js loading 176 | - `npm audit` - Check for security vulnerabilities 177 | - `npm pack && tar -tzf *.tgz | head -20` - Inspect package contents 178 | 179 | ALWAYS follow these patterns for reliable OpenCV-JS development and avoid common pitfalls with async initialization and memory management. 
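### Example: Async Init Plus Guaranteed Cleanup

A minimal sketch that combines the async initialization and memory-management rules above (the `cvReadyPromise` import is the documented v4.11+ API; the function name and values are illustrative, not part of the package):

```typescript
import cvReadyPromise from "@techstark/opencv-js";

async function edgeDemo(): Promise<void> {
  const cv = await cvReadyPromise; // wait for the WASM runtime
  const src = new cv.Mat(240, 320, cv.CV_8UC4);
  const gray = new cv.Mat();
  const edges = new cv.Mat();
  try {
    cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY);
    cv.Canny(gray, edges, 50, 150);
    console.log(`edges: ${edges.rows}x${edges.cols}`);
  } finally {
    // CRITICAL: release every Mat, even on error paths
    src.delete();
    gray.delete();
    edges.delete();
  }
}
```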
-------------------------------------------------------------------------------- /src/types/emscripten.ts: -------------------------------------------------------------------------------- 1 | interface Lookup { 2 | path: string; 3 | node: FSNode; 4 | } 5 | interface FSStream {} 6 | interface FSNode {} 7 | 8 | export interface FS { 9 | lookupPath(path: string, opts: any): Lookup; 10 | getPath(node: FSNode): string; 11 | 12 | isFile(mode: number): boolean; 13 | isDir(mode: number): boolean; 14 | isLink(mode: number): boolean; 15 | isChrdev(mode: number): boolean; 16 | isBlkdev(mode: number): boolean; 17 | isFIFO(mode: number): boolean; 18 | isSocket(mode: number): boolean; 19 | 20 | major(dev: number): number; 21 | minor(dev: number): number; 22 | makedev(ma: number, mi: number): number; 23 | registerDevice(dev: number, ops: any): void; 24 | 25 | syncfs(populate: boolean, callback: (e: any) => any): void; 26 | syncfs(callback: (e: any) => any, populate?: boolean): void; 27 | mount(type: any, opts: any, mountpoint: string): any; 28 | unmount(mountpoint: string): void; 29 | 30 | mkdir(path: string, mode?: number): any; 31 | mkdev(path: string, mode?: number, dev?: number): any; 32 | symlink(oldpath: string, newpath: string): any; 33 | rename(old_path: string, new_path: string): void; 34 | rmdir(path: string): void; 35 | readdir(path: string): string[]; 36 | unlink(path: string): void; 37 | readlink(path: string): string; 38 | stat(path: string, dontFollow?: boolean): any; 39 | lstat(path: string): any; 40 | chmod(path: string, mode: number, dontFollow?: boolean): void; 41 | lchmod(path: string, mode: number): void; 42 | fchmod(fd: number, mode: number): void; 43 | chown(path: string, uid: number, gid: number, dontFollow?: boolean): void; 44 | lchown(path: string, uid: number, gid: number): void; 45 | fchown(fd: number, uid: number, gid: number): void; 46 | truncate(path: string, len: number): void; 47 | ftruncate(fd: number, len: number): void; 48 | utime(path: string, atime: number, mtime: number): void; 49 | open( 50 | path: string, 51 | flags: string, 52 | mode?: number, 53 | fd_start?: number, 54 | fd_end?: number, 55 | ): FSStream; 56 | close(stream: FSStream): void; 57 | llseek(stream: FSStream, offset: number, whence: number): any; 58 | read( 59 | stream: FSStream, 60 | buffer: ArrayBufferView, 61 | offset: number, 62 | length: number, 63 | position?: number, 64 | ): number; 65 | write( 66 | stream: FSStream, 67 | buffer: ArrayBufferView, 68 | offset: number, 69 | length: number, 70 | position?: number, 71 | canOwn?: boolean, 72 | ): number; 73 | allocate(stream: FSStream, offset: number, length: number): void; 74 | mmap( 75 | stream: FSStream, 76 | buffer: ArrayBufferView, 77 | offset: number, 78 | length: number, 79 | position: number, 80 | prot: number, 81 | flags: number, 82 | ): any; 83 | ioctl(stream: FSStream, cmd: any, arg: any): any; 84 | readFile( 85 | path: string, 86 | opts?: { encoding: string; flags: string }, 87 | ): ArrayBufferView; 88 | writeFile( 89 | path: string, 90 | data: ArrayBufferView, 91 | opts?: { encoding: string; flags: string }, 92 | ): void; 93 | writeFile( 94 | path: string, 95 | data: string, 96 | opts?: { encoding: string; flags: string }, 97 | ): void; 98 | analyzePath(p: string): any; 99 | cwd(): string; 100 | chdir(path: string): void; 101 | init( 102 | input: () => number, 103 | output: (c: number) => any, 104 | error: (c: number) => any, 105 | ): void; 106 | 107 | createLazyFile( 108 | parent: string, 109 | name: string, 110 | url: string, 111 | 
canRead: boolean, 112 | canWrite: boolean, 113 | ): FSNode; 114 | createLazyFile( 115 | parent: FSNode, 116 | name: string, 117 | url: string, 118 | canRead: boolean, 119 | canWrite: boolean, 120 | ): FSNode; 121 | 122 | createPreloadedFile( 123 | parent: string, 124 | name: string, 125 | url: string, 126 | canRead: boolean, 127 | canWrite: boolean, 128 | onload?: () => void, 129 | onerror?: () => void, 130 | dontCreateFile?: boolean, 131 | canOwn?: boolean, 132 | ): void; 133 | createPreloadedFile( 134 | parent: FSNode, 135 | name: string, 136 | url: string, 137 | canRead: boolean, 138 | canWrite: boolean, 139 | onload?: () => void, 140 | onerror?: () => void, 141 | dontCreateFile?: boolean, 142 | canOwn?: boolean, 143 | ): void; 144 | 145 | createDataFile( 146 | parent: string, 147 | name: string, 148 | data: ArrayBufferView, 149 | canRead: boolean, 150 | canWrite: boolean, 151 | canOwn: boolean, 152 | ): void; 153 | } 154 | 155 | export interface EmscriptenModule { 156 | print(str: string): void; 157 | printErr(str: string): void; 158 | arguments: string[]; 159 | environment: EnvironmentType; 160 | preInit: Array<{ (): void }>; 161 | preRun: Array<{ (): void }>; 162 | postRun: Array<{ (): void }>; 163 | onAbort: { (what: any): void }; 164 | onRuntimeInitialized: { (): void }; 165 | preinitializedWebGLContext: WebGLRenderingContext; 166 | noInitialRun: boolean; 167 | noExitRuntime: boolean; 168 | logReadFiles: boolean; 169 | filePackagePrefixURL: string; 170 | wasmBinary: ArrayBuffer; 171 | 172 | destroy(object: object): void; 173 | getPreloadedPackage( 174 | remotePackageName: string, 175 | remotePackageSize: number, 176 | ): ArrayBuffer; 177 | instantiateWasm( 178 | imports: WebAssemblyImports, 179 | successCallback: (module: WebAssemblyModule) => void, 180 | ): WebAssemblyExports; 181 | locateFile(url: string): string; 182 | onCustomMessage(event: MessageEvent): void; 183 | 184 | Runtime: any; 185 | 186 | ccall( 187 | ident: string, 188 | returnType: ValueType | null, 189 | argTypes: ValueType[], 190 | args: TypeCompatibleWithC[], 191 | opts?: CCallOpts, 192 | ): any; 193 | cwrap( 194 | ident: string, 195 | returnType: ValueType | null, 196 | argTypes: ValueType[], 197 | opts?: CCallOpts, 198 | ): (...args: any[]) => any; 199 | 200 | setValue(ptr: number, value: any, type: string, noSafe?: boolean): void; 201 | getValue(ptr: number, type: string, noSafe?: boolean): number; 202 | 203 | ALLOC_NORMAL: number; 204 | ALLOC_STACK: number; 205 | ALLOC_STATIC: number; 206 | ALLOC_DYNAMIC: number; 207 | ALLOC_NONE: number; 208 | 209 | allocate( 210 | slab: any, 211 | types: string | string[], 212 | allocator: number, 213 | ptr: number, 214 | ): number; 215 | 216 | // USE_TYPED_ARRAYS == 1 217 | HEAP: Int32Array; 218 | IHEAP: Int32Array; 219 | FHEAP: Float64Array; 220 | 221 | // USE_TYPED_ARRAYS == 2 222 | HEAP8: Int8Array; 223 | HEAP16: Int16Array; 224 | HEAP32: Int32Array; 225 | HEAPU8: Uint8Array; 226 | HEAPU16: Uint16Array; 227 | HEAPU32: Uint32Array; 228 | HEAPF32: Float32Array; 229 | HEAPF64: Float64Array; 230 | 231 | TOTAL_STACK: number; 232 | TOTAL_MEMORY: number; 233 | FAST_MEMORY: number; 234 | 235 | addOnPreRun(cb: () => any): void; 236 | addOnInit(cb: () => any): void; 237 | addOnPreMain(cb: () => any): void; 238 | addOnExit(cb: () => any): void; 239 | addOnPostRun(cb: () => any): void; 240 | 241 | // Tools 242 | intArrayFromString( 243 | stringy: string, 244 | dontAddNull?: boolean, 245 | length?: number, 246 | ): number[]; 247 | intArrayToString(array: number[]): string; 248 | 
writeStringToMemory(str: string, buffer: number, dontAddNull: boolean): void; 249 | writeArrayToMemory(array: number[], buffer: number): void; 250 | writeAsciiToMemory(str: string, buffer: number, dontAddNull: boolean): void; 251 | 252 | addRunDependency(id: any): void; 253 | removeRunDependency(id: any): void; 254 | 255 | preloadedImages: any; 256 | preloadedAudios: any; 257 | 258 | _malloc(size: number): number; 259 | _free(ptr: number): void; 260 | } 261 | 262 | // declare namespace Emscripten { 263 | interface FileSystemType {} 264 | type EnvironmentType = "WEB" | "NODE" | "SHELL" | "WORKER"; 265 | type ValueType = "number" | "string" | "array" | "boolean"; 266 | type TypeCompatibleWithC = number | string | any[] | boolean; 267 | 268 | type WebAssemblyImports = Array<{ 269 | name: string; 270 | kind: string; 271 | }>; 272 | 273 | type WebAssemblyExports = Array<{ 274 | module: string; 275 | name: string; 276 | kind: string; 277 | }>; 278 | 279 | interface CCallOpts { 280 | async?: boolean; 281 | } 282 | // } 283 | 284 | // declare namespace WebAssembly { 285 | interface WebAssemblyModule {} 286 | // } 287 | -------------------------------------------------------------------------------- /test/QRCodeDetector.test.ts: -------------------------------------------------------------------------------- 1 | import { Jimp } from "jimp"; 2 | import path from "path"; 3 | import { setupOpenCv } from "./cv"; 4 | 5 | beforeAll(async () => { 6 | await setupOpenCv(); 7 | }); 8 | 9 | describe("QRCodeDetector", () => { 10 | it("should be available as a class", () => { 11 | expect(cv.QRCodeDetector).toBeDefined(); 12 | expect(typeof cv.QRCodeDetector).toBe("function"); 13 | }); 14 | 15 | it("should be able to create QRCodeDetector instance", () => { 16 | const detector = new cv.QRCodeDetector(); 17 | expect(detector).toBeDefined(); 18 | expect(detector.constructor.name).toBe("QRCodeDetector"); 19 | }); 20 | 21 | it("should have detect method", () => { 22 | const detector = new cv.QRCodeDetector(); 23 | expect(detector.detect).toBeDefined(); 24 | expect(typeof detector.detect).toBe("function"); 25 | }); 26 | 27 | it("should have decode method", () => { 28 | const detector = new cv.QRCodeDetector(); 29 | expect(detector.decode).toBeDefined(); 30 | expect(typeof detector.decode).toBe("function"); 31 | }); 32 | 33 | it("should have detectAndDecode method", () => { 34 | const detector = new cv.QRCodeDetector(); 35 | expect(detector.detectAndDecode).toBeDefined(); 36 | expect(typeof detector.detectAndDecode).toBe("function"); 37 | }); 38 | 39 | it("should have detectMulti method", () => { 40 | const detector = new cv.QRCodeDetector(); 41 | expect(detector.detectMulti).toBeDefined(); 42 | expect(typeof detector.detectMulti).toBe("function"); 43 | }); 44 | 45 | it("should have decodeMulti method", () => { 46 | const detector = new cv.QRCodeDetector(); 47 | expect(detector.decodeMulti).toBeDefined(); 48 | expect(typeof detector.decodeMulti).toBe("function"); 49 | }); 50 | 51 | it("should have detectAndDecodeMulti method", () => { 52 | const detector = new cv.QRCodeDetector(); 53 | expect(detector.detectAndDecodeMulti).toBeDefined(); 54 | expect(typeof detector.detectAndDecodeMulti).toBe("function"); 55 | }); 56 | 57 | it("should be able to clean up detector", () => { 58 | const detector = new cv.QRCodeDetector(); 59 | // Just verify we can call delete without errors 60 | expect(() => detector.delete()).not.toThrow(); 61 | }); 62 | 63 | it("should detect and decode QR code from image", async () => { 64 | const 
detector = new cv.QRCodeDetector(); 65 | 66 | try { 67 | // Load the test QR code image 68 | const jimpSrc = await Jimp.read(path.resolve(__dirname, "test-qr.png")); 69 | const img = cv.matFromImageData(jimpSrc.bitmap); 70 | 71 | // Convert RGBA to BGR as OpenCV expects BGR format 72 | const imgBGR = new cv.Mat(); 73 | cv.cvtColor(img, imgBGR, cv.COLOR_RGBA2BGR); 74 | 75 | // Test detectAndDecode method 76 | const points = new cv.Mat(); 77 | const decodedText = detector.detectAndDecode(imgBGR, points); 78 | 79 | // Verify the decoded text matches what we encoded 80 | expect(decodedText).toBe("Hello OpenCV.js QR Test!"); 81 | 82 | // Verify points were detected (should have 4 corner points) 83 | expect(points.rows).toBeGreaterThan(0); 84 | expect(points.cols).toBeGreaterThan(0); 85 | 86 | // Clean up 87 | img.delete(); 88 | imgBGR.delete(); 89 | points.delete(); 90 | } finally { 91 | detector.delete(); 92 | } 93 | }); 94 | 95 | it("should detect QR code corners using detect method", async () => { 96 | const detector = new cv.QRCodeDetector(); 97 | 98 | try { 99 | // Load the test QR code image 100 | const jimpSrc = await Jimp.read(path.resolve(__dirname, "test-qr.png")); 101 | const img = cv.matFromImageData(jimpSrc.bitmap); 102 | 103 | // Convert RGBA to BGR as OpenCV expects BGR format 104 | const imgBGR = new cv.Mat(); 105 | cv.cvtColor(img, imgBGR, cv.COLOR_RGBA2BGR); 106 | 107 | // Test detect method 108 | const points = new cv.Mat(); 109 | const detected = detector.detect(imgBGR, points); 110 | 111 | // Verify QR code was detected 112 | expect(detected).toBe(true); 113 | expect(points.rows).toBeGreaterThan(0); 114 | expect(points.cols).toBeGreaterThan(0); 115 | 116 | // Clean up 117 | img.delete(); 118 | imgBGR.delete(); 119 | points.delete(); 120 | } finally { 121 | detector.delete(); 122 | } 123 | }); 124 | 125 | it("should decode previously detected QR code using decode method", async () => { 126 | const detector = new cv.QRCodeDetector(); 127 | 128 | try { 129 | // Load the test QR code image 130 | const jimpSrc = await Jimp.read(path.resolve(__dirname, "test-qr.png")); 131 | const img = cv.matFromImageData(jimpSrc.bitmap); 132 | 133 | // Convert RGBA to BGR as OpenCV expects BGR format 134 | const imgBGR = new cv.Mat(); 135 | cv.cvtColor(img, imgBGR, cv.COLOR_RGBA2BGR); 136 | 137 | // First detect the QR code 138 | const points = new cv.Mat(); 139 | const detected = detector.detect(imgBGR, points); 140 | expect(detected).toBe(true); 141 | 142 | // Then decode it using the detected points 143 | const decodedText = detector.decode(imgBGR, points); 144 | expect(decodedText).toBe("Hello OpenCV.js QR Test!"); 145 | 146 | // Clean up 147 | img.delete(); 148 | imgBGR.delete(); 149 | points.delete(); 150 | } finally { 151 | detector.delete(); 152 | } 153 | }); 154 | }); 155 | 156 | describe("QRCodeDetectorAruco", () => { 157 | it("should be available as a class", () => { 158 | expect(cv.QRCodeDetectorAruco).toBeDefined(); 159 | expect(typeof cv.QRCodeDetectorAruco).toBe("function"); 160 | }); 161 | 162 | it("should be able to create QRCodeDetectorAruco instance", () => { 163 | const detector = new cv.QRCodeDetectorAruco(); 164 | expect(detector).toBeDefined(); 165 | expect(detector.constructor.name).toBe("QRCodeDetectorAruco"); 166 | }); 167 | 168 | it("should have detect method", () => { 169 | const detector = new cv.QRCodeDetectorAruco(); 170 | expect(detector.detect).toBeDefined(); 171 | expect(typeof detector.detect).toBe("function"); 172 | }); 173 | 174 | it("should have decode 
method", () => { 175 | const detector = new cv.QRCodeDetectorAruco(); 176 | expect(detector.decode).toBeDefined(); 177 | expect(typeof detector.decode).toBe("function"); 178 | }); 179 | 180 | it("should have detectAndDecode method", () => { 181 | const detector = new cv.QRCodeDetectorAruco(); 182 | expect(detector.detectAndDecode).toBeDefined(); 183 | expect(typeof detector.detectAndDecode).toBe("function"); 184 | }); 185 | 186 | it("should be able to clean up detector", () => { 187 | const detector = new cv.QRCodeDetectorAruco(); 188 | // Just verify we can call delete without errors 189 | expect(() => detector.delete()).not.toThrow(); 190 | }); 191 | 192 | it("should detect and decode QR code from image using Aruco detector", async () => { 193 | const detector = new cv.QRCodeDetectorAruco(); 194 | 195 | try { 196 | // Load the test QR code image 197 | const jimpSrc = await Jimp.read(path.resolve(__dirname, "test-qr.png")); 198 | const img = cv.matFromImageData(jimpSrc.bitmap); 199 | 200 | // Convert RGBA to BGR as OpenCV expects BGR format 201 | const imgBGR = new cv.Mat(); 202 | cv.cvtColor(img, imgBGR, cv.COLOR_RGBA2BGR); 203 | 204 | // Test detectAndDecode method 205 | const points = new cv.Mat(); 206 | const decodedText = detector.detectAndDecode(imgBGR, points); 207 | 208 | // Verify the decoded text matches what we encoded 209 | expect(decodedText).toBe("Hello OpenCV.js QR Test!"); 210 | 211 | // Verify points were detected 212 | expect(points.rows).toBeGreaterThan(0); 213 | expect(points.cols).toBeGreaterThan(0); 214 | 215 | // Clean up 216 | img.delete(); 217 | imgBGR.delete(); 218 | points.delete(); 219 | } finally { 220 | detector.delete(); 221 | } 222 | }); 223 | }); 224 | 225 | describe("QRCodeDetectorAruco_Params", () => { 226 | it("should be available as a class", () => { 227 | expect(cv.QRCodeDetectorAruco_Params).toBeDefined(); 228 | expect(typeof cv.QRCodeDetectorAruco_Params).toBe("function"); 229 | }); 230 | 231 | it("should be able to create QRCodeDetectorAruco_Params instance", () => { 232 | const params = new cv.QRCodeDetectorAruco_Params(); 233 | expect(params).toBeDefined(); 234 | expect(params.constructor.name).toBe("QRCodeDetectorAruco_Params"); 235 | }); 236 | 237 | it("should have expected properties", () => { 238 | const params = new cv.QRCodeDetectorAruco_Params(); 239 | expect(params.minModuleSizeInPyramid).toBeDefined(); 240 | expect(params.maxRotation).toBeDefined(); 241 | expect(params.maxModuleSizeMismatch).toBeDefined(); 242 | expect(params.maxTimingPatternMismatch).toBeDefined(); 243 | expect(params.maxPenalties).toBeDefined(); 244 | expect(params.maxColorsMismatch).toBeDefined(); 245 | expect(params.scaleTimingPatternScore).toBeDefined(); 246 | }); 247 | 248 | it("should be able to clean up params", () => { 249 | const params = new cv.QRCodeDetectorAruco_Params(); 250 | // Just verify we can call delete without errors 251 | expect(() => params.delete()).not.toThrow(); 252 | }); 253 | }); -------------------------------------------------------------------------------- /src/types/opencv/PCA.ts: -------------------------------------------------------------------------------- 1 | import type { 2 | double, 3 | FileNode, 4 | FileStorage, 5 | InputArray, 6 | int, 7 | Mat, 8 | OutputArray, 9 | } from "./_types"; 10 | 11 | /** 12 | * The class is used to calculate a special basis for a set of vectors. The basis will consist of 13 | * eigenvectors of the covariance matrix calculated from the input set of vectors. 
The class PCA can 14 | * also transform vectors to/from the new coordinate space defined by the basis. Usually, in this new 15 | * coordinate system, each vector from the original set (and any linear combination of such vectors) 16 | * can be quite accurately approximated by taking its first few components, corresponding to the 17 | * eigenvectors of the largest eigenvalues of the covariance matrix. Geometrically it means that you 18 | * calculate a projection of the vector to a subspace formed by a few eigenvectors corresponding to the 19 | * dominant eigenvalues of the covariance matrix. And usually such a projection is very close to the 20 | * original vector. So, you can represent the original vector from a high-dimensional space with a much 21 | * shorter vector consisting of the projected vector's coordinates in the subspace. Such a 22 | * transformation is also known as Karhunen-Loeve Transform, or KLT. See 23 | * 24 | * The sample below is the function that takes two matrices. The first function stores a set of vectors 25 | * (a row per vector) that is used to calculate [PCA](#d3/d8d/classcv_1_1PCA}). The second function 26 | * stores another "test" set of vectors (a row per vector). First, these vectors are compressed with 27 | * [PCA](#d3/d8d/classcv_1_1PCA}), then reconstructed back, and then the reconstruction error norm is 28 | * computed and printed for each vector. : 29 | * 30 | * ```cpp 31 | * using namespace cv; 32 | * 33 | * PCA compressPCA(const Mat& pcaset, int maxComponents, 34 | * const Mat& testset, Mat& compressed) 35 | * { 36 | * PCA pca(pcaset, // pass the data 37 | * Mat(), // we do not have a pre-computed mean vector, 38 | * // so let the PCA engine to compute it 39 | * PCA::DATA_AS_ROW, // indicate that the vectors 40 | * // are stored as matrix rows 41 | * // (use PCA::DATA_AS_COL if the vectors are 42 | * // the matrix columns) 43 | * maxComponents // specify, how many principal components to retain 44 | * ); 45 | * // if there is no test data, just return the computed basis, ready-to-use 46 | * if( !testset.data ) 47 | * return pca; 48 | * CV_Assert( testset.cols == pcaset.cols ); 49 | * 50 | * compressed.create(testset.rows, maxComponents, testset.type()); 51 | * 52 | * Mat reconstructed; 53 | * for( int i = 0; i < testset.rows; i++ ) 54 | * { 55 | * Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; 56 | * // compress the vector, the result will be stored 57 | * // in the i-th row of the output matrix 58 | * pca.project(vec, coeffs); 59 | * // and then reconstruct it 60 | * pca.backProject(coeffs, reconstructed); 61 | * // and measure the error 62 | * printf("%d. diff = %g\\n", i, norm(vec, reconstructed, NORM_L2)); 63 | * } 64 | * return pca; 65 | * } 66 | * ``` 67 | * 68 | * [calcCovarMatrix](#d2/de8/group__core__array_1gae6ffa9354633f984246945d52823165d}), 69 | * [mulTransposed](#d2/de8/group__core__array_1gadc4e49f8f7a155044e3be1b9e3b270ab}), 70 | * [SVD](#df/df7/classcv_1_1SVD}), 71 | * [dft](#d2/de8/group__core__array_1gadd6cf9baf2b8b704a11b5f04aaf4f39d}), 72 | * [dct](#d2/de8/group__core__array_1ga85aad4d668c01fbd64825f589e3696d4}) 73 | * 74 | * Source: 75 | * [opencv2/core.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/core.hpp#L2393). 76 | * 77 | */ 78 | export declare class PCA { 79 | public eigenvalues: Mat; 80 | 81 | public eigenvectors: Mat; 82 | 83 | public mean: Mat; 84 | 85 | /** 86 | * The default constructor initializes an empty PCA structure. 
The other constructors initialize the
87 |    * structure and call [PCA::operator()()].
88 |    */
89 |   public constructor();
90 |
91 |   /**
92 |    * This is an overloaded member function, provided for convenience. It differs from the above
93 |    * function only in what argument(s) it accepts.
94 |    *
95 |    * @param data input samples stored as matrix rows or matrix columns.
96 |    *
97 |    * @param mean optional mean value; if the matrix is empty (noArray()), the mean is computed from the
98 |    * data.
99 |    *
100 |    * @param flags operation flags; currently the parameter is only used to specify the data layout
101 |    * (PCA::Flags)
102 |    *
103 |    * @param maxComponents maximum number of components that PCA should retain; by default, all the
104 |    * components are retained.
105 |    */
106 |   public constructor(
107 |     data: InputArray,
108 |     mean: InputArray,
109 |     flags: int,
110 |     maxComponents?: int,
111 |   );
112 |
113 |   /**
114 |    * This is an overloaded member function, provided for convenience. It differs from the above
115 |    * function only in what argument(s) it accepts.
116 |    *
117 |    * @param data input samples stored as matrix rows or matrix columns.
118 |    *
119 |    * @param mean optional mean value; if the matrix is empty (noArray()), the mean is computed from the
120 |    * data.
121 |    *
122 |    * @param flags operation flags; currently the parameter is only used to specify the data layout
123 |    * (PCA::Flags)
124 |    *
125 |    * @param retainedVariance Percentage of variance that PCA should retain. Using this parameter will
126 |    * let the PCA decide how many components to retain, but it will always keep at least 2.
127 |    */
128 |   public constructor(
129 |     data: InputArray,
130 |     mean: InputArray,
131 |     flags: int,
132 |     retainedVariance: double,
133 |   );
134 |
135 |   /**
136 |    * The methods are inverse operations to [PCA::project]. They take PC coordinates of projected
137 |    * vectors and reconstruct the original vectors. Unless all the principal components have been
138 |    * retained, the reconstructed vectors are different from the originals. But typically, the difference
139 |    * is small if the number of components is large enough (but still much smaller than the original
140 |    * vector dimensionality). As a result, [PCA] can be used as an effective means of lossy compression.
141 |    *
142 |    * @param vec coordinates of the vectors in the principal component subspace, the layout and size are
143 |    * the same as of PCA::project output vectors.
144 |    */
145 |   public backProject(vec: InputArray): Mat;
146 |
147 |   /**
148 |    * This is an overloaded member function, provided for convenience. It differs from the above
149 |    * function only in what argument(s) it accepts.
150 |    *
151 |    * @param vec coordinates of the vectors in the principal component subspace, the layout and size are
152 |    * the same as of PCA::project output vectors.
153 |    *
154 |    * @param result reconstructed vectors; the layout and size are the same as of PCA::project input
155 |    * vectors.
156 |    */
157 |   public backProject(vec: InputArray, result: OutputArray): InputArray;
158 |
159 |   /**
160 |    * The methods project one or more vectors to the principal component subspace, where each vector
161 |    * projection is represented by coefficients in the principal component basis. The first form of the
162 |    * method returns the matrix that the second form writes to the result. So the first form can be used
163 |    * as part of an expression, while the second form can be more efficient in a processing loop.
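 *
 * A hedged TypeScript sketch of the project/backProject round trip (it assumes
 * an opencv.js build that actually exports PCA and DATA_AS_ROW, which stock
 * builds generally do not; the values are illustrative):
 *
 * ```ts
 * const data = cv.matFromArray(4, 2, cv.CV_32F, [0, 0, 1, 1, 2, 2, 3, 3.5]);
 * const pca = new cv.PCA(data, new cv.Mat(), cv.DATA_AS_ROW, 1);
 * const coeffs = pca.project(data.row(0)); // one coefficient per retained component
 * const approx = pca.backProject(coeffs);  // reconstruction in the input space
 * data.delete(); coeffs.delete(); approx.delete();
 * ```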
164 |    *
165 |    * @param vec input vector(s); must have the same dimensionality and the same layout as the input
166 |    * data used at the PCA phase; that is, if DATA_AS_ROW is specified, then vec.cols==data.cols (vector
167 |    * dimensionality) and vec.rows is the number of vectors to project, and the same is true for the
168 |    * PCA::DATA_AS_COL case.
169 |    */
170 |   public project(vec: InputArray): Mat;
171 |
172 |   /**
173 |    * This is an overloaded member function, provided for convenience. It differs from the above
174 |    * function only in what argument(s) it accepts.
175 |    *
176 |    * @param vec input vector(s); must have the same dimensionality and the same layout as the input
177 |    * data used at the PCA phase; that is, if DATA_AS_ROW is specified, then vec.cols==data.cols (vector
178 |    * dimensionality) and vec.rows is the number of vectors to project, and the same is true for the
179 |    * PCA::DATA_AS_COL case.
180 |    *
181 |    * @param result output vectors; in case of PCA::DATA_AS_COL, the output matrix has as many columns
182 |    * as the number of input vectors; this means that result.cols==vec.cols, and the number of rows matches
183 |    * the number of principal components (for example, the maxComponents parameter passed to the constructor).
184 |    */
185 |   public project(vec: InputArray, result: OutputArray): InputArray;
186 |
187 |   /**
188 |    * Loads [eigenvalues], [eigenvectors], and [mean] from the specified [FileNode].
189 |    */
190 |   public read(fn: FileNode): FileNode;
191 |
192 |   /**
193 |    * Writes [eigenvalues], [eigenvectors], and [mean] to the specified [FileStorage].
194 |    */
195 |   public write(fs: FileStorage): FileStorage;
196 | }
197 |
198 | export declare const DATA_AS_ROW: Flags; // initializer: = 0
199 |
200 | export declare const DATA_AS_COL: Flags; // initializer: = 1
201 |
202 | export declare const USE_AVG: Flags; // initializer: = 2
203 |
204 | export type Flags = any;
205 |
--------------------------------------------------------------------------------
/src/types/opencv/DescriptorMatcher.ts:
--------------------------------------------------------------------------------
1 | import type {
2 |   Algorithm,
3 |   bool,
4 |   FileNode,
5 |   FileStorage,
6 |   float,
7 |   InputArray,
8 |   InputArrayOfArrays,
9 |   int,
10 |   Mat,
11 |   Ptr,
12 | } from "./_types";
13 |
14 | /**
15 |  * It has two groups of match methods: for matching descriptors of an image with another image or with
16 |  * an image set.
17 |  *
18 |  * Source:
19 |  * [opencv2/features2d.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/features2d.hpp#L860).
20 |  *
21 |  */
22 | export declare class DescriptorMatcher extends Algorithm {
23 |   /**
24 |    * If the collection is not empty, the new descriptors are added to existing train descriptors.
25 |    *
26 |    * @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
27 |    * train image.
28 |    */
29 |   public add(descriptors: InputArrayOfArrays): InputArrayOfArrays;
30 |
31 |   public clear(): void;
32 |
33 |   /**
34 |    * @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
35 |    * that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
36 |    * object copy with the current parameters but with empty train data.
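 *
 * A hedged sketch of the emptyTrainData semantics (it assumes a concrete matcher
 * such as BFMatcher is exposed by the opencv.js build, and that add/clone are
 * whitelisted; `trainDescs` is an illustrative MatVector of descriptor Mats):
 *
 * ```ts
 * const matcher = new cv.BFMatcher();
 * matcher.add(trainDescs);
 * const full = matcher.clone(false); // copies parameters AND train data
 * const empty = matcher.clone(true); // same parameters, empty train data
 * ```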
37 |    */
38 |   public clone(emptyTrainData?: bool): Ptr;
39 |
40 |   public empty(): bool;
41 |
42 |   public getTrainDescriptors(): Mat;
43 |
44 |   public isMaskSupported(): bool;
45 |
46 |   /**
47 |    * These extended variants of [DescriptorMatcher::match] methods find several best matches for each
48 |    * query descriptor. The matches are returned in order of increasing distance. See
49 |    * [DescriptorMatcher::match] for the details about query and train descriptors.
50 |    *
51 |    * @param queryDescriptors Query set of descriptors.
52 |    *
53 |    * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
54 |    * collection stored in the class object.
55 |    *
56 |    * @param matches Matches. Each matches[i] is k or fewer matches for the same query descriptor.
57 |    *
58 |    * @param k Count of best matches found for each query descriptor, or fewer if a query descriptor has
59 |    * fewer than k possible matches in total.
60 |    *
61 |    * @param mask Mask specifying permissible matches between an input query and train matrices of
62 |    * descriptors.
63 |    *
64 |    * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
65 |    * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
66 |    * matches vector does not contain matches for fully masked-out query descriptors.
67 |    */
68 |   public knnMatch(
69 |     queryDescriptors: InputArray,
70 |     trainDescriptors: InputArray,
71 |     matches: any,
72 |     k: int,
73 |     mask?: InputArray,
74 |     compactResult?: bool,
75 |   ): InputArray;
76 |
77 |   /**
78 |    * This is an overloaded member function, provided for convenience. It differs from the above
79 |    * function only in what argument(s) it accepts.
80 |    *
81 |    * @param queryDescriptors Query set of descriptors.
82 |    *
83 |    * @param matches Matches. Each matches[i] is k or fewer matches for the same query descriptor.
84 |    *
85 |    * @param k Count of best matches found for each query descriptor, or fewer if a query descriptor has
86 |    * fewer than k possible matches in total.
87 |    *
88 |    * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
89 |    * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
90 |    *
91 |    * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
92 |    * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
93 |    * matches vector does not contain matches for fully masked-out query descriptors.
94 |    */
95 |   public knnMatch(
96 |     queryDescriptors: InputArray,
97 |     matches: any,
98 |     k: int,
99 |     masks?: InputArrayOfArrays,
100 |     compactResult?: bool,
101 |   ): InputArray;
102 |
103 |   /**
104 |    * In the first variant of this method, the train descriptors are passed as an input argument. In the
105 |    * second variant of the method, the train descriptors collection that was set by [DescriptorMatcher::add]
106 |    * is used. Optional mask (or masks) can be passed to specify which query and training descriptors can
107 |    * be matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
108 |    * mask.at<uchar>(i,j) is non-zero.
109 |    *
110 |    * @param queryDescriptors Query set of descriptors.
111 |    *
112 |    * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
113 |    * collection stored in the class object.
114 |    *
115 |    * @param matches Matches.
If a query descriptor is masked out in mask, no match is added for this
116 |    * descriptor. So, matches size may be smaller than the query descriptors count.
117 |    *
118 |    * @param mask Mask specifying permissible matches between an input query and train matrices of
119 |    * descriptors.
120 |    */
121 |   public match(
122 |     queryDescriptors: InputArray,
123 |     trainDescriptors: InputArray,
124 |     matches: any,
125 |     mask?: InputArray,
126 |   ): InputArray;
127 |
128 |   /**
129 |    * This is an overloaded member function, provided for convenience. It differs from the above
130 |    * function only in what argument(s) it accepts.
131 |    *
132 |    * @param queryDescriptors Query set of descriptors.
133 |    *
134 |    * @param matches Matches. If a query descriptor is masked out in mask, no match is added for this
135 |    * descriptor. So, matches size may be smaller than the query descriptors count.
136 |    *
137 |    * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
138 |    * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
139 |    */
140 |   public match(
141 |     queryDescriptors: InputArray,
142 |     matches: any,
143 |     masks?: InputArrayOfArrays,
144 |   ): InputArray;
145 |
146 |   /**
147 |    * For each query descriptor, the methods find such training descriptors that the distance between
148 |    * the query descriptor and the training descriptor is equal to or smaller than maxDistance. Found matches
149 |    * are returned in order of increasing distance.
150 |    *
151 |    * @param queryDescriptors Query set of descriptors.
152 |    *
153 |    * @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
154 |    * collection stored in the class object.
155 |    *
156 |    * @param matches Found matches.
157 |    *
158 |    * @param maxDistance Threshold for the distance between matched descriptors. Distance means here
159 |    * metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
160 |    * pixels)!
161 |    *
162 |    * @param mask Mask specifying permissible matches between an input query and train matrices of
163 |    * descriptors.
164 |    *
165 |    * @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
166 |    * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
167 |    * matches vector does not contain matches for fully masked-out query descriptors.
168 |    */
169 |   public radiusMatch(
170 |     queryDescriptors: InputArray,
171 |     trainDescriptors: InputArray,
172 |     matches: any,
173 |     maxDistance: float,
174 |     mask?: InputArray,
175 |     compactResult?: bool,
176 |   ): InputArray;
177 |
178 |   /**
179 |    * This is an overloaded member function, provided for convenience. It differs from the above
180 |    * function only in what argument(s) it accepts.
181 |    *
182 |    * @param queryDescriptors Query set of descriptors.
183 |    *
184 |    * @param matches Found matches.
185 |    *
186 |    * @param maxDistance Threshold for the distance between matched descriptors. Distance means here
187 |    * metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in
188 |    * pixels)!
189 |    *
190 |    * @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
191 |    * descriptors and stored train descriptors from the i-th image trainDescCollection[i].
192 |    *
193 |    * @param compactResult Parameter used when the mask (or masks) is not empty.
If compactResult is
194 |    * false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the
195 |    * matches vector does not contain matches for fully masked-out query descriptors.
196 |    */
197 |   public radiusMatch(
198 |     queryDescriptors: InputArray,
199 |     matches: any,
200 |     maxDistance: float,
201 |     masks?: InputArrayOfArrays,
202 |     compactResult?: bool,
203 |   ): InputArray;
204 |
205 |   public read(fileName: String): String;
206 |
207 |   public read(fn: FileNode): FileNode;
208 |
209 |   /**
210 |    * Trains a descriptor matcher (for example, the flann index). In all the match methods, [train()]
211 |    * is run every time before matching. Some descriptor matchers (for example,
212 |    * BruteForceMatcher) have an empty implementation of this method. Other matchers really train their
213 |    * inner structures (for example, [FlannBasedMatcher] trains [flann::Index]).
214 |    */
215 |   public train(): void;
216 |
217 |   public write(fileName: String): String;
218 |
219 |   public write(fs: FileStorage): FileStorage;
220 |
221 |   public write(fs: Ptr, name?: String): Ptr;
222 | }
223 |
224 | export declare const FLANNBASED: MatcherType; // initializer: = 1
225 |
226 | export declare const BRUTEFORCE: MatcherType; // initializer: = 2
227 |
228 | export declare const BRUTEFORCE_L1: MatcherType; // initializer: = 3
229 |
230 | export declare const BRUTEFORCE_HAMMING: MatcherType; // initializer: = 4
231 |
232 | export declare const BRUTEFORCE_HAMMINGLUT: MatcherType; // initializer: = 5
233 |
234 | export declare const BRUTEFORCE_SL2: MatcherType; // initializer: = 6
235 |
236 | export type MatcherType = any;
237 |
--------------------------------------------------------------------------------
/src/types/opencv/_hacks.ts:
--------------------------------------------------------------------------------
1 | // Scalar, Point, Rect, etc. are defined by opencv.js (helpers.js) and we need to declare them manually:
2 |
3 | export declare class Range {
4 |   public start: number;
5 |   public end: number;
6 |   public constructor(start: number, end: number);
7 | }
8 |
9 | export declare class Scalar extends Array<number> {
10 |   public static all(...v: number[]): Scalar;
11 | }
12 | // Hack: expose Mat super classes like Mat_, InputArray, Vector, OutputArray; we make them aliases of Mat to simplify and make it work
13 | export { Mat as InputArray };
14 | export { Mat as InputOutputArray };
15 | export { Mat as OutputArray };
16 | export { MatVector as InputArrayOfArrays };
17 | export { MatVector as InputOutputArrayOfArrays };
18 | export { MatVector as OutputArrayOfArrays };
19 | export { Scalar as GScalar };
20 | export { Point as Point2f };
21 | export { Point as KeyPoint };
22 | export { Point as Point2l };
23 | export { Size as Point2d };
24 | export { Size as Size2d };
25 | export { Size as Size2f };
26 | export { Size as Size2l };
27 | export { Rect as Rect_ };
28 |
29 | export declare class Point {
30 |   public constructor(x: number, y: number);
31 |   public x: number;
32 |   public y: number;
33 | }
34 |
35 | export declare class Circle {
36 |   public constructor(center: Point, radius: number);
37 |   public center: Point;
38 |   public radius: number;
39 | }
40 |
41 | export declare class Size {
42 |   public constructor(width: number, height: number);
43 |   public width: number;
44 |   public height: number;
45 | }
46 |
47 | export declare class Rect {
48 |   public constructor();
49 |   public constructor(point: Point, size: Size);
50 |   public constructor(x: number, y: number, width: number,
height: number);
51 |   public x: number;
52 |   public y: number;
53 |   public width: number;
54 |   public height: number;
55 | }
56 |
57 | export declare class TermCriteria {
58 |   public type: number;
59 |   public maxCount: number;
60 |   public epsilon: number;
61 |   public constructor();
62 |   public constructor(type: number, maxCount: number, epsilon: number);
63 | }
64 | export declare const TermCriteria_EPS: any;
65 | export declare const TermCriteria_COUNT: any;
66 | export declare const TermCriteria_MAX_ITER: any;
67 |
68 | export declare class MinMaxLoc {
69 |   public minVal: number;
70 |   public maxVal: number;
71 |   public minLoc: Point;
72 |   public maxLoc: Point;
73 |   public constructor();
74 |   public constructor(
75 |     minVal: number,
76 |     maxVal: number,
77 |     minLoc: Point,
78 |     maxLoc: Point,
79 |   );
80 | }
81 |
82 | // expose emscripten / opencv.js specifics
83 |
84 | export declare function exceptionFromPtr(err: number): any;
85 | export declare function onRuntimeInitialized(): any;
86 | export declare function FS_createDataFile(
87 |   parent: string,
88 |   name: string,
89 |   data: Uint8Array,
90 |   canRead: boolean,
91 |   canWrite: boolean,
92 |   canOwn: boolean,
93 | ): any;
94 |
95 | import { Algorithm, type LineTypes, Mat, type NormTypes, RotatedRect } from ".";
96 | import "../_cv";
97 |
98 | /**
99 |  * Base class for Contrast Limited Adaptive Histogram Equalization.
100 |  */
101 | export declare class CLAHE extends Algorithm {
102 |   /**
103 |    * @param clipLimit Threshold for contrast limiting. Default: 40.0.
104 |    * @param totalGridSize Size of grid for histogram equalization. The input image will be divided into equally sized rectangular tiles; totalGridSize defines the number of tiles in row and column. Default: Size(8, 8).
105 |    */
106 |   constructor(clipLimit?: double, totalGridSize?: Size);
107 |   /**
108 |    * Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
109 |    * @param src Source image of type CV_8UC1 or CV_16UC1.
110 |    * @param dst Destination image.
111 |    */
112 |   apply(src: Mat, dst: Mat): void;
113 |   collectGarbage(): void;
114 |   /**
115 |    * Returns the threshold value for contrast limiting.
116 |    */
117 |   getClipLimit(): double;
118 |   /**
119 |    * Returns the Size that defines the number of tiles in row and column.
120 |    */
121 |   getTilesGridSize(): Size;
122 |   /**
123 |    * Sets the threshold for contrast limiting.
124 |    */
125 |   setClipLimit(clipLimit: double): void;
126 |   /**
127 |    * Sets the size of the grid for histogram equalization. The input image will be divided into equally sized rectangular tiles.
128 |    * @param tileGridSize defines the number of tiles in row and column.
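 *
 * A short usage sketch (CLAHE is part of stock opencv.js builds; `grayMat` and
 * `dstMat` are illustrative pre-existing Mats):
 *
 * ```ts
 * const clahe = new cv.CLAHE(2.0, new cv.Size(8, 8));
 * clahe.setTilesGridSize(new cv.Size(16, 16)); // finer local equalization
 * clahe.apply(grayMat, dstMat); // grayMat must be CV_8UC1 or CV_16UC1
 * clahe.delete();
 * ```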
129 |    */
130 |   setTilesGridSize(tileGridSize: Size): void;
131 | }
132 |
133 | // emscripten embind internals
134 | export declare function getInheritedInstanceCount(...a: any[]): any;
135 | export declare function getLiveInheritedInstances(...a: any[]): any;
136 | export declare function flushPendingDeletes(...a: any[]): any;
137 | export declare function setDelayFunction(...a: any[]): any;
138 |
139 | export declare class EmscriptenEmbindInstance {
140 |   isAliasOf(other: any): bool;
141 |   clone(): any;
142 |   delete(): any;
143 |   isDeleted(): boolean;
144 |   deleteLater(): any;
145 | }
146 |
147 | export declare class InternalError extends Error {}
148 | export declare class BindingError extends Error {}
149 | export declare class UnboundTypeError extends Error {}
150 | export declare class PureVirtualError extends Error {}
151 |
152 | export declare class Vector<T> extends EmscriptenEmbindInstance {
153 |   get(i: number): T;
154 |   get(i: number, j: number, data: any): T;
155 |   set(i: number, t: T): void;
156 |   put(i: number, j: number, data: any): any;
157 |   size(): number;
158 |   push_back(n: T): any;
159 |   resize(count: number, value?: T): void;
160 | }
161 |
162 | export declare class Vec3d extends Vector<number> {}
163 | export declare class IntVector extends Vector<number> {}
164 | export declare class FloatVector extends Vector<number> {}
165 | export declare class DoubleVector extends Vector<number> {}
166 | export declare class PointVector extends Vector<Point> {}
167 | export declare class KeyPointVector extends Vector<Point> {}
168 | export declare class DMatchVector extends Vector<any> {}
169 | export declare class DMatchVectorVector extends Vector<DMatchVector> {}
170 | export declare class MatVector extends Vector<Mat> {}
171 |
172 | export declare class RectVector extends Rect implements Vector<Rect> {
173 |   get(i: number): Rect;
174 |   isAliasOf(...a: any[]): any;
175 |   clone(...a: any[]): any;
176 |   delete(...a: any[]): any;
177 |   isDeleted(...a: any[]): any;
178 |   deleteLater(...a: any[]): any;
179 |   set(i: number, t: Rect): void;
180 |   put(i: number, j: number, data: any): any;
181 |   size(): number;
182 |   push_back(n: Rect): void;
183 |   resize(count: number, value?: Rect | undefined): void;
184 |   delete(): void;
185 | }
186 |
187 | export declare class VideoCapture {
188 |   public constructor(videoSource: HTMLVideoElement | string);
189 |   public read(m: Mat): any;
190 |   public video: HTMLVideoElement;
191 | }
192 |
193 | export type MatSize = () => Size;
194 |
195 | export declare function matFromImageData(imageData: ImageData): Mat;
196 | export declare function matFromArray(
197 |   rows: number,
198 |   cols: number,
199 |   type: any,
200 |   array: number[] | ArrayBufferView,
201 | ): Mat;
202 |
203 | export declare class ImageData {
204 |   data: ArrayBufferView;
205 |   width: number;
206 |   height: number;
207 | }
208 |
209 | // TODO: these types should be exposed by the tool - we want to make it work:
210 | export declare const CV_8U: CVDataType;
211 | export declare const CV_8UC1: CVDataType;
212 | export declare const CV_8UC2: CVDataType;
213 | export declare const CV_8UC3: CVDataType;
214 | export declare const CV_8UC4: CVDataType;
215 | export declare const CV_8S: CVDataType;
216 | export declare const CV_8SC1: CVDataType;
217 | export declare const CV_8SC2: CVDataType;
218 | export declare const CV_8SC3: CVDataType;
219 | export declare const CV_8SC4: CVDataType;
220 | export declare const CV_16U: CVDataType;
221 | export declare const CV_16UC1: CVDataType;
222 | export declare const CV_16UC2: CVDataType;
223 | export declare const CV_16UC3: CVDataType;
224 | export declare const CV_16UC4: CVDataType; 225 | export declare const CV_16S: CVDataType; 226 | export declare const CV_16SC1: CVDataType; 227 | export declare const CV_16SC2: CVDataType; 228 | export declare const CV_16SC3: CVDataType; 229 | export declare const CV_16SC4: CVDataType; 230 | export declare const CV_32S: CVDataType; 231 | export declare const CV_32SC1: CVDataType; 232 | export declare const CV_32SC2: CVDataType; 233 | export declare const CV_32SC3: CVDataType; 234 | export declare const CV_32SC4: CVDataType; 235 | export declare const CV_32F: CVDataType; 236 | export declare const CV_32FC1: CVDataType; 237 | export declare const CV_32FC2: CVDataType; 238 | export declare const CV_32FC3: CVDataType; 239 | export declare const CV_32FC4: CVDataType; 240 | export declare const CV_64F: CVDataType; 241 | export declare const CV_64FC1: CVDataType; 242 | export declare const CV_64FC2: CVDataType; 243 | export declare const CV_64FC3: CVDataType; 244 | export declare const CV_64FC4: CVDataType; 245 | 246 | export type CVDataType = any; 247 | 248 | export declare function ellipse1( 249 | dst: Mat, 250 | rotatedRect: RotatedRect, 251 | ellipseColor: Scalar, 252 | arg0: number, 253 | line: LineTypes, 254 | ): void; 255 | export declare function imread( 256 | canvasOrImageHtmlElement: HTMLElement | string, 257 | ): Mat; 258 | export declare function norm1(a: Mat, b: Mat, type: NormTypes): number; 259 | export declare function imshow( 260 | canvasSource: HTMLElement | string, 261 | mat: Mat, 262 | ): void; 263 | export declare function matFromArray( 264 | rows: number, 265 | cols: number, 266 | type: any, 267 | array: any, 268 | ): Mat; 269 | 270 | // Missing imports: 271 | export type Mat4 = any; 272 | export type Mat3 = any; 273 | export type Vec3 = any; 274 | export type float_type = any; 275 | export type int = number; 276 | export type bool = boolean; 277 | export type FileNode = any; 278 | export type FileStorage = any; 279 | export type Ptr = any; 280 | export type size_t = any; 281 | export type double = number; 282 | export type float = number; 283 | export type UMat = any; 284 | export type Matrix = any; 285 | export type BucketKey = any; 286 | export type Bucket = any; 287 | export type LshStats = any; 288 | export type MatAllocator = any; 289 | export type uchar = any; 290 | export type MatStep = any; 291 | export type UMatData = any; 292 | export type typename = any; 293 | export type Vec = any; 294 | export type Point_ = any; 295 | export type Point3_ = any; 296 | export type MatCommaInitializer_ = any; 297 | export type MatIterator_ = any; 298 | export type MatConstIterator_ = any; 299 | export type AccessFlag = any; 300 | export type UMatUsageFlags = any; 301 | export type _Tp = any; 302 | export type Matx_AddOp = any; 303 | export type Matx_SubOp = any; 304 | export type _T2 = any; 305 | export type Matx_ScaleOp = any; 306 | export type Matx_MulOp = any; 307 | export type Matx_DivOp = any; 308 | export type Matx_MatMulOp = any; 309 | export type Matx_TOp = any; 310 | export type diag_type = any; 311 | export type _EqPredicate = any; 312 | export type cvhalDFT = any; 313 | export type schar = any; 314 | export type ushort = any; 315 | export type short = any; 316 | export type int64 = any; 317 | export type ErrorCallback = any; 318 | export type unsigned = any; 319 | export type uint64 = any; 320 | export type float16_t = any; 321 | export type AsyncArray = any; 322 | export type Net = any; 323 | export type Moments = any; 324 | export type uint64_t = any; 325 | 
export type uint32_t = any; 326 | export type int32_t = any; 327 | export type int64_t = any; 328 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/src/types/opencv/HOGDescriptor.ts:
--------------------------------------------------------------------------------
1 | import type {
2 | bool,
3 | double,
4 | FileNode,
5 | FileStorage,
6 | float,
7 | InputArray,
8 | InputOutputArray,
9 | int,
10 | Point,
11 | Size,
12 | size_t,
13 | UMat,
14 | } from "./_types";
15 |
16 | /**
17 | * Implements the HOG (Histogram of Oriented Gradients) descriptor algorithm
18 | * introduced by Navneet Dalal and Bill Triggs (Dalal2005).
19 | *
20 | *
21 | * Source:
22 | * [opencv2/objdetect.hpp](https://github.com/opencv/opencv/tree/master/modules/core/include/opencv2/objdetect.hpp#L377).
23 | *
24 | */
25 | export declare class HOGDescriptor {
26 | public blockSize: Size;
27 |
28 | public blockStride: Size;
29 |
30 | public cellSize: Size;
31 |
32 | public derivAperture: int;
33 |
34 | public free_coef: float;
35 |
36 | public gammaCorrection: bool;
37 |
38 | public histogramNormType: any;
39 |
40 | public L2HysThreshold: double;
41 |
42 | public nbins: int;
43 |
44 | public nlevels: int;
45 |
46 | public oclSvmDetector: UMat;
47 |
48 | public signedGradient: bool;
49 |
50 | public svmDetector: any;
51 |
52 | public winSigma: double;
53 |
54 | public winSize: Size;
55 |
56 | /**
57 | * Equal to [HOGDescriptor](Size(64,128), Size(16,16), Size(8,8), Size(8,8), 9 ).
58 | */
59 | public constructor();
60 |
61 | /**
62 | * This is an overloaded member function, provided for convenience. It differs from the above
63 | * function only in what argument(s) it accepts.
64 | *
65 | * @param _winSize sets winSize with given value.
66 | *
67 | * @param _blockSize sets blockSize with given value.
68 | *
69 | * @param _blockStride sets blockStride with given value.
70 | *
71 | * @param _cellSize sets cellSize with given value.
72 | *
73 | * @param _nbins sets nbins with given value.
74 | *
75 | * @param _derivAperture sets derivAperture with given value.
76 | *
77 | * @param _winSigma sets winSigma with given value.
78 | *
79 | * @param _histogramNormType sets histogramNormType with given value.
80 | *
81 | * @param _L2HysThreshold sets L2HysThreshold with given value.
82 | *
83 | * @param _gammaCorrection sets gammaCorrection with given value.
84 | *
85 | * @param _nlevels sets nlevels with given value.
86 | * 87 | * @param _signedGradient sets signedGradient with given value. 88 | */ 89 | public constructor( 90 | _winSize: Size, 91 | _blockSize: Size, 92 | _blockStride: Size, 93 | _cellSize: Size, 94 | _nbins: int, 95 | _derivAperture?: int, 96 | _winSigma?: double, 97 | _histogramNormType?: any, 98 | _L2HysThreshold?: double, 99 | _gammaCorrection?: bool, 100 | _nlevels?: int, 101 | _signedGradient?: bool, 102 | ); 103 | 104 | /** 105 | * This is an overloaded member function, provided for convenience. It differs from the above 106 | * function only in what argument(s) it accepts. 107 | * 108 | * @param filename The file name containing HOGDescriptor properties and coefficients for the linear 109 | * SVM classifier. 110 | */ 111 | public constructor(filename: String); 112 | 113 | /** 114 | * This is an overloaded member function, provided for convenience. It differs from the above 115 | * function only in what argument(s) it accepts. 116 | * 117 | * @param d the HOGDescriptor which cloned to create a new one. 118 | */ 119 | public constructor(d: HOGDescriptor); 120 | 121 | public checkDetectorSize(): bool; 122 | 123 | /** 124 | * @param img Matrix of the type CV_8U containing an image where HOG features will be calculated. 125 | * 126 | * @param descriptors Matrix of the type CV_32F 127 | * 128 | * @param winStride Window stride. It must be a multiple of block stride. 129 | * 130 | * @param padding Padding 131 | * 132 | * @param locations Vector of Point 133 | */ 134 | public compute( 135 | img: InputArray, 136 | descriptors: any, 137 | winStride?: Size, 138 | padding?: Size, 139 | locations?: Point, 140 | ): InputArray; 141 | 142 | /** 143 | * @param img Matrix contains the image to be computed 144 | * 145 | * @param grad Matrix of type CV_32FC2 contains computed gradients 146 | * 147 | * @param angleOfs Matrix of type CV_8UC2 contains quantized gradient orientations 148 | * 149 | * @param paddingTL Padding from top-left 150 | * 151 | * @param paddingBR Padding from bottom-right 152 | */ 153 | public computeGradient( 154 | img: InputArray, 155 | grad: InputOutputArray, 156 | angleOfs: InputOutputArray, 157 | paddingTL?: Size, 158 | paddingBR?: Size, 159 | ): InputArray; 160 | 161 | /** 162 | * @param c cloned HOGDescriptor 163 | */ 164 | public copyTo(c: HOGDescriptor): HOGDescriptor; 165 | 166 | /** 167 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 168 | * 169 | * @param foundLocations Vector of point where each point contains left-top corner point of detected 170 | * object boundaries. 171 | * 172 | * @param weights Vector that will contain confidence values for each detected object. 173 | * 174 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 175 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 176 | * the free coefficient is omitted (which is allowed), you can specify it manually here. 177 | * 178 | * @param winStride Window stride. It must be a multiple of block stride. 179 | * 180 | * @param padding Padding 181 | * 182 | * @param searchLocations Vector of Point includes set of requested locations to be evaluated. 
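 *
 * A hedged usage sketch (assumes this opencv.js build actually ships
 * HOGDescriptor; `img` is an illustrative CV_8U Mat):
 *
 * ```ts
 * const hog = new cv.HOGDescriptor();
 * hog.setSVMDetector(cv.HOGDescriptor.getDefaultPeopleDetector());
 * const found = new cv.PointVector();
 * const weights = new cv.DoubleVector();
 * hog.detect(img, found, weights); // fills top-left corners of detections
 * found.delete();
 * weights.delete(); // free the embind-allocated vectors
 * ```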
183 | */ 184 | public detect( 185 | img: InputArray, 186 | foundLocations: any, 187 | weights: any, 188 | hitThreshold?: double, 189 | winStride?: Size, 190 | padding?: Size, 191 | searchLocations?: Point, 192 | ): InputArray; 193 | 194 | /** 195 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 196 | * 197 | * @param foundLocations Vector of point where each point contains left-top corner point of detected 198 | * object boundaries. 199 | * 200 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 201 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 202 | * the free coefficient is omitted (which is allowed), you can specify it manually here. 203 | * 204 | * @param winStride Window stride. It must be a multiple of block stride. 205 | * 206 | * @param padding Padding 207 | * 208 | * @param searchLocations Vector of Point includes locations to search. 209 | */ 210 | public detect( 211 | img: InputArray, 212 | foundLocations: any, 213 | hitThreshold?: double, 214 | winStride?: Size, 215 | padding?: Size, 216 | searchLocations?: Point, 217 | ): InputArray; 218 | 219 | /** 220 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 221 | * 222 | * @param foundLocations Vector of rectangles where each rectangle contains the detected object. 223 | * 224 | * @param foundWeights Vector that will contain confidence values for each detected object. 225 | * 226 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 227 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 228 | * the free coefficient is omitted (which is allowed), you can specify it manually here. 229 | * 230 | * @param winStride Window stride. It must be a multiple of block stride. 231 | * 232 | * @param padding Padding 233 | * 234 | * @param scale Coefficient of the detection window increase. 235 | * 236 | * @param finalThreshold Final threshold 237 | * 238 | * @param useMeanshiftGrouping indicates grouping algorithm 239 | */ 240 | public detectMultiScale( 241 | img: InputArray, 242 | foundLocations: any, 243 | foundWeights: any, 244 | hitThreshold?: double, 245 | winStride?: Size, 246 | padding?: Size, 247 | scale?: double, 248 | finalThreshold?: double, 249 | useMeanshiftGrouping?: bool, 250 | ): InputArray; 251 | 252 | /** 253 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 254 | * 255 | * @param foundLocations Vector of rectangles where each rectangle contains the detected object. 256 | * 257 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 258 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 259 | * the free coefficient is omitted (which is allowed), you can specify it manually here. 260 | * 261 | * @param winStride Window stride. It must be a multiple of block stride. 262 | * 263 | * @param padding Padding 264 | * 265 | * @param scale Coefficient of the detection window increase. 
266 | * 267 | * @param finalThreshold Final threshold 268 | * 269 | * @param useMeanshiftGrouping indicates grouping algorithm 270 | */ 271 | public detectMultiScale( 272 | img: InputArray, 273 | foundLocations: any, 274 | hitThreshold?: double, 275 | winStride?: Size, 276 | padding?: Size, 277 | scale?: double, 278 | finalThreshold?: double, 279 | useMeanshiftGrouping?: bool, 280 | ): InputArray; 281 | 282 | /** 283 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 284 | * 285 | * @param foundLocations Vector of rectangles where each rectangle contains the detected object. 286 | * 287 | * @param locations Vector of DetectionROI 288 | * 289 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 290 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 291 | * the free coefficient is omitted (which is allowed), you can specify it manually here. 292 | * 293 | * @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a 294 | * group of rectangles to retain it. 295 | */ 296 | public detectMultiScaleROI( 297 | img: InputArray, 298 | foundLocations: any, 299 | locations: any, 300 | hitThreshold?: double, 301 | groupThreshold?: int, 302 | ): InputArray; 303 | 304 | /** 305 | * @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected. 306 | * 307 | * @param locations Vector of Point 308 | * 309 | * @param foundLocations Vector of Point where each Point is detected object's top-left point. 310 | * 311 | * @param confidences confidences 312 | * 313 | * @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually 314 | * it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if 315 | * the free coefficient is omitted (which is allowed), you can specify it manually here 316 | * 317 | * @param winStride winStride 318 | * 319 | * @param padding padding 320 | */ 321 | public detectROI( 322 | img: InputArray, 323 | locations: any, 324 | foundLocations: any, 325 | confidences: any, 326 | hitThreshold?: double, 327 | winStride?: any, 328 | padding?: any, 329 | ): InputArray; 330 | 331 | public getDescriptorSize(): size_t; 332 | 333 | public getWinSigma(): double; 334 | 335 | /** 336 | * @param rectList Input/output vector of rectangles. Output vector includes retained and grouped 337 | * rectangles. (The Python list is not modified in place.) 338 | * 339 | * @param weights Input/output vector of weights of rectangles. Output vector includes weights of 340 | * retained and grouped rectangles. (The Python list is not modified in place.) 341 | * 342 | * @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a 343 | * group of rectangles to retain it. 344 | * 345 | * @param eps Relative difference between sides of the rectangles to merge them into a group. 346 | */ 347 | public groupRectangles( 348 | rectList: any, 349 | weights: any, 350 | groupThreshold: int, 351 | eps: double, 352 | ): any; 353 | 354 | /** 355 | * @param filename Path of the file to read. 356 | * 357 | * @param objname The optional name of the node to read (if empty, the first top-level node will be 358 | * used). 
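 *
 * A hedged sketch for the browser build: the file must first exist in the
 * emscripten virtual file system (`yamlBytes` is an illustrative Uint8Array;
 * FS_createDataFile is declared earlier in these typings):
 *
 * ```ts
 * cv.FS_createDataFile("/", "hog.yml", yamlBytes, true, false, false);
 * hog.load("hog.yml");
 * ```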
359 | */
360 | public load(filename: String, objname?: String): String;
361 |
362 | /**
363 | * @param fn File node
364 | */
365 | public read(fn: FileNode): FileNode;
366 |
367 | /**
368 | * @param filename File name
369 | *
370 | * @param objname Object name
371 | */
372 | public save(filename: String, objname?: String): String;
373 |
374 | /**
375 | * @param svmdetector coefficients for the linear SVM classifier.
376 | */
377 | public setSVMDetector(svmdetector: InputArray): InputArray;
378 |
379 | /**
380 | * @param fs File storage
381 | *
382 | * @param objname Object name
383 | */
384 | public write(fs: FileStorage, objname: String): FileStorage;
385 |
386 | public static getDaimlerPeopleDetector(): any;
387 |
388 | public static getDefaultPeopleDetector(): any;
389 | }
390 |
391 | export declare const DEFAULT_NLEVELS: any; // initializer: = 64
392 |
393 | export declare const DESCR_FORMAT_COL_BY_COL: DescriptorStorageFormat; // initializer:
394 |
395 | export declare const DESCR_FORMAT_ROW_BY_ROW: DescriptorStorageFormat; // initializer:
396 |
397 | export declare const L2Hys: HistogramNormType; // initializer: = 0
398 |
399 | export type DescriptorStorageFormat = any;
400 |
401 | export type HistogramNormType = any;
402 |
--------------------------------------------------------------------------------
/src/types/opencv/imgproc_hist.ts:
--------------------------------------------------------------------------------
1 | import type {
2 | bool,
3 | double,
4 | float,
5 | InputArray,
6 | InputArrayOfArrays,
7 | int,
8 | OutputArray,
9 | Size,
10 | } from "./_types";
11 | /*
12 | * # Histograms
13 | *
14 | */
15 | /**
16 | * The function [cv::calcBackProject] calculates the back projection of the histogram. That is, similarly
17 | * to [calcHist] , at each location (x, y) the function collects the values from the selected channels
18 | * in the input images and finds the corresponding histogram bin. But instead of incrementing it, the
19 | * function reads the bin value, scales it by scale , and stores it in backProject(x,y) . In terms of
20 | * statistics, the function computes the probability of each element value with respect to the empirical
21 | * probability distribution represented by the histogram. See how, for example, you can find and track
22 | * a bright-colored object in a scene:
23 | *
24 | * Before tracking, show the object to the camera so that it covers almost the whole frame. Calculate a
25 | * hue histogram. The histogram may have strong maximums, corresponding to the dominant colors in the
26 | * object.
27 | * When tracking, calculate a back projection of a hue plane of each input video frame using that
28 | * pre-computed histogram. Threshold the back projection to suppress weak colors. It may also make
29 | * sense to suppress pixels with insufficient color saturation and too dark or too bright pixels.
30 | * Find connected components in the resulting picture and choose, for example, the largest component.
31 | *
32 | * This is an approximate version of the algorithm used by the CamShift color object tracker.
33 | *
34 | * [calcHist], [compareHist]
35 | *
36 | * @param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the
37 | * same size. Each of them can have an arbitrary number of channels.
38 | *
39 | * @param nimages Number of source images.
40 | *
41 | * @param channels The list of channels used to compute the back projection. The number of channels
42 | * must match the histogram dimensionality.
The first array channels are numbered from 0 to
43 | * images[0].channels()-1 , the second array channels are counted from images[0].channels() to
44 | * images[0].channels() + images[1].channels()-1, and so on.
45 | *
46 | * @param hist Input histogram that can be dense or sparse.
47 | *
48 | * @param backProject Destination back projection array that is a single-channel array of the same size
49 | * and depth as images[0] .
50 | *
51 | * @param ranges Array of arrays of the histogram bin boundaries in each dimension. See calcHist .
52 | *
53 | * @param scale Optional scale factor for the output back projection.
54 | *
55 | * @param uniform Flag indicating whether the histogram is uniform or not (see above).
56 | */
57 | export declare function calcBackProject(
58 | images: any,
59 | nimages: int,
60 | channels: any,
61 | hist: InputArray,
62 | backProject: OutputArray,
63 | ranges: any,
64 | scale?: double,
65 | uniform?: bool,
66 | ): void;
67 |
68 | /**
69 | * This is an overloaded member function, provided for convenience. It differs from the above function
70 | * only in what argument(s) it accepts.
71 | */
72 | export declare function calcBackProject(
73 | images: any,
74 | nimages: int,
75 | channels: any,
76 | hist: any,
77 | backProject: OutputArray,
78 | ranges: any,
79 | scale?: double,
80 | uniform?: bool,
81 | ): void;
82 |
83 | /**
84 | * This is an overloaded member function, provided for convenience. It differs from the above function
85 | * only in what argument(s) it accepts.
86 | */
87 | export declare function calcBackProject(
88 | images: InputArrayOfArrays,
89 | channels: any,
90 | hist: InputArray,
91 | dst: OutputArray,
92 | ranges: any,
93 | scale: double,
94 | ): void;
95 |
96 | /**
97 | * The function [cv::calcHist] calculates the histogram of one or more arrays. The elements of a tuple
98 | * used to increment a histogram bin are taken from the corresponding input arrays at the same
99 | * location. The sample below shows how to compute a 2D Hue-Saturation histogram for a color image.
100 | *
101 | * ```cpp
102 | * #include <opencv2/imgproc.hpp>
103 | * #include <opencv2/highgui.hpp>
104 | *
105 | * using namespace cv;
106 | *
107 | * int main( int argc, char** argv )
108 | * {
109 | *     Mat src, hsv;
110 | *     if( argc != 2 || !(src=imread(argv[1], 1)).data )
111 | *         return -1;
112 | *
113 | *     cvtColor(src, hsv, COLOR_BGR2HSV);
114 | *
115 | *     // Quantize the hue to 30 levels
116 | *     // and the saturation to 32 levels
117 | *     int hbins = 30, sbins = 32;
118 | *     int histSize[] = {hbins, sbins};
119 | *     // hue varies from 0 to 179, see cvtColor
120 | *     float hranges[] = { 0, 180 };
121 | *     // saturation varies from 0 (black-gray-white) to
122 | *     // 255 (pure spectrum color)
123 | *     float sranges[] = { 0, 256 };
124 | *     const float* ranges[] = { hranges, sranges };
125 | *     MatND hist;
126 | *     // we compute the histogram from the 0-th and 1-st channels
127 | *     int channels[] = {0, 1};
128 | *
129 | *     calcHist( &hsv, 1, channels, Mat(), // do not use mask
130 | *               hist, 2, histSize, ranges,
131 | *               true, // the histogram is uniform
132 | *               false );
133 | *     double maxVal=0;
134 | *     minMaxLoc(hist, 0, &maxVal, 0, 0);
135 | *
136 | *     int scale = 10;
137 | *     Mat histImg = Mat::zeros(sbins*scale, hbins*scale, CV_8UC3);
138 | *
139 | *     for( int h = 0; h < hbins; h++ )
140 | *         for( int s = 0; s < sbins; s++ )
141 | *         {
142 | *             float binVal = hist.at<float>(h, s);
143 | *             int intensity = cvRound(binVal*255/maxVal);
144 | *             rectangle( histImg, Point(h*scale, s*scale),
145 | *                        Point( (h+1)*scale - 1, (s+1)*scale - 1),
146 | *                        Scalar::all(intensity),
147 | *                        -1 );
148 | *         }
149 | *
150 | *     namedWindow( "Source", 1 );
151 | *     imshow( "Source", src );
152 | *
153 | *     namedWindow( "H-S Histogram", 1 );
154 | *     imshow( "H-S Histogram", histImg );
155 | *     waitKey();
156 | * }
157 | * ```
158 | *
159 | * @param images Source arrays. They all should have the same depth, CV_8U, CV_16U or CV_32F , and the
160 | * same size. Each of them can have an arbitrary number of channels.
161 | *
162 | * @param nimages Number of source images.
163 | *
164 | * @param channels List of the dims channels used to compute the histogram. The first array channels
165 | * are numbered from 0 to images[0].channels()-1 , the second array channels are counted from
166 | * images[0].channels() to images[0].channels() + images[1].channels()-1, and so on.
167 | *
168 | * @param mask Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as
169 | * images[i] . The non-zero mask elements mark the array elements counted in the histogram.
170 | *
171 | * @param hist Output histogram, which is a dense or sparse dims -dimensional array.
172 | *
173 | * @param dims Histogram dimensionality that must be positive and not greater than CV_MAX_DIMS (equal
174 | * to 32 in the current OpenCV version).
175 | *
176 | * @param histSize Array of histogram sizes in each dimension.
177 | *
178 | * @param ranges Array of the dims arrays of the histogram bin boundaries in each dimension. When the
179 | * histogram is uniform ( uniform =true), then for each dimension i it is enough to specify the lower
180 | * (inclusive) boundary $L_0$ of the 0-th histogram bin and the upper (exclusive) boundary
181 | * $U_{\texttt{histSize}[i]-1}$ for the last histogram bin histSize[i]-1 . That is, in case of a
182 | * uniform histogram each of ranges[i] is an array of 2 elements.
When the histogram is not uniform ( 183 | * uniform=false ), then each of ranges[i] contains histSize[i]+1 elements: $L_0, U_0=L_1, U_1=L_2, 184 | * ..., U_{\texttt{histSize[i]}-2}=L_{\texttt{histSize[i]}-1}, U_{\texttt{histSize[i]}-1}$ . The array 185 | * elements, that are not between $L_0$ and $U_{\texttt{histSize[i]}-1}$ , are not counted in the 186 | * histogram. 187 | * 188 | * @param uniform Flag indicating whether the histogram is uniform or not (see above). 189 | * 190 | * @param accumulate Accumulation flag. If it is set, the histogram is not cleared in the beginning 191 | * when it is allocated. This feature enables you to compute a single histogram from several sets of 192 | * arrays, or to update the histogram in time. 193 | */ 194 | export declare function calcHist( 195 | images: any, 196 | nimages: int, 197 | channels: any, 198 | mask: InputArray, 199 | hist: OutputArray, 200 | dims: int, 201 | histSize: any, 202 | ranges: any, 203 | uniform?: bool, 204 | accumulate?: bool, 205 | ): void; 206 | 207 | /** 208 | * This is an overloaded member function, provided for convenience. It differs from the above function 209 | * only in what argument(s) it accepts. 210 | * 211 | * this variant uses SparseMat for output 212 | */ 213 | export declare function calcHist( 214 | images: any, 215 | nimages: int, 216 | channels: any, 217 | mask: InputArray, 218 | hist: any, 219 | dims: int, 220 | histSize: any, 221 | ranges: any, 222 | uniform?: bool, 223 | accumulate?: bool, 224 | ): void; 225 | 226 | /** 227 | * This is an overloaded member function, provided for convenience. It differs from the above function 228 | * only in what argument(s) it accepts. 229 | */ 230 | export declare function calcHist( 231 | images: InputArrayOfArrays, 232 | channels: any, 233 | mask: InputArray, 234 | hist: OutputArray, 235 | histSize: any, 236 | ranges: any, 237 | accumulate?: bool, 238 | ): void; 239 | 240 | /** 241 | * The function [cv::compareHist] compares two dense or two sparse histograms using the specified 242 | * method. 243 | * 244 | * The function returns `$d(H_1, H_2)$` . 245 | * 246 | * While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable 247 | * for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling 248 | * problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms 249 | * or more general sparse configurations of weighted points, consider using the [EMD] function. 250 | * 251 | * @param H1 First compared histogram. 252 | * 253 | * @param H2 Second compared histogram of the same size as H1 . 254 | * 255 | * @param method Comparison method, see HistCompMethods 256 | */ 257 | export declare function compareHist( 258 | H1: InputArray, 259 | H2: InputArray, 260 | method: int, 261 | ): double; 262 | 263 | /** 264 | * This is an overloaded member function, provided for convenience. It differs from the above function 265 | * only in what argument(s) it accepts. 266 | */ 267 | export declare function compareHist(H1: any, H2: any, method: int): double; 268 | 269 | /** 270 | * @param clipLimit Threshold for contrast limiting. 271 | * 272 | * @param tileGridSize Size of grid for histogram equalization. Input image will be divided into 273 | * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column. 
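 *
 * A minimal sketch (hedged: mirrors the C++ factory; `srcGray` and `dst` are
 * illustrative Mats, and the returned object is only typed `any` here):
 *
 * ```ts
 * const clahe = cv.createCLAHE(2.0, new cv.Size(8, 8));
 * clahe.apply(srcGray, dst); // srcGray: CV_8UC1
 * clahe.delete();            // free emscripten memory when done
 * ```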
274 | */
275 | export declare function createCLAHE(
276 | clipLimit?: double,
277 | tileGridSize?: Size,
278 | ): any;
279 |
280 | /**
281 | * The function computes the earth mover distance and/or a lower boundary of the distance between the
282 | * two weighted point configurations. One of the applications described in RubnerSept98, Rubner2000 is
283 | * multi-dimensional histogram comparison for image retrieval. EMD is a transportation problem that is
284 | * solved using some modification of a simplex algorithm, thus the complexity is exponential in the
285 | * worst case, though, on average it is much faster. In the case of a real metric the lower boundary
286 | * can be calculated even faster (using a linear-time algorithm) and it can be used to determine roughly
287 | * whether the two signatures are far enough so that they cannot relate to the same object.
288 | *
289 | * @param signature1 First signature, a $\texttt{size1}\times \texttt{dims}+1$ floating-point matrix.
290 | * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have a
291 | * single column (weights only) if the user-defined cost matrix is used. The weights must be
292 | * non-negative and have at least one non-zero value.
293 | *
294 | * @param signature2 Second signature of the same format as signature1 , though the number of rows may
295 | * be different. The total weights may be different. In this case an extra "dummy" point is added to
296 | * either signature1 or signature2. The weights must be non-negative and have at least one non-zero
297 | * value.
298 | *
299 | * @param distType Used metric. See DistanceTypes.
300 | *
301 | * @param cost User-defined $\texttt{size1}\times \texttt{size2}$ cost matrix. Also, if a cost matrix
302 | * is used, lower boundary lowerBound cannot be calculated because it needs a metric function.
303 | *
304 | * @param lowerBound Optional input/output parameter: lower boundary of a distance between the two
305 | * signatures that is a distance between mass centers. The lower boundary may not be calculated if the
306 | * user-defined cost matrix is used, the total weights of point configurations are not equal, or if the
307 | * signatures consist of weights only (the signature matrices have a single column). You **must**
308 | * initialize *lowerBound. If the calculated distance between mass centers is greater than or equal to
309 | * *lowerBound (it means that the signatures are far enough), the function does not calculate EMD. In
310 | * any case *lowerBound is set to the calculated distance between mass centers on return. Thus, if you
311 | * want to calculate both distance between mass centers and EMD, *lowerBound should be set to 0.
312 | *
313 | * @param flow Resultant $\texttt{size1} \times \texttt{size2}$ flow matrix: $\texttt{flow}_{i,j}$ is a
314 | * flow from $i$ -th point of signature1 to $j$ -th point of signature2 .
315 | */
316 | export declare function EMD(
317 | signature1: InputArray,
318 | signature2: InputArray,
319 | distType: int,
320 | cost?: InputArray,
321 | lowerBound?: any,
322 | flow?: OutputArray,
323 | ): float;
324 |
325 | /**
326 | * The function equalizes the histogram of the input image using the following algorithm:
327 | *
328 | * Calculate the histogram `$H$` for src .
329 | * Normalize the histogram so that the sum of histogram bins is 255.
330 | * Compute the integral of the histogram: `\\[H'_i = \\sum _{0 \\le j < i} H(j)\\]` 331 | * Transform the image using `$H'$` as a look-up table: `$\\texttt{dst}(x,y) = H'(\\texttt{src}(x,y))$` 332 | * 333 | * The algorithm normalizes the brightness and increases the contrast of the image. 334 | * 335 | * @param src Source 8-bit single channel image. 336 | * 337 | * @param dst Destination image of the same size and type as src . 338 | */ 339 | export declare function equalizeHist(src: InputArray, dst: OutputArray): void; 340 | 341 | export declare function wrapperEMD( 342 | signature1: InputArray, 343 | signature2: InputArray, 344 | distType: int, 345 | cost?: InputArray, 346 | lowerBound?: any, 347 | flow?: OutputArray, 348 | ): float; 349 | 350 | /** 351 | * Correlation `\\[d(H_1,H_2) = \\frac{\\sum_I (H_1(I) - \\bar{H_1}) (H_2(I) - 352 | * \\bar{H_2})}{\\sqrt{\\sum_I(H_1(I) - \\bar{H_1})^2 \\sum_I(H_2(I) - \\bar{H_2})^2}}\\]` where 353 | * `\\[\\bar{H_k} = \\frac{1}{N} \\sum _J H_k(J)\\]` and `$N$` is a total number of histogram bins. 354 | * 355 | */ 356 | export declare const HISTCMP_CORREL: HistCompMethods; // initializer: = 0 357 | 358 | /** 359 | * Chi-Square `\\[d(H_1,H_2) = \\sum _I \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)}\\]` 360 | * 361 | */ 362 | export declare const HISTCMP_CHISQR: HistCompMethods; // initializer: = 1 363 | 364 | /** 365 | * Intersection `\\[d(H_1,H_2) = \\sum _I \\min (H_1(I), H_2(I))\\]` 366 | * 367 | */ 368 | export declare const HISTCMP_INTERSECT: HistCompMethods; // initializer: = 2 369 | 370 | /** 371 | * Bhattacharyya distance (In fact, OpenCV computes Hellinger distance, which is related to 372 | * Bhattacharyya coefficient.) `\\[d(H_1,H_2) = \\sqrt{1 - \\frac{1}{\\sqrt{\\bar{H_1} \\bar{H_2} N^2}} 373 | * \\sum_I \\sqrt{H_1(I) \\cdot H_2(I)}}\\]` 374 | * 375 | */ 376 | export declare const HISTCMP_BHATTACHARYYA: HistCompMethods; // initializer: = 3 377 | 378 | export declare const HISTCMP_HELLINGER: HistCompMethods; // initializer: = HISTCMP_BHATTACHARYYA 379 | 380 | /** 381 | * Alternative Chi-Square `\\[d(H_1,H_2) = 2 * \\sum _I 382 | * \\frac{\\left(H_1(I)-H_2(I)\\right)^2}{H_1(I)+H_2(I)}\\]` This alternative formula is regularly used 383 | * for texture comparison. See e.g. Puzicha1997 384 | * 385 | */ 386 | export declare const HISTCMP_CHISQR_ALT: HistCompMethods; // initializer: = 4 387 | 388 | /** 389 | * Kullback-Leibler divergence `\\[d(H_1,H_2) = \\sum _I H_1(I) \\log 390 | * \\left(\\frac{H_1(I)}{H_2(I)}\\right)\\]` 391 | * 392 | */ 393 | export declare const HISTCMP_KL_DIV: HistCompMethods; // initializer: = 5 394 | 395 | /** 396 | * Histogram comparison methods 397 | * 398 | */ 399 | export type HistCompMethods = any; 400 | --------------------------------------------------------------------------------