├── .eslintrc.js
├── .github
└── workflows
│ ├── build.yml
│ ├── lerna-publish-github-packages.yml
│ ├── tsc.yml
│ └── typedoc.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE
├── README.md
├── lerna.json
├── nx.json
├── package.json
├── packages
├── core
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── __tests__
│ │ └── mathUtils.test.ts
│ ├── index.ts
│ ├── jest.config.js
│ ├── lib
│ │ ├── api.ts
│ │ ├── types.ts
│ │ └── utils
│ │ │ ├── annotationUtils.ts
│ │ │ ├── colorUtils.ts
│ │ │ └── mathUtils.ts
│ ├── package.json
│ ├── tsconfig.json
│ └── typedoc.json
└── react
│ ├── .gitignore
│ ├── CHANGELOG.md
│ ├── README.md
│ ├── __tests__
│ ├── InferenceResult.test.tsx
│ └── PhotoCollector.test.tsx
│ ├── build.js
│ ├── index.tsx
│ ├── jest.config.js
│ ├── lib
│ ├── components
│ │ ├── Annotation
│ │ │ └── index.tsx
│ │ ├── InferenceResult.tsx
│ │ ├── PhotoCollector.tsx
│ │ └── index.module.css
│ ├── context
│ │ └── InferenceContext.tsx
│ ├── polyfill.ts
│ └── types.d.ts
│ ├── package.json
│ ├── tsconfig.json
│ ├── typedoc.json
│ ├── vitest.config.ts
│ └── vitest.setup.tsx
├── typedoc.base.json
├── typedoc.json
└── yarn.lock
/.eslintrc.js:
--------------------------------------------------------------------------------
// ESLint configuration shared by every package in this monorepo.
module.exports = {
  // Code runs in browsers (react package), Node (build scripts), and Jest tests.
  env: {
    browser: true,
    es6: true,
    node: true,
    jest: true,
    // NOTE(review): jquery does not appear elsewhere in this repo — confirm
    // this env is still needed before removing.
    jquery: true
  },
  extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended'],
  parser: '@typescript-eslint/parser',
  plugins: ['@typescript-eslint'],
  parserOptions: {
    ecmaVersion: 2018,
    sourceType: 'module',
    ecmaFeatures: {
      experimentalObjectRestSpread: true,
      jsx: true,
      modules: true
    }
  },
  // Skip generated output: bundles (dist), declaration files (types), typedoc (docs).
  ignorePatterns: ['**/dist', '**/types', '**/docs'],
  rules: {
    semi: ['error', 'always'],
    quotes: ['error', 'single'],
    // Relax TypeScript rules that this codebase intentionally does not follow.
    '@typescript-eslint/no-var-requires': 'off',
    '@typescript-eslint/ban-ts-comment': 'off',
    '@typescript-eslint/ban-types': 'off',
    '@typescript-eslint/no-explicit-any': 'off',
    '@typescript-eslint/no-non-null-assertion': 'off',
    '@typescript-eslint/no-empty-function': 'off',
  },
};
33 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 | on:
3 | pull_request:
4 | paths:
5 | - '.github/workflows/build.yml'
6 | - 'packages/**'
7 | - 'tsconfig.json'
8 | - 'package.json'
9 | - 'yarn.lock'
10 |
11 | jobs:
12 | ci-build:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: "Checkout"
17 | uses: actions/checkout@v2
18 | with:
19 | fetch-depth: 0
20 |
21 | - name: "Use NodeJS 16"
22 | uses: actions/setup-node@v2
23 | with:
24 | node-version: '16'
25 | cache: 'yarn'
26 |
27 | - name: "Install dependencies"
28 | run: |
29 | yarn install
30 |
31 | - name: "Build"
32 | run: |
33 | yarn build
34 |
--------------------------------------------------------------------------------
/.github/workflows/lerna-publish-github-packages.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 | on:
3 | push:
4 | branches:
5 | - main
6 |
7 | jobs:
8 | publish:
9 | name: "Lerna Publish"
10 | runs-on: ubuntu-latest
11 | if: ${{ !contains(github.event.head_commit.message, '[Bump Version]') }}
12 |
13 | steps:
14 | - name: "Checkout"
15 | uses: actions/checkout@v2
16 | with:
17 | fetch-depth: 0
18 | token: ${{ secrets.GH_PAT }}
19 |
20 | - name: "Use NodeJS 16"
21 | uses: actions/setup-node@v2
22 | with:
23 | node-version: '16'
24 | cache: 'yarn'
25 |
26 | - name: "NPM Identity"
27 | env:
28 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
29 | run: |
30 | npm config set '//registry.npmjs.org/:_authToken' "${NPM_TOKEN}"
31 |
32 | - name: "Install dependencies"
33 | run: |
34 | yarn install
35 |
36 | - name: "TSC"
37 | run: |
38 | yarn tsc
39 |
40 | - name: "Build"
41 | run: |
42 | yarn build
43 |
44 | - name: Bump Version
45 | env:
46 | GH_TOKEN: ${{ secrets.GH_PAT }}
47 | run: |
48 | git config user.name "${{ github.actor }}"
49 | git config user.email "${{ github.actor}}@users.noreply.github.com"
50 | npx lerna version --conventional-commits --conventional-graduate -m "[Bump Version] %s" --yes
51 | npx lerna publish from-git --yes
52 |
--------------------------------------------------------------------------------
/.github/workflows/tsc.yml:
--------------------------------------------------------------------------------
1 | name: TSC
2 | on:
3 | pull_request:
4 | paths:
5 | - '.github/workflows/tsc.yml'
6 | - 'packages/**'
7 | - 'tsconfig.json'
8 | - 'package.json'
9 | - 'yarn.lock'
10 |
11 | jobs:
12 | ci-tsc:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: "Checkout"
17 | uses: actions/checkout@v2
18 | with:
19 | fetch-depth: 0
20 |
21 | - name: "Use NodeJS 16"
22 | uses: actions/setup-node@v2
23 | with:
24 | node-version: '16'
25 | cache: 'yarn'
26 |
27 | - name: "Install dependencies"
28 | run: |
29 | yarn install
30 |
31 | - name: "TSC"
32 | run: |
33 | yarn tsc
34 |
--------------------------------------------------------------------------------
/.github/workflows/typedoc.yml:
--------------------------------------------------------------------------------
1 | name: Typedoc
2 |
3 | on:
  # Triggers the workflow on push events for the "main" branch
5 | push:
6 | branches: [ "main" ]
7 |
8 | # Allows you to run this workflow manually from the Actions tab
9 | workflow_dispatch:
10 |
11 | jobs:
12 | typedoc:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v3
17 | with:
18 | fetch-depth: 0
19 | token: ${{ secrets.GH_PAT }}
20 |
21 | - name: "Use NodeJS 16"
22 | uses: actions/setup-node@v2
23 | with:
24 | node-version: '16'
25 | cache: 'yarn'
26 |
27 | - name: "Install dependencies"
28 | run: |
29 | yarn install
30 |
31 | - name: TSC
32 | run: |
33 | yarn tsc
34 |
35 | - name: Docs
36 | run: |
37 | yarn docs
38 |
39 | - name: Publish pages 🚀
40 | uses: JamesIves/github-pages-deploy-action@4.1.4
41 | with:
42 | branch: gh-pages
43 | folder: docs
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | .DS_Store
3 | **/yarn-error.log
4 | docs
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | All notable changes to this project will be documented in this file.
4 | See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
5 |
6 | ## [1.0.24](https://github.com/landing-ai/landingai-js/compare/v1.0.23...v1.0.24) (2024-03-01)
7 |
8 | **Note:** Version bump only for package landingai-js
9 |
10 |
11 |
12 |
13 |
14 | ## [1.0.23](https://github.com/landing-ai/landingai-js/compare/v1.0.22...v1.0.23) (2023-08-28)
15 |
16 |
17 | ### Bug Fixes
18 |
19 | * support opening camera from android ([#23](https://github.com/landing-ai/landingai-js/issues/23)) ([222d04d](https://github.com/landing-ai/landingai-js/commit/222d04dccc87eaedb212f5de85a94420ec2ca9d3))
20 |
21 |
22 |
23 |
24 |
25 | ## [1.0.22](https://github.com/landing-ai/landingai-js/compare/v1.0.21...v1.0.22) (2023-06-28)
26 |
27 | **Note:** Version bump only for package landingai-js
28 |
29 |
30 |
31 |
32 |
33 | ## [1.0.21](https://github.com/landing-ai/landingai-js/compare/v1.0.20...v1.0.21) (2023-06-12)
34 |
35 | **Note:** Version bump only for package landingai-js
36 |
37 |
38 |
39 |
40 |
41 | ## [1.0.20](https://github.com/landing-ai/landingai-js/compare/v1.0.19...v1.0.20) (2023-06-08)
42 |
43 | **Note:** Version bump only for package landingai-js
44 |
45 |
46 |
47 |
48 |
49 | ## [1.0.19](https://github.com/landing-ai/landingai-js/compare/v1.0.18...v1.0.19) (2023-06-08)
50 |
51 | **Note:** Version bump only for package landingai-js
52 |
53 |
54 |
55 |
56 |
57 | ## [1.0.18](https://github.com/landing-ai/landingai-js/compare/v1.0.17...v1.0.18) (2023-06-06)
58 |
59 | **Note:** Version bump only for package landingai-js
60 |
61 |
62 |
63 |
64 |
65 | ## [1.0.17](https://github.com/landing-ai/landingai-js/compare/v1.0.16...v1.0.17) (2023-06-06)
66 |
67 | **Note:** Version bump only for package landingai-js
68 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 landing-ai
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # LandingLens JavaScript Library
6 | The LandingLens JavaScript library contains the LandingLens development library and examples that show how to integrate your app with LandingLens in a variety of scenarios.
7 |
8 | We've provided some examples in CodeSandbox to focus on ease of use.
9 |
10 |
11 |
12 | | Example | Description | Type |
13 | |---|---|---|
14 | | [Poker Card Suit Identification](https://codesandbox.io/s/eloquent-tesla-yzsbsk?file=/src/App.js) | This example shows how to use an Object Detection model from LandingLens to detect suits on playing cards. | CodeSandbox |
15 |
16 | ## Install the libraries
17 |
18 | ```bash
19 | npm install landingai landingai-react
20 | # OR
21 | yarn add landingai landingai-react
22 | ```
23 |
24 | ## Quick Start
25 |
26 | ### Prerequisites
27 |
28 | This library needs to communicate with the LandingLens platform to perform certain functions. (For example, the `getInferenceResult` API calls the HTTP endpoint of your deployed model). To enable communication with LandingLens, you will need the following information:
29 |
30 | - The **Endpoint ID** of your deployed model in LandingLens. You can find this on the Deploy page in LandingLens.
31 | - The **API Key** for the LandingLens organization that has the model you want to deploy. To learn how to generate these credentials, go [here](https://support.landing.ai/docs/api-key-and-api-secret).
32 |
33 | ### Collect Images and Run Inference
34 | Collect images and run inference using the endpoint you created in LandingLens:
35 |
36 | 1. Install the JS libraries.
2. Construct an `apiInfo` object and pass it to `<InferenceContext.Provider>`.
38 | 3. Render the image collector to get image blob.
39 | 4. Render the inference result component to visualize predictions.
40 |
41 | ```jsx
42 | import React from 'react';
43 | import { useState } from "react";
44 | import { InferenceContext, InferenceResult, PhotoCollector } from "landingai-react";
45 |
46 | const apiInfo = {
47 | endpoint: `https://predict.app.landing.ai/inference/v1/predict?endpoint_id=`,
48 | key: "",
49 | };
50 |
51 | export default function App() {
52 | const [image, setImage] = useState();
53 |
  return (
    <InferenceContext.Provider value={apiInfo}>
      <PhotoCollector setImage={setImage} />
      <InferenceResult image={image} />
    </InferenceContext.Provider>
  );
}
61 | ```
62 |
See a **working example** [here](https://codesandbox.io/s/eloquent-tesla-yzsbsk?file=/src/App.js).
64 |
65 | ## Documentation
66 |
67 | - [Landing AI JavaScript API Library Reference](https://landing-ai.github.io/landingai-js/)
68 |
--------------------------------------------------------------------------------
/lerna.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "node_modules/lerna/schemas/lerna-schema.json",
3 | "npmClient": "yarn",
4 | "useWorkspaces": true,
5 | "version": "1.0.24"
6 | }
7 |
--------------------------------------------------------------------------------
/nx.json:
--------------------------------------------------------------------------------
1 | {
2 | "tasksRunnerOptions": {
3 | "default": {
4 | "runner": "nx/tasks-runners/default",
5 | "options": {
6 | "cacheableOperations": [
7 | "tsc",
8 | "build"
9 | ]
10 | }
11 | }
12 | },
13 | "targetDefaults": {
14 | "tsc": {
15 | "dependsOn": [
16 | "^tsc"
17 | ],
18 | "outputs": [
19 | "{projectRoot}/types"
20 | ]
21 | },
22 | "build": {
23 | "dependsOn": [
24 | "^build"
25 | ],
26 | "outputs": [
27 | "{projectRoot}/dist"
28 | ]
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "landingai-js",
3 | "version": "1.0.0",
4 | "description": "Landing AI JS libraries",
5 | "main": "index.js",
6 | "repository": "git@github.com:landing-ai/landingai-js.git",
7 | "author": "Lan Tian ",
8 | "license": "MIT",
9 | "private": true,
10 | "scripts": {
11 | "build": "npx lerna run build",
12 | "tsc": "npx lerna run tsc",
13 | "lint": "eslint .",
14 | "docs": "typedoc"
15 | },
16 | "workspaces": [
17 | "packages/*"
18 | ],
19 | "devDependencies": {
20 | "@typescript-eslint/eslint-plugin": "^5.59.7",
21 | "@typescript-eslint/parser": "^5.59.7",
22 | "eslint": "^8.41.0",
23 | "lerna": "^6.6.2",
24 | "typedoc": "^0.24.7",
25 | "typescript": "^5.0.4"
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/packages/core/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | types
3 | tsconfig.tsbuildinfo
4 | .DS_Store
--------------------------------------------------------------------------------
/packages/core/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | All notable changes to this project will be documented in this file.
4 | See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
5 |
6 | ## [1.0.24](https://github.com/landing-ai/landingai-js/compare/v1.0.23...v1.0.24) (2024-03-01)
7 |
8 | **Note:** Version bump only for package landingai
9 |
10 |
11 |
12 |
13 |
14 | ## [1.0.22](https://github.com/landing-ai/landingai-js/compare/v1.0.21...v1.0.22) (2023-06-28)
15 |
16 | **Note:** Version bump only for package landingai
17 |
18 |
19 |
20 |
21 |
22 | ## [1.0.21](https://github.com/landing-ai/landingai-js/compare/v1.0.20...v1.0.21) (2023-06-12)
23 |
24 | **Note:** Version bump only for package landingai
25 |
26 |
27 |
28 |
29 |
30 | ## [1.0.20](https://github.com/landing-ai/landingai-js/compare/v1.0.19...v1.0.20) (2023-06-08)
31 |
32 | **Note:** Version bump only for package landingai
33 |
34 |
35 |
36 |
37 |
38 | ## [1.0.16](https://github.com/landing-ai/landingai-js/compare/v1.0.15...v1.0.16) (2023-06-06)
39 |
40 | **Note:** Version bump only for package landingai
41 |
--------------------------------------------------------------------------------
/packages/core/README.md:
--------------------------------------------------------------------------------
1 | # `landingai`
2 |
3 | Core functions for getting predictions from LandingLens cloud deployment endpoints and rendering predictions.
4 |
5 | ```bash
6 | npm install landingai
7 | # OR
8 | yarn add landingai
9 | ```
10 |
11 | ## Usage
12 |
13 | ### Getting prediction results
14 |
15 | ```javascript
16 | // 1. setup endpoint and credentials
17 | const apiInfo = {
18 | endpoint: "https://predict.app.landing.ai/inference/v1/predict?endpoint_id=",
19 | key: "",
20 | secret: "",
21 | }
22 | // 2. get an image and convert to blob
23 | const blob = await fetch("url-to-image").then((response) => response.blob());
24 | // 3. get predictions from the blob
25 | const { backbonepredictions } = await getInferenceResult(apiInfo, blob);
26 | // 4. convert to annotations for rendering
27 | const annotations = predictionsToAnnotations(backbonepredictions);
28 | // render annotations
29 | ```
30 |
31 | **References**
32 |
33 | * [How to get an endpoint](https://support.landing.ai/docs/cloud-deployment)
34 | * [How to get an API Key](https://support.landing.ai/docs/api-key)
35 |
--------------------------------------------------------------------------------
/packages/core/__tests__/mathUtils.test.ts:
--------------------------------------------------------------------------------
import { countBy } from '../lib/utils/mathUtils';

// countBy should tally objects by the value of the given field:
// two 'female' entries and one 'male' entry in the fixture below.
it('counts by key correctly', () => {
  expect(
    countBy([{ name: 'Alice', sex: 'female' }, { name: 'Bob', sex: 'male' }, { name: 'Choe', sex: 'female' }], 'sex'),
  ).toEqual({ female: 2, male: 1 });
});
8 |
--------------------------------------------------------------------------------
/packages/core/index.ts:
--------------------------------------------------------------------------------
// Public entry point for the `landingai` package:
// re-exports the rendering utils, color/math helpers, types, and the inference API.
export * from './lib/utils/annotationUtils';
export * from './lib/utils/colorUtils';
export * from './lib/utils/mathUtils';
export * from './lib/types';
export * from './lib/api';
6 |
--------------------------------------------------------------------------------
/packages/core/jest.config.js:
--------------------------------------------------------------------------------
// Jest configuration: compile .ts sources and tests with esbuild-jest.
module.exports = {
  transform: {
    '^.+\\.ts$': 'esbuild-jest'
  },
};
--------------------------------------------------------------------------------
/packages/core/lib/api.ts:
--------------------------------------------------------------------------------
1 | import { ApiInfo, InferenceResult, ServerSegmentationPredictions } from './types';
2 |
3 | export class ApiError extends Error {
4 | body: any;
5 | status?: number;
6 | statusText?: string;
7 | constructor(message: string){
8 | super(message);
9 | }
10 | }
11 |
12 | /**
13 | * Call Landing AI inference endpoint to get predictions
14 | */
15 | export const getInferenceResult = async (apiInfo: ApiInfo, image: Blob): Promise => {
16 | const formData = new FormData();
17 | formData.append('file', image);
18 |
19 | const result = await fetch(
20 | apiInfo.endpoint + `&device_type=${process.env.LIB_DEVICE_TYPE ?? 'jslib'}`,
21 | {
22 | method: 'POST',
23 | headers: {
24 | Accept: '*/*',
25 | ...(apiInfo.key
26 | ? {
27 | apikey: apiInfo.key,
28 | apisecret: apiInfo.secret,
29 | }
30 | : undefined
31 | )
32 | },
33 | body: formData,
34 | }
35 | );
36 | const body: any = await result.text();
37 | let bodyJson: any;
38 | try {
39 | bodyJson = JSON.parse(body);
40 |
41 | // convert segmentation prediction fields to camel case
42 | const bitmaps = (bodyJson as InferenceResult).predictions?.bitmaps
43 | ?? ((bodyJson as InferenceResult).backbonepredictions as ServerSegmentationPredictions)?.bitmaps;
44 | if (bitmaps) {
45 | for (const key in bitmaps) {
46 | const {
47 | score,
48 | label_name: labelName,
49 | label_index: labelIndex,
50 | defect_id: defectId,
51 | bitmap,
52 | } = bitmaps[key] as any;
53 |
54 | bitmaps[key] = { score, labelName, labelIndex, defectId, bitmap };
55 | }
56 | }
57 |
58 | } catch (e) {
59 | // ignore error
60 | }
61 |
62 | if (result.status !== 200) {
63 | const error = new ApiError(bodyJson?.message ?? body);
64 | error.status = result.status;
65 | error.statusText = result.statusText;
66 | error.body = bodyJson;
67 | throw error;
68 | }
69 | return bodyJson as any;
70 | };
--------------------------------------------------------------------------------
/packages/core/lib/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Information required for calling Landing AI to get predictions.
3 | */
4 | export interface ApiInfo {
5 | /**
6 | * Landing AI API key of a particular LandingLens user. See https://support.landing.ai/docs/api-key-and-api-secret
7 | */
8 | key: string;
9 | /**
10 | * Landing AI API secret of a particular LandingLens user. See https://support.landing.ai/docs/api-key-and-api-secret
11 | *
12 | * If your API key is prefixed with `land_sk_`, the secret is not required
13 | */
14 | secret?: string;
15 | /**
16 | * The cloud deployment endpoint. See https://support.landing.ai/docs/cloud-deployment
17 | */
18 | endpoint: string;
19 | }
20 |
21 | /**
22 | * The coordinates (xmin, ymin, xmax, ymax) of the predicted bounding box.
23 | */
24 | export type Coordinates = {
25 | xmin: number;
26 | xmax: number;
27 | ymin: number;
28 | ymax: number;
29 | };
30 |
31 | /**
32 | *
33 | */
34 | export type SegmentationBitmap = {
35 | score: number;
36 | labelName: string;
37 | labelIndex: number;
38 | defectId: number;
39 | bitmap: string;
40 | };
41 |
42 | /**
43 | * The base/parent prediction class that stores the common shared properties of a prediction.
44 | */
45 | export type BasePrediction = {
46 | /**
47 | * The confidence score of this prediction.
48 | */
49 | score: number;
50 | /**
51 | * The predicted label name.
52 | */
53 | labelName: string;
54 | /**
55 | * The predicted label index. A label index is an unique integer identifies a label in your label book.
56 | * See https://support.landing.ai/docs/manage-label-book for more details.
57 | */
58 | labelIndex: number;
59 | };
60 |
61 | /**
62 | * A single bounding box prediction for an image.
63 | * It includes a predicted bounding box (xmin, ymin, xmax, ymax), confidence score and the predicted label.
64 | */
65 | export type ObjectDetectionPrediction = BasePrediction & {
66 | coordinates: Coordinates;
67 | };
68 |
69 | /**
70 | * A single segmentation mask prediction for an image.
71 | * It includes a predicted segmentation mask, confidence score and the predicted label.
72 | */
73 | export type SegmentationPrediction = BasePrediction & {
74 | bitmap: string;
75 | };
76 |
77 | /**
78 | * A single classification prediction for an image.
79 | */
80 | export type ClassificationPrediction = BasePrediction & {};
81 |
82 | /**
83 | * Prediction for one of object detection, segmentation, or classification
84 | */
85 | export type Prediction = ObjectDetectionPrediction | SegmentationPrediction | ClassificationPrediction;
86 |
87 | /**
88 | * A map where keys are uuids of predictions and values are predictions.
89 | */
90 | export type PredictionsMap = Record;
91 |
92 | /**
93 | * The converted annotation format for rendering.
94 | *
95 | * An annotation is a segmentation mask, a bounding box, or a class.
96 | */
97 | export type Annotation = {
98 | /**
99 | * A uuid string generated from the backend
100 | */
101 | id: string;
102 | /**
103 | * The predicted coordinates (xmin, ymin, xmax, ymax) of the predicted bounding box
104 | */
105 | coordinates?: Coordinates;
106 | /**
107 | * The predicted segmentation mask
108 | */
109 | bitmap?: string;
110 | /**
111 | * The color of the annotation
112 | */
113 | color: string;
114 | /**
115 | * Name of the annotation
116 | */
117 | name: string;
118 | };
119 |
120 | export type ServerSegmentationPredictions = {
121 | labelIndex: number;
122 | labelName: string;
123 | score: number;
124 | /**
125 | * Segmentation bitmaps
126 | */
127 | bitmaps?: Record | null,
128 | };
129 |
130 | /**
131 | * Inference API response
132 | */
133 | export type InferenceResult = {
134 | /**
135 | * backbone predictions. e.g. bounding boxes
136 | */
137 | backbonepredictions: PredictionsMap | ServerSegmentationPredictions | null;
138 | /**
139 | * prediction on the image
140 | */
141 | predictions: ServerSegmentationPredictions,
142 | /**
143 | * Inference type for the image.
144 | *
145 | * For object detection, segmentation and classification projects, this field will be 'ClassificationPrediction'
146 | * for the image, stating if the image is OK (no annotations detected) or NG (has annotations detected).
147 | *
148 | * In this case, please use backbonetype to differentiate the two types of projects.
149 | *
150 | * For visual prompting projects, this field will be 'SegmentationPrediction'.
151 | */
152 | type: 'SegmentationPrediction' | 'ClassificationPrediction';
153 | /**
154 | * Prediction type. Only for object detection, segmentation projects.
155 | */
156 | backbonetype: 'SegmentationPrediction' | 'ObjectDetectionPrediction';
157 | };
--------------------------------------------------------------------------------
/packages/core/lib/utils/annotationUtils.ts:
--------------------------------------------------------------------------------
1 | import { Annotation, InferenceResult, ObjectDetectionPrediction, SegmentationPrediction } from '../types';
2 | import { hexToRgb, palette } from './colorUtils';
3 |
4 | /**
5 | * Convert server format predictions into a list of {@link Annotation} for easy rendering
6 | */
7 | export function predictionsToAnnotations(inferenceResult?: InferenceResult | null) {
8 | if (!inferenceResult) {
9 | return [];
10 | }
11 | const { backbonepredictions, predictions } = inferenceResult;
12 | const predictionsMap = predictions.bitmaps ?? backbonepredictions?.bitmaps ?? backbonepredictions;
13 | return Object.entries(predictionsMap || []).map(([id, prediction]) => ({
14 | id,
15 | color: palette[prediction.labelIndex - 1],
16 | coordinates: (prediction as ObjectDetectionPrediction).coordinates,
17 | bitmap: (prediction as SegmentationPrediction).bitmap,
18 | name: prediction.labelName,
19 | } as Annotation));
20 | }
21 |
22 | const RLE_OPTIONS = { map: { Z: '0', N: '1' } };
23 | const rleDecodeMap: Record = RLE_OPTIONS.map;
24 |
25 | /**
26 | * Decode string like 1N2Z1N3Z into 1001000
27 | */
28 | export const runLengthDecode = (text: string) => {
29 | if (!text) return text;
30 | /**
31 | * Groups all encoded pieces together
32 | * 1N2Z1N3Z1N5Z1N2Z1N3Z1N3Z1N =>
33 | * ["12Z", "1N", "3Z", "1N", "5Z", "1N", "2Z", "1N", "3Z", "1N", "3Z", "1N"]
34 | */
35 | const matches = text.match(/(\d+)(\w|\s)/g);
36 | /**
37 | * Repeat each piece's last char with number
38 | * 3Z = 000 1N = 1
39 | */
40 | return matches!.reduce((acc, str) => {
41 | const decodedKey = rleDecodeMap[str.slice(-1)];
42 | const times = Number(str.slice(0, str.length - 1));
43 | return `${acc}${decodedKey.repeat(times)}`;
44 | }, '');
45 | };
46 |
47 | /**
48 | * Convert a run-length-encoded string to a blob
49 | */
50 | export const convertCompressedBitMapToBlob = async (
51 | compressedBitMap: string,
52 | color: string,
53 | width: number,
54 | height: number,
55 | ) => {
56 | const bitMap = runLengthDecode(compressedBitMap) || '';
57 | const { r, g, b } = hexToRgb(color);
58 | const offscreen = new OffscreenCanvas(width, height);
59 | const context = offscreen.getContext('2d', {
60 | desynchronized: true,
61 | });
62 | const imageData = context?.createImageData(width, height) as ImageData;
63 | for (let i = 0; i < bitMap.length; i += 1) {
64 | if (bitMap[i] === '1') {
65 | imageData.data[4 * i + 0] = r; // R value
66 | imageData.data[4 * i + 1] = g; // G value
67 | imageData.data[4 * i + 2] = b; // B value
68 | imageData.data[4 * i + 3] = 255 * 0.6; // A value
69 | }
70 | }
71 | context?.putImageData(imageData, 0, 0);
72 | return offscreen.convertToBlob();
73 | };
--------------------------------------------------------------------------------
/packages/core/lib/utils/colorUtils.ts:
--------------------------------------------------------------------------------
/**
 * Color palette for predicted classes.
 *
 * `predictionsToAnnotations` picks a color with `palette[labelIndex - 1]`,
 * i.e. label indices are treated as 1-based into this list.
 */
export const palette = [
  '#811776',
  '#FFFF00',
  '#00FFFF',
  '#FF00FF',
  '#0000FF',
  '#FF8F20',
  '#AD2E24',
  '#470400',
  '#CFF000',
  '#A7D141',
  '#41D1B3',
  '#70E9FF',
  '#00ACED',
  '#BFD8BD',
  '#FF70E9',
  '#FF5C9A',
  '#BE408F',
  '#8670FF',
  '#333275',
];
25 |
26 | /**
27 | * RGB color object
28 | */
29 | export type RGB = { r: number; g: number; b: number };
30 |
31 | /**
32 | * convert a hex color to RGB
33 | */
34 | export function hexToRgb(hex: string | null | undefined): RGB {
35 | const fallback = { r: 0, g: 0, b: 0 };
36 | if (!hex) return fallback;
37 | const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
38 | return result
39 | ? {
40 | r: parseInt(result[1], 16),
41 | g: parseInt(result[2], 16),
42 | b: parseInt(result[3], 16),
43 | }
44 | : fallback;
45 | }
46 |
47 | /**
48 | * Check if a color is dark color or not.
49 | *
50 | * For example:
51 | *
52 | * ```javascript
53 | * const textColor = isDark(someColor) ? 'white' : 'black';
54 | * ```
55 | */
56 | export function isDark(rgb: string | null | undefined | RGB) {
57 | if (typeof rgb === 'string' || rgb === null || rgb === undefined) {
58 | rgb = hexToRgb(rgb);
59 | }
60 | const { r, g, b } = rgb;
61 | const brightness = r * 0.299 + g * 0.587 + b * 0.114;
62 | return brightness < 150;
63 | }
--------------------------------------------------------------------------------
/packages/core/lib/utils/mathUtils.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Count object array by a field.
3 | *
4 | * E.g.
5 | * ```
6 | * countBy([{name: 'Alice', sex: 'female'}, {name: 'Bob', sex: 'male'}, {name: 'Choe', sex: 'female'}], 'sex')
7 | * // returns { female: 2, male: 1 }
8 | * ```
9 | */
10 | export const countBy = (arr: any[], key: string): Record => {
11 | return arr.reduce((res, item) => {
12 | if (key in item) {
13 | const value = item[key];
14 | if (!(value in res)) {
15 | res[value] = 0;
16 | }
17 | res[value]++;
18 | }
19 | return res;
20 | }, {} as Record);
21 | };
--------------------------------------------------------------------------------
/packages/core/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "landingai",
3 | "version": "1.0.24",
4 | "description": "> TODO: description",
5 | "author": "Lan Tian ",
6 | "homepage": "https://github.com/landing-ai/landingai-js#readme",
7 | "license": "MIT",
8 | "main": "dist/index.js",
9 | "sideEffects": false,
10 | "directories": {
11 | "lib": "lib",
12 | "test": "__tests__"
13 | },
14 | "files": [
15 | "dist",
16 | "types"
17 | ],
18 | "types": "types/index.d.ts",
19 | "repository": {
20 | "type": "git",
21 | "url": "git+https://github.com/landing-ai/landingai-js.git"
22 | },
23 | "scripts": {
24 | "tsc": "tsc",
25 | "build": "esbuild index.ts --bundle --outdir=dist --minify --sourcemap --packages=external --platform=node",
26 | "test": "jest"
27 | },
28 | "bugs": {
29 | "url": "https://github.com/landing-ai/landingai-js/issues"
30 | },
31 | "devDependencies": {
32 | "@types/assert": "^1.5.6",
33 | "@types/jest": "^29.5.1",
34 | "esbuild": "^0.17.19",
35 | "esbuild-jest": "^0.5.0",
36 | "jest": "^29.5.0"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/packages/core/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "outDir": "types",
4 | "target": "es2018",
5 | "lib": ["dom", "dom.iterable", "esnext"],
6 | "allowJs": true,
7 | "skipLibCheck": true,
8 | "strict": true,
9 | "forceConsistentCasingInFileNames": true,
10 | "declaration": true,
11 | "emitDeclarationOnly": true,
12 | "esModuleInterop": true,
13 | "module": "esnext",
14 | "moduleResolution": "node",
15 | "resolveJsonModule": true,
16 | "isolatedModules": true,
17 | "declarationMap": true,
18 | "incremental": true
19 | },
20 | "include": ["**/*.ts"],
21 | "exclude": ["node_modules", "types", "dist", "**/*.test.ts"]
22 | }
23 |
--------------------------------------------------------------------------------
/packages/core/typedoc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["../../typedoc.base.json"],
3 | "entryPoints": ["index.ts"]
4 | }
5 |
--------------------------------------------------------------------------------
/packages/react/.gitignore:
--------------------------------------------------------------------------------
1 | dist
2 | types
3 | tsconfig.tsbuildinfo
4 | .DS_Store
5 | coverage
--------------------------------------------------------------------------------
/packages/react/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | All notable changes to this project will be documented in this file.
4 | See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
5 |
6 | ## [1.0.24](https://github.com/landing-ai/landingai-js/compare/v1.0.23...v1.0.24) (2024-03-01)
7 |
8 | **Note:** Version bump only for package landingai-react
9 |
10 |
11 |
12 |
13 |
14 | ## [1.0.23](https://github.com/landing-ai/landingai-js/compare/v1.0.22...v1.0.23) (2023-08-28)
15 |
16 |
17 | ### Bug Fixes
18 |
19 | * support opening camera from android ([#23](https://github.com/landing-ai/landingai-js/issues/23)) ([222d04d](https://github.com/landing-ai/landingai-js/commit/222d04dccc87eaedb212f5de85a94420ec2ca9d3))
20 |
21 |
22 |
23 |
24 |
25 | ## [1.0.22](https://github.com/landing-ai/landingai-js/compare/v1.0.21...v1.0.22) (2023-06-28)
26 |
27 | **Note:** Version bump only for package landingai-react
28 |
29 |
30 |
31 |
32 |
33 | ## [1.0.21](https://github.com/landing-ai/landingai-js/compare/v1.0.20...v1.0.21) (2023-06-12)
34 |
35 | **Note:** Version bump only for package landingai-react
36 |
37 |
38 |
39 |
40 |
41 | ## [1.0.20](https://github.com/landing-ai/landingai-js/compare/v1.0.19...v1.0.20) (2023-06-08)
42 |
43 | **Note:** Version bump only for package landingai-react
44 |
45 |
46 |
47 |
48 |
49 | ## [1.0.19](https://github.com/landing-ai/landingai-js/compare/v1.0.18...v1.0.19) (2023-06-08)
50 |
51 | **Note:** Version bump only for package landingai-react
52 |
53 |
54 |
55 |
56 |
57 | ## [1.0.18](https://github.com/landing-ai/landingai-js/compare/v1.0.17...v1.0.18) (2023-06-06)
58 |
59 | **Note:** Version bump only for package landingai-react
60 |
61 |
62 |
63 |
64 |
65 | ## [1.0.17](https://github.com/landing-ai/landingai-js/compare/v1.0.16...v1.0.17) (2023-06-06)
66 |
67 | **Note:** Version bump only for package landingai-react
68 |
--------------------------------------------------------------------------------
/packages/react/README.md:
--------------------------------------------------------------------------------
1 | # `landingai-react`
2 |
3 | React components to fetch and render predictions from an image.
4 |
5 | ## Usage
6 |
7 | ```jsx
8 | import React from 'react';
9 | import { useState } from "react";
10 | import { InferenceContext, InferenceResult, PhotoCollector } from "landingai-react";
11 |
12 | const apiInfo = {
13 | endpoint: `https://predict.app.landing.ai/inference/v1/predict?endpoint_id=`,
14 | key: "",
15 | secret: "",
16 | };
17 |
18 | export default function App() {
19 | const [image, setImage] = useState();
20 |
21 | return (
22 |
23 |
24 |
25 |
26 | );
27 | }
28 | ```
29 |
30 | **References**
31 |
32 | * [How to get an endpoint](https://support.landing.ai/docs/cloud-deployment)
33 | * [How to get api key and secret](https://support.landing.ai/docs/api-key-and-api-secret)
34 |
35 |
36 | ## Examples
37 | - [Codesandbox React example](https://codesandbox.io/s/eloquent-tesla-yzsbsk?file=/src/App.js)
38 |
--------------------------------------------------------------------------------
/packages/react/__tests__/InferenceResult.test.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { InferenceContext } from '../lib/context/InferenceContext';
3 | import { InferenceResult } from '../lib/components/InferenceResult';
4 | import { ApiInfo, InferenceResult as InferenceResultType } from 'landingai';
5 | import { render, screen } from '@testing-library/react';
6 | import { vi } from 'vitest';
7 |
// Canned object-URL returned by the createObjectURL stub below.
const INFERENCE_URL =
  'blob:https://test.platform.landingai.io/3828596b-5227-4e40-ab68-343934fc9ebf';

// jsdom does not implement object-URL APIs; stub them for components that
// build blob previews.
global.URL.createObjectURL = vi.fn(() => INFERENCE_URL);
global.URL.revokeObjectURL = vi.fn(() => {});
13 |
// Harness component: fixed ApiInfo credentials plus an in-memory PNG file
// for the component under test.
// NOTE(review): the returned JSX was lost in extraction — presumably an
// InferenceContext provider wrapping InferenceResult with `file` as the
// image; confirm against the repository.
const TestComponent: React.FC<{}> = () => {
  const apiInfo: ApiInfo = {
    endpoint: 'http://localhost/predict',
    key: '',
    secret: '',
  };
  const file = new File(['mocked content'], 'mock.png', { type: 'image/png' });

  return (


  );
};
28 |
29 | const mockFetchResult = (result: InferenceResultType) => {
30 | const mockedFetch = vi.fn(() => {
31 | return Promise.resolve({
32 | ok: true,
33 | status: 200,
34 | json: () => Promise.resolve(result),
35 | text: () => Promise.resolve(JSON.stringify(result)),
36 | });
37 | });
38 | // @ts-ignore simple mock fetch for this test need, no need to be too precise
39 | global.fetch = mockedFetch;
40 | };
41 |
it('InferenceResult - basic rendering for bounding boxes', async () => {
  // Object-detection shaped payload: one backbone box prediction plus an
  // overall classification score.
  mockFetchResult({
    backbonepredictions: {
      'mocked-uuid': {
        coordinates: { xmin: 100, ymin: 100, xmax: 200, ymax: 200 },
        labelIndex: 1,
        labelName: 'test-label-1',
        score: 0.97,
      },
    },
    predictions: {
      labelIndex: 1,
      labelName: 'NG',
      score: 0.98,
    }
  });

  // NOTE(review): the rendered element was lost in extraction — presumably
  // render(<TestComponent />); confirm against the repository.
  render();
  // Detection summary lines must appear; the classification line must not.
  await screen.findByText('Total: 1 objects detected');
  await screen.findByText('Number of test-label-1');
  expect(screen.queryByText(/Class: .*/)).toBeNull();
});
64 |
it('InferenceResult - basic rendering for classification', async () => {
  // Classification-only payload: no backbone predictions at all.
  mockFetchResult({
    backbonepredictions: null,
    predictions: {
      labelIndex: 1,
      labelName: 'Dog',
      score: 0.98,
    }
  });
  // NOTE(review): the rendered element was lost in extraction — presumably
  // render(<TestComponent />); confirm against the repository.
  render();
  // Classification line appears; detection summaries must not.
  await screen.findByText('Class: Dog');
  expect(screen.queryByText(/Total: \d+ objects detected/)).toBeNull();
  expect(screen.queryByText(/Number of .*/)).toBeNull();
});
79 |
--------------------------------------------------------------------------------
/packages/react/__tests__/PhotoCollector.test.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import { PhotoCollector } from '../lib/components/PhotoCollector';
3 | import { vi } from 'vitest';
4 | import { render, screen } from '@testing-library/react';
5 | import userEvent from '@testing-library/user-event';
6 |
it('should return correct image blob', async () => {
  const setImage = vi.fn();
  // NOTE(review): the rendered element was lost in extraction — presumably
  // render(<PhotoCollector setImage={setImage} />); confirm against the repo.
  render();

  const file = new File(['mocked content'], 'mock.png', { type: 'image/png' });
  userEvent.upload(await screen.findByTestId('select-photo-input'), [file]);

  // Give the async compress + setImage callback time to complete.
  await new Promise((resolve) => setTimeout(resolve, 100));

  // The component compresses the upload, so only the value's type is checked.
  expect(setImage.mock.lastCall[0]).toBeInstanceOf(File);
});
--------------------------------------------------------------------------------
/packages/react/build.js:
--------------------------------------------------------------------------------
#!/usr/bin/env node
// Build script: bundles the package entry point with esbuild, compiling
// CSS modules through esbuild-css-modules-plugin.
const esbuild = require('esbuild');
const CssModulesPlugin = require('esbuild-css-modules-plugin');

const buildOptions = {
  logLevel: 'info',
  entryPoints: ['index.tsx'],
  bundle: true,
  outdir: 'dist',
  minify: true,
  sourcemap: true,
  packages: 'external',
  platform: 'node',
  plugins: [CssModulesPlugin()],
};

// esbuild already logs failures at logLevel 'info'; just exit non-zero.
esbuild.build(buildOptions).catch(() => process.exit(1));
--------------------------------------------------------------------------------
/packages/react/index.tsx:
--------------------------------------------------------------------------------
// Public entry point for landingai-react.
// The polyfill import is side-effect-only and deliberately first, so
// window.OffscreenCanvas exists before any component code runs.
import './lib/polyfill';
export * from './lib/components/InferenceResult';
export * from './lib/components/PhotoCollector';
export * from './lib/context/InferenceContext';
5 |
--------------------------------------------------------------------------------
/packages/react/jest.config.js:
--------------------------------------------------------------------------------
// Jest configuration for this package. Note: package.json's `test` script
// runs vitest; this config remains for jest-based tooling.
module.exports = {
  transform: {
    // Compile TypeScript/TSX sources with esbuild-jest.
    '^.+\\.tsx?$': 'esbuild-jest'
  },
};
--------------------------------------------------------------------------------
/packages/react/lib/components/Annotation/index.tsx:
--------------------------------------------------------------------------------
1 | import { isDark, Annotation, convertCompressedBitMapToBlob } from 'landingai';
2 | import styles from '../index.module.css';
3 | import React, { CSSProperties, useEffect, useMemo, useState } from 'react';
4 |
// Props for the label chip: display text plus the annotation's color.
interface LabelNameProps {
  name: string;
  color: string;
}

/**
 * Colored label chip for an annotation class. Text color flips between
 * white and black based on the background color's luminance (isDark).
 */
export function LabelName(props: LabelNameProps) {
  const { name, color } = props;
  const textColor = isDark(color) ? 'white': 'black';
  // NOTE(review): the wrapping element was lost in extraction (presumably a
  // styled span using `color`/`textColor`); confirm against the repository.
  return (

    {name}

  );
}
19 |
// Shared props for the annotation renderers. imageWidth/imageHeight are the
// dimensions the percentage-based overlay geometry is computed against.
interface AnnotationComponentProps {
  annotation: Annotation;
  imageWidth: number;
  imageHeight: number;
  showLabel?: boolean;
}

/**
 * Dispatches an annotation to the matching renderer: box annotations
 * (coordinates present) vs segmentation annotations (bitmap present).
 * Renders nothing when the annotation carries neither.
 */
export function AnnotationComponent(props: AnnotationComponentProps) {
  const { annotation } = props;
  // NOTE(review): the returned JSX elements were lost in extraction —
  // presumably BoxAnnotationComponent / SegAnnotationComponent with
  // {...props}, and an empty fragment fallback; confirm against the repo.
  if (annotation.coordinates) {
    return ;
  }
  if (annotation.bitmap) {
    return ;
  }
  return <>>;
}
37 |
/**
 * Draws one bounding-box annotation as an absolutely-positioned rectangle.
 * Geometry is expressed in percentages of the natural image size so the
 * overlay scales with the displayed image.
 */
function BoxAnnotationComponent(props: AnnotationComponentProps) {
  const { annotation, imageWidth, imageHeight, showLabel = false } = props;

  // Box position/size as CSS percentages; empty style when no coordinates.
  const style = useMemo(() => {
    const { coordinates, color } = annotation;
    if (coordinates) {
      const { xmin, xmax, ymin, ymax } = coordinates;
      const width = xmax - xmin;
      const height = ymax - ymin;
      return {
        left: `${(100 * xmin) / imageWidth}%`,
        top: `${(100 * ymin) / imageHeight}%`,
        width: `${(100 * width) / imageWidth}%`,
        height: `${(100 * height) / imageHeight}%`,
        borderColor: color,
      } as React.CSSProperties;
    }
    return {} as React.CSSProperties;
  }, [annotation, imageHeight, imageWidth]);

  // Label pill anchored just above the box; contrast colors derived from the
  // box color's luminance.
  // NOTE(review): imageHeight/imageWidth are listed as deps but never read in
  // this memo — looks copied from the memo above; harmless but confirm.
  const textBoundingRectStyles = useMemo(() => {
    const { color } = annotation;
    return {
      left: 0,
      top: -4,
      backgroundColor: color,
      color: isDark(color) ? 'white' : 'black',
      outlineColor: isDark(color) ? 'white' : 'black',
      transform: 'translateY(-100%)',
    } as CSSProperties;
  }, [annotation, imageHeight, imageWidth]);

  // NOTE(review): surrounding JSX tags were lost in extraction; only text
  // nodes and expression braces remain. Confirm markup against the repo.
  return (

    {showLabel && (

      {annotation.name}

    )}

  );
}
80 |
81 | function SegAnnotationComponent(props: AnnotationComponentProps) {
82 | const { annotation, imageWidth, imageHeight } = props;
83 | const { bitmap, color } = annotation;
84 | if (!bitmap) {
85 | return null;
86 | }
87 |
88 | const [imgSrc, setImgSrc] = useState();
89 | useEffect(() => {
90 | let url: string | undefined = undefined;
91 | convertCompressedBitMapToBlob(bitmap, color, imageWidth, imageHeight).then(res => {
92 | url = URL.createObjectURL(res);
93 | setImgSrc(url);
94 | });
95 | return () => {
96 | if (url) {
97 | URL.revokeObjectURL(url);
98 | }
99 | };
100 | }, []);
101 |
102 | if (!imgSrc) {
103 | return null;
104 | }
105 |
106 | return (
107 |
108 | );
109 | }
--------------------------------------------------------------------------------
/packages/react/lib/components/InferenceResult.tsx:
--------------------------------------------------------------------------------
1 | import {
2 | predictionsToAnnotations,
3 | getInferenceResult,
4 | InferenceResult as InferenceResultType,
5 | ApiError
6 | } from 'landingai';
7 | import styles from './index.module.css';
8 | import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
9 | import { useInferenceContext } from '../context/InferenceContext';
10 | import { AnnotationComponent, LabelName } from './Annotation';
11 |
export interface InferenceResultProps {
  /**
   * Image to run inference on; a placeholder is shown while absent.
   */
  image?: Blob;
  /**
   * Show labels for predicted annotations. Default is false.
   */
  showLabels?: boolean;
  /**
   * Called when there is predict error.
   */
  onPredictError?: (err: ApiError) => void;
}

/**
 * Inference result component.
 *
 * Renders the image, calls predict API and renders predictions over the image.
 *
 * Also provides summaries of the results.
 */
export const InferenceResult: React.FC = (props) => {
  const { image, showLabels = false, onPredictError } = props;
  // Endpoint + credentials supplied by the nearest InferenceContext provider.
  const apiInfo = useInferenceContext();

  // Ref to the rendered image element; gates annotation overlay rendering.
  const imageRef = useRef(null);

  // inference results
  const [inferenceResult, setInferenceResult] = useState();
  // Raw predictions mapped to drawable annotations (boxes / bitmaps).
  const annotations = useMemo(() => {
    return predictionsToAnnotations(inferenceResult);
  }, [inferenceResult]);
  // Top-level classification label, '' when none is present.
  const className = useMemo(() => {
    return inferenceResult?.predictions?.labelName ?? '';
  }, [inferenceResult]);

  const [isLoading, setIsLoading] = useState(false);
  // Object URL of the current image, used as the preview source.
  const [preview, setPreview] = useState();

  // Per-label {name, color, count} summaries derived from the annotations.
  const annotationCounts = useMemo(() => {
    const labelCountByName = annotations.reduce((acc, ann) => ({
      ...acc,
      [ann.name]: {
        name: ann.name,
        color: ann.color,
        count: ((acc[ann.name] as any)?.count ?? 0) + 1,
      }
    }), {} as Record); // NOTE(review): Record type arguments lost in extraction
    return Object.values(labelCountByName);
  }, [annotations]);

  // Calls the predict API and stores the result; errors are forwarded to
  // onPredictError rather than rethrown.
  const onPredict = useCallback(
    async (image: Blob) => {
      try {
        setIsLoading(true);
        const result = await getInferenceResult(apiInfo, image);
        setInferenceResult(result);
      } catch (err) {
        onPredictError?.(err as any);
      } finally {
        setIsLoading(false);
      }
    },
    [apiInfo]
  );

  // Build an object URL for the preview whenever the image changes.
  useEffect(() => {
    if (image) {
      const objectUrl = URL.createObjectURL(image);
      setPreview(objectUrl);

      // free memory when ever this component is unmounted
      return () => URL.revokeObjectURL(objectUrl);
    }
  }, [image]);

  // Re-run prediction whenever a new image arrives.
  useEffect(() => {
    if (image) {
      onPredict(image);
    }
  }, [image, onPredict]);

  // NOTE(review): most JSX tags below were lost in extraction; only text
  // nodes and expression braces remain. Confirm markup against the repo.
  return (
    <>

      {!preview && (

        Your image will be displayed here.

      )}
      {/* Image and annotations like boxes / segmentation_mask */}
      {preview && (


      {imageRef.current && !isLoading &&
        annotations.map((annotation) => (

        ))}
      {isLoading &&
}

      )}
      {/* Summaries of predictions */}
      {preview && !isLoading && !!inferenceResult &&
      {inferenceResult?.type === 'ClassificationPrediction' && !inferenceResult.backbonetype &&
Class: {className}
}
      {inferenceResult.backbonetype === 'ObjectDetectionPrediction' && (
        <>

Total: {annotations.length} objects detected
      {annotationCounts.map(({ name, count, color }) => (

        Number of
        {count}

      ))}
      >
      )}
      {(inferenceResult?.type === 'SegmentationPrediction' || inferenceResult.backbonetype === 'SegmentationPrediction') && (
        <>

Legend

      {annotationCounts.map(({ name, color }) => (



      ))}

      >
      )}

}

    >
  );
};
155 |
--------------------------------------------------------------------------------
/packages/react/lib/components/PhotoCollector.tsx:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import styles from './index.module.css';
3 | import { EImageType, compress } from 'image-conversion';
4 |
export type PhotoCollectorProps = {
  // Receives the compressed photo once the user has picked one.
  setImage: (image: Blob) => void;
};

/**
 * Photo collector. This component will compress image so that:
 * 1. The image does not go too large
 * 2. EXIF attributes, such as orientations, are removed
 */
export const PhotoCollector: React.FC = (props) => {
  const { setImage } = props;

  // Change handler for the file input: compresses the first selected file
  // to JPEG and hands it to the caller.
  const capture = async (e: React.SyntheticEvent) => {
    const files = (e.target as HTMLInputElement).files;
    if (files?.length && files[0]) {
      // Compress to remove EXIF information like orientation
      const compressed = await compress(files[0], {
        size: 1000, // image-conversion `size` option — presumably target KB; confirm
        type: EImageType.JPEG,
        quality: 0.9,
      });
      setImage(compressed);
    }
  };

  // NOTE(review): JSX markup lost in extraction — presumably a container with
  // a button and a file input (data-testid="select-photo-input", see the
  // tests) wired to `capture`; confirm against the repository.
  return (




  );
};
37 |
--------------------------------------------------------------------------------
/packages/react/lib/components/index.module.css:
--------------------------------------------------------------------------------
1 | .bottomButton {
2 | margin-top: auto;
3 | margin-bottom: 8px;
4 | }
5 |
6 | .inputGroup {
7 | display: flex;
8 | flex-direction: column;
9 | margin-bottom: 0.5rem;
10 | }
11 |
12 | .errorLabel {
13 | color: red;
14 | }
15 |
16 | .inferenceResult {
17 | position: relative;
18 | margin-top: 16px;
19 | }
20 |
21 | .inferenceSummary {
22 | margin: 16px 0;
23 | display: flex;
24 | flex-direction: column;
25 | padding: 12px;
26 | gap: 12px;
27 | background-color: #f4f4f4;
28 | min-height: 5rem;
29 | font-size: 15px;
30 | font-weight: 600;
31 | border-radius: 0.25rem;
32 | }
33 |
34 | .boxAnnotation {
35 | position: absolute;
36 | border-width: 2px;
37 | border-style: solid;
38 | }
39 |
40 | .segAnnotation {
41 | position: absolute;
42 | left: 0;
43 | top: 0;
44 | width: 100%;
45 | height: 100%;
46 | }
47 |
48 | .labelNameCount {
49 | display: flex;
50 | justify-content: space-between;
51 | font-weight: 300;
52 | }
53 |
54 | .text {
55 | position: absolute;
56 | font-size: 0.8rem;
57 | display: flex;
58 | align-items: center;
59 | justify-content: center;
60 | padding: 2px 6px;
61 | border-radius: 4px;
62 | outline-width: 1px;
63 | outline-style: solid;
64 | }
65 |
66 | .imageContainer {
67 | position: relative;
68 | }
69 |
70 | .preview {
71 | border-radius: 0.25rem;
72 | }
73 |
74 | .photoPlaceholder {
75 | width: 100%;
76 | background-color: #f4f4f4;
77 | padding: 12px;
78 | padding-bottom: 100%;
79 | border-radius: 0.25rem;
80 | }
81 |
82 | .photoCollectorContainer {
83 | position: relative;
84 | display: flex;
85 | flex-direction: column;
86 | }
87 |
88 | .photoCollectorButton {
89 | height: 3rem;
90 | font-size: 1rem;
91 | }
92 |
93 | .fileInput {
94 | opacity: 0;
95 | position: absolute;
96 | width: 100%;
97 | height: 100%;
98 | }
99 |
100 | .blur {
101 | filter: blur(0.5rem);
102 | }
103 |
104 | .ring {
105 | display: inline-block;
106 | position: absolute;
107 | left: 50%;
108 | top: 50%;
109 | transform: translate(-50%, -50%);
110 | width: 80px;
111 | height: 80px;
112 | }
113 |
114 | .ring:after {
115 | content: " ";
116 | display: block;
117 | width: 64px;
118 | height: 64px;
119 | margin: 8px;
120 | border-radius: 50%;
121 | border: 6px solid #0056fe;
122 | border-color: #0056fe transparent #0056fe transparent;
123 | animation: ring 1.2s linear infinite;
124 | }
125 |
126 | @keyframes ring {
127 | 0% {
128 | transform: rotate(0deg);
129 | }
130 | 100% {
131 | transform: rotate(360deg);
132 | }
133 | }
134 |
135 | .labelName {
136 | padding: 0.1rem 0.5rem;
137 | border-radius: 0.3rem;
138 | border: 1px solid transparent;
139 | }
140 |
141 | .legend {
142 | display: flex;
143 | flex-wrap: wrap;
144 | }
145 |
146 | .legend .labelNameCount + .labelNameCount {
147 | margin-left: 0.5rem;
148 | }
--------------------------------------------------------------------------------
/packages/react/lib/context/InferenceContext.tsx:
--------------------------------------------------------------------------------
1 | import { ApiInfo } from 'landingai';
2 | import { createContext, useContext } from 'react';
3 |
// Context value shape: the predict endpoint plus API credentials.
export type InferenceContextState = ApiInfo;

// Default (empty) credentials used when no provider is mounted above.
export const initialInferenceContext: InferenceContextState = {
  endpoint: '',
  key: '',
  secret: '',
};

// Carries ApiInfo down to InferenceResult and other consumers.
export const InferenceContext = createContext(initialInferenceContext);

// Convenience hook for reading the nearest InferenceContext value.
export const useInferenceContext = () => useContext(InferenceContext);
--------------------------------------------------------------------------------
/packages/react/lib/polyfill.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * window.OffscreenCanvas
3 | * https://developer.mozilla.org/en-US/docs/Web/API/OffscreenCanvas
4 | */
5 | if (typeof window !== 'undefined' && !window.OffscreenCanvas) {
6 | // @ts-ignore
7 | window.OffscreenCanvas = class OffscreenCanvas {
8 | canvas: any;
9 | constructor(width: number, height: number) {
10 | this.canvas = document.createElement('canvas');
11 | this.canvas.width = width;
12 | this.canvas.height = height;
13 |
14 | this.canvas.convertToBlob = () => {
15 | return new Promise(resolve => {
16 | this.canvas.toBlob(resolve);
17 | });
18 | };
19 |
20 | return this.canvas;
21 | }
22 | };
23 | }
24 |
25 | export default {};
26 |
--------------------------------------------------------------------------------
/packages/react/lib/types.d.ts:
--------------------------------------------------------------------------------
// Ambient declaration so TypeScript accepts CSS-module imports as a map of
// class names to generated identifiers (the bundler produces the real map).
declare module '*.module.css' {
  const styles: { [key: string]: string };
  export default styles;
}
--------------------------------------------------------------------------------
/packages/react/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "landingai-react",
3 | "version": "1.0.24",
4 |   "description": "React components to fetch and render predictions from an image.",
5 | "author": "Lan Tian ",
6 | "homepage": "https://github.com/landing-ai/landingai-js#readme",
7 | "license": "MIT",
8 | "main": "dist/index.js",
9 | "directories": {
10 | "lib": "lib",
11 | "test": "__tests__"
12 | },
13 | "files": [
14 | "dist",
15 | "types"
16 | ],
17 | "types": "types/index.d.ts",
18 | "repository": {
19 | "type": "git",
20 | "url": "git+https://github.com/landing-ai/landingai-js.git"
21 | },
22 | "scripts": {
23 | "tsc": "tsc",
24 | "build": "node build.js",
25 | "test": "vitest",
26 | "coverage": "vitest run --coverage"
27 | },
28 | "bugs": {
29 | "url": "https://github.com/landing-ai/landingai-js/issues"
30 | },
31 | "peerDependencies": {
32 | "react": "^16.8.0 || ^17.0.0",
33 | "react-dom": "^16.8.0 || ^17.0.0"
34 | },
35 | "dependencies": {
36 | "image-conversion": "^2.1.1",
37 | "landingai": "^1.0.24"
38 | },
39 | "devDependencies": {
40 | "@testing-library/react": "^12.1.5",
41 | "@testing-library/user-event": "^12.1.5",
42 | "@types/react": "^16.8.0 || ^17.0.0",
43 | "@types/react-dom": "^16.8.0 || ^17.0.0",
44 | "esbuild": "^0.17.19",
45 | "esbuild-css-modules-plugin": "^2.7.1",
46 | "esbuild-jest": "^0.5.0",
47 | "eslint-config-prettier": "^8.8.0",
48 | "jest": "^29.5.0",
49 | "jsdom": "^22.1.0",
50 | "msw": "^1.2.1",
51 | "react": "^17.0.0",
52 | "react-dom": "^17.0.0",
53 | "vitest": "^0.31.1"
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/packages/react/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "outDir": "types",
5 | "target": "es2018",
6 | "lib": ["dom", "dom.iterable", "esnext"],
7 | "allowJs": true,
8 | "skipLibCheck": true,
9 | "strict": true,
10 | "forceConsistentCasingInFileNames": true,
11 | "declaration": true,
12 | "emitDeclarationOnly": true,
13 | "esModuleInterop": true,
14 | "module": "esnext",
15 | "moduleResolution": "node",
16 | "resolveJsonModule": true,
17 | "isolatedModules": true,
18 | "jsx": "react",
19 | "declarationMap": true,
20 | "incremental": true
21 | },
22 | "include": ["**/*.ts", "**/*.tsx"],
23 | "exclude": ["node_modules", "types", "dist", "**/*.test.ts", "**/*.test.tsx"]
24 | }
25 |
--------------------------------------------------------------------------------
/packages/react/typedoc.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": ["../../typedoc.base.json"],
3 | "entryPoints": ["index.tsx"]
4 | }
5 |
--------------------------------------------------------------------------------
/packages/react/vitest.config.ts:
--------------------------------------------------------------------------------
///
// NOTE(review): the triple-slash directive above lost its payload in
// extraction — presumably `/// <reference types="vitest" />`; confirm.

import { defineConfig } from 'vitest/config';

// Vitest configuration: jsdom environment for React component tests, a
// generous timeout for the async rendering flows, and no watch mode by
// default so `yarn test` exits in CI.
export default defineConfig({
  test: {
    include: ['**/*.test.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
    globals: true,
    environment: 'jsdom',
    testTimeout: 20000,
    setupFiles: './vitest.setup.tsx',
    watch: false,
    threads: false, // disable worker threads so that canvas runs in main thread
    coverage: {
      reporter: ['text', 'json', 'html'],
    },
  },
});
19 |
--------------------------------------------------------------------------------
/packages/react/vitest.setup.tsx:
--------------------------------------------------------------------------------
// Intentionally empty setup module (referenced by vitest.config.ts
// `setupFiles`); add global test hooks here when needed.
export default {};
--------------------------------------------------------------------------------
/typedoc.base.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://typedoc.org/schema.json",
3 | "includeVersion": true
4 | }
5 |
--------------------------------------------------------------------------------
/typedoc.json:
--------------------------------------------------------------------------------
1 | {
2 | "entryPoints": ["packages/*"],
3 | "name": "Landing AI JS Library",
4 | "entryPointStrategy": "packages",
5 | "includeVersion": true
6 | }
7 |
--------------------------------------------------------------------------------