├── .flake8
├── .gitattributes
├── .github
│   └── workflows
│       ├── ci_cd.yml
│       ├── docs.yml
│       └── stale_prs.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE.md
├── README.md
├── _infra
│   └── deploy
│       ├── ocr-streamlit-docker-build-image.yaml
│       └── ocr-streamlit-k8s-deploy.yaml
├── assets
│   └── landing-logo.png
├── docs
│   ├── _README.md
│   ├── _overrides
│   │   └── main.html
│   ├── api
│   │   ├── common.md
│   │   ├── data_management.md
│   │   ├── exceptions.md
│   │   ├── image_source_ops.md
│   │   ├── notebook_utils.md
│   │   ├── pipeline.md
│   │   ├── postprocess.md
│   │   ├── predict.md
│   │   ├── st_utils.md
│   │   ├── storage.md
│   │   ├── telemetry.md
│   │   ├── timer.md
│   │   ├── transform.md
│   │   ├── utils.md
│   │   └── visualize.md
│   ├── changelog.md
│   ├── contributing.md
│   ├── examples.md
│   ├── image-acquisition
│   │   ├── image-acquisition.md
│   │   ├── image-file.md
│   │   ├── network-cameras.md
│   │   ├── screenshots.md
│   │   ├── video-file.md
│   │   └── webcam.md
│   ├── image-operations.md
│   ├── images
│   │   ├── cereal-ops
│   │   │   ├── cereal-brightness-0.1.jpeg
│   │   │   ├── cereal-brightness-2.0.jpeg
│   │   │   ├── cereal-color-0.1.jpeg
│   │   │   ├── cereal-color-2.0.jpeg
│   │   │   ├── cereal-contrast-0.1.jpeg
│   │   │   ├── cereal-contrast-2.0.jpeg
│   │   │   ├── cereal-crop-predictions-0.png
│   │   │   ├── cereal-crop-predictions-1.png
│   │   │   ├── cereal-crop-predictions-2.png
│   │   │   ├── cereal-cropped.jpeg
│   │   │   ├── cereal-resized-both.jpeg
│   │   │   ├── cereal-resized-width.jpeg
│   │   │   ├── cereal-sharpness-0.1.jpeg
│   │   │   ├── cereal-sharpness-2.0.jpeg
│   │   │   └── cereal.jpeg
│   │   ├── coffee-mug-overlay.png
│   │   ├── copy-endpoint-id.png
│   │   ├── handwriting-hello.png
│   │   ├── landing-apikey.png
│   │   ├── landing-coffee-mug-class.png
│   │   ├── landing-console-train-deploy.png
│   │   ├── landing-deploy-page.png
│   │   ├── landing-deploy-popup.png
│   │   ├── menu-api-key.png
│   │   └── metadata-management-ui.png
│   ├── index.md
│   ├── inferences
│   │   ├── docker-deployment.md
│   │   ├── frames-inference.md
│   │   ├── getting-started.md
│   │   ├── ocr.md
│   │   ├── overlaying-predictions.md
│   │   └── snowflake-native-app.md
│   └── metadata.md
├── examples
│   ├── apps
│   │   ├── assets
│   │   │   └── favicon.ico
│   │   ├── crack-measurer
│   │   │   ├── README.md
│   │   │   ├── app.py
│   │   │   ├── pages
│   │   │   │   └── 1_Crack_Measurer.py
│   │   │   └── requirements.txt
│   │   ├── image-folder-support
│   │   │   ├── README.md
│   │   │   ├── app.py
│   │   │   └── requirements.txt
│   │   ├── license-plate-ocr
│   │   │   ├── README.md
│   │   │   ├── app.py
│   │   │   └── requirements.txt
│   │   ├── object-tracking
│   │   │   ├── README.md
│   │   │   ├── assets
│   │   │   │   └── app.png
│   │   │   ├── download_data.py
│   │   │   ├── main.py
│   │   │   ├── object_tracking.py
│   │   │   ├── pages
│   │   │   │   └── run_inference.py
│   │   │   └── requirements.txt
│   │   ├── ocr
│   │   │   ├── README.md
│   │   │   ├── app.py
│   │   │   ├── requirements.txt
│   │   │   ├── roi.py
│   │   │   └── static
│   │   │       └── LandingLens_OCR_logo.svg
│   │   ├── surfer-count
│   │   │   ├── README.md
│   │   │   ├── app.py
│   │   │   ├── pages
│   │   │   │   └── run_inference.py
│   │   │   └── requirements.txt
│   │   └── zoom-app
│   │       ├── README.md
│   │       ├── Zoom_Demo.ipynb
│   │       ├── app.py
│   │       ├── pages
│   │       │   └── run_inference.py
│   │       └── requirements.txt
│   ├── capture-service
│   │   ├── README.md
│   │   └── run.py
│   ├── edge-models
│   │   ├── README.md
│   │   ├── requirements.txt
│   │   ├── requirements_win.txt
│   │   ├── run_bundle_cls.py
│   │   └── run_bundle_od.py
│   ├── license-plate-ocr-notebook
│   │   └── license_plate_ocr.ipynb
│   ├── post-processings
│   │   └── farmland-coverage
│   │       └── farmland-coverage.ipynb
│   ├── rtsp-capture-notebook
│   │   └── rtsp-capture.ipynb
│   ├── video-analytics-notebook
│   │   └── video-analytics.ipynb
│   └── webcam-collab-notebook
│       ├── sample_images
│       │   └── card_01.jpeg
│       └── webcam-collab-notebook.ipynb
├── landingai
│   ├── __init__.py
│   ├── common.py
│   ├── data_management
│   │   ├── __init__.py
│   │   ├── client.py
│   │   ├── dataset.py
│   │   ├── label.py
│   │   ├── media.py
│   │   ├── metadata.py
│   │   └── utils.py
│   ├── exceptions.py
│   ├── fonts
│   │   └── default_font_ch_en.ttf
│   ├── image_source_ops.py
│   ├── notebook_utils.py
│   ├── pipeline
│   │   ├── __init__.py
│   │   ├── frameset.py
│   │   ├── image_source.py
│   │   └── postprocessing.py
│   ├── postprocess.py
│   ├── predict
│   │   ├── __init__.py
│   │   ├── cloud.py
│   │   ├── edge.py
│   │   ├── ocr.py
│   │   ├── snowflake.py
│   │   └── utils.py
│   ├── st_utils.py
│   ├── storage
│   │   ├── __init__.py
│   │   ├── data_access.py
│   │   └── snowflake.py
│   ├── telemetry.py
│   ├── timer.py
│   ├── transform.py
│   ├── utils.py
│   └── visualize.py
├── mkdocs.yml
├── pdocs
│   ├── assets
│   │   └── Metadata_Management_UI.png
│   ├── developer_guide
│   │   ├── 1_main.md
│   │   └── __init__.py
│   ├── templates
│   │   └── module.html.jinja2
│   └── user_guide
│       ├── 1_concepts.md
│       ├── 2_credentials.md
│       ├── 3_postprocess.md
│       ├── 4_pipelines.md
│       ├── 5_data_management.md
│       └── __init__.py
├── poetry.lock
├── pyproject.toml
└── tests
    ├── conftest.py
    ├── data
    │   ├── images
    │   │   ├── cameraman.tiff
    │   │   ├── cereal-tiny
    │   │   │   ├── brightness-1.5.jpeg
    │   │   │   ├── color-1.5.jpeg
    │   │   │   ├── contrast-1.5.jpeg
    │   │   │   ├── original.jpeg
    │   │   │   └── sharpness-1.5.jpeg
    │   │   ├── cereal1.jpeg
    │   │   ├── expected_bbox_overlay.png
    │   │   ├── expected_ocr_overlay.png
    │   │   ├── expected_vp_masks.png
    │   │   ├── farm-coverage.jpg
    │   │   ├── ocr_test.png
    │   │   ├── palettized_image.png
    │   │   └── wildfire1.jpeg
    │   ├── responses
    │   │   ├── default_class_model_response.yaml
    │   │   ├── default_vp_model_response.yaml
    │   │   ├── test_edge_class_predict.yaml
    │   │   ├── test_edge_od_predict.yaml
    │   │   ├── test_edge_seg_predict.yaml
    │   │   ├── test_get_label_map.yaml
    │   │   ├── test_ocr_predict.yaml
    │   │   ├── test_read_stream.yaml
    │   │   ├── test_update_split_key_bulk_update.yaml
    │   │   ├── test_update_split_key_unassigned.yaml
    │   │   ├── v1_media_list_by_one_status.yaml
    │   │   ├── v1_media_list_by_three_status.yaml
    │   │   ├── v1_media_list_filter_by_split.yaml
    │   │   ├── v1_media_upload_folder.yaml
    │   │   ├── v1_media_upload_metadata.yaml
    │   │   ├── v1_media_upload_single_file.yaml
    │   │   ├── v1_predict_status_300.yaml
    │   │   ├── v1_predict_status_400.yaml
    │   │   ├── v1_predict_status_401.yaml
    │   │   ├── v1_predict_status_403.yaml
    │   │   ├── v1_predict_status_404.yaml
    │   │   ├── v1_predict_status_422.yaml
    │   │   ├── v1_predict_status_429.yaml
    │   │   ├── v1_predict_status_500.yaml
    │   │   ├── v1_predict_status_503.yaml
    │   │   ├── v1_predict_status_504.yaml
    │   │   ├── v1_set_metadata.yaml
    │   │   ├── v1_set_metadata_multiple_medias.yaml
    │   │   └── v1_set_metadata_single_media.yaml
    │   └── videos
    │       ├── countdown.mp4
    │       └── test.mp4
    ├── integration
    │   └── landingai
    │       ├── test_dataset.py
    │       └── test_predict_e2e.py
    └── unit
        └── landingai
            ├── data_management
            │   ├── test_label.py
            │   ├── test_media.py
            │   └── test_metadata.py
            ├── pipeline
            │   ├── test_frameset.py
            │   ├── test_image_source.py
            │   └── test_postprocessing.py
            ├── storage
            │   ├── test_data_access.py
            │   └── test_snowflake.py
            ├── test_common.py
            ├── test_image_source_ops.py
            ├── test_postprocess.py
            ├── test_predict.py
            ├── test_telemetry.py
            ├── test_timer.py
            ├── test_utils.py
            └── test_visualize.py
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | extend-ignore = E501,E203
3 | max-line-length = 88
4 | max-complexity = 15
5 | per-file-ignores = __init__.py:F401
6 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.github/workflows/ci_cd.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | push:
4 | branches: [ main ]
5 | pull_request:
6 | branches: [ main ]
7 |
8 | jobs:
9 | Test:
10 | strategy:
11 | matrix:
12 | python-version: ["3.9", "3.10", "3.11", "3.12"]
13 | os: [ ubuntu-22.04, windows-2022, macos-12 ]
14 | runs-on: ${{ matrix.os }}
15 | steps:
16 | - uses: actions/checkout@v3
17 | - uses: actions/setup-python@v4
18 | with:
19 | python-version: ${{ matrix.python-version }}
20 | - name: Install Python Poetry
21 | uses: abatilo/actions-poetry@v2.1.0
22 | with:
23 | poetry-version: 1.4.2
24 | - name: Configure poetry
25 | shell: bash
26 | run: poetry config virtualenvs.in-project true
27 | - name: Print Python environment information
28 | run: |
29 | poetry env info
30 | poetry --version
31 | poetry run pip -V
32 | - name: Install dependencies
33 | run: |
34 | # Install main dependencies first so we can see their size
35 | poetry install --all-extras
36 | - name: Linting
37 | run: |
38 | # stop the build if there are Python syntax errors or undefined names
39 | poetry run ruff .
40 | - name: Check Format
41 | run: |
42 | poetry run ruff format --check --diff .
43 | - name: Type Checking
44 | run: |
45 | poetry run mypy landingai
46 | - name: Test with pytest
47 | run: |
48 | poetry run pytest -v tests
49 |
50 | Release:
51 | needs: Test
52 | # https://github.community/t/how-do-i-specify-job-dependency-running-in-another-workflow/16482
53 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' && !contains(github.event.head_commit.message, 'chore(release):') && !contains(github.event.head_commit.message, '[skip release]')
54 | runs-on: ubuntu-latest
55 | steps:
56 | - uses: actions/setup-python@v4
57 | with:
58 | python-version: 3.10.11
59 | - name: Install Python Poetry
60 | uses: abatilo/actions-poetry@v2.1.0
61 | with:
62 | poetry-version: 1.4.2
63 | - name: Configure poetry
64 | shell: bash
65 | run: poetry config virtualenvs.in-project true
66 | - name: Checkout code
67 | uses: actions/checkout@v3
68 | with:
69 | token: ${{ secrets.GH_TOKEN }}
70 | - name: setup git config
71 | run: |
72 | git config user.name "GitHub Actions Bot"
73 | git config user.email "yazhou.cao@landing.ai"
74 | - name: Bump up version
75 | run: |
76 | poetry version patch
77 | git add pyproject.toml
78 | new_version=`poetry version`
79 | git commit -m "[skip ci] chore(release): ${new_version}"
80 | git push -f
81 | - name: Publish to PyPI
82 | run: |
83 | poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
84 | poetry publish --build -vvv
85 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: pdoc
2 |
3 | # build the documentation whenever there are new commits on main
4 | on:
5 | push:
6 | branches:
7 | - main
8 | # Alternative: only build for tags.
9 | # tags:
10 | # - '*'
11 |
12 | # security: restrict permissions for CI jobs.
13 | permissions:
14 | contents: read
15 |
16 | jobs:
17 | # Build the documentation and upload the static HTML files as an artifact.
18 | build:
19 | runs-on: ubuntu-latest
20 | steps:
21 | - uses: actions/checkout@v3
22 | - uses: actions/setup-python@v4
23 | with:
24 | python-version: 3.10.11
25 |
26 | - uses: Gr1N/setup-poetry@v8
27 | with:
28 | poetry-version: "1.2.2"
29 |
30 | - run: poetry install --all-extras
31 | - run: mkdir -p docs-build
32 | #- run: poetry run pdoc -t pdocs/templates --docformat=numpy -o docs-build landingai
33 | - run: poetry run mkdocs build -f mkdocs.yml -d docs-build/
34 |
35 | - uses: actions/upload-pages-artifact@v1
36 | with:
37 | path: docs-build/
38 |
39 | # Deploy the artifact to GitHub pages.
40 | # This is a separate job so that only actions/deploy-pages has the necessary permissions.
41 | deploy:
42 | needs: build
43 | runs-on: ubuntu-latest
44 | permissions:
45 | pages: write
46 | id-token: write
47 | environment:
48 | name: github-pages
49 | url: ${{ steps.deployment.outputs.page_url }}
50 | steps:
51 | - id: deployment
52 | uses: actions/deploy-pages@v2
53 |
--------------------------------------------------------------------------------
/.github/workflows/stale_prs.yml:
--------------------------------------------------------------------------------
1 | name: 'Close stale PRs'
2 | on:
3 | schedule:
4 | - cron: '30 1 * * *'
5 |
6 | jobs:
7 | stale:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/stale@v5
11 | with:
12 | stale-pr-message: 'This PR is stale because it has been open 15 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
13 | close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'
14 | days-before-stale: 15
15 | days-before-close: 7
16 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Prerequisites
2 | *.d
3 |
4 | # Object files
5 | *.o
6 | *.ko
7 | *.obj
8 | *.elf
9 |
10 | # Env files
11 | .env
12 |
13 | # Precompiled Headers
14 | *.gch
15 | *.pch
16 |
17 | # Libraries
18 | *.lib
19 | *.a
20 | *.la
21 | *.lo
22 |
23 | # Shared objects (inc. Windows DLLs)
24 | *.dll
25 | *.so
26 | *.so.*
27 | *.dylib
28 |
29 | # Executables
30 | *.exe
31 | *.out
32 | *.app
33 | *.i*86
34 | *.x86_64
35 | *.hex
36 |
37 | # Debug files
38 | *.dSYM/
39 | *.su
40 |
41 | # Mac files
42 | .DS_Store
43 | .DS_STORE
44 |
45 | # Old HG stuff
46 | .hg
47 | .hgignore
48 | .hgtags
49 |
50 | .git
51 | __pycache__
52 | .ipynb_checkpoints
53 | */__pycache__
54 | */.ipynb_checkpoints
55 | .local
56 | .jupyter
57 | .ipython
58 | */.terraform
59 | terraform.*
60 | .terraform.*
61 | shinobi-dvr/*
62 | .vscode/
63 |
64 | # mypy
65 | .mypy_cache/*
66 |
67 | # Distribution / packaging
68 | .Python
69 | build/
70 | develop-eggs/
71 | dist/
72 | downloads/
73 | eggs/
74 | .eggs/
75 | lib/
76 | lib64/
77 | parts/
78 | sdist/
79 | var/
80 | wheels/
81 | pip-wheel-metadata/
82 | share/python-wheels/
83 | *.egg-info/
84 | .installed.cfg
85 | *.egg
86 | MANIFEST
87 |
88 | # Output from various tools
89 | examples/output
90 | tests/output
91 | docs-build
92 |
93 | # Local or WIP files
94 | local/
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6 | and this project (v1.0.0 and above) adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 |
8 | > WARNING: The `landingai` library is currently in alpha, and it does not strictly follow [Semantic Versioning](https://semver.org/spec/v2.0.0.html). Breaking changes result in a minor version bump instead of a major version bump, and feature releases result in a patch version bump.
9 |
10 | ## [0.3.0] - 2023-10-04
11 |
12 | ### Major changes
13 |
14 | - [Image source iterators yielding single Frame](https://github.com/landing-ai/landingai-python/pull/125)
15 |
16 |
17 | ### Migration Guide
18 |
19 | Whenever you iterate over an image source (`NetworkedCamera`, `Webcam`, etc), each iteration yields a single `Frame`, and not a `FrameSet`.
20 | But most `FrameSet` operations were migrated to the `Frame` class, so you can still use the same API to manipulate the `Frame` object with very minor changes:
21 |
22 | 1. In `for frame in image_source:` loops, don't use `frame.frames[0]` anymore. Instead, use the `frame` object directly (to resize, check predictions, overlay predictions, etc.).
23 | 2. `Frame.save_image` receives the full file path where the frame image should be saved (not just a prefix, as with `FrameSet`).
24 |
25 |
26 |
27 | ## [0.2.0] - 2023-07-12
28 |
29 | ### Major changes
30 |
31 | - [Refactor visual pipeline functionality](https://github.com/landing-ai/landingai-python/pull/77)
32 |
33 | ### Migration Guide
34 |
35 | 1. The `landingai.vision_pipeline` module was migrated to `landingai.pipeline.FrameSet`
36 | 2. All the image sources were consolidated under `landingai.pipeline.image_source`, in particular `NetworkedCamera`
37 | 3. `read_file` is now in `landingai.storage.data_access` and returns a dictionary; the file contents can be found under the "content" key.
38 |
39 | ## [0.1.0] - 2023-07-06
40 |
41 | ### Major changes
42 |
43 | - [Support the latest v2 API key](https://github.com/landing-ai/landingai-python/pull/55)
44 | - [Remove support for v1 API key and secret](https://github.com/landing-ai/landingai-python/pull/56)
45 |
46 | ### Migration Guide
47 |
48 | The section below shows you how to fix the backward-incompatible changes when you upgrade to `0.1.0`.
49 |
50 | 1. Generate your v2 API key from LandingLens. See [here](https://support.landing.ai/docs/api-key) for more information.
51 | 2. The `api_secret` parameter was removed from the `Predictor` and `OcrPredictor` classes. `api_key` is now a named parameter, so you must specify the parameter name, i.e. `api_key=...`, when passing it to a `Predictor`.
52 | See the code below as an example:
53 |
54 | **Before upgrade to `0.1.0`**
55 | ```py
56 | predictor = Predictor(endpoint_id, api_key, api_secret)
57 | ```
58 | **After upgrade to `0.1.0`**
59 | ```py
60 | predictor = Predictor(endpoint_id, api_key=api_key)
61 | ```
62 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright (c) 2023-2023 LandingAI LLC
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/_infra/deploy/ocr-streamlit-docker-build-image.yaml:
--------------------------------------------------------------------------------
1 | # Auto generated by infra-cli. Version: v2.0.3
2 |
3 | # NOTE
4 | # 1. This script builds the OCR Streamlit app's source code into an image and pushes it to ECR.
5 | # 2. A code build is triggered when a new commit is merged into the main branch, i.e. when a PR is merged.
6 | # 3. ECR applies a scan-on-push strategy to every new image.
7 | # 4. Each image has two tags: 1) a commit-hash tag, and 2) the latest tag; we use latest as the default tag.
8 | version: 0.2
9 |
10 | phases:
11 | install:
12 | runtime-versions:
13 | python: 3.7
14 | commands:
15 | - apt-get update && apt-get install -y sudo git awscli
16 | - wget --no-verbose https://landing-resources.s3-us-east-2.amazonaws.com/xa/xa_0.0.3_linux_amd64.deb && dpkg -i ./xa_0.0.3_linux_amd64.deb
17 | - git clone https://${GITHUB_TOKEN}@github.com/landing-ai/infra-cli.git
18 | - cd infra-cli
19 | - INFRACLI_INSTALL_LIGHT=1 bash ./install.sh
20 | - cd ..
21 | pre_build:
22 | commands:
23 | - export XA_ENVIRONMENT=${XA_ENVIRONMENT:-$XA_ENV}
24 | - echo "Getting cross account identity for $XA_ENVIRONMENT"
25 | - $(xa)
26 | - export XA_ACCOUNT_ID=$(aws sts get-caller-identity | jq -rc '.Account')
27 | - infra-cli connect-aws $XA_ACCOUNT_ID $AWS_REGION $CLUSTER_NAME
28 | build:
29 | commands:
30 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
31 | - IMAGE_TAG=${COMMIT_HASH:=latest}
32 | - cd examples/apps/ocr
33 | - infra-cli gen-st-app lappocr
34 | - cd /tmp/infra-cli-st/apprepo/lappocr/
35 | - infra-cli build-lnd-app --tag $IMAGE_TAG
36 | artifacts:
37 | files:
38 | - "**/*"
39 | cache:
40 | paths: []
41 |
42 |
--------------------------------------------------------------------------------
/_infra/deploy/ocr-streamlit-k8s-deploy.yaml:
--------------------------------------------------------------------------------
1 | # Auto generated by infra-cli. Version: v2.0.3
2 |
3 | # NOTE
4 | # 1. This script downloads the OCR Streamlit app's image from ECR and deploys it to the EKS clusters in the staging and production accounts with Helm.
5 | # 2. The pipeline and CodeBuild project running this are in the dev2 edgelicenseserver-app-pipeline.
6 | # 3. It always deploys the latest image for now.
7 | version: 0.2
8 |
9 | phases:
10 | install:
11 | runtime-versions:
12 | python: 3.9
13 | commands:
14 | - apt-get update && apt-get install -y sudo git awscli
15 | - wget --no-verbose https://landing-resources.s3-us-east-2.amazonaws.com/xa/xa_0.0.3_linux_amd64.deb && dpkg -i ./xa_0.0.3_linux_amd64.deb
16 | - git clone https://${GITHUB_TOKEN}@github.com/landing-ai/infra-cli.git
17 | - cd infra-cli
18 | - INFRACLI_INSTALL_LIGHT=1 bash ./install.sh
19 | - cd ..
20 | - DEV_ACCOUNT=$(aws sts get-caller-identity | jq -r '.Account')
21 | - aws ecr get-login-password | docker login --username AWS --password-stdin $DEV_ACCOUNT.dkr.ecr.us-east-2.amazonaws.com
22 | pre_build:
23 | commands:
24 | - export XA_ENVIRONMENT=${XA_ENVIRONMENT:-$XA_ENV}
25 | - echo "Getting cross account identity for $XA_ENVIRONMENT"
26 | - $(xa)
27 | - export XA_ACCOUNT_ID=$(aws sts get-caller-identity | jq -rc '.Account')
28 | - infra-cli connect-aws $XA_ACCOUNT_ID $AWS_REGION $CLUSTER_NAME
29 | build:
30 | commands:
31 | # make sure the cluster has pull permissions for the image
32 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
33 | - echo "Deploying to k8s cluster $CLUSTER_NAME..."
34 |       - echo "Deploying helm charts edgelicenseserver-app to '$TIER'..."
35 | - cd examples/apps/ocr
36 | - infra-cli gen-st-app lappocr
37 | - cd /tmp/infra-cli-st/apprepo/lappocr/
38 | - infra-cli deploy-lnd-app --tier $TIER --image-tag $COMMIT_HASH
39 |
40 | artifacts:
41 | files:
42 | - "**/*"
43 | cache:
44 | paths: []
45 |
46 |
--------------------------------------------------------------------------------
/assets/landing-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/assets/landing-logo.png
--------------------------------------------------------------------------------
/docs/_README.md:
--------------------------------------------------------------------------------
1 | # About
2 | This documentation is generated using [mkdocs](https://www.mkdocs.org/).
3 |
4 | You can view the documentation changes locally by running the following command:
5 |
6 | ```bash
7 | poetry run mkdocs serve -f mkdocs.yml
8 | ```
9 |
10 | Then, open http://127.0.0.1:8000/ in your browser.
11 |
12 | For deployment, see the `.github/workflows/docs.yml` file.
13 |
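14 | To produce the same static build that CI deploys (this mirrors the build command in `.github/workflows/docs.yml`):
15 |
16 | ```bash
17 | poetry run mkdocs build -f mkdocs.yml -d docs-build/
18 | ```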
--------------------------------------------------------------------------------
/docs/_overrides/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block footer %}
4 | {{ super() }}
5 |
6 |
7 |
14 | {% endblock %}
--------------------------------------------------------------------------------
/docs/api/common.md:
--------------------------------------------------------------------------------
1 | ::: landingai.common
--------------------------------------------------------------------------------
/docs/api/data_management.md:
--------------------------------------------------------------------------------
1 | ::: landingai.data_management
2 |
3 | ::: landingai.data_management.client
4 |
5 | ::: landingai.data_management.dataset
6 |
7 | ::: landingai.data_management.label
8 |
9 | ::: landingai.data_management.media
10 |
11 | ::: landingai.data_management.metadata
12 |
13 | ::: landingai.data_management.utils
14 |
15 |
--------------------------------------------------------------------------------
/docs/api/exceptions.md:
--------------------------------------------------------------------------------
1 | ::: landingai.exceptions
--------------------------------------------------------------------------------
/docs/api/image_source_ops.md:
--------------------------------------------------------------------------------
1 | ::: landingai.image_source_ops
--------------------------------------------------------------------------------
/docs/api/notebook_utils.md:
--------------------------------------------------------------------------------
1 | ::: landingai.notebook_utils
--------------------------------------------------------------------------------
/docs/api/pipeline.md:
--------------------------------------------------------------------------------
1 | ::: landingai.pipeline
2 |
3 | ::: landingai.pipeline.frameset
4 |
5 | ::: landingai.pipeline.image_source
6 |
7 | ::: landingai.pipeline.postprocessing
--------------------------------------------------------------------------------
/docs/api/postprocess.md:
--------------------------------------------------------------------------------
1 | ::: landingai.postprocess
--------------------------------------------------------------------------------
/docs/api/predict.md:
--------------------------------------------------------------------------------
1 | ::: landingai.predict
2 |
3 | ::: landingai.predict.cloud
4 |
5 | ::: landingai.predict.edge
6 |
7 | ::: landingai.predict.ocr
8 |
9 | ::: landingai.predict.snowflake
10 |
11 | ::: landingai.predict.utils
12 |
--------------------------------------------------------------------------------
/docs/api/st_utils.md:
--------------------------------------------------------------------------------
1 | ::: landingai.st_utils
--------------------------------------------------------------------------------
/docs/api/storage.md:
--------------------------------------------------------------------------------
1 | ::: landingai.storage
2 |
3 | ::: landingai.storage.data_access
4 |
5 | ::: landingai.storage.snowflake
--------------------------------------------------------------------------------
/docs/api/telemetry.md:
--------------------------------------------------------------------------------
1 | ::: landingai.telemetry
--------------------------------------------------------------------------------
/docs/api/timer.md:
--------------------------------------------------------------------------------
1 | ::: landingai.timer
--------------------------------------------------------------------------------
/docs/api/transform.md:
--------------------------------------------------------------------------------
1 | ::: landingai.transform
--------------------------------------------------------------------------------
/docs/api/utils.md:
--------------------------------------------------------------------------------
1 | ::: landingai.utils
--------------------------------------------------------------------------------
/docs/api/visualize.md:
--------------------------------------------------------------------------------
1 | ::: landingai.visualize
--------------------------------------------------------------------------------
/docs/changelog.md:
--------------------------------------------------------------------------------
1 | ../CHANGELOG.md
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | This guide introduces you to the `landingai` development process and provides information on writing, testing, and building the `landingai` library.
4 |
5 | Read this guide if you need to perform any of the following tasks:
6 |
7 | - Install the `landingai` library locally.
8 | - Contribute to the `landingai` library.
9 |
10 | ## Install `landingai` Library Locally
11 |
12 | ### Prerequisite: Install Poetry
13 |
14 | > `landingai` uses `Poetry` for packaging and dependency management. If you want to build it from source, you have to install Poetry first. To see all possible options, refer to the [Poetry documentation](https://python-poetry.org/docs/#installation).
15 |
16 | For Linux, macOS, Windows (WSL):
17 |
18 | ```
19 | curl -sSL https://install.python-poetry.org | python3 -
20 | ```
21 |
22 | Note: You can use a different Python version by specifying it when installing Poetry:
23 |
24 | ```
25 | curl -sSL https://install.python-poetry.org | python3.10 -
26 | ```
27 |
28 | Or run the following command after installing Poetry:
29 |
30 | ```
31 | poetry env use 3.10
32 | ```
33 |
34 | ### Install All Dependencies
35 |
36 | ```bash
37 | poetry install --all-extras
38 | ```
39 |
40 | ### Activate the virtualenv
41 |
42 | ```bash
43 | poetry shell
44 | ```
45 |
46 | ## Test and Lint `landingai`
47 |
48 | ### Run Linting
49 |
50 | ```bash
51 | poetry run flake8 . --exclude .venv --count --show-source --statistics
52 | ```
53 |
54 | ### Run Tests
55 |
56 | ```bash
57 | poetry run pytest tests/
58 | ```
59 |
60 | ## Release
61 |
62 | The CI and CD pipelines are defined in the `.github/workflows/ci_cd.yml` file.
63 |
64 | Every commit pushed to the `main` branch triggers a release to PyPI at https://pypi.org/project/landingai/ (unless the commit message skips the release; see the `Release` job in the workflow).
65 |
66 | ### Versioning
67 |
68 | When we release a new library version, we version it using [Semantic Versioning 2.0.0](https://semver.org/) (`MAJOR.MINOR.PATCH`). The version number is defined in the `pyproject.toml` file in the `version` field.
69 |
70 | As a general rule of thumb, given a version number `MAJOR.MINOR.PATCH`, increment the:
71 |
72 | - `MAJOR` version when you make incompatible API changes.
73 | - `MINOR` version when you add functionality in a backward-compatible manner, such as adding a new feature.
74 | - `PATCH` version when you make backward-compatible bug fixes and minor changes.
75 |
76 | Note: The CD pipeline will automatically increment the `PATCH` version for every git commit.
77 | **For a `MINOR` or `MAJOR` version change, you need to manually update `pyproject.toml` to bump the version number.**
78 |
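79 | For example, a minimal sketch of a manual `MINOR` version bump using Poetry (the commit message here is illustrative):
80 |
81 | ```bash
82 | poetry version minor   # e.g. 0.3.5 -> 0.4.0; rewrites the version field in pyproject.toml
83 | git add pyproject.toml
84 | git commit -m "chore: bump minor version"
85 | ```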
--------------------------------------------------------------------------------
/docs/examples.md:
--------------------------------------------------------------------------------
1 | We provide some examples as Jupyter Notebooks, which focus on ease of use, and other examples as Python apps, which provide a more robust and complete experience.
2 |
3 | If you have a cool app that uses the LandingAI SDK and you would like to have it featured here, please [let us know](https://github.com/landing-ai/landingai-python/issues/new).
4 |
5 |
6 |
7 | | Example | Description | Type |
8 | |---|---|---|
9 | | [Poker Card Suit Identification](https://github.com/landing-ai/landingai-python/blob/main/examples/webcam-collab-notebook/webcam-collab-notebook.ipynb) | This notebook shows how to use an object detection model from LandingLens to detect suits on playing cards. A webcam is used to take photos of playing cards. | Jupyter Notebook [Open in Colab](https://colab.research.google.com/github/landing-ai/landingai-python/blob/main/examples/webcam-collab-notebook/webcam-collab-notebook.ipynb)|
10 | | [Door Monitoring for Home Automation](https://github.com/landing-ai/landingai-python/blob/main/examples/rtsp-capture-notebook/rtsp-capture.ipynb) | This notebook shows how to use an object detection model from LandingLens to detect whether a door is open or closed. An RTSP camera is used to acquire images. | Jupyter Notebook [Open in Colab](https://colab.research.google.com/github/landing-ai/landingai-python/blob/main/examples/rtsp-capture-notebook/rtsp-capture.ipynb) |
11 | | [Satellite Images and Post-Processing](https://github.com/landing-ai/landingai-python/tree/main/examples/post-processings/farmland-coverage/farmland-coverage.ipynb) | This notebook shows how to use a Visual Prompting model from LandingLens to identify different objects in satellite images. The notebook includes post-processing scripts that calculate the percentage of ground cover that each object takes up. | Jupyter Notebook [Open in Colab](https://colab.research.google.com/github/landing-ai/landingai-python/blob/main/examples/post-processings/farmland-coverage/farmland-coverage.ipynb) |
12 | | [License Plate Detection and Recognition](https://github.com/landing-ai/landingai-python/tree/main/examples/license-plate-ocr-notebook/license_plate_ocr.ipynb) | This notebook shows how to extract frames from a video file and use an object detection model and OCR from LandingLens to identify and recognize different license plates. | Jupyter Notebook [Open in Colab](https://colab.research.google.com/github/landing-ai/landingai-python/blob/main/examples/license-plate-ocr-notebook/license_plate_ocr.ipynb) |
13 | | [Streaming Video](https://github.com/landing-ai/landingai-python/tree/main/examples/capture-service) | This application shows how to continuously run inference on images extracted from a streaming RTSP video camera feed. | Python application |
14 |
15 | ## Run Examples Locally
16 |
17 | All the examples in this repo can be run locally.
18 |
19 | To give you some guidance, here's how you can run the streaming `capture-service` example locally in a shell environment:
20 |
21 | 1. Clone the repo to local:
22 | ```
23 | git clone https://github.com/landing-ai/landingai-python.git
24 | ```
25 | 2. Install the library (to see how to install `poetry`, go [here](https://python-poetry.org/docs/)):
26 | ```
27 | poetry install --with examples
28 | ```
29 | 3. Activate the virtual environment:
30 | ```
31 | poetry shell
32 | ```
33 | 4. Run the capture service example:
34 | ```
35 | python landingai-python/examples/capture-service/run.py
36 | ```
37 |
--------------------------------------------------------------------------------
/docs/image-acquisition/image-acquisition.md:
--------------------------------------------------------------------------------
1 | The LandingAI SDK provides many sources for getting images:
2 |
3 | - [Image files](image-file.md)
4 | - [Video files](video-file.md)
5 | - [Webcam](webcam.md)
6 | - [Network cameras](network-cameras.md)
7 | - [Screenshots](screenshots.md)
8 |
--------------------------------------------------------------------------------
/docs/image-acquisition/image-file.md:
--------------------------------------------------------------------------------
1 | Creating a frame out of an image file is pretty straightforward:
2 | ```py
3 | from landingai.pipeline.frameset import Frame
4 |
5 | frame = Frame.from_image("/path/to/your/image.jpg") # (1)!
6 | frame.resize(width=512, height=512) # (2)!
7 | frame.save_image("/tmp/resized-image.png") # (3)!
8 | ```
9 |
10 | 1. Open `/path/to/your/image.jpg` image file.
11 | 2. Resize the frame to `width=512px` and `height=512px`.
12 | 3. Save the resized image to `/tmp/resized-image.png`.
13 |
14 |
15 | Alternatively, if you have a folder with multiple images, you can iterate over them
16 | using `landingai.pipeline.image_source.ImageFolder`:
17 |
18 | ```py
19 | from landingai.pipeline.image_source import ImageFolder
20 |
21 | with ImageFolder("/path/to/your/images-dir/*.png") as image_folder:
22 | for i, frame in enumerate(image_folder): # (1)!
23 | frame.resize(width=512) # (2)!
24 | frame.save_image(f"/tmp/resized-image-{i}.png") # (3)!
25 | ```
26 |
27 | 1. Iterate over all PNG images in `/path/to/your/images-dir`.
28 | 2. Resize each frame to `width=512px` (keeping the aspect ratio).
29 | 3. Save the resized images to `/tmp/resized-image-0.png`, `/tmp/resized-image-1.png`, and so on.
--------------------------------------------------------------------------------
/docs/image-acquisition/network-cameras.md:
--------------------------------------------------------------------------------
1 | Just like [local webcams](image-acquisition/webcam.md), you can extract frames from IP cameras, using `landingai.pipeline.image_source.NetworkedCamera`.
2 |
3 | The example below iterates over the first 100 frames captured by the network camera, resizes each one, and saves the result as
4 | a new video file.
5 |
6 | ```py
7 | from landingai.pipeline.image_source import NetworkedCamera
8 | from landingai.pipeline.frameset import FrameSet
9 |
10 | frameset = FrameSet() # (1)!
11 | with NetworkedCamera(stream_url="rtsp://192.168.0.77:8080/h264_opus.sdp", fps=1) as camera: # (2)!
12 | for i, frame in enumerate(camera): # (3)!
13 | if i >= 100:
14 | break
15 | frame.resize(width=256) # (4)!
16 | frameset.append(frame) # (5)!
17 | frameset.save_video("/tmp/resized-video.mp4") # (6)!
18 | ```
19 |
20 | 1. Creates an empty `FrameSet`, where we will store the modified frames
21 | 2. Connects to the IP camera using the RTSP protocol, and captures frames at 1 frame per second
22 | 3. Iterate over each frame captured by the camera
23 | 4. Resize the frame to `width=256px` (keeping aspect ratio)
24 | 5. Append the resized frame to the `FrameSet`
25 | 6. Save the resized video to `/tmp/resized-video.mp4`.
26 |
27 | It is also possible to yield frames only when the camera detects motion. This is useful for reducing the number of frames
28 | processed when running inferences, for example:
29 |
30 | ```py
31 | # motion_detection_threshold is a value between 0 and 100, as a percent of pixels changed from one frame to the next.
32 | cam = NetworkedCamera(
33 | stream_url="rtsp://192.168.0.77:8080/h264_opus.sdp",
34 | fps=1,
35 | motion_detection_threshold=15,
36 | )
37 | with cam:
38 | for frame in cam:
39 | ...
40 | ```
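41 |
42 | Combining motion detection with inference, here is a minimal sketch (assuming a `Predictor` configured as in the inference docs, placeholder credentials, and a hypothetical class label `"person"`):
43 |
44 | ```py
45 | from landingai.pipeline.image_source import NetworkedCamera
46 | from landingai.predict import Predictor
47 |
48 | predictor = Predictor(endpoint_id="<your-endpoint-id>", api_key="<your-api-key>")
49 | cam = NetworkedCamera(
50 |     stream_url="rtsp://192.168.0.77:8080/h264_opus.sdp",
51 |     fps=1,
52 |     motion_detection_threshold=15,  # only yield frames with >=15% changed pixels
53 | )
54 | with cam:
55 |     for frame in cam:
56 |         frame.run_predict(predictor=predictor)  # runs inference on motion frames only
57 |         frame.overlay_predictions()
58 |         if "person" in frame.predictions:  # "person" is a hypothetical class label
59 |             frame.save_image("/tmp/motion-detected.png", include_predictions=True)
60 | ```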
--------------------------------------------------------------------------------
/docs/image-acquisition/screenshots.md:
--------------------------------------------------------------------------------
1 | If you want to track what is happening on your desktop, you can use `landingai.pipeline.image_source.Screenshot` to capture screenshots of it.
2 |
3 | In the example below, we capture the first 20 frames, and save them to a video file.
4 |
5 | ```py
6 | from landingai.pipeline.image_source import Screenshot
7 | from landingai.pipeline.frameset import FrameSet
8 | import time
9 |
10 | frameset = FrameSet() # (1)!
11 | with Screenshot() as screenshots: # (2)!
12 | for i, frame in enumerate(screenshots): # (3)!
13 | if i >= 20:
14 | break
15 | frame.resize(width=512) # (4)!
16 | frameset.append(frame) # (5)!
17 | time.sleep(0.5) # (6)!
18 | frameset.save_video("/tmp/resized-video.mp4") # (7)!
19 | ```
20 |
21 | 1. Creates an empty `FrameSet`, where we will store the modified frames
22 | 2. Build the screenshot capture object
23 | 3. Iterate over each frame captured from the desktop
24 | 4. Resize the frame to `width=512px` (keeping aspect ratio)
25 | 5. Append the resized frame to the `FrameSet`
26 | 6. Wait for 0.5 seconds before capturing the next frame
27 | 7. Save the resized video to `/tmp/resized-video.mp4`.
28 |
--------------------------------------------------------------------------------
/docs/image-acquisition/video-file.md:
--------------------------------------------------------------------------------
1 | You can extract frames out of video files using `landingai.pipeline.image_source.VideoFile`.
2 |
3 | The example below iterates over each frame of a video, resizes each one, and saves the result as
4 | a new video file.
5 |
6 | ```py
7 | from landingai.pipeline.image_source import VideoFile
8 | from landingai.pipeline.frameset import FrameSet
9 |
10 | frameset = FrameSet() # (1)!
11 | with VideoFile("/path/to/your/file.mp4", samples_per_second=5) as video_file: # (2)!
12 | for frame in video_file: # (3)!
13 | frame.resize(width=256) # (4)!
14 | frameset.append(frame) # (5)!
15 | frameset.save_video("/tmp/resized-video.mp4") # (6)!
16 | ```
17 |
18 | 1. Creates an empty `FrameSet`, where we will store the modified frames
19 | 2. Open `/path/to/your/file.mp4` video file, at a rate of 5 frames per second.
20 | 3. Iterate over each frame of the video file
21 | 4. Resize the frame to `width=256px` (keeping aspect ratio)
22 | 5. Append the resized frame to the `FrameSet`
23 | 6. Save the resized video to `/tmp/resized-video.mp4`.
24 |
25 |
--------------------------------------------------------------------------------
/docs/image-acquisition/webcam.md:
--------------------------------------------------------------------------------
1 | Just like [video files](image-acquisition/video-file.md), you can extract frames from your local webcam using `landingai.pipeline.image_source.Webcam`.
2 |
3 | The example below iterates over the first 100 frames captured by the webcam, resizes each one, and saves the result as
4 | a new video file.
5 |
6 | ```py
7 | from landingai.pipeline.image_source import Webcam
8 | from landingai.pipeline.frameset import FrameSet
9 |
10 | frameset = FrameSet() # (1)!
11 | with Webcam(fps=1) as camera: # (2)!
12 | for i, frame in enumerate(camera): # (3)!
13 | if i >= 100:
14 | break
15 | frame.resize(width=256) # (4)!
16 | frameset.append(frame) # (5)!
17 | frameset.save_video("/tmp/resized-video.mp4") # (6)!
18 | ```
19 |
20 | 1. Creates an empty `FrameSet`, where we will store the modified frames
21 | 2. Capture frames from the webcam at 1 frame per second
22 | 3. Iterate over each frame captured from the webcam
23 | 4. Resize the frame to `width=256px` (keeping aspect ratio)
24 | 5. Append the resized frame to the `FrameSet`
25 | 6. Save the resized video to `/tmp/resized-video.mp4`.
26 |
27 | ## Webcam on Colab and Remote Jupyter Notebooks
28 |
29 | If you are running a remote Jupyter Notebook or Google Colab, you may want to
30 | capture images from your local webcam (the one on the computer running the web UI, not the
31 | machine running the kernel).
32 |
33 | In this case, `landingai.pipeline.image_source.Webcam` will not work. Instead, you should
34 | capture images from your webcam using the `take_photo_from_webcam()` function from the `landingai.image_source_ops` module:
35 |
36 | ```py
37 | from landingai.image_source_ops import take_photo_from_webcam
38 | from landingai.pipeline.frameset import Frame
39 |
40 | frame = Frame(image=take_photo_from_webcam())
41 | frame.resize(width=256)
42 | ```
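43 |
44 | From there, the `Frame` behaves like any other frame in the SDK. For example, a minimal sketch of running inference on it (assuming a `Predictor` configured with placeholder credentials, as in the inference docs):
45 |
46 | ```py
47 | from landingai.predict import Predictor
48 |
49 | predictor = Predictor(endpoint_id="<your-endpoint-id>", api_key="<your-api-key>")
50 | frame.run_predict(predictor=predictor)  # runs inference and stores predictions on the frame
51 | print(frame.predictions)
52 | ```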
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-brightness-0.1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-brightness-0.1.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-brightness-2.0.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-brightness-2.0.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-color-0.1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-color-0.1.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-color-2.0.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-color-2.0.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-contrast-0.1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-contrast-0.1.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-contrast-2.0.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-contrast-2.0.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-crop-predictions-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-crop-predictions-0.png
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-crop-predictions-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-crop-predictions-1.png
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-crop-predictions-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-crop-predictions-2.png
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-cropped.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-cropped.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-resized-both.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-resized-both.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-resized-width.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-resized-width.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-sharpness-0.1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-sharpness-0.1.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal-sharpness-2.0.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal-sharpness-2.0.jpeg
--------------------------------------------------------------------------------
/docs/images/cereal-ops/cereal.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/cereal-ops/cereal.jpeg
--------------------------------------------------------------------------------
/docs/images/coffee-mug-overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/coffee-mug-overlay.png
--------------------------------------------------------------------------------
/docs/images/copy-endpoint-id.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/copy-endpoint-id.png
--------------------------------------------------------------------------------
/docs/images/handwriting-hello.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/handwriting-hello.png
--------------------------------------------------------------------------------
/docs/images/landing-apikey.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/landing-apikey.png
--------------------------------------------------------------------------------
/docs/images/landing-coffee-mug-class.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/landing-coffee-mug-class.png
--------------------------------------------------------------------------------
/docs/images/landing-console-train-deploy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/landing-console-train-deploy.png
--------------------------------------------------------------------------------
/docs/images/landing-deploy-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/landing-deploy-page.png
--------------------------------------------------------------------------------
/docs/images/landing-deploy-popup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/landing-deploy-popup.png
--------------------------------------------------------------------------------
/docs/images/menu-api-key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/menu-api-key.png
--------------------------------------------------------------------------------
/docs/images/metadata-management-ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/docs/images/metadata-management-ui.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # Welcome to the LandingAI Python Library Documentation
6 |
7 | The LandingAI Python Library is a set of tools to help you build computer vision applications. While some of the functionality is specific to [LandingLens](https://app.landing.ai/), the computer vision platform from LandingAI, other features can be used for managing images in general.
8 |
9 | The library includes features to acquire and process your images and videos, and to detect objects in them, with as little code as possible.
10 |
11 | ## Quick Start
12 |
13 | ### Install
14 | First, install the LandingAI Python library:
15 |
16 | ```bash
17 | pip install landingai~=0.3.0
18 | ```
19 |
20 |
21 | ### Acquire Your First Images
22 |
23 | After installing the LandingAI Python library, you can start acquiring images from one of many image sources.
24 |
25 | For example, from a single image file:
26 |
27 | ```py
28 | from landingai.pipeline.frameset import Frame
29 |
30 | frame = Frame.from_image("/path/to/your/image.jpg") # (1)!
31 | frame.resize(width=512, height=512) # (2)!
32 | frame.save_image("/tmp/resized-image.png") # (3)!
33 | ```
34 |
35 | 1. We support several image file types. See the full list [here](https://support.landing.ai/docs/upload-images).
36 | 2. Resize the frame to 512x512 pixels.
37 | 3. Save the resized image to `/tmp/resized-image.png`.
38 |
39 |
40 | You can also extract frames from your webcam. For example:
41 |
42 | ```py
43 | from landingai.pipeline.image_source import Webcam
44 |
45 | with Webcam(fps=0.5) as webcam: # (1)!
46 | for frame in webcam:
47 | frame.resize(width=512, height=512) # (2)!
48 | frame.save_image("/tmp/webcam-image.png") # (3)!
49 | ```
50 |
51 | 1. Capture images from the webcam at 0.5 frames per second (1 frame every 2 seconds), closing the camera at the end of the `with` block.
52 | 2. Resize the frame to 512x512 pixels.
53 | 3. Save each image to `/tmp/webcam-image.png` (each new frame overwrites the file).
54 |
55 |
56 | To learn how to acquire images from more sources, go to [Image Acquisition](image-acquisition/image-acquisition.md).
57 |
58 |
59 | ### Run Inference
60 |
61 | If you have deployed a computer vision model in LandingLens, you can use this library to send images to that model for inference.
62 |
63 | For example, let's say we've created and deployed a model in LandingLens that detects coffee mugs. Now, we'll use the code below to extract images (frames) from a webcam and run inference on those images.
64 |
65 | ???+ note
66 |
67 |     If you don't have a LandingLens account, create one [here](https://app.landing.ai/). Learn how to use LandingLens from our [Support Center](https://support.landing.ai/landinglens/en) and [Video Tutorial Library](https://support.landing.ai/docs/landinglens-workflow-2). Need help with specific use cases? Post your questions in our [Community](https://community.landing.ai/home).
68 |
69 |
70 | ???+ note
71 | If you are running LandingLens as a Snowflake Native App, see the [Snowflake Native App](inferences/snowflake-native-app.md) section for more information.
72 |
73 |
74 | ```py
75 | from landingai.pipeline.image_source import Webcam
76 | from landingai.predict import Predictor
77 |
78 | predictor = Predictor( # (1)!
79 | endpoint_id="abcdef01-abcd-abcd-abcd-01234567890", # (2)!
80 | api_key="land_sk_xxxxxx", # (3)!
81 | )
82 | with Webcam(fps=0.5) as webcam:
83 | for frame in webcam:
84 | frame.resize(width=512) # (4)!
85 | frame.run_predict(predictor=predictor) # (5)!
86 | frame.overlay_predictions()
87 | if "coffee-mug" in frame.predictions: # (6)!
88 | frame.save_image("/tmp/latest-webcam-image.png", include_predictions=True) # (7)!
89 | ```
90 |
91 | 1. Creates a LandingLens predictor object.
92 | 2. Set the `endpoint_id` to the one from your deployed model in LandingLens.
93 | 3. Set the `api_key` to the one from your LandingLens organization.
94 | 4. Resize the image to `width=512`, keeping the aspect ratio. This is useful to save some bandwidth when sending the image to LandingLens for inference.
95 | 5. Runs inference on the resized frame, and adds the predictions to the `Frame`.
96 | 6. If the model predicts that an object with a class named `coffee-mug` is found...
97 | 7. Then save the image with the predictions overlaid on the image.
98 |
99 | That's it! Now, with just a few lines of code, you can detect coffee mugs in front of your webcam.
100 |
101 | Now, learn about the other ways to [acquire images](image-acquisition/image-acquisition.md), and how to process your images and [run inference on them](inferences/getting-started.md). For inspiration, check out our [Examples](examples.md).
102 |
--------------------------------------------------------------------------------
/docs/inferences/docker-deployment.md:
--------------------------------------------------------------------------------
1 | Running inferences with the standard `landingai.predict.Predictor` will send your image to LandingLens cloud, which is ideal if you don't want to worry about backend scalability, hardware provisioning, availability, etc. But this also adds some networking overhead that might limit how many inferences per second you can run.
2 |
3 | If you need to run several inferences per second, and you have your own cloud service or local machine, you might want to run inference using your own resources. For that, we provide **[Docker deployment](https://support.landing.ai/docs/docker-deploy)**, a Docker image with your trained LandingLens model embedded, which you can run anywhere.
4 |
5 |
6 | ???+ note
7 |
8 | You can get more details on how to set up and run the Docker deployment container locally or in your own cloud service in our [Support Center](https://support.landing.ai/docs/docker-deploy).
9 |
10 | Once you go through the Support Center guide, you will have the model running in a container, accessible at a specific host and port. The example below refers to these as `localhost` and `8000`, respectively.
11 |
12 |
13 | Once you have your Docker deployment container running, you can run inference using the `landingai.predict.EdgePredictor` class. For example:
14 |
15 | ```py
16 | from landingai.pipeline.image_source import Webcam
17 | from landingai.predict import EdgePredictor
18 |
19 | predictor = EdgePredictor(host="localhost", port=8000) # (1)!
20 | with Webcam(fps=15) as webcam: # (2)!
21 | for frame in webcam:
22 | frame.run_predict(predictor=predictor) # (3)!
23 | frame.overlay_predictions()
24 | if "coffee-mug" in frame.predictions: # (4)!
25 | frame.save_image(
26 | "/tmp/latest-webcam-image.png",
27 | include_predictions=True
28 | )
29 | ```
30 |
31 | 1. Create an `EdgePredictor` object, specifying the host and port where your Docker container is running.
32 | 2. Capture images from the webcam at 15 frames per second, closing the camera at the end of the `with` block.
33 | 3. Run inference on the frame using the Docker container predictor.
34 | 4. If the model detects a "coffee-mug" object in the frame, save the image to a file, including the predictions as an overlay.
35 |
36 | The `EdgePredictor` class is a subclass of `Predictor`, so you can use it in the same way as the standard `Predictor` class. The only difference is that the `EdgePredictor` will send the image to your Docker container instead of sending it to LandingLens cloud.
37 |
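Since the interface is the same, you can also run a one-off prediction on a single image file instead of a webcam stream. A minimal sketch (the image path is a placeholder):

```py
from landingai.pipeline.image_source import Frame
from landingai.predict import EdgePredictor

predictor = EdgePredictor(host="localhost", port=8000)
frame = Frame.from_image("/path/to/image.png")  # placeholder path
frame.run_predict(predictor=predictor)  # inference runs in your local container
print(frame.predictions)
```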
38 | The time it takes to run the inference will vary according to the hardware where the Docker container is running (if you set it up to run on a GPU, for example, it will probably yield faster predictions).
39 |
40 | Check out the [Support Center](https://support.landing.ai/docs/docker-deploy) for more information on how to get a deployment license, run the Docker deployment with a GPU, and more.
--------------------------------------------------------------------------------
/docs/inferences/frames-inference.md:
--------------------------------------------------------------------------------
1 | Now that you have an API key and an endpoint ID (if you don't, see the [Running inferences / Getting started](getting-started.md) section), you can run your first inference using the SDK.
2 |
3 | ## Working with frames
4 |
5 | The SDK provides a variety of image source helpers for acquiring images (see the [Image acquisition](../image-acquisition/image-acquisition.md) section). All of these image sources yield `Frame` objects, which can be used to run inferences.
6 |
7 | The process is straightforward. To run inference on frames captured from the webcam, for example, use the `Webcam` and `Predictor` classes:
8 |
9 | ```py
10 | from landingai.pipeline.image_source import Webcam
11 | from landingai.predict import Predictor
12 |
13 | predictor = Predictor(
14 | endpoint_id="",
15 | api_key="",
16 | )
17 | with Webcam(fps=0.5) as webcam:
18 | for frame in webcam:
19 | frame.resize(width=512)
20 | frame.run_predict(predictor=predictor)
21 | if "coffee-mug" in frame.predictions:
22 | print(frame.predictions)
23 | ```
24 |
25 | The code above runs inference on frames captured from the webcam and prints the predictions whenever a "coffee-mug" is detected in the frame. "coffee-mug", in this case, is one of the classes labeled in LandingLens.
26 |
27 | 
28 |
29 | ## Filtering predictions
30 |
31 | You can also filter the predictions by label. For example, to count only the "coffee-mug" predictions:
32 |
33 | ```py
34 | with Webcam(fps=0.5) as webcam:
35 | for frame in webcam:
36 | frame.run_predict(predictor=predictor)
37 |         total_coffee_mugs = len(frame.predictions.filter_label("coffee-mug"))
38 |         if total_coffee_mugs > 0:
39 |             print(
40 |                 "Found coffee mugs in the frame! "
41 |                 f"Found {total_coffee_mugs}!"
42 |             )
43 | ```
44 |
45 | Another way to filter the prediction result is by "confidence score". The confidence score is a value between 0 and 1 that represents how confident the model is about the prediction. For example, if the model is 100% sure that there is a coffee-mug in the frame, the confidence score will be 1. If the model is 50% sure, the confidence score will be 0.5.
46 |
47 | By default, LandingLens deploys your model with the best confidence threshold for you, balancing false positives and false negatives. But it is possible to
48 | filter the predictions by confidence score. For example, to count only the predictions with a confidence score greater than 0.95:
49 |
50 | ```py
51 | with Webcam(fps=0.5) as webcam:
52 | for frame in webcam:
53 | frame.run_predict(predictor=predictor)
54 | if "coffee-mug" in frame.predictions.filter_threshold(0.95):
55 |             print("I'm super sure I found a coffee-mug in the frame!")
56 | ```
57 |
58 | ## Predictor rate limit
59 |
60 | The `Predictor` object is the main object used to run inferences. It calls the LandingLens API to execute inferences using the hosted model.
61 |
62 | Keep in mind that LandingLens has a rate limit that varies with your plan. At the moment, non-enterprise plans allow up to 40 requests per minute. If you exceed this limit, your `Predictor` object will automatically retry the request after a few seconds.
63 |
64 | Please adjust your image capture rate to avoid exceeding the limit, or contact the LandingAI team ([sales@landing.ai](mailto:sales@landing.ai)) to upgrade your plan for higher limits or local inference support.
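
As a rule of thumb, keep your capture rate below the request limit. A minimal sketch, reusing the `Webcam` and `predictor` from the examples above (the numbers assume the 40 requests/minute limit mentioned here):

```py
MAX_REQUESTS_PER_MINUTE = 40
max_fps = MAX_REQUESTS_PER_MINUTE / 60  # ~0.66 frames per second at most

with Webcam(fps=0.5) as webcam:  # 0.5 fps -> 30 requests/minute, safely under the limit
    for frame in webcam:
        frame.run_predict(predictor=predictor)
```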
--------------------------------------------------------------------------------
/docs/inferences/getting-started.md:
--------------------------------------------------------------------------------
1 |
2 | Once you are ready to [acquire images](../image-acquisition/image-acquisition.md), it's time to run inference on them. This section covers the basics of running inference on the acquired frames to classify images, detect objects, or create segmentation masks.
3 |
4 | ## Building your first model
5 |
6 | To run inferences using LandingLens, you must first build a model. If you don't have an account yet, visit https://app.landing.ai/, sign up for a free account, and create a new project. If you are not familiar with LandingLens, you can find a lot of useful information in the [LandingLens Support Center](https://support.landing.ai/docs/landinglens-workflow).
7 |
8 | Long story short, after creating a project in LandingLens you will need to:
9 |
10 | 1. Upload some images of the object you want to detect
11 | 2. Label those images
12 | 3. Train a model by clicking the "Train" button
13 | 4. Once the model is trained, click the "Deploy" button to deploy the model to an endpoint provided by LandingLens
14 |
15 | The image below illustrates the LandingLens panel with some example images already labeled, and the "Train" and "Deploy" buttons.
16 |
17 | 
18 |
19 | ## Getting the endpoint ID
20 |
21 | Once you click "Deploy", the easiest way to deploy the model is to select "Cloud deployment" as the deployment type and create a new endpoint. This provides you with an endpoint that you can use to run inference on frames using your model.
22 |
23 | 
24 |
25 | On the "Deploy" page, you can see more details about this new endpoint, including an example of how to run predictions with the SDK and a button to copy the endpoint ID.
26 |
27 | 
28 |
29 | Keep this endpoint ID. You will need it to run inferences using the SDK.
30 |
31 | ## Getting the API Key
32 |
33 | Apart from the endpoint ID, you will need an API key to connect the SDK to the LandingLens backend. To get the API key, click your user name in the top-right corner of the LandingLens console and select "API Key". Then create a new API key by clicking the "Create New Key" button.
34 |
35 | 
36 |
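Once you have both values, wiring them into a `Predictor` looks like this (a minimal sketch; both strings are placeholders for your own values, and the next section walks through a complete example):

```py
from landingai.predict import Predictor

predictor = Predictor(
    endpoint_id="your-endpoint-id",  # placeholder: the endpoint ID you copied above
    api_key="land_sk_xxxxxx",        # placeholder: the API key you just created
)
```
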
37 | ## What's next?
38 |
39 | Now that you have both the API key and the endpoint ID, go to the [Running inferences / Working with frames](frames-inference.md) section to run your first inference using the SDK.
--------------------------------------------------------------------------------
/docs/inferences/ocr.md:
--------------------------------------------------------------------------------
1 | A common task for computer vision is extracting text from images, also known as OCR (Optical Character Recognition).
2 |
3 | The LandingLens Python SDK has OCR models available out-of-the-box, without the need to train your own model. The models are pre-trained on a variety of font types and are optimized for accuracy and speed.
4 |
5 | ## Running OCR Inference
6 |
7 | To extract text from an image, use the `landingai.predict.OcrPredictor` class and run inference on a `Frame`.
8 |
9 | The model works well with several font types. Let's try with this example image, which contains handwriting:
10 |
11 | 
12 |
13 | ```python
14 | from landingai.predict import OcrPredictor
15 | from landingai.pipeline.image_source import Frame
16 |
17 | predictor = OcrPredictor(api_key="") # (1)!
18 |
19 | frame = Frame.from_image("/path/to/image.png") # (2)!
20 | frame.run_predict(predictor) # (3)!
21 |
22 | for prediction in frame.predictions: # (4)!
23 | print(f"{prediction.text} (Confidence: {prediction.score})") # (5)!
24 | ```
25 |
26 | 1. Create an `OcrPredictor` instance with your API key. Visit [https://app.landing.ai/](https://app.landing.ai/) and see [Getting the API Key](getting-started.md#getting-the-api-key) for more details on how to get your API key. You can optionally specify a `language` parameter to tell the backend to pick a model that was trained on that particular language; the default `ch` supports Chinese and English characters, while the value `en` only supports English characters and may sometimes provide better results if you already know the image only contains English characters (see the sketch after the example output below).
27 | 2. Create a `Frame` instance from an image file. You could use any image source, such as a webcam, video file, screenshots, etc. See [Image Acquisition](../image-acquisition/image-acquisition.md) for more details.
28 | 3. Run inference on the frame to extract the text.
29 | 4. Iterate over the predictions.
30 | 5. Print the text and confidence score.
31 |
32 | In the example above, the output should look like this:
33 |
34 | ```text
35 | Hi From (Confidence: 0.9179285764694214)
36 | Landing.AI (Confidence: 0.7755413055419922)
37 | ```
38 |
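If you already know the image contains only English text, you can try the `en` model via the `language` parameter described in note 1 above. A minimal sketch (the image path is a placeholder):

```python
predictor_en = OcrPredictor(api_key="", language="en")  # English-only OCR model

frame = Frame.from_image("/path/to/english-text.png")  # placeholder path
frame.run_predict(predictor_en)
print(frame.predictions)
```
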
39 | You can also use the `in` operator to check if a certain set of characters is present in the predictions:
40 |
41 | ```python
42 | if "Landing" in frame.predictions:
43 | print("Found 'Landing' written in the text!")
44 | ```
45 |
46 | The results may vary depending on the image quality, the font, and the language. Try it with your own images to see how well the model performs, and [give us feedback about the results](https://github.com/landing-ai/landingai-python/issues/new).
--------------------------------------------------------------------------------
/docs/inferences/overlaying-predictions.md:
--------------------------------------------------------------------------------
1 | Apart from counting and checking the predictions, we can also overlay the predictions on the original image. This is useful for seeing how well the model is performing. We can do this with the `Frame.save_image` function, which merges the original `Frame` image with the predictions and labels, and overlays the predictions on top of the original image.
2 |
3 | ```python
4 | from landingai.pipeline.image_source import Webcam
5 | from landingai.predict import Predictor
6 |
7 | predictor = Predictor( # (1)!
8 | endpoint_id="",
9 | api_key="",
10 | )
11 |
12 | with Webcam(fps=0.5) as webcam:
13 | for frame in webcam: # (2)!
14 | frame.resize(width=512)
15 | frame.run_predict(predictor=predictor) # (3)!
16 | frame.overlay_predictions() # (4)!
17 | if "coffee-mug" in frame.predictions:
18 | frame.save_image(
19 | path="frame-with-overlay.png",
20 | include_predictions=True, # (5)!
21 | )
22 | ```
23 |
24 | 1. Creates the Predictor object with your API key and endpoint ID.
25 | 2. Iterate over each frame in the webcam feed.
26 | 3. Runs the inference on the frame.
27 | 4. Overlays the predictions on top of the original image.
28 | 5. Saves the frame to `frame-with-overlay.png` with the predictions overlaid on top of the original image.
29 |
30 |
31 |
32 | The resulting image should look more or less like this for object detection models:
33 |
34 | 
--------------------------------------------------------------------------------
/docs/inferences/snowflake-native-app.md:
--------------------------------------------------------------------------------
1 | If you are hosting LandingLens as a Snowflake Native App, you need to use the `SnowflakeNativeAppPredictor` class to run inference.
2 |
3 | To use this predictor class, you must first install the `landingai` package with the `snowflake` extra. You can do this by running:
4 |
5 | ```sh
6 | pip install "landingai[snowflake]"
7 | ```
8 |
9 | Here is an example of how to use the `SnowflakeNativeAppPredictor` class:
10 |
11 | ```py
12 | from landingai.pipeline.image_source import Frame
13 | from landingai.predict import SnowflakeNativeAppPredictor
14 |
15 | endpoint_id = "c4344971-fc3c-4cb8-8fd5-5144d25cbd74"
16 | url = "https://focq4dkf-rkwerpo-your-account.snowflakecomputing.app"
17 |
18 | predictor = SnowflakeNativeAppPredictor(
19 | endpoint_id=endpoint_id, # (1)!
20 | native_app_url=url, # (2)!
21 | snowflake_account="your-snowflake-account-locator", # (3)!
22 | snowflake_user="your-snowflake-user", # (4)!
23 | snowflake_private_key="your-snowflake-user-private-key", # (5)!
24 | )
25 |
26 | frame = Frame.from_image("/home/user/dataset/some-class/image-1.png")
27 | frame.run_predict(predictor=predictor)
28 | print(frame.predictions)
29 | # [
30 | # ClassificationPrediction(
31 | # score=0.9957893490791321,
32 | # label_name='some-class',
33 | # label_index=0
34 | # )
35 | # ]
36 | ```
37 |
38 | 1. Use the endpoint ID you created in LandingLens.
39 | 2. The URL you use to access the Snowflake Native App. It should start with `https://` and have no trailing slash or path at the end.
40 | 3. Your Snowflake account locator, in the format `ABC01234`.
41 | 4. Your Snowflake user name. Keep in mind that this Snowflake user must have access to the application.
42 | 5. Your Snowflake user's private key. This key is used to authenticate the user. Alternatively, you can pass the `snowflake_password` parameter with the user's password instead of the private key, but this is not recommended in
43 | production environments.
44 |
45 | ## Creating a user
46 |
47 | When using the `SnowflakeNativeAppPredictor` class, the preferred authentication mechanism for production
48 | environments is to use a private key. To create a user with a private key, you can run the following SQL
49 | commands in your Snowflake account:
50 |
51 | ```sql
52 | CREATE OR REPLACE USER LANDING_LIBRARY_USER
53 | LOGIN_NAME = 'LANDING_LIBRARY_USER'
54 | DISPLAY_NAME = 'LandingAI Library User'
55 | COMMENT = 'User for LandingAI library'
56 | RSA_PUBLIC_KEY = 'MIIBI...'; -- Replace this with your public key
57 | CREATE ROLE LANDINGLENS_EXTERNAL_ACCESS;
58 | GRANT ROLE LANDINGLENS_EXTERNAL_ACCESS
59 | TO USER LANDING_LIBRARY_USER;
60 | GRANT APPLICATION ROLE llens_snw_production.LLENS_PUBLIC
61 | TO ROLE LANDINGLENS_EXTERNAL_ACCESS;
62 | ```
63 |
64 | See [key-pair authentication](https://docs.snowflake.com/en/user-guide/key-pair-auth) on Snowflake's documentation for more information on how to generate a key pair.
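
In practice, you will usually keep the private key in a PEM file rather than pasting it into code. A minimal sketch of loading it at runtime (the file name and all placeholder values below are assumptions):

```py
from pathlib import Path

from landingai.predict import SnowflakeNativeAppPredictor

private_key = Path("rsa_key.p8").read_text()  # assumed path to the key generated per the guide above

predictor = SnowflakeNativeAppPredictor(
    endpoint_id="your-endpoint-id",
    native_app_url="https://your-app.snowflakecomputing.app",
    snowflake_account="ABC01234",
    snowflake_user="LANDING_LIBRARY_USER",
    snowflake_private_key=private_key,
)
```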
65 |
--------------------------------------------------------------------------------
/examples/apps/assets/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/examples/apps/assets/favicon.ico
--------------------------------------------------------------------------------
/examples/apps/crack-measurer/README.md:
--------------------------------------------------------------------------------
1 | This application measures the width and length of a crack on a concrete surface. To use
2 | the application, you must first calibrate the camera by selecting a line on the image
3 | and entering the actual length of the line in inches. Once this is done, you can upload
4 | an image with a crack; the crack will be segmented by a LandingLens model and then
5 | analyzed to find the crack's length and maximum width.
6 |
7 | To run this app, first install the requirements: `pip install -r examples/apps/crack-measurer/requirements.txt`
8 |
9 | Then run: `streamlit run examples/apps/crack-measurer/app.py`
10 |
--------------------------------------------------------------------------------
/examples/apps/crack-measurer/app.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import streamlit as st
4 | from PIL import Image, ImageDraw
5 | from streamlit_image_coordinates import streamlit_image_coordinates
6 |
7 | import landingai.st_utils as lst
8 |
9 | if "points" not in st.session_state:
10 | st.session_state["points"] = []
11 |
12 |
13 | def get_ellipse_coords(point: tuple[int, int]) -> tuple[int, int, int, int]:
14 | center = point
15 | radius = 10
16 | return (
17 | center[0] - radius,
18 | center[1] - radius,
19 | center[0] + radius,
20 | center[1] + radius,
21 | )
22 |
23 |
24 | def main():
25 | lst.setup_page("Measure Crack Dimensions")
26 |
27 | st.sidebar.title("Camera Calibration 📷")
28 |
29 |     title_str = """
30 |         <h1>Camera Calibration 📷</h1>
38 |     """
39 | st.markdown(title_str, unsafe_allow_html=True)
40 |
41 | col1, col2 = st.columns((2, 5), gap="small")
42 |
43 | with col2:
44 | file_uploader_label = "If not known, take a picture of an object with \
45 | known size using the same zoom as used for the \
46 | crack photos, and click on the two ends of the \
47 | object below"
48 | uploaded_file = st.file_uploader(
49 | file_uploader_label, type=["jpg", "jpeg", "png"]
50 | )
51 | if uploaded_file is not None:
52 | img = Image.open(uploaded_file).convert("RGB")
53 | img.thumbnail((512, 512))
54 | draw = ImageDraw.Draw(img)
55 |
56 | for point in st.session_state["points"]:
57 | coords = get_ellipse_coords(point)
58 | draw.ellipse(coords, fill="red")
59 | if len(st.session_state["points"]) == 2:
60 | draw.line(
61 | st.session_state["points"],
62 | fill="red",
63 | width=5,
64 | )
65 | value = streamlit_image_coordinates(img, key="pil")
66 |
67 | if value is not None:
68 | point = value["x"], value["y"]
69 | if point not in st.session_state["points"]:
70 | st.session_state["points"].append(point)
71 | st.session_state["points"] = st.session_state["points"][-2:]
72 | st.experimental_rerun()
73 | with col1:
74 | st.write("#")
75 | measure = st.number_input(
76 |         "How many inches long is this object?", min_value=0.1, value=0.1
77 | )
78 | if len(st.session_state["points"]) == 2:
79 | dist = math.dist(
80 | st.session_state["points"][0], st.session_state["points"][1]
81 | )
82 | st.session_state["inch_to_pixels"] = dist / measure
83 | st.write("Pixels per inch: ", st.session_state["inch_to_pixels"])
84 |
85 |
86 | if __name__ == "__main__":
87 | main()
88 |
--------------------------------------------------------------------------------
/examples/apps/crack-measurer/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | landingai
3 | opencv-python
4 | streamlit-image-coordinates
5 | centerline
6 | shapely
7 | scikit-image
8 |
--------------------------------------------------------------------------------
/examples/apps/image-folder-support/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | This example is a Streamlit app that supports running inference with a local image folder.
4 |
5 | In particular, this app allows you to run inference with a local image folder, visualize the prediction result, analyze the prediction distribution, and export the prediction result to a CSV file.
6 |
7 | ### Target Audience
8 | We recommend that users have:
9 |
10 | - Basic Python programming skills.
11 | - A basic understanding of the Streamlit library. For more information, go [here](https://docs.streamlit.io/library/get-started/main-concepts).
12 |
13 | ### Install the App
14 |
15 | ```
16 | pip install -r examples/apps/image-folder-support/requirements.txt
17 | ```
18 |
19 | ### Launch the App
20 |
21 | ```
22 | streamlit run examples/apps/image-folder-support/app.py
23 | ```
24 |
--------------------------------------------------------------------------------
/examples/apps/image-folder-support/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | streamlit-image-select
3 | landingai
4 | plotly
--------------------------------------------------------------------------------
/examples/apps/license-plate-ocr/README.md:
--------------------------------------------------------------------------------
1 | This is an example Streamlit app that runs a LandingLens object detection model to detect and then recognize license plate numbers.
2 |
3 | To run this app, first install the requirements:
4 |
5 | ```
6 | pip install -r examples/apps/license-plate-ocr/requirements.txt
7 | ```
8 |
9 | Then run:
10 |
11 | ```
12 | streamlit run examples/apps/license-plate-ocr/app.py
13 | ```
14 |
15 | 
16 |
--------------------------------------------------------------------------------
/examples/apps/license-plate-ocr/app.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | from pathlib import Path
3 |
4 | import streamlit as st
5 | from streamlit_image_select import image_select
6 |
7 | import landingai.pipeline as pl
8 | from landingai import visualize
9 | from landingai.postprocess import crop
10 | from landingai.predict import OcrPredictor, Predictor
11 | from landingai.st_utils import (
12 | get_api_key_or_use_default,
13 | render_api_config_form,
14 | setup_page,
15 | )
16 |
17 | setup_page(page_title="License Plate Detection and Recognition")
18 |
19 | st.sidebar.title("Configuration")
20 | api_key = "land_sk_aMemWbpd41yXnQ0tXvZMh59ISgRuKNRKjJEIUHnkiH32NBJAwf"
21 | od_model_endpoint = "e001c156-5de0-43f3-9991-f19699b31202"
22 |
23 | with st.sidebar:
24 | render_api_config_form()
25 |
26 |
27 | def detect_license_plates(frames):
28 | bounding_boxes = []
29 | overlayed_frames = []
30 |
31 | predictor = Predictor(od_model_endpoint, api_key=api_key)
32 | od_predictor_bar = st.progress(0.0, text="Detecting license plates...")
33 |
34 | for i, frame in enumerate(frames):
35 | prediction = predictor.predict(frame)
36 | # store predictions in a list
37 | overlay = visualize.overlay_predictions(prediction, frame)
38 | bounding_boxes.append(prediction)
39 | overlayed_frames.append(overlay)
40 | od_predictor_bar.progress((i + 1) / len(frames), "Detecting license plates...")
41 |
42 | return bounding_boxes, overlayed_frames
43 |
44 |
45 | def extract_frames(video):
46 | temp_dir = tempfile.mkdtemp()
47 | saved_video_file = Path(temp_dir) / video.name
48 | saved_video_file.write_bytes(video.read())
49 | video_source = pl.image_source.VideoFile(
50 | str(saved_video_file), samples_per_second=1
51 | )
52 | frames = []
53 | with st.spinner(text="Extracting frames from video file..."):
54 | frames.extend(frame_info.image for frame_info in video_source)
55 | st.success("Frame Extraction Finished!")
56 | with st.expander("Preview extracted frames"):
57 | selected_img = image_select(
58 | label=f"Total {len(frames)} images",
59 | images=frames,
60 | key="preview_input_images",
61 | use_container_width=False,
62 | )
63 | st.image(selected_img)
64 | return frames
65 |
66 |
67 | st.caption("Download the sample video file below to try out the app, or upload your own.")
68 | st.video("https://drive.google.com/uc?id=16iwE7mcz9zHqKCw2ilx0QEwSCjDdXEW4")
69 |
70 | if video := st.file_uploader(
71 |     "Upload a video file that contains license plates to get started"
72 | ):
73 | st.video(video)
74 | frames = extract_frames(video)
75 | # run prediction of frames
76 | bounding_boxes, overlayed_frames = detect_license_plates(frames)
77 |
78 | # show frames with overlayed bounding boxes
79 | for i, frame in enumerate(overlayed_frames):
80 | if len(bounding_boxes[i]) == 0:
81 | continue
82 | st.image(frame, width=800)
83 |
84 | cropped_imgs = [
85 | crop(bboxes, frame) for frame, bboxes in zip(frames, bounding_boxes)
86 | ]
87 |
88 | st.subheader(f"Found and cropped {len(cropped_imgs)} license plates below")
89 | # show 5 overlayed frames
90 | for i, cropped in enumerate(cropped_imgs):
91 | if len(cropped) == 0:
92 | continue
93 | for plate in cropped:
94 | st.image(plate)
95 |
96 | # run OCR
97 |     # get the OCR API key configured in the sidebar
98 | api_key = get_api_key_or_use_default()
99 | if not api_key:
100 | st.error("Please set your API key in the sidebar to run OCR.")
101 | st.stop()
102 |
103 | st.subheader(f"Run OCR on the above {len(cropped_imgs)} license plates")
104 | ocr_predictor = OcrPredictor(api_key=api_key)
105 | ocr_preds, overlayed_ocr = [], []
106 | ocr_predictor_bar = st.progress(0.0, text="Run OCR prediction...")
107 |     for i, frame in enumerate(cropped_imgs):
108 | for plate in frame:
109 | ocr_pred = ocr_predictor.predict(plate)
110 | ocr_preds.append(ocr_pred)
111 | overlay = visualize.overlay_predictions(ocr_pred, plate)
112 | overlayed_ocr.append(overlay)
113 | ocr_predictor_bar.progress(
114 | (i + 1) / len(cropped_imgs), "Running OCR prediction..."
115 | )
116 |
117 | for frame, ocr_pred in zip(overlayed_ocr, ocr_preds):
118 | if len(ocr_pred) == 0:
119 | continue
120 | st.image(frame)
121 | for text in ocr_pred:
122 | st.write(text.text)
123 |
--------------------------------------------------------------------------------
/examples/apps/license-plate-ocr/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | streamlit-image-select
3 | landingai
4 |
--------------------------------------------------------------------------------
/examples/apps/object-tracking/README.md:
--------------------------------------------------------------------------------
1 | This is an example Streamlit app that runs a LandingLens object detection model on a live stream to
2 | monitor northbound traffic, southbound traffic, and parked cars on a road.
3 |
4 | To run this app, first install the requirements:
5 |
6 | ```
7 | pip install -r examples/apps/object-tracking/requirements.txt
8 | ```
9 |
10 | Then run:
11 |
12 | ```
13 | streamlit run examples/apps/object-tracking/main.py
14 | ```
15 |
16 | 
17 |
--------------------------------------------------------------------------------
/examples/apps/object-tracking/assets/app.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/examples/apps/object-tracking/assets/app.png
--------------------------------------------------------------------------------
/examples/apps/object-tracking/download_data.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from pathlib import Path
3 | from typing import Callable, List
4 |
5 | import cv2
6 | import m3u8
7 | import numpy.typing as npt
8 | import requests
9 |
10 | M3U8_URL = (
11 | "https://live.hdontap.com/hls/hosb1/sunset-static_swellmagenet.stream/playlist.m3u8"
12 | )
13 | TS_URL = (
14 | "https://edge06.nginx.hdontap.com/hosb1/sunset-static_swellmagenet.stream/media_"
15 | )
16 |
17 | arg_parser = argparse.ArgumentParser()
18 | arg_parser.add_argument("--out_dir", type=str, default="data")
19 | arg_parser.add_argument("--video_file", type=str, default="vid1")
20 | args = arg_parser.parse_args()
21 |
22 |
23 | def get_latest_ts_file(out_path: str) -> None:
24 | if Path(out_path).suffix != ".ts":
25 | raise ValueError(f"Must be a .ts file, got {out_path}")
26 | r = requests.get(M3U8_URL)
27 | m3u8_r = m3u8.loads(r.text)
28 | playlist_uri = m3u8_r.data["playlists"][0]["uri"]
29 | r = requests.get(playlist_uri)
30 | m3u8_r = m3u8.loads(r.text)
31 | media_sequence = m3u8_r.data["media_sequence"]
32 | ts_file = requests.get(TS_URL + str(media_sequence) + ".ts")
33 | with open(out_path, "wb") as f:
34 | f.write(ts_file.content)
35 |
36 |
37 | def get_frames(video_file: str, skip_frame: int = 5) -> List[npt.NDArray]:
38 | cap = cv2.VideoCapture(video_file)
39 | frames = []
40 | i = 0
41 | read, frame = cap.read()
42 | while read:
43 | if i % skip_frame == 0 and frame is not None:
44 | frames.append(frame)
45 | read, frame = cap.read()
46 | i += 1
47 | return frames
48 |
49 |
50 | def write_frames(video_file: str, out_dir: str) -> List[str]:
51 | out_dir_p = Path(out_dir)
52 | out_dir_p.mkdir(parents=True, exist_ok=True)
53 | frames = get_frames(video_file)
54 | video_file_name = Path(video_file).name
55 | output_files = []
56 | for i, frame in enumerate(frames):
57 | file_name = str(out_dir_p / video_file_name) + ".frame" + str(i) + ".jpg"
58 | cv2.imwrite(file_name, frame)
59 | output_files.append(file_name)
60 |
61 | return output_files
62 |
63 |
64 | def crop_data(
65 | input_files: List[str], crop: Callable[[npt.NDArray], npt.NDArray]
66 | ) -> None:
67 | for f in input_files:
68 | img = cv2.imread(f)
69 | img = crop(img)
70 | cv2.imwrite(f, img)
71 |
72 |
73 | if __name__ == "__main__":
74 | video_file = str((Path(args.out_dir) / Path(args.video_file)).with_suffix(".ts"))
75 | get_latest_ts_file(video_file)
76 | files = write_frames(video_file, args.out_dir)
77 | crop_data(files, lambda x: x[600:800, 1300:1700])
78 |
--------------------------------------------------------------------------------
/examples/apps/object-tracking/main.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 | st.title("LandingAI Traffic Tracking")
4 | st.write(
5 |     "This application will grab the latest 10 second traffic clip of the Pacific Coast Highway "
6 | "going to Malibu and count the number of cars going northbound, southbound and parked."
7 | )
8 | st.write("Please enter your LandingLens API credentials and CloudInference Endpoint ID")
9 | api_key = st.text_input(
10 | "LandingLens API Key", value=st.session_state.get("api_key", "")
11 | )
12 | endpoint_id = st.text_input(
13 | "CloudInference Endpoint ID",
14 | value=st.session_state.get("endpoint_id", ""),
15 | )
16 |
17 |
18 | def save(api_key, endpoint_id):
19 | st.session_state["api_key"] = api_key
20 | st.session_state["endpoint_id"] = endpoint_id
21 |
22 |
23 | st.button("Save", on_click=save, args=(api_key, endpoint_id))
24 |
--------------------------------------------------------------------------------
/examples/apps/object-tracking/pages/run_inference.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import streamlit as st
4 | from download_data import get_frames, get_latest_ts_file
5 | from object_tracking import (
6 | filter_parked_cars,
7 | filter_spurious_preds,
8 | get_northbound_southbound,
9 | get_preds,
10 | track_iou,
11 | write_video,
12 | )
13 |
14 | from landingai.predict import Predictor
15 |
16 | VIDEO_CACHE_PATH = "cached_data"
17 |
18 |
19 | def get_latest_traffic():
20 | Path(VIDEO_CACHE_PATH).mkdir(parents=True, exist_ok=True)
21 | get_latest_ts_file("vid.ts")
22 | frames = get_frames("vid.ts")
23 | predictor = Predictor(
24 | st.session_state["endpoint_id"],
25 | api_key=st.session_state["api_key"],
26 | )
27 | bboxes = get_preds(frames, predictor)
28 | tracks, all_idx_to_track = track_iou(bboxes)
29 | write_video(frames, bboxes, all_idx_to_track, "vid_out.mp4")
30 | tracks, parked = filter_parked_cars(tracks, len(frames))
31 | tracks, _ = filter_spurious_preds(tracks)
32 | northbound, southbound = get_northbound_southbound(tracks)
33 | st.video(open("vid_out.mp4", "rb").read())
34 | st.write(f"Northbound Traffic: **{len(northbound)}** cars per 10s")
35 | st.write(f"Southbound Traffic: **{len(southbound)}** cars per 10s")
36 | st.write(f"Parked Cars: **{parked}**")
37 |
38 |
39 | st.title("LandingAI Traffic Counter")
40 | button = st.button("Get Latest Traffic", on_click=get_latest_traffic)
41 | st.divider()
42 |
--------------------------------------------------------------------------------
/examples/apps/object-tracking/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | landingai
3 | m3u8
4 | tqdm
--------------------------------------------------------------------------------
/examples/apps/ocr/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | This example is a Streamlit app that supports running OCR inference.
4 |
5 | In particular, this app allows you to run inference with uploaded images or images from your camera.
6 |
7 | ### Target Audience
8 | We recommend that users have:
9 |
10 | - Basic Python programming skills.
11 | - A basic understanding of the Streamlit library. For more information, go [here](https://docs.streamlit.io/library/get-started/main-concepts).
12 |
13 | ### Installation
14 |
15 | ```
16 | pip install -r examples/apps/ocr/requirements.txt
17 | ```
18 |
19 | ### Launch the App
20 |
21 | ```
22 | streamlit run examples/apps/ocr/app.py
23 | ```
24 |
--------------------------------------------------------------------------------
/examples/apps/ocr/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | # Need a custom fork version for two bug fixes: 1) can't render canvas on a subpath url 2) canvas doesn't support mobile
3 | landingai-streamlit-drawable-canvas
4 | extra-streamlit-components
5 | landingai
6 |
--------------------------------------------------------------------------------
/examples/apps/surfer-count/README.md:
--------------------------------------------------------------------------------
1 | This is an example Streamlit app that runs a LandingLens object detection model on a live stream to identify the surfers at a surfing spot in Southern California.
2 |
3 | To run this app, first install the requirements:
4 |
5 | ```
6 | pip install -r examples/apps/surfer-count/requirements.txt
7 | ```
8 |
9 | Then run:
10 |
11 | ```
12 | streamlit run examples/apps/surfer-count/app.py
13 | ```
14 |
--------------------------------------------------------------------------------
/examples/apps/surfer-count/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 | st.title("Topanga Beach Surfer Counter")
4 | st.write(
5 |     "This application will grab the latest 10s clip of surfers from the Topanga Beach surf cam "
6 | "and count the number of surfers there."
7 | )
8 | st.write("Please enter your LandingLens API key and Cloud Inference Endpoint ID.")
9 | api_key = st.text_input(
10 | "LandingLens API Key", value=st.session_state.get("api_key", "")
11 | )
12 | endpoint_id = st.text_input(
13 | "Cloud Inference Endpoint ID",
14 | value=st.session_state.get("endpoint_id", ""),
15 | )
16 |
17 |
18 | def save(api_key: str, endpoint_id: str):
19 | st.session_state["api_key"] = api_key
20 | st.session_state["endpoint_id"] = endpoint_id
21 |
22 |
23 | st.button("Save", on_click=save, args=(api_key, endpoint_id))
24 |
--------------------------------------------------------------------------------
/examples/apps/surfer-count/pages/run_inference.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import streamlit as st
4 |
5 | from landingai.pipeline.image_source import FrameSet, NetworkedCamera
6 | from landingai.pipeline.postprocessing import get_class_counts
7 | from landingai.predict import Predictor
8 |
9 | VIDEO_CACHE_PATH = Path("cached_data")
10 | VIDEO_CACHE_PATH.mkdir(exist_ok=True, parents=True)
11 | VIDEO_CACHE_PATH = VIDEO_CACHE_PATH / "latest.mp4"
12 | VIDEO_LEN_SEC = 10
13 | FPS = 2
14 | PLAYLIST_URL = (
15 | "https://live.hdontap.com/hls/hosb1/topanga_swellmagnet.stream/playlist.m3u8"
16 | )
17 |
18 |
19 | def get_latest_surfer_count():
20 | vid_src = NetworkedCamera(PLAYLIST_URL, fps=FPS)
21 | surfer_model = Predictor(
22 | st.session_state["endpoint_id"], api_key=st.session_state["api_key"]
23 | )
24 |
25 | frs = FrameSet()
26 | for i, frame in enumerate(vid_src):
27 | if i >= VIDEO_LEN_SEC * FPS:
28 | break
29 | frs.extend(frame)
30 | frs.run_predict(predictor=surfer_model).overlay_predictions()
31 | frs.save_video(str(VIDEO_CACHE_PATH), video_fps=FPS, image_src="overlay")
32 | surfers = (get_class_counts(frs)["surfer"]) / (VIDEO_LEN_SEC * FPS)
33 | st.video(open(VIDEO_CACHE_PATH, "rb").read())
34 | st.write(f"Surfer count: **{surfers}**")
35 |
36 |
37 | st.title("Surfer Counter")
38 | button = st.button("Get Topanga Beach Surfer Count", on_click=get_latest_surfer_count)
39 |
--------------------------------------------------------------------------------
/examples/apps/surfer-count/requirements.txt:
--------------------------------------------------------------------------------
1 | streamlit
2 | landingai
3 |
--------------------------------------------------------------------------------
/examples/apps/zoom-app/README.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | Ensure you have Python 3.9 or higher installed on your machine. If you do not, follow these [instructions](https://docs.anaconda.com/free/anaconda/install/index.html) to install Python. Run `pip install -r requirements.txt` to install the requirements.
4 |
5 | Once the requirements are installed, run `streamlit run app.py` to start the application. It should open in your browser; you may have to give Python permission to access your camera for it to run properly.
6 |
--------------------------------------------------------------------------------
/examples/apps/zoom-app/Zoom_Demo.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "id": "35b5d573-1892-4cb5-a76c-5c04bd3625b2",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from landingai.pipeline.image_source import NetworkedCamera\n",
11 | "from landingai.predict import Predictor\n",
12 | "from landingai.image_source_ops import take_photo_from_webcam\n",
13 | "from landingai.visualize import overlay_bboxes"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": null,
19 | "id": "d0777efc",
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "# Using the SDK for Inference\n",
24 | "# ===========================\n",
25 | "\n",
26 | "# Put your own API Key and Endpoint ID here\n",
27 | "api_key = \"land_sk_JkygHlib8SgryZUgumM6r8GWYfQqiKdE36xDzo4K85fDihpnuG\"\n",
28 | "endpoint_id = \"7e8c1f16-947f-45cd-9f5d-c5bdf8791126\""
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "id": "8232a76a",
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "# Build the predictor object for your model\n",
39 | "model = Predictor(endpoint_id, api_key=api_key)"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "id": "13828949",
46 | "metadata": {},
47 | "outputs": [],
48 | "source": [
49 | "# Grab a frame frome your webcam and run predict on it\n",
50 | "image = take_photo_from_webcam()\n",
51 | "prediction = model.predict(image)\n",
52 | "overlay_bboxes(prediction, image)"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": null,
58 | "id": "07e2035e",
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "# Additional SDK features\n",
63 | "# =======================\n",
64 | "\n",
65 | "from landingai.pipeline.frameset import FrameSet\n",
66 | "\n",
67 | "# Run inference on a single image\n",
68 | "frs = FrameSet.from_image(\"image.jpg\")\n",
69 | "frs.run_predict(predictor=model).overlay_predictions().show_image().show_image(image_src=\"overlay\")"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": null,
75 | "id": "c418e677",
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "from landingai.data_management import Media\n",
80 | "\n",
81 | "project_id = 45412657190923\n",
82 | "\n",
83 | "# List images\n",
84 | "media_client = Media(project_id, api_key)\n",
85 | "media_client.ls(offset=0, limit=2)"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "id": "d1cf29ca",
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "# This code is needed to run asyncio loops in jupyter notebooks, it is not needed if\n",
96 | "# you are running python from the terminal.\n",
97 | "import nest_asyncio\n",
98 | "nest_asyncio.apply()\n",
99 | "\n",
100 | "# Upload image, make sure you have an image in your local directory called image.jpg\n",
101 | "media_client.upload(\"image.jpg\", split=\"dev\")"
102 | ]
103 | }
104 | ],
105 | "metadata": {
106 | "kernelspec": {
107 | "display_name": "Python 3 (ipykernel)",
108 | "language": "python",
109 | "name": "python3"
110 | },
111 | "language_info": {
112 | "codemirror_mode": {
113 | "name": "ipython",
114 | "version": 3
115 | },
116 | "file_extension": ".py",
117 | "mimetype": "text/x-python",
118 | "name": "python",
119 | "nbconvert_exporter": "python",
120 | "pygments_lexer": "ipython3",
121 | "version": "3.10.11"
122 | }
123 | },
124 | "nbformat": 4,
125 | "nbformat_minor": 5
126 | }
127 |
--------------------------------------------------------------------------------
/examples/apps/zoom-app/app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 |
3 |
4 | st.title("")
5 | api_key = st.text_input(
6 | "LandingLens API Key", value=st.session_state.get("api_key", "")
7 | )
8 | endpoint_id = st.text_input(
9 | "Cloud Endpoint ID", value=st.session_state.get("endpoint_id", "")
10 | )
11 |
12 |
13 | def save(api_key: str, endpoint_id: str):
14 | st.session_state["api_key"] = api_key
15 | st.session_state["endpoint_id"] = endpoint_id
16 |
17 |
18 | st.button("Save", on_click=save, args=(api_key, endpoint_id))
19 |
--------------------------------------------------------------------------------
/examples/apps/zoom-app/pages/run_inference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import streamlit as st
3 | import pandas as pd
4 | import altair as alt
5 |
6 | from landingai.pipeline.image_source import Webcam
7 | from landingai.predict import Predictor
8 |
9 |
10 | if "api_key" in st.session_state and "endpoint_id" in st.session_state:
11 | model = Predictor(
12 | st.session_state["endpoint_id"], api_key=st.session_state["api_key"]
13 | )
14 | # video_src = NetworkedCamera(0, fps=1)
15 | image_placeholder = st.empty()
16 | bar_chart_placeholder = st.empty()
17 |
18 | pred_counts = {"Facing Camera": 0, "Facing Away": 0}
19 | with Webcam(fps=0.5) as video_src:
20 | for frame in video_src:
21 | frame.run_predict(model).overlay_predictions()
22 | if len(frame.frames) > 0:
23 | frame_with_pred = frame.frames[-1].other_images["overlay"]
24 | image_placeholder.empty()
25 | with image_placeholder.container():
26 | st.image(np.array(frame_with_pred))
27 |
28 | if len(frame.predictions) > 0:
29 | pred = frame.predictions[-1].label_name
30 | pred_counts[pred] += 1
31 | with bar_chart_placeholder.container():
32 | data = pd.DataFrame(
33 | pred_counts.items(), columns=["label", "count"]
34 | )
35 | chart = (
36 | alt.Chart(data)
37 | .mark_bar()
38 | .encode(
39 | x="label",
40 | y="count",
41 | color=alt.Color(
42 | "label",
43 | scale=alt.Scale(
44 | domain=["Facing Camera", "Facing Away"],
45 | range=["#762172", "#FFD700"],
46 | ),
47 | ),
48 | )
49 | ).interactive()
50 | st.altair_chart(chart, use_container_width=True)
51 | else:
52 | st.warning("Please enter your API Key and Endpoint ID in the sidebar.")
53 |
--------------------------------------------------------------------------------
/examples/apps/zoom-app/requirements.txt:
--------------------------------------------------------------------------------
1 | landingai
2 | streamlit
3 |
--------------------------------------------------------------------------------
/examples/capture-service/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction
2 |
3 | This example focuses on how to continuously run inference on images extracted from streaming video. This application shows how to use a Segmentation model from LandingLens to detect sky and clouds in images extracted from a streaming RTSP video camera feed. A traffic camera is used to capture images.
4 |
5 | ## Run the Example
6 |
7 | ### Prerequisites
8 |
9 | Before starting, install the LandingAI Python library. For more information, see the top-level `README.md`.
10 |
11 | ## Run the Example Source and Inference Model
12 |
13 | To launch the program, run this command:
14 |
15 | ```bash
16 | python examples/capture-service/run.py
17 | ```
18 |
19 | The program captures frames from the video feed every few seconds, and then runs inference on those images. After inference is complete, a pop-up window appears, showing the captured image with the model's predictions overlaid on it.
20 |
21 | > Note: The endpoints for Free Trial LandingLens accounts have a limit of 20 inferences/minute. Do not call inference at more than that rate. You can change the inference frequency by setting the `_CAPTURE_INTERVAL` constant in the `run.py` file, as shown below.
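
For example, since `_CAPTURE_INTERVAL` is the number of seconds between captured frames, setting it to 4 seconds yields at most 15 inferences/minute, safely under the Free Trial limit (a sketch of the one-line change in `run.py`):

```
_CAPTURE_INTERVAL = 4.0  # one frame every 4 seconds -> ~15 inferences/minute
```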
22 |
23 | ## Customize the Example
24 |
25 | 1. Set up a camera that exposes an RTSP URL to your network (your local intranet). If you're not sure if the RTSP URL is working, learn how to test it in this [article](https://support.ipconfigure.com/hc/en-us/articles/115005588503-Using-VLC-to-test-camera-stream).
26 | 2. Train a model in LandingLens, and deploy it to an endpoint via [Cloud Deployment](https://support.landing.ai/landinglens/docs/cloud-deployment).
27 | 3. Get the `endpoint id`, `api key` and `api secret` from LandingLens.
28 | 4. Open the file `examples/capture-service/run.py`, and update the following with your information: `api_key`, `endpoint_id` and `stream_url`.
29 |
30 |
31 | ## Collect Images to Train Your Model
32 |
33 | You need to collect images and train your model with those images before you can run inference. You can use the same script to collect training images by setting `capture_frame` to `True` and `inference_mode` to `False`. The default parameters of the `stream()` function are:
34 |
35 | ```
36 | def stream(capture_frame=False, inference_mode=True):
37 | ...
38 | ```
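
So, to collect training images instead of running inference, the call would look like this (a sketch, assuming the `stream()` helper shown above):

```
stream(capture_frame=True, inference_mode=False)  # collect frames for training, skip inference
```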
39 |
--------------------------------------------------------------------------------
/examples/capture-service/run.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from datetime import datetime
3 |
4 | from landingai.pipeline.image_source import NetworkedCamera
5 | from landingai.predict import Predictor, EdgePredictor
6 | import argparse
7 |
8 | logging.basicConfig(
9 | level=logging.INFO,
10 | format="%(asctime)s %(filename)s %(funcName)s %(message)s",
11 | datefmt="%Y-%m-%d %H:%M:%S",
12 | )
13 |
14 | _LOGGER = logging.getLogger(__name__)
15 |
16 | # How often to capture and process a frame (interval between frames, in seconds)
17 | _CAPTURE_INTERVAL = (
18 | 0.1 # In seconds. Set to None if you want to capture at the maximum rate
19 | )
20 |
21 | # Public Cloud & Sky detection segmentation model
22 | api_key = "land_sk_aMemWbpd41yXnQ0tXvZMh59ISgRuKNRKjJEIUHnkiH32NBJAwf"
23 | endpoint_id = "432d58f6-6cd4-4108-a01c-2f023503d838"
24 | model_id = "9315c71e-31af-451f-9b38-120e035e6240"
25 |
26 | #
27 | # Below we provide some links to public cameras. Local RTSP cameras can also be used by specifying a local URL
28 | # like "rtsp://172.25.101.151/ch0_0.h264". In order to find the URL for your camera, this page is a good start
29 | # https://www.ispyconnect.com/cameras
30 |
31 | # Apple test stream
32 | # stream_url = "http://devimages.apple.com/iphone/samples/bipbop/bipbopall.m3u8"
33 | # Dexter Avenue AL - https://www.wsfa.com/weather/cams/
34 | # stream_url = "https://s78.ipcamlive.com/streams/4etrywblgzguidm6v/stream.m3u8"
35 | # Louisiana Department of Transportation - https://511la.org/
36 | # stream_url = "https://itsstreamingbr.dotd.la.gov/public/br-cam-015.streams/playlist.m3u8"
37 | stream_url = (
38 | "https://itsstreamingbr2.dotd.la.gov/public/lkc-cam-271.streams/playlist.m3u8"
39 | )
40 |
41 |
42 | if __name__ == "__main__":
43 | parser = argparse.ArgumentParser(
44 | description="Capture a live traffic camera and run a cloud segmentation model on it"
45 | )
46 |
47 | parser.add_argument(
48 | "--localinference",
49 | action="store_true",
50 | help="Use a local LandingLens docker inference service",
51 | )
52 | args = parser.parse_args()
53 | if args.localinference:
54 | # Local inference model example. In order to use it, you need to manually run the local inference server with the "cloud & sky" model.
55 | try:
56 | cloud_sky_model = EdgePredictor()
57 | except ConnectionError:
58 | _LOGGER.error(
59 | f"""Failed to connect to the local LandingLens docker inference service. Have you launched the LandingLens container? If not please read the guide here (https://support.landing.ai/docs/docker-deploy)\nOnce you have installed it and obtained a license, run:
60 | docker run -p 8000:8000 --rm --name landingedge\\
61 | -e LANDING_LICENSE_KEY=YOUR_LICENSE_KEY \\
62 | public.ecr.aws/landing-ai/deploy:latest \\
63 | run-model-id -name sdk_example \\
64 | -k {api_key}\\
65 | -m {model_id}
66 | """
67 | )
68 | exit(1)
69 | else:
70 | # Cloud inference model to segment clouds
71 | cloud_sky_model = Predictor(endpoint_id, api_key=api_key)
72 |
73 | Camera = NetworkedCamera(
74 | stream_url, motion_detection_threshold=1, capture_interval=_CAPTURE_INTERVAL
75 | )
76 | _LOGGER.info("Starting")
77 | start_time = datetime.now()
78 | for frame in Camera:
79 | _LOGGER.info(
80 | f"Acquisition time {(datetime.now()-start_time).total_seconds():.5f} sec"
81 | )
82 | frame = frame.downsize(width=1024)
83 | start_time = datetime.now()
84 | frame = frame.run_predict(predictor=cloud_sky_model)
85 | _LOGGER.info(
86 | f"Inference time {(datetime.now()-start_time).total_seconds():.2f} sec"
87 | )
88 | _LOGGER.debug(f"Detailed inference metrics {cloud_sky_model.get_metrics()}")
89 | # Do some further processing on the pipeline
90 | frame = (
91 | frame.overlay_predictions()
92 | # .show_image()
93 | .show_image(include_predictions=True)
94 | # .save_image(filename_prefix="./capture")
95 | )
96 | start_time = datetime.now()
97 |
--------------------------------------------------------------------------------
/examples/edge-models/README.md:
--------------------------------------------------------------------------------
1 | # Edge Models
2 |
3 | ## Installation
4 |
5 | - Python >= 3.10
6 |
7 | Linux:
8 | ```
9 | pip install -r requirements.txt
10 | ```
11 |
12 | Windows:
13 | ```
14 | pip install -r requirements_win.txt
15 | ```
16 |
17 | Note: tflite-runtime is not available for Windows, so we suggest using TensorFlow instead.
18 |
19 | ## Run
20 |
21 | Basic:
22 | ```
23 | python run_bundle_cls.py published_model_bundle.zip image.png
24 | ```
25 |
26 | Enabling hardware acceleration (this is just an example, add your own library path):
27 | ```
28 | python run_bundle_cls.py published_model_bundle.zip image.png /usr/lib/libvx_delegate.so
29 | ```
30 |
--------------------------------------------------------------------------------
/examples/edge-models/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.26.4
2 | tflite-runtime==2.14
3 | opencv-python-headless==4.7.0.72
4 |
--------------------------------------------------------------------------------
/examples/edge-models/requirements_win.txt:
--------------------------------------------------------------------------------
1 | numpy==1.26.4
2 | tensorflow==2.18.0
3 | opencv-python-headless==4.7.0.72
4 |
--------------------------------------------------------------------------------
/examples/video-analytics-notebook/video-analytics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "# Using video analytics to manage inventory\n",
9 | "\n",
10 | "This example shows how an object detection model can be used to identify products on display directly from a video recording. Some of the applications of this approach could be:\n",
11 | "1. Counting stock of a certain SKU\n",
12 | "2. Detecting if a SKU is missing from the display\n",
13 | "3. Auditing the amount of shelf space assigned to each SKU\n",
14 | "\n",
15 | "As part of this notebook, we also showcase the SDK's ability to create a vision pipeline that consumes data directly from Google Drive"
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "## Setup LandingAI SDK"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 4,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "api_key = \"land_sk_aMemWbpd41yXnQ0tXvZMh59ISgRuKNRKjJEIUHnkiH32NBJAwf\" \n",
32 | "endpoint_id = \"067ce386-3958-4d98-be31-9a9af07e361a\"\n",
33 | "video_file = \"https://drive.google.com/uc?export=download&id=12I5r1siMRzcejuFxI-izRshBtmWRmVxQ\"\n",
34 | "\n",
35 | "# Install LandingAI's SDK only if needed to avoid unnecessary restarts\n",
36 | "try:\n",
37 | " import landingai\n",
38 | "except ImportError:\n",
39 | " import os\n",
40 | " from IPython.display import display, Markdown\n",
41 | " display(Markdown(\"## Installing modules. You will need to restart runtime.\"))\n",
42 | " !pip install landingai\n",
43 | " display(Markdown(\"## Please restart runtime.\"))"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "## Run analytics over the video\n",
51 | "In this example, the predictor will identify the presence of one type of candy (i.e. \"Alfajor\"). This example also shows how we can speed up the process by sampling video frames. In this case, the video was recorded at 30 FPS, but we will only analyze 2 frames per second."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "import landingai.pipeline as pl\n",
61 | "from landingai.predict import Predictor, EdgePredictor\n",
62 | "\n",
63 | "video_source = pl.image_source.VideoFile(video_file, samples_per_second=2) # Sample only 2 frames per second\n",
64 | "products_model = Predictor(endpoint_id, api_key=api_key)\n",
65 | "\n",
66 | "frs = pl.FrameSet()\n",
67 | "for frame in video_source:\n",
68 | " frs.extend(\n",
69 | " frame.run_predict(predictor=products_model)\n",
70 | " .overlay_predictions()\n",
71 | " )\n",
72 | "frs.save_video(\"./out.mp4\", image_src=\"overlay\", video_fps=video_source.properties()[2])\n",
73 | "display(\"Done processing\")"
74 | ]
75 | },
76 | {
77 | "cell_type": "markdown",
78 | "metadata": {},
79 | "source": [
80 | "## Show results"
81 | ]
82 | },
83 | {
84 | "cell_type": "code",
85 | "execution_count": 8,
86 | "metadata": {},
87 | "outputs": [
88 | {
89 | "data": {
90 | "text/html": [
91 | ""
94 | ],
95 | "text/plain": [
96 | ""
97 | ]
98 | },
99 | "execution_count": 8,
100 | "metadata": {},
101 | "output_type": "execute_result"
102 | }
103 | ],
104 | "source": [
105 | "from landingai.notebook_utils import display_video\n",
106 | "display_video(\"./out.mp4\")"
107 | ]
108 | }
109 | ],
110 | "metadata": {
111 | "kernelspec": {
112 | "display_name": "Python 3 (ipykernel)",
113 | "language": "python",
114 | "name": "python3"
115 | },
116 | "language_info": {
117 | "codemirror_mode": {
118 | "name": "ipython",
119 | "version": 3
120 | },
121 | "file_extension": ".py",
122 | "mimetype": "text/x-python",
123 | "name": "python",
124 | "nbconvert_exporter": "python",
125 | "pygments_lexer": "ipython3",
126 | "version": "3.11.4"
127 | },
128 | "vscode": {
129 | "interpreter": {
130 | "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
131 | }
132 | }
133 | },
134 | "nbformat": 4,
135 | "nbformat_minor": 2
136 | }
137 |
--------------------------------------------------------------------------------
/examples/webcam-collab-notebook/sample_images/card_01.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/examples/webcam-collab-notebook/sample_images/card_01.jpeg
--------------------------------------------------------------------------------
/landingai/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | .. include:: ../README.md
3 | .. include:: ../CHANGELOG.md
4 | .. include:: ../pdocs/user_guide/1_concepts.md
5 | .. include:: ../pdocs/user_guide/2_credentials.md
6 | .. include:: ../pdocs/user_guide/3_postprocess.md
7 | .. include:: ../pdocs/user_guide/4_pipelines.md
8 | .. include:: ../pdocs/user_guide/5_data_management.md
9 | .. include:: ../pdocs/developer_guide/1_main.md
10 | """
11 |
--------------------------------------------------------------------------------
/landingai/data_management/__init__.py:
--------------------------------------------------------------------------------
1 | from .media import Media # noqa
2 | from .metadata import Metadata # noqa
3 |
--------------------------------------------------------------------------------
/landingai/data_management/label.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Optional
2 |
3 | from landingai.data_management.client import GET_DEFECTS, LandingLens
4 |
5 |
6 | class Label:
7 | """Label management API client.
8 |     This class provides a set of APIs to manage the labels of a particular project on LandingLens.
9 | For example, you can use this class to list all the available labels for a given project.
10 |
11 | Example
12 | -------
13 | >>> client = Label(project_id, api_key)
14 | >>> client.get_label_map()
15 |     {'0': 'ok', '1': 'cat', '2': 'dog'}
16 |
17 | Parameters
18 | ----------
19 | project_id: int
20 |         LandingLens project id. This default can be overridden in individual commands.
21 | api_key: Optional[str]
22 |         LandingLens API Key. If it's not provided, it will be read from the environment variable LANDINGAI_API_KEY, or from the .env file in your project root directory.
23 | """
24 |
25 | def __init__(self, project_id: int, api_key: Optional[str] = None):
26 | self._client = LandingLens(project_id=project_id, api_key=api_key)
27 |
28 | def get_label_map(self) -> Dict[str, str]:
29 | """Get all the available labels for a given project.
30 |
31 | Returns
32 | ----------
33 | Dict[str, str]
34 | A dictionary of label index to label name.
35 | ```
36 | # Example output
37 | {
38 | "0": "ok",
39 | "1": "cat",
40 | "2": "dog",
41 | "3": "duck",
42 | }
43 | ```
44 | """
45 | project_id = self._client._project_id
46 | resp = self._client._api(GET_DEFECTS, params={"projectId": project_id})
47 | resp_data = resp["data"]
48 | label_map = {str(label["indexId"]): label["name"] for label in resp_data}
49 | label_map["0"] = "ok"
50 | return label_map
51 |
--------------------------------------------------------------------------------
/landingai/data_management/metadata.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional, Union
2 |
3 | from landingai.data_management.client import METADATA_GET, METADATA_UPDATE, LandingLens
4 | from landingai.data_management.utils import (
5 | PrettyPrintable,
6 | ids_to_metadata,
7 | metadata_to_ids,
8 | obj_to_dict,
9 | )
10 |
11 |
12 | class Metadata:
13 | """Metadata management API client.
14 | This class provides a set of APIs to manage the metadata of the medias (images) uploaded to LandingLens.
15 | For example, you can use this class to update the metadata of the uploaded medias.
16 |
17 | Example
18 | -------
19 | >>> client = Metadata(project_id, api_key)
20 | >>> client.update([101, 102, 103], creator="tom")
21 |
22 | Parameters
23 | ----------
24 | project_id: int
25 |         LandingLens project id. This default can be overridden in individual commands.
26 | api_key: Optional[str]
27 |         LandingLens API Key. If it's not provided, it will be read from the environment variable LANDINGAI_API_KEY, or from the .env file in your project root directory.
28 | """
29 |
30 | def __init__(self, project_id: int, api_key: Optional[str] = None):
31 | self._client = LandingLens(project_id=project_id, api_key=api_key)
32 |
33 | def update(
34 | self,
35 | media_ids: Union[int, List[int]],
36 | **input_metadata: Optional[Dict[str, Any]],
37 | ) -> Dict[str, Any]:
38 | """Update or insert a dictionary of metadata for a set of medias.
39 |
40 | Parameters
41 | ----------
42 | media_ids
43 | Media ids to update.
44 | input_metadata
45 | A dictionary of metadata to be updated or inserted. The key of the metadata
46 | needs to be created/registered (for the first time) on LandingLens before
47 | calling update().
48 |
49 | Returns
50 | ----------
51 | Dict[str, Any]
52 | The result from the update().
53 | ```
54 | # Example output
55 | {
56 | "project_id": 12345,
57 | "metadata": [...],
58 |             "media_ids": [123, 124],
59 | }
60 | ```
61 | """
62 | project_id = self._client._project_id
63 | if (
64 | not media_ids
65 | or isinstance(media_ids, bool)
66 | or (not isinstance(media_ids, int) and len(media_ids) == 0)
67 | ):
68 | raise ValueError("Missing required flags: {'media_ids'}")
69 |
70 | if not input_metadata:
71 | raise ValueError("Missing required flags: {'metadata'}")
72 |
73 | dataset_id = self._client.get_project_property(project_id, "dataset_id")
74 |
75 | if isinstance(media_ids, int):
76 | media_ids = [media_ids]
77 | else:
78 | # to avoid errors due to things like numpy.int
79 | media_ids = list(map(int, media_ids))
80 |
81 | metadata_mapping, id_to_metadata = self._client.get_metadata_mappings(
82 | project_id
83 | )
84 |
85 | body = _MetadataUploadRequestBody(
86 | selectOption=_SelectOption(media_ids),
87 | project=_Project(project_id, dataset_id),
88 | metadata=metadata_to_ids(input_metadata, metadata_mapping),
89 | )
90 |
91 | resp = self._client._api(METADATA_UPDATE, data=obj_to_dict(body))
92 | resp_data = resp["data"]
93 | return {
94 | "project_id": project_id,
95 | "metadata": ids_to_metadata(resp_data[0]["metadata"], id_to_metadata),
96 | "media_ids": [media["mediaId"] for media in resp_data],
97 | }
98 |
99 | def get(self, media_id: int) -> Dict[str, str]:
100 | """Return all the metadata associated with a given media."""
101 | resp = self._client._api(
102 | METADATA_GET, params={"objectId": media_id, "objectType": "media"}
103 | )
104 | _, id_to_metadata = self._client.get_metadata_mappings(self._client._project_id)
105 | return {id_to_metadata[int(k)]: v for k, v in resp["data"].items()}
106 |
107 |
108 | class _SelectOption(PrettyPrintable):
109 | def __init__(self, selected_media: List[int]) -> None:
110 | self.selected_media = selected_media
111 | self.unselected_media: List[Union[int, List[int]]] = []
112 | self.field_filter_map: Dict[str, Any] = {}
113 | self.column_filter_map: Dict[str, Any] = {}
114 | self.is_unselect_mode = False
115 |
116 |
117 | class _Project(PrettyPrintable):
118 | def __init__(
119 | self,
120 | project_id: int,
121 | dataset_id: int,
122 | ) -> None:
123 | self.project_id = project_id
124 | self.dataset_id = dataset_id
125 |
126 |
127 | class _MetadataUploadRequestBody(PrettyPrintable):
128 | def __init__(
129 | self,
130 | selectOption: _SelectOption,
131 | project: _Project,
132 | metadata: Dict[str, Any],
133 | ) -> None:
134 | self.selectOption = selectOption
135 | self.project = project
136 | self.metadata = metadata
137 |
--------------------------------------------------------------------------------
/landingai/data_management/utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | import pprint
3 | from enum import Enum
4 | from typing import Any, Dict, cast
5 |
6 |
7 | def metadata_to_ids(
8 | input_metadata: Dict[str, Any], metadata_mapping: Dict[str, Any]
9 | ) -> Dict[str, Any]:
10 | validate_metadata(input_metadata, metadata_mapping)
11 | return {
12 | metadata_mapping[key][0]: val
13 | for key, val in input_metadata.items()
14 | if key in metadata_mapping
15 | }
16 |
17 |
18 | def ids_to_metadata(
19 | metadata_ids: Dict[str, Any], id_to_metadata: Dict[int, str]
20 | ) -> Dict[str, Any]:
21 | return {
22 | id_to_metadata[int(key)]: val
23 | for key, val in metadata_ids.items()
24 | if int(key) in id_to_metadata
25 | }
26 |
27 |
28 | def to_camel_case(snake_str: str) -> str:
29 | """Convert a snake case string to camel case"""
30 | words = snake_str.split("_")
31 | return words[0] + "".join(word.title() for word in words[1:])
32 |
33 |
34 | def validate_metadata(
35 | input_metadata: Dict[str, Any], metadata_mapping: Dict[str, Any]
36 | ) -> None:
37 | """Validate the input metadata against the metadata mapping. Raise ValueError if any metadata keys are not available."""
38 | not_allowed = set(input_metadata.keys()) - set(metadata_mapping.keys())
39 | # TODO: Validate also values and maybe types. Or shouldn't it be the job of the server?
40 | if len(not_allowed) > 0:
41 | raise ValueError(
42 | f"""Not allowed fields: {not_allowed}.
43 | Available fields are {metadata_mapping.keys()}.
44 |             If you want to add new fields, please add them to the associated project on the LandingLens platform."""
45 | )
46 |
47 |
48 | def obj_to_dict(obj: object) -> Dict[str, Any]:
49 | """Convert an object to a json dictionary with camel case keys"""
50 | json_body = json.dumps(obj, cls=Encoder)
51 | return cast(Dict[str, Any], json.loads(json_body))
52 |
53 |
54 | def obj_to_params(obj: object) -> Dict[str, Any]:
55 | """Convert an object to query parameters in dict format where the dict keys are in camel case."""
56 | return {
57 | to_camel_case(k): v if isinstance(v, list) else json.dumps(v, cls=Encoder)
58 | for k, v in obj.__dict__.items()
59 | }
60 |
61 |
62 | class Encoder(json.JSONEncoder):
63 | """JSON encoder that converts all keys to camel case"""
64 |
65 | def default(self, obj: object) -> Any:
66 | if isinstance(obj, dict):
67 | return {to_camel_case(k): v for k, v in obj.items()}
68 | if isinstance(obj, Enum):
69 | return obj._name_
70 | return {to_camel_case(k): v for k, v in obj.__dict__.items()}
71 |
72 |
73 | class PrettyPrintable:
74 |     """A mix-in class that enables its subclasses to be serialized into a pretty-printed string"""
75 |
76 | def to_str(self) -> str:
77 | """Returns the string representation of the model"""
78 | return pprint.pformat(self.__dict__)
79 |
80 | def __repr__(self) -> str:
81 | """For `print` and `pprint`"""
82 | return self.to_str()
83 |
--------------------------------------------------------------------------------
/landingai/fonts/default_font_ch_en.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/landingai/fonts/default_font_ch_en.ttf
--------------------------------------------------------------------------------
/landingai/notebook_utils.py:
--------------------------------------------------------------------------------
1 | """This module contains common notebook utilities that are used across the example notebooks in this repo.
2 | It's only intended for examples provided by this repo. When using the SDK in your own project, you don't need to use this module.
3 | """
4 |
5 | from functools import lru_cache
6 |
7 |
8 | def is_running_in_colab_notebook() -> bool:
9 | """Return True if the code is running in a Google Colab notebook."""
10 | try:
11 | from IPython import get_ipython
12 |
13 | return get_ipython().__class__.__module__ == "google.colab._shell" # type: ignore
14 | except ImportError:
15 | return False # Probably standard Python interpreter
16 |
17 |
18 | def is_running_in_jupyter_notebook() -> bool:
19 | """Return True if the code is running in a Jupyter notebook."""
20 | try:
21 | from IPython import get_ipython
22 |
23 | # See: https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
24 | shell = get_ipython().__class__.__name__
25 | if shell == "ZMQInteractiveShell":
26 | return True # Jupyter notebook or qtconsole
27 | elif shell == "TerminalInteractiveShell":
28 | return False # Terminal running IPython
29 | else:
30 | return False # Other type (?)
31 | except ImportError:
32 | return False # Probably standard Python interpreter
33 |
34 |
35 | @lru_cache(maxsize=None)
36 | def is_running_in_notebook() -> bool:
37 | """Return True if the code is running in a notebook."""
38 | return is_running_in_colab_notebook() or is_running_in_jupyter_notebook()
39 |
40 |
41 | def display_video(path_to_file: str): # type: ignore
42 | """Return a notebook-independent video object that can be shown on Jupyter and Colab."""
43 | if is_running_in_colab_notebook():
44 | from IPython.display import HTML
45 | from base64 import b64encode
46 |
47 | mp4 = open(path_to_file, "rb").read()
48 | data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
49 | return HTML(
50 | """
51 |         <video width=400 controls>
52 |               <source src="%s" type="video/mp4">
53 |         </video>
54 |         """
55 | % data_url
56 | )
57 | else:
58 | from IPython.display import Video
59 |
60 | return Video(path_to_file)
61 |
--------------------------------------------------------------------------------
/landingai/pipeline/__init__.py:
--------------------------------------------------------------------------------
1 | """The vision pipeline abstraction helps chain image processing operations as
2 | sequence of steps. Each step consumes and produces a `FrameSet` which typically
3 | contains a source image and derivative metadata and images."""
4 |
5 | # Import image_source to enable IDE auto completion
6 | import landingai.pipeline.image_source as image_source # noqa: F401
7 | import landingai.pipeline.postprocessing as postprocessing # noqa: F401
8 |
9 | from .frameset import FrameSet # noqa: F401
10 |
--------------------------------------------------------------------------------
/landingai/pipeline/postprocessing.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Sequence, cast
2 |
3 | from landingai.pipeline.frameset import FrameSet
4 | from landingai.postprocess import class_counts
5 | from landingai.common import ClassificationPrediction
6 |
7 |
8 | def get_class_counts(
9 | frs: FrameSet, add_id_to_classname: bool = False
10 | ) -> Dict[str, int]:
11 | """This method returns the number of occurrences of each detected class in the FrameSet.
12 |
13 | Parameters
14 | ----------
15 | add_id_to_classname : bool, optional
16 | By default, detections with the same class names and different defect
17 | id will be counted as the same. Set to True if you want to count them
18 | separately
19 |
20 | Returns
21 | -------
22 | Dict[str, int]
23 | A dictionary with the counts
24 | ```
25 | Example:
26 | {
27 | "cat": 10,
28 | "dog": 3
29 | }
30 | ```
31 | """
32 | counts = {}
33 | for frame in frs.frames:
34 | # Here is a sample return from class_counts: {1: (3, 'Heart'), 3: (3, 'Club'), 4: (3, 'Spade'), 2: (3, 'Diamond')}
35 | if frame.predictions._inner_type == "OcrPrediction":
36 | raise TypeError("Can't count classes for OcrPredictor")
37 | predictions = cast(Sequence[ClassificationPrediction], frame.predictions)
38 | for k, v in class_counts(predictions).items():
39 | if add_id_to_classname: # This is useful if class names are not unique
40 | class_name = f"{v[1]}_{k}"
41 | else:
42 | class_name = v[1]
43 | if class_name not in counts:
44 | counts[class_name] = v[0]
45 | else:
46 | counts[class_name] += v[0]
47 | return counts
48 |
--------------------------------------------------------------------------------
/landingai/predict/__init__.py:
--------------------------------------------------------------------------------
1 | from .edge import EdgePredictor # noqa: F401
2 | from .cloud import Predictor # noqa: F401
3 | from .ocr import OcrPredictor # noqa: F401
4 | from .snowflake import SnowflakeNativeAppPredictor # noqa: F401
5 |
--------------------------------------------------------------------------------
/landingai/predict/utils.py:
--------------------------------------------------------------------------------
1 | """Module for making predictions on LandingLens models."""
2 |
3 | import json
4 | import logging
5 | from typing import Any, Dict, List, Optional, Tuple, Type
6 |
7 | import requests
8 | from requests import Session
9 | from requests.adapters import HTTPAdapter
10 | from urllib3.util.retry import Retry
11 |
12 | from landingai.common import (
13 | Prediction,
14 | )
15 | from landingai.exceptions import HttpResponse
16 | from landingai.timer import Timer
17 |
18 | _LOGGER = logging.getLogger(__name__)
19 |
20 |
21 | def serialize_rois(rois: List[List[Tuple[int, int]]], mode: str) -> str:
22 | """Serialize the regions of interest into a JSON string."""
23 | rois_payload = [
24 | {
25 | "location": [{"x": coord[0], "y": coord[1]} for coord in roi],
26 | "mode": mode,
27 | }
28 | for roi in rois
29 | ]
30 | return json.dumps([rois_payload])
31 |
32 |
33 | class PredictionExtractor:
34 | """The base class for all extractors. This is useful for type checking."""
35 |
36 | @staticmethod
37 | def extract_prediction(response: Any) -> List[Prediction]:
38 | raise NotImplementedError()
39 |
40 |
41 | def create_requests_session(
42 | url: str, num_retry: int, headers: Dict[str, str]
43 | ) -> Session:
44 | """Create a requests session with retry"""
45 | session = Session()
46 | retries = Retry(
47 | # TODO: make them configurable
48 | # The 5XX retry scheme needs to account for the circuit breaker which will shutdown a service for 10 seconds
49 | total=num_retry, # Defaults to 3
50 |         backoff_factor=7,  # This is the number of seconds to wait before the second retry (i.e. 0, 7, 21). The first retry is immediate.
51 | raise_on_redirect=True,
52 | raise_on_status=False, # We are already raising exceptions during backend invocations
53 | allowed_methods=["GET", "POST", "PUT"],
54 | status_forcelist=[
55 |             # 408 Request Timeout, 413 Content Too Large
56 | # 429, # Too Many Requests (ie. rate limiter). This is handled externally
57 |             # 500 Internal Server Error -> We don't retry here since it tends to reflect deterministic software bugs
58 | 502, # Bad Gateway
59 | 503, # Service Unavailable (include cloud circuit breaker)
60 | 504, # Gateway Timeout
61 | ],
62 | )
63 | session.mount(
64 | url, HTTPAdapter(max_retries=retries if num_retry > 0 else num_retry)
65 |     )  # Since POST is not idempotent, we will only retry on this specific API
66 | session.headers.update(headers)
67 | return session
68 |
69 |
70 | @Timer(name="_do_inference", log_fn=_LOGGER.debug)
71 | def get_cloudinference_prediction(
72 | session: Session,
73 | endpoint_url: str,
74 | files: Dict[str, Any],
75 | params: Dict[str, Any],
76 | extractor_class: Type[PredictionExtractor],
77 | *,
78 | data: Optional[Dict[str, Any]] = None,
79 | ) -> Tuple[List[Prediction], Dict[str, int]]:
80 | """Call the inference endpoint and extract the prediction result."""
81 | try:
82 | resp = session.post(endpoint_url, files=files, params=params, data=data)
83 | except requests.exceptions.ConnectionError as e:
84 | raise ConnectionError(
85 | f"Failed to connect to the model server. Please double check the model server url ({endpoint_url}) is correct.\nException detail: {e}"
86 | ) from e
87 | response = HttpResponse.from_response(resp)
88 | _LOGGER.debug("Response: %s", response)
89 | response.raise_for_status()
90 | json_dict = response.json()
91 | # OCR response is a list of list of predictions
92 | if isinstance(json_dict, list):
93 | return (extractor_class.extract_prediction(json_dict), {})
94 | # Save performance metrics for debugging
95 | performance_metrics = json_dict.get("latency", {})
96 | return (extractor_class.extract_prediction(json_dict), performance_metrics)
97 |
--------------------------------------------------------------------------------
/landingai/storage/__init__.py:
--------------------------------------------------------------------------------
1 | """Third-party storage integrations."""
2 |
--------------------------------------------------------------------------------
/landingai/storage/data_access.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import re
4 | import tempfile
5 | from pathlib import Path
6 | from typing import Any, Dict, Optional
7 | from urllib.parse import unquote, urlparse
8 |
9 | import requests
10 |
11 | _LOGGER = logging.getLogger(__name__)
12 |
13 |
14 | # TODO: support output type stream
15 | def read_file(url: str) -> Dict[str, Any]:
16 | """Read bytes from a URL.
17 | Typically, the URL is a presigned URL (for example, from Amazon S3 or Snowflake) that points to a video or image file.
18 | Returns
19 | -------
20 | Dict[str, Any]
21 |         Returns the content under "content". May optionally include "filename" if the server provided it.
22 | """
23 | response = requests.get(url, allow_redirects=True) # True is the default behavior
24 | try:
25 | response.raise_for_status()
26 | except requests.exceptions.HTTPError as e:
27 | reason = f"{e.response.text} (status code: {e.response.status_code})"
28 | msg_prefix = f"Failed to read from url ({url}) due to {reason}"
29 | if response.status_code == 403:
30 |             error_msg = f"{msg_prefix}. Please double check that the url is not expired and is well-formed."
31 | raise ValueError(error_msg) from e
32 | elif response.status_code == 404:
33 | raise FileNotFoundError(
34 | f"{msg_prefix}. Please double check the file exists and the url is well-formed."
35 | ) from e
36 | else:
37 | error_msg = f"{msg_prefix}. Please try again later or reach out to us via our LandingAI platform."
38 | raise ValueError(error_msg) from e
39 | if response.status_code >= 300:
40 | raise ValueError(
41 | f"Failed to read from url ({url}) due to {response.text} (status code: {response.status_code})"
42 | )
43 | ret = {"content": response.content}
44 | # Check if server returned the file name
45 | if "content-disposition" in response.headers:
46 | m = re.findall(
47 | "filename=[\"']*([^;\"']+)", response.headers["content-disposition"]
48 | )
49 | if len(m): # if there is a match select the first one
50 | ret["filename"] = m[0]
51 | _LOGGER.info(
52 | f"Received content with length {len(response.content)}, type {response.headers.get('Content-Type')}"
53 | # and filename "+ str(ret["filename"])
54 | )
55 |
56 | return ret
57 |
58 |
59 | def download_file(
60 | url: str,
61 | file_output_path: Optional[Path] = None,
62 | ) -> str:
63 |     """Download a file from a public url. This function will follow HTTP redirects.
64 |
65 | Parameters
66 | ----------
67 | url : str
68 | Source url
69 | file_output_path : Optional[Path], optional
70 | The local output file path for the downloaded file. If no path is provided, the file will be saved into a temporary directory provided by the OS (which could get deleted after reboot), and when possible the extension of the downloaded file will be included in the output file path.
71 |
72 | Returns
73 | -------
74 |     str
75 |         The path to the downloaded file.
76 | """
77 | # TODO: It would be nice for this function to not re-download if the src has not been updated
78 | ret = read_file(url) # Fetch the file
79 | if file_output_path is not None:
80 | with open(str(file_output_path), "wb") as f: # type: Any
81 | f.write(ret["content"])
82 |
83 | else:
84 | suffix = ""
85 | if "filename" in ret:
86 | # use filename provided by server
87 | suffix = "--" + str(ret["filename"])
88 | else:
89 | # try to get the name from the URL
90 | r = urlparse(url)
91 | suffix = "--" + os.path.basename(unquote(r.path))
92 | with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as f:
93 | f.write(ret["content"])
94 | return f.name # type: ignore
95 |
96 |
97 | def fetch_from_uri(uri: str, **kwargs) -> Path: # type: ignore
98 | """Check if the URI is local and fetch it if it is not
99 |
100 | Parameters
101 | ----------
102 | uri : str
103 | Supported URIs
104 | - local paths
105 | - file://
106 | - http://
107 | - https://
108 |
109 |
110 | Returns
111 | -------
112 | Path
113 | Path to a local resource
114 | """
115 | # TODO support other URIs
116 | # snowflake://stage/filename (credentials will be passed on kwargs)
117 | r = urlparse(uri)
118 | # Match local unix and windows paths (e.g. C:\)
119 | if r.scheme == "" or r.scheme == "file" or len(r.scheme) == 1:
120 | # The file is already local
121 | return Path(uri)
122 | if r.scheme == "http" or r.scheme == "https":
123 | # Fetch the file from the web
124 | return Path(download_file(uri))
125 | raise ValueError(f"URI not supported {uri}")
126 |
--------------------------------------------------------------------------------
/landingai/storage/snowflake.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import tempfile
3 | from pathlib import Path
4 | from typing import Optional
5 |
6 | from pydantic import Field
7 | from pydantic_settings import BaseSettings, SettingsConfigDict
8 |
9 | from landingai.storage.data_access import download_file
10 |
11 | _LOGGER = logging.getLogger(__name__)
12 |
13 |
14 | class SnowflakeCredential(BaseSettings):
15 | """Snowflake API credential. It's used to connect to Snowflake.
16 | It supports loading from environment variables or .env files.
17 |
18 |     The supported names of the environment variables are (case-insensitive):
19 | - SNOWFLAKE_USER
20 | - SNOWFLAKE_PASSWORD
21 | - SNOWFLAKE_ACCOUNT
22 |
23 | Environment variables will always take priority over values loaded from a dotenv file.
24 | """
25 |
26 | user: str
27 | password: str
28 | account: str
29 |
30 | model_config = SettingsConfigDict(
31 | env_file=".env",
32 | env_prefix="SNOWFLAKE_",
33 | case_sensitive=False,
34 | extra="ignore",
35 | )
36 |
37 |
38 | class SnowflakeDBConfig(BaseSettings):
39 | """Snowflake connection config.
40 | It supports loading from environment variables or .env files.
41 |
42 |     The supported names of the environment variables are (case-insensitive):
43 | - SNOWFLAKE_WAREHOUSE
44 | - SNOWFLAKE_DATABASE
45 | - SNOWFLAKE_SCHEMA
46 |
47 | Environment variables will always take priority over values loaded from a dotenv file.
48 | """
49 |
50 | warehouse: str
51 | database: str
52 | # NOTE: the name "schema" is reserved by pydantic, so we use "snowflake_schema" instead.
53 | snowflake_schema: str = Field(..., validation_alias="SNOWFLAKE_SCHEMA")
54 |
55 | model_config = SettingsConfigDict(
56 | env_file=".env",
57 | env_prefix="SNOWFLAKE_",
58 | case_sensitive=False,
59 | extra="ignore",
60 | )
61 |
62 |
63 | def save_remote_file_to_local(
64 | remote_filename: str,
65 | stage_name: str,
66 | *,
67 | local_output: Optional[Path] = None,
68 | credential: Optional[SnowflakeCredential] = None,
69 | connection_config: Optional[SnowflakeDBConfig] = None,
70 | ) -> Path:
71 | """Save a file stored in Snowflake to local disk.
72 | If local_output is not provided, a temporary directory will be created and used.
73 |     If credential or connection_config is not provided, they will be read from environment variables or the .env file instead.
74 | """
75 | url = get_snowflake_presigned_url(
76 | remote_filename,
77 | stage_name,
78 | credential=credential,
79 | connection_config=connection_config,
80 | )
81 | if local_output is None:
82 | local_output = Path(tempfile.mkdtemp())
83 | file_path = local_output / remote_filename
84 | file_path.parent.mkdir(parents=True, exist_ok=True)
85 | download_file(url, file_output_path=file_path)
86 | _LOGGER.info(f"Saved file {remote_filename} to {file_path}")
87 | return file_path
88 |
89 |
90 | def get_snowflake_presigned_url(
91 | remote_filename: str,
92 | stage_name: str,
93 | *,
94 | credential: Optional[SnowflakeCredential] = None,
95 | connection_config: Optional[SnowflakeDBConfig] = None,
96 | ) -> str:
97 | """Get a presigned URL for a file stored in Snowflake.
98 | NOTE: Snowflake returns a valid URL even if the file doesn't exist.
99 |     So downstream code needs to check that the file exists first.
100 | """
101 | import snowflake.connector # type: ignore
102 |
103 | if credential is None:
104 | credential = SnowflakeCredential()
105 | if connection_config is None:
106 | connection_config = SnowflakeDBConfig()
107 |
108 | ctx = snowflake.connector.connect(
109 | user=credential.user,
110 | password=credential.password,
111 | account=credential.account,
112 | warehouse=connection_config.warehouse,
113 | database=connection_config.database,
114 | schema=connection_config.snowflake_schema,
115 | )
116 | cur = ctx.cursor()
117 | exec_res = cur.execute(f"LIST @{stage_name}")
118 | if exec_res is None:
119 | raise ValueError(f"Failed to list files in stage: {stage_name}")
120 | files = exec_res.fetchall()
121 | _LOGGER.debug(f"Files in stage {stage_name}: {files}")
122 | exec_res = cur.execute(
123 | f"SELECT get_presigned_url(@{stage_name}, '{remote_filename}') as url"
124 | )
125 | if exec_res is None:
126 | raise ValueError(
127 | f"Failed to get presigned url for file: {remote_filename} in stage: {stage_name}"
128 | )
129 | result = exec_res.fetchall()
130 | if len(result) == 0 or len(result[0]) == 0:
131 | raise FileNotFoundError(
132 | f"File ({remote_filename}) not found in stage {stage_name}. Please double check the file exists in the expected location, stage: {stage_name}, db config: {connection_config}."
133 | )
134 | result_url: str = result[0][0]
135 | _LOGGER.info(f"Result url: {result_url}")
136 | return result_url
137 |
--------------------------------------------------------------------------------
/landingai/telemetry.py:
--------------------------------------------------------------------------------
1 | """This module contains the telemetry configuration and APIs for the landingai package (intented for internal use only)."""
2 |
3 |
4 | import os
5 | import platform
6 | import sys
7 | from functools import lru_cache
8 | from importlib.metadata import version
9 | from pathlib import Path
10 | from typing import Dict
11 |
12 | from landingai.notebook_utils import (
13 | is_running_in_colab_notebook,
14 | is_running_in_jupyter_notebook,
15 | )
16 | from landingai.st_utils import is_running_in_streamlit
17 |
18 |
19 | @lru_cache(maxsize=None)
20 | def get_runtime_environment_info() -> Dict[str, str]:
21 | """Return a set of runtime environment information in key value pairs."""
22 | return {
23 | "lib_type": "pylib",
24 | "lib_version": version("landingai"),
25 | "python_version": platform.python_version(),
26 | "os": platform.platform(),
27 | "runtime": _resolve_python_runtime(),
28 | }
29 |
30 |
31 | @lru_cache(maxsize=None)
32 | def is_running_in_pytest() -> bool:
33 | """Return True if the code is running in a pytest session."""
34 | # See: https://stackoverflow.com/questions/25188119/test-if-code-is-executed-from-within-a-py-test-session
35 | return "pytest" in sys.modules
36 |
37 |
38 | def _resolve_python_runtime() -> str:
39 | if is_running_in_colab_notebook():
40 | runtime = "colab"
41 | elif is_running_in_jupyter_notebook():
42 | runtime = "notebook"
43 | elif is_running_in_streamlit():
44 | runtime = "streamlit"
45 | elif is_running_in_pytest():
46 | runtime = "pytest"
47 | else:
48 | runtime = Path(os.environ.get("_", "unknown")).name
49 | return runtime
50 |
--------------------------------------------------------------------------------
/landingai/transform.py:
--------------------------------------------------------------------------------
1 | """Module for image transformations."""
2 |
3 | from typing import List, Tuple
4 |
5 | import cv2
6 | import numpy as np
7 | import PIL.Image
8 |
9 |
10 | def crop_rotated_rectangle(
11 | image: PIL.Image.Image,
12 | rect: List[Tuple[int, int]],
13 | angle: float,
14 | ) -> Tuple[np.ndarray, List[Tuple[int, int]]]:
15 | """Crop the input image based on the rotated rectangle.
16 |     The crop region is calculated from the rotated rectangle's corners.
17 |
18 | Parameters
19 | ----------
20 |     image
21 | the input image to be cropped
22 | rect
23 | the unrotated rectangle (in parallel with the edges of the image)
24 | angle
25 | the angle of the rotated rectangle
26 | Returns
27 | -------
28 | Tuple[np.ndarray, List[Tuple[int, int]]]
29 | the cropped image and the coordinates of the rotated rectangle
30 | """
31 | # rot_matrix = cv2.getRotationMatrix2D(center=center, angle=angle, scale=1)
32 | # corners = cv2.transform(np.array(rect)[None], rot_matrix)
33 | # quad_box = corners[0].tolist()
34 | # return get_minarea_rect_crop(img, corners), quad_box
35 | [[left, top], [right, top], [right, bottom], [left, bottom]] = rect
36 | center = ((left + right) / 2, (top + bottom) / 2)
37 | img_np = np.asarray(image)
38 | shape = (img_np.shape[1], img_np.shape[0])
39 |
40 |     matrix = cv2.getRotationMatrix2D(center=center, angle=angle, scale=1)
41 |     img_np = cv2.warpAffine(src=img_np, M=matrix, dsize=shape)  # rotate the image so the rectangle becomes axis-aligned
42 |     width, height = rect[1][0] - rect[0][0], rect[3][1] - rect[0][1]  # rectangle size from its corners
43 |     x, y = int(center[0] - width / 2), int(center[1] - height / 2)  # top-left of the crop around the center
44 |
45 |     img_np = img_np[y : y + height, x : x + width]  # crop the now axis-aligned rectangle
46 |     corners = cv2.transform(np.array(rect)[None], matrix)  # map the original corners through the same rotation
47 | quad_box: List[Tuple[int, int]] = corners[0].tolist()
48 |
49 | return img_np, quad_box
50 |
--------------------------------------------------------------------------------
/landingai/utils.py:
--------------------------------------------------------------------------------
1 | """Module for common utility functions."""
2 | import io
3 | import logging
4 | import os
5 | from typing import Optional, Union
6 |
7 | import numpy as np
8 | import PIL.Image
9 | from pydantic import ValidationError
10 |
11 | from landingai.common import APIKey
12 | from landingai.exceptions import InvalidApiKeyError
13 | from landingai.timer import Timer
14 |
15 | _LLENS_SUPPORTED_IMAGE_FORMATS = ["JPG", "JPEG", "PNG", "BMP"]
16 | _DEFAULT_FORMAT = "JPEG"
17 | _IMG_SERIALIZATION_FORMAT_KEY = "DEFAULT_IMAGE_SERIALIZATION_FORMAT"
18 |
19 |
20 | _LOGGER = logging.getLogger(__name__)
21 |
22 |
23 | @Timer(name="serialize_image", log_fn=_LOGGER.debug)
24 | def serialize_image(image: Union[np.ndarray, PIL.Image.Image]) -> bytes:
25 | """Serialize the input image into bytes of an image file.
26 |
27 | The image file encoding format is set to "JPEG" for optimal latency.
28 | For RGBA images, the encoding format will be `PNG` to preserve transparency.
29 |     For palettized images, i.e. mode `P` or `PA`, the image will be converted to RGB before encoding.
30 | In addition, the image file encoding format can be overwritten by the environment variable `DEFAULT_IMAGE_SERIALIZATION_FORMAT`.
31 | Supported image serialization formats are: JPEG, PNG, BMP.
32 | """
33 | if image is None or (isinstance(image, np.ndarray) and len(image) == 0):
34 |         raise ValueError(f"Input image must be non-empty, but got: {image}")
35 | format = _resolve_serialization_format(image)
36 | _LOGGER.debug("Use %s as the serialization format.", format)
37 | if isinstance(image, np.ndarray):
38 | image = PIL.Image.fromarray(image)
39 | if image.mode in ["P", "PA"]:
40 | image = image.convert("RGB")
41 | img_buffer = io.BytesIO()
42 | image.save(img_buffer, format=format)
43 | buffer_bytes = img_buffer.getvalue()
44 | img_buffer.close()
45 | return buffer_bytes
46 |
47 |
48 | def _resolve_serialization_format(image: Union[np.ndarray, PIL.Image.Image]) -> str:
49 | default_format = os.getenv(_IMG_SERIALIZATION_FORMAT_KEY, _DEFAULT_FORMAT)
50 | assert default_format.upper() in _LLENS_SUPPORTED_IMAGE_FORMATS
51 | if isinstance(image, np.ndarray):
52 | return default_format
53 |
54 | assert isinstance(image, PIL.Image.Image)
55 | if image.mode == "RGBA":
56 | return "PNG"
57 | return "JPEG"
58 |
59 |
60 | def load_api_credential(api_key: Optional[str] = None) -> APIKey:
61 | """Load API credential from different sources.
62 |
63 | Parameters
64 | ----------
65 | api_key:
66 | The API key argument to be passed in, by default None.
67 | The API key can be provided as arguments or loaded from the environment variables or .env file.
68 |         The API key loading priority is: arguments > environment variables > .env file.
69 |
70 | Returns
71 | -------
72 | APIKey
73 | An APIKey (v2 key) instance.
74 | """
75 | if api_key is not None:
76 | return APIKey(api_key=api_key)
77 | else:
78 | # Load from environment variables or .env file
79 | try:
80 | return APIKey()
81 | except ValidationError as e:
82 | raise InvalidApiKeyError(
83 | "API key is either not provided or invalid."
84 | ) from e
85 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: LandingAI Python Library Documentation
2 | site_url: https://landing-ai.github.io/
3 | repo_url: https://github.com/landing-ai/landingai-python
4 | edit_uri: edit/main/docs/
5 |
6 |
7 | theme:
8 | name: "material"
9 | custom_dir: docs/_overrides
10 | features:
11 | - content.code.copy
12 | - content.code.annotate
13 | - content.action.edit
14 |
15 | plugins:
16 | - mkdocstrings
17 | - search
18 |
19 | markdown_extensions:
20 | # Syntax highlight
21 | - pymdownx.highlight:
22 | anchor_linenums: true
23 | line_spans: __span
24 | pygments_lang_class: true
25 | - pymdownx.inlinehilite
26 | - pymdownx.snippets
27 | - pymdownx.superfences
28 |
29 | # Multiline note/warning/etc blocks (https://squidfunk.github.io/mkdocs-material/reference/admonitions)
30 | - admonition
31 | - pymdownx.details
32 |
33 | nav:
34 | - Quick start: index.md
35 | - Examples: examples.md
36 | - Image Acquisition:
37 | - Image Acquisition: image-acquisition/image-acquisition.md
38 | - Image Files: image-acquisition/image-file.md
39 | - Video Files: image-acquisition/video-file.md
40 | - Webcam: image-acquisition/webcam.md
41 | - Network Camera: image-acquisition/network-cameras.md
42 | - Screenshots: image-acquisition/screenshots.md
43 | - Image Operations: image-operations.md
44 | - Running Inferences:
45 | - Getting Started: inferences/getting-started.md
46 | - Working with Frames: inferences/frames-inference.md
47 | - Overlaying Predictions: inferences/overlaying-predictions.md
48 | - Running Inferences Locally: inferences/docker-deployment.md
49 | - Snowflake Native App: inferences/snowflake-native-app.md
50 | - Extracting Text (OCR) [beta]: inferences/ocr.md
51 | - Data Management: metadata.md
52 | #- Image operations (soon): image-operations.md
53 | - APIs:
54 | - landingai.common: api/common.md
55 | - landingai.data_management: api/data_management.md
56 | - landingai.exceptions: api/exceptions.md
57 | - landingai.image_source_ops: api/image_source_ops.md
58 | - landingai.notebook_utils: api/notebook_utils.md
59 | - landingai.pipeline: api/pipeline.md
60 | - landingai.postprocess: api/postprocess.md
61 | - landingai.predict: api/predict.md
62 | - landingai.st_utils: api/st_utils.md
63 | - landingai.storage: api/storage.md
64 | - landingai.telemetry: api/telemetry.md
65 | - landingai.timer: api/timer.md
66 | - landingai.transform: api/transform.md
67 | - landingai.utils: api/utils.md
68 | - landingai.visualize: api/visualize.md
69 | - Changelog: changelog.md
70 | - Contributing: contributing.md
71 |
--------------------------------------------------------------------------------
/pdocs/assets/Metadata_Management_UI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/pdocs/assets/Metadata_Management_UI.png
--------------------------------------------------------------------------------
/pdocs/developer_guide/1_main.md:
--------------------------------------------------------------------------------
1 | # Developer Guide
2 |
3 | This guide introduces you to the `landingai` development process and provides information on writing, testing, and building the `landingai` library.
4 |
5 | Read this guide if you need to perform any of the following tasks:
6 |
7 | - Install the `landingai` library locally.
8 | - Contribute to the `landingai` library.
9 |
10 | ## Install `landingai` Library Locally
11 |
12 | ### Prerequisite: Install Poetry
13 |
14 | > `landingai` uses `Poetry` for packaging and dependency management. If you want to build it from source, you have to install Poetry first. To see all possible options, refer to the [Poetry documentation](https://python-poetry.org/docs/#installation).
15 |
16 | For Linux, macOS, Windows (WSL):
17 |
18 | ```
19 | curl -sSL https://install.python-poetry.org | python3 -
20 | ```
21 |
22 | Note: You can switch to a different Python version by specifying it:
23 |
24 | ```
25 | curl -sSL https://install.python-poetry.org | python3.10 -
26 | ```
27 |
28 | Or run the following command after installing Poetry:
29 |
30 | ```
31 | poetry env use 3.10
32 | ```
33 |
34 | ### Install All Dependencies
35 |
36 | ```bash
37 | poetry install --all-extras
38 | ```
39 |
40 | ### Activate the virtualenv
41 |
42 | ```bash
43 | poetry shell
44 | ```
45 |
46 | ## Test and Lint `landingai`
47 |
48 | ### Run Linting
49 |
50 | ```bash
51 | poetry run flake8 . --exclude .venv --count --show-source --statistics
52 | ```
53 |
54 | ### Run Tests
55 |
56 | ```bash
57 | poetry run pytest tests/
58 | ```
59 |
60 | ## Release
61 |
62 | The CI and CD pipelines are defined in the `.github/workflows/ci_cd.yml` file.
63 |
64 | Every git commit will trigger a release to PyPI at https://pypi.org/project/landingai/
65 |
66 | ### Versioning
67 |
68 | When we release a new library version, we version it using [Semantic Versioning 2.0.0](https://semver.org/) (`MAJOR.MINOR.PATCH`). The version number is defined in the `pyproject.toml` file in the `version` field.
69 |
70 | As a general rule of thumb, given a version number `MAJOR.MINOR.PATCH`, increment the:
71 |
72 | - `MAJOR` version when you make incompatible API changes.
73 | - `MINOR` version when you add functionality in a backward-compatible manner, such as adding a new feature.
74 | - `PATCH` version when you make backward-compatible bug fixes and minor changes.
75 |
76 | Note: The CD pipeline will automatically increment the `PATCH` version for every git commit.
77 | **For a `MINOR` or `MAJOR` version change, you need to manually update `pyproject.toml` to bump the version number.**
78 |
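79 | For example, adding a backward-compatible feature on top of version `0.3.49` would mean manually bumping the `MINOR` version in `pyproject.toml` (illustrative snippet):
80 |
81 | ```
82 | [tool.poetry]
83 | name = "landingai"
84 | version = "0.4.0"  # MINOR bump: new functionality, no breaking changes
85 | ```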
--------------------------------------------------------------------------------
/pdocs/developer_guide/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/pdocs/developer_guide/__init__.py
--------------------------------------------------------------------------------
/pdocs/templates/module.html.jinja2:
--------------------------------------------------------------------------------
1 | {% extends "default/module.html.jinja2" %}
2 | {% block nav_footer %}
3 | {% if footer_text %}
4 |
5 | {% endif %}
6 |
7 |
8 |
9 |
16 | {% endblock %}
--------------------------------------------------------------------------------
/pdocs/user_guide/2_credentials.md:
--------------------------------------------------------------------------------
1 | ## Manage API Credentials
2 |
3 | If you send images to an endpoint through the API (Cloud Deployment), you must add your API key to the API call. You can generate the API key in LandingLens. This API key is also known as API key v2. See [here](https://support.landing.ai/docs/api-key-and-api-secret) for more information.
4 |
5 | Once you have generated the API key, here are three ways to configure it, ordered by the priority in which they are loaded (a combined sketch appears at the end of this page):
6 |
7 | 1. Pass it as a function parameter. See `landingai.predict.Predictor`.
8 | 2. Set it as environment variables. For example: `export LANDINGAI_API_KEY=...`.
9 | 3. Store it in an `.env` file under your project root directory. For example, here is a set of credentials in an `.env` file:
10 |
11 | ```
12 | LANDINGAI_API_KEY=land_sk_v7b0hdyfj6271xy2o9lmiwkkcb12345
13 | ```
14 |
15 | ### Legacy API key and secret
16 |
17 | In the past, LandingLens supported generating a key and secret pair, known as the API key v1. This key is no longer supported in the `landingai` Python package from version `0.1.0` onward.
18 |
19 | See [here](https://support.landing.ai/docs/api-key) for how to generate a v2 API key.
20 |
21 | ### FAQ
22 |
23 | #### What's the difference between the v1 API key and the v2 API key?
24 |
25 | Here are a few differences:
26 | 1. The v2 API key always starts with the prefix "land_sk_", whereas the v1 API key doesn't.
27 | 2. The v1 API key always comes with a secret string, and the SDK requires both, whereas the v2 API key is a single string.
28 | 3. Users can only generate v2 API keys after July 2023.
29 |
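30 | To tie the options together, here is a minimal sketch of all three configuration methods (the endpoint id and keys below are placeholders):
31 |
32 | ```python
33 | import os
34 |
35 | from landingai.predict import Predictor
36 |
37 | # Option 1: pass the key explicitly (highest priority).
38 | predictor = Predictor(endpoint_id="your-endpoint-id", api_key="land_sk_...")
39 |
40 | # Option 2: set the environment variable before creating the Predictor.
41 | os.environ["LANDINGAI_API_KEY"] = "land_sk_..."
42 | predictor = Predictor(endpoint_id="your-endpoint-id")
43 |
44 | # Option 3: rely on a .env file in the project root that contains
45 | # LANDINGAI_API_KEY=land_sk_...; it is loaded when nothing else is set.
46 | predictor = Predictor(endpoint_id="your-endpoint-id")
47 | ```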
--------------------------------------------------------------------------------
/pdocs/user_guide/3_postprocess.md:
--------------------------------------------------------------------------------
1 | ## Post-processing
2 |
3 | The `landingai` library provides a set of common post-processing APIs to further transform your inference result data.
4 |
5 | Some common use cases are:
6 |
7 | - Get the predicted class count over the predictions of a set of images
8 | - Get the predicted pixel area (coverage) over the predictions of a set of images
9 |
10 | See the `landingai.postprocess` module for more details.
11 |
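12 | As an illustrative sketch, here is how the predicted class count could be computed from a list of predictions (the `predictions` variable is assumed to come from a `landingai.predict.Predictor.predict()` call):
13 |
14 | ```python
15 | from landingai.postprocess import class_counts
16 |
17 | # predictions: the list returned by Predictor.predict() for one or more images
18 | counts = class_counts(predictions)
19 | # class_counts maps a label index to a (count, label name) tuple, e.g.
20 | # {1: (3, 'Heart'), 3: (3, 'Club'), 4: (3, 'Spade'), 2: (3, 'Diamond')}
21 | for _, (count, name) in counts.items():
22 |     print(f"{name}: {count}")
23 | ```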
--------------------------------------------------------------------------------
/pdocs/user_guide/4_pipelines.md:
--------------------------------------------------------------------------------
1 | ## Vision Pipelines
2 |
3 | ### Image acquisition
4 | Pipelines can simplify complex vision tasks by breaking them into a sequence of operations that are applied to every image. Images are modeled as `landingai.pipeline.frameset.Frame` and sequences of images are modeled as `landingai.pipeline.frameset.FrameSet`. Most image sources will produce a `FrameSet` even when it contains a single image.
5 |
6 | For example, a `landingai.pipeline.image_source.NetworkedCamera` can connect to a live video source and expose it as a `FrameSet` iterator. This is convenient because it allows subsequent stages of the pipeline to introduce new derived frames as part of the processing.
7 |
8 | Other examples of data acquisition classes are `landingai.pipeline.image_source.Webcam`, which collects images directly from the webcam of the current device, and `landingai.pipeline.image_source.Screenshot`, which takes screenshots of the current device (see the acquisition sketch at the end of this page).
9 |
10 | ### Running predictions
11 |
12 | You can use data pipelines to run predictions using the `landingai.predict.Predictor` class. You just need the `endpoint_id` of the endpoint where your model is deployed (available on the platform) and your API key:
13 |
14 | ```python
15 | predictor = Predictor(endpoint_id="abcde-1234-xxxx", api_key="land_sk_xxxx")
16 |
17 | # Get images from Webcam at a 1 FPS rate, run predictions on it and save results.
18 | with Webcam(fps=1) as webcam:
19 | for frame in webcam:
20 |         (frame
21 |          .downsize(width=512)
22 |          .run_predict(predictor=predictor)
23 |          .save_image("/tmp/detected-object", image_src="overlay"))
24 | ```
25 |
26 | You can also check the prediction result using some of the helper methods:
27 |
28 | ```python
29 | predictor = Predictor(endpoint_id="abcde-1234-xxxx", api_key="land_sk_xxxx")
30 | # Get images from Webcam at a 1 FPS rate, run predictions on it and check if
31 | # in the prediction we find "coffee-mug", a class created in LandingLens platform:
32 | with Webcam(fps=1) as webcam:
33 | for frame in webcam:
34 |         frame = (frame
35 |                  .downsize(width=512)
36 |                  .run_predict(predictor=predictor))
37 | if "coffee-mug" in frame.predictions:
38 | print(f"Found {len(frame.predictions)} coffee mugs in the image")
39 | ```
40 |
41 | `FrameSet` predictions also have other methods to help filter predicted classes:
42 |
43 | ```python
44 | # Returns only the predictions with a confidence score above 0.9
45 | sure_predictions = frame.predictions.filter_threshold(0.9)
46 |
47 | # Returns only predictions for the "teapot" class
48 | teapot_predictions = frame.predictions.filter_label("teapot")
49 | ```
50 |
51 | As another example, if we are detecting faces in a live stream, we may want to first use an object detection model to identify the regions of interest and then break the initial `Frame` into multiple frames (one per face). Subsequent stages of the pipeline may apply other models to individual faces.
52 |
53 | The following example shows a basic pipeline that applies several processing layers starting from a single image. In this case, after running inference (i.e. `run_predict`), the `overlay_predictions` function creates a copy of the original image (i.e. `frs[0].image`) and populates `frs[0].other_images["overlay"]` with the results.
54 |
55 | ```python
56 | frs = FrameSet.from_image("sample_images/1196.png")
57 | (frs.run_predict(predictor=...)
58 |     .overlay_predictions()
59 |     .show_image()
60 |     .show_image(image_src="overlay"))
61 |
62 | ```
63 |
64 | For more details on the operations supported on pipelines, go to `landingai.pipeline.frameset.FrameSet`.
65 |
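66 | ### Image acquisition sketch
67 |
68 | As referenced above, here is a minimal acquisition sketch, assuming `NetworkedCamera` can be used as a context manager like the `Webcam` examples (the RTSP address and constructor arguments are illustrative placeholders):
69 |
70 | ```python
71 | from landingai.pipeline.image_source import NetworkedCamera
72 |
73 | stream_url = "rtsp://192.168.0.77:8000/camera"  # placeholder address
74 | with NetworkedCamera(stream_url, fps=1) as camera:
75 |     for frame in camera:
76 |         # Each frame supports the same chainable operations shown above
77 |         # (downsize, run_predict, save_image, ...).
78 |         frame.downsize(width=512).save_image("/tmp/live-frame")
79 | ```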
--------------------------------------------------------------------------------
/pdocs/user_guide/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/pdocs/user_guide/__init__.py
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "landingai"
3 | version = "0.3.49"
4 | description = "Helper library for interacting with LandingAI LandingLens"
5 | authors = ["LandingAI "]
6 | readme = "README.md"
7 | packages = [{include = "landingai"}]
8 |
9 | [build-system]
10 | requires = ["poetry-core"]
11 | build-backend = "poetry.core.masonry.api"
12 |
13 | [tool.poetry.urls]
14 | "Homepage" = "https://landing.ai"
15 | "repository" = "https://github.com/landing-ai/landingai-python"
16 | "documentation" = "https://landing-ai.github.io/landingai-python/landingai.html"
17 |
18 | [tool.poetry.dependencies] # main dependency group
19 | python = ">=3.9,<4.0"
20 |
21 | opencv-python = ">=4.5,<5.0" # about 87MB (exclude transitive dependencies)
22 | numpy = ">=1.21.0,<2.0.0"
23 | pillow = ">=9.0,<11.0"
24 | pydantic = "2.*"
25 | requests = "2.*"
26 | urllib3 = "^1.26.0"
27 | # snowflake-connector-python = "3.0.*" # about 51MB (exclude transitive dependencies)
28 | bbox-visualizer = "^0.1.0"
29 | segmentation-mask-overlay = "^0.3.4"
30 | imageio = { version = "2.*", extras = ["ffmpeg"] }
31 | aiohttp = { version = ">=3.7.3,<4.0.0", extras = ["speedups"] }
32 | aiofiles = ">=0.7.0,<1.0.0"
33 | tqdm = ">=4.64.0,<5.0.0"
34 | tenacity = "^8.2.3"
35 | pandas = "2.*"
36 | requests-toolbelt = "^1.0.0"
37 |
38 | # Snowflake optionals
39 | snowflake-connector-python = {version="^3.10.0", optional = true}
40 | cryptography = {version="^39.0.0", optional = true}
41 | pydantic-settings = "^2.3.1"
42 |
43 | [tool.poetry.group.dev.dependencies]
44 | ruff = "^0.1.8"
45 | autoflake = "1.*"
46 | pytest = "7.*"
47 | pdoc = "14.*"
48 | responses = "^0.23.1"
49 | mypy = "^1.3.0"
50 | types-requests = "^2.31.0.0"
51 | types-pillow = "^9.5.0.4"
52 | data-science-types = "^0.2.23"
53 | testbook = "^0.4.2"
54 | types-aiofiles = "^23.1.0.4"
55 | types-tqdm = "^4.65.0.1"
56 | aioresponses = "^0.7.4"
57 | setuptools = "^68.0.0"
58 | mkdocs = "^1.5.3"
59 | mkdocstrings = {extras = ["python"], version = "^0.23.0"}
60 | mkdocs-material = "^9.4.2"
61 |
62 | [tool.poetry.extras]
63 | snowflake = ["snowflake-connector-python", "cryptography"]
64 |
65 | [tool.poetry.group.examples.dependencies]
66 | jupyterlab = "4.*"
67 |
68 | [tool.pytest.ini_options]
69 | log_cli = true
70 | log_cli_level = "INFO"
71 | log_cli_format = "%(asctime)s [%(levelname)s] %(message)s (%(filename)s:%(lineno)s)"
72 | log_cli_date_format = "%Y-%m-%d %H:%M:%S"
73 |
74 |
75 | [tool.mypy]
76 | exclude = "landingai/tests"
77 | plugins = "pydantic.mypy"
78 | show_error_context = true
79 | pretty = true
80 | check_untyped_defs = true
81 | disallow_untyped_defs = true
82 | no_implicit_optional = true
83 | strict_optional = true
84 | strict_equality = true
85 | strict_concatenate = true
86 | warn_redundant_casts = true
87 | warn_unused_configs = true
88 | warn_unused_ignores = false
89 | warn_return_any = true
90 | show_error_codes = true
91 | disallow_any_unimported = true
92 |
93 | [[tool.mypy.overrides]]
94 | ignore_missing_imports = true
95 | module = [
96 | "cv2.*",
97 | "segmentation_mask_overlay.*",
98 | "bbox_visualizer.*",
99 | "streamlit.*",
100 | "requests_toolbelt.*",
101 | ]
102 |
103 | [tool.ruff]
104 | # Exclude a variety of commonly ignored directories.
105 | exclude = [
106 | ".bzr",
107 | ".direnv",
108 | ".eggs",
109 | ".git",
110 | ".git-rewrite",
111 | ".hg",
112 | ".mypy_cache",
113 | ".nox",
114 | ".pants.d",
115 | ".pytype",
116 | ".ruff_cache",
117 | ".svn",
118 | ".tox",
119 | ".venv",
120 | "__pypackages__",
121 | "_build",
122 | "buck-out",
123 | "build",
124 | "dist",
125 | "node_modules",
126 | "venv",
127 | ]
128 |
129 | # Same as Black.
130 | line-length = 88
131 | indent-width = 4
132 |
133 | # Assume Python 3.9
134 | target-version = "py39"
135 |
136 | [tool.ruff.lint]
137 | # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
138 | # Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
139 | # McCabe complexity (`C901`) by default.
140 | select = ["E4", "E7", "E9", "F"]
141 | ignore = []
142 |
143 | # Allow fix for all enabled rules (when `--fix`) is provided.
144 | fixable = ["ALL"]
145 | unfixable = []
146 |
147 | # Allow unused variables when underscore-prefixed.
148 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
149 |
150 | [tool.ruff.format]
151 | # Like Black, use double quotes for strings.
152 | quote-style = "double"
153 |
154 | # Like Black, indent with spaces, rather than tabs.
155 | indent-style = "space"
156 |
157 | # Like Black, respect magic trailing commas.
158 | skip-magic-trailing-comma = false
159 |
160 | # Like Black, automatically detect the appropriate line ending.
161 | line-ending = "auto"
162 |
163 | # Enable auto-formatting of code examples in docstrings. Markdown,
164 | # reStructuredText code/literal blocks and doctests are all supported.
165 | #
166 | # This is currently disabled by default, but it is planned for this
167 | # to be opt-out in the future.
168 | docstring-code-format = false
169 |
170 | # Set the line length limit used when formatting code snippets in
171 | # docstrings.
172 | #
173 | # This only has an effect when the `docstring-code-format` setting is
174 | # enabled.
175 | docstring-code-line-length = "dynamic"
176 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from landingai.common import SegmentationPrediction
7 |
8 |
9 | @pytest.fixture
10 | def seg_mask_validator():
11 | def assert_seg_mask(pred: SegmentationPrediction, expected: Dict[str, Any]):
12 | assert pred.label_name == expected["label_name"]
13 | assert pred.label_index == expected["label_index"]
14 | np.testing.assert_almost_equal(
15 | pred.score, expected["score"], decimal=3, err_msg="SEG score mismatch"
16 | )
17 | assert pred.num_predicted_pixels == expected["num_predicted_pixels"]
18 | assert (
19 | pred.percentage_predicted_pixels == expected["percentage_predicted_pixels"]
20 | )
21 | assert pred.decoded_boolean_mask.shape == expected["mask_shape"]
22 | assert np.unique(pred.decoded_boolean_mask).tolist() == [0, 1]
23 | assert np.unique(pred.decoded_index_mask).tolist() == [0, pred.label_index]
24 | if "encoded_mask" in expected:
25 | assert pred.encoded_mask == expected["encoded_mask"]
26 |
27 | return assert_seg_mask
28 |
--------------------------------------------------------------------------------
/tests/data/images/cameraman.tiff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cameraman.tiff
--------------------------------------------------------------------------------
/tests/data/images/cereal-tiny/brightness-1.5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal-tiny/brightness-1.5.jpeg
--------------------------------------------------------------------------------
/tests/data/images/cereal-tiny/color-1.5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal-tiny/color-1.5.jpeg
--------------------------------------------------------------------------------
/tests/data/images/cereal-tiny/contrast-1.5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal-tiny/contrast-1.5.jpeg
--------------------------------------------------------------------------------
/tests/data/images/cereal-tiny/original.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal-tiny/original.jpeg
--------------------------------------------------------------------------------
/tests/data/images/cereal-tiny/sharpness-1.5.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal-tiny/sharpness-1.5.jpeg
--------------------------------------------------------------------------------
/tests/data/images/cereal1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/cereal1.jpeg
--------------------------------------------------------------------------------
/tests/data/images/expected_bbox_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/expected_bbox_overlay.png
--------------------------------------------------------------------------------
/tests/data/images/expected_ocr_overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/expected_ocr_overlay.png
--------------------------------------------------------------------------------
/tests/data/images/expected_vp_masks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/expected_vp_masks.png
--------------------------------------------------------------------------------
/tests/data/images/farm-coverage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/farm-coverage.jpg
--------------------------------------------------------------------------------
/tests/data/images/ocr_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/ocr_test.png
--------------------------------------------------------------------------------
/tests/data/images/palettized_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/palettized_image.png
--------------------------------------------------------------------------------
/tests/data/images/wildfire1.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/images/wildfire1.jpeg
--------------------------------------------------------------------------------
/tests/data/responses/default_class_model_response.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"backbonetype":null,"backbonepredictions":null,"predictions":{"score":0.9956502318382263,"labelIndex":0,"labelName":"HasFire"},"type":"ClassificationPrediction","latency":{"preprocess_s":0.0031490325927734375,"infer_s":0.10236716270446777,"postprocess_s":4.1961669921875e-05,"serialize_s":0.00015473365783691406,"input_conversion_s":0.002999544143676758,"model_loading_s":0.0002105236053466797},"model_id":"59eff733-1dcd-4ace-b104-9041c745f1da"}'
5 | content_type: text/plain
6 | method: POST
7 | status: 200
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43&device_type=pylib&runtime=pytest
9 |
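For context, recorded fixtures like this one are replayed in the unit tests through the `responses` library and exercised via `Predictor`, the same pattern used in `tests/unit/landingai/test_postprocess.py`. A minimal sketch, reusing the endpoint id from the recorded URL above (the repo's actual test for this fixture may differ):

import numpy as np
import responses
from PIL import Image

from landingai.predict import Predictor


@responses.activate
def test_class_predict():
    # Replay the recorded HTTP exchange instead of calling the live endpoint.
    responses._add_from_file(
        file_path="tests/data/responses/default_class_model_response.yaml"
    )
    predictor = Predictor(
        "8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43", api_key="land_sk_12345"
    )
    img = np.asarray(Image.open("tests/data/images/wildfire1.jpeg"))
    preds = predictor.predict(img)
    assert preds[0].label_name == "HasFire"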
--------------------------------------------------------------------------------
/tests/data/responses/test_edge_class_predict.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"type":"ClassificationPrediction","predictions":{"labelName":"HasFire","defectId":77008,"labelIndex":0,"score":0.9956502318382263},"metadata":{"image_id":"","inspection_station_id":"","location_id":"","capture_timestamp":"2023-06-12T22:10:45.2830543-07:00"}}'
5 | content_type: text/plain
6 | method: POST
7 | status: 200
8 | url: http://localhost:8123/images
9 |
--------------------------------------------------------------------------------
/tests/data/responses/test_edge_od_predict.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"type":"ObjectDetectionPrediction","predictions":{"f82d7158-cb75-4ef6-94f0-0eb8d27161c3":{"score":0.993014,"labelIndex":1,"defectId":42255,"labelName":"Screw","coordinates":{"xmin":945,"xmax":1118,"ymin":1603,"ymax":1795}},"7cef99d4-91bd-4363-92e1-17ef8c928409":{"score":0.9921601,"labelIndex":1,"defectId":42255,"labelName":"Screw","coordinates":{"xmin":436,"xmax":640,"ymin":1037,"ymax":1203}},"05f7db0c-cbe3-4f15-b81b-4fc430f210fd":{"score":0.9547385,"labelIndex":1,"defectId":42255,"labelName":"Screw","coordinates":{"xmin":1515,"xmax":1977,"ymin":1419,"ymax":1787}}},"metadata":{"image_id":"","inspection_station_id":"","location_id":"","capture_timestamp":"2023-06-13T16:58:01.1159388-07:00"}}'
5 | content_type: text/plain
6 | method: POST
7 | status: 200
8 | url: http://localhost:8123/images
9 |
--------------------------------------------------------------------------------
/tests/data/responses/test_get_label_map.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"code":0,"message":"","data":[{"id":84689,"name":"num_plate","descriptionText":"","orgId":1,"projectId":34243219343364,"sections":[],"createdAt":"2023-07-21T19:43:50.625Z","color":null,"defect_book_example_ids":[],"indexId":1,"updatedAt":null,"isArchived":null,"updatedByUserId":"912093c0-2842-4c0e-9131-1069df8255c5"},{"id":84690,"name":"number_plate","descriptionText":"","orgId":1,"projectId":34243219343364,"sections":[],"createdAt":"2023-07-21T19:44:39.318Z","color":null,"defect_book_example_ids":[],"indexId":2,"updatedAt":null,"isArchived":null,"updatedByUserId":"912093c0-2842-4c0e-9131-1069df8255c5"}]}'
5 | content_type: text/plain
6 | method: GET
7 | status: 200
8 | url: https://app.landing.ai/api/defect/defects?projectId=34243219343364
9 |
--------------------------------------------------------------------------------
/tests/data/responses/test_ocr_predict.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '[[{"text":"test","score":0.6326617002487183,"location":[{"x":598,"y":248},{"x":818,"y":250},{"x":818,"y":303},{"x":598,"y":301}]}]]'
5 | content_type: text/plain
6 | method: POST
7 | status: 200
8 | url: https://app.landing.ai/ocr/v1/detect-text
9 |
--------------------------------------------------------------------------------
/tests/data/responses/test_read_stream.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: "\uFFFDPNG\r\n\x1A\n\0\0\0\rIHDR\0\0\0\x02\0\0\0\x02\b\x02\0\0\0\uFFFD\u051A\
5 | s\0\0\0\vIDATx\uFFFDc`@\x06\0\0\x0E\0\x01\uFFFD\uFFFDs\uFFFD\0\0\0\0IEND\uFFFD\
6 | B`\uFFFD"
7 | content_type: text/plain; charset=utf-8
8 | method: GET
9 | status: 200
10 | url: https://landing-aviinference-benchmarking-dev.s3.us-east-2.amazonaws.com/images/test.png?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjELP%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMSJHMEUCIFrGe9p9sBKng7GnrdeTPETmaXUz2RItwW9DtpyBXxkEAiEAomX3OFUwJUduZIJ5ujvONJUYK3qj9kOhHlZ7WvUuRAMq9gMI3P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARACGgw5NzAwNzMwNDE5OTMiDFFuNFyCyCXH%2BAiTkCrKA8DfON81axWS6frqZ0soRRkJIFlaVJyGkTlWkchYZqb5hi%2BqnIYX6sxKwtM07QOQ%2FQoAgHmGxIsLQA84YwKq2Zty7RI%2Fsxtq%2BdMEe3oeZjX%2FO%2F%2Fp%2BvYuoQXbzRDHZz%2BNL6sJaySsNzsh5lRU5qjGowff7dBBdLPswhWYlZxnng5YPSJjlvZodABudG8S88B%2BV3Ml%2BC%2Fd2Q%2Bf0FCaiyMHfMCECGlBMIXRatEmuMJksEu%2Bfhrz5IoypolbBWwCsBZOeloRcz50L5%2FlBwqyUkSD7KliJel1rN2Qoq8mCLXgY9ySHBl%2BKDgrR1n8Nh0eR99t2BQ57EcOcswSoQeqAVehFPuLCRBLpVuiP7BG4h%2Fqdi%2FoQnr5t1wrSP1T7DWzhH4uCZDNdERrYGG9RoDaxdvMJl05xpt7%2B6d1E%2BFD7hRzLRW1q9Rg7pdKmOtPE4XlPBC4MYMp7lTgXvI4QGA1nB6rTsqTwie%2Fm1q6g9%2FpRXbffuNu3pt%2FN7Vf8bRl%2B1dx%2F7CKzsgTvRDildpuYYCzHIyszQGlLFSEhtId7%2BOrCdIymmW7FcC9Adt0g31oij2FTrhLUVkf1DGzWVmWOE2A6el%2FG7IXDZJawEpNhOuCMKWUtKMGOpQCCeSRlMNN9jETYk%2B0JhcI3zmvvBTqsjGt4jrZ9FjJ2Dq2JhYvoCmBf%2F%2BrrhulxH2bYs740CdLfuDnTK8VSeJTjTHSOeRlb2r5%2Bx4DYIS3CDlj2clxWll0Vyl8Vzl8M7h%2BPwViB3zPbuB%2BsMMqPkh9uoc8uLie1aaGpQ57vqfHuJtZWLIzEptogTG2I92WCAPsiAECJkTPfNlnrtP%2F6VCzTlI1OrAKoonDuGIdp5UeA107oEaXjWo7WLQTXULRHY8q16HU86K0pkwqREK%2FPpNgZSBRQ7RymMgxJI%2F5eYWfT2JHayND6PIgOSDjtIHbqKRPVwgQOdni3vypgBWt043Koh4zLbu7bh6W%2Fm9gBRlRLvY%2BW2C1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T185235Z&X-Amz-SignedHeaders=host&X-Amz-Expires=18000&X-Amz-Credential=ASIA6DXG35RE4HTVSB3E%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=debb56818b5126fec2ffbc06630bb94940f5f30fb2add48e67f262226df78c82
11 | - response:
12 | auto_calculate_content_length: false
13 | body: '<?xml version="1.0" encoding="UTF-8"?>
14 |
15 | <Error><Code>AccessDenied</Code><Message>Request has expired</Message><X-Amz-Expires>120</X-Amz-Expires><Expires>2023-05-23T18:50:36Z</Expires><ServerTime>2023-05-23T19:51:27Z</ServerTime><RequestId>T19MN8X4DC2R8PKZ</RequestId><HostId>OoOB8rJB1hK9hP7/8Pu2h2Q2DMJ+4JyHyDp3k7cKHquUhHk8ffNnXomlguD+xPCC77v5eU+bLcU=</HostId></Error>'
16 | content_type: text/plain
17 | method: GET
18 | status: 403
19 | url: https://landing-aviinference-benchmarking-dev.s3.us-east-2.amazonaws.com/images/1mp_cereal_1.jpeg?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjELP%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMSJHMEUCIFrGe9p9sBKng7GnrdeTPETmaXUz2RItwW9DtpyBXxkEAiEAomX3OFUwJUduZIJ5ujvONJUYK3qj9kOhHlZ7WvUuRAMq9gMI3P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARACGgw5NzAwNzMwNDE5OTMiDFFuNFyCyCXH%2BAiTkCrKA8DfON81axWS6frqZ0soRRkJIFlaVJyGkTlWkchYZqb5hi%2BqnIYX6sxKwtM07QOQ%2FQoAgHmGxIsLQA84YwKq2Zty7RI%2Fsxtq%2BdMEe3oeZjX%2FO%2F%2Fp%2BvYuoQXbzRDHZz%2BNL6sJaySsNzsh5lRU5qjGowff7dBBdLPswhWYlZxnng5YPSJjlvZodABudG8S88B%2BV3Ml%2BC%2Fd2Q%2Bf0FCaiyMHfMCECGlBMIXRatEmuMJksEu%2Bfhrz5IoypolbBWwCsBZOeloRcz50L5%2FlBwqyUkSD7KliJel1rN2Qoq8mCLXgY9ySHBl%2BKDgrR1n8Nh0eR99t2BQ57EcOcswSoQeqAVehFPuLCRBLpVuiP7BG4h%2Fqdi%2FoQnr5t1wrSP1T7DWzhH4uCZDNdERrYGG9RoDaxdvMJl05xpt7%2B6d1E%2BFD7hRzLRW1q9Rg7pdKmOtPE4XlPBC4MYMp7lTgXvI4QGA1nB6rTsqTwie%2Fm1q6g9%2FpRXbffuNu3pt%2FN7Vf8bRl%2B1dx%2F7CKzsgTvRDildpuYYCzHIyszQGlLFSEhtId7%2BOrCdIymmW7FcC9Adt0g31oij2FTrhLUVkf1DGzWVmWOE2A6el%2FG7IXDZJawEpNhOuCMKWUtKMGOpQCCeSRlMNN9jETYk%2B0JhcI3zmvvBTqsjGt4jrZ9FjJ2Dq2JhYvoCmBf%2F%2BrrhulxH2bYs740CdLfuDnTK8VSeJTjTHSOeRlb2r5%2Bx4DYIS3CDlj2clxWll0Vyl8Vzl8M7h%2BPwViB3zPbuB%2BsMMqPkh9uoc8uLie1aaGpQ57vqfHuJtZWLIzEptogTG2I92WCAPsiAECJkTPfNlnrtP%2F6VCzTlI1OrAKoonDuGIdp5UeA107oEaXjWo7WLQTXULRHY8q16HU86K0pkwqREK%2FPpNgZSBRQ7RymMgxJI%2F5eYWfT2JHayND6PIgOSDjtIHbqKRPVwgQOdni3vypgBWt043Koh4zLbu7bh6W%2Fm9gBRlRLvY%2BW2C1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T184836Z&X-Amz-SignedHeaders=host&X-Amz-Expires=120&X-Amz-Credential=ASIA6DXG35RE4HTVSB3E%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=9d62d37e678546692ad1a33091f668fe8d2a2248d867a5b96628ef68a68cc00c
20 | - response:
21 | auto_calculate_content_length: false
22 | body: '<?xml version="1.0" encoding="UTF-8"?>
23 |
24 | <Error><Code>PermanentRedirect</Code><Message>The bucket you are attempting
25 | to access must be addressed using the specified endpoint. Please send all future
26 | requests to this endpoint.</Message><Endpoint>test.s3.us-east-2.amazonaws.com</Endpoint><Bucket>test</Bucket><RequestId>QVFRWD11M3GWZH3Y</RequestId><HostId>XOCm/60ZeZ2Wz/qEg9c7Y1hWcpp49xxtyozbnpOOWZSaV6cAAPb0cWRcx243vXRvQ+7ZadSGmXQ=</HostId></Error>'
27 | content_type: text/plain
28 | method: GET
29 | status: 301
30 | url: https://test.s3.us-west-2.amazonaws.com/img.png?X-Amz-Security-Token=12345
31 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_media_list_filter_by_split.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"code":0,"message":"","data":{"id":30863867234314,"orgId":376,"name":"Tiger
5 | team test - Data Management CLI Test","inviteOnly":false,"datasetId":38648,"importStatus":"created","importInfo":null,"labelType":"bounding_box","isDeleted":null,"registeredModelId":"84a48de7-ead4-42bb-9771-56a549b3ba5d","modelDefaultMetricKey":null,"customizedTrainingConfig":null,"createdAt":"2023-07-02T17:21:21.400Z","coverMediaId":10300417,"updatedByUserId":"8d36bffa-200e-11ed-861d-0242ac120002","users":[{"id":"d810bae9-3310-4a95-bad8-9bf6fd78e444","accountId":"925f5145-13ad-eb02-99cf-b2af34988ddc","name":"asia","lastName":"cao","username":"1@yazhou.cao@landing.ai","status":"ACTIVE","email":"yazhou.cao@landing.ai","userRole":"admin","orgId":376,"rank":null,"ssoUser":false,"stripeUserId":null,"expirationDate":null,"readOnly":false,"internal":false,"account_id":"925f5145-13ad-eb02-99cf-b2af34988ddc","userProject":{"id":164392,"orgId":376,"userId":"d810bae9-3310-4a95-bad8-9bf6fd78e444","projectId":30863867234314,"role":"owner","isOwner":true,"permissions":["upload_data","train_model","deploy_model"],"lastOpenedTime":"2023-07-02T23:41:03.000Z"}}]}}'
6 | content_type: text/plain
7 | method: GET
8 | status: 200
9 | url: https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314
10 | - response:
11 | auto_calculate_content_length: false
12 | body: '{"code":0,"message":"","data":{"22023":{"id":22023,"projectId":30863867234314,"name":"split","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376},"22024":{"id":22024,"projectId":30863867234314,"name":"creator","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376}}}'
13 | content_type: text/plain
14 | method: GET
15 | status: 200
16 | url: https://app.landing.ai/api/v1/metadata/get_metadata_by_projectId?projectId=30863867234314
17 | - response:
18 | auto_calculate_content_length: false
19 | body: '{"code":0,"message":"","data":[{"id":10300466,"mediaType":"image","srcType":"user","srcName":"cli/sdk","properties":{"width":20,"height":20,"imgType":"jpeg"},"name":"image_1688325604365.jpeg","uploadTime":"2023-07-02T19:20:05.898Z","mediaStatus":"approved"},{"id":10300465,"mediaType":"image","srcType":"user","srcName":"cli/sdk","properties":{"width":20,"height":20,"imgType":"jpeg"},"name":"image_1688325604364.jpeg","uploadTime":"2023-07-02T19:20:05.832Z","mediaStatus":"approved"}]}'
20 | content_type: text/plain
21 | method: GET
22 | status: 200
23 | url: https://app.landing.ai/api/v1/dataset/medias?projectId=30863867234314&datasetId=38648&sortOptions=%7B%22offset%22%3A+0%2C+%22limit%22%3A+1000%7D&columnFilterMap=%7B%7D&metadataFilterMap=%7B%2222023%22%3A+%7B%22%3D%22%3A+%22dev%22%7D%7D&columnsToReturn=id&columnsToReturn=mediaType&columnsToReturn=srcType&columnsToReturn=srcName&columnsToReturn=properties&columnsToReturn=name&columnsToReturn=uploadTime&columnsToReturn=mediaStatus
24 |
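Read together, the three recorded exchanges above trace the list-by-split flow: resolve the project, fetch its metadata field definitions (mapping the human-readable `split` to field id `22023`), then query medias with a `metadataFilterMap` on that field. A sketch replaying just the first exchange with plain `requests`, to show how the recording is consumed (not a test from the repo):

import requests
import responses


@responses.activate
def replay_project_lookup():
    responses._add_from_file(
        file_path="tests/data/responses/v1_media_list_filter_by_split.yaml"
    )
    resp = requests.get(
        "https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314"
    )
    # The recorded body carries the dataset id used by the follow-up media query.
    assert resp.json()["data"]["datasetId"] == 38648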
--------------------------------------------------------------------------------
/tests/data/responses/v1_media_upload_folder.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{
5 | "code": 200,
6 | "message": "Successfully uploaded the image.",
7 | "data": {
8 | "id": 1234567,
9 | "path": "s3://landinglens-customer-data/12345678/901234567/dataset/4321/media/2024-03-26T22-21-49-216Z-image.jpeg",
10 | "name": "image.jpeg",
11 | "properties": {
12 | "width": 20,
13 | "format": "jpeg",
14 | "height": 20,
15 | "orientation": 1
16 | },
17 | "uploadTime": "2024-03-26T22:21:49.666Z"
18 | }
19 | }'
20 | content_type: text/plain
21 | method: POST
22 | status: 201
23 | url: https://app.landing.ai/pictor/v1/upload
24 | - response:
25 | auto_calculate_content_length: false
26 | body: '{"code":0,"message":"","data":{"id":30863867234314,"orgId":376,"name":"Tiger
27 | team test - Data Management CLI Test","inviteOnly":false,"datasetId":38648,"importStatus":"created","importInfo":null,"labelType":"bounding_box","isDeleted":null,"registeredModelId":"84a48de7-ead4-42bb-9771-56a549b3ba5d","modelDefaultMetricKey":null,"customizedTrainingConfig":null,"createdAt":"2023-07-02T17:21:21.400Z","coverMediaId":10300417,"updatedByUserId":"8d36bffa-200e-11ed-861d-0242ac120002","users":[{"id":"d810bae9-3310-4a95-bad8-9bf6fd78e444","accountId":"925f5145-13ad-eb02-99cf-b2af34988ddc","name":"asia","lastName":"cao","username":"1@yazhou.cao@landing.ai","status":"ACTIVE","email":"yazhou.cao@landing.ai","userRole":"admin","orgId":376,"rank":null,"ssoUser":false,"stripeUserId":null,"expirationDate":null,"readOnly":false,"internal":false,"account_id":"925f5145-13ad-eb02-99cf-b2af34988ddc","userProject":{"id":164392,"orgId":376,"userId":"d810bae9-3310-4a95-bad8-9bf6fd78e444","projectId":30863867234314,"role":"owner","isOwner":true,"permissions":["upload_data","train_model","deploy_model"],"lastOpenedTime":"2023-07-02T23:41:03.000Z"}}]}}'
28 | content_type: text/plain
29 | method: GET
30 | status: 200
31 | url: https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314
32 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_300.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: 300 Multiple Choices
5 | content_type: text/plain
6 | method: POST
7 | status: 300
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43
9 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_400.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"detail":"Missing parameter: either file (bytes) or file_url (s3 presigned
5 | url) should be provided."}'
6 | content_type: text/plain
7 | method: POST
8 | status: 400
9 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=db90b68d-cbfd-4a9c-8dc2-ebc4c3f6e5a4
10 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_401.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"errorCode":"UNAUTHORIZED","message":"Missing user record with either
5 | apikey key or id -1","data":{}}'
6 | content_type: text/plain
7 | method: POST
8 | status: 401
9 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43
10 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_403.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"errorCode":"QuotaExceeded","message":"QuotaExceeded: feature \"Usage\"
5 | exceeded usage quota, org_id: 22635583588352","data":{"policy":"Usage"}}'
6 | content_type: text/plain
7 | method: POST
8 | status: 403
9 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=dfa79692-75eb-4a48-b02e-b273751adbae
10 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_404.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '404 page not found
5 |
6 | '
7 | content_type: text/plain
8 | method: POST
9 | status: 404
10 | url: https://predict.app.landing.ai/v0/foo?endpoint_id=8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43
11 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_422.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"detail":[{"loc":["query","endpoint_id"],"msg":"value is not a valid uuid","type":"type_error.uuid"}]}'
5 | content_type: text/plain
6 | method: POST
7 | status: 422
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=12345
9 |
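Fixtures like this one back the error-path tests. A hedged sketch of the replay pattern (the concrete exception `Predictor` raises for a 422 is not visible in this dump, so the sketch only asserts that some error surfaces; it is not the repo's actual test):

import numpy as np
import pytest
import responses

from landingai.predict import Predictor


@responses.activate
def test_predict_invalid_endpoint_id():
    responses._add_from_file(
        file_path="tests/data/responses/v1_predict_status_422.yaml"
    )
    # "12345" matches the invalid endpoint_id in the recorded URL above.
    predictor = Predictor("12345", api_key="land_sk_12345")
    img = np.zeros((10, 10, 3), dtype=np.uint8)
    with pytest.raises(Exception):
        predictor.predict(img)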
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_429.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: 429 Too Many Requests
5 | content_type: text/plain
6 | method: POST
7 | status: 429
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=8fc1bc53-c5c1-4154-8cc1-a08f2e17ba43
9 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_500.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"message":"Internal server error"}'
5 | content_type: text/plain
6 | method: POST
7 | status: 500
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=db90b68d-cbfd-4a9c-8dc2-ebc4c3f6e5a4
9 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_503.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: 503 Service Unavailable
5 | content_type: text/plain
6 | method: POST
7 | status: 503
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=db90b68d-cbfd-4a9c-8dc2-ebc4c3f6e5a4
9 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_predict_status_504.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: 504 Gateway Timeout
5 | content_type: text/plain
6 | method: POST
7 | status: 504
8 | url: https://predict.app.landing.ai/inference/v1/predict?endpoint_id=db90b68d-cbfd-4a9c-8dc2-ebc4c3f6e5a4
9 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_set_metadata.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"code":0,"message":"","data":{"id":30863867234314,"orgId":376,"name":"Tiger
5 | team test - Data Management CLI Test","inviteOnly":false,"datasetId":38648,"importStatus":"created","importInfo":null,"labelType":"bounding_box","isDeleted":null,"registeredModelId":"84a48de7-ead4-42bb-9771-56a549b3ba5d","modelDefaultMetricKey":null,"customizedTrainingConfig":null,"createdAt":"2023-07-02T17:21:21.400Z","coverMediaId":10300417,"updatedByUserId":"8d36bffa-200e-11ed-861d-0242ac120002","users":[{"id":"d810bae9-3310-4a95-bad8-9bf6fd78e444","accountId":"925f5145-13ad-eb02-99cf-b2af34988ddc","name":"asia","lastName":"cao","username":"1@yazhou.cao@landing.ai","status":"ACTIVE","email":"yazhou.cao@landing.ai","userRole":"admin","orgId":376,"rank":null,"ssoUser":false,"stripeUserId":null,"expirationDate":null,"readOnly":false,"internal":false,"account_id":"925f5145-13ad-eb02-99cf-b2af34988ddc","userProject":{"id":164392,"orgId":376,"userId":"d810bae9-3310-4a95-bad8-9bf6fd78e444","projectId":30863867234314,"role":"owner","isOwner":true,"permissions":["upload_data","train_model","deploy_model"],"lastOpenedTime":"2023-07-02T23:41:03.000Z"}}]}}'
6 | content_type: text/plain
7 | method: GET
8 | status: 200
9 | url: https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314
10 | - response:
11 | auto_calculate_content_length: false
12 | body: '{"code":0,"message":"","data":{"22023":{"id":22023,"projectId":30863867234314,"name":"split","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376},"22024":{"id":22024,"projectId":30863867234314,"name":"creator","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376}}}'
13 | content_type: text/plain
14 | method: GET
15 | status: 200
16 | url: https://app.landing.ai/api/v1/metadata/get_metadata_by_projectId?projectId=30863867234314
17 | - response:
18 | auto_calculate_content_length: false
19 | body: '{"code":0,"message":"","data":[{"orgId":376,"objectId":10300467,"objectType":"media","metadata":{"22024":"tom"}},{"orgId":376,"objectId":10300466,"objectType":"media","metadata":{"22024":"tom"}},{"orgId":376,"objectId":10300465,"objectType":"media","metadata":{"22024":"tom"}}]}'
20 | content_type: text/plain
21 | method: POST
22 | status: 200
23 | url: https://app.landing.ai/api/v1/object/medias_metadata
24 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_set_metadata_multiple_medias.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"code":0,"message":"","data":{"id":30863867234314,"orgId":376,"name":"Tiger
5 | team test - Data Management CLI Test","inviteOnly":false,"datasetId":38648,"importStatus":"created","importInfo":null,"labelType":"bounding_box","isDeleted":null,"registeredModelId":"84a48de7-ead4-42bb-9771-56a549b3ba5d","modelDefaultMetricKey":null,"customizedTrainingConfig":null,"createdAt":"2023-07-02T17:21:21.400Z","coverMediaId":10300417,"updatedByUserId":"8d36bffa-200e-11ed-861d-0242ac120002","users":[{"id":"d810bae9-3310-4a95-bad8-9bf6fd78e444","accountId":"925f5145-13ad-eb02-99cf-b2af34988ddc","name":"asia","lastName":"cao","username":"1@yazhou.cao@landing.ai","status":"ACTIVE","email":"yazhou.cao@landing.ai","userRole":"admin","orgId":376,"rank":null,"ssoUser":false,"stripeUserId":null,"expirationDate":null,"readOnly":false,"internal":false,"account_id":"925f5145-13ad-eb02-99cf-b2af34988ddc","userProject":{"id":164392,"orgId":376,"userId":"d810bae9-3310-4a95-bad8-9bf6fd78e444","projectId":30863867234314,"role":"owner","isOwner":true,"permissions":["upload_data","train_model","deploy_model"],"lastOpenedTime":"2023-07-02T23:41:03.000Z"}}]}}'
6 | content_type: text/plain
7 | method: GET
8 | status: 200
9 | url: https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314
10 | - response:
11 | auto_calculate_content_length: false
12 | body: '{"code":0,"message":"","data":{"22023":{"id":22023,"projectId":30863867234314,"name":"split","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376},"22024":{"id":22024,"projectId":30863867234314,"name":"creator","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376}}}'
13 | content_type: text/plain
14 | method: GET
15 | status: 200
16 | url: https://app.landing.ai/api/v1/metadata/get_metadata_by_projectId?projectId=30863867234314
17 | - response:
18 | auto_calculate_content_length: false
19 | body: '{"code":0,"message":"","data":[{"orgId":376,"mediaId":10300467,"objectType":"media","metadata":{"22024":"tom"}},{"orgId":376,"mediaId":10300466,"objectType":"media","metadata":{"22024":"tom"}},{"orgId":376,"mediaId":10300465,"objectType":"media","metadata":{"22024":"tom"}}]}'
20 | content_type: text/plain
21 | method: POST
22 | status: 200
23 | url: https://app.landing.ai/api/v1/object/medias_metadata
24 |
--------------------------------------------------------------------------------
/tests/data/responses/v1_set_metadata_single_media.yaml:
--------------------------------------------------------------------------------
1 | responses:
2 | - response:
3 | auto_calculate_content_length: false
4 | body: '{"code":0,"message":"","data":{"id":30863867234314,"orgId":376,"name":"Tiger
5 | team test - Data Management CLI Test","inviteOnly":false,"datasetId":38648,"importStatus":"created","importInfo":null,"labelType":"bounding_box","isDeleted":null,"registeredModelId":"84a48de7-ead4-42bb-9771-56a549b3ba5d","modelDefaultMetricKey":null,"customizedTrainingConfig":null,"createdAt":"2023-07-02T17:21:21.400Z","coverMediaId":10300417,"updatedByUserId":"8d36bffa-200e-11ed-861d-0242ac120002","users":[{"id":"d810bae9-3310-4a95-bad8-9bf6fd78e444","accountId":"925f5145-13ad-eb02-99cf-b2af34988ddc","name":"asia","lastName":"cao","username":"1@yazhou.cao@landing.ai","status":"ACTIVE","email":"yazhou.cao@landing.ai","userRole":"admin","orgId":376,"rank":null,"ssoUser":false,"stripeUserId":null,"expirationDate":null,"readOnly":false,"internal":false,"account_id":"925f5145-13ad-eb02-99cf-b2af34988ddc","userProject":{"id":164392,"orgId":376,"userId":"d810bae9-3310-4a95-bad8-9bf6fd78e444","projectId":30863867234314,"role":"owner","isOwner":true,"permissions":["upload_data","train_model","deploy_model"],"lastOpenedTime":"2023-07-02T23:41:03.000Z"}}]}}'
6 | content_type: text/plain
7 | method: GET
8 | status: 200
9 | url: https://app.landing.ai/api/v1/project/with_users?projectId=30863867234314
10 | - response:
11 | auto_calculate_content_length: false
12 | body: '{"code":0,"message":"","data":{"22023":{"id":22023,"projectId":30863867234314,"name":"split","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376},"22024":{"id":22024,"projectId":30863867234314,"name":"creator","type":"text","predefinedChoices":null,"allowMultiple":false,"valueFlexible":true,"orgId":376}}}'
13 | content_type: text/plain
14 | method: GET
15 | status: 200
16 | url: https://app.landing.ai/api/v1/metadata/get_metadata_by_projectId?projectId=30863867234314
17 | - response:
18 | auto_calculate_content_length: false
19 | body: '{"code":0,"message":"","data":[{"orgId":376,"mediaId":10300467,"objectType":"media","metadata":{"22023":"train"}}]}'
20 | content_type: text/plain
21 | method: POST
22 | status: 200
23 | url: https://app.landing.ai/api/v1/object/medias_metadata
24 |
--------------------------------------------------------------------------------
/tests/data/videos/countdown.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/videos/countdown.mp4
--------------------------------------------------------------------------------
/tests/data/videos/test.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/landing-ai/landingai-python/63831d8534f0f1206f7cb4f90128c633fac8f7f4/tests/data/videos/test.mp4
--------------------------------------------------------------------------------
/tests/integration/landingai/test_dataset.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import pytest
4 |
5 | from landingai.data_management.dataset import LegacyTrainingDataset, TrainingDataset
6 |
7 |
8 | @pytest.mark.skip(
9 |     reason="Needs more work to make it a real test. Test it manually for now."
10 | )
11 | def test_dataset_client__get_dataset_version_id():
12 | api_key = ""
13 | dataset = TrainingDataset(project_id=48946424893450, api_key=api_key)
14 | project_model_info = dataset.get_project_model_info()
15 | assert project_model_info["dataset_version_id"] == 45815
16 |
17 |
18 | @pytest.mark.skip(
19 |     reason="Needs more work to make it a real test. Test it manually for now."
20 | )
21 | def test_dataset_client_get_training_dataset():
22 | api_key = ""
23 | dataset = TrainingDataset(project_id=13777903228933, api_key=api_key)
24 | output = Path("test_fetch_fne_dataset")
25 | output = dataset.get_training_dataset(output, include_image_metadata=True)
26 | assert len(output) > 0
27 | assert output.columns.tolist() == [
28 | "id",
29 | "split",
30 | "classes",
31 | "seg_mask_prediction_path",
32 | "okng_threshold",
33 | "media_level_predicted_score",
34 | "label_id",
35 | "seg_mask_label_path",
36 | "media_level_label",
37 | "metadata",
38 | ]
39 |
40 |
41 | @pytest.mark.skip(
42 |     reason="Needs more work to make it a real test. Test it manually for now."
43 | )
44 | def test_legacy_dataset_client_get_legacy_training_dataset_predictions():
45 | cookie = ""
46 | project_id = 13777903228933
47 | dataset = LegacyTrainingDataset(project_id=project_id, cookie=cookie)
48 | # job_id = "cc9576f7-299a-4452-a6f5-6447a2677c80"
49 | job_id = "3627db83-26be-433a-8406-76c019ebde45"
50 | output = Path("test_fetch_legacy_dataset")
51 | output = dataset.get_legacy_training_dataset(output, job_id=job_id)
52 | print(output)
53 | assert len(output) > 0
54 |
55 |
56 | if __name__ == "__main__":
57 | import logging
58 |
59 | logging.basicConfig(
60 | level=logging.INFO,
61 | format="%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s",
62 | )
63 | # test_dataset_client_get_training_dataset()
64 | test_legacy_dataset_client_get_legacy_training_dataset_predictions()
65 |
--------------------------------------------------------------------------------
/tests/unit/landingai/data_management/test_label.py:
--------------------------------------------------------------------------------
1 | import responses
2 |
3 | from landingai.data_management.label import Label
4 |
5 |
6 | @responses.activate
7 | def test_get_label_map():
8 | responses._add_from_file(file_path="tests/data/responses/test_get_label_map.yaml")
9 | project_id = 34243219343364
10 | api_key = "land_sk_12345"
11 | client = Label(project_id, api_key)
12 | res = client.get_label_map()
13 | assert res == {
14 | "0": "ok",
15 | "1": "num_plate",
16 | "2": "number_plate",
17 | }
18 |
--------------------------------------------------------------------------------
/tests/unit/landingai/data_management/test_metadata.py:
--------------------------------------------------------------------------------
1 | import responses
2 |
3 | from landingai.data_management.metadata import Metadata
4 |
5 | _API_KEY = "land_sk_12345"
6 | _PROJECT_ID = 30863867234314
7 |
8 |
9 | @responses.activate
10 | def test_set_metadata_for_single_media():
11 | responses._add_from_file(
12 | file_path="tests/data/responses/v1_set_metadata_single_media.yaml"
13 | )
14 | metadata = Metadata(_PROJECT_ID, _API_KEY)
15 | resp = metadata.update(10300467, split="train")
16 | assert resp["project_id"] == _PROJECT_ID
17 | assert len(resp["media_ids"]) == 1
18 | response_metadata = resp["metadata"]
19 | assert len(response_metadata) == 1
20 | assert response_metadata["split"] == "train"
21 |
22 |
23 | @responses.activate
24 | def test_set_metadata_for_multiple_media():
25 | responses._add_from_file(
26 | file_path="tests/data/responses/v1_set_metadata_multiple_medias.yaml"
27 | )
28 | media_ids = [10300467, 10300466, 10300465]
29 | metadata = Metadata(_PROJECT_ID, _API_KEY)
30 | resp = metadata.update(media_ids, creator="tom")
31 | assert resp["project_id"] == _PROJECT_ID
32 | assert resp["media_ids"] == media_ids
33 | response_metadata = resp["metadata"]
34 | assert len(response_metadata) == 1
35 | assert response_metadata["creator"] == "tom"
36 |
--------------------------------------------------------------------------------
/tests/unit/landingai/pipeline/test_postprocessing.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from landingai.common import ObjectDetectionPrediction, OcrPrediction
4 | from landingai.pipeline.frameset import FrameSet, PredictionList
5 | from landingai.pipeline.postprocessing import get_class_counts
6 |
7 |
8 | def test_class_counts():
9 | preds = PredictionList(
10 | [
11 | ObjectDetectionPrediction(
12 | id="1",
13 | label_index=0,
14 | label_name="screw",
15 | score=0.623112,
16 | bboxes=(432, 1035, 651, 1203),
17 | ),
18 | ObjectDetectionPrediction(
19 | id="2",
20 | label_index=0,
21 | label_name="screw",
22 | score=0.892,
23 | bboxes=(1519, 1414, 1993, 1800),
24 | ),
25 | ObjectDetectionPrediction(
26 | id="3",
27 | label_index=0,
28 | label_name="screw",
29 | score=0.7,
30 | bboxes=(948, 1592, 1121, 1797),
31 | ),
32 | ]
33 | )
34 |
35 | frs = FrameSet.from_image("tests/data/images/cereal1.jpeg")
36 | frs[0].predictions = preds
37 | counts = get_class_counts(frs)
38 | assert counts["screw"] == 3
39 |
40 |
41 | def test_it_doesnt_work_for_ocr():
42 | frs = FrameSet.from_image("tests/data/images/cereal1.jpeg")
43 | frs[0].predictions = PredictionList(
44 | [OcrPrediction(score=3.1416, text="LandingAI", location=[])]
45 | )
46 | with pytest.raises(TypeError):
47 | get_class_counts(frs)
48 |
--------------------------------------------------------------------------------
/tests/unit/landingai/storage/test_data_access.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | import responses
5 |
6 | from landingai.storage.data_access import download_file, fetch_from_uri, read_file
7 |
8 |
9 | # from responses import _recorder
10 | # @_recorder.record(file_path="tests/data/responses/test_read_stream.yaml")
11 | @responses.activate
12 | def test_read_file():
13 | responses._add_from_file(file_path="tests/data/responses/test_read_stream.yaml")
14 |
15 | url = "https://landing-aviinference-benchmarking-dev.s3.us-east-2.amazonaws.com/images/test.png?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjELP%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMSJHMEUCIFrGe9p9sBKng7GnrdeTPETmaXUz2RItwW9DtpyBXxkEAiEAomX3OFUwJUduZIJ5ujvONJUYK3qj9kOhHlZ7WvUuRAMq9gMI3P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARACGgw5NzAwNzMwNDE5OTMiDFFuNFyCyCXH%2BAiTkCrKA8DfON81axWS6frqZ0soRRkJIFlaVJyGkTlWkchYZqb5hi%2BqnIYX6sxKwtM07QOQ%2FQoAgHmGxIsLQA84YwKq2Zty7RI%2Fsxtq%2BdMEe3oeZjX%2FO%2F%2Fp%2BvYuoQXbzRDHZz%2BNL6sJaySsNzsh5lRU5qjGowff7dBBdLPswhWYlZxnng5YPSJjlvZodABudG8S88B%2BV3Ml%2BC%2Fd2Q%2Bf0FCaiyMHfMCECGlBMIXRatEmuMJksEu%2Bfhrz5IoypolbBWwCsBZOeloRcz50L5%2FlBwqyUkSD7KliJel1rN2Qoq8mCLXgY9ySHBl%2BKDgrR1n8Nh0eR99t2BQ57EcOcswSoQeqAVehFPuLCRBLpVuiP7BG4h%2Fqdi%2FoQnr5t1wrSP1T7DWzhH4uCZDNdERrYGG9RoDaxdvMJl05xpt7%2B6d1E%2BFD7hRzLRW1q9Rg7pdKmOtPE4XlPBC4MYMp7lTgXvI4QGA1nB6rTsqTwie%2Fm1q6g9%2FpRXbffuNu3pt%2FN7Vf8bRl%2B1dx%2F7CKzsgTvRDildpuYYCzHIyszQGlLFSEhtId7%2BOrCdIymmW7FcC9Adt0g31oij2FTrhLUVkf1DGzWVmWOE2A6el%2FG7IXDZJawEpNhOuCMKWUtKMGOpQCCeSRlMNN9jETYk%2B0JhcI3zmvvBTqsjGt4jrZ9FjJ2Dq2JhYvoCmBf%2F%2BrrhulxH2bYs740CdLfuDnTK8VSeJTjTHSOeRlb2r5%2Bx4DYIS3CDlj2clxWll0Vyl8Vzl8M7h%2BPwViB3zPbuB%2BsMMqPkh9uoc8uLie1aaGpQ57vqfHuJtZWLIzEptogTG2I92WCAPsiAECJkTPfNlnrtP%2F6VCzTlI1OrAKoonDuGIdp5UeA107oEaXjWo7WLQTXULRHY8q16HU86K0pkwqREK%2FPpNgZSBRQ7RymMgxJI%2F5eYWfT2JHayND6PIgOSDjtIHbqKRPVwgQOdni3vypgBWt043Koh4zLbu7bh6W%2Fm9gBRlRLvY%2BW2C1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T185235Z&X-Amz-SignedHeaders=host&X-Amz-Expires=18000&X-Amz-Credential=ASIA6DXG35RE4HTVSB3E%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=debb56818b5126fec2ffbc06630bb94940f5f30fb2add48e67f262226df78c82"
16 | data = read_file(url)[
17 | "content"
18 |     ]  # valid URL; expect a 200 response
19 | assert isinstance(data, bytes)
20 | assert len(data) == 84
21 | with pytest.raises(ValueError):
22 | url = "https://landing-aviinference-benchmarking-dev.s3.us-east-2.amazonaws.com/images/1mp_cereal_1.jpeg?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjELP%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMSJHMEUCIFrGe9p9sBKng7GnrdeTPETmaXUz2RItwW9DtpyBXxkEAiEAomX3OFUwJUduZIJ5ujvONJUYK3qj9kOhHlZ7WvUuRAMq9gMI3P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARACGgw5NzAwNzMwNDE5OTMiDFFuNFyCyCXH%2BAiTkCrKA8DfON81axWS6frqZ0soRRkJIFlaVJyGkTlWkchYZqb5hi%2BqnIYX6sxKwtM07QOQ%2FQoAgHmGxIsLQA84YwKq2Zty7RI%2Fsxtq%2BdMEe3oeZjX%2FO%2F%2Fp%2BvYuoQXbzRDHZz%2BNL6sJaySsNzsh5lRU5qjGowff7dBBdLPswhWYlZxnng5YPSJjlvZodABudG8S88B%2BV3Ml%2BC%2Fd2Q%2Bf0FCaiyMHfMCECGlBMIXRatEmuMJksEu%2Bfhrz5IoypolbBWwCsBZOeloRcz50L5%2FlBwqyUkSD7KliJel1rN2Qoq8mCLXgY9ySHBl%2BKDgrR1n8Nh0eR99t2BQ57EcOcswSoQeqAVehFPuLCRBLpVuiP7BG4h%2Fqdi%2FoQnr5t1wrSP1T7DWzhH4uCZDNdERrYGG9RoDaxdvMJl05xpt7%2B6d1E%2BFD7hRzLRW1q9Rg7pdKmOtPE4XlPBC4MYMp7lTgXvI4QGA1nB6rTsqTwie%2Fm1q6g9%2FpRXbffuNu3pt%2FN7Vf8bRl%2B1dx%2F7CKzsgTvRDildpuYYCzHIyszQGlLFSEhtId7%2BOrCdIymmW7FcC9Adt0g31oij2FTrhLUVkf1DGzWVmWOE2A6el%2FG7IXDZJawEpNhOuCMKWUtKMGOpQCCeSRlMNN9jETYk%2B0JhcI3zmvvBTqsjGt4jrZ9FjJ2Dq2JhYvoCmBf%2F%2BrrhulxH2bYs740CdLfuDnTK8VSeJTjTHSOeRlb2r5%2Bx4DYIS3CDlj2clxWll0Vyl8Vzl8M7h%2BPwViB3zPbuB%2BsMMqPkh9uoc8uLie1aaGpQ57vqfHuJtZWLIzEptogTG2I92WCAPsiAECJkTPfNlnrtP%2F6VCzTlI1OrAKoonDuGIdp5UeA107oEaXjWo7WLQTXULRHY8q16HU86K0pkwqREK%2FPpNgZSBRQ7RymMgxJI%2F5eYWfT2JHayND6PIgOSDjtIHbqKRPVwgQOdni3vypgBWt043Koh4zLbu7bh6W%2Fm9gBRlRLvY%2BW2C1&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T184836Z&X-Amz-SignedHeaders=host&X-Amz-Expires=120&X-Amz-Credential=ASIA6DXG35RE4HTVSB3E%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=9d62d37e678546692ad1a33091f668fe8d2a2248d867a5b96628ef68a68cc00c"
23 | read_file(url) # expired
24 |
25 | with pytest.raises(ValueError):
26 | url = (
27 | "https://test.s3.us-west-2.amazonaws.com/img.png?X-Amz-Security-Token=12345"
28 | )
29 | read_file(url) # 301, redirect
30 |
31 |
32 | def test_download_file():
33 | url = "https://www.google.com/images/branding/googlelogo/2x/googlelogo_light_color_272x92dp.png"
34 | file = download_file(url=url)
35 |     assert os.path.exists(file)
36 | assert os.path.getsize(file) > 5000
37 |
38 |
39 | def test_fetch_from_uri():
40 | # Test the local access case
41 | uri = "tests/data/images/cereal1.jpeg"
42 | local_file = fetch_from_uri(uri=uri)
43 | assert os.path.getsize(local_file) > 5000
44 |
--------------------------------------------------------------------------------
/tests/unit/landingai/storage/test_snowflake.py:
--------------------------------------------------------------------------------
1 | from unittest import mock
2 | from pathlib import Path
3 |
4 | import pytest
5 |
6 | from landingai.storage.snowflake import (
7 | SnowflakeCredential,
8 | SnowflakeDBConfig,
9 | get_snowflake_presigned_url,
10 | save_remote_file_to_local,
11 | )
12 |
13 |
14 | def test_load_snowflake_settings_from_env_file(tmp_path):
15 | env_file: Path = tmp_path / ".env"
16 | env_file.write_text(
17 | """
18 | SNOWFLAKE_WAREHOUSE=test_warehouse
19 | SNOWFLAKE_DATABASE=test_database
20 | SNOWFLAKE_SCHEMA=test_schema
21 | """
22 | )
23 |     # Overwrite the default env_file to avoid conflict with the real .env
24 | SnowflakeDBConfig.model_config["env_file"] = str(env_file)
25 | snowflake_settings = SnowflakeDBConfig()
26 | assert snowflake_settings.warehouse == "test_warehouse"
27 | assert snowflake_settings.database == "test_database"
28 | assert snowflake_settings.snowflake_schema == "test_schema"
29 | # reset back to the default config
30 | SnowflakeDBConfig.model_config["env_file"] = ".env"
31 | env_file.unlink()
32 |
33 |
34 | @pytest.mark.skip
35 | @mock.patch("snowflake.connector.connect")
36 | def test_get_snowflake_url(mock_snowflake_connector):
37 | query_result1 = [
38 | (
39 | "s3://landingai-tiger-workspace-dev2/demo_videos/roadway.mp4",
40 | 12783177,
41 | "f7b7e0c5cb961a0564c0432b0a6213e7",
42 | "Thu, 18 May 2023 00:29:50 GMT",
43 | ),
44 | (
45 | "s3://landingai-tiger-workspace-dev2/demo_videos/roadway2.mp4",
46 | 12646205,
47 | "8d70b7f52d72c14ecc492541a17dc15d",
48 | "Thu, 18 May 2023 00:29:56 GMT",
49 | ),
50 | ]
51 | query_result2 = [
52 | (
53 | "https://landingai-tiger-workspace-dev2.s3.us-east-2.amazonaws.com/demo_videos/roadway.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T203918Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3599&X-Amz-Credential=AKIA6DXG35RETU7RQM6F%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=f57a9557f1e8c4b44f3dede68d70a084263e00eecb325a7811da1cd82dbf3102",
54 | )
55 | ]
56 | mock_con = mock_snowflake_connector.return_value
57 | mock_cur = mock_con.cursor.return_value
58 | mock_cur.execute.return_value.fetchall.side_effect = [query_result1, query_result2]
59 | credential = SnowflakeCredential(user="test", password="test", account="test")
60 | connection_config = SnowflakeDBConfig(
61 | warehouse="XSMALLTEST", database="TIGER_DEMO_DB", snowflake_schema="PUBLIC"
62 | )
63 | filename = "roadway.mp4"
64 | stage_name = "VIDEO_FILES_STAGE"
65 | url = get_snowflake_presigned_url(
66 | filename, stage_name, credential=credential, connection_config=connection_config
67 | )
68 | assert (
69 | url
70 | == "https://landingai-tiger-workspace-dev2.s3.us-east-2.amazonaws.com/demo_videos/roadway.mp4?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20230523T203918Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3599&X-Amz-Credential=AKIA6DXG35RETU7RQM6F%2F20230523%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Signature=f57a9557f1e8c4b44f3dede68d70a084263e00eecb325a7811da1cd82dbf3102"
71 | )
72 |
73 |
74 | @pytest.mark.skip(
75 |     reason="This is a real test, which needs to be run manually "
76 |     "with a valid snowflake credential."
77 | )
78 | def test_save_remote_file_to_local():
79 | connection_config = SnowflakeDBConfig(
80 | warehouse="XSMALLTEST", database="TIGER_DEMO_DB", snowflake_schema="PUBLIC"
81 | )
82 | filename = "roadway.mp4"
83 | stage_name = "VIDEO_FILES_STAGE"
84 | saved_path = save_remote_file_to_local(
85 | filename, stage_name, connection_config=connection_config
86 | )
87 | print(saved_path)
88 |
--------------------------------------------------------------------------------
/tests/unit/landingai/test_common.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 |
4 | import numpy as np
5 | import pytest
6 | from pydantic import ValidationError
7 |
8 | from landingai.common import (
9 | APIKey,
10 | ObjectDetectionPrediction,
11 | SegmentationPrediction,
12 | decode_bitmap_rle,
13 | )
14 | from landingai.exceptions import InvalidApiKeyError
15 |
16 |
17 | def test_load_api_key_from_constructor():
18 | key = APIKey(api_key="land_sk_1234")
19 | assert key.api_key == "land_sk_1234"
20 | with pytest.raises(InvalidApiKeyError):
21 | APIKey(api_key="1234")
22 |
23 |
24 | def test_load_api_key_from_env_var():
25 | os.environ["landingai_api_key"] = "1234"
26 | with pytest.raises(InvalidApiKeyError):
27 | APIKey()
28 | os.environ["landingai_api_key"] = "land_sk_1234"
29 | key = APIKey()
30 | assert key.api_key == "land_sk_1234"
31 | del os.environ["landingai_api_key"]
32 |
33 |
34 | def test_load_api_key_from_env_file(tmp_path):
35 | env_file: Path = tmp_path / ".env"
36 | env_file.write_text(
37 | """
38 | LANDINGAI_API_KEY="land_sk_2222"
39 | LANDINGAI_API_SECRET="abcd"
40 | """
41 | )
42 | # Start testing
43 | credential = APIKey(_env_file=str(env_file))
44 | assert credential.api_key == "land_sk_2222"
45 | env_file.unlink()
46 | with pytest.raises(ValidationError):
47 | APIKey()
48 |
49 |
50 | def test_decode_bitmap_rle():
51 | encoded_mask = "2N3Z2N5Z"
52 | encoding_map = {"Z": 0, "N": 1}
53 | decoded_mask = decode_bitmap_rle(encoded_mask, encoding_map)
54 | assert decoded_mask == [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
55 |
56 |
57 | def test_segmentation_prediction_properties():
58 | encoded_mask = "2N3Z2N5Z"
59 | encoding_map = {"Z": 0, "N": 1}
60 | label_index = 3
61 | prediction = SegmentationPrediction(
62 | id="123",
63 | label_index=label_index,
64 | label_name="class1",
65 | score=0.5,
66 | encoded_mask=encoded_mask,
67 | encoding_map=encoding_map,
68 | mask_shape=(3, 4),
69 | )
70 | expected = np.array([1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]).reshape((3, 4))
71 | np.testing.assert_array_almost_equal(prediction.decoded_boolean_mask, expected)
72 | np.testing.assert_array_almost_equal(
73 | prediction.decoded_index_mask, expected * label_index
74 | )
75 | assert prediction.num_predicted_pixels == 4
76 |
77 |
78 | def test_object_detection_prediction_properties():
79 | label_index = 3
80 | prediction = ObjectDetectionPrediction(
81 | id="123",
82 | label_index=label_index,
83 | label_name="class1",
84 | score=0.5,
85 | bboxes=(1, 2, 31, 42),
86 | )
87 | assert prediction.num_predicted_pixels == 1200
88 |
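An aside on the `2N3Z2N5Z` string exercised above: it is a count-prefixed run-length encoding, where `2N` expands to two `N` symbols and the encoding map turns each symbol into a pixel value. A minimal re-implementation sketch for illustration only (not the library's `decode_bitmap_rle`):

import re


def decode_rle(encoded: str, encoding_map: dict) -> list:
    """Expand count+symbol runs: "2N3Z" -> [1, 1, 0, 0, 0] with {"Z": 0, "N": 1}."""
    out = []
    for count, symbol in re.findall(r"(\d+)(\D)", encoded):
        out.extend([encoding_map[symbol]] * int(count))
    return out


assert decode_rle("2N3Z2N5Z", {"Z": 0, "N": 1}) == [1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0]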
--------------------------------------------------------------------------------
/tests/unit/landingai/test_image_source_ops.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from unittest import mock
3 |
4 | import numpy as np
5 | import pytest
6 |
7 | from landingai.image_source_ops import (
8 | probe_video,
9 | sample_images_from_video,
10 | take_photo_from_webcam,
11 | )
12 |
13 |
14 | def test_probe():
15 | test_video_file_path = "tests/data/videos/test.mp4"
16 | total_frames, sample_size, video_length_seconds = probe_video(
17 | test_video_file_path, 1.0
18 | )
19 | assert total_frames == 48
20 | assert sample_size == 2
21 | assert video_length_seconds == 2.0
22 |
23 |
24 | def test_probe_file_not_exist(tmp_path: Path):
25 | with pytest.raises(FileNotFoundError):
26 | non_exist_file = str(tmp_path / "non_exist.mp4")
27 | probe_video(non_exist_file, 1.0)
28 |
29 |
30 | def test_sample_images_from_video(tmp_path: Path):
31 | test_video_file_path = "tests/data/videos/test.mp4"
32 | result = sample_images_from_video(test_video_file_path, tmp_path)
33 | assert len(result) == 2
34 | assert len(list(tmp_path.glob("*.jpg"))) == 2
35 |
36 |
37 | def test_sample_images_from_video_no_sampling(tmp_path: Path):
38 | test_video_file_path = "tests/data/videos/test.mp4"
39 | result = sample_images_from_video(
40 | test_video_file_path, tmp_path, samples_per_second=0
41 | )
42 | assert len(result) == 48
43 | assert len(list(tmp_path.glob("*.jpg"))) == 48
44 |
45 |
46 | @mock.patch("landingai.image_source_ops.cv2.namedWindow")
47 | @mock.patch("landingai.image_source_ops.cv2.imshow")
48 | @mock.patch("landingai.image_source_ops.cv2.waitKey")
49 | @mock.patch("landingai.image_source_ops.cv2.VideoCapture")
50 | def test_take_photo_from_webcam(
51 | mocked_video_capture, mocked_wait_key, mocked_imshow, mocked_named_window
52 | ):
53 | mocked_video_capture.return_value.read.return_value = (
54 | True,
55 | np.zeros((480, 640, 3), dtype=np.uint8),
56 | )
57 | mocked_wait_key.return_value = 32
58 | img = take_photo_from_webcam()
59 | assert img.size == (640, 480)
60 |
--------------------------------------------------------------------------------
/tests/unit/landingai/test_postprocess.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import responses
4 | from PIL import Image
5 |
6 | from landingai.common import ClassificationPrediction, ObjectDetectionPrediction
7 | from landingai.postprocess import (
8 | crop,
9 | rescale_bboxes,
10 | rescale_bboxes_by_image_size,
11 | segmentation_class_pixel_coverage,
12 | )
13 | from landingai.predict import Predictor
14 |
15 |
16 | @responses.activate
17 | def test_segmentation_class_pixel_coverage():
18 | responses._add_from_file(
19 | file_path="tests/data/responses/default_vp_model_response.yaml"
20 | )
21 | endpoint_id = "63035608-9d24-4342-8042-e4b08e084fde"
22 | predictor = Predictor(endpoint_id, api_key="land_sk_12345")
23 | img = np.asarray(Image.open("tests/data/images/farm-coverage.jpg"))
24 | predictions = []
25 | for img in [img] * 3:
26 | predictions.extend(predictor.predict(img))
27 | coverage = segmentation_class_pixel_coverage(predictions)
28 | assert len(coverage) == 4
29 | assert coverage[3] == (0.007018192416185215, "Green Field")
30 | assert coverage[4] == (0.24161325195570196, "Brown Field")
31 | assert coverage[5] == (0.40297209139620843, "Trees")
32 | assert coverage[6] == (0.340975356067672, "Structure")
33 |
34 |
35 | def test_rescale_bboxes_by_image_size():
36 |     preds = [
37 | ObjectDetectionPrediction(
38 | id="123",
39 | score=0.1,
40 | label_index=1,
41 | label_name="class1",
42 | bboxes=(4, 10, 22, 23),
43 | ),
44 | ObjectDetectionPrediction(
45 | id="124",
46 | score=0.1,
47 | label_index=1,
48 | label_name="class2",
49 | bboxes=(0, 0, 17, 33),
50 | ),
51 | ]
52 | raw_image = Image.new("RGB", (100, 200))
53 | resized_image = raw_image.resize((20, 60))
54 |     result = rescale_bboxes_by_image_size(preds, raw_image, resized_image)
55 | assert result[0].bboxes == (0, 3, 5, 7)
56 | assert result[1].bboxes == (0, 0, 4, 10)
57 |
58 | raw_image = Image.new("RGB", (150, 300))
59 | resized_image = raw_image.resize((100, 200))
60 |     result = rescale_bboxes_by_image_size(preds, resized_image, raw_image)
61 | assert result[0].bboxes == (6, 15, 33, 35)
62 | assert result[1].bboxes == (0, 0, 26, 50)
63 |
64 |
65 | def test_rescale_bboxes():
66 |     preds = [
67 | ObjectDetectionPrediction(
68 | id="123",
69 | score=0.1,
70 | label_index=1,
71 | label_name="class1",
72 | bboxes=(4, 10, 22, 23),
73 | ),
74 | ObjectDetectionPrediction(
75 | id="124",
76 | score=0.1,
77 | label_index=1,
78 | label_name="class2",
79 | bboxes=(0, 0, 17, 33),
80 | ),
81 | ]
82 |     result = rescale_bboxes(preds, (0.3, 0.2))
83 | assert result[0].bboxes == (0, 3, 5, 7)
84 | assert result[1].bboxes == (0, 0, 4, 10)
85 |
86 |     result = rescale_bboxes(preds, 1.5)
87 | assert result[0].bboxes == (6, 15, 33, 35)
88 | assert result[1].bboxes == (0, 0, 26, 50)
89 |
90 |
91 | def test_crop():
92 | img = np.zeros((50, 100, 3), dtype=np.uint8)
93 | img[20:31, 40:61, :] = 255
94 | img[40:51, 90:96, :] = 254
95 | preds = [
96 | ObjectDetectionPrediction(
97 | score=0.9, label_name="A", label_index=1, id="1", bboxes=(40, 20, 60, 30)
98 | ),
99 | ObjectDetectionPrediction(
100 | score=0.9, label_name="B", label_index=2, id="2", bboxes=(90, 40, 95, 50)
101 | ),
102 | ]
103 | output = crop(preds, img)
104 | assert output[0].size == (20, 10)
105 | assert np.count_nonzero(np.asarray(output[0])) == 20 * 10 * 3
106 | assert output[1].size == (5, 10)
107 | assert np.count_nonzero(np.asarray(output[1])) == 5 * 10 * 3
108 | # Empty preds should return empty list
109 | assert crop([], img) == []
110 |
111 |
112 | def test_crop_with_invalid_prediction():
113 | prediction = ClassificationPrediction(
114 | id="123",
115 | label_index=1,
116 | label_name="class1",
117 | score=0.5,
118 | )
119 | img = np.zeros((100, 50, 3), dtype=np.uint8)
120 | with pytest.raises(ValueError):
121 | crop([prediction], img)
122 |
--------------------------------------------------------------------------------
/tests/unit/landingai/test_telemetry.py:
--------------------------------------------------------------------------------
1 | from landingai.telemetry import get_runtime_environment_info
2 |
3 |
4 | def test_get_environment_info():
5 | info = get_runtime_environment_info()
6 | assert info["lib_type"] == "pylib"
7 | assert info["runtime"] == "pytest"
8 | assert "lib_version" in info
9 | assert "python_version" in info
10 | assert "os" in info
11 |
--------------------------------------------------------------------------------
/tests/unit/landingai/test_timer.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from landingai.timer import Timer
4 |
5 |
6 | def test_timer(caplog):
7 | t = Timer(name="manual")
8 | t.start()
9 | time.sleep(0.1)
10 | t.stop()
11 | assert "Timer 'manual' finished. Elapsed time:" in caplog.text
12 |
13 | with Timer(name="context manager"):
14 | time.sleep(0.2)
15 | assert "Timer 'context manager' finished. Elapsed time:" in caplog.text
16 |
17 | @Timer(name="decorator")
18 | def do_stuff():
19 | time.sleep(0.3)
20 |
21 | do_stuff()
22 | assert "Timer 'decorator' finished. Elapsed time:" in caplog.text
23 |
24 | with Timer():
25 | time.sleep(0.2)
26 | assert "Timer 'default' finished. Elapsed time:" in caplog.text
27 |
28 |
29 | def test_timer_get_global_stats():
30 | timer_keys = ["1", "2"]
31 | for k in timer_keys:
32 | for _ in range(2 * int(k)):
33 | with Timer(name=k):
34 | time.sleep(0.2 * int(k))
35 |
36 | for k in timer_keys:
37 | actual = Timer.stats.stats(k)
38 | assert actual["count"] == int(k) * 2
39 | assert actual["sum_total"] >= 0.2 * int(k)
40 |
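The tests above cover every Timer entry point: manual start/stop, context manager, decorator, and the global stats registry. A compact usage sketch, restricted to APIs the tests exercise (the timer name and sleep durations are illustrative):

```python
import time

from landingai.timer import Timer

t = Timer(name="download")  # 1) manual start/stop
t.start()
time.sleep(0.05)
t.stop()

with Timer(name="download"):  # 2) context manager
    time.sleep(0.05)

@Timer(name="download")  # 3) decorator
def fetch():
    time.sleep(0.05)

fetch()

# Aggregated stats are keyed by timer name; "count" and "sum_total" are
# among the reported fields.
print(Timer.stats.stats("download"))
```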
--------------------------------------------------------------------------------
/tests/unit/landingai/test_utils.py:
--------------------------------------------------------------------------------
1 | import io
2 | import os
3 | from pathlib import Path
4 |
5 | import numpy as np
6 | import PIL.Image
7 | import pytest
8 |
9 | from landingai.common import APIKey
10 | from landingai.exceptions import InvalidApiKeyError
11 | from landingai.utils import _DEFAULT_FORMAT, load_api_credential, serialize_image
12 |
13 |
14 | def test_load_api_credential_invalid_key(monkeypatch):
15 | with pytest.raises(InvalidApiKeyError):
16 | load_api_credential()
17 | with pytest.raises(InvalidApiKeyError):
18 | load_api_credential(api_key="fake_key")
19 | with pytest.raises(InvalidApiKeyError):
20 |         monkeypatch.setenv("landingai_api_key", "1234")  # auto-reverted after the test
21 | load_api_credential()
22 |
23 |
24 | def test_load_api_credential_from_constructor():
25 | actual = load_api_credential(api_key="land_sk_1234")
26 | assert actual.api_key == "land_sk_1234"
27 |
28 |
29 | def test_load_api_credential_from_env_var():
30 | os.environ["landingai_api_key"] = "land_sk_123"
31 | actual = load_api_credential()
32 | assert actual.api_key == "land_sk_123"
33 | del os.environ["landingai_api_key"]
34 |
35 |
36 | def test_load_api_credential_from_env_file(tmp_path):
37 | env_file: Path = tmp_path / ".env"
38 | env_file.write_text(
39 | """
40 | LANDINGAI_API_KEY="land_sk_12345"
41 | """
42 | )
43 |     # Override the default env_file so the test doesn't pick up a real .env
44 | APIKey.model_config["env_file"] = str(env_file)
45 | actual = load_api_credential()
46 | assert actual.api_key == "land_sk_12345"
47 | # reset back to the default config
48 | APIKey.model_config["env_file"] = ".env"
49 | env_file.unlink()
50 |
51 |
52 | @pytest.mark.parametrize(
53 | "expected",
54 | [
55 | PIL.Image.open("tests/data/images/wildfire1.jpeg"),
56 | PIL.Image.open("tests/data/images/ocr_test.png"),
57 | PIL.Image.open("tests/data/images/cameraman.tiff"),
58 | PIL.Image.open("tests/data/images/palettized_image.png"),
59 | PIL.Image.new("L", (15, 20)),
60 | PIL.Image.new("RGBA", (35, 25)),
61 | np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8),
62 | ],
63 | )
64 | def test_serialize_image(expected):
65 | serialized_img = serialize_image(expected)
66 | assert len(serialized_img) > 0
67 | actual = PIL.Image.open(io.BytesIO(serialized_img))
68 | if isinstance(expected, PIL.Image.Image):
69 | assert actual.size == expected.size
70 | expected_mode = expected.mode if not expected.mode.startswith("P") else "RGB"
71 | assert actual.mode == expected_mode
72 | expected_format = _DEFAULT_FORMAT if expected.mode != "RGBA" else "PNG"
73 | assert actual.format == expected_format
74 | else:
75 | assert actual.size == expected.shape[:2][::-1]
76 |
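Put together, these tests pin down the intended call pattern: resolve a credential (constructor argument, environment variable, or .env file; keys that don't start with `land_sk_` are rejected above), then serialize an image to bytes for transport. A sketch using only behaviors asserted above; the key value is a placeholder:

```python
import io
import os

import PIL.Image

from landingai.utils import load_api_credential, serialize_image

os.environ["landingai_api_key"] = "land_sk_placeholder"
credential = load_api_credential()  # picks up the environment variable
assert credential.api_key.startswith("land_sk_")

img = PIL.Image.new("RGB", (64, 64))
payload = serialize_image(img)  # bytes; RGBA inputs fall back to PNG
round_trip = PIL.Image.open(io.BytesIO(payload))
assert round_trip.size == img.size
del os.environ["landingai_api_key"]
```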
--------------------------------------------------------------------------------