├── src └── omni_lpr │ ├── py.typed │ ├── __init__.py │ ├── settings.py │ ├── errors.py │ ├── mcp.py │ ├── api_models.py │ ├── event_store.py │ ├── rest.py │ ├── __main__.py │ └── tools.py ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── publish_pypi.yml │ ├── lints.yml │ ├── tests.yml │ └── publish_docker.yml ├── poetry.toml ├── codecov.yml ├── mcp.json.example ├── tests ├── testdata │ ├── plates │ │ ├── Sweden_licenseplate_EU.JPG │ │ ├── Malaysia_penang_license_plate_front.JPG │ │ ├── Eecs00c.png │ │ ├── Swe-Temporary.jpg │ │ ├── 46-GZB-8_license_plate_of_the_Netherlands.JPG │ │ ├── Botswana-rear-plate.jpg │ │ ├── Swedish_dealer_plate.jpg │ │ ├── Swedish_licenseplate.jpg │ │ ├── 2008_indonesian_plate.png │ │ ├── Algeria_plate_white_2018.jpg │ │ ├── Sweden_license_plate.jpg │ │ ├── Swedish_license_plate_2019.jpg │ │ ├── Canada_Federal_license_plate_78579.jpg │ │ ├── Sweden_SE_license_plate_before_1973.jpg │ │ ├── Swedish_license_plate_for_Taxis.jpg │ │ ├── Brazilian_vehicle_license_plate_(2018-).jpg │ │ ├── Sweden_diplomatic_license_plate_DL004B.jpg │ │ ├── Swedish_military_license_plate_118076.jpg │ │ ├── 500px-Vietnamese_plate_for_Ho_Chi_Minh_City.jpg │ │ ├── South_Korean_License_Plate_for_Rent_Passenger_car_-_Ho.jpg │ │ └── plate_text.json │ └── dummy_image.png ├── test_mcp_protocol.py ├── test_e2e.py ├── conftest.py ├── test_server.py └── test_tools.py ├── CODE_OF_CONDUCT.md ├── docs ├── assets │ ├── screenshots │ │ ├── mcp-inspector-1.png │ │ ├── mcp-inspector-2.png │ │ ├── mcp-inspector-3.png │ │ ├── lmstudio-list-models-1.png │ │ └── lmstudio-detect-plates-1.png │ └── images │ │ ├── make_figures.sh │ │ ├── dummy_figure.dot │ │ ├── logo.svg │ │ └── dummy_figure.svg ├── github_mcp_registry │ ├── README.md │ └── server.json └── README.md ├── .env.example ├── .editorconfig ├── .dockerignore ├── .pre-commit-config.yaml ├── examples ├── shared.py ├── rest │ ├── health_check_example.py 
│ ├── list_models_example.py │ ├── recognize_plate_from_path_example.py │ ├── detect_and_recognize_plate_from_path_example.py │ ├── recognize_plate_example.py │ ├── detect_and_recognize_plate_example.py │ ├── recognize_plate_from_upload_example.py │ └── detect_and_recognize_plate_from_upload_example.py ├── mcp │ ├── list_models_example.py │ ├── recognize_plate_from_path_example.py │ ├── detect_and_recognize_plate_from_path_example.py │ └── recognize_plate_example.py └── README.md ├── scripts ├── remove_containers.sh └── docker_entrypoint.sh ├── LICENSE ├── .gitignore ├── ROADMAP.md ├── CONTRIBUTING.md ├── .gitattributes ├── Dockerfile ├── pyproject.toml ├── logo.svg ├── Makefile └── README.md /src/omni_lpr/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [ habedi ] 2 | -------------------------------------------------------------------------------- /poetry.toml: -------------------------------------------------------------------------------- 1 | [virtualenvs] 2 | in-project = true 3 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "examples/*" 3 | - "scripts/*" 4 | - "tests/*" 5 | -------------------------------------------------------------------------------- /mcp.json.example: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "omni-lpr-local": { 4 | "url": "http://127.0.0.1:8000/mcp/" 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /tests/testdata/plates/Sweden_licenseplate_EU.JPG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/habedi/omni-lpr/HEAD/tests/testdata/plates/Sweden_licenseplate_EU.JPG -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | 3 | We adhere to the [Python Software Foundation Code of Conduct](https://policies.python.org/python.org/code-of-conduct). 4 | -------------------------------------------------------------------------------- /tests/testdata/dummy_image.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:0814f352fb86bdaf7e38beefd1272a41090e4e700dcdcbcb41898cdf1f6ee37c 3 | size 70 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Malaysia_penang_license_plate_front.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/habedi/omni-lpr/HEAD/tests/testdata/plates/Malaysia_penang_license_plate_front.JPG -------------------------------------------------------------------------------- /tests/testdata/plates/Eecs00c.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:f887ac615672e8cf70eeda38e2ca84a2326a1af5d7d0ecb36aa38edea6b995cd 3 | size 15287 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swe-Temporary.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:c639760a44cafab7afc9e5937ebc518c6e9bb647ccd96bcc75b459d3a34494fe 3 | size 64819 4 | -------------------------------------------------------------------------------- /docs/assets/screenshots/mcp-inspector-1.png: 
-------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:adf97bf7573e175d83c1ffc1d86219fef6bc6a931e6e736b75e8ee25147bfca3 3 | size 146667 4 | -------------------------------------------------------------------------------- /docs/assets/screenshots/mcp-inspector-2.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:0a796d46aec0ff65d26e965d68612a75ee4447ad1c15a8e8b43a2536ff079afe 3 | size 142857 4 | -------------------------------------------------------------------------------- /docs/assets/screenshots/mcp-inspector-3.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:d033b0cf2c4f14ba89b0ca36cb3def63ef198e436cd6b29cc2e4d6ba624ca290 3 | size 197770 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/46-GZB-8_license_plate_of_the_Netherlands.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/habedi/omni-lpr/HEAD/tests/testdata/plates/46-GZB-8_license_plate_of_the_Netherlands.JPG -------------------------------------------------------------------------------- /tests/testdata/plates/Botswana-rear-plate.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:3d8c0a6b2bde10ce01a130951025cf1f0cac5cc0a6ee32515d672e538f4c08f8 3 | size 31830 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swedish_dealer_plate.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:385f2d5f288c1b8e00dcac0ffcc7f5b37119a37c865c1b25d3538f4f2e0f5f93 3 | size 
52873 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swedish_licenseplate.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:22ac78ab056fcc4c0cfe7219d9265dc1524ab82f6c788226c465d9e4b19c3324 3 | size 5788 4 | -------------------------------------------------------------------------------- /docs/assets/screenshots/lmstudio-list-models-1.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:d9c2a88d81a3834de0eaa7ee7fda2356b5efa5b0d7a60c9d61dc8e615b7d83b1 3 | size 176790 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/2008_indonesian_plate.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:cd0a7f6d06b7eace8968672bd69ba2f086b4c10a226e63f0d8b0f006e1af83e2 3 | size 60702 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Algeria_plate_white_2018.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:fc0fa792028cb8b604e323f5f02d8b531c157e29ead65693de33e0c6538704b6 3 | size 139265 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Sweden_license_plate.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:1f7da8be191b7c34ded3d0f478c22e624e69da446b905c7840cc958c5ccc4150 3 | size 271716 4 | -------------------------------------------------------------------------------- /docs/assets/screenshots/lmstudio-detect-plates-1.png: 
-------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:695edc00a96db09f8544b48107cf30b36a3de334ec0dafb047a6669381d507eb 3 | size 138169 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swedish_license_plate_2019.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:9a751299b47034acd76e89f83ab9c57744c55e4345523bc3d2abafc8b34f77ab 3 | size 195068 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Canada_Federal_license_plate_78579.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:b91325322645961b9ef7528d73c42a42d750dddc761783b0683be8b0332a8577 3 | size 354290 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Sweden_SE_license_plate_before_1973.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:e072a24238a8d6050cd1a2511a6f7c546f6f4a3692a992b6ded4ebc5699098ca 3 | size 15484 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swedish_license_plate_for_Taxis.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:fd5757cc9b1c6ad79fae618ebc8408d194646cb8013306a8ac461769c1348f1d 3 | size 61697 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Brazilian_vehicle_license_plate_(2018-).jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid 
sha256:ec21e2c03add6d1a350aaa33bb5c8280855fd99e93c4ab9b9b1ae85bf68c6cbc 3 | size 235810 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Sweden_diplomatic_license_plate_DL004B.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:fedd0d2440220189681633b049e664d171e15a854d439664f4e9ead7557c5252 3 | size 151436 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/Swedish_military_license_plate_118076.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:e5879b2cee9fdbf97b1834c17280db75ec0f4c6356dc48ede9c96696a7c7ae48 3 | size 42788 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/500px-Vietnamese_plate_for_Ho_Chi_Minh_City.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:048c0e5373ebe74d45e3a24643cd9ef0cf2cf1b8826c500ae90dfb9f247a98dd 3 | size 26115 4 | -------------------------------------------------------------------------------- /tests/testdata/plates/South_Korean_License_Plate_for_Rent_Passenger_car_-_Ho.jpg: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:85fade0a7b4170e901c58cbc7cdb16abf52dc699021808432616fe3c215f67b0 3 | size 87408 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Discussions 4 | url: https://github.com/habedi/omni-lpr/discussions 5 | about: Please ask and answer general questions here 6 
| -------------------------------------------------------------------------------- /docs/assets/images/make_figures.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # You need to have Graphviz installed to run this script 4 | # On Debian-based OSes, you can install it using: sudo apt-get install graphviz 5 | 6 | # Directory containing .dot files (with default value) 7 | ASSET_DIR=${1:-"."} 8 | 9 | # Make figures from .dot files 10 | for f in "${ASSET_DIR}"/*.dot; do 11 | dot -Tsvg "$f" -o "${f%.dot}.svg" 12 | done 13 | -------------------------------------------------------------------------------- /docs/github_mcp_registry/README.md: -------------------------------------------------------------------------------- 1 | ## Publish to MCP Registry 2 | 3 | To publish Omni-LPR to the [GitHub MCP Registry](https://github.com/mcp), follow these steps: 4 | 5 | 1. Download the latest version of `mcp-publisher` binary from [modelcontextprotocol/registry](https://github.com/modelcontextprotocol/registry) and add it to your PATH. 6 | 2. Run `mcp-publisher login github` and follow the instructions to authenticate. 7 | 3. Run `mcp-publisher publish`. 8 | 9 | > [!NOTE] 10 | > Run all of these commands in this directory (`docs/github_mcp_registry`). 11 | -------------------------------------------------------------------------------- /src/omni_lpr/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logging.basicConfig() 4 | 5 | from importlib.metadata import PackageNotFoundError, version 6 | 7 | from .__main__ import main, starlette_app 8 | 9 | _logger = logging.getLogger(__name__) 10 | 11 | try: 12 | __version__ = version("omni-lpr") 13 | except PackageNotFoundError: 14 | __version__ = "0.0.0-unknown" 15 | _logger.warning( 16 | "Could not determine package version using importlib.metadata. " 17 | "Is the library installed correctly?" 
18 | ) 19 | 20 | __all__ = ["main", "starlette_app"] 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | 1. 17 | 18 | **Expected behavior** 19 | A clear and concise description of what you expected to happen. 20 | 21 | **Logs** 22 | If applicable, add logs to help explain your problem. 23 | 24 | **Additional context** 25 | Add any other context about the problem here. 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Example Omni-LPR Server Settings 2 | # Rename this file to `.env` to use it. 
3 | 4 | # The host to bind the server to. 5 | HOST=127.0.0.1 6 | 7 | # The port to bind the server to. 8 | PORT=8000 9 | 10 | # The log level to use for the server. 11 | # Options: DEBUG, INFO, WARNING, ERROR, CRITICAL 12 | LOG_LEVEL=INFO 13 | 14 | # The maximum image size for uploads in megabytes. 15 | MAX_IMAGE_SIZE_MB=5 16 | 17 | # The number of models to keep in the cache. 18 | MODEL_CACHE_SIZE=16 19 | 20 | # The default OCR model to use for license plate recognition. 21 | DEFAULT_OCR_MODEL=cct-xs-v1-global-model 22 | 23 | # The default detector model to use for license plate detection. 24 | DEFAULT_DETECTOR_MODEL=yolo-v9-t-384-license-plate-end2end 25 | -------------------------------------------------------------------------------- /docs/github_mcp_registry/server.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://static.modelcontextprotocol.io/schemas/2025-09-16/server.schema.json", 3 | "name": "io.github.habedi/omni-lpr", 4 | "description": "An MCP server for automatic license plate recognition", 5 | "status": "active", 6 | "repository": { 7 | "url": "https://github.com/habedi/omni-lpr", 8 | "source": "github" 9 | }, 10 | "version": "0.3.3", 11 | "packages": [ 12 | { 13 | "registryType": "pypi", 14 | "registryBaseUrl": "https://pypi.org", 15 | "identifier": "omni-lpr", 16 | "version": "0.3.3", 17 | "transport": { 18 | "type": "streamable-http", 19 | "url": "http://127.0.0.1:8000/mcp/" 20 | } 21 | } 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | 3 | # Top-most EditorConfig file 4 | root = true 5 | 6 | # Global settings (applicable to all files unless overridden) 7 | [*] 8 | charset = utf-8 # Default character encoding 9 | end_of_line = lf # Use LF for line endings (Unix-style) 10 | 
indent_style = space # Use spaces for indentation 11 | indent_size = 4 # Default indentation size 12 | insert_final_newline = true # Make sure files end with a newline 13 | trim_trailing_whitespace = true # Remove trailing whitespace 14 | 15 | [*.py] 16 | max_line_length = 100 17 | 18 | # Markdown files 19 | [*.md] 20 | max_line_length = 120 21 | trim_trailing_whitespace = false 22 | 23 | # Bash scripts 24 | [*.sh] 25 | indent_size = 2 26 | 27 | # YAML files 28 | [*.{yml,yaml}] 29 | indent_size = 2 30 | 31 | # JSON files 32 | [*.json] 33 | indent_size = 2 34 | -------------------------------------------------------------------------------- /.github/workflows/publish_pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | call_tests: 14 | uses: ./.github/workflows/tests.yml 15 | 16 | publish_to_pypi: 17 | runs-on: ubuntu-latest 18 | needs: call_tests 19 | 20 | steps: 21 | - name: Checkout Repository 22 | uses: actions/checkout@v4 23 | 24 | - name: Set Up Python 25 | uses: actions/setup-python@v4 26 | with: 27 | python-version: "3.10" 28 | 29 | - name: Install Dependencies 30 | run: | 31 | pip install --upgrade pip 32 | pip install poetry 33 | make install 34 | 35 | - name: Build and Publish to PyPI 36 | run: | 37 | source .venv/bin/activate 38 | PYPI_TOKEN=${{ secrets.PYPI_API_TOKEN }} make publish 39 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # File: ./.dockerignore 2 | # Git 3 | .git/ 4 | .gitignore 5 | 6 | # Docker 7 | # removed `Dockerfile` to ensure Dockerfile is included in build context 8 | .dockerignore 9 | 10 | # Python virtual environment 11 | .venv/ 12 | venv/ 13 | 14 | # Python cache and compiled files 15 | __pycache__/ 16 | *.pyc 
17 | *.pyo 18 | *.pyd 19 | 20 | # Build artifacts and distribution files 21 | build/ 22 | dist/ 23 | *.egg-info/ 24 | .eggs/ 25 | 26 | # Test, linting, and coverage artifacts 27 | .pytest_cache/ 28 | .mypy_cache/ 29 | .ruff_cache/ 30 | .coverage 31 | coverage.xml 32 | 33 | # Project configuration files not needed in the image 34 | .pre-commit-config.yaml 35 | codecov.yml 36 | poetry.toml 37 | # poetry.lock 38 | # Makefile 39 | env.example 40 | .env 41 | 42 | # Documentation, examples, and tests 43 | docs/ 44 | examples/ 45 | # scripts/ 46 | tests/ 47 | 48 | # Additional files 49 | CONTRIBUTING.md 50 | CODE_OF_CONDUCT.md 51 | #LICENSE 52 | #README.md 53 | -------------------------------------------------------------------------------- /.github/workflows/lints.yml: -------------------------------------------------------------------------------- 1 | name: Run Linter Checks 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: 7 | - main 8 | paths-ignore: 9 | - '**.md' 10 | - 'docs/**' 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | lint: 17 | runs-on: ubuntu-latest 18 | 19 | strategy: 20 | matrix: 21 | python-version: [ "3.10", "3.11", "3.12", "3.13" ] 22 | 23 | steps: 24 | - name: Checkout Repository 25 | uses: actions/checkout@v4 26 | 27 | - name: Set Up Python ${{ matrix.python-version }} 28 | uses: actions/setup-python@v4 29 | with: 30 | python-version: ${{ matrix.python-version }} 31 | 32 | - name: Install Dependencies 33 | run: | 34 | pip install --upgrade pip 35 | pip install poetry 36 | make install 37 | 38 | - name: Run Linter Checks 39 | run: | 40 | source .venv/bin/activate 41 | make lint 42 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_stages: [ pre-push ] 2 | default_language_version: 3 | python: python3.12 4 | fail_fast: false 5 | 6 | repos: 7 | - repo: 
https://github.com/pre-commit/pre-commit-hooks 8 | rev: v5.0.0 9 | hooks: 10 | - id: trailing-whitespace 11 | args: [ --markdown-linebreak-ext=md ] 12 | - id: end-of-file-fixer 13 | - id: mixed-line-ending 14 | - id: check-merge-conflict 15 | - id: check-added-large-files 16 | - id: detect-private-key 17 | - id: check-yaml 18 | - id: check-toml 19 | - id: check-json 20 | - id: check-docstring-first 21 | - id: pretty-format-json 22 | args: [ --autofix, --no-sort-keys ] 23 | 24 | - repo: local 25 | hooks: 26 | - id: format 27 | name: Format Code 28 | entry: make format 29 | language: system 30 | pass_filenames: false 31 | stages: [ pre-commit ] 32 | 33 | - id: test 34 | name: Run Tests 35 | entry: make test 36 | language: system 37 | pass_filenames: false 38 | -------------------------------------------------------------------------------- /examples/shared.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import base64 3 | import os 4 | 5 | 6 | def get_args(default_url: str): 7 | """Parses and returns command-line arguments for the examples.""" 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument( 10 | "--image-path", 11 | type=str, 12 | default="tests/testdata/plates/Eecs00c.png", 13 | help="The absolute or relative path to the image.", 14 | ) 15 | parser.add_argument( 16 | "--url", 17 | type=str, 18 | default=default_url, 19 | help="The URL for the endpoint.", 20 | ) 21 | return parser.parse_args() 22 | 23 | 24 | def get_image_base64(image_path: str) -> str | None: 25 | """Reads an image file and returns its base64-encoded content.""" 26 | abs_image_path = os.path.abspath(image_path) 27 | try: 28 | with open(abs_image_path, "rb") as f: 29 | return base64.b64encode(f.read()).decode("utf-8") 30 | except FileNotFoundError: 31 | print(f"Error: Image file not found at {abs_image_path}") 32 | return None 33 | -------------------------------------------------------------------------------- 
/scripts/remove_containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script removes recently exited Docker containers and their associated images. 4 | # It targets containers that have exited within the last hour (3600 seconds). 5 | AGE=3600 # seconds (=1 hour) 6 | now=$(date +%s) 7 | 8 | # Collect recent exited container IDs 9 | recent_containers=$( 10 | docker ps -a -q -f status=exited \ 11 | | while read -r id; do 12 | created=$(docker inspect --format='{{.Created}}' "$id") 13 | created_epoch=$(date -d "$created" +%s) 14 | if [ $((now - created_epoch)) -le $AGE ]; then 15 | printf '%s\n' "$id" 16 | fi 17 | done 18 | ) 19 | 20 | # Remove containers (if any) 21 | if [ -n "$recent_containers" ]; then 22 | printf '%s\n' "$recent_containers" | xargs -r docker rm 23 | # get unique image IDs referenced by those containers and remove them 24 | printf '%s\n' "$recent_containers" \ 25 | | xargs -r -I {} docker inspect --format='{{.Image}}' {} \ 26 | | sort -u \ 27 | | xargs -r docker image rm 28 | fi 29 | 30 | # Alternative: remove dangling images (safely) 31 | docker image prune -f 32 | -------------------------------------------------------------------------------- /src/omni_lpr/settings.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import PackageNotFoundError, version 2 | from typing import Literal 3 | 4 | from pydantic_settings import BaseSettings, SettingsConfigDict 5 | 6 | 7 | def get_pkg_version() -> str: 8 | """Fetches the package version, falling back to '0.0.0' if not installed.""" 9 | try: 10 | return version("omni-lpr") 11 | except PackageNotFoundError: 12 | return "0.0.0" 13 | 14 | 15 | class ServerSettings(BaseSettings): 16 | """Server and model configuration settings.""" 17 | 18 | model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore") 19 | 20 | pkg_version: str = get_pkg_version() 
21 | host: str = "127.0.0.1" 22 | port: int = 8000 23 | log_level: str = "INFO" 24 | max_image_size_mb: int = 5 25 | model_cache_size: int = 16 26 | execution_device: Literal["auto", "cpu", "cuda", "openvino"] = "auto" 27 | default_ocr_model: str = "cct-xs-v1-global-model" 28 | default_detector_model: str = "yolo-v9-t-384-license-plate-end2end" 29 | 30 | 31 | # Singleton instance 32 | settings = ServerSettings() 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Hassan Abedi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /examples/rest/health_check_example.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import httpx 4 | 5 | 6 | def main(): 7 | """Performs a health check against the server.""" 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument( 10 | "--url", 11 | type=str, 12 | default="http://127.0.0.1:8000/api/health", 13 | help="The URL for the health check endpoint.", 14 | ) 15 | args = parser.parse_args() 16 | 17 | print(f"Sending health check request to {args.url}") 18 | 19 | try: 20 | # Send the GET request 21 | response = httpx.get(args.url, timeout=10) 22 | response.raise_for_status() 23 | 24 | # Print the result 25 | print("Response from server:") 26 | print(response.json()) 27 | 28 | except httpx.RequestError as e: 29 | print(f"An error occurred while requesting {e.request.url!r}.") 30 | print(e) 31 | except httpx.HTTPStatusError as e: 32 | print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.") 33 | print(f"Response body: {e.response.text}") 34 | 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /examples/rest/list_models_example.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import httpx 4 | 5 | 6 | def main(): 7 | """Sends a request to the list_models tool and prints the result.""" 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument( 10 | "--url", 11 | type=str, 12 | default="http://127.0.0.1:8000/api/v1/tools/list_models/invoke", 13 | help="The URL for the endpoint.", 14 | ) 15 | args = parser.parse_args() 16 | 17 | print(f"Sending request to {args.url}") 18 | 19 | try: 20 | # Send the POST request 21 | response = httpx.post(args.url, timeout=30, json={}) 22 | response.raise_for_status() 23 | 24 | # Print the result 25 
| print("Response from server:") 26 | print(response.json()) 27 | 28 | except httpx.RequestError as e: 29 | print(f"An error occurred while requesting {e.request.url!r}.") 30 | print(e) 31 | except httpx.HTTPStatusError as e: 32 | print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.") 33 | print(f"Response body: {e.response.text}") 34 | 35 | 36 | if __name__ == "__main__": 37 | main() 38 | -------------------------------------------------------------------------------- /src/omni_lpr/errors.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Any, Optional 3 | 4 | from pydantic import BaseModel, Field 5 | 6 | 7 | class ErrorCode(str, Enum): 8 | """Enum for error codes used in Omni-LPR responses""" 9 | 10 | VALIDATION_ERROR = "VALIDATION_ERROR" 11 | DESERIALIZATION_ERROR = "DESERIALIZATION_ERROR" 12 | TOOL_LOGIC_ERROR = "TOOL_LOGIC_ERROR" 13 | UNKNOWN_ERROR = "UNKNOWN_ERROR" 14 | 15 | 16 | class APIError(BaseModel): 17 | """Represents an error returned by the Omni-LPR API""" 18 | 19 | code: ErrorCode = Field(..., description="A unique code identifying the error type.") 20 | message: str = Field(..., description="A human-readable message describing the error.") 21 | details: Optional[Any] = Field(None, description="Optional structured details about the error.") 22 | 23 | 24 | class ToolLogicError(Exception): 25 | """Custom exception for tool-related errors that can be mapped to APIError""" 26 | 27 | def __init__( 28 | self, message: str, code: ErrorCode = ErrorCode.TOOL_LOGIC_ERROR, details: Any = None 29 | ): 30 | super().__init__(message) 31 | self.error = APIError(code=code, message=message, details=details) 32 | -------------------------------------------------------------------------------- /tests/test_mcp_protocol.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.asyncio 5 | 
async def test_mcp_post_endpoint_invalid_payload(test_app_client):
    """
    Posting an invalid payload to the MCP endpoint should result in a client error.
    We accept any 4xx response here to confirm the route is wired correctly.
    """
    # Note: trailing slash on the URL avoids a 307 redirect.
    # Case 1: empty body.
    response = await test_app_client.post("/mcp/", content="")
    assert response.status_code >= 400
    assert response.status_code < 500

    # Case 2: syntactically invalid JSON.
    response = await test_app_client.post(
        "/mcp/",
        headers={"Content-Type": "application/json"},
        content="{not: valid}",
    )
    assert response.status_code >= 400
    assert response.status_code < 500


@pytest.mark.asyncio
async def test_mcp_get_endpoint_for_session(test_app_client):
    """
    A GET request without a session ID should be handled gracefully:
    the MCP server responds with a 4xx error when the session is not found.
    """
    # Trailing slash on the URL avoids the HTTP 307 redirect.
    response = await test_app_client.get("/mcp/")
    assert 400 <= response.status_code < 500
def main():
    """Sends a license plate image path to the REST API and prints the result."""
    args = get_args(
        default_url="http://127.0.0.1:8000/api/v1/tools/recognize_plate_from_path/invoke"
    )

    # Request payload: this tool accepts a local file path or a URL directly.
    payload = {"path": args.image_path}

    print(f"Sending request to {args.url} with image path: {args.image_path}")

    try:
        # Invoke the tool endpoint.
        resp = httpx.post(args.url, json=payload, timeout=30)
        resp.raise_for_status()
    except httpx.RequestError as e:
        print(f"An error occurred while requesting {e.request.url!r}.")
        print(e)
    except httpx.HTTPStatusError as e:
        print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.")
        print(f"Response body: {e.response.text}")
    else:
        # Success path: show the server's JSON result.
        print("Response from server:")
        print(resp.json())


if __name__ == "__main__":
    main()
def main():
    """Sends a license plate image to the REST API and prints the result."""
    args = get_args(default_url="http://127.0.0.1:8000/api/v1/tools/recognize_plate/invoke")

    # Encode the image as base64; the helper returns a falsy value on failure.
    image_base64 = get_image_base64(args.image_path)
    if not image_base64:
        return

    # Request payload for the tool endpoint.
    payload = {"image_base64": image_base64}

    print(f"Sending request to {args.url} with image: {args.image_path}")

    try:
        resp = httpx.post(args.url, json=payload, timeout=30)
        resp.raise_for_status()
    except httpx.RequestError as e:
        print(f"An error occurred while requesting {e.request.url!r}.")
        print(e)
    except httpx.HTTPStatusError as e:
        print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.")
        print(f"Response body: {e.response.text}")
    else:
        # Success path: show the server's JSON result.
        print("Response from server:")
        print(resp.json())


if __name__ == "__main__":
    main()
67 | *.duckdb 68 | *.db 69 | *.wal 70 | *.sqlite 71 | 72 | # Dependency lock files (uncomment to ignore) 73 | poetry.lock 74 | 75 | # Documentation files 76 | site/ 77 | 78 | # Miscellaneous files and directories to ignore 79 | # Add any additional file patterns a directory names that should be ignored down here 80 | .env 81 | *_output.txt 82 | *.log 83 | .mcpregistry_github_token 84 | .mcpregistry_registry_token 85 | -------------------------------------------------------------------------------- /examples/rest/detect_and_recognize_plate_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import httpx 7 | from shared import get_args, get_image_base64 8 | 9 | 10 | def main(): 11 | """Sends a license plate image to the REST API and prints the result.""" 12 | args = get_args( 13 | default_url="http://127.0.0.1:8000/api/v1/tools/detect_and_recognize_plate/invoke" 14 | ) 15 | 16 | # Read the image file and encode it in base64 17 | image_base64 = get_image_base64(args.image_path) 18 | if not image_base64: 19 | return 20 | 21 | # The data to send in the POST request 22 | data = {"image_base64": image_base64} 23 | 24 | print(f"Sending request to {args.url} with image: {args.image_path}") 25 | 26 | try: 27 | # Send the POST request 28 | response = httpx.post(args.url, json=data, timeout=30) 29 | response.raise_for_status() 30 | 31 | # Print the result 32 | print("Response from server:") 33 | print(response.json()) 34 | 35 | except httpx.RequestError as e: 36 | print(f"An error occurred while requesting {e.request.url!r}.") 37 | print(e) 38 | except httpx.HTTPStatusError as e: 39 | print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.") 40 | print(f"Response body: {e.response.text}") 41 | 42 | 43 | if __name__ == "__main__": 44 | main() 45 | 
#!/usr/bin/env bash
# Container entrypoint: launches the Omni-LPR Starlette app under Gunicorn
# with Uvicorn workers. All settings below can be overridden at runtime.
set -euo pipefail

echo "Container entrypoint executing..."
echo "Starting the Omni-LPR server with Gunicorn..."

# Defaults (can be overridden at runtime with -e)
: "${GUNICORN_WORKERS:=4}"
: "${HOST:=0.0.0.0}"
: "${PORT:=8000}"
: "${GUNICORN_EXTRA_ARGS:=}"

VENV_BIN="/home/appuser/app/.venv/bin"
GUNICORN_BIN="${VENV_BIN}/gunicorn"

# Make sure Python can import the package in `src/`
export PYTHONPATH="/home/appuser/app/src:${PYTHONPATH:-}"

if [ ! -x "${GUNICORN_BIN}" ]; then
    echo "Error: gunicorn not found at ${GUNICORN_BIN}"
    echo "Contents of ${VENV_BIN}:"
    ls -la "${VENV_BIN}" || true
    exit 1
fi

export PATH="${VENV_BIN}:$PATH"

BIND="${HOST}:${PORT}"

# Word-split GUNICORN_EXTRA_ARGS into an array. Initialize the array first:
# under `set -u`, expanding an empty array with "${arr[@]}" is an
# "unbound variable" error on bash < 4.4, so we also use the
# ${arr[@]+...} guard form in the exec below.
GUNICORN_EXTRA_ARGS_ARRAY=()
read -ra GUNICORN_EXTRA_ARGS_ARRAY <<< "${GUNICORN_EXTRA_ARGS}" || true
echo "Running: ${GUNICORN_BIN} -w ${GUNICORN_WORKERS} -k uvicorn.workers.UvicornWorker --bind ${BIND} ${GUNICORN_EXTRA_ARGS} omni_lpr:starlette_app"

# Exec so Gunicorn is PID 1 and receives container signals directly.
exec "${GUNICORN_BIN}" \
    -w "${GUNICORN_WORKERS}" \
    -k uvicorn.workers.UvicornWorker \
    --bind "${BIND}" \
    --access-logfile "-" \
    --access-logformat '{"time": "%(t)s", "remote_addr": "%(h)s", "request": "%(r)s", "status": %(s)s, "bytes": %(b)s, "referer": "%(f)s", "user_agent": "%(a)s"}' \
    ${GUNICORN_EXTRA_ARGS_ARRAY[@]+"${GUNICORN_EXTRA_ARGS_ARRAY[@]}"} \
    omni_lpr:starlette_app
4 | It outlines features to be implemented and their current status. 5 | 6 | > [!IMPORTANT] 7 | > This roadmap is a work in progress and is subject to change. 8 | 9 | ### Feature Roadmap 10 | 11 | - **Core ALPR Capabilities** 12 | 13 | - [x] License plate detection. 14 | - [x] License plate recognition. 15 | - [x] Optimized models for CPU, OpenVINO, and CUDA backends. 16 | 17 | - **Interfaces and Developer Experience** 18 | 19 | - [x] MCP interface for AI agent integration. 20 | - [x] REST API for all core functions/tools. 21 | - [x] Standardized JSON error responses. 22 | - [x] Interactive API documentation (Swagger UI and ReDoc). 23 | - [x] Support for direct image uploads (`multipart/form-data`). 24 | - [x] Switch from deprecated SSE to streamable HTTP for transport. 25 | 26 | - **Performance** 27 | 28 | - [x] Asynchronous I/O for concurrent requests. 29 | - [x] Simple LRU cache for recently processed images. 30 | - [ ] Request batching for model inference. 31 | 32 | - **Integrations** 33 | 34 | - [x] Standalone microservice architecture. 35 | - [x] MCP and REST API usage examples. 36 | - [ ] A Python client library to simplify interaction with the REST API. 37 | 38 | - **Deployment** 39 | 40 | - [x] Pre-built Docker images for each hardware backend. 41 | - [x] Configuration via environment variables and CLI arguments. 42 | - [ ] A Helm chart for Kubernetes deployment. 43 | 44 | - **Benchmarks** 45 | 46 | - [ ] Performance benchmarks for different hardware and request types. 
_logger = logging.getLogger(__name__)

# The low-level MCP server instance for this package.
app = Server("omni-lpr")


@app.call_tool()
async def call_tool_handler(name: str, arguments: dict) -> list[types.ContentBlock]:
    """
    Handle a tool-call request by delegating to the central tool registry.

    Parameters:
        name: The name of the tool to be called.
        arguments: A dictionary containing the arguments required for the tool.

    Returns:
        list[types.ContentBlock]: Content blocks produced by the tool's execution.
    """
    # Lazy %-style args: the message is only formatted when DEBUG is enabled,
    # avoiding str()-ing potentially large argument dicts on every call.
    _logger.debug("Tool call received: %s with arguments: %s", name, arguments)
    return await tool_registry.call(name, arguments)


@app.list_tools()
async def list_tools_handler() -> list[types.Tool]:
    """
    Handle a request to list the tools registered with the application.

    Uses the centralized tool registry to fetch and return the available
    tools in a structured manner.

    Returns:
        list[types.Tool]: A list containing the registered tools.
    """
    _logger.debug("Tool list requested.")
    return tool_registry.list()
| import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import httpx 7 | from shared import get_args 8 | 9 | 10 | def main(): 11 | """Sends a license plate image to the REST API as a file upload and prints the result.""" 12 | args = get_args(default_url="http://127.0.0.1:8000/api/v1/tools/recognize_plate/invoke") 13 | 14 | print(f"Sending request to {args.url} with image file: {args.image_path}") 15 | 16 | try: 17 | with open(args.image_path, "rb") as f: 18 | # The 'files' parameter is used to send multipart/form-data. 19 | # We build a dictionary where each value is a tuple. For form fields, 20 | # the tuple is (None, value), and for files, it's (filename, file-like-object, content-type). 21 | files = { 22 | "image": (args.image_path, f, "image/png"), 23 | "ocr_model": (None, "cct-s-v1-global-model"), 24 | } 25 | 26 | # Send the POST request 27 | response = httpx.post(args.url, files=files, timeout=30) 28 | response.raise_for_status() 29 | 30 | # Print the result 31 | print("Response from server:") 32 | print(response.json()) 33 | 34 | except FileNotFoundError: 35 | print(f"Error: Image file not found at {args.image_path}") 36 | except httpx.RequestError as e: 37 | print(f"An error occurred while requesting {e.request.url!r}.") 38 | print(e) 39 | except httpx.HTTPStatusError as e: 40 | print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.") 41 | print(f"Response body: {e.response.text}") 42 | 43 | 44 | if __name__ == "__main__": 45 | main() 46 | -------------------------------------------------------------------------------- /examples/mcp/recognize_plate_from_path_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import anyio 7 | from mcp import ClientSession, types 8 | from mcp.client.streamable_http import 
streamablehttp_client 9 | from shared import get_args 10 | 11 | 12 | async def amain(image_path: str, url: str): 13 | """Connects to the MCP server and calls the recognize_plate_from_path tool.""" 14 | print(f"Connecting to MCP server using Streamable HTTP at {url}") 15 | 16 | try: 17 | # Fix: Unpack all three returned values, ignoring the third. 18 | async with streamablehttp_client(url) as (read, write, _): 19 | async with ClientSession(read, write) as session: 20 | await session.initialize() 21 | print("Client initialized.") 22 | 23 | tool_name = "recognize_plate_from_path" 24 | tool_args = {"path": image_path} 25 | 26 | print(f"Calling tool '{tool_name}' with image path: {image_path}") 27 | result = await session.call_tool(tool_name, arguments=tool_args) 28 | 29 | print("Response from server:") 30 | content_block = result.content[0] 31 | if isinstance(content_block, types.TextContent): 32 | print(content_block.text) 33 | else: 34 | print(result) 35 | 36 | except Exception as e: 37 | print(f"An error occurred: {e}") 38 | print("Please ensure the Omni-LPR server is running and accessible at the specified URL.") 39 | 40 | 41 | def main(): 42 | """Parses command-line arguments and runs the async main function.""" 43 | args = get_args(default_url="http://127.0.0.1:8000/mcp/") 44 | anyio.run(amain, args.image_path, args.url) 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /examples/mcp/detect_and_recognize_plate_from_path_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import anyio 7 | from mcp import ClientSession, types 8 | from mcp.client.streamable_http import streamablehttp_client 9 | from shared import get_args 10 | 11 | 12 | async def amain(image_path: str, url: str): 13 | """Connects to the MCP server and 
calls the detect_and_recognize_plate_from_path tool.""" 14 | print(f"Connecting to MCP server using Streamable HTTP at {url}") 15 | 16 | try: 17 | # Use the correct client and unpack all three returned values 18 | async with streamablehttp_client(url) as (read, write, _): 19 | async with ClientSession(read, write) as session: 20 | await session.initialize() 21 | print("Client initialized.") 22 | 23 | tool_name = "detect_and_recognize_plate_from_path" 24 | tool_args = {"path": image_path} 25 | 26 | print(f"Calling tool '{tool_name}' with image path: {image_path}") 27 | result = await session.call_tool(tool_name, arguments=tool_args) 28 | 29 | print("Response from server:") 30 | content_block = result.content[0] 31 | if isinstance(content_block, types.TextContent): 32 | print(content_block.text) 33 | else: 34 | print(result) 35 | 36 | except Exception as e: 37 | print(f"An error occurred: {e}") 38 | print("Please ensure the Omni-LPR server is running and accessible at the specified URL.") 39 | 40 | 41 | def main(): 42 | """Parses command-line arguments and runs the async main function.""" 43 | args = get_args(default_url="http://127.0.0.1:8000/mcp/") 44 | anyio.run(amain, args.image_path, args.url) 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /examples/rest/detect_and_recognize_plate_from_upload_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import httpx 7 | from shared import get_args 8 | 9 | 10 | def main(): 11 | """Sends an image to the REST API as a file upload for detection and recognition, and prints the result.""" 12 | args = get_args( 13 | default_url="http://127.0.0.1:8000/api/v1/tools/detect_and_recognize_plate/invoke" 14 | ) 15 | 16 | print(f"Sending request to {args.url} with image file: 
{args.image_path}") 17 | 18 | try: 19 | with open(args.image_path, "rb") as f: 20 | # The 'files' parameter is used to send multipart/form-data. 21 | # We build a dictionary where each value is a tuple. For form fields, 22 | # the tuple is (None, value), and for files, it's (filename, file-like-object, content-type). 23 | files = { 24 | "image": (args.image_path, f, "image/png"), 25 | "detector_model": (None, "yolo-v9-t-384-license-plate-end2end"), 26 | "ocr_model": (None, "cct-s-v1-global-model"), 27 | } 28 | 29 | # Send the POST request 30 | response = httpx.post(args.url, files=files, timeout=30) 31 | response.raise_for_status() 32 | 33 | # Print the result 34 | print("Response from server:") 35 | print(response.json()) 36 | 37 | except FileNotFoundError: 38 | print(f"Error: Image file not found at {args.image_path}") 39 | except httpx.RequestError as e: 40 | print(f"An error occurred while requesting {e.request.url!r}.") 41 | print(e) 42 | except httpx.HTTPStatusError as e: 43 | print(f"Error response {e.response.status_code} while requesting {e.request.url!r}.") 44 | print(f"Response body: {e.response.text}") 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /tests/testdata/plates/plate_text.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "file": "Eecs00c.png", 4 | "text": "STH 561" 5 | }, 6 | { 7 | "file": "Sweden_diplomatic_license_plate_DL004B.jpg", 8 | "text": "DL004B" 9 | }, 10 | { 11 | "file": "Sweden_licenseplate_EU.JPG", 12 | "text": "KSF 762" 13 | }, 14 | { 15 | "file": "Sweden_license_plate.jpg", 16 | "text": "EPR479" 17 | }, 18 | { 19 | "file": "Sweden_SE_license_plate_before_1973.jpg", 20 | "text": "A30505" 21 | }, 22 | { 23 | "file": "Swedish_dealer_plate.jpg", 24 | "text": "MLB 206" 25 | }, 26 | { 27 | "file": "Swedish_license_plate_2019.jpg", 28 | "text": "DLH 15S" 29 | }, 30 | { 31 | "file": 
"Swedish_license_plate_for_Taxis.jpg", 32 | "text": "MLB942T" 33 | }, 34 | { 35 | "file": "Swedish_licenseplate.jpg", 36 | "text": "UDC 888" 37 | }, 38 | { 39 | "file": "Swedish_military_license_plate_118076.jpg", 40 | "text": "118076" 41 | }, 42 | { 43 | "file": "Swe-Temporary.jpg", 44 | "text": "MLB426" 45 | }, 46 | { 47 | "file": "46-GZB-8_license_plate_of_the_Netherlands.JPG", 48 | "text": "46-GZB-8" 49 | }, 50 | { 51 | "file": "500px-Vietnamese_plate_for_Ho_Chi_Minh_City.jpg", 52 | "text": "51F-178.12" 53 | }, 54 | { 55 | "file": "2008_indonesian_plate.png", 56 | "text": "XX 1234ABC" 57 | }, 58 | { 59 | "file": "Algeria_plate_white_2018.jpg", 60 | "text": "02865 114 11" 61 | }, 62 | { 63 | "file": "Botswana-rear-plate.jpg", 64 | "text": "B886AJR" 65 | }, 66 | { 67 | "file": "Brazilian_vehicle_license_plate_(2018-).jpg", 68 | "text": "LSN4I49" 69 | }, 70 | { 71 | "file": "Canada_Federal_license_plate_78579.jpg", 72 | "text": "78579" 73 | }, 74 | { 75 | "files": "Malaysia_penang_license_plate_front.JPG", 76 | "text": "PFQ 5217" 77 | }, 78 | { 79 | "file": "South_Korean_License_Plate_for_Rent_Passenger_car_-_Ho.jpg", 80 | "text": "33 5598" 81 | } 82 | ] 83 | -------------------------------------------------------------------------------- /examples/mcp/recognize_plate_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | 6 | import anyio 7 | from mcp import ClientSession, types 8 | from mcp.client.streamable_http import streamablehttp_client 9 | from shared import get_args, get_image_base64 10 | 11 | 12 | async def amain(image_path: str, url: str): 13 | """Connects to the MCP server and calls the recognize_plate tool.""" 14 | image_base64 = get_image_base64(image_path) 15 | if not image_base64: 16 | return 17 | 18 | print(f"Connecting to MCP server using Streamable HTTP at {url}") 19 | 20 | try: 21 | # Fix: 
Unpack all three returned values, ignoring the third. 22 | async with streamablehttp_client(url) as (read, write, _): 23 | async with ClientSession(read, write) as session: 24 | await session.initialize() 25 | print("Client initialized.") 26 | 27 | tool_name = "recognize_plate" 28 | tool_args = {"image_base64": image_base64} 29 | 30 | print(f"Calling tool '{tool_name}' with image: {image_path}") 31 | result = await session.call_tool(tool_name, arguments=tool_args) 32 | 33 | print("Response from server:") 34 | content_block = result.content[0] 35 | if isinstance(content_block, types.TextContent): 36 | print(content_block.text) 37 | else: 38 | print(result) 39 | 40 | except Exception as e: 41 | print(f"An error occurred: {e}") 42 | print("Please ensure the Omni-LPR server is running and accessible at the specified URL.") 43 | 44 | 45 | def main(): 46 | """Parses command-line arguments and runs the async main function.""" 47 | args = get_args(default_url="http://127.0.0.1:8000/mcp/") 48 | anyio.run(amain, args.image_path, args.url) 49 | 50 | 51 | if __name__ == "__main__": 52 | main() 53 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contribution Guidelines 2 | 3 | Thank you for considering contributing to this project! 4 | Contributions are always welcome and appreciated. 5 | 6 | ### How to Contribute 7 | 8 | Please check the [issue tracker](https://github.com/habedi/omni-lpr/issues) to see if there is an issue 9 | you 10 | would like to work on or if it has already been resolved. 11 | 12 | #### Reporting Bugs 13 | 14 | 1. Open an issue on the [issue tracker](https://github.com/habedi/omni-lpr/issues). 15 | 2. Include information such as steps to reproduce the observed behavior and relevant logs or screenshots. 16 | 17 | #### Suggesting Features 18 | 19 | 1. 
Open an issue on the [issue tracker](https://github.com/habedi/omni-lpr/issues). 20 | 2. Provide details about the feature, its purpose, and potential implementation ideas. 21 | 22 | ### Submitting Pull Requests 23 | 24 | - Make sure all tests pass before submitting a pull request. 25 | - Write a clear description of the changes you made and the reasons behind them. 26 | 27 | > [!IMPORTANT] 28 | > It's assumed that by submitting a pull request, you agree to license your contributions under the project's license. 29 | 30 | ### Development Workflow 31 | 32 | #### Prerequisites 33 | 34 | Install GNU Make if it's not already installed on your system. 35 | 36 | ```shell 37 | # For Debian-based systems like Debian, Ubuntu, etc. 38 | sudo apt-get install make 39 | ``` 40 | 41 | - Use the `make setup` command to install the development dependencies. 42 | - Use the `make install` command to install the Python dependencies. 43 | 44 | #### Code Style 45 | 46 | - Use the `make format` command to format the code. 47 | 48 | #### Running Tests 49 | 50 | - Use the `make test` command to run the tests. 51 | 52 | #### Running Linter Checks 53 | 54 | - Use the `make lint` command to run the linter checks. 55 | 56 | #### See Available Commands 57 | 58 | - Run `make help` to see all available commands for managing different tasks. 59 | 60 | ### Code of Conduct 61 | 62 | We adhere to the project's [Code of Conduct](CODE_OF_CONDUCT.md). 
63 | -------------------------------------------------------------------------------- /.github/workflows/publish_docker.yml: -------------------------------------------------------------------------------- 1 | name: Publish Docker Images 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - "v*" 8 | 9 | permissions: 10 | contents: read 11 | packages: write 12 | 13 | jobs: 14 | 15 | call_tests: 16 | uses: ./.github/workflows/tests.yml 17 | 18 | build_and_publish: 19 | name: Build and Publish Docker Image 20 | runs-on: ubuntu-latest 21 | needs: call_tests 22 | strategy: 23 | matrix: 24 | image_type: [ cpu, openvino, cuda ] 25 | 26 | steps: 27 | - name: Checkout Repository 28 | uses: actions/checkout@v4 29 | 30 | - name: Set up QEMU 31 | uses: docker/setup-qemu-action@v3 32 | with: 33 | platforms: linux/amd64 34 | 35 | - name: Set up Docker Buildx 36 | uses: docker/setup-buildx-action@v3 37 | 38 | - name: Log in to GitHub Container Registry 39 | uses: docker/login-action@v3 40 | with: 41 | registry: ghcr.io 42 | username: ${{ github.repository_owner }} 43 | password: ${{ secrets.GITHUB_TOKEN }} 44 | 45 | - name: Extract Docker Metadata 46 | id: meta 47 | uses: docker/metadata-action@v5 48 | with: 49 | images: ghcr.io/${{ github.repository }}-${{ matrix.image_type }} 50 | tags: | 51 | type=semver,pattern={{version}} 52 | type=semver,pattern={{major}}.{{minor}} 53 | type=semver,pattern={{major}} 54 | 55 | - name: Set Fallback Tag (`latest`) 56 | id: fallback 57 | run: | 58 | if [ -z "${{ steps.meta.outputs.tags }}" ]; then 59 | echo "tags=ghcr.io/${{ github.repository }}-${{ matrix.image_type }}:latest" >> $GITHUB_OUTPUT 60 | else 61 | # Make sure tags value is trimmed and only the first tag is used if there are multiple 62 | first_tag=$(echo "${{ steps.meta.outputs.tags }}" | head -n1) 63 | echo "tags=${first_tag}" >> $GITHUB_OUTPUT 64 | fi 65 | 66 | - name: Build and Push Docker Image 67 | uses: docker/build-push-action@v6 68 | with: 69 | context: . 
70 | file: ./Dockerfile 71 | target: ${{ matrix.image_type }} 72 | push: true 73 | tags: ${{ steps.fallback.outputs.tags }} 74 | labels: ${{ steps.meta.outputs.labels }} 75 | cache-from: type=gha 76 | cache-to: type=gha,mode=max 77 | platforms: linux/amd64 78 | provenance: false 79 | build-args: | 80 | BACKEND=${{ matrix.image_type }} 81 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Common document and text file formats 2 | *.docx filter=lfs diff=lfs merge=lfs -text 3 | *.doc filter=lfs diff=lfs merge=lfs -text 4 | *.pdf filter=lfs diff=lfs merge=lfs -text 5 | *.xls filter=lfs diff=lfs merge=lfs -text 6 | *.xlsx filter=lfs diff=lfs merge=lfs -text 7 | *.ppt filter=lfs diff=lfs merge=lfs -text 8 | *.pptx filter=lfs diff=lfs merge=lfs -text 9 | 10 | # Common image formats 11 | *.jpg filter=lfs diff=lfs merge=lfs -text 12 | *.jpeg filter=lfs diff=lfs merge=lfs -text 13 | *.png filter=lfs diff=lfs merge=lfs -text 14 | *.gif filter=lfs diff=lfs merge=lfs -text 15 | *.bmp filter=lfs diff=lfs merge=lfs -text 16 | *.tiff filter=lfs diff=lfs merge=lfs -text 17 | *.tif filter=lfs diff=lfs merge=lfs -text 18 | 19 | # Common compressed file formats 20 | *.zip filter=lfs diff=lfs merge=lfs -text 21 | *.gz filter=lfs diff=lfs merge=lfs -text 22 | *.tar filter=lfs diff=lfs merge=lfs -text 23 | *.tgz filter=lfs diff=lfs merge=lfs -text 24 | *.bz2 filter=lfs diff=lfs merge=lfs -text 25 | *.7z filter=lfs diff=lfs merge=lfs -text 26 | *.rar filter=lfs diff=lfs merge=lfs -text 27 | 28 | # Common file formats in machine learning projects 29 | *.bin filter=lfs diff=lfs merge=lfs -text 30 | *.model filter=lfs diff=lfs merge=lfs -text 31 | *.h5 filter=lfs diff=lfs merge=lfs -text 32 | *.tfrecord filter=lfs diff=lfs merge=lfs -text 33 | *.hdf5 filter=lfs diff=lfs merge=lfs -text 34 | *.keras filter=lfs diff=lfs merge=lfs -text 35 | *.pth filter=lfs 
from typing import Any, List, Optional

from pydantic import BaseModel, Field


# A developer-friendly version of an MCP ContentBlock for JSON responses.
# NOTE: the class docstrings and Field(description=..., examples=...) below are
# runtime data — pydantic emits them into the generated JSON Schema / OpenAPI
# docs — so edits here change the public API documentation, not just comments.
class JsonContentBlock(BaseModel):
    """A container for structured JSON data in a response."""

    # Discriminator for the content kind; fixed default "json" for this model.
    type: str = Field("json", description="The type of the content block.", examples=["json"])
    # Arbitrary tool output; no schema is imposed beyond "valid JSON".
    data: Any = Field(
        ...,
        description="The structured JSON payload.",
        examples=[{"plate_text": "HELLO", "confidence": 0.9}],
    )


class ToolResponse(BaseModel):
    """Defines the successful response structure for a tool invocation."""

    # Mirrors the MCP result shape: a list of content blocks, JSON-only here.
    content: List[JsonContentBlock] = Field(
        ..., description="A list of content blocks containing the tool's output."
    )


# Models for structured, machine-readable error responses
class ErrorDetail(BaseModel):
    """Provides specific details about a validation error."""

    # Shape follows pydantic's own validation-error entries (loc/msg/type),
    # so validation failures can be forwarded to clients without translation.
    loc: List[str] = Field(..., description="The location of the error (e.g., the field name).")
    msg: str = Field(..., description="A human-readable message for the specific error.")
    type: str = Field(..., description="The type of the error.")


class ErrorBody(BaseModel):
    """The main error object containing codes and messages."""

    # Stable machine-readable code; clients should branch on this, not `message`.
    code: str = Field(
        ...,
        description="A unique code for the error type (e.g., 'VALIDATION_ERROR').",
        examples=["VALIDATION_ERROR"],
    )
    message: str = Field(
        ...,
        description="A high-level, human-readable error message.",
        examples=["Input validation failed."],
    )
    # Present only for validation-style failures; None otherwise.
    details: Optional[List[ErrorDetail]] = Field(
        None, description="Optional list of specific validation errors."
    )
"""
In-memory event store for demonstrating resumability functionality.

This is a simple implementation intended for examples and testing, not for
production use, where a persistent storage solution would be more appropriate.
"""

import logging
from collections import deque
from dataclasses import dataclass
from uuid import uuid4

from mcp.server.streamable_http import EventCallback, EventId, EventMessage, EventStore, StreamId
from mcp.types import JSONRPCMessage

logger = logging.getLogger(__name__)


@dataclass
class EventEntry:
    """One stored event: its id, the stream it belongs to, and its message."""

    event_id: EventId
    stream_id: StreamId
    message: JSONRPCMessage


class InMemoryEventStore(EventStore):
    """In-memory :class:`EventStore` implementation supporting resumability.

    Only the most recent ``max_events_per_stream`` events are retained per
    stream, keeping memory use bounded.
    """

    def __init__(self, max_events_per_stream: int = 200):
        """Create the store.

        Args:
            max_events_per_stream: Upper bound on events retained per stream.
        """
        self.max_events_per_stream = max_events_per_stream
        # stream id -> bounded FIFO of that stream's events
        self.streams: dict[StreamId, deque[EventEntry]] = {}
        # event id -> entry, for O(1) lookup when a client resumes
        self.event_index: dict[EventId, EventEntry] = {}

    async def store_event(self, stream_id: StreamId, message: JSONRPCMessage) -> EventId:
        """Record *message* under a freshly generated event id and return it."""
        new_id: EventId = str(uuid4())
        entry = EventEntry(event_id=new_id, stream_id=stream_id, message=message)

        stream = self.streams.setdefault(stream_id, deque(maxlen=self.max_events_per_stream))

        # A full deque evicts its oldest element automatically on append; drop
        # the matching index entry first so both structures stay in sync.
        if len(stream) == self.max_events_per_stream:
            evicted = stream[0]
            self.event_index.pop(evicted.event_id, None)

        stream.append(entry)
        self.event_index[new_id] = entry
        return new_id

    async def replay_events_after(
        self,
        last_event_id: EventId,
        send_callback: EventCallback,
    ) -> StreamId | None:
        """Re-send every event that followed *last_event_id* on its stream.

        Returns the stream id on success, or ``None`` when the event id is
        unknown (never stored, or already evicted).
        """
        anchor = self.event_index.get(last_event_id)
        if anchor is None:
            logger.warning(f"Event ID {last_event_id} not found in store")
            return None

        seen_anchor = False
        for entry in self.streams.get(anchor.stream_id, deque()):
            if seen_anchor:
                await send_callback(EventMessage(entry.message, entry.event_id))
            elif entry.event_id == last_event_id:
                seen_anchor = True

        return anchor.stream_id
/docs/assets/images/dummy_figure.dot: -------------------------------------------------------------------------------- 1 | digraph G { 2 | node [fontname = "Arial", fontsize = 12]; 3 | 4 | // Title in the top left corner with margin 5 | label = " Typical Structure of a Python Library"; 6 | labelloc = "t"; 7 | labeljust = "l"; 8 | fontsize = 16; 9 | fontcolor = "black"; 10 | margin = 0.2 11 | 12 | // Define node colors for different types 13 | "Library" [shape = folder, style = filled, fillcolor = lightblue, label = "Library"]; 14 | 15 | // Package 1 structure 16 | subgraph cluster_package1 { 17 | label = "Package1"; 18 | "Package1" [shape = folder, style = filled, fillcolor = lightgreen, label = "Package1"]; 19 | "Module1_1" [shape = box, style = filled, fillcolor = lightyellow, label = "module1_1.py"]; 20 | "Module1_2" [shape = box, style = filled, fillcolor = lightyellow, label = "module1_2.py"]; 21 | 22 | "ClassA" [shape = ellipse, style = filled, fillcolor = lightcoral, label = "ClassA"]; 23 | "ClassA_method1" [shape = ellipse, style = filled, fillcolor = lightpink, label = "method1()"]; 24 | "ClassA_method2" [shape = ellipse, style = filled, fillcolor = lightpink, label = "method2()"]; 25 | 26 | "ClassC" [shape = ellipse, style = filled, fillcolor = lightcoral, label = "ClassC"]; 27 | "ClassC_method1" [shape = ellipse, style = filled, fillcolor = lightpink, label = "method1()"]; 28 | "ClassC_method2" [shape = ellipse, style = filled, fillcolor = lightpink, label = "method2()"]; 29 | 30 | "function1_1" [shape = ellipse, style = filled, fillcolor = lightgrey, label = "function1_1()"]; 31 | "function1_2" [shape = ellipse, style = filled, fillcolor = lightgrey, label = "function1_2()"]; 32 | 33 | "Package1" -> "Module1_1"; 34 | "Package1" -> "Module1_2"; 35 | 36 | "Module1_1" -> "ClassA"; 37 | "Module1_1" -> "function1_1"; 38 | "Module1_1" -> "function1_2"; 39 | 40 | "ClassA" -> "ClassA_method1"; 41 | "ClassA" -> "ClassA_method2"; 42 | 43 | "Module1_2" -> 
"ClassC"; 44 | "ClassC" -> "ClassC_method1"; 45 | "ClassC" -> "ClassC_method2"; 46 | } 47 | 48 | // Package 2 structure 49 | subgraph cluster_package2 { 50 | label = "Package2"; 51 | labelloc = "t"; 52 | labeljust = "r"; 53 | 54 | "Package2" [shape = folder, style = filled, fillcolor = lightgreen, label = "Package2"]; 55 | "Module2_1" [shape = box, style = filled, fillcolor = lightyellow, label = "module2_1.py"]; 56 | 57 | "ClassB" [shape = ellipse, style = filled, fillcolor = lightcoral, label = "ClassB"]; 58 | "ClassB_method1" [shape = ellipse, style = filled, fillcolor = lightpink, label = "method1()"]; 59 | 60 | "function2_1" [shape = ellipse, style = filled, fillcolor = lightgrey, label = "function2_1()"]; 61 | "function2_2" [shape = ellipse, style = filled, fillcolor = lightgrey, label = "function2_2()"]; 62 | 63 | "Package2" -> "Module2_1"; 64 | 65 | "Module2_1" -> "ClassB"; 66 | "Module2_1" -> "function2_1"; 67 | "Module2_1" -> "function2_2"; 68 | 69 | "ClassB" -> "ClassB_method1"; 70 | } 71 | 72 | // Relationships from Library to Packages 73 | "Library" -> "Package1"; 74 | "Library" -> "Package2"; 75 | } 76 | -------------------------------------------------------------------------------- /tests/test_e2e.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from httpx import AsyncClient 3 | 4 | # This image is from https://free-images.com/display/deadman_ranch_ancient_buildings_10.html 5 | # and is marked as Public Domain CC0. 6 | IMAGE_URL = "https://free-images.com/lg/ba26/deadman_ranch_ancient_buildings_10.jpg" 7 | 8 | 9 | @pytest.mark.e2e 10 | @pytest.mark.asyncio 11 | async def test_detect_and_recognize_plate_from_path_e2e(test_app_client: AsyncClient): 12 | """ 13 | End-to-end test for the 'detect_and_recognize_plate_from_path' tool. 14 | This test uses a real-world image from a URL and expects a successful detection. 
15 | """ 16 | response = await test_app_client.post( 17 | "/api/v1/tools/detect_and_recognize_plate_from_path/invoke", 18 | json={ 19 | "path": IMAGE_URL, 20 | "ocr_model": "cct-xs-v1-global-model", 21 | "detector_model": "yolo-v9-t-384-license-plate-end2end", 22 | }, 23 | ) 24 | 25 | assert response.status_code == 200, f"Request failed: {response.text}" 26 | response_json = response.json() 27 | content = response_json["content"][0]["data"] 28 | 29 | # The image contains multiple license plates. We expect the tool to find at least one. 30 | assert len(content) > 0 31 | # Check that the first result has the expected structure. 32 | assert "ocr" in content[0] 33 | assert "text" in content[0]["ocr"] 34 | assert "confidence" in content[0]["ocr"] 35 | assert "detection" in content[0] 36 | assert "bounding_box" in content[0]["detection"] 37 | assert "confidence" in content[0]["detection"] 38 | 39 | 40 | @pytest.mark.e2e 41 | @pytest.mark.asyncio 42 | async def test_no_plate_detection_e2e(test_app_client: AsyncClient): 43 | """ 44 | Tests that the 'detect_and_recognize_plate_from_path' tool returns an 45 | empty list when given an image without a license plate. 46 | """ 47 | # This is a public domain image of a landscape from Wikimedia Commons. 
48 | image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Peyto_Lake-Banff_NP-Canada.jpg/500px-Peyto_Lake-Banff_NP-Canada.jpg" 49 | response = await test_app_client.post( 50 | "/api/v1/tools/detect_and_recognize_plate_from_path/invoke", 51 | json={ 52 | "path": image_url, 53 | "ocr_model": "cct-xs-v1-global-model", 54 | "detector_model": "yolo-v9-t-384-license-plate-end2end", 55 | }, 56 | ) 57 | 58 | assert response.status_code == 200, f"Request failed: {response.text}" 59 | response_json = response.json() 60 | content = response_json["content"][0]["data"] 61 | assert content == [] 62 | 63 | 64 | @pytest.mark.e2e 65 | @pytest.mark.asyncio 66 | async def test_list_models_e2e(test_app_client: AsyncClient): 67 | """ 68 | End-to-end test for the 'list_models' tool. 69 | """ 70 | response = await test_app_client.post( 71 | "/api/v1/tools/list_models/invoke", 72 | json={}, 73 | ) 74 | 75 | assert response.status_code == 200, f"Request failed: {response.text}" 76 | response_json = response.json() 77 | content = response_json["content"][0]["data"] 78 | 79 | assert "detector_models" in content 80 | assert "ocr_models" in content 81 | assert isinstance(content["detector_models"], list) 82 | assert isinstance(content["ocr_models"], list) 83 | assert len(content["detector_models"]) > 0 84 | assert len(content["ocr_models"]) > 0 85 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from contextlib import asynccontextmanager 2 | from pathlib import Path 3 | 4 | import pytest 5 | from asgi_lifespan import LifespanManager 6 | from httpx import ASGITransport, AsyncClient 7 | from mcp.server.streamable_http_manager import StreamableHTTPSessionManager 8 | from starlette.applications import Starlette 9 | from starlette.routing import Mount, Route 10 | from starlette.types import Receive, Scope, Send 11 | 12 | # Import what we need 
from contextlib import asynccontextmanager
from pathlib import Path

import pytest
from asgi_lifespan import LifespanManager
from httpx import ASGITransport, AsyncClient
from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
from starlette.applications import Starlette
from starlette.routing import Mount, Route
from starlette.types import Receive, Scope, Send

# Import what we need to build the app, but not the app instances themselves
from omni_lpr.event_store import InMemoryEventStore
from omni_lpr.mcp import app as mcp_app
from omni_lpr.rest import api_spec, setup_rest_routes
from omni_lpr.tools import setup_cache, setup_tools, tool_registry


@pytest.fixture
def test_data_path():
    """Path of the directory holding the test fixtures (images, JSON, ...)."""
    return Path(__file__).parent / "testdata"


def create_test_app(with_tools: bool = True) -> Starlette:
    """Build a fully isolated Starlette app instance for testing.

    Every call creates its own event store and MCP session manager, so tests
    never share server-side state. When *with_tools* is true the global tool
    registry is rebuilt from scratch; otherwise it is left empty.
    """
    # A fresh session manager per app keeps MCP sessions test-local.
    event_store = InMemoryEventStore()
    session_manager = StreamableHTTPSessionManager(app=mcp_app, event_store=event_store)

    @asynccontextmanager
    async def lifespan(app: Starlette):
        # Run the session manager for exactly the lifetime of this app.
        async with session_manager.run():
            yield

    async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None:
        await session_manager.handle_request(scope, receive, send)

    app = Starlette(debug=True, lifespan=lifespan)

    # Always start from an empty tool registry, then optionally repopulate it.
    tool_registry._tools.clear()
    tool_registry._tool_definitions.clear()
    tool_registry._tool_models.clear()
    if with_tools:
        setup_tools()
        setup_cache()

    # Re-imported locally to avoid scope/import-order issues at module load.
    from omni_lpr.__main__ import health_check

    health_route = Route("/api/health", endpoint=health_check, methods=["GET"])

    api_v1_app = Starlette()
    api_v1_app.router.routes.extend(setup_rest_routes())
    api_spec.register(api_v1_app)

    app.routes.extend(
        [
            Mount("/mcp/", app=handle_streamable_http),
            health_route,
            Mount("/api/v1", app=api_v1_app),
        ]
    )
    return app


@pytest.fixture
async def test_app_client():
    """
    Provides a configured test client that correctly handles the ASGI lifespan.
    """
    app = create_test_app(with_tools=True)
    async with LifespanManager(app):
        async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
            yield client


@pytest.fixture
async def no_tools_test_app_client():
    """
    Provides a test client where no tools have been registered.
    """
    app = create_test_app(with_tools=False)
    async with LifespanManager(app):
        async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
            yield client
93 | """ 94 | app = create_test_app(with_tools=False) 95 | async with LifespanManager(app): 96 | async with AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client: 97 | yield client 98 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # File: Dockerfile 2 | ARG BACKEND=cpu 3 | 4 | FROM python:3.12-slim-trixie as builder 5 | ENV PYTHONDONTWRITEBYTECODE=1 \ 6 | PYTHONUNBUFFERED=1 \ 7 | POETRY_NO_INTERACTION=1 \ 8 | POETRY_VIRTUALENVS_IN_PROJECT=true \ 9 | DEBIAN_FRONTEND=noninteractive 10 | 11 | WORKDIR /app 12 | 13 | RUN apt-get update -q && \ 14 | apt-get install -qy --no-install-recommends python3-pip make && \ 15 | pip install --no-cache-dir poetry && \ 16 | poetry self add poetry-plugin-export && \ 17 | rm -rf /var/lib/apt/lists/* 18 | 19 | COPY pyproject.toml README.md LICENSE ./ 20 | COPY ./src ./src 21 | COPY ./scripts ./scripts 22 | COPY Makefile ./ 23 | 24 | RUN case ${BACKEND} in \ 25 | cuda) poetry export -f requirements.txt --without-hashes --extras cuda -o requirements.txt ;; \ 26 | openvino) poetry export -f requirements.txt --without-hashes --extras openvino -o requirements.txt ;; \ 27 | *) poetry export -f requirements.txt --without-hashes -o requirements.txt ;; \ 28 | esac 29 | 30 | FROM builder as common 31 | WORKDIR /home/appuser/app 32 | COPY --from=builder /app/requirements.txt ./requirements.txt 33 | COPY --from=builder /app/pyproject.toml ./pyproject.toml 34 | COPY --from=builder /app/README.md ./README.md 35 | COPY --from=builder /app/LICENSE ./LICENSE 36 | COPY --from=builder /app/src ./src 37 | COPY --from=builder /app/scripts ./scripts 38 | COPY --from=builder /app/Makefile ./Makefile 39 | 40 | # --- Common final image base for CPU/OpenVINO --- 41 | FROM python:3.12-slim-trixie as common-final 42 | 43 | RUN apt-get update && apt-get install -y --no-install-recommends \ 44 | libglib2.0-0 
libgl1 libsm6 libxext6 libxrender1 && \ 45 | rm -rf /var/lib/apt/lists/* 46 | 47 | RUN useradd --create-home --shell /bin/bash appuser && mkdir -p /home/appuser/app 48 | 49 | WORKDIR /home/appuser/app 50 | 51 | COPY --from=common /home/appuser/app /home/appuser/app 52 | 53 | RUN python -m venv /home/appuser/app/.venv && \ 54 | /home/appuser/app/.venv/bin/pip install --upgrade pip && \ 55 | /home/appuser/app/.venv/bin/pip install --no-deps --no-cache-dir -r requirements.txt && \ 56 | /home/appuser/app/.venv/bin/pip install --no-deps --no-cache-dir . && \ 57 | chown -R appuser:appuser /home/appuser/app 58 | 59 | USER appuser 60 | 61 | ENV PATH="/home/appuser/app/.venv/bin:$PATH" 62 | 63 | EXPOSE 8000 64 | 65 | ENTRYPOINT ["/bin/bash", "/home/appuser/app/scripts/docker_entrypoint.sh"] 66 | 67 | # --- CPU final image --- 68 | FROM common-final as cpu 69 | 70 | # --- OpenVINO final image --- 71 | FROM common-final as openvino 72 | 73 | # --- CUDA final image --- 74 | 75 | FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 as cuda 76 | 77 | RUN apt-get update && apt-get install -y --no-install-recommends \ 78 | python3.11 python3.11-venv python3-pip \ 79 | libglib2.0-0 libgl1 libsm6 libxext6 libxrender1 && \ 80 | rm -rf /var/lib/apt/lists/* 81 | 82 | RUN useradd --create-home --shell /bin/bash appuser && mkdir -p /home/appuser/app 83 | 84 | WORKDIR /home/appuser/app 85 | 86 | COPY --from=common /home/appuser/app /home/appuser/app 87 | 88 | RUN python3.11 -m venv /home/appuser/app/.venv && \ 89 | /home/appuser/app/.venv/bin/pip install --upgrade pip && \ 90 | /home/appuser/app/.venv/bin/pip install --no-deps --no-cache-dir -r requirements.txt && \ 91 | /home/appuser/app/.venv/bin/pip install --no-deps --no-cache-dir . 
&& \ 92 | chown -R appuser:appuser /home/appuser/app 93 | 94 | USER appuser 95 | 96 | ENV PATH="/home/appuser/app/.venv/bin:$PATH" 97 | 98 | EXPOSE 8000 99 | 100 | ENTRYPOINT ["/bin/bash", "/home/appuser/app/scripts/docker_entrypoint.sh"] 101 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Omni-LPR Examples 2 | 3 | This directory contains examples of how to use the Omni-LPR server via the REST and MCP interfaces. 4 | 5 | ### Prerequisites 6 | 7 | Before running the examples, make sure the Omni-LPR server is running. 8 | 9 | If you have installed the package via `pip`, you can start the server with: 10 | 11 | ```bash 12 | omni-lpr 13 | ``` 14 | 15 | If you are running from a development environment, you can use Poetry: 16 | 17 | ```bash 18 | poetry run omni-lpr 19 | ``` 20 | 21 | The server will be available at `http://127.0.0.1:8000` by default. 22 | 23 | ### Running the Examples 24 | 25 | The example scripts are designed to be run from the root of the repository. Each script accepts command-line arguments 26 | to specify parameters like the image path. 
27 | 28 | For example, to run the REST API example for recognizing a plate from a file path: 29 | 30 | ```bash 31 | # Using a pip installation 32 | python examples/rest/recognize_plate_from_path_example.py --image-path /path/to/your/image.png 33 | 34 | # Or from a development environment 35 | poetry run python examples/rest/recognize_plate_from_path_example.py --image-path /path/to/your/image.png 36 | ``` 37 | 38 | To see all available options for an example, use the `--help` flag: 39 | 40 | ```bash 41 | python examples/rest/recognize_plate_from_path_example.py --help 42 | ``` 43 | 44 | For convenience, you can also use the `make` commands from the root of the repository to run all examples for a specific 45 | API: 46 | 47 | - **Run all REST API examples:** `make example-rest` 48 | - **Run all MCP examples:** `make example-mcp` 49 | 50 | ### Example Files 51 | 52 | | # | File | Description | 53 | |----|--------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| 54 | | 1 | [`rest/recognize_plate_example.py`](rest/recognize_plate_example.py) | REST example for `recognize_plate` (base64 image). | 55 | | 2 | [`mcp/recognize_plate_example.py`](mcp/recognize_plate_example.py) | MCP example for `recognize_plate` (base64 image). | 56 | | 3 | [`rest/recognize_plate_from_path_example.py`](rest/recognize_plate_from_path_example.py) | REST example for `recognize_plate_from_path`. | 57 | | 4 | [`mcp/recognize_plate_from_path_example.py`](mcp/recognize_plate_from_path_example.py) | MCP example for `recognize_plate_from_path`. | 58 | | 5 | [`rest/detect_and_recognize_plate_from_path_example.py`](rest/detect_and_recognize_plate_from_path_example.py) | REST example for `detect_and_recognize_plate_from_path`. 
| 59 | | 6 | [`mcp/detect_and_recognize_plate_from_path_example.py`](mcp/detect_and_recognize_plate_from_path_example.py) | MCP example for `detect_and_recognize_plate_from_path`. | 60 | | 7 | [`rest/detect_and_recognize_plate_example.py`](rest/detect_and_recognize_plate_example.py) | REST example for `detect_and_recognize_plate` (base64 image). | 61 | | 8 | [`rest/list_models_example.py`](rest/list_models_example.py) | REST example for `list_models`. | 62 | | 9 | [`mcp/list_models_example.py`](mcp/list_models_example.py) | MCP example for `list_models`. | 63 | | 10 | [`rest/recognize_plate_from_upload_example.py`](rest/recognize_plate_from_upload_example.py) | REST example for `recognize_plate` (file upload). | 64 | | 11 | [`rest/detect_and_recognize_plate_from_upload_example.py`](rest/detect_and_recognize_plate_from_upload_example.py) | REST example for `detect_and_recognize_plate` (file upload). | 65 | | 12 | [`rest/health_check_example.py`](rest/health_check_example.py) | Example for checking the server's health status. 
from unittest.mock import AsyncMock

import pytest


@pytest.mark.asyncio
async def test_health_check(test_app_client):
    """The health endpoint reports an 'ok' status and the server version."""
    resp = await test_app_client.get("/api/health")
    assert resp.status_code == 200
    body = resp.json()
    assert body["status"] == "ok"
    assert "version" in body


@pytest.mark.asyncio
async def test_list_tools_with_no_tools(no_tools_test_app_client):
    """GET /tools returns an empty listing when no tools are registered."""
    resp = await no_tools_test_app_client.get("/api/v1/tools")
    assert resp.status_code == 200
    assert resp.json() == {"tools": []}


@pytest.mark.asyncio
async def test_list_tools_endpoint(test_app_client):
    """GET /tools exposes registered tools with title and input schema."""
    resp = await test_app_client.get("/api/v1/tools")
    assert resp.status_code == 200
    listing = resp.json()
    assert "tools" in listing
    assert isinstance(listing["tools"], list)
    # A known tool must be present with its full definition.
    by_name = {tool["name"]: tool for tool in listing["tools"]}
    detect_tool = by_name.get("detect_and_recognize_plate")
    assert detect_tool is not None
    assert detect_tool["title"] == "Detect and Recognize License Plate"
    assert "inputSchema" in detect_tool
    assert "image_base64" in detect_tool["inputSchema"]["properties"]


@pytest.mark.asyncio
async def test_tool_invocation_endpoint(test_app_client):
    """POST /tools/{tool_name}/invoke runs the tool and wraps its output."""
    resp = await test_app_client.post("/api/v1/tools/list_models/invoke", json={})
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], list)
    first_block = body["content"][0]
    assert first_block["type"] == "json"
    assert "detector_models" in first_block["data"]
    assert "ocr_models" in first_block["data"]


@pytest.mark.asyncio
async def test_tool_invocation_with_empty_body(test_app_client):
    """Invoking a tool with a zero-length request body must still succeed."""
    resp = await test_app_client.post(
        "/api/v1/tools/list_models/invoke", content="", headers={"Content-Length": "0"}
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert "detector_models" in body["content"][0]["data"]


@pytest.mark.asyncio
async def test_tool_invocation_with_multipart_form_data(test_app_client, test_data_path, mocker):
    """A multipart/form-data file upload is accepted by the invoke endpoint."""
    # Stub out model loading and inference; only the HTTP plumbing is under test.
    mocker.patch("anyio.to_thread.run_sync", return_value=["MOCKED-RESULT"])
    mocker.patch("omni_lpr.tools._get_image_from_source", new_callable=AsyncMock)
    mocker.patch("omni_lpr.tools._get_ocr_recognizer")

    upload_bytes = (test_data_path / "dummy_image.png").read_bytes()

    resp = await test_app_client.post(
        "/api/v1/tools/recognize_plate/invoke",
        files={"image": ("dummy_image.png", upload_bytes, "image/png")},
        data={"ocr_model": "cct-s-v1-global-model"},
    )

    assert resp.status_code == 200, f"Request failed: {resp.text}"
    body = resp.json()
    assert "content" in body
    assert body["content"][0]["type"] == "json"
    assert body["content"][0]["data"] == ["MOCKED-RESULT"]


@pytest.mark.asyncio
async def test_swagger_docs_available(test_app_client):
    """Ensure the Swagger UI endpoint stays reachable under /api/v1/apidoc/swagger."""
    resp = await test_app_client.get("/api/v1/apidoc/swagger")
    assert resp.status_code == 200
    assert resp.headers["content-type"].startswith("text/html")
    assert "Swagger UI" in resp.text
(>=2.10.1,<3.0.0)", 47 | "click (>=8.2.1,<9.0.0)", 48 | "pillow (>=11.3.0,<12.0.0)", 49 | "gunicorn (>=23.0.0,<24.0.0)", 50 | "python-json-logger (>=3.3.0,<4.0.0)", 51 | "httpx (>=0.28.1,<0.29.0)", 52 | "fast-alpr[onnx] (>=0.3.0,<0.4.0)", 53 | "pydantic (>=2.11.7,<3.0.0)", 54 | "spectree[starlette] (>=1.5.4,<2.0.0)", 55 | "async-lru (>=2.0.4,<3.0.0)", 56 | "opencv-python (>=4.12.0.88,<5.0.0.0)", 57 | ] 58 | 59 | [project.optional-dependencies] 60 | openvino = [ 61 | "fast-alpr[onnx-openvino] (>=0.3.0,<0.4.0)", 62 | ] 63 | cuda = [ 64 | "fast-alpr[onnx-gpu] (>=0.3.0,<0.4.0)", 65 | ] 66 | dev = [ 67 | "pytest (>=8.0.1,<9.0.0)", 68 | "pytest-cov (>=6.0.0,<7.0.0)", 69 | "pytest-mock (>=3.14.0,<4.0.0)", 70 | "pytest-asyncio (>=1.1.0,<2.0.0)", 71 | "mypy (>=1.11.1,<2.0.0)", 72 | "ruff (>=0.9.3,<1.0.0)", 73 | "pre-commit (>=4.2.0,<5.0.0)", 74 | "asgi-lifespan[dev] (>=2.1.0,<3.0.0)", 75 | ] 76 | 77 | [project.urls] 78 | Repository = "https://github.com/habedi/omni-lpr" 79 | Documentation = "https://github.com/habedi/omni-lpr/blob/main/README.md" 80 | 81 | [project.scripts] 82 | omni-lpr = "omni_lpr.__main__:main" 83 | 84 | [tool.poetry] 85 | include = ["README.md"] 86 | packages = [{ include = "omni_lpr", from = "src" }] 87 | package-mode = true 88 | 89 | [build-system] 90 | requires = ["poetry-core"] 91 | build-backend = "poetry.core.masonry.api" 92 | 93 | [tool.pytest.ini_options] 94 | pythonpath = ["src"] 95 | testpaths = ["tests"] 96 | markers = [ 97 | "e2e: marks tests as end-to-end tests", 98 | ] 99 | addopts = [ 100 | "--tb=short", 101 | #"--disable-warnings", 102 | "--cov=src", 103 | "--cov-branch", 104 | "--cov-report=term", 105 | "--cov-report=xml", 106 | "-rs" 107 | ] 108 | asyncio_mode = "auto" 109 | asyncio_default_fixture_loop_scope = "function" 110 | asyncio_default_test_loop_scope = "function" 111 | 112 | [tool.coverage.run] 113 | branch = true 114 | parallel = true 115 | source = ["."] 116 | omit = ["tests/*"] 117 | 118 | [tool.coverage.report] 119 | 
show_missing = false 120 | skip_empty = true 121 | precision = 2 122 | 123 | [tool.mypy] 124 | python_version = "3.10" 125 | ignore_missing_imports = true 126 | disallow_untyped_defs = true 127 | disallow_untyped_calls = true 128 | disallow_incomplete_defs = true 129 | check_untyped_defs = true 130 | warn_return_any = true 131 | strict_optional = true 132 | warn_redundant_casts = true 133 | exclude = "^(examples/|scripts/|tests/)" 134 | 135 | [tool.ruff] 136 | exclude = [ 137 | ".bzr", ".direnv", ".eggs", ".git", ".git-rewrite", ".hg", ".mypy_cache", 138 | ".nox", ".pants.d", ".pytype", ".ruff_cache", ".svn", ".tox", ".venv", 139 | "__pypackages__", "_build", "buck-out", "build", "dist", "node_modules", 140 | "venv", "tests" 141 | ] 142 | line-length = 100 143 | indent-width = 4 144 | src = ["src"] 145 | target-version = "py310" 146 | unsafe-fixes = false 147 | 148 | [tool.ruff.lint] 149 | select = ["ANN", "E", "F", "I", "W", "B", "RUF", "SIM", "C90"] 150 | ignore = ["D100", "D101", "D102", "D103", "D104", "D105", "D106", "D107"] 151 | fixable = ["ALL"] 152 | unfixable = [] 153 | dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" 154 | 155 | [tool.ruff.format] 156 | quote-style = "double" 157 | indent-style = "space" 158 | skip-magic-trailing-comma = false 159 | line-ending = "auto" 160 | 161 | [tool.ruff.lint.pydocstyle] 162 | convention = "google" 163 | 164 | [tool.ruff.lint.per-file-ignores] 165 | "tests/**/*.py" = [] 166 | -------------------------------------------------------------------------------- /src/omni_lpr/rest.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import logging 4 | 5 | from pydantic import BaseModel, ValidationError 6 | from spectree import Response, SpecTree 7 | from starlette.requests import Request 8 | from starlette.responses import JSONResponse 9 | from starlette.routing import Route 10 | 11 | from .api_models import ( 12 | ErrorResponse, 13 | 
JsonContentBlock, 14 | ToolListResponse, 15 | ToolResponse, 16 | ) 17 | from .settings import settings 18 | from .tools import tool_registry 19 | 20 | # Initialize logger 21 | _logger = logging.getLogger(__name__) 22 | 23 | # Initialize Spectree for API documentation generation 24 | api_spec = SpecTree( 25 | "starlette", 26 | title="Omni-LPR REST API", 27 | description="A multi-interface server for automatic license plate recognition.", 28 | version=settings.pkg_version, 29 | mode="strict", 30 | # Fix: Make doc paths relative to the sub-app's root 31 | swagger_url="/docs", 32 | redoc_url="/redoc", 33 | naming_strategy=lambda model: model.__name__, 34 | servers=[{"url": "/api/v1"}], 35 | ) 36 | 37 | 38 | @api_spec.validate(resp=Response(HTTP_200=ToolListResponse), tags=["Tool Listing"]) 39 | async def list_tools(request: Request) -> JSONResponse: 40 | """ 41 | Lists all available tools. 42 | """ 43 | tools = tool_registry.list() 44 | tool_dicts = [dict(t) for t in tools] 45 | response_data = ToolListResponse(tools=tool_dicts) 46 | return JSONResponse(response_data.model_dump()) 47 | 48 | 49 | async def _parse_tool_arguments(request: Request, model: BaseModel) -> BaseModel: 50 | """ 51 | Parses and validates tool arguments from an incoming request. 
52 | """ 53 | content_type = request.headers.get("content-type", "") 54 | 55 | if "application/json" in content_type: 56 | _logger.debug("Processing 'application/json' request.") 57 | body = await request.body() 58 | json_data = json.loads(body) if body else {} 59 | return model(**json_data) 60 | 61 | if "multipart/form-data" in content_type: 62 | _logger.debug("Processing 'multipart/form-data' request.") 63 | form = await request.form() 64 | image_upload = form.get("image") 65 | if not image_upload: 66 | raise ValueError("Missing 'image' part in multipart form.") 67 | 68 | image_bytes = await image_upload.read() 69 | image_base64 = base64.b64encode(image_bytes).decode("utf-8") 70 | 71 | params = {k: v for k, v in form.items() if k != "image"} 72 | params["image_base64"] = image_base64 73 | return model(**params) 74 | 75 | if model.model_fields: 76 | _logger.warning(f"Unsupported Content-Type: {content_type}") 77 | raise ValueError("Unsupported Content-Type. Use application/json or multipart/form-data.") 78 | else: 79 | return model() 80 | 81 | 82 | @api_spec.validate( 83 | resp=Response( 84 | HTTP_200=ToolResponse, 85 | HTTP_400=ErrorResponse, 86 | HTTP_404=ErrorResponse, 87 | HTTP_500=ErrorResponse, 88 | ), 89 | tags=["Tool Invocation"], 90 | ) 91 | async def invoke_tool(request: Request) -> JSONResponse: 92 | """ 93 | Handles the execution of a specific tool identified by its name. 
94 | """ 95 | tool_name = request.path_params["tool_name"] 96 | _logger.info(f"REST endpoint 'invoke_tool' called for tool: '{tool_name}'") 97 | 98 | if tool_name not in tool_registry._tools: 99 | error = ErrorResponse( 100 | error={"code": "NOT_FOUND", "message": f"Tool '{tool_name}' not found."} 101 | ) 102 | return JSONResponse(error.model_dump(), status_code=404) 103 | 104 | input_model = tool_registry._tool_models.get(tool_name, BaseModel) 105 | 106 | try: 107 | validated_args = await _parse_tool_arguments(request, input_model) 108 | mcp_content_blocks = await tool_registry.call_validated(tool_name, validated_args) 109 | api_content_blocks = [ 110 | JsonContentBlock(data=json.loads(block.text)) for block in mcp_content_blocks 111 | ] 112 | response_data = ToolResponse(content=api_content_blocks) 113 | return JSONResponse(response_data.model_dump()) 114 | 115 | except ValidationError as e: 116 | error = ErrorResponse( 117 | error={ 118 | "code": "VALIDATION_ERROR", 119 | "message": "Input validation failed.", 120 | "details": e.errors(), 121 | } 122 | ) 123 | return JSONResponse(error.model_dump(), status_code=400) 124 | except (json.JSONDecodeError, ValueError) as e: 125 | error = ErrorResponse(error={"code": "BAD_REQUEST", "message": str(e)}) 126 | return JSONResponse(error.model_dump(), status_code=400) 127 | except Exception as e: 128 | _logger.error(f"An unexpected error occurred in tool '{tool_name}': {e}", exc_info=True) 129 | error = ErrorResponse( 130 | error={"code": "INTERNAL_SERVER_ERROR", "message": "An internal server error occurred."} 131 | ) 132 | return JSONResponse(error.model_dump(), status_code=500) 133 | 134 | 135 | def setup_rest_routes() -> list[Route]: 136 | """ 137 | Creates and decorates all REST API routes. 
138 | """ 139 | routes = [ 140 | Route("/tools", endpoint=list_tools, methods=["GET"]), 141 | Route("/tools/{tool_name}/invoke", endpoint=invoke_tool, methods=["POST"]), 142 | ] 143 | return routes 144 | -------------------------------------------------------------------------------- /logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 18 | 38 | 39 | 40 | 46 | 50 | 52 | 56 | 60 | 64 | 68 | 72 | 76 | 80 | 81 | 84 | 88 | 92 | 93 | 96 | 100 | 104 | 105 | 106 | -------------------------------------------------------------------------------- /src/omni_lpr/__main__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from contextlib import asynccontextmanager 3 | 4 | import click 5 | from mcp.server.streamable_http_manager import StreamableHTTPSessionManager 6 | from pythonjsonlogger import jsonlogger 7 | from starlette.applications import Starlette 8 | from starlette.middleware.cors import CORSMiddleware 9 | from starlette.responses import JSONResponse 10 | from starlette.routing import Mount, Route 11 | from starlette.types import Receive, Scope, Send 12 | 13 | from .event_store import InMemoryEventStore 14 | from .mcp import app as mcp_app 15 | from .settings import settings 16 | from .tools import setup_cache, setup_tools 17 | 18 | _logger = logging.getLogger(__name__) 19 | 20 | 21 | def setup_logging(log_level: str): 22 | level = logging.getLevelName(log_level.upper()) 23 | logHandler = logging.StreamHandler() 24 | formatter = jsonlogger.JsonFormatter("%(asctime)s %(name)s %(levelname)s %(message)s") 25 | logHandler.setFormatter(formatter) 26 | logging.basicConfig(level=level, handlers=[logHandler]) 27 | _logger.info(f"Logging configured with level: {log_level.upper()}") 28 | 29 | 30 | async def health_check(_request): 31 | """Health check endpoint.""" 32 | _logger.debug("Health check requested.") 33 | return JSONResponse({"status": "ok", "version": 
settings.pkg_version}) 34 | 35 | 36 | # --- Setup Streamable HTTP Manager for the main app --- 37 | event_store = InMemoryEventStore() 38 | session_manager = StreamableHTTPSessionManager(app=mcp_app, event_store=event_store) 39 | 40 | 41 | async def handle_streamable_http(scope: Scope, receive: Receive, send: Send) -> None: 42 | """ASGI handler for streamable HTTP connections.""" 43 | await session_manager.handle_request(scope, receive, send) 44 | 45 | 46 | @asynccontextmanager 47 | async def lifespan(app: Starlette): 48 | """Context manager for managing the session manager lifecycle.""" 49 | async with session_manager.run(): 50 | _logger.info("Application started with StreamableHTTP session manager.") 51 | try: 52 | yield 53 | finally: 54 | _logger.info("Application shutting down...") 55 | 56 | 57 | # Create main app with lifespan manager 58 | starlette_app = Starlette(debug=True, lifespan=lifespan) 59 | 60 | 61 | def setup_app_routes(main_app: Starlette): 62 | """Adds routes to the Starlette application.""" 63 | from .rest import api_spec, setup_rest_routes 64 | 65 | # 1. Create a separate sub-application for the documented v1 API 66 | api_v1_app = Starlette() 67 | api_v1_app.router.routes.extend(setup_rest_routes()) 68 | 69 | # 2. Register spectree only on the sub-application 70 | api_spec.register(api_v1_app) 71 | 72 | # 3. Define other routes for the main application 73 | health_route = Route("/api/health", endpoint=health_check, methods=["GET"]) 74 | 75 | # 4. 
Mount the sub-app and add other routes to the main app 76 | main_app.routes.extend( 77 | [ 78 | Mount("/mcp/", app=handle_streamable_http), 79 | health_route, 80 | Mount("/api/v1", app=api_v1_app), 81 | ] 82 | ) 83 | 84 | 85 | # Run setup logic at import time 86 | setup_tools() 87 | setup_app_routes(starlette_app) 88 | 89 | # Wrap the final app with CORS middleware 90 | starlette_app = CORSMiddleware( 91 | starlette_app, 92 | allow_origins=["*"], 93 | allow_methods=["GET", "POST", "DELETE"], 94 | expose_headers=["Mcp-Session-Id"], 95 | ) 96 | 97 | 98 | @click.command() 99 | @click.option("--host", default=None, help="The host to bind to.", envvar="HOST") 100 | @click.option("--port", default=None, type=int, help="The port to bind to.", envvar="PORT") 101 | @click.option("--log-level", default=None, help="The log level to use.", envvar="LOG_LEVEL") 102 | # ... (the rest of the file is unchanged) ... 103 | @click.option( 104 | "--default-ocr-model", 105 | default=None, 106 | help="The default OCR model to use.", 107 | envvar="DEFAULT_OCR_MODEL", 108 | ) 109 | @click.option( 110 | "--default-detector-model", 111 | default=None, 112 | help="The default detector model to use.", 113 | envvar="DEFAULT_DETECTOR_MODEL", 114 | ) 115 | @click.option( 116 | "--max-image-size-mb", 117 | default=None, 118 | type=int, 119 | help="The maximum image size in megabytes.", 120 | envvar="MAX_IMAGE_SIZE_MB", 121 | ) 122 | @click.option( 123 | "--model-cache-size", 124 | default=None, 125 | type=int, 126 | help="The number of models to keep in the cache.", 127 | envvar="MODEL_CACHE_SIZE", 128 | ) 129 | def main( 130 | host: str | None, 131 | port: int | None, 132 | log_level: str | None, 133 | default_ocr_model: str | None, 134 | default_detector_model: str | None, 135 | max_image_size_mb: int | None, 136 | model_cache_size: int | None, 137 | ) -> int: 138 | """Main entrypoint for the omni-lpr server.""" 139 | import uvicorn 140 | 141 | # Override settings from CLI if provided 142 | if 
host: 143 | settings.host = host 144 | if port: 145 | settings.port = port 146 | if log_level: 147 | settings.log_level = log_level 148 | if default_ocr_model: 149 | settings.default_ocr_model = default_ocr_model 150 | if default_detector_model: 151 | settings.default_detector_model = default_detector_model 152 | if max_image_size_mb: 153 | settings.max_image_size_mb = max_image_size_mb 154 | if model_cache_size: 155 | settings.model_cache_size = model_cache_size 156 | 157 | setup_logging(settings.log_level) 158 | _logger.info("Setting up cache...") 159 | setup_cache() 160 | 161 | _logger.info(f"Starting Streamable HTTP server on {settings.host}:{settings.port}") 162 | uvicorn.run(starlette_app, host=settings.host, port=settings.port) 163 | return 0 164 | 165 | 166 | if __name__ == "__main__": 167 | main() 168 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # ============================================================================== 2 | # VARIABLES 3 | # ============================================================================== 4 | PYTHON ?= python3 5 | PIP ?= pip3 6 | DEP_MNGR ?= poetry 7 | DOCS_DIR ?= docs 8 | DOCKERFILE ?= Dockerfile 9 | GUNICORN_NUM_WORKERS ?= 4 10 | 11 | # Server configuration (can be overridden by environment variables) 12 | PORT ?= 8000 13 | HOST ?= 0.0.0.0 14 | 15 | # Directories and files to clean 16 | CACHE_DIRS = .mypy_cache .pytest_cache .ruff_cache 17 | COVERAGE = .coverage htmlcov coverage.xml 18 | DIST_DIRS = dist junit 19 | TMP_DIRS = site 20 | 21 | .DEFAULT_GOAL := help 22 | 23 | # ============================================================================== 24 | # HELP 25 | # ============================================================================== 26 | .PHONY: help 27 | help: ## Show the help messages for all targets 28 | @echo "Usage: make " 29 | @echo "" 30 | @echo "Targets:" 31 | 
@grep -E '^[a-zA-Z_-]+:.*## .*$$' Makefile | \ 32 | awk 'BEGIN {FS = ":.*## "}; {printf " \033[36m%-15s\033[0m %s\n", $$1, $$2}' 33 | 34 | # ============================================================================== 35 | # SETUP & INSTALLATION 36 | # ============================================================================== 37 | .PHONY: setup 38 | setup: ## Install system dependencies and dependency manager (e.g., Poetry) 39 | sudo apt-get update 40 | sudo apt-get install -y python3-pip docker.io 41 | $(PIP) install --upgrade pip 42 | $(PIP) install $(DEP_MNGR) 43 | 44 | .PHONY: install 45 | install: ## Install Python dependencies 46 | $(DEP_MNGR) install --extras dev --no-interaction 47 | 48 | # ============================================================================== 49 | # QUALITY & TESTING 50 | # ============================================================================== 51 | .PHONY: test 52 | test: ## Run tests 53 | $(DEP_MNGR) run pytest 54 | 55 | .PHONY: lint 56 | lint: ## Run linter checks 57 | $(DEP_MNGR) run ruff check --fix 58 | 59 | .PHONY: format 60 | format: ## Format code 61 | $(DEP_MNGR) run ruff format 62 | 63 | .PHONY: typecheck 64 | typecheck: ## Typecheck code 65 | $(DEP_MNGR) run mypy . 66 | 67 | .PHONY: setup-hooks 68 | setup-hooks: ## Install Git hooks (pre-commit and pre-push) 69 | $(DEP_MNGR) run pre-commit install --hook-type pre-commit 70 | $(DEP_MNGR) run pre-commit install --hook-type pre-push 71 | $(DEP_MNGR) run pre-commit install-hooks 72 | 73 | .PHONY: test-hooks 74 | test-hooks: ## Test Git hooks on all files 75 | $(DEP_MNGR) run pre-commit run --all-files 76 | 77 | # ============================================================================== 78 | # APPLICATION 79 | # ============================================================================== 80 | .PHONY: run 81 | run: ## Start the server 82 | @echo "Starting the server..." 
83 | $(DEP_MNGR) run omni-lpr --host $(HOST) --port $(PORT) --log-level DEBUG 84 | 85 | .PHONY: run-gunicorn 86 | run-gunicorn: ## Start the server with Gunicorn 87 | @echo "Starting the Omni-LPR server with Gunicorn..." 88 | $(DEP_MNGR) run gunicorn -w $(GUNICORN_NUM_WORKERS) -k uvicorn.workers.UvicornWorker omni_lpr:starlette_app 89 | 90 | # ============================================================================== 91 | # BUILD & PUBLISH 92 | # ============================================================================== 93 | .PHONY: build 94 | build: ## Build distributions 95 | $(DEP_MNGR) build 96 | 97 | .PHONY: publish 98 | publish: ## Publish to PyPI (requires PYPI_TOKEN) 99 | $(DEP_MNGR) config pypi-token.pypi $(PYPI_TOKEN) 100 | $(DEP_MNGR) publish --build 101 | 102 | # ============================================================================== 103 | # EXAMPLES 104 | # ============================================================================== 105 | .PHONY: example-rest example-mcp 106 | 107 | SERVER_PID := /tmp/omni-lpr-server.pid 108 | 109 | # Define the lists of example files 110 | REST_EXAMPLES := $(wildcard examples/rest/*.py) 111 | MCP_EXAMPLES := $(wildcard examples/mcp/*.py) 112 | 113 | define run_examples 114 | @echo "Starting server in background..." 115 | $(DEP_MNGR) run omni-lpr > /dev/null 2>&1 & echo $$! > $(SERVER_PID) 116 | @echo "Waiting for server to start..." 117 | @while ! nc -z 127.0.0.1 8000; do sleep 1; done 118 | @echo "Server started. Running $(1) examples..." 119 | @for example in $(2); do \ 120 | echo "\n--- Running $$example ---"; \ 121 | $(DEP_MNGR) run python $$example; \ 122 | done 123 | @echo "\nStopping server..." 
124 | @kill `cat $(SERVER_PID)` 125 | endef 126 | 127 | example-rest: ## Run all REST API examples 128 | $(call run_examples,"REST",$(REST_EXAMPLES)) 129 | 130 | example-mcp: ## Run all MCP API examples 131 | $(call run_examples,"MCP",$(MCP_EXAMPLES)) 132 | 133 | # ============================================================================== 134 | # DOCKER 135 | # ============================================================================== 136 | IMAGE_NAME ?= omni-lpr 137 | 138 | .PHONY: docker-build-cpu 139 | docker-build-cpu: ## Build the Docker image for CPU 140 | docker build -t $(IMAGE_NAME):cpu --build-arg BACKEND=cpu --target cpu -f Dockerfile . 141 | 142 | .PHONY: docker-build-cuda 143 | docker-build-cuda: ## Build the Docker image for CUDA 144 | docker build -t $(IMAGE_NAME):cuda --build-arg BACKEND=cuda --target cuda -f Dockerfile . 145 | 146 | .PHONY: docker-build-openvino 147 | docker-build-openvino: ## Build the Docker image for OpenVINO 148 | docker build -t $(IMAGE_NAME):openvino --build-arg BACKEND=openvino --target openvino -f Dockerfile . 
149 | 150 | .PHONY: docker-build 151 | docker-build: docker-build-cpu ## Build the default Docker image (CPU) 152 | 153 | .PHONY: docker-run-cpu 154 | docker-run-cpu: ## Run the CPU Docker container 155 | docker run --rm -it -p $(PORT):$(PORT) $(IMAGE_NAME):cpu 156 | 157 | .PHONY: docker-run-cuda 158 | docker-run-cuda: ## Run the CUDA Docker container 159 | docker run --rm -it --gpus all -p $(PORT):$(PORT) -e EXECUTION_DEVICE=cuda $(IMAGE_NAME):cuda 160 | 161 | .PHONY: docker-run-openvino 162 | docker-run-openvino: ## Run the OpenVINO Docker container 163 | docker run --rm -it -p $(PORT):$(PORT) -e EXECUTION_DEVICE=openvino $(IMAGE_NAME):openvino 164 | 165 | .PHONY: docker-run 166 | docker-run: docker-run-cpu ## Run the default Docker container (CPU) 167 | 168 | # ============================================================================== 169 | # MAINTENANCE 170 | # ============================================================================== 171 | .PHONY: clean 172 | clean: ## Remove caches and build artifacts 173 | find . -type f -name '*.pyc' -delete 174 | find . -type d -name '__pycache__' -exec rm -rf {} + 175 | rm -rf $(CACHE_DIRS) $(COVERAGE) $(DIST_DIRS) $(TMP_DIRS) 176 | 177 | .PHONY: docker-prune 178 | docker-prune: ## Remove dangling (untagged) Docker images 179 | @echo "Removing dangling Docker images..." 180 | docker image prune -f 181 | -------------------------------------------------------------------------------- /docs/assets/images/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 22 | 25 | 28 | 31 | 33 | 34 | 35 | 38 | 41 | 44 | 47 | 50 | 53 | 56 | 59 | 62 | 65 | 66 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | Omni-LPR Logo 4 | 5 |
6 | 7 |

Omni-LPR

8 | 9 | [![Tests](https://img.shields.io/github/actions/workflow/status/habedi/omni-lpr/tests.yml?label=tests&style=flat&labelColor=333333&logo=github&logoColor=white)](https://github.com/habedi/omni-lpr/actions/workflows/tests.yml) 10 | [![Code Coverage](https://img.shields.io/codecov/c/github/habedi/omni-lpr?style=flat&label=coverage&labelColor=333333&logo=codecov&logoColor=white)](https://codecov.io/gh/habedi/omni-lpr) 11 | [![Code Quality](https://img.shields.io/codefactor/grade/github/habedi/omni-lpr?style=flat&label=code%20quality&labelColor=333333&logo=codefactor&logoColor=white)](https://www.codefactor.io/repository/github/habedi/omni-lpr) 12 | [![Python Version](https://img.shields.io/badge/python-%3E=3.10-3776ab?style=flat&labelColor=333333&logo=python&logoColor=white)](https://github.com/habedi/omni-lpr) 13 | [![PyPI](https://img.shields.io/pypi/v/omni-lpr?style=flat&labelColor=333333&logo=pypi&logoColor=white)](https://pypi.org/project/omni-lpr/) 14 | [![License](https://img.shields.io/badge/license-MIT-00acc1?style=flat&labelColor=333333&logo=open-source-initiative&logoColor=white)](https://github.com/habedi/omni-lpr/blob/main/LICENSE) 15 |
16 | [![Documentation](https://img.shields.io/badge/docs-read-8ca0d7?style=flat&labelColor=282c34)](https://github.com/habedi/omni-lpr/tree/main/docs) 17 | [![Examples](https://img.shields.io/badge/examples-view-green?style=flat&labelColor=282c34)](https://github.com/habedi/omni-lpr/tree/main/examples) 18 | [![Docker Image (CPU)](https://img.shields.io/badge/Docker-CPU-007ec6?style=flat&logo=docker)](https://github.com/habedi/omni-lpr/pkgs/container/omni-lpr-cpu) 19 | [![Docker Image (OpenVINO)](https://img.shields.io/badge/Docker-OpenVINO-007ec6?style=flat&logo=docker)](https://github.com/habedi/omni-lpr/pkgs/container/omni-lpr-openvino) 20 | [![Docker Image (CUDA)](https://img.shields.io/badge/Docker-CUDA-007ec6?style=flat&logo=docker)](https://github.com/habedi/omni-lpr/pkgs/container/omni-lpr-cuda) 21 | 22 | A multi-interface (REST and MCP) server for automatic license plate recognition 23 | 24 |
25 | 26 | --- 27 | 28 | Omni-LPR is a self-hostable server that provides automatic license plate recognition (ALPR) capabilities via a REST API 29 | and the Model Context Protocol (MCP). It can be used both as a standalone ALPR microservice and as an ALPR toolbox for 30 | AI agents and large language models (LLMs). 31 | 32 | ### Why Omni-LPR? 33 | 34 | Using Omni-LPR can have the following benefits: 35 | 36 | - **Decoupling.** Your main application can be in any programming language. It doesn't need to be tangled up with Python 37 | or specific ML dependencies because the server handles all of that. 38 | 39 | - **Multiple Interfaces.** You aren't locked into one way of communicating. You can use a standard REST API from any 40 | app, or you can use MCP, which is designed for AI agent integration. 41 | 42 | - **Ready-to-Deploy.** You don't have to build it from scratch. There are pre-built Docker images that are easy to 43 | deploy and start using immediately. 44 | 45 | - **Hardware Acceleration.** The server is optimized for the hardware you have. It supports generic CPUs (ONNX), Intel 46 | CPUs (OpenVINO), and NVIDIA GPUs (CUDA). 47 | 48 | - **Asynchronous I/O.** It's built on Starlette, which means it has high-performance, non-blocking I/O. It can handle 49 | many concurrent requests without getting bogged down. 50 | 51 | - **Scalability.** Because it's a separate service, it can be scaled independently of your main application. If you 52 | suddenly need more ALPR power, you can scale Omni-LPR up without touching anything else. 53 | 54 | 55 | See the [ROADMAP.md](ROADMAP.md) for the list of implemented and planned features. 56 | 57 | > [!IMPORTANT] 58 | > Omni-LPR is in early development, so bugs and breaking API changes are expected. 59 | > Please use the [issues page](https://github.com/habedi/omni-lpr/issues) to report bugs or request features. 
60 | 61 | --- 62 | 63 | ### Quickstart 64 | 65 | You can get started with Omni-LPR in a few minutes by following the steps described below. 66 | 67 | #### 1. Install the Server 68 | 69 | You can install Omni-LPR using `pip`: 70 | 71 | ```sh 72 | pip install omni-lpr 73 | ``` 74 | 75 | #### 2. Start the Server 76 | 77 | When installed, start the server with a single command: 78 | 79 | ```sh 80 | omni-lpr 81 | ``` 82 | 83 | By default, the server will be listening on `http://127.0.0.1:8000`. 84 | You can confirm it's running by accessing the health check endpoint: 85 | 86 | ```sh 87 | curl http://127.0.0.1:8000/api/health 88 | # Sample expected output: {"status": "ok", "version": "0.3.4"} 89 | ``` 90 | 91 | #### 3. Recognize a License Plate 92 | 93 | Now you can make a request to recognize a license plate from an image. 94 | The example below uses a publicly available image URL. 95 | 96 | ```sh 97 | curl -X POST \ 98 | -H "Content-Type: application/json" \ 99 | -d '{"path": "https://www.olavsplates.com/foto_n/n_cx11111.jpg"}' \ 100 | http://127.0.0.1:8000/api/v1/tools/detect_and_recognize_plate_from_path/invoke 101 | ``` 102 | 103 | You should receive a JSON response with the detected license plate information. 104 | 105 | ### Usage 106 | 107 | Omni-LPR exposes its capabilities as "tools" that can be called via a REST API or over the MCP. 108 | 109 | #### Available Tools 110 | 111 | The server provides tools for listing models, recognizing plates from image data, and recognizing plates from a path. 112 | 113 | - `list_models`: Lists the available detector and OCR models. 114 | 115 | - **Tools that process image data** (provided as Base64 or file upload): 116 | - `recognize_plate`: Recognizes text from a pre-cropped license plate image. 117 | - `detect_and_recognize_plate`: Detects and recognizes all license plates in a full image. 
118 | 119 | - **Tools that process an image path** (a URL or local file path): 120 | - `recognize_plate_from_path`: Recognizes text from a pre-cropped license plate image at a given path. 121 | - `detect_and_recognize_plate_from_path`: Detects and recognizes plates in a full image at a given path. 122 | 123 | For more details on how to use the different tools and provide image data, please see the 124 | [API Documentation](docs/README.md). 125 | 126 | #### REST API 127 | 128 | The REST API provides a standard way to interact with the server. All tool endpoints are available under the `/api/v1` 129 | prefix. Once the server is running, you can access interactive API documentation in the Swagger UI 130 | at http://127.0.0.1:8000/api/v1/apidoc/swagger. 131 | 132 | #### MCP Interface 133 | 134 | The server also exposes its tools over the MCP for integration with AI agents and LLMs. The MCP endpoint is available at 135 | http://127.0.0.1:8000/mcp/, via streamable HTTP. 136 | 137 | You can use a tool like [MCP Inspector](https://github.com/modelcontextprotocol/inspector) to explore the available MCP 138 | tools. 139 | 140 |
141 | 142 | MCP Inspector Screenshot 143 | 144 |
145 | 146 | ### Integration 147 | 148 | You can connect any client that supports the MCP protocol to the server. 149 | The following examples show how to use the server with [LM Studio](https://lmstudio.ai/). 150 | 151 | #### LM Studio Configuration 152 | 153 | ```json 154 | { 155 | "mcpServers": { 156 | "omni-lpr-local": { 157 | "url": "http://127.0.0.1:8000/mcp/" 158 | } 159 | } 160 | } 161 | ``` 162 | 163 | #### Tool Usage Examples 164 | 165 | The screenshot below shows the use of the `list_models` tool in LM Studio to list the models available for ALPR. 166 | 167 | 
168 | 169 | LM Studio Screenshot 1 170 | 171 |
172 | 173 | The screenshot below shows using the `detect_and_recognize_plate_from_path` tool in LM Studio to detect and recognize 174 | the license plate from an [image available on the web](https://www.olavsplates.com/foto_n/n_cx11111.jpg). 175 | 176 |
177 | 178 | LM Studio Screenshot 2 179 | 180 |
181 | 182 | --- 183 | 184 | ### Documentation 185 | 186 | Omni-LPR documentation is available [here](docs). 187 | 188 | #### Examples 189 | 190 | Check out the [examples](examples) directory for usage examples. 191 | 192 | --- 193 | 194 | ### Contributing 195 | 196 | Contributions are always welcome! 197 | Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started. 198 | 199 | ### License 200 | 201 | Omni-LPR is licensed under the MIT License (see [LICENSE](LICENSE)). 202 | 203 | ### Acknowledgements 204 | 205 | - This project uses the awesome [fast-plate-ocr](https://github.com/ankandrew/fast-plate-ocr) 206 | and [fast-alpr](https://github.com/ankandrew/fast-alpr) Python libraries. 207 | - The project logo is from [SVG Repo](https://www.svgrepo.com/svg/237124/license-plate-number). 208 | 209 | 210 | 211 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## Documentation 2 | 3 | This document provides detailed information about installing, configuring, and using Omni-LPR. 4 | For a quick start, please see the main [README.md](../README.md) file. 5 | 6 | ### Installation 7 | 8 | You can run Omni-LPR either by installing it as a Python library or by using a pre-built Docker image. 9 | 10 | #### Python Installation 11 | 12 | You can install Omni-LPR via `pip`. By default, this will use the CPU-optimized ONNX models. 13 | 14 | ```sh 15 | pip install omni-lpr 16 | ``` 17 | 18 | For hardware-specific optimizations, you can install optional dependencies: 19 | 20 | - **OpenVINO (Intel CPUs):** `pip install omni-lpr[openvino]` 21 | - **CUDA (NVIDIA GPUs):** `pip install omni-lpr[cuda]` 22 | 23 | #### Docker Installation 24 | 25 | Pre-built Docker images are available from the [GitHub Container Registry](https://github.com/habedi/omni-lpr/packages). 26 | You can pull the images and run them directly. 
27 | 28 | - **CPU Image (ONNX):** 29 | ```sh 30 | docker run --rm -it -p 8000:8000 ghcr.io/habedi/omni-lpr-cpu:latest 31 | ``` 32 | 33 | - **CPU Image (OpenVINO):** 34 | ```sh 35 | docker run --rm -it -p 8000:8000 -e EXECUTION_DEVICE=openvino ghcr.io/habedi/omni-lpr-openvino:latest 36 | ``` 37 | 38 | - **GPU Image (CUDA):** 39 | ```sh 40 | docker run --rm -it --gpus all -p 8000:8000 -e EXECUTION_DEVICE=cuda ghcr.io/habedi/omni-lpr-cuda:latest 41 | ``` 42 | 43 | > [!NOTE] 44 | > The `latest` tag refers to the latest stable release. You can replace `latest` with a specific version tag (for 45 | > example, `0.2.0`) from the [list of available packages](https://github.com/habedi/omni-lpr/packages). 46 | 47 | For developers, you can also build the Docker images locally using the provided [Makefile](../Makefile). 48 | 49 | - **CPU (default):** `make docker-build-cpu` 50 | - **OpenVINO:** `make docker-build-openvino` 51 | - **CUDA:** `make docker-build-cuda` 52 | 53 | ### API Documentation 54 | 55 | The server exposes its functionality via two interfaces: a REST API and the MCP. Additionally, a health check endpoint 56 | is available at `GET /api/health`. 57 | 58 | #### REST API 59 | 60 | The REST API provides a simple way to interact with the server using standard HTTP requests. 61 | All tool endpoints are available under the `/api/v1` prefix. 62 | 63 | > [!TIP] 64 | > This project provides interactive API documentation (Swagger UI and ReDoc). Once the server is running, you can access 65 | > them at: 66 | > - **Swagger UI**: http://127.0.0.1:8000/api/v1/apidoc/swagger 67 | > - **ReDoc**: http://127.0.0.1:8000/api/v1/apidoc/redoc 68 | 69 | ##### Providing Image Data 70 | 71 | The server's tools can process images provided in several ways. The key is to use the right tool for your input method: 72 | 73 | 1. **Image Data (`image_base64`)**: For tools like `recognize_plate` and `detect_and_recognize_plate`, you provide the 74 | actual image data. 
The REST API accepts this data in two formats: 75 | - As a Base64-encoded string within a JSON object (`"Content-Type: application/json"`). 76 | - As a direct file upload (`"Content-Type: multipart/form-data"`). The server automatically converts the 77 | uploaded file into a Base64 string for the tool. 78 | 79 | 2. **Image Path (`path`)**: For tools like `recognize_plate_from_path` and `detect_and_recognize_plate_from_path`, 80 | you provide a URL or a local file path to the image in a JSON object. 81 | 82 | ##### Listing Available Tools 83 | 84 | To get a list of available tools and their input schemas, send a `GET` request to the `/api/v1/tools` endpoint. 85 | This helps you see which tools are available and what parameters they expect (for example, `image_base64` or `path`). 86 | 87 | ```sh 88 | curl http://127.0.0.1:8000/api/v1/tools 89 | ``` 90 | 91 | This will return a JSON array of tool objects, each with a `name`, `description`, and `input_schema`. 92 | 93 | ##### Invoking a Tool 94 | 95 | To call a specific tool, send a `POST` request to its invocation endpoint: `/api/v1/tools/{tool_name}/invoke`. 96 | 97 | ###### Example 1: Using a tool that takes image data (`recognize_plate`) 98 | 99 | This tool expects the `image_base64` parameter. You can provide it via a JSON request or a file upload. 
100 | 101 | **Option A: With a Base64 string in JSON** 102 | 103 | ```sh 104 | # On macOS: base64 -i /path/to/your/image.jpg | pbcopy 105 | # On Linux: base64 /path/to/your/image.jpg | xsel -ib 106 | 107 | curl -X POST \ 108 | -H "Content-Type: application/json" \ 109 | -d '{"image_base64": "PASTE_YOUR_BASE64_STRING_HERE"}' \ 110 | http://127.0.0.1:8000/api/v1/tools/recognize_plate/invoke 111 | ``` 112 | 113 | **Option B: With a direct file upload** 114 | 115 | ```sh 116 | curl -X POST \ 117 | -F "image=@/path/to/your/image.jpg" \ 118 | http://127.0.0.1:8000/api/v1/tools/recognize_plate/invoke 119 | ``` 120 | 121 | ###### Example 2: Using a tool that takes an image path (`recognize_plate_from_path`) 122 | 123 | This tool expects the `path` parameter, which can be a URL or a local file path accessible by the server. 124 | 125 | ```sh 126 | curl -X POST \ 127 | -H "Content-Type: application/json" \ 128 | -d '{"path": "https://www.olavsplates.com/foto_n/n_cx11111.jpg"}' \ 129 | http://127.0.0.1:8000/api/v1/tools/recognize_plate_from_path/invoke 130 | ``` 131 | 132 | #### MCP Interface 133 | 134 | The server also exposes its capabilities as tools over the MCP. 135 | The MCP endpoint is available at http://127.0.0.1:8000/mcp/. 136 | 137 | ##### Available Tools 138 | 139 | The following tools are implemented and can be called via the MCP interface: 140 | 141 | * `recognize_plate`: Recognizes text from a pre-cropped image of a license plate. 142 | * `recognize_plate_from_path`: Recognizes text from a pre-cropped license plate image at a given URL or local file 143 | path. 144 | * `detect_and_recognize_plate`: Detects and recognizes all license plates in an image. 145 | * `detect_and_recognize_plate_from_path`: Detects and recognizes license plates from an image at a given URL or 146 | local file path. 147 | * `list_models`: Lists the available detector and OCR models. 
148 | 149 | ### Startup Configuration 150 | 151 | The server can be configured using command-line arguments or environment variables. Environment variables are read from 152 | a `.env` file if it exists. Command-line arguments take precedence over environment variables. 153 | 154 | | Argument | Env Var | Description | Default | 155 | |----------------------------|--------------------------|----------------------------------------------------------------|---------------------------------------| 156 | | `--port` | `PORT` | Server port | `8000` | 157 | | `--host` | `HOST` | Server host | `127.0.0.1` | 158 | | `--log-level` | `LOG_LEVEL` | Logging level | `INFO` | 159 | | `--max-image-size-mb` | `MAX_IMAGE_SIZE_MB` | Maximum image size for uploads (in MB) | `5` | 160 | | `--model-cache-size` | `MODEL_CACHE_SIZE` | Number of models to keep in cache | `16` | 161 | | `--execution-device` | `EXECUTION_DEVICE` | Device for model inference (`auto`, `cpu`, `cuda`, `openvino`) | `auto` | 162 | | `--default-ocr-model` | `DEFAULT_OCR_MODEL` | Default OCR model | `cct-xs-v1-global-model` | 163 | | `--default-detector-model` | `DEFAULT_DETECTOR_MODEL` | Default detector model | `yolo-v9-t-384-license-plate-end2end` | 164 | 165 | ### Concurrency and Worker Configuration 166 | 167 | Omni-LPR can be run in two ways: directly via the `omni-lpr` command, or using the official Docker images. 168 | The way you run it affects how it handles concurrent requests and how you should configure it, especially for the 169 | stateful MCP interface. 170 | 171 | #### Running with Docker (Gunicorn) 172 | 173 | The Docker images use Gunicorn as a process manager to run multiple Uvicorn workers. 174 | This setup is ideal for production as it allows the server to handle many REST API requests in parallel. 175 | 176 | - **Default Behavior**: By default, the Docker images start with 4 worker processes. 177 | - **The MCP Problem**: The MCP is stateful (in most cases). 
With multiple workers, Gunicorn may route requests for the 178 | same session to different processes, causing errors. 179 | - **Solution**: If you plan to use the MCP interface, you must configure the Docker container to run with only one 180 | worker. You can do this by setting the `GUNICORN_WORKERS` environment variable. 181 | 182 | **Example: Running Docker with a single worker for MCP compatibility** 183 | 184 | ```sh 185 | docker run --rm -it -p 8000:8000 \ 186 | -e GUNICORN_WORKERS=1 \ 187 | ghcr.io/habedi/omni-lpr-cpu:latest 188 | ``` 189 | 190 | If you are only using the stateless REST API, you can leave the worker count at the default of 4 (or higher) for better 191 | performance. 192 | 193 | ### Hardware Acceleration Configuration 194 | 195 | To use hardware acceleration (like an NVIDIA GPU or Intel's OpenVINO), you need to perform two steps: 196 | 197 | 1. **Install the correct package**: You must install `omni-lpr` with the appropriate "extra" to get the necessary 198 | hardware-specific libraries. 199 | 2. **Set the Execution Device**: You must set the `EXECUTION_DEVICE` environment variable when running the server to 200 | tell the application which backend to activate. 201 | 202 | This two-step process guarantees that the application has the required libraries before it tries to use them. 
203 | 204 | #### Example: Using CUDA for NVIDIA GPUs 205 | 206 | **Step 1: Install with the `[cuda]` extra** 207 | 208 | ```sh 209 | pip install omni-lpr[cuda] 210 | ``` 211 | 212 | **Step 2: Run the server with `EXECUTION_DEVICE` set to `cuda`** 213 | 214 | ```sh 215 | EXECUTION_DEVICE=cuda omni-lpr 216 | ``` 217 | 218 | #### Example: Using OpenVINO for Intel CPUs 219 | 220 | **Step 1: Install with the `[openvino]` extra** 221 | 222 | ```sh 223 | pip install omni-lpr[openvino] 224 | ``` 225 | 226 | **Step 2: Run the server with `EXECUTION_DEVICE` set to `openvino`** 227 | 228 | ```sh 229 | EXECUTION_DEVICE=openvino omni-lpr 230 | ``` 231 | 232 | > [!NOTE] 233 | > If you set `EXECUTION_DEVICE` to `cuda` or `openvino` without having installed the corresponding package extra, the 234 | > application will fail to start with an error. 235 | > This is intentional to prevent silent fallbacks to the CPU. 236 | 237 | **Example: Forcing OpenVINO execution** 238 | 239 | ```sh 240 | docker run --rm -it -p 8000:8000 \ 241 | -e EXECUTION_DEVICE=openvino \ 242 | ghcr.io/habedi/omni-lpr-openvino:latest 243 | ``` 244 | 245 | #### Running with the `omni-lpr` Command (Uvicorn) 246 | 247 | When you install the package via `pip` and run the `omni-lpr` command, it uses Uvicorn directly as the web server. 248 | 249 | - **Default Behavior**: This method always runs with a single worker process. 250 | - **MCP Compatibility**: Because it only uses one worker, this method is always compatible with the MCP interface out of 251 | the box. No special configuration is needed. 252 | 253 | ### Available Models 254 | 255 | You can override the default models for a specific request by passing `detector_model` and `ocr_model` arguments in your 256 | request. 
257 | 258 | #### Available OCR Models: 259 | 260 | - `cct-xs-v1-global-model` (default) 261 | - `cct-s-v1-global-model` 262 | 263 | #### Available Detector Models: 264 | 265 | - `yolo-v9-s-608-license-plate-end2end` 266 | - `yolo-v9-t-640-license-plate-end2end` 267 | - `yolo-v9-t-512-license-plate-end2end` 268 | - `yolo-v9-t-416-license-plate-end2end` 269 | - `yolo-v9-t-384-license-plate-end2end` (default) 270 | - `yolo-v9-t-256-license-plate-end2end` 271 | 272 | > [!NOTE] 273 | > Models are from the [fast-plate-ocr](https://github.com/ankandrew/fast-plate-ocr) 274 | > and [fast-alpr](https://github.com/ankandrew/fast-alpr) projects. Please refer to their repositories for more 275 | > information. 276 | 277 | ### Security Considerations 278 | 279 | - **Network Exposure:** It is recommended to run Omni-LPR in a trusted network environment. Avoid exposing the server to 280 | the public internet unless strictly necessary. 281 | - **Reverse Proxy:** If you need to expose the server to the internet, use a reverse proxy (like Nginx or Caddy) to 282 | handle incoming requests. This allows you to terminate TLS, handle rate limiting, and provide an extra layer of 283 | security. 284 | - **Authentication:** The server does not have a built-in authentication mechanism. If you need to restrict access, 285 | implement authentication at the reverse proxy level. 286 | - **Input Validation:** The API uses Pydantic for input validation, which helps prevent many common injection-style 287 | attacks. However, always be mindful of the data you are sending. 
288 | -------------------------------------------------------------------------------- /docs/assets/images/dummy_figure.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 9 | 10 | G 11 | 12 |      Typical 13 | Structure of a Python Library 14 | 15 | 16 | cluster_package1 17 | 18 | Package1 19 | 20 | 21 | cluster_package2 22 | 23 | Package2 24 | 25 | 26 | 27 | Library 28 | 30 | Library 31 | 32 | 33 | 34 | Package1 35 | 37 | Package1 38 | 39 | 40 | 41 | Library->Package1 42 | 43 | 44 | 45 | 46 | 47 | Package2 48 | 50 | Package2 51 | 52 | 53 | 54 | Library->Package2 55 | 56 | 57 | 58 | 59 | 60 | Module1_1 61 | 62 | module1_1.py 63 | 64 | 65 | 66 | Package1->Module1_1 67 | 68 | 69 | 70 | 71 | 72 | Module1_2 73 | 74 | module1_2.py 75 | 76 | 77 | 78 | Package1->Module1_2 79 | 80 | 81 | 82 | 83 | 84 | ClassA 85 | 86 | ClassA 87 | 88 | 89 | 90 | Module1_1->ClassA 91 | 92 | 93 | 94 | 95 | 96 | function1_1 97 | 98 | function1_1() 99 | 100 | 101 | 102 | Module1_1->function1_1 103 | 104 | 105 | 106 | 107 | 108 | function1_2 109 | 110 | function1_2() 111 | 112 | 113 | 114 | Module1_1->function1_2 115 | 116 | 117 | 118 | 119 | 120 | ClassC 121 | 122 | ClassC 123 | 124 | 125 | 126 | Module1_2->ClassC 127 | 128 | 129 | 130 | 131 | 132 | ClassA_method1 133 | 134 | method1() 135 | 136 | 137 | 138 | ClassA->ClassA_method1 139 | 140 | 141 | 142 | 143 | 144 | ClassA_method2 145 | 146 | method2() 147 | 148 | 149 | 150 | ClassA->ClassA_method2 151 | 152 | 153 | 154 | 155 | 156 | ClassC_method1 157 | 158 | method1() 159 | 160 | 161 | 162 | ClassC->ClassC_method1 163 | 164 | 165 | 166 | 167 | 168 | ClassC_method2 169 | 170 | method2() 171 | 172 | 173 | 174 | ClassC->ClassC_method2 175 | 176 | 177 | 178 | 179 | 180 | Module2_1 181 | 182 | module2_1.py 183 | 184 | 185 | 186 | Package2->Module2_1 187 | 188 | 189 | 190 | 191 | 192 | ClassB 193 | 194 | ClassB 195 | 196 | 197 | 198 | Module2_1->ClassB 199 | 200 | 201 | 202 | 203 | 204 | 
function2_1 205 | 206 | function2_1() 207 | 208 | 209 | 210 | Module2_1->function2_1 211 | 212 | 213 | 214 | 215 | 216 | function2_2 217 | 218 | function2_2() 219 | 220 | 221 | 222 | Module2_1->function2_2 223 | 224 | 225 | 226 | 227 | 228 | ClassB_method1 229 | 230 | method1() 231 | 232 | 233 | 234 | ClassB->ClassB_method1 235 | 236 | 237 | 238 | 239 | 240 | -------------------------------------------------------------------------------- /tests/test_tools.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | from dataclasses import asdict, dataclass 4 | from typing import get_args 5 | from unittest.mock import AsyncMock, MagicMock 6 | 7 | import httpx 8 | import pytest 9 | from mcp import types 10 | from pydantic import BaseModel 11 | 12 | from omni_lpr import tools 13 | from omni_lpr.errors import ErrorCode, ToolLogicError 14 | from omni_lpr.settings import settings 15 | from omni_lpr.tools import ( 16 | DetectorModel, 17 | ListModelsArgs, 18 | OcrModel, 19 | ToolRegistry, 20 | list_models, 21 | setup_cache, 22 | setup_tools, 23 | tool_registry as global_tool_registry, 24 | ) 25 | 26 | TINY_PNG_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=" 27 | 28 | _MAX_BASE64_LENGTH = int(settings.max_image_size_mb * 1024 * 1024 * 4 / 3) 29 | _NEXT_BASE64_MULTIPLE_OF_4 = 4 - (_MAX_BASE64_LENGTH % 4) 30 | OVERSIZED_BASE64 = "a" * (_MAX_BASE64_LENGTH + _NEXT_BASE64_MULTIPLE_OF_4) 31 | 32 | 33 | @dataclass 34 | class MockBoundingBox: 35 | x1: int 36 | y1: int 37 | x2: int 38 | y2: int 39 | 40 | 41 | @dataclass 42 | class MockDetectionResult: 43 | bounding_box: MockBoundingBox 44 | confidence: float 45 | 46 | 47 | @dataclass 48 | class MockOcrResult: 49 | text: str 50 | confidence: float 51 | 52 | 53 | @dataclass 54 | class MockALPRResult: 55 | detection: MockDetectionResult 56 | ocr: MockOcrResult 57 | 58 | 59 | @pytest.fixture 60 | def mock_alpr_result(): 
61 | return MockALPRResult( 62 | detection=MockDetectionResult( 63 | bounding_box=MockBoundingBox(x1=10, y1=20, x2=100, y2=50), confidence=0.99 64 | ), 65 | ocr=MockOcrResult(text="TEST1234", confidence=0.95), 66 | ) 67 | 68 | 69 | @pytest.fixture(autouse=True) 70 | def clear_caches_and_registry(): 71 | """Clears all tool-related caches and the global registry before each test.""" 72 | # Clear the LRU caches on the functions 73 | setup_cache() 74 | tools._get_ocr_recognizer.cache_clear() 75 | tools._get_alpr_instance.cache_clear() 76 | 77 | # Clear the tool registry 78 | global_tool_registry._tools.clear() 79 | global_tool_registry._tool_definitions.clear() 80 | global_tool_registry._tool_models.clear() 81 | 82 | # Reset the placeholder models to their default state 83 | tools.RecognizePlateArgs = BaseModel 84 | tools.RecognizePlateFromPathArgs = BaseModel 85 | tools.DetectAndRecognizePlateArgs = BaseModel 86 | tools.DetectAndRecognizePlateFromPathArgs = BaseModel 87 | 88 | 89 | @pytest.fixture 90 | def tool_registry(mocker): 91 | """Provides a fresh ToolRegistry instance for isolated tests.""" 92 | registry = ToolRegistry() 93 | mocker.patch("omni_lpr.tools.tool_registry", registry) 94 | return registry 95 | 96 | 97 | def test_register_and_list_tools(tool_registry: ToolRegistry): 98 | tool_definition = types.Tool( 99 | name="test_tool", 100 | title="Test Tool", 101 | description="A tool for testing.", 102 | inputSchema={"type": "object", "properties": {}}, 103 | ) 104 | 105 | class TestArgs(BaseModel): 106 | pass 107 | 108 | @tool_registry.register(tool_definition, TestArgs) 109 | async def test_tool(_: TestArgs): 110 | return [types.TextContent(type="text", text="success")] 111 | 112 | listed_tools = tool_registry.list() 113 | assert len(listed_tools) == 1 114 | assert listed_tools[0] == tool_definition 115 | 116 | 117 | @pytest.mark.asyncio 118 | async def test_call_tool_success(tool_registry: ToolRegistry): 119 | class TestArgs(BaseModel): 120 | message: str 
121 | 122 | tool_definition = types.Tool( 123 | name="test_tool", 124 | title="Test", 125 | description="A test", 126 | inputSchema=TestArgs.model_json_schema(), 127 | ) 128 | 129 | @tool_registry.register(tool_definition, TestArgs) 130 | async def test_tool(args: TestArgs): 131 | return [types.TextContent(type="text", text=args.message)] 132 | 133 | result = await tool_registry.call("test_tool", {"message": "hello"}) 134 | assert len(result) == 1 135 | assert isinstance(result[0], types.TextContent) 136 | assert result[0].text == "hello" 137 | 138 | 139 | @pytest.mark.asyncio 140 | async def test_call_tool_validation_error(tool_registry: ToolRegistry): 141 | class TestArgs(BaseModel): 142 | message: str 143 | 144 | tool_definition = types.Tool( 145 | name="test_tool", 146 | title="Test", 147 | description="A test", 148 | inputSchema=TestArgs.model_json_schema(), 149 | ) 150 | 151 | @tool_registry.register(tool_definition, TestArgs) 152 | async def test_tool(args: TestArgs): 153 | return [types.TextContent(type="text", text=args.message)] 154 | 155 | with pytest.raises(ToolLogicError, match="Input validation failed"): 156 | await tool_registry.call("test_tool", {"wrong_arg": "hello"}) 157 | 158 | 159 | @pytest.mark.asyncio 160 | async def test_call_unknown_tool(tool_registry: ToolRegistry): 161 | with pytest.raises(ToolLogicError, match="Unknown tool: unknown_tool"): 162 | await tool_registry.call("unknown_tool", {}) 163 | 164 | 165 | @pytest.mark.asyncio 166 | async def test_recognize_plate_base64_tool_success(mocker): 167 | setup_tools() 168 | mocker.patch("anyio.to_thread.run_sync", return_value=["TEST-123"]) 169 | mock_get_image = mocker.patch( 170 | "omni_lpr.tools._get_image_from_source", return_value=AsyncMock() 171 | ) 172 | mocker.patch("omni_lpr.tools._get_ocr_recognizer", return_value=AsyncMock()) 173 | 174 | result = await global_tool_registry.call( 175 | "recognize_plate", {"image_base64": TINY_PNG_BASE64} 176 | ) 177 | 178 | assert 
json.loads(result[0].text) == ["TEST-123"] 179 | mock_get_image.assert_called_once_with(image_base64=TINY_PNG_BASE64, path=None) 180 | 181 | 182 | @pytest.mark.asyncio 183 | async def test_recognize_plate_path_tool_success(mocker): 184 | setup_tools() 185 | mocker.patch("anyio.to_thread.run_sync", return_value=["TEST-123"]) 186 | mock_get_image = mocker.patch( 187 | "omni_lpr.tools._get_image_from_source", return_value=AsyncMock() 188 | ) 189 | mocker.patch("omni_lpr.tools._get_ocr_recognizer", return_value=AsyncMock()) 190 | 191 | result = await global_tool_registry.call( 192 | "recognize_plate_from_path", {"path": "/fake/path.jpg"} 193 | ) 194 | 195 | assert json.loads(result[0].text) == ["TEST-123"] 196 | mock_get_image.assert_called_once_with(image_base64=None, path="/fake/path.jpg") 197 | 198 | 199 | @pytest.mark.asyncio 200 | async def test_detect_and_recognize_plate_base64_tool_success(mocker, mock_alpr_result): 201 | setup_tools() 202 | mocker.patch("anyio.to_thread.run_sync", return_value=[mock_alpr_result]) 203 | mock_get_image = mocker.patch( 204 | "omni_lpr.tools._get_image_from_source", return_value=AsyncMock() 205 | ) 206 | mocker.patch("omni_lpr.tools._get_alpr_instance", return_value=AsyncMock()) 207 | 208 | result = await global_tool_registry.call( 209 | "detect_and_recognize_plate", {"image_base64": TINY_PNG_BASE64} 210 | ) 211 | 212 | expected_dict = [asdict(mock_alpr_result)] 213 | assert json.loads(result[0].text) == expected_dict 214 | mock_get_image.assert_called_once_with(image_base64=TINY_PNG_BASE64, path=None) 215 | 216 | 217 | @pytest.mark.asyncio 218 | async def test_detect_and_recognize_plate_path_tool_success(mocker, mock_alpr_result): 219 | setup_tools() 220 | mocker.patch("anyio.to_thread.run_sync", return_value=[mock_alpr_result]) 221 | mock_get_image = mocker.patch( 222 | "omni_lpr.tools._get_image_from_source", return_value=AsyncMock() 223 | ) 224 | mocker.patch("omni_lpr.tools._get_alpr_instance", return_value=AsyncMock()) 225 | 
226 | result = await global_tool_registry.call( 227 | "detect_and_recognize_plate_from_path", {"path": "/fake/path.jpg"} 228 | ) 229 | 230 | expected_dict = [asdict(mock_alpr_result)] 231 | assert json.loads(result[0].text) == expected_dict 232 | mock_get_image.assert_called_once_with(image_base64=None, path="/fake/path.jpg") 233 | 234 | 235 | @pytest.mark.asyncio 236 | @pytest.mark.parametrize( 237 | "tool_name, invalid_data, expected_error_msg", 238 | [ 239 | ("recognize_plate", {"image_base64": ""}, "image_base64 cannot be empty"), 240 | ( 241 | "recognize_plate", 242 | {"image_base64": OVERSIZED_BASE64}, 243 | "Input image is too large", 244 | ), 245 | ("recognize_plate", {"image_base64": "not-base64"}, "Invalid base64 string"), 246 | ("recognize_plate_from_path", {"path": " "}, "Path cannot be empty"), 247 | ( 248 | "recognize_plate", 249 | {"image_base64": TINY_PNG_BASE64, "path": "/path"}, 250 | "Extra inputs are not permitted", 251 | ), 252 | ("recognize_plate", {}, "Field required"), 253 | ], 254 | ) 255 | async def test_tool_validation_errors(tool_name, invalid_data, expected_error_msg): 256 | setup_tools() 257 | with pytest.raises(ToolLogicError) as excinfo: 258 | await global_tool_registry.call(tool_name, invalid_data) 259 | 260 | assert excinfo.value.error.code == ErrorCode.VALIDATION_ERROR 261 | assert expected_error_msg in str(excinfo.value.error.details) 262 | 263 | 264 | @pytest.mark.asyncio 265 | async def test_recognizer_model_caching(mocker): 266 | setup_tools() 267 | mock_recognizer_instance = MagicMock() 268 | mock_recognizer_instance.run.return_value = ["CACHED"] 269 | mock_recognizer_class = mocker.patch( 270 | "fast_plate_ocr.LicensePlateRecognizer", return_value=mock_recognizer_instance 271 | ) 272 | mocker.patch("omni_lpr.tools._get_image_from_source", return_value=AsyncMock()) 273 | 274 | # Call tool with first OCR model 275 | await global_tool_registry.call( 276 | "recognize_plate", 277 | {"image_base64": TINY_PNG_BASE64, "ocr_model": 
"cct-s-v1-global-model"}, 278 | ) 279 | # Call it again, should be cached 280 | await global_tool_registry.call( 281 | "recognize_plate", 282 | {"image_base64": TINY_PNG_BASE64, "ocr_model": "cct-s-v1-global-model"}, 283 | ) 284 | mock_recognizer_class.assert_called_once_with("cct-s-v1-global-model") 285 | 286 | # Call tool with the second OCR model 287 | await global_tool_registry.call( 288 | "recognize_plate", 289 | {"image_base64": TINY_PNG_BASE64, "ocr_model": "cct-xs-v1-global-model"}, 290 | ) 291 | assert mock_recognizer_class.call_count == 2 292 | 293 | 294 | @pytest.mark.asyncio 295 | async def test_alpr_instance_caching(mocker): 296 | setup_tools() 297 | mock_alpr_instance = MagicMock() 298 | mock_alpr_instance.predict.return_value = [] 299 | mock_alpr_class = mocker.patch("fast_alpr.ALPR", return_value=mock_alpr_instance) 300 | mocker.patch("omni_lpr.tools._get_image_from_source", return_value=AsyncMock()) 301 | 302 | # Call with first set of models 303 | args_1 = { 304 | "image_base64": TINY_PNG_BASE64, 305 | "detector_model": "yolo-v9-t-384-license-plate-end2end", 306 | "ocr_model": "cct-s-v1-global-model", 307 | } 308 | await global_tool_registry.call("detect_and_recognize_plate", args_1) 309 | await global_tool_registry.call("detect_and_recognize_plate", args_1) 310 | mock_alpr_class.assert_called_once_with( 311 | detector_model="yolo-v9-t-384-license-plate-end2end", 312 | ocr_model="cct-s-v1-global-model", 313 | ocr_device="auto", 314 | detector_providers=None, 315 | ) 316 | 317 | # Call with the second set of models 318 | args_2 = { 319 | "image_base64": TINY_PNG_BASE64, 320 | "detector_model": "yolo-v9-t-256-license-plate-end2end", 321 | "ocr_model": "cct-xs-v1-global-model", 322 | } 323 | await global_tool_registry.call("detect_and_recognize_plate", args_2) 324 | assert mock_alpr_class.call_count == 2 325 | 326 | 327 | @pytest.mark.asyncio 328 | async def test_list_models(): 329 | result = await list_models(ListModelsArgs()) 330 | assert 
len(result) == 1 331 | assert isinstance(result[0], types.TextContent) 332 | models = json.loads(result[0].text) 333 | expected = { 334 | "detector_models": list(get_args(DetectorModel)), 335 | "ocr_models": list(get_args(OcrModel)), 336 | } 337 | assert models == expected 338 | 339 | 340 | @pytest.mark.asyncio 341 | async def test_get_image_from_source_url_fails(mocker): 342 | # Mock httpx to return a 404 error 343 | mock_response = httpx.Response(404) 344 | mocker.patch( 345 | "httpx.AsyncClient.get", 346 | side_effect=httpx.HTTPStatusError("Not Found", request=mocker.MagicMock(), 347 | response=mock_response) 348 | ) 349 | 350 | setup_tools() # To register the tools 351 | with pytest.raises(ToolLogicError) as exc_info: 352 | await global_tool_registry.call( 353 | "detect_and_recognize_plate_from_path", 354 | {"path": "http://example.com/notfound.jpg"} 355 | ) 356 | assert "Failed to fetch image from URL" in str(exc_info.value) 357 | 358 | 359 | @pytest.mark.asyncio 360 | async def test_get_image_from_corrupted_data(): 361 | """Tests that a corrupted image raises a ValueError.""" 362 | setup_tools() 363 | corrupted_base64 = base64.b64encode(b"this is not an image").decode("utf-8") 364 | with pytest.raises(ToolLogicError, match="not a valid image file"): 365 | await global_tool_registry.call( 366 | "recognize_plate", {"image_base64": corrupted_base64} 367 | ) 368 | 369 | 370 | @pytest.mark.asyncio 371 | async def test_unsupported_image_format_from_path(tmp_path): 372 | """Tests that an unsupported image format from a path raises a ValueError.""" 373 | setup_tools() 374 | unsupported_file = tmp_path / "test.txt" 375 | unsupported_file.write_text("this is not an image") 376 | 377 | with pytest.raises(ToolLogicError, match="not a valid image file"): 378 | await global_tool_registry.call( 379 | "recognize_plate_from_path", {"path": str(unsupported_file)} 380 | ) 381 | 382 | 383 | @pytest.mark.asyncio 384 | async def test_empty_image_data(): 385 | """Tests that 
providing empty image data raises a validation error.""" 386 | setup_tools() 387 | with pytest.raises(ToolLogicError) as exc_info: 388 | await global_tool_registry.call("recognize_plate", {"image_base64": ""}) 389 | assert "image_base64 cannot be empty" in str(exc_info.value.error.details) 390 | 391 | 392 | @pytest.mark.asyncio 393 | async def test_get_image_from_directory_path_raises_error(tmp_path): 394 | """Tests that providing a path to a directory raises a ToolLogicError.""" 395 | setup_tools() 396 | # tmp_path is a pytest fixture that provides a temporary directory 397 | directory_path = tmp_path 398 | with pytest.raises(ToolLogicError) as exc_info: 399 | await global_tool_registry.call("recognize_plate_from_path", {"path": str(directory_path)}) 400 | # The specific error can vary by OS (like IsADirectoryError on Linux), 401 | # so we check for a substring that indicates a read failure on a directory. 402 | assert "Is a directory" in str(exc_info.value) or "read failed" in str(exc_info.value) 403 | -------------------------------------------------------------------------------- /src/omni_lpr/tools.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import io 3 | import json 4 | import logging 5 | from dataclasses import asdict 6 | from functools import partial 7 | from typing import ( 8 | TYPE_CHECKING, 9 | Annotated, 10 | Any, 11 | Literal, 12 | Optional, 13 | Type, 14 | get_args, 15 | ) 16 | 17 | import anyio 18 | import httpx 19 | import mcp.types as types 20 | import numpy as np 21 | from async_lru import alru_cache 22 | from PIL import Image, UnidentifiedImageError 23 | from pydantic import ( 24 | BaseModel, 25 | ConfigDict, 26 | Field, 27 | ValidationError, 28 | ValidationInfo, 29 | field_validator, 30 | ) 31 | from pydantic_core import PydanticCustomError 32 | 33 | from .errors import ErrorCode, ToolLogicError 34 | from .settings import settings 35 | 36 | if TYPE_CHECKING: 37 | from fast_alpr 
_logger = logging.getLogger(__name__)


class ImageFetchError(Exception):
    """Fetching an image from a remote URL failed with an HTTP error status.

    Attributes:
        status_code: the HTTP status code returned by the remote server.
    """

    def __init__(self, status_code: int, message: str | None = None):
        super().__init__(message or f"Failed to fetch image from URL: {status_code}")
        self.status_code = status_code


# --- Reusable Pydantic Types and Validators ---
def _validate_base64(v: Any, _: ValidationInfo) -> str:
    """Validator ensuring a value is a valid Base64 string within the size limit."""
    if not isinstance(v, str):
        raise PydanticCustomError("not_base64_string", "A valid Base64 string is required.")
    if not v:
        raise ValueError("image_base64 cannot be empty.")

    # Base64 inflates payloads by a factor of 4/3, so derive the maximum
    # allowed encoded length from the configured decoded-image size limit.
    limit = int(settings.max_image_size_mb * 1024 * 1024 * 4 / 3)
    if len(v) > limit:
        raise ValueError(
            f"Input image is too large. The maximum size is {settings.max_image_size_mb}MB."
        )

    try:
        base64.b64decode(v)
    except (ValueError, TypeError) as e:
        raise ValueError(f"Invalid base64 string provided. Error: {e}") from e
    return v
# NOTE(review): these imports sit mid-file in the original; ideally they would
# join the top-of-file import groups. Kept local so this block stands alone.
from typing import Callable

from pydantic import BeforeValidator

# Annotated string type whose values are checked by _validate_base64 before
# Pydantic model validation runs.
Base64ImageStr = Annotated[str, BeforeValidator(_validate_base64)]

# --- Define allowed models as Literal types for validation ---
DetectorModel = Literal[
    "yolo-v9-s-608-license-plate-end2end",
    "yolo-v9-t-640-license-plate-end2end",
    "yolo-v9-t-512-license-plate-end2end",
    "yolo-v9-t-416-license-plate-end2end",
    "yolo-v9-t-384-license-plate-end2end",
    "yolo-v9-t-256-license-plate-end2end",
]

OcrModel = Literal["cct-s-v1-global-model", "cct-xs-v1-global-model"]


# --- Pydantic Models for Input Validation ---
# These models are placeholders. The actual models with dynamic default
# values (taken from settings) are defined inside setup_tools() and rebound
# over these names via `global`.
class RecognizePlateArgs(BaseModel):
    pass


class RecognizePlateFromPathArgs(BaseModel):
    pass


class DetectAndRecognizePlateArgs(BaseModel):
    pass


class DetectAndRecognizePlateFromPathArgs(BaseModel):
    pass


class ListModelsArgs(BaseModel):
    """Input arguments for listing available models."""

    model_config = ConfigDict(extra="forbid")


class ToolRegistry:
    """
    Manages the registration and execution of tools.

    Provides a centralized mechanism to register tools, their definitions,
    and their input validation models. Handles dynamic invocation of tools,
    including input validation and error handling.
    """

    def __init__(self):
        """Initializes the ToolRegistry with empty storage for tools."""
        # Fix over original: `Callable` instead of the builtin `callable`,
        # which is a function, not a type, and is meaningless as an annotation.
        self._tools: dict[str, Callable] = {}
        self._tool_definitions: list[types.Tool] = []
        self._tool_models: dict[str, Type[BaseModel]] = {}

    def register(self, tool_definition: types.Tool, model: Type[BaseModel]) -> Callable:
        """
        Returns a decorator to register a tool with its definition and model.

        Args:
            tool_definition: The MCP tool definition.
            model: The Pydantic model for input validation.

        Returns:
            A decorator that registers the decorated function as a tool.
        """

        def decorator(func: Callable) -> Callable:
            # Delegate to register_tool so decorator and direct registration
            # share one code path (and one duplicate-name check).
            self.register_tool(tool_definition, model, func)
            return func

        return decorator

    def register_tool(
        self, tool_definition: types.Tool, model: Type[BaseModel], func: Callable
    ):
        """
        Registers a tool directly without using a decorator.

        Args:
            tool_definition: The MCP tool definition.
            model: The Pydantic model for input validation.
            func: The async tool function to register.

        Raises:
            ValueError: If a tool with the same name is already registered.
        """
        name = tool_definition.name
        if name in self._tools:
            raise ValueError(f"Tool '{name}' is already registered.")
        self._tools[name] = func
        self._tool_definitions.append(tool_definition)
        self._tool_models[name] = model

    async def call_validated(
        self, name: str, validated_args: BaseModel
    ) -> list[types.ContentBlock]:
        """
        Executes a tool with already validated Pydantic model arguments.

        Internal-facing counterpart to `call`: bypasses validation and
        directly executes the tool's logic.

        Args:
            name: The name of the tool to execute.
            validated_args: An instance of the tool's Pydantic model.

        Returns:
            A list of MCP ContentBlocks produced by the tool.

        Raises:
            ToolLogicError: If the tool execution fails.
        """
        func = self._tools[name]
        try:
            return await func(validated_args)
        except ToolLogicError:
            raise  # Don't re-wrap our own errors
        except Exception as e:
            error_message = f"An unexpected error occurred in tool '{name}': {e}"
            _logger.exception(error_message)
            raise ToolLogicError(
                message=error_message,
                code=ErrorCode.TOOL_LOGIC_ERROR,
            ) from e

    async def call(self, name: str, arguments: dict) -> list[types.ContentBlock]:
        """
        Validates arguments and executes a tool by its name.

        Steps: check the tool exists, look up its Pydantic model, validate
        `arguments` against it, then invoke the implementation.

        Args:
            name: The name of the tool to call.
            arguments: A dictionary of input arguments for the tool.

        Returns:
            A list of MCP ContentBlocks produced by the tool.

        Raises:
            ToolLogicError: If the tool is unknown, no validation model is
                registered, or input validation fails.
        """
        if name not in self._tools:
            _logger.warning(f"Unknown tool requested: {name}")
            raise ToolLogicError(message=f"Unknown tool: {name}", code=ErrorCode.VALIDATION_ERROR)

        model = self._tool_models.get(name)
        if not model:
            raise ToolLogicError(
                message=f"No validation model registered for tool '{name}'.",
                code=ErrorCode.UNKNOWN_ERROR,
            )

        try:
            validated_args = model(**arguments)
        except ValidationError as e:
            _logger.error(f"Input validation failed for tool '{name}': {e}")
            raise ToolLogicError(
                message=f"Input validation failed for tool '{name}'.",
                code=ErrorCode.VALIDATION_ERROR,
                details=e.errors(),
            ) from e

        return await self.call_validated(name, validated_args)

    def list(self) -> list[types.Tool]:
        """
        Lists all registered tools.

        Returns:
            A list of MCP tool definitions.
        """
        return self._tool_definitions


tool_registry = ToolRegistry()


async def _get_ocr_recognizer(ocr_model: str) -> "LicensePlateRecognizer":
    """
    Loads a license plate OCR model.

    Caching is applied externally: setup_cache() re-wraps this function with
    alru_cache once settings are finalized (the original docstring claimed a
    decorator was present here, which it is not).
    """
    _logger.info(f"Loading license plate OCR model: {ocr_model}")
    from fast_plate_ocr import LicensePlateRecognizer

    # The LicensePlateRecognizer constructor is blocking, so run in a thread.
    return await anyio.to_thread.run_sync(LicensePlateRecognizer, ocr_model)


async def _get_image_from_source(
    *, image_base64: Optional[str] = None, path: Optional[str] = None
) -> Image.Image:
    """
    Retrieves an image from either a Base64 string or a path/URL.

    Returns a PIL Image object in RGB format.

    Raises:
        ImageFetchError: If an HTTP fetch fails with an error status.
        ValueError: If no source is given, the file is missing, or the data
            is not a valid image.
    """
    image_bytes: Optional[bytes] = None
    source_for_error_msg = ""

    if image_base64:
        source_for_error_msg = "Base64 data"
        image_bytes = base64.b64decode(image_base64)

    elif path:
        source_for_error_msg = f"path '{path}'"
        if path.startswith(("http://", "https://")):
            try:
                async with httpx.AsyncClient() as client:
                    response = await client.get(path)
                    response.raise_for_status()
                    image_bytes = await response.aread()
            except httpx.HTTPStatusError as e:
                # Raise a specific error so callers can decide how to handle
                # different HTTP status codes (e.g., 403 forbidden can be
                # treated as 'no plates' in some contexts).
                status_code = getattr(e.response, "status_code", None)
                raise ImageFetchError(status_code or -1) from e
        else:
            try:
                image_bytes = await anyio.Path(path).read_bytes()
            except FileNotFoundError as e:
                # Fix: chain the original exception (B904) for tracebacks.
                raise ValueError(f"File not found at path: {path}") from e

    if image_bytes is None:
        # Fix: test `is None`, not falsiness — an *empty* file/payload now
        # falls through to Image.open below and is reported as an invalid
        # image, which is the accurate diagnosis. This branch is only
        # reachable when neither source argument was provided, which the
        # Pydantic models normally prevent.
        raise ValueError("No image source provided.")

    try:
        image = Image.open(io.BytesIO(image_bytes))
        return image.convert("RGB")
    except UnidentifiedImageError as e:
        raise ValueError(f"Data from {source_for_error_msg} is not a valid image file.") from e
async def _recognize_plate_logic(
    ocr_model: str, image_base64: Optional[str] = None, path: Optional[str] = None
) -> list[types.ContentBlock]:
    """Core logic to recognize a license plate from an image.

    Args:
        ocr_model: Name of the OCR model to load.
        image_base64: Base64-encoded image data (mutually exclusive with path).
        path: Local file path or HTTP(S) URL of the image.

    Returns:
        A single JSON text content block with the OCR result. A 403 while
        fetching a remote image yields an empty JSON list instead of an error.
    """
    try:
        image_rgb = await _get_image_from_source(image_base64=image_base64, path=path)
    except ImageFetchError as e:
        # Treat 403 (Forbidden) as a non-fatal condition (e.g., the remote
        # host blocks access): return an empty result so the higher-level
        # API responds successfully with no plates.
        if e.status_code == 403:
            _logger.warning("Failed to load image for OCR: %s. Returning empty result.", e)
            return [types.TextContent(type="text", text=json.dumps([]))]
        # Other HTTP errors propagate and are surfaced as tool errors.
        # Non-HTTP image-loading ValueErrors likewise propagate naturally;
        # the original's no-op `except ValueError: raise` clause (TRY302)
        # was removed without changing behavior.
        raise

    recognizer = await _get_ocr_recognizer(ocr_model)
    image_np = np.array(image_rgb)
    # recognizer.run is blocking CPU work; keep it off the event loop.
    result = await anyio.to_thread.run_sync(recognizer.run, image_np)

    _logger.info(f"License plate recognized: {result}")
    return [types.TextContent(type="text", text=json.dumps(result))]
async def _get_alpr_instance(detector_model: str, ocr_model: str) -> "ALPR":
    """
    Loads an ALPR instance for a given detector and OCR model.

    Caching is handled by the alru_cache wrapper applied in setup_cache().
    """
    _logger.info(
        f"Loading ALPR instance with detector '{detector_model}', "
        f"OCR '{ocr_model}', and device '{settings.execution_device}'"
    )
    from fast_alpr import ALPR

    device = settings.execution_device
    # ocr_device does not support 'openvino', so we map it to 'cpu' in that case.
    ocr_device = "cpu" if device == "openvino" else device

    # Map the configured device onto ONNX Runtime execution providers.
    # Unknown devices yield None, leaving provider selection to the library.
    provider_map = {
        "cuda": ["CUDAExecutionProvider", "CPUExecutionProvider"],
        "openvino": ["OpenVINOExecutionProvider", "CPUExecutionProvider"],
        "cpu": ["CPUExecutionProvider"],
    }
    providers = provider_map.get(device)

    # The ALPR constructor is not async, so we run it in a worker thread.
    make_alpr = partial(
        ALPR,
        detector_model=detector_model,
        ocr_model=ocr_model,
        ocr_device=ocr_device,
        detector_providers=providers,
    )
    return await anyio.to_thread.run_sync(make_alpr)
async def _detect_and_recognize_plate_logic(
    detector_model: str,
    ocr_model: str,
    image_base64: Optional[str] = None,
    path: Optional[str] = None,
) -> list[types.ContentBlock]:
    """Core logic to detect and recognize a license plate from an image.

    Args:
        detector_model: Name of the plate detector model.
        ocr_model: Name of the OCR model.
        image_base64: Base64-encoded image data (mutually exclusive with path).
        path: Local file path or HTTP(S) URL of the image.

    Returns:
        A single JSON text content block with one entry per detected plate.
        A 403 while fetching a remote image yields an empty JSON list.
    """
    try:
        image_rgb = await _get_image_from_source(image_base64=image_base64, path=path)
    except ImageFetchError as e:
        # A remote host that blocks access (403) is treated as "no plates"
        # rather than a hard failure.
        if e.status_code == 403:
            _logger.warning("Failed to load image for detection: %s. Returning empty result.", e)
            return [types.TextContent(type="text", text=json.dumps([]))]
        # Other HTTP errors — and non-HTTP image-loading ValueErrors — are
        # reported as tool failures to the caller. The original's no-op
        # `except ValueError: raise` clause (TRY302) was removed; propagation
        # behavior is unchanged.
        raise

    alpr = await _get_alpr_instance(detector_model, ocr_model)
    image_np = np.array(image_rgb)
    # alpr.predict is blocking CPU work; keep it off the event loop.
    results = await anyio.to_thread.run_sync(alpr.predict, image_np)

    # Prediction results are dataclasses; serialize as plain dicts.
    results_dict = [asdict(res) for res in results]

    _logger.info(f"ALPR processed. Found {len(results_dict)} plate(s).")
    return [types.TextContent(type="text", text=json.dumps(results_dict))]
# --- Tool-specific wrapper functions ---


async def recognize_plate_base64_tool(args: "RecognizePlateArgs") -> list[types.ContentBlock]:
    """Tool wrapper for recognizing a plate from a Base64 image."""
    return await _recognize_plate_logic(ocr_model=args.ocr_model, image_base64=args.image_base64)


async def recognize_plate_path_tool(
    args: "RecognizePlateFromPathArgs",
) -> list[types.ContentBlock]:
    """Tool wrapper for recognizing a plate from an image path or URL."""
    return await _recognize_plate_logic(ocr_model=args.ocr_model, path=args.path)


async def detect_and_recognize_plate_base64_tool(
    args: "DetectAndRecognizePlateArgs",
) -> list[types.ContentBlock]:
    """Tool wrapper for detecting and recognizing a plate from a Base64 image."""
    return await _detect_and_recognize_plate_logic(
        detector_model=args.detector_model,
        ocr_model=args.ocr_model,
        image_base64=args.image_base64,
    )


async def detect_and_recognize_plate_path_tool(
    args: "DetectAndRecognizePlateFromPathArgs",
) -> list[types.ContentBlock]:
    """Tool wrapper for detecting and recognizing a plate from an image path or URL."""
    return await _detect_and_recognize_plate_logic(
        detector_model=args.detector_model, ocr_model=args.ocr_model, path=args.path
    )


async def list_models(_: ListModelsArgs) -> list[types.ContentBlock]:
    """Lists available detector and OCR models as a JSON text block."""
    models = {
        "detector_models": list(get_args(DetectorModel)),
        "ocr_models": list(get_args(OcrModel)),
    }
    return [types.TextContent(type="text", text=json.dumps(models))]


def setup_cache():
    """
    Sets up the cache for the model loading functions.

    This function must be called after the settings are finalized (e.g., after
    CLI overrides are applied) but before any tool is called. It re-wraps the
    model loading functions with an `alru_cache` decorator configured with the
    `model_cache_size` from the settings.

    Fix over the original: calling this more than once no longer stacks
    caches on top of caches — the undecorated loaders are remembered on the
    first call and always re-wrapped from scratch, so the currently
    configured cache size always takes effect.
    """
    global _get_ocr_recognizer, _get_alpr_instance
    if not hasattr(setup_cache, "_originals"):
        # Capture the undecorated loader functions exactly once.
        setup_cache._originals = (_get_ocr_recognizer, _get_alpr_instance)
    base_ocr, base_alpr = setup_cache._originals
    _get_ocr_recognizer = alru_cache(maxsize=settings.model_cache_size)(base_ocr)
    _get_alpr_instance = alru_cache(maxsize=settings.model_cache_size)(base_alpr)
def setup_tools():
    """
    Initializes and registers all the tools for the application.

    This is the central point for tool setup. It defines the Pydantic models
    for each tool's arguments (using settings for default values), creates a
    tool definition for each tool, and registers it — along with its model
    and implementation function — in the global `tool_registry`.

    Designed to be called at application startup. Fix over the original:
    registration is now idempotent — repeated calls (the test suite calls
    setup_tools() once per test) skip already-registered names instead of
    raising ValueError from register_tool.
    """

    # --- Dynamically Defined Pydantic Models ---
    # Defined here so the finalized `settings` can supply default values;
    # the placeholder classes at module level are rebound via `global`.
    global \
        RecognizePlateArgs, \
        RecognizePlateFromPathArgs, \
        DetectAndRecognizePlateArgs, \
        DetectAndRecognizePlateFromPathArgs

    class RecognizePlateArgs(BaseModel):
        """Input arguments for recognizing text from a license plate image."""

        model_config = ConfigDict(extra="forbid")
        image_base64: Base64ImageStr
        ocr_model: OcrModel = Field(default=settings.default_ocr_model)

    class RecognizePlateFromPathArgs(BaseModel):
        """Input arguments for recognizing text from a license plate image path."""

        model_config = ConfigDict(extra="forbid")
        path: str = Field(..., examples=["https://example.com/plate.jpg"])
        ocr_model: OcrModel = Field(default=settings.default_ocr_model)

        @field_validator("path")
        @classmethod
        def path_must_not_be_empty(cls, v: str) -> str:
            if not v or not v.strip():
                raise ValueError("Path cannot be empty.")
            return v

    class DetectAndRecognizePlateArgs(BaseModel):
        """Input arguments for detecting and recognizing a license plate from an image."""

        model_config = ConfigDict(extra="forbid")
        image_base64: Base64ImageStr
        detector_model: DetectorModel = Field(default=settings.default_detector_model)
        ocr_model: OcrModel = Field(default=settings.default_ocr_model)

    class DetectAndRecognizePlateFromPathArgs(BaseModel):
        """Input arguments for detecting and recognizing a license plate from a path."""

        model_config = ConfigDict(extra="forbid")
        path: str = Field(..., examples=["https://example.com/car.jpg"])
        detector_model: DetectorModel = Field(default=settings.default_detector_model)
        ocr_model: OcrModel = Field(default=settings.default_ocr_model)

        @field_validator("path")
        @classmethod
        def path_must_not_be_empty(cls, v: str) -> str:
            if not v or not v.strip():
                raise ValueError("Path cannot be empty.")
            return v

    # --- Tool Registration ---
    already_registered = {tool.name for tool in tool_registry.list()}

    def _register(definition: types.Tool, model: Type[BaseModel], func) -> None:
        # Skip tools registered by an earlier setup_tools() call so this
        # function stays idempotent (register_tool raises on duplicates).
        if definition.name in already_registered:
            _logger.debug("Tool '%s' is already registered; skipping.", definition.name)
            return
        tool_registry.register_tool(tool_definition=definition, model=model, func=func)

    # Tool 1: recognize_plate
    _register(
        types.Tool(
            name="recognize_plate",
            title="Recognize License Plate",
            description="Recognizes text from a pre-cropped image of a license plate.",
            inputSchema=RecognizePlateArgs.model_json_schema(),
        ),
        RecognizePlateArgs,
        recognize_plate_base64_tool,
    )

    # Tool 2: recognize_plate_from_path
    _register(
        types.Tool(
            name="recognize_plate_from_path",
            title="Recognize License Plate from Path",
            description="Recognizes text from a pre-cropped license plate image located at a given URL or local file path.",
            inputSchema=RecognizePlateFromPathArgs.model_json_schema(),
        ),
        RecognizePlateFromPathArgs,
        recognize_plate_path_tool,
    )

    # Tool 3: detect_and_recognize_plate
    _register(
        types.Tool(
            name="detect_and_recognize_plate",
            title="Detect and Recognize License Plate",
            description="Detects and recognizes all license plates available in an image.",
            inputSchema=DetectAndRecognizePlateArgs.model_json_schema(),
        ),
        DetectAndRecognizePlateArgs,
        detect_and_recognize_plate_base64_tool,
    )

    # Tool 4: detect_and_recognize_plate_from_path
    _register(
        types.Tool(
            name="detect_and_recognize_plate_from_path",
            title="Detect and Recognize License Plate from Path",
            description="Detects and recognizes license plates in an image at a given URL or local file path.",
            inputSchema=DetectAndRecognizePlateFromPathArgs.model_json_schema(),
        ),
        DetectAndRecognizePlateFromPathArgs,
        detect_and_recognize_plate_path_tool,
    )

    # Tool 5: list_models
    _register(
        types.Tool(
            name="list_models",
            title="List Available Models",
            description="Lists the available detector and OCR models.",
            inputSchema=ListModelsArgs.model_json_schema(),
        ),
        ListModelsArgs,
        list_models,
    )